// ModelAi/server.js — Express server: HunYuan Vision image analysis with MySQL chat logging.
// server.js
// Core dependencies: HTTP framework, multipart upload handling, CORS,
// filesystem/path utilities, body parsing, the OpenAI-compatible HunYuan
// client, and the promise-based MySQL driver.
const express = require("express");
const multer = require("multer");
const cors = require("cors");
const fs = require("fs");
const path = require("path");
const OpenAI = require("openai");
const bodyParser = require("body-parser");
const mysql = require("mysql2/promise");

// Application instance and listening port.
const app = express();
const port = 30003;
// Initialize the MySQL connection pool used for chat-message persistence.
// SECURITY: credentials were hard-coded in source; read them from the
// environment, keeping the original literals only as a backward-compatible
// fallback for existing deployments. Rotate the exposed password.
const pool = mysql.createPool({
  host: process.env.DB_HOST ?? "localhost",
  user: process.env.DB_USER ?? "root",
  password: process.env.DB_PASSWORD ?? "maibu520",
  database: process.env.DB_NAME ?? "fastbee",
  waitForConnections: true, // queue queries instead of erroring when the pool is busy
  connectionLimit: 10,
});
// Body parsers for large JSON / form payloads (base64 images can be big).
// FIX: express.json({ limit: "200mb" }) and express.urlencoded() were also
// registered after these; because body-parser runs first and marks the body
// as parsed, those duplicates never took effect. They are removed so the
// single 500mb limit is the one source of truth.
app.use(bodyParser.json({ limit: "500mb" }));
app.use(bodyParser.urlencoded({ limit: "500mb", extended: true }));

// Multer destination for multipart file uploads, capped at 500 MB.
const upload = multer({
  dest: "uploads/",
  limits: { fileSize: 500 * 1024 * 1024 }, // 500MB
});

// Node <20 compatibility: expose the WHATWG File class globally
// (the openai SDK expects it to exist).
if (typeof File === "undefined") {
  globalThis.File = require("node:buffer").File;
}

// Enable CORS for all origins.
app.use(cors());
// Initialize the HunYuan client via its OpenAI-compatible endpoint.
// SECURITY: the API key was committed in source; read it from the
// environment, falling back to the original literal so existing setups
// keep working. This key has been exposed in version control — rotate it.
const client = new OpenAI({
  apiKey:
    process.env.HUNYUAN_API_KEY ??
    "sk-LVfG90qgdhf9kKQUucqBSLioxamDu7gBeW9boXqKOxIDJt7H",
  baseURL: "https://api.hunyuan.cloud.tencent.com/v1",
  timeout: 120000, // 120 s — vision completions can be slow
});
// GET /test-db — database connectivity smoke test: inserts a probe row
// into ai_chat_messages. Responds 200 "OK" on success.
// FIX: a failure previously still answered HTTP 200 with body "FAIL",
// which health checks cannot distinguish from success; now returns 500.
app.get("/test-db", async (req, res) => {
  try {
    await pool.query(
      "INSERT INTO ai_chat_messages (session_id, role, content) VALUES (?, 'user', ?)",
      ["sess1", "hello"]
    );
    res.send("OK");
  } catch (err) {
    console.error(err);
    res.status(500).send("FAIL");
  }
});
// ====================== API ======================
// POST /analyze — stream a HunYuan Vision analysis of an optional base64
// image plus a text prompt, persisting both sides of the exchange.
// Body: { userPrompt?, imageBase64?, userId?, sessionId? } (JSON or multipart).
// Response: SSE-style "data: {json}" chunks terminated by "data: [DONE]".
app.post("/analyze", upload.single("file"), async (req, res) => {
  try {
    res.setHeader("Content-Type", "text/plain; charset=utf-8");
    res.setHeader("Transfer-Encoding", "chunked");
    res.setHeader("Cache-Control", "no-cache");
    res.setHeader("Connection", "keep-alive");
    const { userPrompt, imageBase64, userId, sessionId } = req.body;
    // Generate a session id when the client did not supply one.
    const sessId = sessionId || Date.now().toString();

    // 1. Persist the user's question.
    await pool.query(
      "INSERT INTO ai_chat_messages (session_id, role, content) VALUES (?, 'user', ?)",
      [sessId, userPrompt || ""]
    );

    // 2. Call HunYuan Vision with a streaming completion.
    const completion = await client.chat.completions.create({
      model: "hunyuan-vision",
      stream: true,
      do_scene: true,
      do_behavior: true,
      llm_model: "hunyuan-turbos-latest",
      messages: [
        {
          role: "system",
          content: "你是一个图像分析专家,擅长描述和解释图片内容。",
        },
        {
          role: "user",
          content: [
            { type: "text", text: userPrompt || "请描述图片内容。" },
            imageBase64
              ? {
                  type: "image_url",
                  image_url: {
                    // Accept either a full data URL or bare base64.
                    url: imageBase64.startsWith("data:")
                      ? imageBase64
                      : `data:image/png;base64,${imageBase64}`,
                  },
                }
              : null,
          ].filter(Boolean), // drop the image slot when no image was sent
        },
      ],
    });

    // 3. Relay the stream to the client while accumulating the full reply.
    let assistantContent = "";
    for await (const chunk of completion) {
      const delta = chunk.choices[0]?.delta?.content || "";
      if (delta) {
        assistantContent += delta;
        res.write(`data: ${JSON.stringify({ content: delta })}\n\n`);
      }
    }
    res.write("data: [DONE]\n\n");
    res.end();

    // 4. Persist the assistant reply.
    // FIX: the original inserted into "chat_messages", inconsistent with the
    // "ai_chat_messages" table used by every other query in this file, so
    // assistant replies went to a different (likely nonexistent) table.
    await pool.query(
      "INSERT INTO ai_chat_messages (session_id, role, content) VALUES (?, 'assistant', ?)",
      [sessId, assistantContent]
    );
  } catch (err) {
    console.error("❌ 调用失败:", err.response?.data || err.message);
    // FIX: once streaming has begun the headers are already sent and
    // res.status(...).json(...) would throw ERR_HTTP_HEADERS_SENT;
    // just terminate the response in that case.
    if (res.headersSent) {
      res.end();
    } else {
      res.status(500).json({ code: 1, error: err.response?.data || err.message });
    }
  }
});
// Expose uploaded files for direct download under the /uploads prefix
// (served from the same "uploads" directory multer writes into).
const uploadsStatic = express.static("uploads");
app.use("/uploads", uploadsStatic);

// Start listening; the callback logs the reachable URL once the port is bound.
const onReady = () => console.log(`✅ 服务器运行http://localhost:${port}`);
app.listen(port, onReady);