esp-ai-plugin-llm-chatglm
const { OpenAI } = require('openai');
// Cache one client per device so the object is not re-created for every user
const device_open_obj = {};
module.exports = {
// Plugin name
name: "esp-ai-plugin-llm-chatglm",
// Plugin type: LLM | TTS | IAT
type: "LLM",
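// Called by the framework: either to warm up the connection (is_pre_connect) or to stream a reply for `text` back through cb()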
main({ devLog, device_id, is_pre_connect, llm_config, text, llmServerErrorCb, llm_init_messages = [], llm_historys = [], cb, llm_params_set, logWSServer, connectServerBeforeCb, connectServerCb, log }) {
try {
const { api_key, model, ...other_config } = llm_config;
if (!api_key) return log.error(`Please provide the api_key parameter in the chatglm configuration.`);
if (!model) return log.error(`Please provide the model parameter in the chatglm configuration.`);
// Pre-connect: warm up the service so the first real request is faster
async function preConnect() {
const params = {
apiKey: api_key,
baseURL: 'https://open.bigmodel.cn/api/paas/v4/',
model: model
};
const openai = new OpenAI(llm_params_set ? llm_params_set({ ...params }) : params);
await openai.chat.completions.create({
model: model,
messages: [{ "role": "user", "content": "test" }],
stream: false,
});
}
if (is_pre_connect) {
// Fire-and-forget warm-up; catch the rejection so a failed warm-up cannot crash the process
preConnect().catch((err) => log.error("chatglm pre-connect failed: " + err));
return;
}
// If the caller closes the connection while chunks are still arriving, this flag stops further processing
let shouldClose = false;
// This object shape is fixed by the framework; every LLM plugin must define it like this
const texts = {
all_text: "",
count_text: "",
index: 0,
};
// Tell the framework we are about to connect to the LLM service
connectServerBeforeCb();
let openai = device_open_obj[device_id];
if (!openai) {
openai = new OpenAI({
apiKey: api_key,
baseURL: 'https://open.bigmodel.cn/api/paas/v4/',
});
device_open_obj[device_id] = openai;
}
async function main() {
try {
const stream = await openai.chat.completions.create({
messages: [
...llm_init_messages,
...llm_historys,
{
"role": "user",
"content": text
},
],
model: model,
top_p: 0.7,
temperature: 0.9,
// Pass through any extra request params from llm_config (user values override the defaults above)
...other_config,
stream: true,
});
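// The streaming request was accepted; report the LLM service as connected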
connectServerCb(true);
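// Hand the framework a close handle so it can abort the stream mid-reply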
logWSServer({
close: () => {
connectServerCb(false);
stream.controller.abort();
shouldClose = true;
}
});
for await (const part of stream) {
if (shouldClose) break;
const chunk_text = part.choices[0]?.delta?.content || '';
devLog === 2 && log.llm_info('LLM output:', chunk_text);
texts["count_text"] += chunk_text;
cb({ text, texts, chunk_text: chunk_text });
}
if (shouldClose) return;
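// The stream ended normally; send the final callback so the framework knows the reply is complete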
cb({
text,
is_over: true,
texts,
shouldClose,
});
connectServerCb(false);
devLog && log.llm_info('===');
devLog && log.llm_info(texts["count_text"]);
devLog && log.llm_info('===');
devLog && log.llm_info('LLM connection closed!\n');
} catch (error) {
console.log(error);
llmServerErrorCb("chatglm LLM error: " + error);
connectServerCb(false);
}
}
main();
} catch (err) {
console.log(err);
log.error("chatglm LLM 插件错误:", err);
connectServerCb(false);
}
}
};
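
Usage sketch: registering the plugin in an esp-ai server. This is an assumption based on the general esp-ai plugin convention, not taken from this file — the option names (llm_server, plugins, llm_config), the placement of llm_params_set, and the model id "glm-4" should all be verified against the framework docs:

const espAi = require("esp-ai");

espAi({
    // Select this plugin by the name it exports above
    llm_server: "esp-ai-plugin-llm-chatglm",
    plugins: [require("esp-ai-plugin-llm-chatglm")],
    llm_config: {
        api_key: "<your Zhipu AI api_key>", // required by this plugin
        model: "glm-4",                     // required; example ChatGLM model id
    },
    // Optional hook (placement assumed): tweak client params before the OpenAI client is created
    llm_params_set: (params) => params,
});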