UNPKG

koishi-plugin-chatluna-qwen-adapter

Version:
600 lines (590 loc) 22.4 kB
// ---------------------------------------------------------------------------
// esbuild CommonJS bundling preamble (generated helpers; do not edit by hand).
// ---------------------------------------------------------------------------
var __defProp = Object.defineProperty; var __getOwnPropDesc = Object.getOwnPropertyDescriptor; var __getOwnPropNames = Object.getOwnPropertyNames; var __hasOwnProp = Object.prototype.hasOwnProperty;
// Re-attaches a stable `name` to a function/class after bundling (keep-names).
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
// Wraps a bundled CommonJS module so its body runs lazily on first require.
var __commonJS = (cb, mod) => function __require() { return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports; };
// Defines live getter re-exports on `target` for every entry in `all`.
var __export = (target, all) => { for (var name2 in all) __defProp(target, name2, { get: all[name2], enumerable: true }); };
// Copies own properties from `from` to `to` as getters, skipping `except`.
var __copyProps = (to, from, except, desc) => { if (from && typeof from === "object" || typeof from === "function") { for (let key of __getOwnPropNames(from)) if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); } return to; };
// Marks the exports object as an ES module for interop.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/locales/zh-CN.schema.yml
// Chinese (zh-CN) localization strings for the plugin's Schema config UI,
// compiled from YAML into a lazily-required CommonJS module.
var require_zh_CN_schema = __commonJS({ "src/locales/zh-CN.schema.yml"(exports2, module2) { module2.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["阿里云百炼的 API Key", "API 请求地址(可选)", "是否启用此配置"], $desc: "阿里云百炼的 API Key 列表。" }, additionalModels: { $desc: "额外模型列表。", $inner: { model: "模型名称", modelType: "模型类型", contextSize: "模型上下文大小", modelCapabilities: { $desc: "模型支持的能力", $inner: ["工具调用", "图片视觉输入"] } } } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机。", enableSearch: "是否启用模型自带夸克搜索功能。" }] }; } });

// src/locales/en-US.schema.yml
// English (en-US) localization strings; structure mirrors the zh-CN schema.
var require_en_US_schema = __commonJS({ "src/locales/en-US.schema.yml"(exports2, module2) { module2.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Tongyi Qianwen API Key", "API Endpoint (optional)", "Enabled"], $desc: "Tongyi Qianwen API Keys" }, additionalModels: { $desc: "Additional models", $inner: { model: "Model name", modelType: "Model type", contextSize: "Context size", modelCapabilities: { $desc: "Model supported capabilities", $inner: ["Tool calling", "Visual image input"] } } } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (higher values increase randomness)", enableSearch: "Enable built-in Quark search" }] }; } });

// src/index.ts
// Plugin entry: re-exports Config / apply / inject / name via live getters.
var index_exports = {};
__export(index_exports, { Config: () => Config, apply: () => apply, inject: () => inject, name: () => name });
module.exports = __toCommonJS(index_exports);
var import_chat = require("koishi-plugin-chatluna/services/chat");
var import_koishi = require("koishi");

// src/client.ts — platform client framework imports.
var import_client = require("koishi-plugin-chatluna/llm-core/platform/client");
var import_model = require("koishi-plugin-chatluna/llm-core/platform/model");
var import_types2 = require("koishi-plugin-chatluna/llm-core/platform/types");
var import_error2 = require("koishi-plugin-chatluna/utils/error");

// src/requester.ts — streaming/requester imports.
var import_outputs = require("@langchain/core/outputs");
var import_error = require("koishi-plugin-chatluna/utils/error");
var import_sse = require("koishi-plugin-chatluna/utils/sse");
var import_logger = require("koishi-plugin-chatluna/utils/logger");
var import_api = require("koishi-plugin-chatluna/llm-core/platform/api");

// src/utils.ts — message/tool conversion imports.
var import_messages = require("@langchain/core/messages");
var import_zod_to_json_schema = require("zod-to-json-schema");
var import_string = require("koishi-plugin-chatluna/utils/string");
var import_v1_shared_adapter = require("@chatluna/v1-shared-adapter");
var import_types = require("@langchain/core/utils/types");

// Maps a list of LangChain tools to QWen tool descriptors; returns undefined
// for an empty list so the `tools` request field can be omitted entirely.
function formatToolsToQWenTools(tools) { if (tools.length < 1) { return void 0; } return tools.map(formatToolToQWenTool); }
__name(formatToolsToQWenTools, "formatToolsToQWenTools");

// Converts one LangChain tool into the OpenAI-compatible function-tool shape.
// Zod v3 schemas are first converted to JSON Schema; `additionalProperties`
// keys are stripped via the shared adapter helper.
function formatToolToQWenTool(tool) { const parameters = (0, import_v1_shared_adapter.removeAdditionalProperties)((0, import_types.isZodSchemaV3)(tool.schema) ? (0, import_zod_to_json_schema.zodToJsonSchema)(tool.schema, { allowedAdditionalProperties: void 0 }) : tool.schema); return { type: "function", function: { name: tool.name, description: tool.description,
// any?
// eslint-disable-next-line @typescript-eslint/no-explicit-any
parameters } }; }
__name(formatToolToQWenTool, "formatToolToQWenTool");

// Converts LangChain messages into the QWen (OpenAI-compatible) wire format.
// - AI messages carry serialized tool_calls (arguments round-tripped through
//   JSON.parse/stringify to normalize them).
// - For vision-capable model names and messages carrying
//   `additional_kwargs.images`, content becomes a [text, image_url...] array.
// - Otherwise, image-URL content parts are resolved via fetchImageUrl; on
//   failure the original part is kept (deliberate best-effort).
async function langchainMessageToQWenMessage(messages, plugin, model) { const result = []; for (const rawMessage of messages) { const role = messageTypeToQWenRole(rawMessage.getType()); const msg = { content: rawMessage.content || null, name: role === "assistant" || role === "tool" ? rawMessage.name : void 0, role, tool_call_id: rawMessage.tool_call_id }; if (rawMessage.getType() === "ai") { const toolCalls = rawMessage.tool_calls; if (Array.isArray(toolCalls) && toolCalls.length > 0) { msg.tool_calls = toolCalls.map((toolCall) => ({ id: toolCall.id, type: "function", function: { name: toolCall.name, arguments: JSON.stringify(toolCall.args) } })); } }
// Drop null/undefined fields so they are not serialized in the request.
if (msg.tool_calls == null) { delete msg.tool_calls; } if (msg.tool_call_id == null) { delete msg.tool_call_id; } if (msg.tool_calls) { for (const toolCall of msg.tool_calls) { const tool = toolCall.function; if (!tool.arguments) { continue; } tool.arguments = JSON.stringify(JSON.parse(tool.arguments)); } } const images = rawMessage.additional_kwargs.images; if ((model?.includes("qwen-vl") || model?.includes("omni") || model?.includes("qwen2.5-vl") || model?.includes("qwen2.5-omni") || model?.includes("qwen-omni") || model?.includes("qwen2-vl") || model?.includes("qvq")) && images != null) { msg.content = [{ type: "text", text: rawMessage.content }]; for (const image of images) { msg.content.push({ type: "image_url", image_url: { url: image, detail: "low" } }); } } else if (Array.isArray(msg.content) && msg.content.length > 0) { msg.content = await Promise.all(msg.content.map(async (content) => { if (!(0, import_string.isMessageContentImageUrl)(content)) return content; try { const url = await (0, import_v1_shared_adapter.fetchImageUrl)(plugin, content); return { type: "image_url", image_url: { url, detail: "low" } }; } catch { return content; } })); } result.push(msg); } return result; }
__name(langchainMessageToQWenMessage, "langchainMessageToQWenMessage");

// Maps a LangChain message type to the QWen chat role; throws on unknown types.
function messageTypeToQWenRole(type) { switch (type) { case "system": return "system"; case "ai": return "assistant"; case "human": return "user"; case "function": return "function"; case "tool": return "tool"; default: throw new Error(`Unknown message type: ${type}`); } }
__name(messageTypeToQWenRole, "messageTypeToQWenRole");

// Converts one streamed SSE delta into the matching LangChain message chunk.
// `reasoning_content` (when non-empty) is preserved in additional_kwargs;
// assistant deltas also carry incremental tool_call_chunks.
function convertDeltaToMessageChunk(delta, defaultRole) { const role = ((delta.role?.length ?? 0) > 0 ? delta.role : defaultRole).toLowerCase(); const content = delta.content ?? ""; const reasoningContent = delta.reasoning_content ?? ""; const additionalKwargs = {}; if (reasoningContent.length > 0) { additionalKwargs.reasoning_content = reasoningContent; } if (role === "user") { return new import_messages.HumanMessageChunk({ content }); } else if (role === "assistant") { const toolCallChunks = []; if (Array.isArray(delta.tool_calls)) { for (const rawToolCall of delta.tool_calls) { toolCallChunks.push({ name: rawToolCall.function?.name, args: rawToolCall.function?.arguments, id: rawToolCall.id, index: rawToolCall.index }); } } return new import_messages.AIMessageChunk({ content, tool_call_chunks: toolCallChunks, additional_kwargs: additionalKwargs }); } else if (role === "system") { return new import_messages.SystemMessageChunk({ content }); } else if (role === "function") { return new import_messages.FunctionMessageChunk({ content, additional_kwargs: additionalKwargs, name: delta.name }); } else if (role === "tool") { return new import_messages.ToolMessageChunk({ content, additional_kwargs: additionalKwargs, tool_call_id: delta.tool_call_id }); } else { return new import_messages.ChatMessageChunk({ content, role }); } }
__name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");

// src/requester.ts
var import_v1_shared_adapter2 = require("@chatluna/v1-shared-adapter");
var import_messages2 = require("@langchain/core/messages");
var import_koishi_plugin_chatluna = require("koishi-plugin-chatluna");

// HTTP requester for the DashScope OpenAI-compatible endpoint: streams chat
// completions over SSE and delegates embeddings to the shared v1 adapter.
var QWenRequester = class extends import_api.ModelRequester { constructor(ctx, _configPool, _pluginConfig, _plugin) { super(ctx, _configPool, _pluginConfig, _plugin); this._pluginConfig = _pluginConfig; } static { __name(this, "QWenRequester"); }
// Async generator yielding ChatGenerationChunk objects for one completion.
// Model-name suffixes select thinking mode: "-thinking"/"-non-thinking" are
// stripped from the name sent to the API; names containing "default" get
// "-default" rewritten to "-thinking".
// NOTE(review): the "-default" -> "-thinking" replacement looks suspicious
// (it re-adds a "thinking" suffix after the branch that strips it) — confirm
// against the DashScope model catalog.
async *completionStreamInternal(params) { let model = params.model; let enabledThinking = null; if (model.includes("thinking")) { enabledThinking = !model.includes("-non-thinking"); model = model.replace("-non-thinking", "").replace("-thinking", ""); } else if (model.includes("default")) { enabledThinking = true; model = model.replace("-default", "-thinking"); }
// Search and tool calling are suppressed for "vl" (vision) models.
// NOTE(review): the field is sent as `enabled_thinking`, but DashScope's
// OpenAI-compatible API documents this parameter as `enable_thinking` —
// verify; if the docs are right, thinking mode is silently ignored here.
const requestParams = { model, messages: await langchainMessageToQWenMessage(params.input, this._plugin, model), tools: params.tools != null && !params.model.includes("vl") ? formatToolsToQWenTools(params.tools) : void 0, stream: true, top_p: params.topP, temperature: params.temperature, enable_search: params.model.includes("vl") ? void 0 : this._pluginConfig.enableSearch, enabled_thinking: enabledThinking }; try { const response = await this.post("chat/completions", requestParams, { signal: params.signal }); let iterator; try { iterator = (0, import_sse.sseIterable)(response); } catch (e) {
// Map the provider's content-moderation failure onto the dedicated
// unsafe-content error code; rethrow anything else untouched.
if (e instanceof import_error.ChatLunaError && e.message.includes("data_inspection_failed")) { throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_UNSAFE_CONTENT, e); } throw e; } const defaultRole = "assistant";
// Accumulated reasoning text plus wall-clock timing: reasoningTime holds a
// start timestamp while reasoning streams, then the elapsed milliseconds
// once the first regular content token arrives (set exactly once).
let reasoningContent = ""; let isSetReasoningTime = false; let reasoningTime = 0; for await (const event of iterator) { const chunk = event.data; if (chunk === "[DONE]") { return; } let data; try { data = JSON.parse(chunk); } catch (err) { throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, new Error("error when calling qwen completion, Result: " + chunk)); } const choice = data.choices?.[0];
// Usage frames carry no text; surface token counts via response_metadata.
if (data.usage) { yield new import_outputs.ChatGenerationChunk({ message: new import_messages2.AIMessageChunk({ content: "", response_metadata: { tokenUsage: { promptTokens: data.usage.prompt_tokens, completionTokens: data.usage.completion_tokens, totalTokens: data.usage.total_tokens } } }), text: "" }); } if (!choice) { continue; } const delta = choice.delta; const messageChunk = convertDeltaToMessageChunk(delta, defaultRole); if (delta.reasoning_content) { reasoningContent = reasoningContent + delta.reasoning_content; if (reasoningTime === 0) { reasoningTime = Date.now(); } } if ((delta.reasoning_content == null || delta.reasoning_content === "") && delta.content && delta.content.length > 0 && reasoningTime > 0 && !isSetReasoningTime) { reasoningTime = Date.now() - reasoningTime; messageChunk.additional_kwargs.reasoning_time = reasoningTime; isSetReasoningTime = true; } const generationChunk = new import_outputs.ChatGenerationChunk({ message: messageChunk, text: messageChunk.content }); yield generationChunk; if (choice.finish_reason === "stop") { break; } } if (reasoningContent.length > 0) { import_koishi_plugin_chatluna.logger.debug("reasoningContent: " + reasoningContent + ", reasoningTime: " + reasoningTime / 1e3 + "s"); } } catch (e) {
// On any failure, optionally persist the request payload for debugging,
// then rethrow as (or wrapped in) a ChatLunaError.
if (this.ctx.chatluna.config.isLog) { await (0, import_logger.trackLogToLocal)("Request", JSON.stringify(requestParams), this.logger); } if (e instanceof import_error.ChatLunaError) { throw e; } else { throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, e); } } }
// Embeddings are delegated entirely to the shared v1 adapter helpers.
async embeddings(params) { const requestContext = (0, import_v1_shared_adapter2.createRequestContext)(this.ctx, this._config.value, this._pluginConfig, this._plugin, this); return await (0, import_v1_shared_adapter2.createEmbeddings)(requestContext, params); }
// Base URL for DashScope's OpenAI-compatible mode.
concatUrl(url) { return "https://dashscope.aliyuncs.com/compatible-mode/v1/" + url; } get logger() { return this.ctx.logger("chatluna-qwen-adapter"); } };

// src/client.ts
var import_v1_shared_adapter3 = require("@chatluna/v1-shared-adapter");

// Platform client: advertises the built-in + user-configured model catalog
// and constructs chat/embedding model instances backed by QWenRequester.
var QWenClient = class extends import_client.PlatformModelAndEmbeddingsClient { constructor(ctx, _config, plugin) { super(ctx, plugin.platformConfigPool); this._config = _config; this.plugin = plugin; this._requester = new QWenRequester(ctx, plugin.platformConfigPool, _config, plugin); } static { __name(this, "QWenClient"); } platform = "qwen"; _requester;
// Returns the hard-coded [name, contextSize] catalog plus any user-defined
// additional models. Capabilities are inferred from name substrings; names
// containing "embedding" become embedding models.
// NOTE(review): "qwen-omni-turbo" appears twice in this list — likely an
// accidental duplicate; confirm whether downstream consumers dedupe.
async refreshModels() { const rawModels = [["qwen-turbo", 1e5], ["qwen-long", 1e6], ["qwen-plus", 131072], ["qwen-plus-character", 32768], ["qwen-max", 30720], ["qwen-max-latest", 131072], ["qwen3-max", 262144], ["qwen-plus-latest-non-thinking", 1e6], ["qwen-plus-latest-thinking", 1e6], ["qwen-turbo-latest-non-thinking", 1e6], ["qwen-turbo-latest-thinking", 1e6], ["qwen-flash", 1e6], ["qwen3-vl-plus-thinking", 262144], ["qwen3-vl-plus-non-thinking", 262144], ["qwen-vl-max", 131072], ["qwen-vl-max-latest", 131072], ["qwen-vl-plus", 131072], ["qwen-vl-plus-latest", 131072], ["qwen-vl-ocr", 34096], ["qwen-vl-ocr-latest", 34096], ["qwq-32b-preview", 30720], ["qvq-72b-preview", 30720], ["qwq-plus", 131072], ["qwq-plus-latest", 131072], ["qwen-omni-turbo", 32768], ["qwen-omni-turbo-latest", 32768], ["qwen-math-plus", 4e3], ["qwen-math-turbo", 4e3], ["qwen3-next-80b-a3b-default", 126976], ["qwen3-next-80b-a3b-instruct", 126024], ["qwen3-235b-a22b-default-2507", 131072], ["qwen3-235b-a22b-instruct-2507", 131072], ["qwen3-32b-thinking", 131072], ["qwen3-32b-non-thinking", 131072], ["qwen3-30b-a3b-thinking", 131072], ["qwen3-30b-a3b-non-thinking", 131072], ["qwen3-14b-thinking", 131072], ["qwen3-14b-non-thinking", 131072], ["qwen3-8b-thinking", 131072], ["qwen3-8b-non-thinking", 131072], ["qwen3-4b-thinking", 131072], ["qwen3-4b-non-thinking", 131072], ["qwen3-1.7b-thinking", 30720], ["qwen3-1.7b-non-thinking", 30720], ["qwen3-0.6b-thinking", 30720], ["qwen3-0.6b-non-thinking", 30720], ["qwen3-omni-flash-thinking", 65536], ["qwen3-omni-flash-non-thinking", 65536], ["qwen-omni-turbo", 32768], ["qwen-omni-latest", 32768], ["qwen3-vl-235b-a22b-default", 131072], ["qwen3-vl-235b-a22b-instruct", 131072], ["qwen2.5-vl-72b-instruct", 131072], ["qwen2.5-vl-32b-instruct", 129024], ["qwen2.5-vl-7b-instruct", 8192], ["qwen2.5-vl-3b-instruct", 8192], ["qwen-vl-v1", 8e3], ["Moonshot-Kimi-K2-Instruct", 131072], ["deepseek-r1", 131072], ["deepseek-v3", 65536], ["text-embedding-v1", 2048], ["text-embedding-v2", 2048], ["text-embedding-v3", 8192]]; const additionalModels = this._config.additionalModels.map(({ model, modelType, contextSize, modelCapabilities }) => ({ name: model, type: modelType === "Embeddings 嵌入模型" ? import_types2.ModelType.embeddings : import_types2.ModelType.llm, capabilities: modelCapabilities, maxTokens: contextSize ?? 4096 })); return rawModels.map(([model, token]) => { return { name: model, type: model.includes("embedding") ? import_types2.ModelType.embeddings : import_types2.ModelType.llm, maxTokens: token, capabilities: [(model.includes("qwen-plus") || model.includes("qwen-max") || model.includes("qwen-turbo") || model.includes("qwen3") || model.includes("qwen2.5") || model.includes("omni") || model.includes("Kimi-K2") || model.includes("deepseek")) && import_types2.ModelCapabilities.ToolCall, (0, import_v1_shared_adapter3.supportImageInput)(model) && import_types2.ModelCapabilities.ImageInput].filter(Boolean) }; }).concat(additionalModels); }
// Instantiates a chat model (LLM) or embeddings model for a known model name;
// throws MODEL_NOT_FOUND for unknown names. maxTokenLimit applies the
// user-configured maxContextRatio to the model's context window.
_createModel(model) { const info = this._modelInfos[model]; if (info == null) { throw new import_error2.ChatLunaError(import_error2.ChatLunaErrorCode.MODEL_NOT_FOUND); } if (info.type === import_types2.ModelType.llm) { const modelMaxContextSize = info.maxTokens; return new import_model.ChatLunaChatModel({ modelInfo: info, requester: this._requester, model, modelMaxContextSize, maxTokenLimit: Math.floor((info.maxTokens || modelMaxContextSize || 128e3) * this._config.maxContextRatio), timeout: this._config.timeout, temperature: this._config.temperature, maxRetries: this._config.maxRetries, llmType: "qwen", isThinkModel: model.includes("reasoner") || model.includes("r1") || model.includes("thinking") || model.includes("qwq") }); } return new import_model.ChatLunaEmbeddings({ client: this._requester, model: info.name, batchSize: 5, maxRetries: this._config.maxRetries }); } };

// src/index.ts
var import_types3 = require("koishi-plugin-chatluna/llm-core/platform/types");

// Koishi plugin entry point: on "ready", registers the QWen platform with
// only the enabled, non-empty API keys, then initializes the client.
function apply(ctx, config) { ctx.on("ready", async () => { const plugin = new import_chat.ChatLunaPlugin(ctx, config, "qwen"); plugin.parseConfig((config2) => { return config2.apiKeys.filter(([apiKey, enabled]) => { return apiKey.length > 0 && enabled; }).map(([apiKey]) => { return { apiKey, apiEndpoint: "", platform: "qwen", chatLimit: config2.chatTimeLimit, timeout: config2.timeout, maxRetries: config2.maxRetries, concurrentMaxSize: config2.chatConcurrentMaxSize }; }); }); plugin.registerClient(() => new QWenClient(ctx, config, plugin)); await plugin.initClient(); }); }
__name(apply, "apply");

// Config schema: base ChatLuna options + API keys / additional models +
// model parameters, with zh-CN and en-US i18n descriptions attached.
var Config = import_koishi.Schema.intersect([import_chat.ChatLunaPlugin.Config, import_koishi.Schema.object({ apiKeys: import_koishi.Schema.array(import_koishi.Schema.tuple([import_koishi.Schema.string().role("secret").default(""), import_koishi.Schema.boolean().default(true)])).default([[]]).role("table"), additionalModels: import_koishi.Schema.array(import_koishi.Schema.object({ model: import_koishi.Schema.string(), modelType: import_koishi.Schema.union(["LLM 大语言模型", "Embeddings 嵌入模型"]).default("LLM 大语言模型"), modelCapabilities: import_koishi.Schema.array(import_koishi.Schema.union([import_types3.ModelCapabilities.ToolCall, import_types3.ModelCapabilities.ImageInput])).default([import_types3.ModelCapabilities.ToolCall]).role("checkbox"), contextSize: import_koishi.Schema.number().default(128e3) })).default([]).role("table") }), import_koishi.Schema.object({ maxContextRatio: import_koishi.Schema.number().min(0).max(1).step(1e-4).role("slider").default(0.35), temperature: import_koishi.Schema.percent().min(0).max(2).step(0.1).default(1), enableSearch: import_koishi.Schema.boolean().default(true) })]).i18n({ "zh-CN": require_zh_CN_schema(), "en-US": require_en_US_schema()
// eslint-disable-next-line @typescript-eslint/no-explicit-any
});
var inject = ["chatluna"];
var name = "chatluna-qwen-adapter";
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = { Config, apply, inject, name });