// koishi-plugin-chatluna-openai-adapter
var __defProp = Object.defineProperty;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
var __commonJS = (cb, mod) => function __require() {
  return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
};

// src/locales/zh-CN.schema.yml
var require_zh_CN_schema = __commonJS({
  "src/locales/zh-CN.schema.yml"(exports, module) {
    module.exports = {
      $inner: [
        {},
        {
          $desc: "请求选项",
          apiKeys: {
            $inner: ["OpenAI 的 API Key", "OpenAI API 的请求地址", "是否启用此配置"],
            $desc: "OpenAI 的 API Key 和请求地址列表。"
          }
        },
        {
          $desc: "模型配置",
          maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。",
          temperature: "回复的随机性程度,数值越高,回复越随机。",
          presencePenalty: "重复惩罚系数,数值越高,越不易重复出现已出现过至少一次的 Token(范围:-2~2,步长:0.1)。",
          frequencyPenalty: "频率惩罚系数,数值越高,越不易重复出现次数较多的 Token(范围:-2~2,步长:0.1)。"
        }
      ]
    };
  }
});

// src/locales/en-US.schema.yml
var require_en_US_schema = __commonJS({
  "src/locales/en-US.schema.yml"(exports, module) {
    module.exports = {
      $inner: [
        {},
        {
          $desc: "API Configuration",
          apiKeys: {
            $inner: ["OpenAI API Key", "OpenAI API Endpoint", "Enabled"],
            $desc: "OpenAI API credentials"
          }
        },
        {
          $desc: "Model Parameters",
          maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of the model context window available for use. For example, 0.35 means at most 35% of the model context can be used.",
          temperature: "Sampling temperature (higher values increase randomness)",
          presencePenalty: "Token presence penalty (-2 to 2, step 0.1, discourages token repetition)",
          frequencyPenalty: "Token frequency penalty (-2 to 2, step 0.1, reduces frequent token repetition)"
        }
      ]
    };
  }
});

// src/index.ts
import { ChatLunaPlugin } from "koishi-plugin-chatluna/services/chat";
import { Schema } from "koishi";

// src/client.ts
import { PlatformModelAndEmbeddingsClient } from "koishi-plugin-chatluna/llm-core/platform/client";
import {
  ChatLunaChatModel,
  ChatLunaEmbeddings
} from "koishi-plugin-chatluna/llm-core/platform/model";
import {
  ModelCapabilities,
  ModelType
} from "koishi-plugin-chatluna/llm-core/platform/types";
import {
  ChatLunaError as ChatLunaError2,
  ChatLunaErrorCode
} from "koishi-plugin-chatluna/utils/error";

// src/requester.ts
import { ModelRequester } from "koishi-plugin-chatluna/llm-core/platform/api";
import {
  completionStream,
  createEmbeddings,
  createRequestContext
} from "@chatluna/v1-shared-adapter";
import { ChatLunaError } from "koishi-plugin-chatluna/utils/error";

// Thin requester that delegates chat completions and embeddings to the shared
// v1 adapter helpers, passing along the active config pool entry.
var OpenAIRequester = class extends ModelRequester {
  static {
    __name(this, "OpenAIRequester");
  }
  constructor(ctx, _configPool, _pluginConfig, _plugin) {
    super(ctx, _configPool, _pluginConfig, _plugin);
  }
  // Streams completion chunks through the shared adapter.
  async *completionStreamInternal(params) {
    const requestContext = createRequestContext(
      this.ctx,
      this._config.value,
      this._pluginConfig,
      this._plugin,
      this
    );
    yield* completionStream(requestContext, params);
  }
  async embeddings(params) {
    const requestContext = createRequestContext(
      this.ctx,
      this._config.value,
      this._pluginConfig,
      this._plugin,
      this
    );
    return await createEmbeddings(requestContext, params);
  }
  // Fetches the model id list from the `GET /models` endpoint.
  async getModels(config) {
    let data;
    try {
      const response = await this.get("models", {}, { signal: config?.signal });
      data = await response.text();
      data = JSON.parse(data);
      return data.data.map((model) => model.id);
    } catch (e) {
      if (e instanceof ChatLunaError) {
        throw e;
      }
      throw new Error(
        "error when listing openai models, Result: " + JSON.stringify(data)
      );
    }
  }
  get logger() {
    return logger;
  }
};
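// Illustrative note (assumption, not part of this bundle): `getModels` above
// parses the OpenAI-compatible `GET /models` response, which is conventionally
// shaped like
//
//   {
//     "object": "list",
//     "data": [
//       { "id": "gpt-4o", "object": "model" },
//       { "id": "text-embedding-3-small", "object": "model" }
//     ]
//   }
//
// so `data.data.map((model) => model.id)` yields the plain id list
// (["gpt-4o", "text-embedding-3-small"]) that `refreshModels` filters below.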
// src/client.ts
import { getModelMaxContextSize, supportImageInput } from "@chatluna/v1-shared-adapter";

var OpenAIClient = class extends PlatformModelAndEmbeddingsClient {
  constructor(ctx, _config, plugin) {
    super(ctx, plugin.platformConfigPool);
    this._config = _config;
    this.plugin = plugin;
    this._requester = new OpenAIRequester(ctx, plugin.platformConfigPool, _config, plugin);
  }
  static {
    __name(this, "OpenAIClient");
  }
  platform = "openai";
  _requester;
  get logger() {
    return logger;
  }
  // Fetches the raw model list, keeps chat (gpt/o1/o3/o4) and embedding models,
  // drops instruct variants and non-text modalities, then tags capabilities.
  async refreshModels(config) {
    try {
      const rawModels = await this._requester.getModels(config);
      return rawModels
        .filter(
          (model) =>
            model.includes("gpt") ||
            model.includes("text-embedding") ||
            model.includes("o1") ||
            model.includes("o3") ||
            model.includes("o4")
        )
        .filter(
          (model) =>
            !(
              model.includes("instruct") ||
              ["whisper", "tts", "dall-e", "audio", "realtime"].some((keyword) =>
                model.includes(keyword)
              )
            )
        )
        .map((model) => ({
          name: model,
          type: model.includes("embedding") ? ModelType.embeddings : ModelType.llm,
          capabilities: [
            ModelCapabilities.ToolCall,
            supportImageInput(model) ? ModelCapabilities.ImageInput : void 0
          ].filter(Boolean)
        }));
    } catch (e) {
      if (e instanceof ChatLunaError2) {
        throw e;
      }
      throw new ChatLunaError2(ChatLunaErrorCode.MODEL_INIT_ERROR, e);
    }
  }
  // Builds a chat model (with its token budget capped by maxContextRatio) or
  // an embeddings client, depending on the registered model type.
  _createModel(model) {
    const info = this._modelInfos[model];
    if (info == null) {
      throw new ChatLunaError2(ChatLunaErrorCode.MODEL_NOT_FOUND);
    }
    if (info.type === ModelType.llm) {
      const modelMaxContextSize = getModelMaxContextSize(info);
      return new ChatLunaChatModel({
        modelInfo: info,
        requester: this._requester,
        model,
        maxTokenLimit: Math.floor(
          (info.maxTokens || modelMaxContextSize || 128e3) * this._config.maxContextRatio
        ),
        modelMaxContextSize,
        frequencyPenalty: this._config.frequencyPenalty,
        presencePenalty: this._config.presencePenalty,
        timeout: this._config.timeout,
        temperature: this._config.temperature,
        maxRetries: this._config.maxRetries,
        llmType: "openai"
      });
    }
    return new ChatLunaEmbeddings({
      client: this._requester,
      model,
      batchSize: 256,
      maxRetries: this._config.maxRetries
    });
  }
};

// src/index.ts
import { createLogger } from "koishi-plugin-chatluna/utils/logger";

var logger;

function apply(ctx, config) {
  logger = createLogger(ctx, "chatluna-openai-adapter");
  ctx.on("ready", async () => {
    const plugin = new ChatLunaPlugin(ctx, config, "openai");
    // Keep only rows with a non-empty key that are enabled, then map each
    // [apiKey, apiEndpoint, enabled] tuple to a platform client config.
    plugin.parseConfig((config2) => {
      return config2.apiKeys
        .filter(([apiKey, _, enabled]) => apiKey.length > 0 && enabled)
        .map(([apiKey, apiEndpoint]) => ({
          apiKey,
          apiEndpoint,
          platform: "openai",
          chatLimit: config2.chatTimeLimit,
          timeout: config2.timeout,
          maxRetries: config2.maxRetries,
          concurrentMaxSize: config2.chatConcurrentMaxSize
        }));
    });
    plugin.registerClient(() => new OpenAIClient(ctx, config, plugin));
    await plugin.initClient();
  });
}
__name(apply, "apply");

var Config2 = Schema.intersect([
  ChatLunaPlugin.Config,
  Schema.object({
    apiKeys: Schema.array(
      Schema.tuple([
        Schema.string().role("secret").default(""),
        Schema.string().default("https://api.openai.com/v1"),
        Schema.boolean().default(true)
      ])
    )
      .default([[]])
      .role("table")
  }),
  Schema.object({
    maxContextRatio: Schema.number().min(0).max(1).step(1e-4).role("slider").default(0.35),
    temperature: Schema.percent().min(0).max(2).step(0.1).default(1),
    presencePenalty: Schema.number().min(-2).max(2).step(0.1).default(0),
    frequencyPenalty: Schema.number().min(-2).max(2).step(0.1).default(0)
  })
]).i18n({
  "zh-CN": require_zh_CN_schema(),
  "en-US": require_en_US_schema()
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
});
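// Illustrative note (values are placeholders): given an `apiKeys` table such as
//
//   [["sk-xxxx", "https://api.openai.com/v1", true],
//    ["", "https://api.openai.com/v1", true]]
//
// the `parseConfig` callback in `apply` drops rows with an empty key or
// `enabled === false` and maps the remaining tuple to a client config like
//
//   { apiKey: "sk-xxxx", apiEndpoint: "https://api.openai.com/v1",
//     platform: "openai", chatLimit: ..., timeout: ..., maxRetries: ...,
//     concurrentMaxSize: ... }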
var inject = ["chatluna"];
var name = "chatluna-openai-adapter";

export { Config2 as Config, apply, inject, logger, name };
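// Usage sketch (assumption: a standard Koishi app with the chatluna service
// already loaded, since `inject = ["chatluna"]`; the key and endpoint values
// are placeholders, not defaults taken from this file):
//
//   import { Context } from "koishi";
//   import * as OpenAIAdapter from "koishi-plugin-chatluna-openai-adapter";
//
//   const ctx = new Context();
//   ctx.plugin(OpenAIAdapter, {
//     apiKeys: [["sk-your-key", "https://api.openai.com/v1", true]],
//     maxContextRatio: 0.35,
//     temperature: 1,
//     presencePenalty: 0,
//     frequencyPenalty: 0
//   });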