@h-lumos/llm-sdk

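Minified ESM bundle. It wraps the openai package's APIClient to expose OpenAI-compatible clients for Baidu ERNIE Bot (ErnieAI), Google Gemini (GeminiAI), Tencent Hunyuan (HunYuanAI), Minimax (MinimaxAI), Alibaba Qwen (QWenAI), iFLYTEK Spark (SparkAI), and Vyro Imagine (VYroAI), plus the SSE streaming helpers SSEDecoder and LineDecoder. Hedged usage sketches follow the module exports at the end of the file.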
import F, { OpenAIError as _, APIError as h } from "openai"; import { APIConnectionError as yt, APIConnectionTimeoutError as bt, APIError as wt, APIUserAbortError as vt, AuthenticationError as xt, BadRequestError as Et, ConflictError as Ct, InternalServerError as St, NotFoundError as At, default as Rt, OpenAIError as It, PermissionDeniedError as kt, RateLimitError as Pt, UnprocessableEntityError as Tt } from "openai"; import { APIClient as v } from "openai/core"; import { Stream as y, _iterSSEMessages as q } from "openai/streaming"; import { randomUUID as z } from "crypto"; import { Page as ne } from "openai/pagination"; import { createHmac as H } from "node:crypto"; import { toFile as k } from "openai/uploads"; let f = class { constructor(e) { this._client = e; } }; const Ge = (a) => a instanceof Error ? a : new Error(a); function G(a) { return Array.isArray(a) ? a : [a]; } function $(a, e) { return Math.floor(Math.random() * (e - a + 1)) + a; } let re = class w extends f { constructor() { super(...arguments), this.endpoints = { "ernie-bot": "/chat/completions", "ernie-bot-turbo": "/chat/eb-instant", "ernie-bot-4": "/chat/completions_pro", "ernie-bot-8k": "/chat/ernie_bot_8k" }; } async create(e, t) { var l; const { model: s = "ernie-bot", ...n } = w.buildCreateParams(e), r = this.endpoints[s]; if (!r) throw new _(`Invalid model: ${s}`); const o = n.stream, i = { ...t == null ? void 0 : t.headers,
// Note: for streaming requests, Accept must be set to text/event-stream
Accept: o ? "text/event-stream" : "application/json" }, c = await this._client.post(r, { ...t, body: n, headers: i,
// ERNIE Bot (文心一言) wraps its payload in an envelope that must be unwrapped and converted to the OpenAI format
// __binaryResponse is set to true so the client returns the raw response
stream: !1, __binaryResponse: !0 }); if (o) { const u = new AbortController(); return (l = t == null ? void 0 : t.signal) == null || l.addEventListener("abort", () => { u.abort(); }), w.fromOpenAIStream(s, y.fromSSEResponse(c, u), u); } return w.fromResponse(s, await c.json()); } static buildCreateParams(e) { const { messages: t = [], presence_penalty: s, user: n, stop: r, ...o } = e, i = t[0], c = i && i.role === "system" ? i.content : void 0; c && t.splice(0, 1); const l = { ...o, messages: t }; return c && (l.system = c), n && (l.user_id = n), s && (l.penalty_score = s), r && (l.stop = G(r)), l; } static fromResponse(e, t) { w.assert(t); const s = t.result, n = { index: 0, message: { role: "assistant", content: s.result }, logprobs: null, finish_reason: "stop" }; return s.is_end ? n.finish_reason = "stop" : s.is_truncated ? n.finish_reason = "length" : s.need_clear_history && (n.finish_reason = "content_filter"), { id: s.id, model: e, choices: [n], created: parseInt(s.created, 10), object: "chat.completion", usage: s.usage }; } static fromOpenAIStream(e, t, s) { async function* n() { for await (const r of t) { w.assert(r); const o = r.result, i = { index: 0, delta: { role: "assistant", content: o.result || "" }, finish_reason: null }; o.is_end ? i.finish_reason = "stop" : o.is_truncated ? i.finish_reason = "length" : o.need_clear_history && (i.finish_reason = "content_filter"), yield { id: o.id, model: e, choices: [i], object: "chat.completion.chunk", created: parseInt(o.created, 10),
// openai-node already has an open discussion about adding usage to chunks
// ERNIE Bot does provide it; it is kept here mainly for forward compatibility
usage: o.usage }; } } return new y(n, s); } /** * Builds an error * * @param code - * @param message - * @returns the error */ static makeAPIError(e, t) { const s = { code: e, message: t }; switch (e) { case 2: return h.generate(500, s, t, {}); case 6: case 111: return h.generate(403, s, t, {}); case 17: case 18: case 19: case 40407: return h.generate(429, s, t, {}); case 110: case 40401: return h.generate(401, s, t, {}); case 336003: return h.generate(400, s, t, {}); case 336100: return h.generate(500, s, t, {}); default: return h.generate(void 0, s, t, {}); } } /** * Throws an APIError if the code is non-zero * * @param code - * @param message - */ static assert(e) { if (e.errorCode !== 0) throw w.makeAPIError(e.errorCode, e.errorMsg); } }, oe = class extends f { constructor() { super(...arguments), this.completions = new re(this._client); } }, ae = class N extends f { constructor() { super(...arguments), this.endpoints = { "ernie-text-embedding": "/embeddings/embedding-v1" }; } /** * Creates text embedding vectors * * @param params request parameters: model, user info and input text * @param options optional request config * @returns a Promise of the embedding response * * @see official docs https://cloud.baidu.com/doc/WENXINWORKSHOP/s/alj562vvu */ async create(e, t) { const { model: s, user: n, input: r } = e, o = this.endpoints[s]; if (!o) throw new _(`Invalid model: ${s}`); const i = { input: r, user_id: n }, c = await this._client.post(o, { body: i, ...t, __binaryResponse: !0
// marks that the raw response object is wanted
}); return N.fromResponse(s, await c.json()); } /** * Converts the Baidu API response format into the OpenAI-compatible format * @param model - name of the embedding model used * @param data - raw response data from the Baidu API * @returns the embedding response in standard OpenAI format */ static fromResponse(e, t) { N.assert(t); const { result: s } = t; return { data: s.data,
// embedding vector data
model: e,
// model name used
object: "list",
// always returns the list type
usage: s.usage
// API usage statistics
}; } /** * Throws an APIError if the code is non-zero * * @param code - * @param message - */ static assert(e) { if (e.errorCode === 0) return; const t = { code: e.errorCode, message: e.errorMsg }; throw h.generate(void 0, t, void 0, void 0); } }; class We extends v { /** * Initializes an SDK client instance * @param options config options, such as the API key and base URL */ constructor(e = {}) { const { apiKey: t = process.env.EB_API_KEY || "",
// taken from the EB_API_KEY env var, or left empty
baseURL: s = "https://aistudio.baidu.com/llm/lmapi/v1",
// default API address
timeout: n = 3e4,
// 30-second request timeout
fetch: r = globalThis.fetch,
// use the global fetch
httpAgent: o = void 0,
// no HTTP agent
...i
// other config options
} = e; super({ baseURL: s, timeout: n, fetch: r, httpAgent: o, ...i
// merge the remaining config options
}), this.chat = new oe(this), this.embeddings = new ae(this), this._options = e, this.apiKey = t; } authHeaders() { return { Authorization: `token ${this.apiKey}` }; } defaultHeaders(e) { return { ...super.defaultHeaders(e), ...this._options.defaultHeaders }; } defaultQuery() { return this._options.defaultQuery; } } let L = class { constructor(e) { this._client = e; } }, ie = class extends L { async create(e, t) { var c; const { stream: s, model: n } = e, r = this.buildCreateParams(e), o = `/models/${n}:generateContent`, i = await this._client.post(o, { ...t, query: s ? { alt: "sse" } : {}, body: r, stream: !1, __binaryResponse: !0 }); if (s) { const l = new AbortController(); return (c = t == null ? void 0 : t.signal) == null || c.addEventListener("abort", () => { l.abort(); }), this.afterSSEResponse(n, i, l); } return this.afterResponse(n, i); } buildCreateParams(e) { const { messages: t = [], max_tokens: s, top_p: n, top_k: r, stop: o, temperature: i } = e; function c(d) { const m = []; if (typeof d == "string") return m.push({ text: d }), m; for (const g of d) g.type === "text" && m.push({ text: g.text }); return m; } function l(d) { return d === "user" ? "user" : "model"; } const u = {}, p = { contents: t.map((d) => ({ role: l(d.role), parts: c(d.content) })), generationConfig: u }; return i != null && (u.temperature = i), r != null && (u.topK = r), n != null && (u.topP = n), o != null && (u.stopSequences = G(o)), s != null && (u.maxOutputTokens = s), p; } async afterResponse(e, t) { const n = (await t.json()).candidates.map((r) => { const [o] = r.content.parts, i = { index: r.index, message: { role: "assistant", content: o.text }, logprobs: null, finish_reason: "stop" }; switch (r.finishReason) { case "MAX_TOKENS": i.finish_reason = "length"; break; case "SAFETY": case "RECITATION": i.finish_reason = "content_filter"; break; default: i.finish_reason = "stop"; } return i; }); return { id: z(), model: e, choices: n, object: "chat.completion", created: Math.floor(Date.now() / 1e3),
// TODO: usage needs to be supported
usage: { completion_tokens: 0, prompt_tokens: 0, total_tokens: 0 } }; } afterSSEResponse(e, t, s) { const n = y.fromSSEResponse(t, s), r = (i) => i.candidates.map((c) => { const [l] = c.content.parts, u = { index: c.index, delta: { role: "assistant", content: l.text || "" }, finish_reason: null }; switch (c.finishReason) { case "MAX_TOKENS": u.finish_reason = "length"; break; case "SAFETY": case "RECITATION": u.finish_reason = "content_filter"; break; default: u.finish_reason = "stop"; } return u; }); async function* o() { for await (const i of n) yield { id: z(), model: e, choices: r(i), object: "chat.completion.chunk", created: Math.floor(Date.now() / 1e3) }; } return new y(o, s); } }, ce = class extends L { constructor() { super(...arguments), this.completions = new ie(this._client); } }; class D extends L { /** * Retrieves a model instance, providing basic information about the model such as * the owner and permissioning. */ async retrieve(e, t) { return { id: (await this._client.get(`/models/${e}`, t)).name, created: 0, object: "model", owned_by: "google" }; } /** * Lists the currently available models, and provides basic information about each * one such as the owner and availability.
*/ list(e) { return this._client.getAPIList("/models", le, e); } } class le extends ne { constructor(e, t, s, n) { const r = s.models.map((o) => ({ id: o.name, created: 0, object: "model", owned_by: "google" })); super(e, t, { data: r, object: "list" }, n); } } ((a) => { a.Model = F.Models.Model, a.ModelsPage = F.Models.ModelsPage; })(D || (D = {})); const ue = "https://generativelanguage.googleapis.com/v1"; class Ze extends v { constructor(e = {}) { const { apiKey: t = process.env.GEMINI_API_KEY || "", baseURL: s = process.env.GEMINI_BASE_URL || ue, timeout: n = 3e4, fetch: r = globalThis.fetch, httpAgent: o = void 0, ...i } = e; super({ baseURL: s, timeout: n, fetch: r, httpAgent: o, ...i }), this.chat = new ce(this), this.models = new D(this), this._options = e, this.apiKey = t; } defaultHeaders(e) { return { ...super.defaultHeaders(e), ...this._options.defaultHeaders }; } defaultQuery() { return { ...this._options.defaultQuery, key: this.apiKey }; } } let J = class { constructor(e) { this._client = e; } }, de = class j extends J { async create(e, t) { var g; const s = this._client, { model: n, messages: r, temperature: o = 0.8, top_p: i, stream: c } = e, l = Math.floor(Date.now() / 1e3), u = { app_id: s.appId, secret_id: s.secretId, timestamp: l, expired: l + 7200, temperature: o, top_p: i, stream: c ? 1 : 0, messages: r }, p = "/chat/completions", d = s.generateAuthorization(p, u), m = await this._client.post(p, { ...t, body: u, headers: { ...t == null ? void 0 : t.headers, Authorization: d }, stream: !1, __binaryResponse: !0 }); if (e.stream) { const b = new AbortController(); return (g = t == null ? void 0 : t.signal) == null || g.addEventListener("abort", () => { b.abort(); }), j.fromSSEResponse(n, y.fromSSEResponse(m, b), b); } return j.fromResponse(n, await m.json()); } static fromSSEResponse(e, t, s) { async function* n() { for await (const r of t) { if (r.error) throw new h(void 0, r.error, void 0, void 0); const i = { index: 0, delta: { role: "assistant", content: r.choices[0].delta.content || "" }, finish_reason: null }; yield { id: r.id, model: e, choices: [i], object: "chat.completion.chunk", created: parseInt(r.created, 10) }; } } return new y(n, s); } static fromResponse(e, t) { if (t.error) throw new h(void 0, t.error, void 0, void 0); const s = t.choices[0], n = { index: 0, message: { role: "assistant", content: s.messages.content }, logprobs: null, finish_reason: s.finish_reason }; return { id: t.id, model: e, choices: [n], created: parseInt(t.created), object: "chat.completion", usage: t.usage }; } hash(e) { return H("sha1", this._client.secretKey).update(Buffer.from(e, "utf8")).digest("base64"); } }, he = class extends J { constructor() { super(...arguments), this.completions = new de(this._client); } }; class st extends v { constructor(e = {}) { const { appId: t = process.env.HUNYUAN_APP_ID || "", secretId: s = process.env.HUNYUAN_SECRET_ID || "", secretKey: n = process.env.HUNYUAN_SECRET_KEY || "", baseURL: r = "https://hunyuan.cloud.tencent.com/hyllm/v1", timeout: o = 3e4, fetch: i = globalThis.fetch, httpAgent: c = void 0, ...l } = e; super({ baseURL: r, timeout: o, fetch: i, httpAgent: c, ...l }), this.chat = new he(this), this._options = e, this.appId = parseInt(t, 10), this.secretKey = n, this.secretId = s; } defaultHeaders(e) { return { ...super.defaultHeaders(e), ...this._options.defaultHeaders }; } defaultQuery() { return this._options.defaultQuery; } generateAuthorization(e, t) { const s = this.buildURL(e, {}).replace("https://", ""), n = []; return 
Object.keys(t).sort().forEach((r) => { const o = t[r]; o != null && (typeof o == "object" ? n.push(`${r}=${JSON.stringify(o)}`) : n.push(`${r}=${o}`)); }), this.hash(`${s}?${n.join("&")}`); } hash(e) { return H("sha1", this.secretKey).update(Buffer.from(e, "utf8")).digest("base64"); } } async function* pe(a, e, t) { if (!a.body) throw t.abort(), new _("Attempted to iterate over a response with no body"); const s = new U(), n = ge(a.body); for await (const r of n) for (const o of s.decode(r)) { const i = e.decode(o); i && (yield i); } for (const r of s.flush()) { const o = e.decode(r); o && (yield o); } } class fe { constructor() { this.event = null, this.data = [], this.chunks = []; } decode(e) { if (e.endsWith("\r") && (e = e.substring(0, e.length - 1)), !e) { if (!this.event && !this.data.length) return null; const r = { event: this.event, data: this.data.join("\n"), raw: this.chunks }; return this.event = null, this.data = [], this.chunks = [], r; } if (this.chunks.push(e), e.startsWith(":")) return null; let [t, s, n] = me(e, ":"); return n.startsWith(" ") && (n = n.substring(1)), t === "event" ? this.event = n : t === "data" && this.data.push(n), null; } } const C = class C {
// TextDecoder found in browsers; not typed to avoid pulling in either "dom" or "node" types.
constructor() { this.buffer = [], this.trailingCR = !1; } decode(e) { let t = this.decodeText(e); if (this.trailingCR && (t = "\r" + t, this.trailingCR = !1), t.endsWith("\r") && (this.trailingCR = !0, t = t.slice(0, -1)), !t) return []; const s = C.NEWLINE_CHARS.has(t[t.length - 1] || ""); let n = t.split(C.NEWLINE_REGEXP); return n.length === 1 && !s ? (this.buffer.push(n[0]), []) : (this.buffer.length > 0 && (n = [this.buffer.join("") + n[0], ...n.slice(1)], this.buffer = []), s || (this.buffer = [n.pop() || ""]), n); } decodeText(e) { if (e == null) return ""; if (typeof e == "string") return e; if (typeof Buffer < "u") { if (e instanceof Buffer) return e.toString(); if (e instanceof Uint8Array) return Buffer.from(e).toString(); throw new _( `Unexpected: received non-Uint8Array (${e.constructor.name}) stream chunk in an environment with a global "Buffer" defined, which this library assumes to be Node. Please report this error.` ); } if (typeof TextDecoder < "u") { if (e instanceof Uint8Array || e instanceof ArrayBuffer) return this.textDecoder ?? (this.textDecoder = new TextDecoder("utf8")), this.textDecoder.decode(e); throw new _( `Unexpected: received non-Uint8Array/ArrayBuffer (${e.constructor.name}) in a web platform. Please report this error.` ); } throw new _( "Unexpected: neither Buffer nor TextDecoder are available as globals. Please report this error." ); } flush() { if (!this.buffer.length && !this.trailingCR) return []; const e = [this.buffer.join("")]; return this.buffer = [], this.trailingCR = !1, e; } }; C.NEWLINE_CHARS = /* @__PURE__ */ new Set(["\n", "\r", "\v", "\f", "\x1c", "\x1d", "\x1e", "\x85", "\u2028", "\u2029"]), C.NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g; let U = C; function me(a, e) { const t = a.indexOf(e); return t !== -1 ?
[a.substring(0, t), e, a.substring(t + e.length)] : [a, "", ""]; } function ge(a) { if (a[Symbol.asyncIterator]) return a; const e = a.getReader(); return { async next() { try { const t = await e.read(); return t != null && t.done && e.releaseLock(), t; } catch (t) { throw e.releaseLock(), t; } }, async return() { const t = e.cancel(); return e.releaseLock(), await t, { done: !0, value: void 0 }; }, [Symbol.asyncIterator]() { return this; } }; } function B(a) { if (a.base_resp.status_code === 0) return; const e = { code: a.base_resp.status_code, message: a.base_resp.status_msg }; throw new h(void 0, e, void 0, void 0); } let _e = class K extends f { constructor() { super(...arguments), this.resources = { "abab5-chat": { model: "abab5-chat", endpoint: "/text/chatcompletion" }, "abab5.5-chat": { model: "abab5.5-chat", endpoint: "/text/chatcompletion" }, "abab5.5-chat-pro": { model: "abab5.5-chat", endpoint: "/text/chatcompletion_pro" } }, this.system = "MM智能助理是一款由MiniMax自研的,没有调用其他产品的接口的大型语言模型。MiniMax是一家中国科技公司,一直致力于进行大模型相关的研究。"; } async create(e, t) { var o; const s = this.resources[e.model]; if (!s) throw new _(`Invalid model: ${e.model}`); const n = this.buildCreateParams(e), r = await this._client.post(s.endpoint, { ...t, body: { ...n, model: s.model }, stream: !1, __binaryResponse: !0 }); if (n.stream) { const i = new AbortController(); return (o = t == null ? void 0 : t.signal) == null || o.addEventListener("abort", () => { i.abort(); }), K.fromSSEResponse(e.model, r, i); } return K.fromResponse(e.model, await r.json()); } buildCreateParams(e) { const { model: t, messages: s = [], max_tokens: n, ...r } = e, o = { model: t, messages: [], ...r }; n && (o.tokens_to_generate = n); const i = s[0], c = i && i.role === "system" ? i.content : null; return c && s.splice(0, 1), t === "abab5.5-chat-pro" ? (o.bot_setting = [ { bot_name: "MM智能助理", content: c || this.system } ], o.reply_constraints = { sender_type: "BOT", sender_name: "MM智能助理" }) : (o.role_meta = { bot_name: "MM智能助理", user_name: "用户" }, o.prompt = c || this.system), o.messages = s.map((l) => { switch (l.role) { case "assistant": return { sender_type: "BOT", text: l.content }; default: { const u = { sender_type: "USER", text: l.content }; return t == "abab5.5-chat-pro" && (u.sender_name = "用户"), u; } } }), e.stream && (o.use_standard_sse = !0), o; } static fromResponse(e, t) { return B(t), { id: t.id, model: t.model, choices: t.choices.map((s, n) => { const { finish_reason: r } = s; return e === "abab5.5-chat-pro" ? { index: n, message: { role: "assistant", content: s.messages[0].text }, logprobs: null, finish_reason: r } : { index: n, message: { role: "assistant", content: s.text }, logprobs: null, finish_reason: r }; }), created: t.created, object: "chat.completion", usage: t.usage }; } static fromSSEResponse(e, t, s) { let n = !1; const r = new fe(); function o(c) { return { id: c.request_id, model: e, choices: c.choices.map((l, u) => { const { finish_reason: p = null } = l; if (e === "abab5.5-chat-pro") { const d = l.messages[0].text; return { index: u, delta: { role: "assistant", content: p === "stop" ? 
"" : d }, finish_reason: p }; } return { index: u, delta: { role: "assistant", content: l.delta }, finish_reason: p }; }), object: "chat.completion.chunk", created: c.created }; } async function* i() { if (n) throw new Error("Cannot iterate over a consumed stream, use `.tee()` to split the stream."); n = !0; let c = !1; try { for await (const l of pe(t, r, s)) if (!c) { if (l.data.startsWith("[DONE]")) { c = !0; continue; } if (l.event === null) { let u; try { u = JSON.parse(l.data); } catch (p) { throw console.error("Could not parse message into JSON:", l.data), console.error("From chunk:", l.raw), p; } if (u && u.code) throw new h(void 0, u, void 0, void 0); yield o(u); } } c = !0; } catch (l) { if (l instanceof Error && l.name === "AbortError") return; throw l; } finally { c || s.abort(); } } return new y(i, s); } }, ye = class extends f { constructor() { super(...arguments), this.completions = new _e(this._client); } }; class be extends f { constructor() { super(...arguments), this.resources = { "speech-01": { model: "speech-01", endpoint: "/text_to_speech", resposne_type: "binary" }, "speech-01-pro": { model: "speech-01", endpoint: "/t2a_pro", resposne_type: "json" } // Note: 返回的是 SSE 流数据 // 'speech-01-stream': { // model: 'speech-01', // endpoint: '/tts/stream', // resposne_type: 'stream', // }, }; } async create(e, t) { const { input: s, voice: n, ...r } = e, o = this.resources[e.model]; if (!o) throw new _(`Invalid model: ${e.model}`); const i = { ...r, text: s, model: o.model }; n && (i.voice_id = n); const c = await this._client.post(o.endpoint, { ...t, body: i, __binaryResponse: !0 }); return t != null && t.__binaryResponse || o.resposne_type === "binary" || o.resposne_type === "stream" ? c : c.json().then((l) => (B(l), fetch(l.audio_file))); } } class we extends f { constructor() { super(...arguments), this.speech = new be(this._client); } } let ve = class extends f { /** * Creates an embedding vector representing the input text. * * See https://api.minimax.chat/document/guides/Embeddings */ async create(e, t) { const { model: s, input: n, type: r = "query" } = e, i = await (await this._client.post("/embeddings", { body: { model: s, texts: n, type: r }, ...t, __binaryResponse: !0 })).json(); return B(i), { data: i.vectors.map((c, l) => ({ embedding: c, index: l, object: "embedding" })), model: s, object: "list", usage: { prompt_tokens: i.total_tokens, total_tokens: i.total_tokens } }; } }; class ot extends v { constructor(e = {}) { const { orgId: t = process.env.MINIMAX_API_ORG || "", apiKey: s = process.env.MINIMAX_API_KEY || "", baseURL: n = "https://api.minimax.chat/v1", timeout: r = 3e4, fetch: o = globalThis.fetch, httpAgent: i = void 0, ...c } = e; super({ baseURL: n, timeout: r, fetch: o, httpAgent: i, ...c }), this.audio = new we(this), this.chat = new ye(this), this.embeddings = new ve(this), this._options = e, this.apiKey = s, this.orgId = t; } authHeaders() { return { Authorization: `Bearer ${this.apiKey}` }; } defaultHeaders(e) { return { ...super.defaultHeaders(e), ...this._options.defaultHeaders }; } defaultQuery() { return { GroupId: this.orgId, ...this._options.defaultQuery }; } } function W(a) { return a.startsWith("qwen-vl"); } function Q(a) { return W(a) ? 
"/services/aigc/multimodal-generation/generation" : "/services/aigc/text-generation/generation"; } function xe(a) { const { model: e, prompt: t, response_format: s, stream_options: n, ...r } = a, o = { model: e, input: { prompt: t }, parameters: r }; if (s && s.type && (o.parameters.result_format = s.type), a.stream) { const { incremental_output: i } = n || {}; o.parameters.incremental_output = i ?? !0; } return o; } function Ee(a, e) { return a === "null" || !a ? e ? null : "stop" : a; } function X(a) { const { output_tokens: e, input_tokens: t, total_tokens: s = e + t } = a; return { completion_tokens: e, prompt_tokens: t, total_tokens: s }; } function V(a, e, t) { const { model: s } = a, { output: n, usage: r } = e, o = { index: 0, text: n.text, logprobs: null, finish_reason: Ee(n.finish_reason, t) }; return { id: e.request_id, model: s, choices: [o], created: Math.floor(Date.now() / 1e3), object: "text_completion", usage: X(r) }; } function Ce(a, e, t) { let s = !1; async function* n() { if (s) throw new Error("Cannot iterate over a consumed stream, use `.tee()` to split the stream."); s = !0; let r = !1; try { for await (const o of q(e, t)) if (!r) { if (o.data.startsWith("[DONE]")) { r = !0; continue; } if (o.event === "result") { let i; try { i = JSON.parse(o.data); } catch (c) { throw console.error("Could not parse message into JSON:", o.data), console.error("From chunk:", o.raw), c; } if (i && i.code) throw new h(void 0, i, void 0, void 0); yield V(a, i, !0); } } r = !0; } catch (o) { if (o instanceof Error && o.name === "AbortError") return; throw o; } finally { r || t.abort(); } } return new y(n, t); } function Se(a) { return a.map((e) => (Array.isArray(e.content) ? e.content.forEach((t) => { t.type === "image_url" && (t.image = t.image_url.url, delete t.image_url), delete t.type; }) : e.content = [ // @ts-expect-error { text: e.content } ], e)); } function Ae(a) { return a.map((e) => { if (Array.isArray(e.content)) { const t = e.content.find((s) => s.type === "text"); return { role: e.role, content: t.text }; } return e; }); } function Re(a) { const { model: e, messages: t, raw: s, response_format: n, stream_options: r = {}, ...o } = a, i = { model: e, input: { messages: [] }, parameters: o }; if (s === !0 ? i.input.messages = t : W(e) ? i.input.messages = Se(t) : i.input.messages = Ae(t), a.tools) i.parameters.result_format = "message"; else if (n && n.type && (i.parameters.result_format = n.type), a.stream) { const c = (r == null ? void 0 : r.incremental_output) ?? !0; i.parameters.incremental_output = c; } return i; } function P(a, e) { return a === "null" || !a ? e ? null : "stop" : a; } function Ie(a, e) { const { model: t } = a, { output: s, usage: n } = e, r = { index: 0, message: { role: "assistant", content: "" }, logprobs: null, finish_reason: "stop" }; if (s.choices) { const { message: o, finish_reason: i } = s.choices[0]; r.message = { role: o.role, content: o.content }, i === "tool_calls" ? (r.finish_reason = "tool_calls", r.message.tool_calls = o.tool_calls) : r.finish_reason = P(i, !0); } else r.message.content = s.text, r.finish_reason = P(s.finish_reason); return { id: e.request_id, model: t, choices: [r], created: Math.floor(Date.now() / 1e3), object: "chat.completion", usage: X(n) }; } function ke(a, e) { const t = e.output, s = { index: 0, delta: { role: "assistant", content: "" }, finish_reason: null }; if (t.choices) { const { message: n, finish_reason: r } = t.choices[0]; s.delta = { role: n.role, content: n.content }, r === "tool_calls" ? 
(s.finish_reason = "tool_calls", s.delta.tool_calls = n.tool_calls) : s.finish_reason = P(r, !0); } else s.delta.content = t.text, s.finish_reason = P(t.finish_reason, !0); return { id: e.request_id, model: a.model, choices: [s], object: "chat.completion.chunk", created: Math.floor(Date.now() / 1e3) }; } function Pe(a, e, t) { let s = !1; async function* n() { if (s) throw new Error("Cannot iterate over a consumed stream, use `.tee()` to split the stream."); s = !0; let r = !1; try { for await (const o of q(e, t)) if (!r) { if (o.data.startsWith("[DONE]")) { r = !0; continue; } if (o.event === "result") { let i; try { i = JSON.parse(o.data); } catch (c) { throw console.error("Could not parse message into JSON:", o.data), console.error("From chunk:", o.raw), c; } if (i && i.code) throw new h(void 0, i, void 0, void 0); yield ke(a, i); } } r = !0; } catch (o) { if (o instanceof Error && o.name === "AbortError") return; throw o; } finally { r || t.abort(); } } return new y(n, t); } let Te = class extends f { async create(e, t) { var i; const s = { ...t == null ? void 0 : t.headers }; e.stream && (s.Accept = "text/event-stream"); const n = Q(e.model), r = Re(e), o = await this._client.post(n, { ...t, body: r, headers: s,
// Qwen (通义千问) wraps its payload in an envelope that must be unwrapped and converted to the OpenAI format
// __binaryResponse is set to true so the client returns the raw response
stream: !1, __binaryResponse: !0 }); if (e.stream) { const c = new AbortController(); return (i = t == null ? void 0 : t.signal) == null || i.addEventListener("abort", () => { c.abort(); }), Pe(r, o, c); } return Ie(r, await o.json()); } }, Z = class extends f { constructor() { super(...arguments), this.completions = new Te(this._client); } }, ee = class extends f { async create(e, t) { var i; const s = { ...t == null ? void 0 : t.headers }; e.stream && (s.Accept = "text/event-stream"); const n = Q(e.model), r = xe(e), o = await this._client.post(n, { ...t, body: r, headers: s,
// Qwen (通义千问) wraps its payload in an envelope that must be unwrapped and converted to the OpenAI format
// __binaryResponse is set to true so the client returns the raw response
stream: !1, __binaryResponse: !0 }); if (e.stream) { const c = new AbortController(); return (i = t == null ? void 0 : t.signal) == null || i.addEventListener("abort", () => { c.abort(); }), Ce(r, o, c); } return V(r, await o.json()); } }, te = class extends f { /** * Creates an image given a prompt. */ async generate(e, t = {}) { const s = this._client, { headers: n, ...r } = t, { model: o = "wanx-v1", prompt: i, n: c = 1, cfg: l, ...u } = e, p = await s.post("/services/aigc/text2image/image-synthesis", { ...r, headers: { "X-DashScope-Async": "enable", ...n }, body: { model: o, input: { prompt: i }, parameters: { ...u, scale: l, n: c } }, __binaryResponse: !0 }).then((d) => d.json()).then((d) => d.output.task_id); return this.waitTask(p, t).then((d) => ({ created: Date.now() / 1e3, data: d })); } async waitTask(e, t) { const s = await this._client.get(`/tasks/${e}`, { ...t, __binaryResponse: !0 }).then((o) => o.json()), { task_status: n, message: r } = s.output; if (n === "PENDING" || n === "RUNNING") return new Promise((o) => { setTimeout(() => o(this.waitTask(e, t)), 5e3); }); if (n === "SUCCEEDED") return s.output.results.filter((o) => "url" in o); throw n === "FAILED" ?
new _(r) : new _("Unknown task status"); } }; function Me(a) { return { model: a.model, input: { texts: a.input }, parameters: { text_type: a.type || "query" } }; } function $e(a, e) { const { output: t, usage: s } = e; return { object: "list", model: a.model, data: t.embeddings.map(({ text_index: n, embedding: r }) => ({ index: n, embedding: r, object: "embedding" })), usage: { prompt_tokens: s.total_tokens, total_tokens: s.total_tokens } }; } class se extends f { /** * Creates an embedding vector representing the input text. * * See https://help.aliyun.com/zh/dashscope/developer-reference/generic-text-vector */ async create(e, t) { const s = Me(e), n = await this._client.post("/services/embeddings/text-embedding/text-embedding", { ...t, body: s, __binaryResponse: !0 }); return $e(e, await n.json()); } } class O extends v { constructor(e = {}) { const { apiKey: t = process.env.QWEN_API_KEY || "", baseURL: s = "https://dashscope.aliyuncs.com/api/v1/", timeout: n = 3e4, fetch: r = globalThis.fetch, httpAgent: o = void 0, ...i } = e; super({ baseURL: s, timeout: n, fetch: r, httpAgent: o, ...i }), this.chat = new Z(this), this.completions = new ee(this), this.embeddings = new se(this), this.images = new te(this), this._options = e, this.apiKey = t; } authHeaders() { return { Authorization: `Bearer ${this.apiKey}` }; } defaultHeaders(e) { return { ...super.defaultHeaders(e), ...this._options.defaultHeaders }; } defaultQuery() { return this._options.defaultQuery; } async fetchWithTimeout(e, t, s, n) { const r = await super.fetchWithTimeout(e, t, s, n); if (r.ok) return r; const o = r.headers.get("content-type") || "", i = await r.text().then((c) => { if (o.includes("text/event-stream")) { const [l, u] = c.split("data:"); return u; } return c; }); return new Response(i, { status: r.status, statusText: r.statusText, headers: r.headers }); } makeStatusError(e, t, s, n) { return h.generate(e, { error: t }, s, n); } } ((a) => { a.Chat = Z, a.ChatModel = void 0, a.ChatCompletionCreateParams = void 0, a.ChatCompletionCreateParamsNonStreaming = void 0, a.ChatCompletionCreateParamsStreaming = void 0, a.Completions = ee, a.CompletionModel = void 0, a.Embeddings = se, a.Images = te; })(O || (O = {})); const ut = O; let Y = class { constructor(e) { this._client = e; } }; class Ne extends Y { constructor() { super(...arguments), this.resources = { "spark-1.5": { domain: "general", url: "wss://spark-api.xf-yun.com/v1.1/chat" }, "spark-2": { domain: "generalv2", url: "wss://spark-api.xf-yun.com/v2.1/chat" }, "spark-3": { domain: "generalv3", url: "wss://spark-api.xf-yun.com/v3.1/chat" } }; } async create(e, t) { const { model: s, messages: n, functions: r, user: o, ...i } = e, c = this.resources[s], l = this._client.generateAuthorizationURL(c.url, "GET"), u = { header: { app_id: this._client.appId }, parameter: { chat: { ...i, domain: c.domain } }, payload: { message: { text: n } } }; r && (u.payload.functions = { text: r }), o && (u.header.uid = o); const p = new AbortController(); t != null && t.signal && t.signal.addEventListener("abort", () => { p.abort(); }); const d = new globalThis.WebSocket(l); if (d.onopen = () => { d.send(JSON.stringify(u)); }, e.stream) { const m = new ReadableStream({ pull(g) { const b = new TextEncoder(); d.onmessage = (S) => { const A = JSON.parse(S.data), { header: x, payload: R } = A; if (x.code !== 0) { g.error(new h(void 0, A.header, void 0, void 0)); return; } const T = R.choices.text, [E] = T, I = { index: 0, delta: { role: E.role, content: E.content }, finish_reason: 
null }; x.status === 2 && (I.finish_reason = "stop"), E.function_call && (I.delta.function_call = E.function_call); const M = { id: x.sid, model: s, choices: [I], object: "chat.completion.chunk", created: Date.now() / 1e3 }; g.enqueue(b.encode(JSON.stringify(M) + "\n")); }, d.onerror = (S) => { g.error(S); }; }, cancel() { d.close(); } }); return p.signal.addEventListener("abort", () => { d.close(); }), y.fromReadableStream(m, p); } return new Promise((m, g) => { d.onmessage = (b) => { const S = JSON.parse(b.data), { header: A, payload: x } = S; if (A.status !== 2) return; const R = x.usage.text, T = x.choices.text, [E] = T, I = { index: 0, message: { role: "assistant", content: E.content }, logprobs: null, finish_reason: "stop" }, M = { id: A.sid, object: "chat.completion", created: Date.now() / 1e3, model: s, choices: [I], usage: { completion_tokens: R.completion_tokens, total_tokens: R.total_tokens, prompt_tokens: R.prompt_tokens } }; m(M); }, d.onerror = (b) => g(b); }); } } class De extends Y { constructor() { super(...arguments), this.completions = new Ne(this._client); } } let je = class extends Y { /** * See https://www.xfyun.cn/doc/spark/ImageGeneration.html */ async generate(e, t) { const { prompt: s, user: n } = e, r = { header: { app_id: this._client.appId, uid: n }, parameter: { chat: { max_tokens: 4096, domain: "general", temperature: 0.5 } }, payload: { message: { text: [{ role: "user", content: s }] } } }, o = this._client.generateAuthorizationURL("https://spark-api.cn-huabei-1.xf-yun.com/v2.1/tti", "POST"), c = await (await this._client.post(o, { ...t, body: r, __binaryResponse: !0 })).json(); if (c.header.code > 0) throw new h(void 0, c.header, void 0, void 0); return { created: Date.now() / 1e3, data: [ {
// base64 encoded image
url: c.payload.choices.text[0].content } ] }; } }; class pt extends v { constructor(e = {}) { const { appId: t = process.env.SPARK_APP_ID || "", apiKey: s = process.env.SPARK_API_KEY || "", apiSecret: n = process.env.SPARK_API_SECRET || "", baseURL: r = "https://spark-api.xf-yun.com", timeout: o = 3e4, fetch: i = globalThis.fetch, httpAgent: c = void 0, ...l } = e; super({ baseURL: r, timeout: o, fetch: i, httpAgent: c, ...l }), this.chat = new De(this), this.images = new je(this), this._options = e, this.appId = t, this.apiKey = s, this.apiSecret = n; } defaultQuery() { return this._options.defaultQuery; } /** * @param url - the URL to sign * @param method - HTTP method * @returns the signed URL */ generateAuthorizationURL(e, t = "GET") { const s = new URL(e, this.baseURL), n = (/* @__PURE__ */ new Date()).toUTCString(), r = this.generateAuthorization({ method: t, path: s.pathname, host: s.host, date: n }); return s.searchParams.set("authorization", r), s.searchParams.set("host", s.host), s.searchParams.set("date", n), s.toString(); } /** * Generates the authentication signature * * See https://www.xfyun.cn/doc/spark/general_url_authentication.html */ generateAuthorization({ method: e, host: t, path: s, date: n }) { const r = `host: ${t}\ndate: ${n}\n${e} ${s} HTTP/1.1`, o = this.hash(r); return globalThis.btoa( `api_key="${this.apiKey}", algorithm="hmac-sha256", headers="host date request-line", signature="${o}"` ); } hash(e) { const t = H("sha256", this.apiSecret); return t.update(e), t.digest("base64"); } } class Ue { constructor(e) { this._client = e; } } class Ke extends Ue { constructor() { super(...arguments), this.models = { "imagine-v5": 33, "anime-v5": 34, "imagine-v4.1": 32, "imagine-v4": 31, "imagine-v3": 30, "imagine-v1": 28, realistic: 29, anime: 21, portrait: 26, "sdxl-1.0":
122 }; } /** * Creates a variation of a given image. */ async createVariation(e, t) { const s = this._client, n = new FormData(), { model: r, style: o = this.models[r ?? "realistic"] } = e; return n.append("image", await k(e.image)), n.append("style_id", (o || 29).toString()), n.append("prompt", e.prompt), n.append("negative_prompt", e.negative_prompt || ""), n.append("strength", (e.strength || 0).toString()), n.append("steps", (e.steps || 30).toString()), n.append("cfg", (e.cfg || 7.5).toString()), n.append("seed", (e.seed || $(1, 1e6)).toString()), { data: [ { binary: (await s.post(`/imagine/${s.apiType}/generations/variations`, { ...t, body: { body: n, [Symbol.toStringTag]: "MultipartBody" }, __binaryResponse: !0 })).body } ], created: Math.floor(Date.now() / 1e3) }; } /** * Experience the magic of Imagine's Image Remix feature, designed to breathe new life into your existing images. */ async edit(e, t) { const s = this._client, n = new FormData(), { model: r, style: o = this.models[r ?? "realistic"] } = e; return n.append("image", await k(e.image)), n.append("style_id", (o || 29).toString()), n.append("prompt", e.prompt), n.append("negative_prompt", e.negative_prompt || ""), n.append("strength", (e.strength || 0).toString()), n.append("control", e.control || "openpose"), n.append("steps", (e.steps || 30).toString()), n.append("cfg", (e.cfg || 7.5).toString()), n.append("seed", (e.seed || $(1, 1e6)).toString()), { data: [ { binary: (await s.post(`/imagine/${s.apiType}/edits/remix`, { ...t, body: { body: n, [Symbol.toStringTag]: "MultipartBody" }, __binaryResponse: !0 })).body } ], created: Math.floor(Date.now() / 1e3) }; } /** * Creates an image given a prompt. */ async generate(e, t) { const s = this._client, n = new FormData(), { model: r, style: o = this.models[r ?? "imagine-v4"] } = e; n.append("style_id", (o || 30).toString()), n.append("prompt", e.prompt), n.append("negative_prompt", e.negative_prompt || ""), n.append("aspect_ratio", e.aspect_ratio || "1:1"), n.append("steps", (e.steps || 30).toString()), n.append("cfg", (e.cfg || 7.5).toString()), n.append("seed", (e.seed || $(1, 1e6)).toString()), n.append("high_res_results", e.quality === "hd" ? "1" : "0"); const i = await s.post(`/imagine/${s.apiType}/generations`, { ...t, body: { body: n, [Symbol.toStringTag]: "MultipartBody" }, __binaryResponse: !0 }); return { created: Math.floor(Date.now() / 1e3), data: [ { binary: i.body } ] }; } /** * The image upscale feature provides a better image to the user by increasing its resolution. */ async upscale(e, t) { const s = this._client, n = new FormData(); n.append("image", await k(e.image)); const r = await s.post(`/imagine/${s.apiType}/upscale`, { ...t, body: { body: n, [Symbol.toStringTag]: "MultipartBody" }, __binaryResponse: !0 }); return { created: Math.floor(Date.now() / 1e3), data: [ { binary: r.body } ] }; } /** * Inpaint is an advanced feature of the Text-to-Image Stable Diffusion pipeline. * It allows users to remove unwanted objects or elements from an image by intelligently filling in the missing areas. 
*/ async restoration(e, t) { const s = this._client, n = new FormData(); return n.append("image", await k(e.image)), n.append("mask", await k(e.mask)), n.append("style_id", "1"), n.append("prompt", e.prompt), n.append("neg_prompt", e.negative_prompt || ""), n.append("inpaint_strength", (e.strength || 0).toString()), n.append("cfg", (e.cfg || 7.5).toString()), { data: [ { binary: (await s.post(`/imagine/${s.apiType}/generations/variations`, { ...t, body: { body: n, [Symbol.toStringTag]: "MultipartBody" }, __binaryResponse: !0 })).body } ], created: Math.floor(Date.now() / 1e3) }; } } class ft extends v { constructor(e = {}) { const { apiKey: t = process.env.VYRO_API_KEY || "", apiType: s = process.env.VYRO_API_TYPE || "api", baseURL: n = "https://api.vyro.ai/v1", timeout: r = 3e4, fetch: o = globalThis.fetch, httpAgent: i = void 0, ...c } = e; super({ baseURL: n, timeout: r, fetch: o, httpAgent: i, ...c }), this.images = new Ke(this), this._options = e, this.apiKey = t, this.apiType = s; } authHeaders() { return { Authorization: `Bearer ${this.apiKey}` }; } defaultHeaders() { return { ...this.authHeaders(), ...this._options.defaultHeaders }; } defaultQuery() { return this._options.defaultQuery; } } const mt = { version: "0.0.1" }; export { yt as APIConnectionError, bt as APIConnectionTimeoutError, wt as APIError, f as APIResource, vt as APIUserAbortError, xt as AuthenticationError, Et as BadRequestError, Ct as ConflictError, We as ErnieAI, Ze as GeminiAI, st as HunYuanAI, St as InternalServerError, U as LineDecoder, ot as MinimaxAI, At as NotFoundError, Rt as OpenAI, It as OpenAIError, kt as PermissionDeniedError, ut as QWenAI, Pt as RateLimitError, fe as SSEDecoder, pt as SparkAI, Tt as UnprocessableEntityError, ft as VYroAI, Ge as castToError, mt as default, G as ensureArray, pe as iterMessages, $ as random, ge as readableStreamAsyncIterable };
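// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the published bundle): every
// client above mirrors the OpenAI SDK surface, so chat completions follow the
// familiar shape. "ernie-bot" comes from the endpoint table above; the prompt
// text and reading EB_API_KEY from the environment are assumptions.
/*
import { ErnieAI } from '@h-lumos/llm-sdk';

const ernie = new ErnieAI({ apiKey: process.env.EB_API_KEY });

// Non-streaming: resolves to an OpenAI-shaped `chat.completion` object.
const completion = await ernie.chat.completions.create({
  model: 'ernie-bot',
  messages: [{ role: 'user', content: 'Hello!' }],
});
console.log(completion.choices[0].message.content);

// Streaming: resolves to a Stream of `chat.completion.chunk` objects.
const stream = await ernie.chat.completions.create({
  model: 'ernie-bot',
  stream: true,
  messages: [{ role: 'user', content: 'Hello!' }],
});
for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0].delta.content ?? '');
}
*/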
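// Embeddings sketch (illustrative): the embeddings resources convert each
// provider's response into an OpenAI-shaped "list" object. "ernie-text-embedding"
// is the only model key in the ERNIE embeddings endpoint table above; the input
// string is invented for the example.
/*
import { ErnieAI } from '@h-lumos/llm-sdk';

const ernie = new ErnieAI({ apiKey: process.env.EB_API_KEY });
const res = await ernie.embeddings.create({
  model: 'ernie-text-embedding',
  input: ['hello world'],
});
console.log(res.data[0].embedding.length);
*/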
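// Text-to-speech sketch (illustrative): MinimaxAI.audio.speech.create posts to
// /text_to_speech for the "speech-01" model and resolves to the raw fetch
// Response whose body is the audio stream; the voice id below is hypothetical.
/*
import { MinimaxAI } from '@h-lumos/llm-sdk';

const minimax = new MinimaxAI({
  apiKey: process.env.MINIMAX_API_KEY,
  orgId: process.env.MINIMAX_API_ORG,
});
const audio = await minimax.audio.speech.create({
  model: 'speech-01',
  input: 'hello world',
  voice: 'male-qn-qingse', // hypothetical voice id, mapped to voice_id
});
// audio.body is a ReadableStream of the synthesized audio.
*/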
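// Image generation sketch (illustrative): QWenAI.images.generate submits an
// asynchronous text-to-image task ("wanx-v1" is the default model in the code
// above), polls /tasks/{id} every 5 s until it finishes, and resolves to
// { created, data } where each item carries a result url.
/*
import { QWenAI } from '@h-lumos/llm-sdk';

const qwen = new QWenAI({ apiKey: process.env.QWEN_API_KEY });
const image = await qwen.images.generate({ prompt: 'a cat sitting on the moon' });
console.log(image.data[0].url);
*/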