// @assistant-ui/react
// React components for AI chat.
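//
// Usage sketch (not part of this file): per the assistant-ui docs,
// `createEdgeRuntimeAPI` is wired up as an HTTP route handler whose POST
// method streams the model response. The route path, the `@ai-sdk/openai`
// provider, and the model name below are illustrative assumptions, not
// something this file prescribes:
//
//   // app/api/chat/route.ts
//   import { createEdgeRuntimeAPI } from "@assistant-ui/react/edge";
//   import { openai } from "@ai-sdk/openai";
//
//   export const { POST } = createEdgeRuntimeAPI({
//     model: openai("gpt-4o"),
//   });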
"use strict"; var __defProp = Object.defineProperty; var __getOwnPropDesc = Object.getOwnPropertyDescriptor; var __getOwnPropNames = Object.getOwnPropertyNames; var __hasOwnProp = Object.prototype.hasOwnProperty; var __export = (target, all) => { for (var name in all) __defProp(target, name, { get: all[name], enumerable: true }); }; var __copyProps = (to, from, except, desc) => { if (from && typeof from === "object" || typeof from === "function") { for (let key of __getOwnPropNames(from)) if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); } return to; }; var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); // src/runtimes/edge/createEdgeRuntimeAPI.ts var createEdgeRuntimeAPI_exports = {}; __export(createEdgeRuntimeAPI_exports, { convertToLanguageModelPrompt: () => convertToLanguageModelPrompt, createEdgeRuntimeAPI: () => createEdgeRuntimeAPI, getEdgeRuntimeResponse: () => getEdgeRuntimeResponse, getEdgeRuntimeStream: () => getEdgeRuntimeStream }); module.exports = __toCommonJS(createEdgeRuntimeAPI_exports); var import_assistantEncoderStream = require("./streams/assistantEncoderStream.cjs"); var import_EdgeRuntimeRequestOptions = require("./EdgeRuntimeRequestOptions.cjs"); var import_toLanguageModelMessages = require("./converters/toLanguageModelMessages.cjs"); var import_toLanguageModelTools = require("./converters/toLanguageModelTools.cjs"); var import_toolResultStream = require("./streams/toolResultStream.cjs"); var import_runResultStream = require("./streams/runResultStream.cjs"); var import_ModelConfigTypes = require("../../types/ModelConfigTypes.cjs"); var import_toCoreMessages = require("./converters/toCoreMessages.cjs"); var import_streamPartEncoderStream = require("./streams/utils/streamPartEncoderStream.cjs"); var voidStream = () => { return new WritableStream({ abort(reason) { console.error("Server stream processing aborted:", reason); } }); }; var getEdgeRuntimeStream = async ({ abortSignal, requestData: unsafeRequest, options: { model: modelOrCreator, system: serverSystem, tools: serverTools = {}, toolChoice, onFinish, ...unsafeSettings } }) => { const settings = import_ModelConfigTypes.LanguageModelV1CallSettingsSchema.parse(unsafeSettings); const lmServerTools = (0, import_toLanguageModelTools.toLanguageModelTools)(serverTools); const hasServerTools = Object.values(serverTools).some((v) => !!v.execute); const { system: clientSystem, tools: clientTools = [], messages, apiKey, baseUrl, modelName, ...callSettings } = import_EdgeRuntimeRequestOptions.EdgeRuntimeRequestOptionsSchema.parse(unsafeRequest); const systemMessages = []; if (serverSystem) systemMessages.push(serverSystem); if (clientSystem) systemMessages.push(clientSystem); const system = systemMessages.join("\n\n"); for (const clientTool of clientTools) { if (serverTools?.[clientTool.name]) { throw new Error( `Tool ${clientTool.name} was defined in both the client and server tools. This is not allowed.` ); } } const model = typeof modelOrCreator === "function" ? await modelOrCreator({ apiKey, baseUrl, modelName }) : modelOrCreator; let stream; const streamResult = await streamMessage({ ...settings, ...callSettings, model, abortSignal, ...!!system ? { system } : void 0, messages, tools: lmServerTools.concat(clientTools), ...toolChoice ? 
{ toolChoice } : void 0 }); stream = streamResult.stream; const canExecuteTools = hasServerTools && toolChoice?.type !== "none"; if (canExecuteTools) { stream = stream.pipeThrough((0, import_toolResultStream.toolResultStream)(serverTools, abortSignal)); } if (canExecuteTools || onFinish) { const tees = stream.tee(); stream = tees[0]; let serverStream = tees[1]; if (onFinish) { let lastChunk; serverStream = serverStream.pipeThrough((0, import_runResultStream.runResultStream)()).pipeThrough( new TransformStream({ transform(chunk) { lastChunk = chunk; }, flush() { if (!lastChunk?.status || lastChunk.status.type === "running") return; const resultingMessages = [ ...messages, (0, import_toCoreMessages.toCoreMessage)({ role: "assistant", content: lastChunk.content }) ]; onFinish({ messages: resultingMessages, metadata: { // TODO // eslint-disable-next-line @typescript-eslint/no-non-null-asserted-optional-chain roundtrips: lastChunk.metadata?.steps, // eslint-disable-next-line @typescript-eslint/no-non-null-asserted-optional-chain steps: lastChunk.metadata?.steps } }); } }) ); } serverStream.pipeTo(voidStream()).catch((e) => { console.error("Server stream processing error:", e); }); } return stream; }; var getEdgeRuntimeResponse = async (options) => { const stream = await getEdgeRuntimeStream(options); return new Response( stream.pipeThrough((0, import_assistantEncoderStream.assistantEncoderStream)()).pipeThrough((0, import_streamPartEncoderStream.streamPartEncoderStream)()), { headers: { "Content-Type": "text/plain; charset=utf-8" } } ); }; var createEdgeRuntimeAPI = (options) => ({ POST: async (request) => getEdgeRuntimeResponse({ abortSignal: request.signal, requestData: await request.json(), options }) }); async function streamMessage({ model, system, messages, tools, toolChoice, ...options }) { return model.doStream({ inputFormat: "messages", mode: { type: "regular", ...tools ? { tools } : void 0, ...toolChoice ? { toolChoice } : void 0 }, prompt: convertToLanguageModelPrompt(system, messages), ...options }); } function convertToLanguageModelPrompt(system, messages) { const languageModelMessages = []; if (system != null) { languageModelMessages.push({ role: "system", content: system }); } languageModelMessages.push(...(0, import_toLanguageModelMessages.toLanguageModelMessages)(messages)); return languageModelMessages; } // Annotate the CommonJS export names for ESM import in node: 0 && (module.exports = { convertToLanguageModelPrompt, createEdgeRuntimeAPI, getEdgeRuntimeResponse, getEdgeRuntimeStream }); //# sourceMappingURL=createEdgeRuntimeAPI.js.map
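//
// Server-side tool sketch (hedged, not part of this file): tools supplied in
// `options.tools` that define an `execute` function are run on the server via
// `toolResultStream` (see `canExecuteTools` above). The `get_weather` tool,
// its zod `parameters` schema, and its result shape below are illustrative
// assumptions:
//
//   import { z } from "zod";
//
//   export const { POST } = createEdgeRuntimeAPI({
//     model: openai("gpt-4o"),
//     tools: {
//       get_weather: {
//         parameters: z.object({ city: z.string() }),
//         execute: async ({ city }) => ({ city, temperatureC: 20 }),
//       },
//     },
//   });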