@assistant-ui/react
TypeScript/React library for AI Chat
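// esbuild-generated CommonJS interop helpers: __export defines live getter
// bindings for the named exports, and __toCommonJS marks the namespace object
// as an ES module before handing it to module.exports.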
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
for (var name in all)
__defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
if (from && typeof from === "object" || typeof from === "function") {
for (let key of __getOwnPropNames(from))
if (!__hasOwnProp.call(to, key) && key !== except)
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
}
return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// src/runtimes/edge/createEdgeRuntimeAPI.ts
var createEdgeRuntimeAPI_exports = {};
__export(createEdgeRuntimeAPI_exports, {
convertToLanguageModelPrompt: () => convertToLanguageModelPrompt,
createEdgeRuntimeAPI: () => createEdgeRuntimeAPI,
getEdgeRuntimeResponse: () => getEdgeRuntimeResponse,
getEdgeRuntimeStream: () => getEdgeRuntimeStream
});
module.exports = __toCommonJS(createEdgeRuntimeAPI_exports);
var import_EdgeRuntimeRequestOptions = require("./EdgeRuntimeRequestOptions.js");
var import_toLanguageModelMessages = require("./converters/toLanguageModelMessages.js");
var import_toLanguageModelTools = require("./converters/toLanguageModelTools.js");
var import_toolResultStream = require("./streams/toolResultStream.js");
var import_ModelContextTypes = require("../../model-context/ModelContextTypes.js");
var import_assistant_stream = require("assistant-stream");
var import_ai_sdk = require("assistant-stream/ai-sdk");
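// Merges the server-side configuration (model, system prompt, server tools)
// with the client's request payload, invokes the language model, and returns
// a ReadableStream of assistant-stream chunks.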
var getEdgeRuntimeStream = async ({
abortSignal,
requestData: unsafeRequest,
options: {
model: modelOrCreator,
system: serverSystem,
tools: serverTools = {},
toolChoice,
onFinish,
...unsafeSettings
}
}) => {
const settings = import_ModelContextTypes.LanguageModelV1CallSettingsSchema.parse(unsafeSettings);
const lmServerTools = (0, import_toLanguageModelTools.toLanguageModelTools)(serverTools);
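  // A tool is server-executable only if it provides an execute() function;
  // tools without one are advertised to the model but run on the client.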
const hasServerTools = Object.values(serverTools).some((v) => !!v.execute);
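  // Validate the untrusted client payload; prompt-level fields are pulled out
  // here and every remaining key is treated as a model call setting.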
const {
system: clientSystem,
tools: clientTools = [],
messages,
apiKey,
baseUrl,
modelName,
...callSettings
} = import_EdgeRuntimeRequestOptions.EdgeRuntimeRequestOptionsSchema.parse(unsafeRequest);
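  // Combine the system prompts with the server's instructions first, separated
  // by a blank line, so server-side instructions always come first.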
const systemMessages = [];
if (serverSystem) systemMessages.push(serverSystem);
if (clientSystem) systemMessages.push(clientSystem);
const system = systemMessages.join("\n\n");
for (const clientTool of clientTools) {
if (serverTools?.[clientTool.name]) {
throw new Error(
`Tool ${clientTool.name} was defined in both the client and server tools. This is not allowed.`
);
}
}
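  // `model` may be a concrete language model instance or a factory that
  // receives the client-supplied apiKey / baseUrl / modelName and returns one.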
  const model = typeof modelOrCreator === "function"
    ? await modelOrCreator({ apiKey, baseUrl, modelName })
    : modelOrCreator;
  const streamResult = await streamMessage({
    ...settings,
    ...callSettings,
    model,
    abortSignal,
    ...(system ? { system } : undefined),
    messages,
    tools: lmServerTools.concat(clientTools),
    ...(toolChoice ? { toolChoice } : undefined)
  });
  let stream = streamResult.stream.pipeThrough(
    new import_ai_sdk.LanguageModelV1StreamDecoder()
  );
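  // Tool execution runs on the server only when at least one server tool has
  // execute() and the request did not set toolChoice to "none".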
const canExecuteTools = hasServerTools && toolChoice?.type !== "none";
if (canExecuteTools) {
stream = stream.pipeThrough((0, import_toolResultStream.toolResultStream)(serverTools, abortSignal));
}
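  // When tool results were executed or an onFinish callback is configured, tee
  // the stream: one branch becomes the HTTP response body, the other is
  // accumulated server-side into a complete assistant message.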
if (canExecuteTools || onFinish) {
    const [clientStream, serverStream] = stream.tee();
    stream = clientStream;
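    // Accumulate the server branch into a single assistant message; once the
    // stream closes with a terminal (non-running) status, hand the complete
    // transcript to onFinish.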
if (onFinish) {
let lastChunk;
serverStream.pipeThrough(new import_assistant_stream.AssistantMessageAccumulator()).pipeTo(
new WritableStream({
write(chunk) {
lastChunk = chunk;
},
close() {
if (!lastChunk?.status || lastChunk.status.type === "running")
return;
const resultingMessages = [
...messages,
{
id: "DEFAULT",
createdAt: /* @__PURE__ */ new Date(),
role: "assistant",
content: lastChunk.content,
status: lastChunk.status,
metadata: lastChunk.metadata
}
];
onFinish({
messages: resultingMessages,
metadata: {
steps: lastChunk.metadata.steps
}
});
},
abort(e) {
console.error("Server stream processing error:", e);
}
})
      ).catch(() => {
        // avoid an unhandled rejection; pipe failures are already surfaced
        // via the abort() handler above
      });
}
}
return stream;
};
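// Wraps the chunk stream in the data-stream wire format and returns it as an
// HTTP Response; the `x-vercel-ai-data-stream: v1` header tells AI SDK clients
// which protocol decoder to use.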
var getEdgeRuntimeResponse = async (options) => {
const stream = await getEdgeRuntimeStream(options);
return new Response(stream.pipeThrough(new import_assistant_stream.DataStreamEncoder()), {
headers: {
"Content-Type": "text/plain; charset=utf-8",
"x-vercel-ai-data-stream": "v1"
}
});
};
var createEdgeRuntimeAPI = (options) => ({
POST: async (request) => getEdgeRuntimeResponse({
abortSignal: request.signal,
requestData: await request.json(),
options
})
});
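// Usage sketch, assuming a Next.js App Router project and the @ai-sdk/openai
// provider (the import path and provider call below are illustrative, not
// part of this file):
//
//   // app/api/chat/route.ts
//   import { createEdgeRuntimeAPI } from "@assistant-ui/react/edge";
//   import { openai } from "@ai-sdk/openai";
//
//   export const { POST } = createEdgeRuntimeAPI({
//     model: openai("gpt-4o"),
//   });
//
// streamMessage adapts the merged settings to the doStream() call shape of
// the AI SDK's LanguageModelV1 provider interface.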
async function streamMessage({
model,
system,
messages,
tools,
toolChoice,
...options
}) {
return model.doStream({
inputFormat: "messages",
mode: {
type: "regular",
      ...(tools ? { tools } : undefined),
      ...(toolChoice ? { toolChoice } : undefined)
},
prompt: convertToLanguageModelPrompt(system, messages),
...options
});
}
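// Prepends the combined system prompt (when present) as a system message and
// converts the thread messages into language-model prompt messages.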
function convertToLanguageModelPrompt(system, messages) {
const languageModelMessages = [];
if (system != null) {
languageModelMessages.push({ role: "system", content: system });
}
languageModelMessages.push(...(0, import_toLanguageModelMessages.toLanguageModelMessages)(messages));
return languageModelMessages;
}
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
convertToLanguageModelPrompt,
createEdgeRuntimeAPI,
getEdgeRuntimeResponse,
getEdgeRuntimeStream
});
//# sourceMappingURL=createEdgeRuntimeAPI.js.map