@assistant-ui/react
React components for AI chat.
// src/runtimes/edge/createEdgeRuntimeAPI.ts
import { assistantEncoderStream } from "./streams/assistantEncoderStream.mjs";
import { EdgeRuntimeRequestOptionsSchema } from "./EdgeRuntimeRequestOptions.mjs";
import { toLanguageModelMessages } from "./converters/toLanguageModelMessages.mjs";
import { toLanguageModelTools } from "./converters/toLanguageModelTools.mjs";
import { toolResultStream } from "./streams/toolResultStream.mjs";
import { runResultStream } from "./streams/runResultStream.mjs";
import { LanguageModelV1CallSettingsSchema } from "../../types/ModelConfigTypes.mjs";
import { toCoreMessage } from "./converters/toCoreMessages.mjs";
import { streamPartEncoderStream } from "./streams/utils/streamPartEncoderStream.mjs";
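
// Write-only sink. Piping the server-side branch of the tee into this stream
// drains it, so server tool execution and the onFinish callback run to
// completion even though nothing reads the result.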
var voidStream = () => {
  return new WritableStream({
    abort(reason) {
      console.error("Server stream processing aborted:", reason);
    }
  });
};
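
// Validates the untrusted client request, merges server- and client-side
// configuration (system prompt, tools, call settings), and returns the raw
// assistant chunk stream produced by the language model.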
var getEdgeRuntimeStream = async ({
  abortSignal,
  requestData: unsafeRequest,
  options: {
    model: modelOrCreator,
    system: serverSystem,
    tools: serverTools = {},
    toolChoice,
    onFinish,
    ...unsafeSettings
  }
}) => {
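  // Parse the server-side call settings and the untrusted client payload;
  // both are validated by their schemas before anything reaches the model.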
  const settings = LanguageModelV1CallSettingsSchema.parse(unsafeSettings);
  const lmServerTools = toLanguageModelTools(serverTools);
  const hasServerTools = Object.values(serverTools).some((v) => !!v.execute);
  const {
    system: clientSystem,
    tools: clientTools = [],
    messages,
    apiKey,
    baseUrl,
    modelName,
    ...callSettings
  } = EdgeRuntimeRequestOptionsSchema.parse(unsafeRequest);
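  // Server and client system prompts are concatenated, server first.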
  const systemMessages = [];
  if (serverSystem) systemMessages.push(serverSystem);
  if (clientSystem) systemMessages.push(clientSystem);
  const system = systemMessages.join("\n\n");
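  // A tool name may only be defined on one side; reject collisions up front.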
  for (const clientTool of clientTools) {
    if (serverTools?.[clientTool.name]) {
      throw new Error(
        `Tool ${clientTool.name} was defined in both the client and server tools. This is not allowed.`
      );
    }
  }
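  // `model` may be a LanguageModelV1 instance or a factory that receives the
  // client-supplied credentials and model name.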
  const model =
    typeof modelOrCreator === "function"
      ? await modelOrCreator({ apiKey, baseUrl, modelName })
      : modelOrCreator;
  let stream;
  const streamResult = await streamMessage({
    ...settings,
    ...callSettings,
    model,
    abortSignal,
    ...(!!system ? { system } : void 0),
    messages,
    tools: lmServerTools.concat(clientTools),
    ...(toolChoice ? { toolChoice } : void 0)
  });
  stream = streamResult.stream;
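  // Execute server-defined tools inline, unless tool calls were disabled via
  // toolChoice: { type: "none" }.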
  const canExecuteTools = hasServerTools && toolChoice?.type !== "none";
  if (canExecuteTools) {
    stream = stream.pipeThrough(toolResultStream(serverTools, abortSignal));
  }
  if (canExecuteTools || onFinish) {
    const tees = stream.tee();
    stream = tees[0];
    let serverStream = tees[1];
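    // The first tee branch is returned to the client; this second branch is
    // consumed server-side so onFinish can observe the completed run.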
    if (onFinish) {
      let lastChunk;
      serverStream = serverStream.pipeThrough(runResultStream()).pipeThrough(
        new TransformStream({
          transform(chunk) {
            lastChunk = chunk;
          },
          flush() {
            if (!lastChunk?.status || lastChunk.status.type === "running")
              return;
            const resultingMessages = [
              ...messages,
              toCoreMessage({
                role: "assistant",
                content: lastChunk.content
              })
            ];
            onFinish({
              messages: resultingMessages,
              metadata: {
                // TODO
                roundtrips: lastChunk.metadata?.steps,
                steps: lastChunk.metadata?.steps
              }
            });
          }
        })
      );
    }
    serverStream.pipeTo(voidStream()).catch((e) => {
      console.error("Server stream processing error:", e);
    });
  }
  return stream;
};
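
// Wraps the assistant stream in an HTTP Response, encoded with the
// assistant-ui stream-part wire format.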
var getEdgeRuntimeResponse = async (options) => {
  const stream = await getEdgeRuntimeStream(options);
  return new Response(
    stream
      .pipeThrough(assistantEncoderStream())
      .pipeThrough(streamPartEncoderStream()),
    {
      headers: {
        "Content-Type": "text/plain; charset=utf-8"
      }
    }
  );
};
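
// Builds a fetch-style route handler (Request in, Response out) exposing a
// single POST endpoint.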
var createEdgeRuntimeAPI = (options) => ({
  POST: async (request) =>
    getEdgeRuntimeResponse({
      abortSignal: request.signal,
      requestData: await request.json(),
      options
    })
});
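
// Usage sketch (not part of this file; the import path and the @ai-sdk/openai
// provider are illustrative assumptions, any LanguageModelV1-compatible model
// works):
//
//   // app/api/chat/route.ts
//   import { createEdgeRuntimeAPI } from "@assistant-ui/react/edge";
//   import { openai } from "@ai-sdk/openai";
//
//   export const { POST } = createEdgeRuntimeAPI({
//     model: openai("gpt-4o"),
//   });

// Thin wrapper over the provider's LanguageModelV1.doStream call.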
async function streamMessage({
  model,
  system,
  messages,
  tools,
  toolChoice,
  ...options
}) {
  return model.doStream({
    inputFormat: "messages",
    mode: {
      type: "regular",
      ...(tools ? { tools } : void 0),
      ...(toolChoice ? { toolChoice } : void 0)
    },
    prompt: convertToLanguageModelPrompt(system, messages),
    ...options
  });
}
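
// Prepends the merged system prompt (if any) as a system message ahead of the
// converted conversation history.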
function convertToLanguageModelPrompt(system, messages) {
  const languageModelMessages = [];
  if (system != null) {
    languageModelMessages.push({ role: "system", content: system });
  }
  languageModelMessages.push(...toLanguageModelMessages(messages));
  return languageModelMessages;
}
export {
  convertToLanguageModelPrompt,
  createEdgeRuntimeAPI,
  getEdgeRuntimeResponse,
  getEdgeRuntimeStream
};
//# sourceMappingURL=createEdgeRuntimeAPI.mjs.map