jorel
Version:
A unified wrapper for working with LLMs from multiple providers, including streams, images, documents & automatic tool use.
120 lines (119 loc) • 4.36 kB
JavaScript
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.convertLlmMessagesToVertexAiMessages = void 0;
const utils_1 = require("../../media/utils");
/** Wraps a plain text string into a single-element Vertex AI `Part[]` array. */
const textContentToParts = (text) => [{ text }];
/**
 * Converts an array of mixed unified content entries (text, inline image
 * data, image URLs) into Vertex AI `Part` objects.
 *
 * @param content - Entries with a `type` of "text", "imageData", or "imageUrl".
 * @returns The corresponding Vertex AI parts, in input order.
 * @throws {Error} When an image entry lacks a MIME type or the entry type is unknown.
 */
const mixedContentToParts = async (content) => {
    const parts = [];
    for (const entry of content) {
        switch (entry.type) {
            case "text":
                parts.push({ text: entry.text });
                break;
            case "imageData":
                // Vertex AI requires an explicit MIME type for inline data.
                if (!entry.mimeType) {
                    throw new Error(`Missing MIME type`);
                }
                parts.push({
                    inlineData: {
                        // Strip the "data:...;base64," prefix from the data URL.
                        data: (0, utils_1.getBase64PartFromDataUrl)(entry.data),
                        mimeType: entry.mimeType,
                    },
                });
                break;
            case "imageUrl":
                if (!entry.mimeType) {
                    throw new Error(`Missing MIME type`);
                }
                parts.push({
                    fileData: {
                        fileUri: entry.url,
                        mimeType: entry.mimeType,
                    },
                });
                break;
            default:
                throw new Error(`Unsupported content type`);
        }
    }
    return parts;
};
/**
 * Converts unified LLM messages to Vertex AI's messages (Content).
 *
 * System messages are pulled out and joined into a single `systemMessage`
 * string (Vertex AI takes system instructions separately from the chat
 * history). Every other message becomes a Vertex AI `Content` entry with
 * role "user" or "model" — the only two roles Vertex AI accepts.
 *
 * @param messages - Unified LLM messages (roles: "system", "user",
 *   "assistant", "assistant_with_tools").
 * @returns An object with `systemMessage` (string or undefined) and
 *   `chatMessages` (Vertex AI Content array).
 * @throws {Error} On an unsupported message role.
 */
const convertLlmMessagesToVertexAiMessages = async (messages) => {
    // 1. Extract system messages and join them together
    const systemMessages = messages.filter((m) => m.role === "system");
    const systemMessage = systemMessages
        .map((m) => m.content)
        .join("\n")
        .trim() || undefined;
    // 2. Create the chat messages array by converting LLM messages to Vertex AI's Content
    const chatMessages = [];
    for (const m of messages) {
        if (m.role !== "system") {
            if (m.role === "assistant") {
                // Fix: Vertex AI only accepts the roles "user" and "model".
                // Plain assistant messages previously used the invalid role
                // "assistant", inconsistent with the tool-call branch below.
                chatMessages.push({
                    role: "model",
                    parts: textContentToParts(m.content),
                });
            }
            else if (m.role === "assistant_with_tools") {
                // Model turn: optional text content plus the function calls it issued.
                chatMessages.push({
                    role: "model",
                    parts: [
                        ...(m.content ? textContentToParts(m.content) : []),
                        ...m.toolCalls.map((toolCall) => ({
                            functionCall: {
                                name: toolCall.request.function.name,
                                args: toolCall.request.function.arguments,
                            },
                        })),
                    ],
                });
                // Paired user turn carrying the tool results. Only settled
                // calls (completed / error / cancelled) yield a response part;
                // non-completed states report an error payload instead.
                chatMessages.push({
                    role: "user",
                    parts: m.toolCalls
                        .filter((toolCall) => toolCall.executionState === "completed" ||
                        toolCall.executionState === "error" ||
                        toolCall.executionState === "cancelled")
                        .map((toolCall) => ({
                        functionResponse: {
                            name: toolCall.request.function.name,
                            response: toolCall.executionState === "completed"
                                ? toolCall.result
                                : { error: toolCall.error?.message || "Cancelled" },
                        },
                    })),
                });
            }
            else if (m.role === "user") {
                if (typeof m.content === "string") {
                    chatMessages.push({
                        role: "user",
                        parts: textContentToParts(m.content),
                    });
                }
                else if (Array.isArray(m.content)) {
                    chatMessages.push({
                        role: m.role,
                        parts: await mixedContentToParts(m.content),
                    });
                }
                // NOTE(review): a user message whose content is neither a string
                // nor an array is silently dropped — presumably unreachable given
                // the unified message type; confirm against callers.
            }
            else {
                throw new Error(`Unsupported message role`);
            }
        }
    }
    return {
        systemMessage,
        chatMessages,
    };
};
exports.convertLlmMessagesToVertexAiMessages = convertLlmMessagesToVertexAiMessages;