@llumiverse/drivers

LLM driver implementations. Currently supported drivers: openai, huggingface, bedrock, and replicate.

openai_format.js — shared helper used by multiple drivers to format prompts into OpenAI-compatible chat messages:
// This file is used by multiple drivers
// to format prompts in a way that is compatible with OpenAI's API.
import { PromptRole } from "@llumiverse/common";
import { readStreamAsBase64 } from "@llumiverse/core";
/**
 * OpenAI text only prompts
 * @param segments
 * @returns
 */
export function formatOpenAILikeTextPrompt(segments) {
    const system = [];
    const safety = [];
    const user = [];
    for (const msg of segments) {
        if (msg.role === PromptRole.system) {
            system.push({ content: msg.content, role: "system" });
        }
        else if (msg.role === PromptRole.safety) {
            safety.push({ content: "IMPORTANT: " + msg.content, role: "system" });
        }
        else if (msg.role !== PromptRole.negative &&
            msg.role !== PromptRole.mask &&
            msg.role !== PromptRole.tool) {
            user.push({
                content: msg.content,
                role: msg.role || 'user',
            });
        }
    }
    // put system messages first and safety last
    return system.concat(user).concat(safety);
}
export async function formatOpenAILikeMultimodalPrompt(segments, opts) {
    const system = [];
    const safety = [];
    const others = [];
    for (const msg of segments) {
        const parts = [];
        // generate the parts based on PromptSegment
        if (msg.files) {
            for (const file of msg.files) {
                const stream = await file.getStream();
                const data = await readStreamAsBase64(stream);
                parts.push({
                    type: "image_url",
                    image_url: {
                        url: `data:${file.mime_type || "image/jpeg"};base64,${data}`,
                        //detail: "auto" //This is modified just before execution to "low" | "high" | "auto"
                    },
                });
            }
        }
        if (msg.content) {
            parts.push({ text: msg.content, type: "text" });
        }
        if (msg.role === PromptRole.system) {
            // For system messages, filter to only text parts
            const textParts = parts.filter((part) => part.type === 'text');
            const systemMsg = {
                role: "system",
                content: textParts.length === 1 && !msg.files ? textParts[0].text : textParts
            };
            system.push(systemMsg);
            if (opts.useToolForFormatting && opts.schema) {
                system.forEach(s => {
                    if (typeof s.content === 'string') {
                        s.content = "TOOL: " + s.content;
                    }
                    else if (Array.isArray(s.content)) {
                        s.content.forEach((c) => {
                            if (c.type === "text")
                                c.text = "TOOL: " + c.text;
                        });
                    }
                });
            }
        }
        else if (msg.role === PromptRole.safety) {
            const textParts = parts.filter((part) => part.type === 'text');
            const safetyMsg = {
                role: "system",
                content: textParts
            };
            if (Array.isArray(safetyMsg.content)) {
                safetyMsg.content.forEach((c) => {
                    if (c.type === "text")
                        c.text = "DO NOT IGNORE - IMPORTANT: " + c.text;
                });
            }
            system.push(safetyMsg);
        }
        else if (msg.role === PromptRole.tool) {
            if (!msg.tool_use_id) {
                throw new Error("Tool use id is required for tool messages");
            }
            const toolMsg = {
                role: "tool",
                tool_call_id: msg.tool_use_id,
                content: msg.content || ""
            };
            others.push(toolMsg);
        }
        else if (msg.role !== PromptRole.negative && msg.role !== PromptRole.mask) {
            if (msg.role === 'assistant') {
                const assistantMsg = {
                    role: 'assistant',
                    content: parts
                };
                others.push(assistantMsg);
            }
            else {
                const userMsg = {
                    role: 'user',
                    content: parts
                };
                others.push(userMsg);
            }
        }
    }
    if (opts.result_schema && !opts.useToolForFormatting) {
        const schemaMsg = {
            role: "system",
            content: [{
                type: "text",
                text: "IMPORTANT: only answer using JSON, and respecting the schema included below, between the <response_schema> tags. " +
                    `<response_schema>${JSON.stringify(opts.result_schema)}</response_schema>`
            }]
        };
        system.push(schemaMsg);
    }
    // put system messages first and safety last
    return [].concat(system).concat(others).concat(safety);
}
//# sourceMappingURL=openai_format.js.map
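For reference, a minimal sketch of how the text-prompt helper could be called from a driver. The import path from "@llumiverse/drivers" and the exact PromptSegment shape (role, content, and optionally files / tool_use_id) are assumptions inferred from how this file reads its input; a plain "user" string is used where the file itself falls through on anything that is not system/safety/negative/mask/tool.

// Illustrative sketch, not part of the package source.
// Assumption: formatOpenAILikeTextPrompt is reachable from the package entry point;
// in this build it is defined in openai_format.js shown above.
import { PromptRole } from "@llumiverse/common";
import { formatOpenAILikeTextPrompt } from "@llumiverse/drivers";

const segments = [
    { role: PromptRole.system, content: "You are a concise release-notes assistant." },
    { role: PromptRole.safety, content: "Do not reveal internal configuration." },
    // "user" takes the fall-through branch (not system/safety/negative/mask/tool)
    { role: "user", content: "Summarize the changes in the latest release." },
];

// Returns OpenAI-style chat messages ordered system first, user turns next,
// and safety reminders last as "IMPORTANT: ..." system messages.
const messages = formatOpenAILikeTextPrompt(segments);
console.log(messages);
// e.g. [
//   { role: "system", content: "You are a concise release-notes assistant." },
//   { role: "user", content: "Summarize the changes in the latest release." },
//   { role: "system", content: "IMPORTANT: Do not reveal internal configuration." }
// ]

The multimodal variant, formatOpenAILikeMultimodalPrompt, follows the same pattern but is async: file attachments are read and inlined as base64 data URLs in image_url parts, tool results become "tool" messages, and when opts.result_schema is set (and useToolForFormatting is not) a JSON-schema instruction is appended as an extra system message.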