call-ai
Lightweight library for making AI API calls with streaming support
JavaScript
import { isToolUseType, isToolUseResponse, isOpenAIArray, CallAIError, } from "./types.js";
import { globalDebug } from "./key-management.js";
import { callAINonStreaming } from "./non-streaming.js";
import { callAIStreaming } from "./streaming.js";
import { PACKAGE_VERSION } from "./version.js";
import { callAiEnv } from "./env.js";
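// api-core.js: core entry point for call-ai. callAi() validates its input,
// picks a schema strategy for the target model, and dispatches to the
// streaming or non-streaming implementation; the remaining exports are the
// helpers it is built from.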
function callAi(prompt, options = {}) {
const debug = options.debug === undefined ? globalDebug : options.debug;
    // Called only for its validation side effects here; the return value is unused.
    prepareRequestParams(prompt, options);
    // Default (no-schema) strategy. Its prepareRequest is a guard: whenever
    // options.schema is set, one of the real strategies below replaces this object.
    let schemaStrategy = {
strategy: "none",
model: options.model || "openai/gpt-3.5-turbo",
prepareRequest: () => {
throw new Error("Schema strategy not implemented");
},
processResponse: (response) => {
if (response && typeof response === "object") {
return JSON.stringify(response);
}
if (typeof response !== "string") {
throw new Error(`Unexpected response type: ${typeof response}`);
}
return response;
},
shouldForceStream: false,
};
if (options.schema) {
const model = options.model || "openai/gpt-3.5-turbo";
if (/claude/i.test(model) || /anthropic/i.test(model)) {
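            // Claude/Anthropic models: express the schema as a single forced tool call.
            // For example, a schema of { name: "todo", parameters: { type: "object",
            // properties: { text: { type: "string" } } } } is prepared as:
            //   { tools: [{ type: "function", function: { name: "todo", description:
            //     "Execute a function", parameters: { type: "object", properties:
            //     { text: { type: "string" } } } } }],
            //     tool_choice: { type: "function", function: { name: "todo" } } }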
schemaStrategy = {
strategy: "tool_mode",
model,
shouldForceStream: false,
prepareRequest: (schema) => {
let toolDef = {};
if (typeof schema === "string") {
try {
toolDef = JSON.parse(schema);
}
catch (e) {
toolDef = { description: schema };
}
}
else if (schema) {
toolDef = schema;
}
const tools = [
{
type: "function",
function: {
name: toolDef.name || "execute_function",
description: toolDef.description || "Execute a function",
parameters: toolDef.parameters || {
type: "object",
properties: {},
},
},
},
];
return {
tools,
tool_choice: {
type: "function",
function: { name: tools[0].function.name },
},
};
},
processResponse: (response) => {
if (typeof response === "string") {
return response;
}
if (isToolUseType(response)) {
return response.input || "{}";
}
if (isToolUseResponse(response)) {
return response.tool_use.input || "{}";
}
if (isOpenAIArray(response)) {
if (response.length > 0 && response[0].function && response[0].function.arguments) {
return response[0].function.arguments;
}
}
return typeof response === "string" ? response : JSON.stringify(response);
},
};
}
else {
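            // All other models: request OpenAI-style structured output. The same
            // { name: "todo", properties: { text: { type: "string" } } } schema is
            // prepared as:
            //   { response_format: { type: "json_schema", json_schema: { name: "todo",
            //     schema: { type: "object", properties: { text: { type: "string" } },
            //     required: ["text"], additionalProperties: false } } } }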
schemaStrategy = {
strategy: "json_schema",
model,
shouldForceStream: false,
prepareRequest: (schema) => {
const schemaObj = schema || {};
return {
response_format: {
type: "json_schema",
json_schema: {
name: schemaObj.name || "result",
schema: {
type: "object",
properties: schemaObj.properties || {},
required: schemaObj.required || Object.keys(schemaObj.properties || {}),
additionalProperties: schemaObj.additionalProperties !== undefined ? schemaObj.additionalProperties : false,
},
},
},
};
},
processResponse: (response) => {
if (typeof response === "string") {
return response;
}
return JSON.stringify(response);
},
};
}
}
if (options.stream) {
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] Making streaming request`);
}
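        // Start the request immediately, but hand back a proxy that behaves like
        // an async generator so callers can `for await` the result of callAi()
        // without awaiting callAi itself.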
const streamPromise = (async () => {
return callAIStreaming(prompt, {
...options,
schemaStrategy,
});
})();
return createBackwardCompatStreamingProxy(streamPromise);
}
else {
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] Making non-streaming request`);
}
const optionsWithSchema = {
...options,
schemaStrategy,
};
return callAINonStreaming(prompt, optionsWithSchema);
}
}
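// Usage sketch (hypothetical key and model; the non-streaming path is assumed
// to resolve to the response text):
// const text = await callAi("Hello!", { apiKey: "sk-...", model: "openai/gpt-3.5-turbo" });
// for await (const chunk of callAi("Hello!", { apiKey: "sk-...", stream: true })) {
//   process.stdout.write(chunk);
// }

// Drains a streaming generator into a single string. On failure it rethrows a
// CallAIError that carries whatever partial content had already arrived.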
async function bufferStreamingResults(generator) {
let result = "";
try {
for await (const chunk of generator) {
result += chunk;
}
return result;
}
catch (error) {
if (error instanceof Error) {
const enhancedError = new CallAIError({
message: `${error.message} (Partial content: ${result.slice(0, 100)}...)`,
status: 511,
partialContent: result,
originalError: error,
});
throw enhancedError;
}
else {
const newError = new CallAIError({
message: `Streaming error: ${String(error)}`,
status: 511,
partialContent: result,
originalError: error,
});
throw newError;
}
}
}
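// Example (the streaming proxy returned by callAi is itself async-iterable, so
// it can be buffered directly; the key is hypothetical):
// const full = await bufferStreamingResults(callAi("Hi", { apiKey: "sk-...", stream: true }));

// Wraps a Promise<AsyncGenerator> in a Proxy that forwards next/throw/return,
// Symbol.asyncIterator, and then/catch/finally, so callers can treat the
// pending stream as either an async generator or a promise.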
function createBackwardCompatStreamingProxy(promise) {
return new Proxy({}, {
get(_target, prop) {
if (prop === "next" || prop === "throw" || prop === "return" || prop === Symbol.asyncIterator) {
if (prop === Symbol.asyncIterator) {
return function () {
return {
async next(value) {
try {
const generator = await promise;
return generator.next(value);
}
catch (error) {
return Promise.reject(error);
}
},
};
};
}
return async function (value) {
const generator = await promise;
switch (prop) {
case "next":
return generator.next(value);
case "throw":
return generator.throw(value);
case "return":
return generator.return(value);
default:
throw new Error(`Unknown method: ${String(prop)}`);
}
};
}
if (prop === "then" || prop === "catch" || prop === "finally") {
return promise[prop].bind(promise);
}
return undefined;
},
});
}
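// Validates the API key and prompt and normalizes a string prompt into a
// single user message. callAi invokes this purely for its validation side
// effects; the returned { messages, apiKey } is for direct callers.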
function prepareRequestParams(prompt, options = {}) {
const apiKey = options.apiKey || callAiEnv.CALLAI_API_KEY;
if (!apiKey) {
throw new Error("API key is required. Provide it via options.apiKey or set window.CALLAI_API_KEY");
}
if (!prompt || (typeof prompt !== "string" && !Array.isArray(prompt))) {
throw new Error(`Invalid prompt: ${prompt}. Must be a string or an array of message objects.`);
}
const messages = Array.isArray(prompt) ? prompt : [{ role: "user", content: prompt }];
if (Array.isArray(prompt)) {
for (const message of prompt) {
if (!message.role || !message.content) {
throw new Error(`Invalid message format. Each message must have 'role' and 'content' properties. Received: ${JSON.stringify(message)}`);
}
if (typeof message.role !== "string" || (typeof message.content !== "string" && !Array.isArray(message.content))) {
throw new Error(`Invalid message format. 'role' must be a string and 'content' must be a string or array. Received role: ${typeof message.role}, content: ${typeof message.content}`);
}
}
}
if (options.provider && options.provider !== "auto" && options.model && !options.model.startsWith(options.provider + "/")) {
console.warn(`[callAi:${PACKAGE_VERSION}] WARNING: Specified provider '${options.provider}' doesn't match model '${options.model}'. Using model as specified.`);
}
return {
messages,
apiKey,
};
}
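// Example of a valid message-array prompt (hypothetical key):
// prepareRequestParams(
//   [
//     { role: "system", content: "You are terse." },
//     { role: "user", content: "Hi" },
//   ],
//   { apiKey: "sk-..." },
// );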
export { callAi, bufferStreamingResults, createBackwardCompatStreamingProxy, prepareRequestParams, PACKAGE_VERSION };
//# sourceMappingURL=api-core.js.map