call-ai
Lightweight library for making AI API calls with streaming support
import { CallAIError } from "./types.js";
import { chooseSchemaStrategy } from "./strategies/index.js";
import { responseMetadata, boxString, getMeta } from "./response-metadata.js";
import { keyStore, globalDebug } from "./key-management.js";
import { handleApiError, checkForInvalidModelError } from "./error-handling.js";
import { createBackwardCompatStreamingProxy } from "./api-core.js";
import { extractContent, extractClaudeResponse, PACKAGE_VERSION } from "./non-streaming.js";
import { createStreamingGenerator } from "./streaming.js";
import { callAiFetch, joinUrlParts } from "./utils.js";
import { callAiEnv } from "./env.js";
export { getMeta };
const FALLBACK_MODEL = "openrouter/auto";
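/*
 * Usage sketch (illustrative; the model id shown is an assumption, and the key
 * must come from options.apiKey, the key store, or CALLAI_API_KEY):
 *
 *   // Non-streaming: resolves to the response text.
 *   const text = await callAi("Say hi", { model: "openai/gpt-4o-mini" });
 *
 *   // Streaming: await the returned value to get an async generator.
 *   const stream = await callAi("Tell a story", { stream: true });
 *   for await (const chunk of stream) {
 *     console.log(chunk);
 *   }
 */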
export function callAi(prompt, options = {}) {
const schemaStrategy = chooseSchemaStrategy(options.model, options.schema || null);
if (!options.stream && schemaStrategy.shouldForceStream) {
return bufferStreamingResults(prompt, options);
}
if (options.stream !== true) {
return callAINonStreaming(prompt, options);
}
const streamPromise = (async () => {
const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, { ...options, stream: true });
const debug = options.debug || globalDebug;
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] Making fetch request to: ${endpoint}`);
console.log(`[callAi:${PACKAGE_VERSION}] With model: ${model}`);
console.log(`[callAi:${PACKAGE_VERSION}] Request headers:`, JSON.stringify(requestOptions.headers));
}
let response;
try {
response = await callAiFetch(options)(endpoint, requestOptions);
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] Fetch completed with status:`, response.status, response.statusText);
console.log(`[callAi:${PACKAGE_VERSION}] Response headers:`);
response.headers.forEach((value, name) => {
console.log(`[callAi:${PACKAGE_VERSION}] ${name}: ${value}`);
});
const diagnosticResponse = response.clone();
try {
const responseText = await diagnosticResponse.text();
console.log(`[callAi:${PACKAGE_VERSION}] First 500 chars of response body:`, responseText.substring(0, 500) + (responseText.length > 500 ? "..." : ""));
}
catch (e) {
console.log(`[callAi:${PACKAGE_VERSION}] Could not read response body for diagnostics:`, e);
}
}
}
catch (fetchError) {
if (debug) {
console.error(`[callAi:${PACKAGE_VERSION}] Network error during fetch:`, fetchError);
}
throw fetchError;
}
const contentType = response?.headers?.get?.("content-type") || "";
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] Response.ok =`, response.ok);
console.log(`[callAi:${PACKAGE_VERSION}] Response.status =`, response.status);
console.log(`[callAi:${PACKAGE_VERSION}] Response.statusText =`, response.statusText);
console.log(`[callAi:${PACKAGE_VERSION}] Response.type =`, response.type);
console.log(`[callAi:${PACKAGE_VERSION}] Content-Type =`, contentType);
}
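// Heuristic: a healthy streaming response arrives as text/event-stream, so a
// JSON content type here almost always carries an error payload, even with a
// 200 status.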
const hasHttpError = !response.ok || response.status >= 400;
const hasJsonError = contentType.includes("application/json");
if (hasHttpError || hasJsonError) {
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] ⚠️ Error detected - HTTP Status: ${response.status}, Content-Type: ${contentType}`);
}
if (!options.skipRetry) {
const clonedResponse = response.clone();
let isInvalidModel = false;
try {
const modelCheckResult = await checkForInvalidModelError(clonedResponse, model, debug);
isInvalidModel = modelCheckResult.isInvalidModel;
if (isInvalidModel) {
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] Retrying with fallback model: ${FALLBACK_MODEL}`);
}
return (await callAi(prompt, {
...options,
model: FALLBACK_MODEL,
}));
}
}
catch (modelCheckError) {
console.error(`[callAi:${PACKAGE_VERSION}] Error during model check:`, modelCheckError);
// Continue with normal error handling
}
}
try {
const errorBody = await response.text();
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] Error body:`, errorBody);
}
try {
const errorJson = JSON.parse(errorBody);
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] Parsed error:`, errorJson);
}
let errorMessage = "";
if (errorJson.error && typeof errorJson.error === "object" && errorJson.error.message) {
errorMessage = errorJson.error.message;
}
else if (errorJson.error && typeof errorJson.error === "string") {
errorMessage = errorJson.error;
}
else if (errorJson.message) {
errorMessage = errorJson.message;
}
else {
errorMessage = `API returned ${response.status}: ${response.statusText}`;
}
if (!errorMessage.includes(response.status.toString())) {
errorMessage = `${errorMessage} (Status: ${response.status})`;
}
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] Extracted error message:`, errorMessage);
}
const error = new CallAIError({
message: errorMessage,
status: response.status,
statusText: response.statusText,
details: errorJson,
contentType,
});
throw error;
}
catch (jsonError) {
// Re-throw the structured CallAIError built just above; only genuine
// JSON.parse failures should fall through to the plain-text fallback below.
if (jsonError instanceof CallAIError) {
throw jsonError;
}
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] JSON parse error:`, jsonError);
}
let errorMessage = "";
if (errorBody && errorBody.trim().length > 0) {
errorMessage = errorBody.length > 100 ? errorBody.substring(0, 100) + "..." : errorBody;
}
else {
errorMessage = `API error: ${response.status} ${response.statusText}`;
}
if (!errorMessage.includes(response.status.toString())) {
errorMessage = `${errorMessage} (Status: ${response.status})`;
}
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] Extracted text error message:`, errorMessage);
}
const error = new CallAIError({
message: errorMessage,
status: response.status,
statusText: response.statusText,
details: errorBody,
contentType,
});
throw error;
}
}
catch (responseError) {
if (responseError instanceof Error) {
throw responseError;
}
const error = new CallAIError({
message: `API returned ${response.status}: ${response.statusText}`,
status: response.status,
statusText: response.statusText,
contentType,
});
throw error;
}
}
if (debug) {
console.log(`[callAi:${PACKAGE_VERSION}] Response OK, creating streaming generator`);
}
return createStreamingGenerator(response, options, schemaStrategy, model);
})();
if (typeof process !== "undefined" && process.env.NODE_ENV !== "production") {
if (options.debug) {
console.warn(`[callAi:${PACKAGE_VERSION}] No await found - using legacy streaming pattern. This will be removed in a future version and may cause issues with certain models.`);
}
}
return createBackwardCompatStreamingProxy(streamPromise);
}
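/*
 * Fallback sketch: when the provider rejects the model id, the call is
 * retried once with FALLBACK_MODEL unless the caller opts out. The model id
 * below is deliberately bogus:
 *
 *   await callAi("Hi", { model: "not/a-real-model" });
 *   // -> retried transparently as "openrouter/auto"
 *
 *   await callAi("Hi", { model: "not/a-real-model", skipRetry: true });
 *   // -> surfaces the provider error instead of falling back
 */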
async function bufferStreamingResults(prompt, options) {
const streamingOptions = {
...options,
stream: true,
};
try {
const generator = (await callAi(prompt, streamingOptions));
const isClaudeJson = /claude/i.test(options.model || "") && options.schema;
if (isClaudeJson) {
let lastChunk = "";
for await (const chunk of generator) {
lastChunk = chunk;
}
return lastChunk;
}
else {
let result = "";
for await (const chunk of generator) {
result += chunk;
}
return result;
}
}
catch (error) {
await handleApiError(error, "Buffered streaming", options.debug, {
apiKey: options.apiKey,
endpoint: options.endpoint,
skipRefresh: options.skipRefresh,
refreshToken: options.refreshToken,
updateRefreshToken: options.updateRefreshToken,
});
return bufferStreamingResults(prompt, {
...options,
apiKey: keyStore().current,
});
}
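// Should be unreachable: both branches above either return or throw.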
throw new Error("Unexpected code path in bufferStreamingResults");
}
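/*
 * Forced-stream sketch: when the chosen schema strategy sets
 * shouldForceStream, a non-streaming caller still receives one buffered
 * string. The schema below is illustrative, and whether the result parses as
 * JSON depends on the strategy in use:
 *
 *   const raw = await callAi("List three colors", {
 *     schema: { properties: { colors: { type: "array", items: { type: "string" } } } },
 *   });
 *   const { colors } = JSON.parse(raw);
 */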
function prepareRequestParams(prompt, options) {
const apiKey = options.apiKey ||
keyStore().current ||
callAiEnv.CALLAI_API_KEY ||
"sk-vibes-proxy-managed";
const schema = options.schema || null;
const schemaStrategy = chooseSchemaStrategy(options.model, schema);
const model = schemaStrategy.model;
const customChatOrigin = options.chatUrl || callAiEnv.def.CALLAI_CHAT_URL || null;
const endpoint = options.endpoint ||
(customChatOrigin
? joinUrlParts(customChatOrigin, "/api/v1/chat/completions")
: "https://openrouter.ai/api/v1/chat/completions");
const messages = Array.isArray(prompt) ? prompt : [{ role: "user", content: prompt }];
const requestParams = {
model,
messages,
stream: options.stream !== undefined ? options.stream : false,
};
if (options.temperature !== undefined) {
requestParams.temperature = options.temperature;
}
if (options.topP !== undefined) {
requestParams.top_p = options.topP;
}
if (options.maxTokens !== undefined) {
requestParams.max_tokens = options.maxTokens;
}
if (options.stop) {
requestParams.stop = Array.isArray(options.stop) ? options.stop : [options.stop];
}
if (options.responseFormat === "json") {
requestParams.response_format = { type: "json_object" };
}
if (schema) {
Object.assign(requestParams, schemaStrategy.prepareRequest(schema, messages));
}
const headers = {
Authorization: `Bearer ${apiKey}`,
"Content-Type": "application/json",
"HTTP-Referer": options.referer || "https://vibes.diy",
"X-Title": options.title || "Vibes",
};
if (options.headers) {
Object.assign(headers, options.headers);
}
const requestOptions = {
method: "POST",
headers: {
...headers,
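// Re-asserted so a custom options.headers entry cannot override the JSON
// content type the body is serialized with.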
"Content-Type": "application/json",
},
body: JSON.stringify(requestParams),
};
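// Note: the "sk-vibes-proxy-managed" fallback above means this guard only
// trips if that placeholder is ever removed.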
if (!apiKey) {
throw new Error("API key is required. Provide it via options.apiKey or set window.CALLAI_API_KEY");
}
if (options.debug) {
console.log(`[callAi-prepareRequest:raw] Endpoint: ${endpoint}`);
console.log(`[callAi-prepareRequest:raw] Model: ${model}`);
console.log(`[callAi-prepareRequest:raw] Payload:`, JSON.stringify(requestParams));
}
return { apiKey, model, endpoint, requestOptions, schemaStrategy };
}
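/*
 * For reference, the serialized request body looks roughly like this; field
 * presence tracks the options passed plus the schema strategy, and the
 * values are illustrative:
 *
 *   {
 *     "model": "openrouter/auto",
 *     "messages": [{ "role": "user", "content": "Say hi" }],
 *     "stream": false,
 *     "temperature": 0.7,
 *     "max_tokens": 256
 *   }
 */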
async function callAINonStreaming(prompt, options = {}, isRetry = false) {
try {
const startTime = Date.now();
const meta = {
model: options.model || "unknown",
timing: {
startTime: startTime,
},
};
const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, options);
const response = await callAiFetch(options)(endpoint, requestOptions);
if (!response.ok || response.status >= 400) {
const { isInvalidModel } = await checkForInvalidModelError(response, model, options.debug);
if (isInvalidModel && !options.skipRetry) {
return callAINonStreaming(prompt, { ...options, model: FALLBACK_MODEL }, true);
}
const error = new CallAIError({
message: `HTTP error! Status: ${response.status}`,
status: response.status,
statusCode: response.status,
contentType: "text/plain",
});
throw error;
}
let result;
if (/claude/i.test(model)) {
try {
result = await extractClaudeResponse(response);
}
catch (error) {
await handleApiError(error, "Claude API response processing failed", options.debug);
}
}
else {
result = await response.json();
}
if (options.debug) {
console.log(`[callAi-nonStreaming:raw] Response:`, JSON.stringify(result));
}
if (result.error) {
if (options.debug) {
console.error("API returned an error:", result.error);
}
if (!isRetry &&
!options.skipRetry &&
result.error.message &&
result.error.message.toLowerCase().includes("not a valid model")) {
if (options.debug) {
console.warn(`Model ${model} error, retrying with ${FALLBACK_MODEL}`);
}
return callAINonStreaming(prompt, { ...options, model: FALLBACK_MODEL }, true);
}
return JSON.stringify({
error: result.error,
message: result.error.message || "API returned an error",
});
}
const content = extractContent(result, schemaStrategy);
if (result) {
meta.rawResponse = result;
}
meta.model = model;
if (meta.timing) {
meta.timing.endTime = Date.now();
meta.timing.duration = meta.timing.endTime - meta.timing.startTime;
}
const processedContent = schemaStrategy.processResponse(content);
const boxed = boxString(processedContent);
responseMetadata.set(boxed, meta);
// Return the boxed value: the metadata set above is keyed by it, so
// returning the primitive string would leave getMeta unable to find it.
return boxed;
}
catch (error) {
await handleApiError(error, "Non-streaming API call", options.debug, {
apiKey: options.apiKey,
endpoint: options.endpoint,
skipRefresh: options.skipRefresh,
refreshToken: options.refreshToken,
updateRefreshToken: options.updateRefreshToken,
});
return callAINonStreaming(prompt, {
...options,
apiKey: keyStore().current,
}, true);
}
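// Should be unreachable: both branches above either return or throw.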
throw new Error("Unexpected code path in callAINonStreaming");
}
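/*
 * Metadata sketch: the non-streaming path stores model, timing, and the raw
 * provider response keyed by the boxed result string, retrievable with the
 * exported getMeta:
 *
 *   const text = await callAi("Say hi");
 *   const meta = getMeta(text);
 *   // meta?.model, meta?.timing?.duration, meta?.rawResponse
 */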
//# sourceMappingURL=api.js.map