// ai — Vercel AI SDK: The AI Toolkit for TypeScript and JavaScript
// (bundled output; 1,669 lines / 1,630 loc, ~144 kB)
var __defProp = Object.defineProperty;
// Bundler helper: exposes each entry of `all` on `target` as an enumerable
// getter, so re-exported bindings stay live.
var __export = (target, all) => {
  for (var key in all) {
    __defProp(target, key, { get: all[key], enumerable: true });
  }
};
// streams/index.ts
import {
formatStreamPart,
parseStreamPart,
readDataStream,
parseComplexResponse
} from "@ai-sdk/ui-utils";
import { generateId as generateIdImpl } from "@ai-sdk/provider-utils";
// util/retry-with-exponential-backoff.ts
import { APICallError } from "@ai-sdk/provider";
import { getErrorMessage, isAbortError } from "@ai-sdk/provider-utils";
// util/delay.ts
// Resolve (with undefined) after `delayInMs` milliseconds.
async function delay(delayInMs) {
  return new Promise((resolve) => {
    setTimeout(resolve, delayInMs);
  });
}
// util/retry-error.ts
import { AISDKError } from "@ai-sdk/provider";
// Name/marker/symbol triple used to brand RetryError instances, so that
// `isInstance` recognizes errors even across duplicated bundle copies.
var name = "AI_RetryError";
var marker = `vercel.ai.error.${name}`;
var symbol = Symbol.for(marker);
var _a;
// Error thrown when a retried operation ultimately fails (see
// `_retryWithExponentialBackoff`). Carries the full error history.
var RetryError = class extends AISDKError {
  constructor({
    message,
    reason,
    errors
  }) {
    super({ name, message });
    // symbol brand; `_a` is assigned the shared symbol below the class body
    this[_a] = true;
    // why the retries stopped: "maxRetriesExceeded" or "errorNotRetryable"
    this.reason = reason;
    // every error seen across all attempts, in order
    this.errors = errors;
    // convenience accessor for the final attempt's error
    this.lastError = errors[errors.length - 1];
  }
  static isInstance(error) {
    return AISDKError.hasMarker(error, marker);
  }
  /**
   * @deprecated use `isInstance` instead
   */
  static isRetryError(error) {
    return error instanceof Error && error.name === name && typeof error.reason === "string" && Array.isArray(error.errors);
  }
  /**
   * @deprecated Do not use this method. It will be removed in the next major version.
   */
  toJSON() {
    return {
      name: this.name,
      message: this.message,
      reason: this.reason,
      lastError: this.lastError,
      errors: this.errors
    };
  }
};
_a = symbol;
// util/retry-with-exponential-backoff.ts
// Factory for a retry wrapper: returns a function that runs `f` under the
// configured exponential-backoff policy (delegating to the internal helper).
var retryWithExponentialBackoff = ({
  maxRetries = 2,
  initialDelayInMs = 2e3,
  backoffFactor = 2
} = {}) => {
  return async (f) => {
    return _retryWithExponentialBackoff(f, {
      maxRetries,
      delayInMs: initialDelayInMs,
      backoffFactor
    });
  };
};
// Core retry loop: runs `f`, and on failure recurses with an exponentially
// increased delay until `maxRetries` is exhausted or the error is not
// retryable. `errors` accumulates every failure across attempts.
async function _retryWithExponentialBackoff(f, {
  maxRetries,
  delayInMs,
  backoffFactor
}, errors = []) {
  try {
    return await f();
  } catch (error) {
    // user-initiated aborts are never retried
    if (isAbortError(error)) {
      throw error;
    }
    // retries disabled: surface the original error unchanged
    if (maxRetries === 0) {
      throw error;
    }
    const errorMessage = getErrorMessage(error);
    const newErrors = [...errors, error];
    const tryNumber = newErrors.length;
    // retry budget exhausted: wrap with the full error history
    if (tryNumber > maxRetries) {
      throw new RetryError({
        message: `Failed after ${tryNumber} attempts. Last error: ${errorMessage}`,
        reason: "maxRetriesExceeded",
        errors: newErrors
      });
    }
    // only API-call errors explicitly marked retryable are retried
    if (error instanceof Error && APICallError.isAPICallError(error) && error.isRetryable === true && tryNumber <= maxRetries) {
      await delay(delayInMs);
      return _retryWithExponentialBackoff(
        f,
        { maxRetries, delayInMs: backoffFactor * delayInMs, backoffFactor },
        newErrors
      );
    }
    // non-retryable error on the very first attempt: rethrow as-is
    if (tryNumber === 1) {
      throw error;
    }
    // non-retryable error after earlier retryable failures: wrap with history
    throw new RetryError({
      message: `Failed after ${tryNumber} attempts with non-retryable error: '${errorMessage}'`,
      reason: "errorNotRetryable",
      errors: newErrors
    });
  }
}
// core/telemetry/assemble-operation-name.ts
// Build the `operation.name` telemetry attribute: the operation name,
// optionally suffixed with the user-provided telemetry functionId.
function assembleOperationName({
  operationName,
  telemetry
}) {
  const functionId = telemetry == null ? void 0 : telemetry.functionId;
  const suffix = functionId != null ? ` ${functionId}` : "";
  return { "operation.name": `${operationName}${suffix}` };
}
// core/telemetry/get-base-telemetry-attributes.ts
// Collect the telemetry attributes shared by all AI operations: model
// identity, call settings, telemetry identity/metadata, and request headers.
function getBaseTelemetryAttributes({
  model,
  settings,
  telemetry,
  headers
}) {
  const attributes = {
    "ai.model.provider": model.provider,
    "ai.model.id": model.modelId
  };
  // settings:
  for (const [key, value] of Object.entries(settings)) {
    attributes[`ai.settings.${key}`] = value;
  }
  // special telemetry information
  const functionId = telemetry == null ? void 0 : telemetry.functionId;
  attributes["resource.name"] = functionId;
  attributes["ai.telemetry.functionId"] = functionId;
  // add metadata as attributes:
  const metadata = telemetry == null || telemetry.metadata == null ? {} : telemetry.metadata;
  for (const [key, value] of Object.entries(metadata)) {
    attributes[`ai.telemetry.metadata.${key}`] = value;
  }
  // request headers (skip entries whose value is undefined)
  for (const [key, value] of Object.entries(headers != null ? headers : {})) {
    if (value !== void 0) {
      attributes[`ai.request.headers.${key}`] = value;
    }
  }
  return attributes;
}
// core/telemetry/get-tracer.ts
import { trace } from "@opentelemetry/api";
// core/telemetry/noop-tracer.ts
// Inert tracer used when telemetry is disabled: spans are never recorded,
// but callbacks passed to startActiveSpan are still invoked so user code runs.
var noopTracer = {
  startSpan() {
    return noopSpan;
  },
  // startActiveSpan has several overloads; the callback is whichever of the
  // trailing arguments is a function.
  startActiveSpan(name9, arg1, arg2, arg3) {
    const callback = [arg1, arg2, arg3].find(
      (candidate) => typeof candidate === "function"
    );
    if (callback) {
      return callback(noopSpan);
    }
  }
};
// Span stub: every mutator is chainable and does nothing.
var noopSpan = {
  spanContext() { return noopSpanContext; },
  setAttribute() { return this; },
  setAttributes() { return this; },
  addEvent() { return this; },
  addLink() { return this; },
  addLinks() { return this; },
  setStatus() { return this; },
  updateName() { return this; },
  end() { return this; },
  isRecording() { return false; },
  recordException() { return this; }
};
// Empty span context: blank ids, no trace flags.
var noopSpanContext = {
  traceId: "",
  spanId: "",
  traceFlags: 0
};
// core/telemetry/get-tracer.ts
// Test hook: never assigned in this chunk — presumably set by test utilities
// elsewhere in the package; verify before relying on it.
var testTracer = void 0;
// Return the tracer for AI spans: the inert no-op tracer when telemetry is
// disabled, otherwise the injected test tracer or the global "ai" tracer.
function getTracer({ isEnabled }) {
  if (!isEnabled) {
    return noopTracer;
  }
  return testTracer || trace.getTracer("ai");
}
// core/telemetry/record-span.ts
import { SpanStatusCode } from "@opentelemetry/api";
// Run `fn` inside an active span. On success the span ends (unless the caller
// opts out via endWhenDone=false, e.g. for streaming results); on failure the
// exception is recorded, the span is marked as errored and ended, and the
// error is rethrown.
function recordSpan({
  name: name9,
  tracer,
  attributes,
  fn,
  endWhenDone = true
}) {
  return tracer.startActiveSpan(name9, { attributes }, async (span) => {
    // mark the span as failed, keeping non-Error throwables status-only
    const markError = (error) => {
      if (error instanceof Error) {
        span.recordException({
          name: error.name,
          message: error.message,
          stack: error.stack
        });
        span.setStatus({
          code: SpanStatusCode.ERROR,
          message: error.message
        });
      } else {
        span.setStatus({ code: SpanStatusCode.ERROR });
      }
    };
    try {
      const result = await fn(span);
      if (endWhenDone) {
        span.end();
      }
      return result;
    } catch (error) {
      try {
        markError(error);
      } finally {
        // always end the span on the error path, even if recording failed
        span.end();
      }
      throw error;
    }
  });
}
// core/telemetry/select-telemetry-attributes.ts
// Filter raw attribute values for recording. Plain values pass through;
// `{ input: () => ... }` / `{ output: () => ... }` wrappers are evaluated
// lazily and dropped entirely when the telemetry settings disable recording
// inputs/outputs, or when the thunk yields undefined.
function selectTelemetryAttributes({
  telemetry,
  attributes
}) {
  const selected = {};
  for (const [key, value] of Object.entries(attributes)) {
    if (value === void 0) {
      continue;
    }
    if (typeof value === "object" && "input" in value && typeof value.input === "function") {
      if ((telemetry == null ? void 0 : telemetry.recordInputs) === false) {
        continue;
      }
      const result = value.input();
      if (result !== void 0) {
        selected[key] = result;
      }
      continue;
    }
    if (typeof value === "object" && "output" in value && typeof value.output === "function") {
      if ((telemetry == null ? void 0 : telemetry.recordOutputs) === false) {
        continue;
      }
      const result = value.output();
      if (result !== void 0) {
        selected[key] = result;
      }
      continue;
    }
    selected[key] = value;
  }
  return selected;
}
// core/embed/embed.ts
/**
 * Embed a single value using an embedding model, with retry and telemetry.
 *
 * Wraps the provider call in an outer `ai.embed` span plus an inner
 * `ai.embed.doEmbed` span, and retries `model.doEmbed` with exponential
 * backoff. Returns a DefaultEmbedResult holding the value, its embedding,
 * token usage, and the provider's raw response.
 */
async function embed({
  model,
  value,
  maxRetries,
  abortSignal,
  headers,
  experimental_telemetry: telemetry
}) {
  var _a9;
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
    model,
    telemetry,
    headers,
    settings: { maxRetries }
  });
  // telemetry is opt-in: fall back to the no-op tracer when not enabled
  const tracer = getTracer({ isEnabled: (_a9 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a9 : false });
  return recordSpan({
    name: "ai.embed",
    attributes: selectTelemetryAttributes({
      telemetry,
      attributes: {
        ...assembleOperationName({ operationName: "ai.embed", telemetry }),
        ...baseTelemetryAttributes,
        "ai.value": { input: () => JSON.stringify(value) }
      }
    }),
    tracer,
    fn: async (span) => {
      const retry = retryWithExponentialBackoff({ maxRetries });
      const { embedding, usage, rawResponse } = await retry(
        () => (
          // nested spans to align with the embedMany telemetry data:
          recordSpan({
            name: "ai.embed.doEmbed",
            attributes: selectTelemetryAttributes({
              telemetry,
              attributes: {
                ...assembleOperationName({
                  operationName: "ai.embed.doEmbed",
                  telemetry
                }),
                ...baseTelemetryAttributes,
                // specific settings that only make sense on the outer level:
                "ai.values": { input: () => [JSON.stringify(value)] }
              }
            }),
            tracer,
            fn: async (doEmbedSpan) => {
              var _a10;
              // the model is always called with a single-element batch
              const modelResponse = await model.doEmbed({
                values: [value],
                abortSignal,
                headers
              });
              const embedding2 = modelResponse.embeddings[0];
              // providers may omit usage; NaN marks an unknown token count
              const usage2 = (_a10 = modelResponse.usage) != null ? _a10 : { tokens: NaN };
              doEmbedSpan.setAttributes(
                selectTelemetryAttributes({
                  telemetry,
                  attributes: {
                    "ai.embeddings": {
                      output: () => modelResponse.embeddings.map(
                        (embedding3) => JSON.stringify(embedding3)
                      )
                    },
                    "ai.usage.tokens": usage2.tokens
                  }
                })
              );
              return {
                embedding: embedding2,
                usage: usage2,
                rawResponse: modelResponse.rawResponse
              };
            }
          })
        )
      );
      span.setAttributes(
        selectTelemetryAttributes({
          telemetry,
          attributes: {
            "ai.embedding": { output: () => JSON.stringify(embedding) },
            "ai.usage.tokens": usage.tokens
          }
        })
      );
      return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
    }
  });
}
// Result container returned by `embed`: the input value, its embedding,
// token usage, and the provider's raw response.
var DefaultEmbedResult = class {
  constructor(options) {
    const { value, embedding, usage, rawResponse } = options;
    this.value = value;
    this.embedding = embedding;
    this.usage = usage;
    this.rawResponse = rawResponse;
  }
};
// core/util/split-array.ts
// Split `array` into consecutive chunks of at most `chunkSize` elements.
// The final chunk may be shorter. Throws for non-positive chunk sizes.
function splitArray(array, chunkSize) {
  if (chunkSize <= 0) {
    throw new Error("chunkSize must be greater than 0");
  }
  const chunks = [];
  let start = 0;
  while (start < array.length) {
    chunks.push(array.slice(start, start + chunkSize));
    start += chunkSize;
  }
  return chunks;
}
// core/embed/embed-many.ts
/**
 * Embed several values with an embedding model, with retry and telemetry.
 *
 * When the model reports no `maxEmbeddingsPerCall` limit, all values are sent
 * in one `doEmbed` call; otherwise values are split into chunks of that size
 * and embedded sequentially, summing token usage across calls. Returns a
 * DefaultEmbedManyResult with one embedding per input value.
 */
async function embedMany({
  model,
  values,
  maxRetries,
  abortSignal,
  headers,
  experimental_telemetry: telemetry
}) {
  var _a9;
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
    model,
    telemetry,
    headers,
    settings: { maxRetries }
  });
  // telemetry is opt-in: fall back to the no-op tracer when not enabled
  const tracer = getTracer({ isEnabled: (_a9 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a9 : false });
  return recordSpan({
    name: "ai.embedMany",
    attributes: selectTelemetryAttributes({
      telemetry,
      attributes: {
        ...assembleOperationName({ operationName: "ai.embedMany", telemetry }),
        ...baseTelemetryAttributes,
        // specific settings that only make sense on the outer level:
        "ai.values": {
          input: () => values.map((value) => JSON.stringify(value))
        }
      }
    }),
    tracer,
    fn: async (span) => {
      const retry = retryWithExponentialBackoff({ maxRetries });
      const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
      // no provider batch limit: embed everything in a single call
      if (maxEmbeddingsPerCall == null) {
        const { embeddings: embeddings2, usage } = await retry(() => {
          return recordSpan({
            name: "ai.embedMany.doEmbed",
            attributes: selectTelemetryAttributes({
              telemetry,
              attributes: {
                ...assembleOperationName({
                  operationName: "ai.embedMany.doEmbed",
                  telemetry
                }),
                ...baseTelemetryAttributes,
                // specific settings that only make sense on the outer level:
                "ai.values": {
                  input: () => values.map((value) => JSON.stringify(value))
                }
              }
            }),
            tracer,
            fn: async (doEmbedSpan) => {
              var _a10;
              const modelResponse = await model.doEmbed({
                values,
                abortSignal,
                headers
              });
              const embeddings3 = modelResponse.embeddings;
              // providers may omit usage; NaN marks an unknown token count
              const usage2 = (_a10 = modelResponse.usage) != null ? _a10 : { tokens: NaN };
              doEmbedSpan.setAttributes(
                selectTelemetryAttributes({
                  telemetry,
                  attributes: {
                    "ai.embeddings": {
                      output: () => embeddings3.map((embedding) => JSON.stringify(embedding))
                    },
                    "ai.usage.tokens": usage2.tokens
                  }
                })
              );
              return { embeddings: embeddings3, usage: usage2 };
            }
          });
        });
        span.setAttributes(
          selectTelemetryAttributes({
            telemetry,
            attributes: {
              "ai.embeddings": {
                output: () => embeddings2.map((embedding) => JSON.stringify(embedding))
              },
              "ai.usage.tokens": usage.tokens
            }
          })
        );
        return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
      }
      // batch limit present: embed chunk by chunk, accumulating results
      const valueChunks = splitArray(values, maxEmbeddingsPerCall);
      const embeddings = [];
      let tokens = 0;
      for (const chunk of valueChunks) {
        const { embeddings: responseEmbeddings, usage } = await retry(() => {
          return recordSpan({
            name: "ai.embedMany.doEmbed",
            attributes: selectTelemetryAttributes({
              telemetry,
              attributes: {
                ...assembleOperationName({
                  operationName: "ai.embedMany.doEmbed",
                  telemetry
                }),
                ...baseTelemetryAttributes,
                // specific settings that only make sense on the outer level:
                "ai.values": {
                  input: () => chunk.map((value) => JSON.stringify(value))
                }
              }
            }),
            tracer,
            fn: async (doEmbedSpan) => {
              var _a10;
              const modelResponse = await model.doEmbed({
                values: chunk,
                abortSignal,
                headers
              });
              const embeddings2 = modelResponse.embeddings;
              // providers may omit usage; NaN marks an unknown token count
              const usage2 = (_a10 = modelResponse.usage) != null ? _a10 : { tokens: NaN };
              doEmbedSpan.setAttributes(
                selectTelemetryAttributes({
                  telemetry,
                  attributes: {
                    "ai.embeddings": {
                      output: () => embeddings2.map((embedding) => JSON.stringify(embedding))
                    },
                    "ai.usage.tokens": usage2.tokens
                  }
                })
              );
              return { embeddings: embeddings2, usage: usage2 };
            }
          });
        });
        embeddings.push(...responseEmbeddings);
        tokens += usage.tokens;
      }
      span.setAttributes(
        selectTelemetryAttributes({
          telemetry,
          attributes: {
            "ai.embeddings": {
              output: () => embeddings.map((embedding) => JSON.stringify(embedding))
            },
            "ai.usage.tokens": tokens
          }
        })
      );
      return new DefaultEmbedManyResult({
        values,
        embeddings,
        usage: { tokens }
      });
    }
  });
}
// Result container returned by `embedMany`: the input values, one embedding
// per value, and the aggregated token usage.
var DefaultEmbedManyResult = class {
  constructor(options) {
    const { values, embeddings, usage } = options;
    this.values = values;
    this.embeddings = embeddings;
    this.usage = usage;
  }
};
// core/generate-object/generate-object.ts
import { safeParseJSON } from "@ai-sdk/provider-utils";
// core/prompt/convert-to-language-model-prompt.ts
import { getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider-utils";
// util/download-error.ts
import { AISDKError as AISDKError2 } from "@ai-sdk/provider";
// Name/marker/symbol triple that brands DownloadError instances for
// `isInstance` checks across bundled copies of this module.
var name2 = "AI_DownloadError";
var marker2 = `vercel.ai.error.${name2}`;
var symbol2 = Symbol.for(marker2);
var _a2;
// Error thrown by `download` when fetching a remote resource fails.
var DownloadError = class extends AISDKError2 {
  constructor({
    url,
    statusCode,
    statusText,
    cause,
    // default message depends on the failure kind: HTTP status vs. thrown cause
    message = cause == null ? `Failed to download ${url}: ${statusCode} ${statusText}` : `Failed to download ${url}: ${cause}`
  }) {
    super({ name: name2, message, cause });
    // symbol brand; `_a2` is assigned the shared symbol below the class body
    this[_a2] = true;
    this.url = url;
    this.statusCode = statusCode;
    this.statusText = statusText;
  }
  static isInstance(error) {
    return AISDKError2.hasMarker(error, marker2);
  }
  /**
   * @deprecated use `isInstance` instead
   */
  static isDownloadError(error) {
    return error instanceof Error && error.name === name2 && typeof error.url === "string" && (error.statusCode == null || typeof error.statusCode === "number") && (error.statusText == null || typeof error.statusText === "string");
  }
  /**
   * @deprecated Do not use this method. It will be removed in the next major version.
   */
  toJSON() {
    return {
      name: this.name,
      message: this.message,
      url: this.url,
      statusCode: this.statusCode,
      statusText: this.statusText,
      cause: this.cause
    };
  }
};
_a2 = symbol2;
// util/download.ts
// Download a resource as bytes. Non-OK HTTP responses and transport failures
// are both surfaced as DownloadError (existing DownloadErrors pass through
// unwrapped). Returns the body bytes plus the content-type, when present.
async function download({
  url,
  fetchImplementation = fetch
}) {
  const urlText = url.toString();
  try {
    const response = await fetchImplementation(urlText);
    if (!response.ok) {
      throw new DownloadError({
        url: urlText,
        statusCode: response.status,
        statusText: response.statusText
      });
    }
    const contentType = response.headers.get("content-type");
    return {
      data: new Uint8Array(await response.arrayBuffer()),
      mimeType: contentType != null ? contentType : void 0
    };
  } catch (error) {
    if (DownloadError.isInstance(error)) {
      throw error;
    }
    // wrap transport-level failures so callers see a single error type
    throw new DownloadError({ url: urlText, cause: error });
  }
}
// core/util/detect-image-mimetype.ts
// Magic-byte prefixes for the supported image formats.
var mimeTypeSignatures = [
  { mimeType: "image/gif", bytes: [71, 73, 70] },
  { mimeType: "image/png", bytes: [137, 80, 78, 71] },
  { mimeType: "image/jpeg", bytes: [255, 216] },
  { mimeType: "image/webp", bytes: [82, 73, 70, 70] }
];
// Detect an image's mime type from its leading bytes; undefined when no
// known signature matches.
function detectImageMimeType(image) {
  const matches = ({ bytes }) =>
    image.length >= bytes.length && bytes.every((byte, index) => image[index] === byte);
  const signature = mimeTypeSignatures.find(matches);
  return signature === void 0 ? void 0 : signature.mimeType;
}
// core/prompt/data-content.ts
import {
convertBase64ToUint8Array,
convertUint8ArrayToBase64
} from "@ai-sdk/provider-utils";
// core/prompt/invalid-data-content-error.ts
import { AISDKError as AISDKError3 } from "@ai-sdk/provider";
// Name/marker/symbol triple that brands InvalidDataContentError instances
// for `isInstance` checks across bundled copies of this module.
var name3 = "AI_InvalidDataContentError";
var marker3 = `vercel.ai.error.${name3}`;
var symbol3 = Symbol.for(marker3);
var _a3;
// Error thrown when message data content is not base64/Uint8Array/ArrayBuffer
// (see `convertDataContentToUint8Array`).
var InvalidDataContentError = class extends AISDKError3 {
  constructor({
    content,
    cause,
    message = `Invalid data content. Expected a base64 string, Uint8Array, ArrayBuffer, or Buffer, but got ${typeof content}.`
  }) {
    super({ name: name3, message, cause });
    // symbol brand; `_a3` is assigned the shared symbol below the class body
    this[_a3] = true;
    // the offending content value, kept for diagnostics
    this.content = content;
  }
  static isInstance(error) {
    return AISDKError3.hasMarker(error, marker3);
  }
  /**
   * @deprecated use `isInstance` instead
   */
  static isInvalidDataContentError(error) {
    return error instanceof Error && error.name === name3 && error.content != null;
  }
  /**
   * @deprecated Do not use this method. It will be removed in the next major version.
   */
  toJSON() {
    return {
      name: this.name,
      message: this.message,
      stack: this.stack,
      cause: this.cause,
      content: this.content
    };
  }
};
_a3 = symbol3;
// core/prompt/data-content.ts
// Normalize data content (base64 string | Uint8Array | ArrayBuffer) to a
// base64 string. Strings are assumed to already be base64-encoded.
function convertDataContentToBase64String(content) {
  if (typeof content === "string") {
    return content;
  }
  const bytes = content instanceof ArrayBuffer ? new Uint8Array(content) : content;
  return convertUint8ArrayToBase64(bytes);
}
// Normalize data content (base64 string | Uint8Array | ArrayBuffer) to a
// Uint8Array. Throws InvalidDataContentError for malformed base64 or
// unsupported types.
function convertDataContentToUint8Array(content) {
  // fast path: already a byte array
  if (content instanceof Uint8Array) {
    return content;
  }
  // ArrayBuffer: wrap without copying
  if (content instanceof ArrayBuffer) {
    return new Uint8Array(content);
  }
  // strings are expected to be base64-encoded media
  if (typeof content === "string") {
    try {
      return convertBase64ToUint8Array(content);
    } catch (error) {
      throw new InvalidDataContentError({
        message: "Invalid data content. Content string is not a base64-encoded media.",
        content,
        cause: error
      });
    }
  }
  throw new InvalidDataContentError({ content });
}
// Decode a Uint8Array as UTF-8 text, rewrapping decoder failures in a
// plain Error with a stable message.
function convertUint8ArrayToText(uint8Array) {
  try {
    const decoder = new TextDecoder();
    return decoder.decode(uint8Array);
  } catch (error) {
    throw new Error("Error decoding Uint8Array to text");
  }
}
// core/prompt/invalid-message-role-error.ts
import { AISDKError as AISDKError4 } from "@ai-sdk/provider";
// Name/marker/symbol triple that brands InvalidMessageRoleError instances
// for `isInstance` checks across bundled copies of this module.
var name4 = "AI_InvalidMessageRoleError";
var marker4 = `vercel.ai.error.${name4}`;
var symbol4 = Symbol.for(marker4);
var _a4;
// Error thrown when a message has a role other than system/user/assistant/tool
// (see `convertToLanguageModelMessage`).
var InvalidMessageRoleError = class extends AISDKError4 {
  constructor({
    role,
    message = `Invalid message role: '${role}'. Must be one of: "system", "user", "assistant", "tool".`
  }) {
    super({ name: name4, message });
    // symbol brand; `_a4` is assigned the shared symbol below the class body
    this[_a4] = true;
    // the offending role string, kept for diagnostics
    this.role = role;
  }
  static isInstance(error) {
    return AISDKError4.hasMarker(error, marker4);
  }
  /**
   * @deprecated use `isInstance` instead
   */
  static isInvalidMessageRoleError(error) {
    return error instanceof Error && error.name === name4 && typeof error.role === "string";
  }
  /**
   * @deprecated Do not use this method. It will be removed in the next major version.
   */
  toJSON() {
    return {
      name: this.name,
      message: this.message,
      stack: this.stack,
      role: this.role
    };
  }
};
_a4 = symbol4;
// core/prompt/convert-to-language-model-prompt.ts
// Convert a validated prompt (see `getValidatedPrompt`) into the flat message
// list expected by language models. When the model cannot consume image URLs,
// referenced images are downloaded up front.
async function convertToLanguageModelPrompt({
  prompt,
  modelSupportsImageUrls = true,
  downloadImplementation = download
}) {
  const messages = [];
  if (prompt.system != null) {
    messages.push({ role: "system", content: prompt.system });
  }
  // pre-download images only when the model cannot handle URLs itself
  const downloadedImages = modelSupportsImageUrls || prompt.messages == null ? null : await downloadImages(prompt.messages, downloadImplementation);
  switch (prompt.type) {
    case "prompt": {
      // plain text prompt becomes a single user message
      messages.push({
        role: "user",
        content: [{ type: "text", text: prompt.prompt }]
      });
      break;
    }
    case "messages": {
      for (const message of prompt.messages) {
        messages.push(convertToLanguageModelMessage(message, downloadedImages));
      }
      break;
    }
    default: {
      throw new Error(`Unsupported prompt type: ${prompt.type}`);
    }
  }
  return messages;
}
/**
 * Convert a single CoreMessage into a language-model message.
 *
 * - system: passed through with its string content.
 * - user: string content is wrapped in a text part; image parts are resolved
 *   against `downloadedImages` (keyed by URL string) when provided, data URLs
 *   are decoded inline, and raw data is normalized to Uint8Array with a
 *   best-effort mime-type detection.
 * - assistant: string content is wrapped in a text part; empty text parts are
 *   dropped from array content.
 * - tool: passed through unchanged.
 *
 * Fix: the data-URL error message previously interpolated the whole `message`
 * object (`getErrorMessage2(message)`) instead of the caught `error`, hiding
 * the actual failure; it now reports the error.
 *
 * @throws InvalidMessageRoleError for unknown roles.
 */
function convertToLanguageModelMessage(message, downloadedImages) {
  const role = message.role;
  switch (role) {
    case "system": {
      return { role: "system", content: message.content };
    }
    case "user": {
      if (typeof message.content === "string") {
        return {
          role: "user",
          content: [{ type: "text", text: message.content }]
        };
      }
      return {
        role: "user",
        content: message.content.map(
          (part) => {
            var _a9, _b, _c;
            switch (part.type) {
              case "text": {
                return part;
              }
              case "image": {
                if (part.image instanceof URL) {
                  // URL image: substitute the pre-downloaded bytes when available
                  if (downloadedImages == null) {
                    return {
                      type: "image",
                      image: part.image,
                      mimeType: part.mimeType
                    };
                  } else {
                    const downloadedImage = downloadedImages[part.image.toString()];
                    return {
                      type: "image",
                      image: downloadedImage.data,
                      mimeType: (_a9 = part.mimeType) != null ? _a9 : downloadedImage.mimeType
                    };
                  }
                }
                if (typeof part.image === "string") {
                  try {
                    const url = new URL(part.image);
                    switch (url.protocol) {
                      case "http:":
                      case "https:": {
                        if (downloadedImages == null) {
                          return {
                            type: "image",
                            image: url,
                            mimeType: part.mimeType
                          };
                        } else {
                          const downloadedImage = downloadedImages[part.image];
                          return {
                            type: "image",
                            image: downloadedImage.data,
                            mimeType: (_b = part.mimeType) != null ? _b : downloadedImage.mimeType
                          };
                        }
                      }
                      case "data:": {
                        // inline data URL: decode the base64 payload directly
                        try {
                          const [header, base64Content] = part.image.split(",");
                          const mimeType = header.split(";")[0].split(":")[1];
                          if (mimeType == null || base64Content == null) {
                            throw new Error("Invalid data URL format");
                          }
                          return {
                            type: "image",
                            image: convertDataContentToUint8Array(base64Content),
                            mimeType
                          };
                        } catch (error) {
                          // report the actual error (was: the whole message object)
                          throw new Error(
                            `Error processing data URL: ${getErrorMessage2(error)}`
                          );
                        }
                      }
                      default: {
                        throw new Error(
                          `Unsupported URL protocol: ${url.protocol}`
                        );
                      }
                    }
                  } catch (_ignored) {
                    // not a parseable URL: fall through and treat the string
                    // as raw (base64) data content below
                  }
                }
                const imageUint8 = convertDataContentToUint8Array(part.image);
                return {
                  type: "image",
                  image: imageUint8,
                  mimeType: (_c = part.mimeType) != null ? _c : detectImageMimeType(imageUint8)
                };
              }
            }
          }
        )
      };
    }
    case "assistant": {
      if (typeof message.content === "string") {
        return {
          role: "assistant",
          content: [{ type: "text", text: message.content }]
        };
      }
      return {
        role: "assistant",
        content: message.content.filter(
          // remove empty text parts:
          (part) => part.type !== "text" || part.text !== ""
        )
      };
    }
    case "tool": {
      return message;
    }
    default: {
      const _exhaustiveCheck = role;
      throw new InvalidMessageRoleError({ role: _exhaustiveCheck });
    }
  }
}
// Collect every image URL referenced by user messages (including http(s)
// string image parts) and download them in parallel. Returns a map from the
// URL string to its download result.
async function downloadImages(messages, downloadImplementation) {
  const urls = [];
  for (const message of messages) {
    if (message.role !== "user" || !Array.isArray(message.content)) {
      continue;
    }
    for (const part of message.content) {
      if (part.type !== "image") {
        continue;
      }
      const image = part.image;
      // support string urls in image parts:
      const url = typeof image === "string" && (image.startsWith("http:") || image.startsWith("https:")) ? new URL(image) : image;
      if (url instanceof URL) {
        urls.push(url);
      }
    }
  }
  const entries = await Promise.all(
    urls.map(async (url) => [url.toString(), await downloadImplementation({ url })])
  );
  return Object.fromEntries(entries);
}
// core/prompt/get-validated-prompt.ts
import { InvalidPromptError } from "@ai-sdk/provider";
// Validate that exactly one of `prompt`/`messages` is set (and that system
// messages carry string content), then normalize into a tagged union:
// { type: "prompt" } or { type: "messages" }.
function getValidatedPrompt(prompt) {
  const hasPrompt = prompt.prompt != null;
  const hasMessages = prompt.messages != null;
  if (!hasPrompt && !hasMessages) {
    throw new InvalidPromptError({
      prompt,
      message: "prompt or messages must be defined"
    });
  }
  if (hasPrompt && hasMessages) {
    throw new InvalidPromptError({
      prompt,
      message: "prompt and messages cannot be defined at the same time"
    });
  }
  if (hasMessages) {
    for (const message of prompt.messages) {
      if (message.role === "system" && typeof message.content !== "string") {
        throw new InvalidPromptError({
          prompt,
          message: "system message content must be a string"
        });
      }
    }
  }
  if (hasPrompt) {
    return {
      type: "prompt",
      prompt: prompt.prompt,
      messages: void 0,
      system: prompt.system
    };
  }
  // only remaining possibility because of the checks above
  return {
    type: "messages",
    prompt: void 0,
    messages: prompt.messages,
    system: prompt.system
  };
}
// errors/invalid-argument-error.ts
import { AISDKError as AISDKError5 } from "@ai-sdk/provider";
// Name/marker/symbol triple that brands InvalidArgumentError instances for
// `isInstance` checks across bundled copies of this module.
var name5 = "AI_InvalidArgumentError";
var marker5 = `vercel.ai.error.${name5}`;
var symbol5 = Symbol.for(marker5);
var _a5;
// Error thrown for invalid call-setting values (see `prepareCallSettings`).
var InvalidArgumentError = class extends AISDKError5 {
  constructor({
    parameter,
    value,
    message
  }) {
    super({
      name: name5,
      message: `Invalid argument for parameter ${parameter}: ${message}`
    });
    // symbol brand; `_a5` is assigned the shared symbol below the class body
    this[_a5] = true;
    // name of the offending parameter and the value that was rejected
    this.parameter = parameter;
    this.value = value;
  }
  static isInstance(error) {
    return AISDKError5.hasMarker(error, marker5);
  }
  /**
   * @deprecated use `isInstance` instead
   */
  static isInvalidArgumentError(error) {
    return error instanceof Error && error.name === name5 && typeof error.parameter === "string" && typeof error.value === "string";
  }
  toJSON() {
    return {
      name: this.name,
      message: this.message,
      stack: this.stack,
      parameter: this.parameter,
      value: this.value
    };
  }
};
_a5 = symbol5;
// core/prompt/prepare-call-settings.ts
// Validate call settings and apply defaults (temperature 0, maxRetries 2,
// empty stopSequences dropped). Throws InvalidArgumentError with the same
// parameter names and messages as before. Validation order is preserved:
// maxTokens, temperature, topP, presencePenalty, frequencyPenalty, seed,
// maxRetries.
function prepareCallSettings({
  maxTokens,
  temperature,
  topP,
  presencePenalty,
  frequencyPenalty,
  stopSequences,
  seed,
  maxRetries
}) {
  // shared validators; null/undefined values are always allowed through
  const requireNumber = (parameter, value) => {
    if (value != null && typeof value !== "number") {
      throw new InvalidArgumentError({
        parameter,
        value,
        message: `${parameter} must be a number`
      });
    }
  };
  const requireInteger = (parameter, value) => {
    if (value != null && !Number.isInteger(value)) {
      throw new InvalidArgumentError({
        parameter,
        value,
        message: `${parameter} must be an integer`
      });
    }
  };
  requireInteger("maxTokens", maxTokens);
  if (maxTokens != null && maxTokens < 1) {
    throw new InvalidArgumentError({
      parameter: "maxTokens",
      value: maxTokens,
      message: "maxTokens must be >= 1"
    });
  }
  requireNumber("temperature", temperature);
  requireNumber("topP", topP);
  requireNumber("presencePenalty", presencePenalty);
  requireNumber("frequencyPenalty", frequencyPenalty);
  requireInteger("seed", seed);
  requireInteger("maxRetries", maxRetries);
  if (maxRetries != null && maxRetries < 0) {
    throw new InvalidArgumentError({
      parameter: "maxRetries",
      value: maxRetries,
      message: "maxRetries must be >= 0"
    });
  }
  return {
    maxTokens,
    temperature: temperature != null ? temperature : 0,
    topP,
    presencePenalty,
    frequencyPenalty,
    stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
    seed,
    maxRetries: maxRetries != null ? maxRetries : 2
  };
}
// core/types/token-usage.ts
// Derive full token usage from a provider usage record: total is the sum of
// prompt and completion tokens.
function calculateCompletionTokenUsage(usage) {
  const { promptTokens, completionTokens } = usage;
  return {
    promptTokens,
    completionTokens,
    totalTokens: promptTokens + completionTokens
  };
}
// core/util/prepare-response-headers.ts
// Build response headers from an optional ResponseInit: defaults Content-Type
// (without overriding a caller-supplied one) and sets the data-stream
// version header when requested.
function prepareResponseHeaders(init, {
  contentType,
  dataStreamVersion
}) {
  const initialHeaders = init == null || init.headers == null ? {} : init.headers;
  const headers = new Headers(initialHeaders);
  if (!headers.has("Content-Type")) {
    headers.set("Content-Type", contentType);
  }
  if (dataStreamVersion !== void 0) {
    headers.set("X-Vercel-AI-Data-Stream", dataStreamVersion);
  }
  return headers;
}
// core/util/schema.ts
import { validatorSymbol } from "@ai-sdk/provider-utils";
import zodToJsonSchema from "zod-to-json-schema";
// Global brand for Schema objects (Symbol.for so it is shared across copies).
var schemaSymbol = Symbol.for("vercel.ai.schema");
// Wrap a raw JSON schema (and optional validate function) into a branded
// Schema object that also satisfies the provider-utils validator interface.
function jsonSchema(jsonSchema2, {
  validate
} = {}) {
  return {
    [schemaSymbol]: true,
    _type: void 0,
    // should never be used directly
    [validatorSymbol]: true,
    jsonSchema: jsonSchema2,
    validate
  };
}
// True when `value` is a branded Schema object carrying jsonSchema/validate.
function isSchema(value) {
  if (typeof value !== "object" || value === null) {
    return false;
  }
  return value[schemaSymbol] === true && "jsonSchema" in value && "validate" in value;
}
// Pass through values that are already Schema objects; otherwise wrap the
// value as a zod schema.
function asSchema(schema) {
  if (isSchema(schema)) {
    return schema;
  }
  return zodSchema(schema);
}
// Adapt a zod schema to the Schema interface: its JSON-schema form plus a
// validate function backed by safeParse.
function zodSchema(zodSchema2) {
  const validate = (value) => {
    const parsed = zodSchema2.safeParse(value);
    if (parsed.success) {
      return { success: true, value: parsed.data };
    }
    return { success: false, error: parsed.error };
  };
  // we assume that zodToJsonSchema will return a valid JSONSchema7:
  return jsonSchema(zodToJsonSchema(zodSchema2), { validate });
}
// core/generate-object/inject-json-schema-into-system.ts
var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
// Append the JSON schema (with prefix/suffix instructions) to the system
// prompt; null entries are filtered before joining with newlines.
function injectJsonSchemaIntoSystem({
  system,
  schema,
  schemaPrefix = DEFAULT_SCHEMA_PREFIX,
  schemaSuffix = DEFAULT_SCHEMA_SUFFIX
}) {
  const sections = [
    system,
    system != null ? "" : null,
    // blank separator line, only when system text exists
    schemaPrefix,
    JSON.stringify(schema),
    schemaSuffix
  ];
  return sections.filter((section) => section != null).join("\n");
}
// core/generate-object/no-object-generated-error.ts
import { AISDKError as AISDKError6 } from "@ai-sdk/provider";
// Name/marker/symbol triple that brands NoObjectGeneratedError instances for
// `isInstance` checks across bundled copies of this module.
var name6 = "AI_NoObjectGeneratedError";
var marker6 = `vercel.ai.error.${name6}`;
var symbol6 = Symbol.for(marker6);
var _a6;
// Error thrown by `generateObject` when the model produced no object text.
var NoObjectGeneratedError = class extends AISDKError6 {
  // used in isInstance
  constructor({ message = "No object generated." } = {}) {
    super({ name: name6, message });
    // symbol brand; `_a6` is assigned the shared symbol below the class body
    this[_a6] = true;
  }
  static isInstance(error) {
    return AISDKError6.hasMarker(error, marker6);
  }
  /**
   * @deprecated Use isInstance instead.
   */
  static isNoObjectGeneratedError(error) {
    return error instanceof Error && error.name === name6;
  }
  /**
   * @deprecated Do not use this method. It will be removed in the next major version.
   */
  toJSON() {
    return {
      name: this.name,
      cause: this.cause,
      message: this.message,
      stack: this.stack
    };
  }
};
_a6 = symbol6;
// core/generate-object/generate-object.ts
async function generateObject({
model,
schema: inputSchema,
schemaName,
schemaDescription,
mode,
system,
prompt,
messages,
maxRetries,
abortSignal,
headers,
experimental_telemetry: telemetry,
...settings
}) {
var _a9;
const baseTelemetryAttributes = getBaseTelemetryAttributes({
model,
telemetry,
headers,
settings: { ...settings, maxRetries }
});
const schema = asSchema(inputSchema);
const tracer = getTracer({ isEnabled: (_a9 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a9 : false });
return recordSpan({
name: "ai.generateObject",
attributes: selectTelemetryAttributes({
telemetry,
attributes: {
...assembleOperationName({
operationName: "ai.generateObject",
telemetry
}),
...baseTelemetryAttributes,
// specific settings that only make sense on the outer level:
"ai.prompt": {
input: () => JSON.stringify({ system, prompt, messages })
},
"ai.schema": {
input: () => JSON.stringify(schema.jsonSchema)
},
"ai.schema.name": schemaName,
"ai.schema.description": schemaDescription,
"ai.settings.mode": mode
}
}),
tracer,
fn: async (span) => {
const retry = retryWithExponentialBackoff({ maxRetries });
if (mode === "auto" || mode == null) {
mode = model.defaultObjectGenerationMode;
}
let result;
let finishReason;
let usage;
let warnings;
let rawResponse;
let logprobs;
switch (mode) {
case "json": {
const validatedPrompt = getValidatedPrompt({
system: model.supportsStructuredOutputs ? system : injectJsonSchemaIntoSystem({
system,
schema: schema.jsonSchema
}),
prompt,
messages
});
const promptMessages = await convertToLanguageModelPrompt({
prompt: validatedPrompt,
modelSupportsImageUrls: model.supportsImageUrls
});
const inputFormat = validatedPrompt.type;
const generateResult = await retry(
() => recordSpan({
name: "ai.generateObject.doGenerate",
attributes: selectTelemetryAttributes({
telemetry,
attributes: {
...assembleOperationName({
operationName: "ai.generateObject.doGenerate",
telemetry
}),
...baseTelemetryAttributes,
"ai.prompt.format": {
input: () => inputFormat
},
"ai.prompt.messages": {
input: () => JSON.stringify(promptMessages)
},
"ai.settings.mode": mode,
// standardized gen-ai llm span attributes:
"gen_ai.request.model": model.modelId,
"gen_ai.system": model.provider,
"gen_ai.request.max_tokens": settings.maxTokens,
"gen_ai.request.temperature": settings.temperature,
"gen_ai.request.top_p": settings.topP
}
}),
tracer,
fn: async (span2) => {
const result2 = await model.doGenerate({
mode: {
type: "object-json",
schema: schema.jsonSchema,
name: schemaName,
description: schemaDescription
},
...prepareCallSettings(settings),
inputFormat,
prompt: promptMessages,
abortSignal,
headers
});
if (result2.text === void 0) {
throw new NoObjectGeneratedError();
}
span2.setAttributes(
selectTelemetryAttributes({
telemetry,
attributes: {
"ai.finishReason": result2.finishReason,
"ai.usage.promptTokens": result2.usage.promptTokens,
"ai.usage.completionTokens": result2.usage.completionTokens,
"ai.result.object": { output: () => result2.text },
// standardized gen-ai llm span attributes:
"gen_ai.response.finish_reasons": [result2.finishReason],
"gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
"gen_ai.usage.completion_tokens": result2.usage.completionTokens
}
})
);
return { ...result2, objectText: result2.text };
}
})
);
result = generateResult.objectText;
finishReason = generateResult.finishReason;
usage = generateResult.usage;
warnings = generateResult.warnings;
rawResponse = generateResult.rawResponse;
logprobs = generateResult.logprobs;
break;
}
case "tool": {
const validatedPrompt = getValidatedPrompt({
system,
prompt,
messages
});
const promptMessages = await convertToLanguageModelPrompt({
prompt: validatedPrompt,
modelSupportsImageUrls: model.supportsImageUrls
});
const inputFormat = validatedPrompt.type;
const generateResult = await retry(
() => recordSpan({
name: "ai.generateObject.doGenerate",
attributes: selectTelemetryAttributes({
telemetry,
attributes: {
...assembleOperationName({
operationName: "ai.generateObject.doGenerate",
telemetry
}),
...baseTelemetryAttributes,
"ai.prompt.format": {
input: () => inputFormat
},
"ai.prompt.messages": {
input: () => JSON.stringify(promptMessages)
},
"ai.settings.mode": mode,
// standardized gen-ai llm span attributes:
"gen_ai.request.model": model.modelId,
"gen_ai.system": model.provider,
"gen_ai.request.max_tokens": settings.maxTokens,
"gen_ai.request.temperature": settings.temperature,
"gen_ai.request.top_p": settings.topP
}
}),
tracer,
fn: async (span2) => {
var _a10, _b;
const result2 = await model.doGenerate({
mode: {
type: "object-tool",
tool: {
type: "function",
name: schemaName != null ? schemaName : "json",
description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
parameters: schema.jsonSchema
}
},
...prepareCallSettings(settings),
inputFormat,
prompt: promptMessages,
abortSignal,
headers
});
const objectText = (_b = (_a10 = result2.toolCalls) == null ? void 0 : _a10[0]) == null ? void 0 : _b.args;
if (objectText === void 0) {
throw new NoObjectGeneratedError();
}
span2.setAttributes(
selectTelemetryAttributes({
telemetry,
attributes: {
"ai.finishReason": result2.finishReason,
"ai.usage.promptTokens": result2.usage.promptTokens,
"ai.usage.completionTokens": result2.usage.completionTokens,
"ai.result.object": { output: () => objectText },
// standardized gen-ai llm span attributes:
"gen_ai.response.finish_reasons": [result2.finishReason],
"gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
"gen_ai.usage.completion_tokens": result2.usage.completionTokens
}
})
);
return { ...result2, objectText };
}
})
);
result = generateResult.objectText;
finishReason = generateResult.finishReason;
usage = generateResult.usage;
warnings = generateResult.warnings;
rawResponse = generateResult.rawResponse;
logprobs = generateResult.logprobs;
break;
}
case void 0: {
throw new Error(
"Model does not have a default object generation mode."
);
}
default: {
const _exhaustiveCheck = mode;
throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
}
}
const parseResult = safeParseJSON({ text: result, schema });
if (!parseResult.success) {
throw parseResult.error;
}
span.setAttributes(
selectTelemetryAttributes({
telemetry,
attributes: {
"ai.finishReason": finishReason,
"ai.usage.promptTokens": usage.promptTokens,
"ai.usage.completionTokens": usage.completionTokens,
"ai.result.object": {
output: () => JSON.stringify(parseResult.value)
}
}
})
);
return new DefaultGenerateObjectResult({
object: parseResult.value,
finishReason,
usage: calculateCompletionTokenUsage(usage),
warnings,
rawResponse,
logprobs
});
}
});
}
var DefaultGenerateObjectResult = class {
  /**
   * Result value object returned by `generateObject`: carries the parsed
   * object plus metadata (finish reason, token usage, warnings, raw
   * provider response, logprobs) from the generation call.
   */
  constructor({ object, finishReason, usage, warnings, rawResponse, logprobs }) {
    this.object = object;
    this.finishReason = finishReason;
    this.usage = usage;
    this.warnings = warnings;
    this.rawResponse = rawResponse;
    this.logprobs = logprobs;
  }
  /**
   * Serializes the generated object as a JSON `Response`.
   * `init.status` overrides the default 200; headers are derived from
   * `init` with a JSON content type.
   */
  toJsonResponse(init) {
    // Fall back to 200 only when `init` or `init.status` is null/undefined.
    const status = init == null || init.status == null ? 200 : init.status;
    return new Response(JSON.stringify(this.object), {
      status,
      headers: prepareResponseHeaders(init, {
        contentType: "application/json; charset=utf-8"
      })
    });
  }
};
var experimental_generateObject = generateObject;
// core/generate-object/stream-object.ts
import { safeValidateTypes } from "@ai-sdk/provider-utils";
import {
isDeepEqualData,
parsePartialJson
} from "@ai-sdk/ui-utils";
// util/create-resolvable-promise.ts
// Creates a promise together with its own settle callbacks, so callers can
// resolve or reject it from outside the executor.
function createResolvablePromise() {
  let resolveFn;
  let rejectFn;
  const promise = new Promise((res, rej) => {
    resolveFn = res;
    rejectFn = rej;
  });
  return { promise, resolve: resolveFn, reject: rejectFn };
}
// util/delayed-promise.ts
var DelayedPromise = class {
  // Records settlement in `status` until a consumer reads `value`; only then
  // is an actual Promise materialized (lazily), replaying any settlement
  // that happened before the first access.
  constructor() {
    this.status = { type: "pending" };
    this._resolve = void 0;
    this._reject = void 0;
  }
  get value() {
    // Materialize the promise on first access only.
    if (this.promise == null) {
      this.promise = new Promise((resolve, reject) => {
        // Replay a settlement that occurred before anyone subscribed.
        if (this.status.type === "resolved") {
          resolve(this.status.value);
        } else if (this.status.type === "rejected") {
          reject(this.status.error);
        }
        this._resolve = resolve;
        this._reject = reject;
      });
    }
    return this.promise;
  }
  resolve(value) {
    this.status = { type: "resolved", value };
    // Settle the underlying Promise only if it has been materialized;
    // otherwise the stored status is replayed on first `value` access.
    if (this.promise && this._resolve) {
      this._resolve(value);
    }
  }
  reject(error) {
    this.status = { type: "rejected", error };
    if (this.promise && this._reject) {
      this._reject(error);
    }
  }
};
// core/util/async-iterable-stream.ts
/**
 * Pipes `source` through a `TransformStream(transformer)` and attaches an
 * async-iterator protocol so the result can be consumed with
 * `for await ... of`.
 *
 * Fix: the hand-rolled iterator previously implemented only `next()`.
 * Breaking out of a `for await` loop early (break/return/throw) invokes the
 * iterator's `return()` method — without it, the underlying stream was never
 * cancelled and the reader lock was never released, leaving the source
 * producing into a permanently locked stream.
 */
function createAsyncIterableStream(source, transformer) {
  const transformedStream = source.pipeThrough(
    new TransformStream(transformer)
  );
  transformedStream[Symbol.asyncIterator] = () => {
    const reader = transformedStream.getReader();
    return {
      async next() {
        const { done, value } = await reader.read();
        return done ? { done: true, value: void 0 } : { done: false, value };
      },
      // Called on early exit from `for await`: cancel the stream so the
      // source is signalled, then release the reader lock.
      async return(value) {
        await reader.cancel();
        reader.releaseLock();
        return { done: true, value };
      }
    };
  };
  return transformedStream;
}
// core/generate-object/stream-object.ts
async function streamObject({
model,
schema: inputSchema,
schemaName,
schemaDescription,
mode,
system,
prompt,
messages,
maxRetries,
abortSignal,
headers,
experimental_telemetry: telemetry,
onFinish,
...settings
}) {
var _a9;
const baseTelemetryAttributes = getBaseTelemetryAttributes({
model,
telemetry,
headers,
settings: { ...settings, maxRetries }
});
const tracer = getTracer({ isEnabled: (_a9 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a9 : false });
const retry = retry