@mastra/core
Mastra is a framework for building AI-powered applications and agents with a modern TypeScript stack.
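This file is the compiled CommonJS build of the chunk that implements MastraLLMV1, Mastra's adapter over the Vercel AI SDK's generateText, generateObject, streamText, and streamObject APIs.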
JavaScript
'use strict';
var chunkIAJHRFO4_cjs = require('./chunk-IAJHRFO4.cjs');
var chunk5NTO7S5I_cjs = require('./chunk-5NTO7S5I.cjs');
var chunk6VOPKVYH_cjs = require('./chunk-6VOPKVYH.cjs');
var schemaCompat = require('@mastra/schema-compat');
var zodToJson = require('@mastra/schema-compat/zod-to-json');
var ai = require('ai');
var zod = require('zod');
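// MastraLLMV1 adapts a Vercel AI SDK v1 language model for use inside Mastra:
// it wires Mastra's logger and telemetry into each call, normalizes structured
// output schemas across providers, and wraps failures in MastraError with
// stable ids and per-run details (runId, threadId, resourceId).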
var MastraLLMV1 = class extends chunk6VOPKVYH_cjs.MastraBase {
#model;
#mastra;
#options;
constructor({ model, mastra, options }) {
super({ name: "aisdk" });
this.#model = model;
this.#options = options;
if (mastra) {
this.#mastra = mastra;
if (mastra.getLogger()) {
this.__setLogger(this.#mastra.getLogger());
}
}
}
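// Registration hooks: Mastra calls these after construction to inject shared
// telemetry/logger primitives and the Mastra instance itself.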
__registerPrimitives(p) {
if (p.telemetry) {
this.__setTelemetry(p.telemetry);
}
if (p.logger) {
this.__setLogger(p.logger);
}
}
__registerMastra(p) {
this.#mastra = p;
}
getProvider() {
return this.#model.provider;
}
getModelId() {
return this.#model.modelId;
}
getModel() {
return this.#model;
}
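// Runs a schema through provider-specific compatibility layers (OpenAI
// reasoning, OpenAI, Google, Anthropic, DeepSeek, Meta) via applyCompatLayer
// in "aiSdkSchema" mode, smoothing over each provider's JSON-schema quirks.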
_applySchemaCompat(schema) {
const model = this.#model;
const schemaCompatLayers = [];
if (model) {
const modelInfo = {
modelId: model.modelId,
supportsStructuredOutputs: model.supportsStructuredOutputs ?? false,
provider: model.provider
};
schemaCompatLayers.push(
new schemaCompat.OpenAIReasoningSchemaCompatLayer(modelInfo),
new schemaCompat.OpenAISchemaCompatLayer(modelInfo),
new schemaCompat.GoogleSchemaCompatLayer(modelInfo),
new schemaCompat.AnthropicSchemaCompatLayer(modelInfo),
new schemaCompat.DeepSeekSchemaCompatLayer(modelInfo),
new schemaCompat.MetaSchemaCompatLayer(modelInfo)
);
}
return schemaCompat.applyCompatLayer({
schema,
compatLayers: schemaCompatLayers,
mode: "aiSdkSchema"
});
}
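// One-shot text generation via ai.generateText. When experimental_output is a
// Zod schema, a top-level z.array() is unwrapped to its element type, the
// schema is converted to JSON Schema draft-07, and the result's parsed
// `object` is copied over once the run finishes with reason "stop".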
async __text({
runId,
messages,
maxSteps = 5,
tools = {},
temperature,
toolChoice = "auto",
onStepFinish,
experimental_output,
telemetry,
threadId,
resourceId,
runtimeContext,
tracingContext,
...rest
}) {
const model = this.#model;
this.logger.debug(`[LLM] - Generating text`, {
runId,
messages,
maxSteps,
threadId,
resourceId,
tools: Object.keys(tools)
});
let schema = void 0;
if (experimental_output) {
this.logger.debug("[LLM] - Using experimental output", {
runId
});
if (chunkIAJHRFO4_cjs.isZodType(experimental_output)) {
schema = experimental_output;
if (schema instanceof zod.z.ZodArray) {
schema = schema._def.type;
}
const jsonSchemaToUse = zodToJson.zodToJsonSchema(schema, "jsonSchema7");
schema = ai.jsonSchema(jsonSchemaToUse);
} else {
schema = ai.jsonSchema(experimental_output);
}
}
const llmSpan = tracingContext.currentSpan?.createChildSpan({
name: `llm: '${model.modelId}'`,
type: "model_generation" /* MODEL_GENERATION */,
input: {
messages,
schema
},
attributes: {
model: model.modelId,
provider: model.provider,
parameters: {
temperature,
maxOutputTokens: rest.maxTokens,
topP: rest.topP,
frequencyPenalty: rest.frequencyPenalty,
presencePenalty: rest.presencePenalty
},
streaming: false
},
metadata: {
runId,
threadId,
resourceId
},
tracingPolicy: this.#options?.tracingPolicy
});
const argsForExecute = {
...rest,
messages,
model,
temperature,
tools: {
...tools
},
toolChoice,
maxSteps,
onStepFinish: async (props) => {
try {
await onStepFinish?.({ ...props, runId });
} catch (e) {
const mastraError = new chunk5NTO7S5I_cjs.MastraError(
{
id: "LLM_TEXT_ON_STEP_FINISH_CALLBACK_EXECUTION_FAILED",
domain: "LLM" /* LLM */,
category: "USER" /* USER */,
details: {
modelId: model.modelId,
modelProvider: model.provider,
runId: runId ?? "unknown",
threadId: threadId ?? "unknown",
resourceId: resourceId ?? "unknown",
finishReason: props?.finishReason,
toolCalls: props?.toolCalls ? JSON.stringify(props.toolCalls) : "",
toolResults: props?.toolResults ? JSON.stringify(props.toolResults) : "",
usage: props?.usage ? JSON.stringify(props.usage) : ""
}
},
e
);
throw mastraError;
}
this.logger.debug("[LLM] - Text Step Change:", {
text: props?.text,
toolCalls: props?.toolCalls,
toolResults: props?.toolResults,
finishReason: props?.finishReason,
usage: props?.usage,
runId
});
const remainingTokens = parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"] ?? "", 10);
if (!isNaN(remainingTokens) && remainingTokens > 0 && remainingTokens < 2e3) {
this.logger.warn("Rate limit approaching, waiting 10 seconds", { runId });
await chunkIAJHRFO4_cjs.delay(10 * 1e3);
}
},
experimental_telemetry: {
...this.experimental_telemetry,
...telemetry
},
experimental_output: schema ? ai.Output.object({
schema
}) : void 0
};
try {
const result = await ai.generateText(argsForExecute);
if (schema && result.finishReason === "stop") {
result.object = result.experimental_output;
}
llmSpan?.end({
output: {
text: result.text,
object: result.object,
reasoning: result.reasoningDetails,
reasoningText: result.reasoning,
files: result.files,
sources: result.sources,
warnings: result.warnings
},
attributes: {
finishReason: result.finishReason,
usage: result.usage
}
});
return result;
} catch (e) {
const mastraError = new chunk5NTO7S5I_cjs.MastraError(
{
id: "LLM_GENERATE_TEXT_AI_SDK_EXECUTION_FAILED",
domain: "LLM" /* LLM */,
category: "THIRD_PARTY" /* THIRD_PARTY */,
details: {
modelId: model.modelId,
modelProvider: model.provider,
runId: runId ?? "unknown",
threadId: threadId ?? "unknown",
resourceId: resourceId ?? "unknown"
}
},
e
);
llmSpan?.error({ error: mastraError });
throw mastraError;
}
}
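// Structured generation via ai.generateObject. A top-level z.array() schema
// becomes output: "array" with its element schema; the schema then passes
// through _applySchemaCompat. Schema-conversion failures (USER) are reported
// separately from AI SDK execution failures (THIRD_PARTY).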
async __textObject({
messages,
structuredOutput,
runId,
telemetry,
threadId,
resourceId,
runtimeContext,
tracingContext,
...rest
}) {
const model = this.#model;
this.logger.debug(`[LLM] - Generating a text object`, { runId });
const llmSpan = tracingContext.currentSpan?.createChildSpan({
name: `llm: '${model.modelId}'`,
type: "model_generation" /* MODEL_GENERATION */,
input: {
messages
},
attributes: {
model: model.modelId,
provider: model.provider,
parameters: {
temperature: rest.temperature,
maxOutputTokens: rest.maxTokens,
topP: rest.topP,
frequencyPenalty: rest.frequencyPenalty,
presencePenalty: rest.presencePenalty
},
streaming: false
},
metadata: {
runId,
threadId,
resourceId
},
tracingPolicy: this.#options?.tracingPolicy
});
try {
let output = "object";
if (structuredOutput instanceof zod.z.ZodArray) {
output = "array";
structuredOutput = structuredOutput._def.type;
}
const processedSchema = this._applySchemaCompat(structuredOutput);
llmSpan?.update({
input: {
messages,
schema: processedSchema
}
});
const argsForExecute = {
...rest,
messages,
model,
// @ts-expect-error - output in our implementation can only be object or array
output,
schema: processedSchema,
experimental_telemetry: {
...this.experimental_telemetry,
...telemetry
}
};
try {
const result = await ai.generateObject(argsForExecute);
llmSpan?.end({
output: {
object: result.object,
warnings: result.warnings
},
attributes: {
finishReason: result.finishReason,
usage: result.usage
}
});
return result;
} catch (e) {
const mastraError = new chunk5NTO7S5I_cjs.MastraError(
{
id: "LLM_GENERATE_OBJECT_AI_SDK_EXECUTION_FAILED",
domain: "LLM" /* LLM */,
category: "THIRD_PARTY" /* THIRD_PARTY */,
details: {
modelId: model.modelId,
modelProvider: model.provider,
runId: runId ?? "unknown",
threadId: threadId ?? "unknown",
resourceId: resourceId ?? "unknown"
}
},
e
);
llmSpan?.error({ error: mastraError });
throw mastraError;
}
} catch (e) {
if (e instanceof chunk5NTO7S5I_cjs.MastraError) {
throw e;
}
const mastraError = new chunk5NTO7S5I_cjs.MastraError(
{
id: "LLM_GENERATE_OBJECT_AI_SDK_SCHEMA_CONVERSION_FAILED",
domain: "LLM" /* LLM */,
category: "USER" /* USER */,
details: {
modelId: model.modelId,
modelProvider: model.provider,
runId: runId ?? "unknown",
threadId: threadId ?? "unknown",
resourceId: resourceId ?? "unknown"
}
},
e
);
llmSpan?.error({ error: mastraError });
throw mastraError;
}
}
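// Streaming text via ai.streamText. Mirrors __text, including the 10-second
// backoff when the x-ratelimit-remaining-tokens response header drops below
// 2000, but the tracing span is closed in onFinish because the stream
// resolves asynchronously.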
__stream({
messages,
onStepFinish,
onFinish,
maxSteps = 5,
tools = {},
runId,
temperature,
toolChoice = "auto",
experimental_output,
telemetry,
threadId,
resourceId,
runtimeContext,
tracingContext,
...rest
}) {
const model = this.#model;
this.logger.debug(`[LLM] - Streaming text`, {
runId,
threadId,
resourceId,
messages,
maxSteps,
tools: Object.keys(tools || {})
});
let schema;
if (experimental_output) {
this.logger.debug("[LLM] - Using experimental output", {
runId
});
if (typeof experimental_output.parse === "function") {
schema = experimental_output;
if (schema instanceof zod.z.ZodArray) {
schema = schema._def.type;
}
} else {
schema = ai.jsonSchema(experimental_output);
}
}
const llmSpan = tracingContext.currentSpan?.createChildSpan({
name: `llm: '${model.modelId}'`,
type: "model_generation" /* MODEL_GENERATION */,
input: {
messages
},
attributes: {
model: model.modelId,
provider: model.provider,
parameters: {
temperature,
maxOutputTokens: rest.maxTokens,
topP: rest.topP,
frequencyPenalty: rest.frequencyPenalty,
presencePenalty: rest.presencePenalty
},
streaming: true
},
metadata: {
runId,
threadId,
resourceId
},
tracingPolicy: this.#options?.tracingPolicy
});
const argsForExecute = {
model,
temperature,
tools: {
...tools
},
maxSteps,
toolChoice,
onStepFinish: async (props) => {
try {
await onStepFinish?.({ ...props, runId });
} catch (e) {
const mastraError = new chunk5NTO7S5I_cjs.MastraError(
{
id: "LLM_STREAM_ON_STEP_FINISH_CALLBACK_EXECUTION_FAILED",
domain: "LLM" /* LLM */,
category: "USER" /* USER */,
details: {
modelId: model.modelId,
modelProvider: model.provider,
runId: runId ?? "unknown",
threadId: threadId ?? "unknown",
resourceId: resourceId ?? "unknown",
finishReason: props?.finishReason,
toolCalls: props?.toolCalls ? JSON.stringify(props.toolCalls) : "",
toolResults: props?.toolResults ? JSON.stringify(props.toolResults) : "",
usage: props?.usage ? JSON.stringify(props.usage) : ""
}
},
e
);
this.logger.trackException(mastraError);
llmSpan?.error({ error: mastraError });
throw mastraError;
}
this.logger.debug("[LLM] - Stream Step Change:", {
text: props?.text,
toolCalls: props?.toolCalls,
toolResults: props?.toolResults,
finishReason: props?.finishReason,
usage: props?.usage,
runId
});
const remainingTokens = parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"] ?? "", 10);
if (!isNaN(remainingTokens) && remainingTokens > 0 && remainingTokens < 2e3) {
this.logger.warn("Rate limit approaching, waiting 10 seconds", { runId });
await chunkIAJHRFO4_cjs.delay(10 * 1e3);
}
},
onFinish: async (props) => {
llmSpan?.end({
output: {
text: props?.text,
reasoning: props?.reasoningDetails,
reasoningText: props?.reasoning,
files: props?.files,
sources: props?.sources,
warnings: props?.warnings
},
attributes: {
finishReason: props?.finishReason,
usage: props?.usage
}
});
try {
await onFinish?.({ ...props, runId });
} catch (e) {
const mastraError = new chunk5NTO7S5I_cjs.MastraError(
{
id: "LLM_STREAM_ON_FINISH_CALLBACK_EXECUTION_FAILED",
domain: "LLM" /* LLM */,
category: "USER" /* USER */,
details: {
modelId: model.modelId,
modelProvider: model.provider,
runId: runId ?? "unknown",
threadId: threadId ?? "unknown",
resourceId: resourceId ?? "unknown",
finishReason: props?.finishReason,
toolCalls: props?.toolCalls ? JSON.stringify(props.toolCalls) : "",
toolResults: props?.toolResults ? JSON.stringify(props.toolResults) : "",
usage: props?.usage ? JSON.stringify(props.usage) : ""
}
},
e
);
llmSpan?.error({ error: mastraError });
this.logger.trackException(mastraError);
throw mastraError;
}
this.logger.debug("[LLM] - Stream Finished:", {
text: props?.text,
toolCalls: props?.toolCalls,
toolResults: props?.toolResults,
finishReason: props?.finishReason,
usage: props?.usage,
runId,
threadId,
resourceId
});
},
...rest,
messages,
experimental_telemetry: {
...this.experimental_telemetry,
...telemetry
},
experimental_output: schema ? ai.Output.object({
schema
}) : void 0
};
try {
return ai.streamText(argsForExecute);
} catch (e) {
const mastraError = new chunk5NTO7S5I_cjs.MastraError(
{
id: "LLM_STREAM_TEXT_AI_SDK_EXECUTION_FAILED",
domain: "LLM" /* LLM */,
category: "THIRD_PARTY" /* THIRD_PARTY */,
details: {
modelId: model.modelId,
modelProvider: model.provider,
runId: runId ?? "unknown",
threadId: threadId ?? "unknown",
resourceId: resourceId ?? "unknown"
}
},
e
);
llmSpan?.error({ error: mastraError });
throw mastraError;
}
}
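// Streaming structured output via ai.streamObject. Follows the same array
// unwrapping and schema-compat path as __textObject; the tracing span is
// closed in onFinish once the final object is available.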
__streamObject({
messages,
runId,
runtimeContext,
threadId,
resourceId,
onFinish,
structuredOutput,
telemetry,
tracingContext,
...rest
}) {
const model = this.#model;
this.logger.debug(`[LLM] - Streaming structured output`, {
runId,
messages
});
const llmSpan = tracingContext.currentSpan?.createChildSpan({
name: `llm: '${model.modelId}'`,
type: "model_generation" /* MODEL_GENERATION */,
input: {
messages
},
attributes: {
model: model.modelId,
provider: model.provider,
parameters: {
temperature: rest.temperature,
maxOutputTokens: rest.maxTokens,
topP: rest.topP,
frequencyPenalty: rest.frequencyPenalty,
presencePenalty: rest.presencePenalty
},
streaming: true
},
metadata: {
runId,
threadId,
resourceId
},
tracingPolicy: this.#options?.tracingPolicy
});
try {
let output = "object";
if (structuredOutput instanceof zod.z.ZodArray) {
output = "array";
structuredOutput = structuredOutput._def.type;
}
const processedSchema = this._applySchemaCompat(structuredOutput);
llmSpan?.update({
input: {
messages,
schema: processedSchema
}
});
const argsForExecute = {
...rest,
model,
onFinish: async (props) => {
llmSpan?.end({
output: {
text: props?.text,
object: props?.object,
reasoning: props?.reasoningDetails,
reasoningText: props?.reasoning,
files: props?.files,
sources: props?.sources,
warnings: props?.warnings
},
attributes: {
finishReason: props?.finishReason,
usage: props?.usage
}
});
try {
await onFinish?.({ ...props, runId });
} catch (e) {
const mastraError = new chunk5NTO7S5I_cjs.MastraError(
{
id: "LLM_STREAM_OBJECT_ON_FINISH_CALLBACK_EXECUTION_FAILED",
domain: "LLM" /* LLM */,
category: "USER" /* USER */,
details: {
modelId: model.modelId,
modelProvider: model.provider,
runId: runId ?? "unknown",
threadId: threadId ?? "unknown",
resourceId: resourceId ?? "unknown",
toolCalls: "",
toolResults: "",
finishReason: "",
usage: props?.usage ? JSON.stringify(props.usage) : ""
}
},
e
);
this.logger.trackException(mastraError);
llmSpan?.error({ error: mastraError });
throw mastraError;
}
this.logger.debug("[LLM] - Object Stream Finished:", {
usage: props?.usage,
runId,
threadId,
resourceId
});
},
messages,
// @ts-expect-error - output in our implementation can only be object or array
output,
experimental_telemetry: {
...this.experimental_telemetry,
...telemetry
},
schema: processedSchema
};
try {
return ai.streamObject(argsForExecute);
} catch (e) {
const mastraError = new chunk5NTO7S5I_cjs.MastraError(
{
id: "LLM_STREAM_OBJECT_AI_SDK_EXECUTION_FAILED",
domain: "LLM" /* LLM */,
category: "THIRD_PARTY" /* THIRD_PARTY */,
details: {
modelId: model.modelId,
modelProvider: model.provider,
runId: runId ?? "unknown",
threadId: threadId ?? "unknown",
resourceId: resourceId ?? "unknown"
}
},
e
);
llmSpan?.error({ error: mastraError });
throw mastraError;
}
} catch (e) {
if (e instanceof chunk5NTO7S5I_cjs.MastraError) {
llmSpan?.error({ error: e });
throw e;
}
const mastraError = new chunk5NTO7S5I_cjs.MastraError(
{
id: "LLM_STREAM_OBJECT_AI_SDK_SCHEMA_CONVERSION_FAILED",
domain: "LLM" /* LLM */,
category: "USER" /* USER */,
details: {
modelId: model.modelId,
modelProvider: model.provider,
runId: runId ?? "unknown",
threadId: threadId ?? "unknown",
resourceId: resourceId ?? "unknown"
}
},
e
);
llmSpan?.error({ error: mastraError });
throw mastraError;
}
}
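// Normalizes input into AI SDK messages: bare strings (alone or inside an
// array) become { role: "user", content } messages; message objects pass
// through unchanged.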
convertToMessages(messages) {
if (Array.isArray(messages)) {
return messages.map((m) => {
if (typeof m === "string") {
return {
role: "user",
content: m
};
}
return m;
});
}
return [
{
role: "user",
content: messages
}
];
}
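// Public one-shot entry point: without `output` it delegates to __text, with
// an `output` schema to __textObject. A minimal usage sketch (the model
// wiring and schema are assumptions, not part of this chunk):
//   const llm = new MastraLLMV1({ model });
//   const text = await llm.generate("Summarize this", { temperature: 0.2 });
//   console.log(text.text);
//   const data = await llm.generate("Extract fields", { output: myZodSchema });
//   console.log(data.object);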
async generate(messages, {
output,
...rest
}) {
const msgs = this.convertToMessages(messages);
if (!output) {
const { maxSteps, onStepFinish, ...textOptions } = rest;
return await this.__text({
messages: msgs,
maxSteps,
onStepFinish,
...textOptions
});
}
return await this.__textObject({
messages: msgs,
structuredOutput: output,
...rest
});
}
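// Public streaming entry point: without `output` it delegates to __stream,
// with an `output` schema to __streamObject. A hedged consumption sketch,
// assuming the AI SDK v1 stream result shape:
//   const result = llm.stream("Tell me a story", { maxSteps: 3 });
//   for await (const chunk of result.textStream) process.stdout.write(chunk);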
stream(messages, {
maxSteps = 5,
output,
onFinish,
...rest
}) {
const msgs = this.convertToMessages(messages);
if (!output) {
return this.__stream({
messages: msgs,
maxSteps,
onFinish,
...rest
});
}
return this.__streamObject({
messages: msgs,
structuredOutput: output,
onFinish,
...rest
});
}
};
exports.MastraLLMV1 = MastraLLMV1;
//# sourceMappingURL=chunk-CYEQK4PM.cjs.map