@mastra/core

Mastra is a framework for building AI-powered applications and agents with a modern TypeScript stack.

776 lines (774 loc) • 22.3 kB
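
For orientation, here is a minimal, hypothetical sketch of how the MastraLLMV1 class exported by this chunk could be instantiated and called. The openai() model factory from @ai-sdk/openai, the model id, and the direct import from the chunk file are assumptions for illustration only; an application would normally reach this class through @mastra/core's public entry points. Note that generate() expects an options object and that a tracingContext (even an empty one) must be supplied, because the implementation below dereferences tracingContext.currentSpan.

import { openai } from '@ai-sdk/openai';            // assumed AI SDK provider package
import { MastraLLMV1 } from './chunk-6D2K2CAA.js';  // illustrative import path

// Any AI SDK language model can be passed as `model`; the model id is hypothetical.
const llm = new MastraLLMV1({ model: openai('gpt-4o-mini') });

// `tracingContext: {}` is required because __text reads tracingContext.currentSpan.
const result = await llm.generate('Write a haiku about TypeScript.', { tracingContext: {} });
console.log(result.text);
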
import { isZodType, delay } from './chunk-WIMFJ2BA.js';
import { MastraError } from './chunk-PZUZNPFM.js';
import { MastraBase } from './chunk-VQASQG5D.js';
import {
  OpenAIReasoningSchemaCompatLayer,
  OpenAISchemaCompatLayer,
  GoogleSchemaCompatLayer,
  AnthropicSchemaCompatLayer,
  DeepSeekSchemaCompatLayer,
  MetaSchemaCompatLayer,
  applyCompatLayer
} from '@mastra/schema-compat';
import { zodToJsonSchema } from '@mastra/schema-compat/zod-to-json';
import { jsonSchema, Output, generateText, generateObject, streamText, streamObject } from 'ai';
import { z } from 'zod';

// MastraLLMV1 wraps the AI SDK generate/stream functions (generateText, generateObject,
// streamText, streamObject) behind Mastra's logging, tracing, schema-compatibility and
// error-handling conventions.
var MastraLLMV1 = class extends MastraBase {
  #model;
  #mastra;
  #options;
  constructor({ model, mastra, options }) {
    super({ name: "aisdk" });
    this.#model = model;
    this.#options = options;
    if (mastra) {
      this.#mastra = mastra;
      if (mastra.getLogger()) {
        this.__setLogger(this.#mastra.getLogger());
      }
    }
  }
  __registerPrimitives(p) {
    if (p.telemetry) {
      this.__setTelemetry(p.telemetry);
    }
    if (p.logger) {
      this.__setLogger(p.logger);
    }
  }
  __registerMastra(p) {
    this.#mastra = p;
  }
  getProvider() {
    return this.#model.provider;
  }
  getModelId() {
    return this.#model.modelId;
  }
  getModel() {
    return this.#model;
  }
  // Runs a user-supplied schema through provider-specific compatibility layers
  // before handing it to the AI SDK.
  _applySchemaCompat(schema) {
    const model = this.#model;
    const schemaCompatLayers = [];
    if (model) {
      const modelInfo = {
        modelId: model.modelId,
        supportsStructuredOutputs: model.supportsStructuredOutputs ?? false,
        provider: model.provider
      };
      schemaCompatLayers.push(
        new OpenAIReasoningSchemaCompatLayer(modelInfo),
        new OpenAISchemaCompatLayer(modelInfo),
        new GoogleSchemaCompatLayer(modelInfo),
        new AnthropicSchemaCompatLayer(modelInfo),
        new DeepSeekSchemaCompatLayer(modelInfo),
        new MetaSchemaCompatLayer(modelInfo)
      );
    }
    return applyCompatLayer({
      schema,
      compatLayers: schemaCompatLayers,
      mode: "aiSdkSchema"
    });
  }
  // Non-streaming text generation, optionally with experimental structured output.
  async __text({ runId, messages, maxSteps = 5, tools = {}, temperature, toolChoice = "auto", onStepFinish, experimental_output, telemetry, threadId, resourceId, runtimeContext, tracingContext, ...rest }) {
    const model = this.#model;
    this.logger.debug(`[LLM] - Generating text`, {
      runId,
      messages,
      maxSteps,
      threadId,
      resourceId,
      tools: Object.keys(tools)
    });
    let schema = void 0;
    if (experimental_output) {
      this.logger.debug("[LLM] - Using experimental output", { runId });
      if (isZodType(experimental_output)) {
        schema = experimental_output;
        if (schema instanceof z.ZodArray) {
          schema = schema._def.type;
        }
        const jsonSchemaToUse = zodToJsonSchema(schema, "jsonSchema7");
        schema = jsonSchema(jsonSchemaToUse);
      } else {
        schema = jsonSchema(experimental_output);
      }
    }
    const llmSpan = tracingContext.currentSpan?.createChildSpan({
      name: `llm: '${model.modelId}'`,
      type: "model_generation" /* MODEL_GENERATION */,
      input: { messages, schema },
      attributes: {
        model: model.modelId,
        provider: model.provider,
        parameters: {
          temperature,
          maxOutputTokens: rest.maxTokens,
          topP: rest.topP,
          frequencyPenalty: rest.frequencyPenalty,
          presencePenalty: rest.presencePenalty
        },
        streaming: false
      },
      metadata: { runId, threadId, resourceId },
      tracingPolicy: this.#options?.tracingPolicy
    });
    const argsForExecute = {
      ...rest,
      messages,
      model,
      temperature,
      tools: { ...tools },
      toolChoice,
      maxSteps,
      onStepFinish: async (props) => {
        try {
          await onStepFinish?.({ ...props, runId });
        } catch (e) {
          const mastraError = new MastraError(
            {
              id: "LLM_TEXT_ON_STEP_FINISH_CALLBACK_EXECUTION_FAILED",
              domain: "LLM" /* LLM */,
              category: "USER" /* USER */,
              details: {
                modelId: model.modelId,
                modelProvider: model.provider,
                runId: runId ?? "unknown",
                threadId: threadId ?? "unknown",
                resourceId: resourceId ?? "unknown",
                finishReason: props?.finishReason,
                toolCalls: props?.toolCalls ? JSON.stringify(props.toolCalls) : "",
                toolResults: props?.toolResults ? JSON.stringify(props.toolResults) : "",
                usage: props?.usage ? JSON.stringify(props.usage) : ""
              }
            },
            e
          );
          throw mastraError;
        }
        this.logger.debug("[LLM] - Text Step Change:", {
          text: props?.text,
          toolCalls: props?.toolCalls,
          toolResults: props?.toolResults,
          finishReason: props?.finishReason,
          usage: props?.usage,
          runId
        });
        // Back off briefly when the provider's token rate limit is nearly exhausted.
        const remainingTokens = parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"] ?? "", 10);
        if (!isNaN(remainingTokens) && remainingTokens > 0 && remainingTokens < 2e3) {
          this.logger.warn("Rate limit approaching, waiting 10 seconds", { runId });
          await delay(10 * 1e3);
        }
      },
      experimental_telemetry: {
        ...this.experimental_telemetry,
        ...telemetry
      },
      experimental_output: schema ? Output.object({ schema }) : void 0
    };
    try {
      const result = await generateText(argsForExecute);
      if (schema && result.finishReason === "stop") {
        result.object = result.experimental_output;
      }
      llmSpan?.end({
        output: {
          text: result.text,
          object: result.object,
          reasoning: result.reasoningDetails,
          reasoningText: result.reasoning,
          files: result.files,
          sources: result.sources,
          warnings: result.warnings
        },
        attributes: {
          finishReason: result.finishReason,
          usage: result.usage
        }
      });
      return result;
    } catch (e) {
      const mastraError = new MastraError(
        {
          id: "LLM_GENERATE_TEXT_AI_SDK_EXECUTION_FAILED",
          domain: "LLM" /* LLM */,
          category: "THIRD_PARTY" /* THIRD_PARTY */,
          details: {
            modelId: model.modelId,
            modelProvider: model.provider,
            runId: runId ?? "unknown",
            threadId: threadId ?? "unknown",
            resourceId: resourceId ?? "unknown"
          }
        },
        e
      );
      llmSpan?.error({ error: mastraError });
      throw mastraError;
    }
  }
  // Non-streaming structured-output generation via generateObject.
  async __textObject({ messages, structuredOutput, runId, telemetry, threadId, resourceId, runtimeContext, tracingContext, ...rest }) {
    const model = this.#model;
    this.logger.debug(`[LLM] - Generating a text object`, { runId });
    const llmSpan = tracingContext.currentSpan?.createChildSpan({
      name: `llm: '${model.modelId}'`,
      type: "model_generation" /* MODEL_GENERATION */,
      input: { messages },
      attributes: {
        model: model.modelId,
        provider: model.provider,
        parameters: {
          temperature: rest.temperature,
          maxOutputTokens: rest.maxTokens,
          topP: rest.topP,
          frequencyPenalty: rest.frequencyPenalty,
          presencePenalty: rest.presencePenalty
        },
        streaming: false
      },
      metadata: { runId, threadId, resourceId },
      tracingPolicy: this.#options?.tracingPolicy
    });
    try {
      let output = "object";
      if (structuredOutput instanceof z.ZodArray) {
        output = "array";
        structuredOutput = structuredOutput._def.type;
      }
      const processedSchema = this._applySchemaCompat(structuredOutput);
      llmSpan?.update({
        input: { messages, schema: processedSchema }
      });
      const argsForExecute = {
        ...rest,
        messages,
        model,
        // @ts-expect-error - output in our implementation can only be object or array
        output,
        schema: processedSchema,
        experimental_telemetry: {
          ...this.experimental_telemetry,
          ...telemetry
        }
      };
      try {
        const result = await generateObject(argsForExecute);
        llmSpan?.end({
          output: {
            object: result.object,
            warnings: result.warnings
          },
          attributes: {
            finishReason: result.finishReason,
            usage: result.usage
          }
        });
        return result;
      } catch (e) {
        const mastraError = new MastraError(
          {
            id: "LLM_GENERATE_OBJECT_AI_SDK_EXECUTION_FAILED",
            domain: "LLM" /* LLM */,
            category: "THIRD_PARTY" /* THIRD_PARTY */,
            details: {
              modelId: model.modelId,
              modelProvider: model.provider,
              runId: runId ?? "unknown",
              threadId: threadId ?? "unknown",
              resourceId: resourceId ?? "unknown"
            }
          },
          e
        );
        llmSpan?.error({ error: mastraError });
        throw mastraError;
      }
    } catch (e) {
      if (e instanceof MastraError) {
        throw e;
      }
      const mastraError = new MastraError(
        {
          id: "LLM_GENERATE_OBJECT_AI_SDK_SCHEMA_CONVERSION_FAILED",
          domain: "LLM" /* LLM */,
          category: "USER" /* USER */,
          details: {
            modelId: model.modelId,
            modelProvider: model.provider,
            runId: runId ?? "unknown",
            threadId: threadId ?? "unknown",
            resourceId: resourceId ?? "unknown"
          }
        },
        e
      );
      llmSpan?.error({ error: mastraError });
      throw mastraError;
    }
  }
  // Streaming text generation via streamText.
  __stream({ messages, onStepFinish, onFinish, maxSteps = 5, tools = {}, runId, temperature, toolChoice = "auto", experimental_output, telemetry, threadId, resourceId, runtimeContext, tracingContext, ...rest }) {
    const model = this.#model;
    this.logger.debug(`[LLM] - Streaming text`, {
      runId,
      threadId,
      resourceId,
      messages,
      maxSteps,
      tools: Object.keys(tools || {})
    });
    let schema;
    if (experimental_output) {
      this.logger.debug("[LLM] - Using experimental output", { runId });
      if (typeof experimental_output.parse === "function") {
        schema = experimental_output;
        if (schema instanceof z.ZodArray) {
          schema = schema._def.type;
        }
      } else {
        schema = jsonSchema(experimental_output);
      }
    }
    const llmSpan = tracingContext.currentSpan?.createChildSpan({
      name: `llm: '${model.modelId}'`,
      type: "model_generation" /* MODEL_GENERATION */,
      input: { messages },
      attributes: {
        model: model.modelId,
        provider: model.provider,
        parameters: {
          temperature,
          maxOutputTokens: rest.maxTokens,
          topP: rest.topP,
          frequencyPenalty: rest.frequencyPenalty,
          presencePenalty: rest.presencePenalty
        },
        streaming: true
      },
      metadata: { runId, threadId, resourceId },
      tracingPolicy: this.#options?.tracingPolicy
    });
    const argsForExecute = {
      model,
      temperature,
      tools: { ...tools },
      maxSteps,
      toolChoice,
      onStepFinish: async (props) => {
        try {
          await onStepFinish?.({ ...props, runId });
        } catch (e) {
          const mastraError = new MastraError(
            {
              id: "LLM_STREAM_ON_STEP_FINISH_CALLBACK_EXECUTION_FAILED",
              domain: "LLM" /* LLM */,
              category: "USER" /* USER */,
              details: {
                modelId: model.modelId,
                modelProvider: model.provider,
                runId: runId ?? "unknown",
                threadId: threadId ?? "unknown",
                resourceId: resourceId ?? "unknown",
                finishReason: props?.finishReason,
                toolCalls: props?.toolCalls ? JSON.stringify(props.toolCalls) : "",
                toolResults: props?.toolResults ? JSON.stringify(props.toolResults) : "",
                usage: props?.usage ? JSON.stringify(props.usage) : ""
              }
            },
            e
          );
          this.logger.trackException(mastraError);
          llmSpan?.error({ error: mastraError });
          throw mastraError;
        }
        this.logger.debug("[LLM] - Stream Step Change:", {
          text: props?.text,
          toolCalls: props?.toolCalls,
          toolResults: props?.toolResults,
          finishReason: props?.finishReason,
          usage: props?.usage,
          runId
        });
        // Back off briefly when the provider's token rate limit is nearly exhausted.
        const remainingTokens = parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"] ?? "", 10);
        if (!isNaN(remainingTokens) && remainingTokens > 0 && remainingTokens < 2e3) {
          this.logger.warn("Rate limit approaching, waiting 10 seconds", { runId });
          await delay(10 * 1e3);
        }
      },
      onFinish: async (props) => {
        llmSpan?.end({
          output: {
            text: props?.text,
            reasoning: props?.reasoningDetails,
            reasoningText: props?.reasoning,
            files: props?.files,
            sources: props?.sources,
            warnings: props?.warnings
          },
          attributes: {
            finishReason: props?.finishReason,
            usage: props?.usage
          }
        });
        try {
          await onFinish?.({ ...props, runId });
        } catch (e) {
          const mastraError = new MastraError(
            {
              id: "LLM_STREAM_ON_FINISH_CALLBACK_EXECUTION_FAILED",
              domain: "LLM" /* LLM */,
              category: "USER" /* USER */,
              details: {
                modelId: model.modelId,
                modelProvider: model.provider,
                runId: runId ?? "unknown",
                threadId: threadId ?? "unknown",
                resourceId: resourceId ?? "unknown",
                finishReason: props?.finishReason,
                toolCalls: props?.toolCalls ? JSON.stringify(props.toolCalls) : "",
                toolResults: props?.toolResults ? JSON.stringify(props.toolResults) : "",
                usage: props?.usage ? JSON.stringify(props.usage) : ""
              }
            },
            e
          );
          llmSpan?.error({ error: mastraError });
          this.logger.trackException(mastraError);
          throw mastraError;
        }
        this.logger.debug("[LLM] - Stream Finished:", {
          text: props?.text,
          toolCalls: props?.toolCalls,
          toolResults: props?.toolResults,
          finishReason: props?.finishReason,
          usage: props?.usage,
          runId,
          threadId,
          resourceId
        });
      },
      ...rest,
      messages,
      experimental_telemetry: {
        ...this.experimental_telemetry,
        ...telemetry
      },
      experimental_output: schema ? Output.object({ schema }) : void 0
    };
    try {
      return streamText(argsForExecute);
    } catch (e) {
      const mastraError = new MastraError(
        {
          id: "LLM_STREAM_TEXT_AI_SDK_EXECUTION_FAILED",
          domain: "LLM" /* LLM */,
          category: "THIRD_PARTY" /* THIRD_PARTY */,
          details: {
            modelId: model.modelId,
            modelProvider: model.provider,
            runId: runId ?? "unknown",
            threadId: threadId ?? "unknown",
            resourceId: resourceId ?? "unknown"
          }
        },
        e
      );
      llmSpan?.error({ error: mastraError });
      throw mastraError;
    }
  }
  // Streaming structured-output generation via streamObject.
  __streamObject({ messages, runId, runtimeContext, threadId, resourceId, onFinish, structuredOutput, telemetry, tracingContext, ...rest }) {
    const model = this.#model;
    this.logger.debug(`[LLM] - Streaming structured output`, { runId, messages });
    const llmSpan = tracingContext.currentSpan?.createChildSpan({
      name: `llm: '${model.modelId}'`,
      type: "model_generation" /* MODEL_GENERATION */,
      input: { messages },
      attributes: {
        model: model.modelId,
        provider: model.provider,
        parameters: {
          temperature: rest.temperature,
          maxOutputTokens: rest.maxTokens,
          topP: rest.topP,
          frequencyPenalty: rest.frequencyPenalty,
          presencePenalty: rest.presencePenalty
        },
        streaming: true
      },
      metadata: { runId, threadId, resourceId },
      tracingPolicy: this.#options?.tracingPolicy
    });
    try {
      let output = "object";
      if (structuredOutput instanceof z.ZodArray) {
        output = "array";
        structuredOutput = structuredOutput._def.type;
      }
      const processedSchema = this._applySchemaCompat(structuredOutput);
      llmSpan?.update({
        input: { messages, schema: processedSchema }
      });
      const argsForExecute = {
        ...rest,
        model,
        onFinish: async (props) => {
          llmSpan?.end({
            output: {
              text: props?.text,
              object: props?.object,
              reasoning: props?.reasoningDetails,
              reasoningText: props?.reasoning,
              files: props?.files,
              sources: props?.sources,
              warnings: props?.warnings
            },
            attributes: {
              finishReason: props?.finishReason,
              usage: props?.usage
            }
          });
          try {
            await onFinish?.({ ...props, runId });
          } catch (e) {
            const mastraError = new MastraError(
              {
                id: "LLM_STREAM_OBJECT_ON_FINISH_CALLBACK_EXECUTION_FAILED",
                domain: "LLM" /* LLM */,
                category: "USER" /* USER */,
                details: {
                  modelId: model.modelId,
                  modelProvider: model.provider,
                  runId: runId ?? "unknown",
                  threadId: threadId ?? "unknown",
                  resourceId: resourceId ?? "unknown",
                  toolCalls: "",
                  toolResults: "",
                  finishReason: "",
                  usage: props?.usage ? JSON.stringify(props.usage) : ""
                }
              },
              e
            );
            this.logger.trackException(mastraError);
            llmSpan?.error({ error: mastraError });
            throw mastraError;
          }
          this.logger.debug("[LLM] - Object Stream Finished:", {
            usage: props?.usage,
            runId,
            threadId,
            resourceId
          });
        },
        messages,
        // @ts-expect-error - output in our implementation can only be object or array
        output,
        experimental_telemetry: {
          ...this.experimental_telemetry,
          ...telemetry
        },
        schema: processedSchema
      };
      try {
        return streamObject(argsForExecute);
      } catch (e) {
        const mastraError = new MastraError(
          {
            id: "LLM_STREAM_OBJECT_AI_SDK_EXECUTION_FAILED",
            domain: "LLM" /* LLM */,
            category: "THIRD_PARTY" /* THIRD_PARTY */,
            details: {
              modelId: model.modelId,
              modelProvider: model.provider,
              runId: runId ?? "unknown",
              threadId: threadId ?? "unknown",
              resourceId: resourceId ?? "unknown"
            }
          },
          e
        );
        llmSpan?.error({ error: mastraError });
        throw mastraError;
      }
    } catch (e) {
      if (e instanceof MastraError) {
        llmSpan?.error({ error: e });
        throw e;
      }
      const mastraError = new MastraError(
        {
          id: "LLM_STREAM_OBJECT_AI_SDK_SCHEMA_CONVERSION_FAILED",
          domain: "LLM" /* LLM */,
          category: "USER" /* USER */,
          details: {
            modelId: model.modelId,
            modelProvider: model.provider,
            runId: runId ?? "unknown",
            threadId: threadId ?? "unknown",
            resourceId: resourceId ?? "unknown"
          }
        },
        e
      );
      llmSpan?.error({ error: mastraError });
      throw mastraError;
    }
  }
  // Normalizes string or mixed input into AI SDK message objects.
  convertToMessages(messages) {
    if (Array.isArray(messages)) {
      return messages.map((m) => {
        if (typeof m === "string") {
          return { role: "user", content: m };
        }
        return m;
      });
    }
    return [{ role: "user", content: messages }];
  }
  // Public entry point: routes to plain text or structured-output generation
  // depending on whether `output` is provided.
  async generate(messages, { output, ...rest }) {
    const msgs = this.convertToMessages(messages);
    if (!output) {
      const { maxSteps, onStepFinish, ...textOptions } = rest;
      return await this.__text({
        messages: msgs,
        maxSteps,
        onStepFinish,
        ...textOptions
      });
    }
    return await this.__textObject({
      messages: msgs,
      structuredOutput: output,
      ...rest
    });
  }
  // Public entry point: routes to streaming text or streaming structured output.
  stream(messages, { maxSteps = 5, output, onFinish, ...rest }) {
    const msgs = this.convertToMessages(messages);
    if (!output) {
      return this.__stream({
        messages: msgs,
        maxSteps,
        onFinish,
        ...rest
      });
    }
    return this.__streamObject({
      messages: msgs,
      structuredOutput: output,
      onFinish,
      ...rest
    });
  }
};

export { MastraLLMV1 };
//# sourceMappingURL=chunk-6D2K2CAA.js.map