UNPKG

@mastra/core

Version:

The core foundation of the Mastra framework, providing essential components and interfaces for building AI-powered applications.

534 lines (511 loc) 16.4 kB
import { LibSQLVector } from './chunk-47NGQPZI.js'; import { DefaultProxyStorage, augmentWithInit } from './chunk-7ZR64JAJ.js'; import { deepMerge } from './chunk-JSUPD5IG.js'; import { MastraBase } from './chunk-CLJQYXNM.js'; import { existsSync } from 'fs'; import { join } from 'path'; import fsp from 'node:fs/promises'; import os from 'node:os'; import path from 'node:path'; import { experimental_customProvider } from 'ai'; async function getModelCachePath() { const cachePath = path.join(os.homedir(), ".cache", "mastra", "fastembed-models"); await fsp.mkdir(cachePath, { recursive: true }); return cachePath; } function unbundleableImport(name) { const nonStaticallyAnalyzableName = `${name}?d=${Date.now()}`; return import(nonStaticallyAnalyzableName.split(`?`)[0]); } async function generateEmbeddings(values, modelType) { try { let mod; const importErrors = []; { try { mod = await unbundleableImport("fastembed"); } catch (e) { if (e instanceof Error) { importErrors.push(e); } else { throw e; } } } if (!mod) { throw new Error(`${importErrors.map((e) => e.message).join(` `)} This runtime does not support fastembed-js, which is the default embedder in Mastra. Scroll up to read import errors. These errors mean you can't use the default Mastra embedder on this hosting platform. You can either use Mastra Cloud which supports the default embedder, or you can configure an alternate provider. For example if you're using Memory: import { openai } from "@ai-sdk/openai"; const memory = new Memory({ embedder: openai.embedding("text-embedding-3-small"), // <- doesn't have to be openai }) Visit https://sdk.vercel.ai/docs/foundations/overview#embedding-models to find an alternate embedding provider If you do not want to use the Memory semantic recall feature, you can disable it entirely and this error will go away. const memory = new Memory({ options: { semanticRecall: false // <- an embedder will not be required with this set to false } }) `); } const { FlagEmbedding, EmbeddingModel } = mod; const model = await FlagEmbedding.init({ model: EmbeddingModel[modelType], cacheDir: await getModelCachePath() }); const embeddings = await model.embed(values); const allResults = []; for await (const result of embeddings) { allResults.push(...result.map((embedding) => Array.from(embedding))); } if (allResults.length === 0) throw new Error("No embeddings generated"); return { embeddings: allResults }; } catch (error) { console.error("Error generating embeddings:", error); throw error; } } var fastEmbedProvider = experimental_customProvider({ textEmbeddingModels: { "bge-small-en-v1.5": { specificationVersion: "v1", provider: "fastembed", modelId: "bge-small-en-v1.5", maxEmbeddingsPerCall: 256, supportsParallelCalls: true, async doEmbed({ values }) { return generateEmbeddings(values, "BGESmallENV15"); } }, "bge-base-en-v1.5": { specificationVersion: "v1", provider: "fastembed", modelId: "bge-base-en-v1.5", maxEmbeddingsPerCall: 256, supportsParallelCalls: true, async doEmbed({ values }) { return generateEmbeddings(values, "BGEBaseENV15"); } } } }); var defaultEmbedder = fastEmbedProvider.textEmbeddingModel; // src/memory/memory.ts var MemoryProcessor = class extends MastraBase { /** * Process a list of messages and return a filtered or transformed list. * @param messages The messages to process * @returns The processed messages */ process(messages, _opts) { return messages; } }; var memoryDefaultOptions = { lastMessages: 40, semanticRecall: { topK: 2, messageRange: { before: 2, after: 2 } }, threads: { generateTitle: true } }; var newMemoryDefaultOptions = { lastMessages: 10, semanticRecall: false, threads: { generateTitle: false } }; var MastraMemory = class extends MastraBase { MAX_CONTEXT_TOKENS; storage; vector; embedder; processors = []; deprecationWarnings = []; threadConfig = { ...memoryDefaultOptions }; constructor(config) { super({ component: "MEMORY", name: config.name }); if (config.options) { this.threadConfig = this.getMergedThreadConfig(config.options); } if (config.storage) { this.storage = config.storage; } else { this.storage = new DefaultProxyStorage({ config: { url: "file:memory.db" } }); } this.storage = augmentWithInit(this.storage); const semanticRecallIsEnabled = this.threadConfig.semanticRecall !== false; if (config.vector && semanticRecallIsEnabled) { this.vector = config.vector; } else if ( // if there's no configured vector store // and the vector store hasn't been explicitly disabled with vector: false config.vector !== false && // and semanticRecall is enabled semanticRecallIsEnabled ) { const oldDb = "memory-vector.db"; const hasOldDb = existsSync(join(process.cwd(), oldDb)) || existsSync(join(process.cwd(), ".mastra", oldDb)); const newDb = "memory.db"; if (hasOldDb) { this.deprecationWarnings.push( `Found deprecated Memory vector db file ${oldDb} this db is now merged with the default ${newDb} file. Delete the old one to use the new one. You will need to migrate any data if that's important to you. For now the deprecated path will be used but in a future breaking change we will only use the new db file path.` ); } this.deprecationWarnings.push(` Default vector storage is deprecated in Mastra Memory. You're using it as an implicit default by not setting a vector store. Instead of this: export const agent = new Agent({ memory: new Memory({ options: { semanticRecall: true } }) }) Do this: import { LibSQLVector } from '@mastra/libsql'; export const agent = new Agent({ memory: new Memory({ options: { semanticRecall: true }, vector: new LibSQLVector({ connectionUrl: 'file:../memory.db' }) }) }) `); this.vector = new LibSQLVector({ connectionUrl: hasOldDb ? `file:${oldDb}` : `file:${newDb}` }); } if (config.embedder) { this.embedder = config.embedder; } else if ( // if there's no configured embedder // and there's a vector store typeof this.vector !== `undefined` && // and semanticRecall is enabled semanticRecallIsEnabled ) { this.embedder = defaultEmbedder("bge-small-en-v1.5"); } if (config.processors) { this.processors = config.processors; } this.addImplicitDefaultsWarning(config); if (this.deprecationWarnings.length > 0) { setTimeout(() => { this.logger?.warn(` !MEMORY DEPRECATION WARNING! ${this.deprecationWarnings.map((w, i) => `${this.deprecationWarnings.length > 1 ? `Warning ${i + 1}: ` : ``}${w}`).join(` `)} !END MEMORY DEPRECATION WARNING! `); }, 1e3); } } // We're changing the implicit defaults from memoryDefaultOptions to newMemoryDefaultOptions so we need to log and let people know addImplicitDefaultsWarning(config) { const fromToPairs = []; const indent = (s) => s.split(` `).join(` `); const format = (v) => typeof v === `object` && !Array.isArray(v) && v !== null ? indent(JSON.stringify(v, null, 2).replaceAll(`"`, ``)) : v; const options = config.options ?? {}; if (!(`lastMessages` in options)) fromToPairs.push({ key: "lastMessages", from: memoryDefaultOptions.lastMessages, to: newMemoryDefaultOptions.lastMessages }); if (!(`semanticRecall` in options)) fromToPairs.push({ key: "semanticRecall", from: memoryDefaultOptions.semanticRecall, to: newMemoryDefaultOptions.semanticRecall }); if (!(`threads` in options)) fromToPairs.push({ key: "threads", from: memoryDefaultOptions.threads, to: newMemoryDefaultOptions.threads }); if (fromToPairs.length > 0) { const currentDefaults = `{ options: { ${fromToPairs.map(({ key, from }) => `${key}: ${format(from)}`).join(`, `)} } }`; const upcomingDefaults = `{ options: { ${fromToPairs.map(({ key, to }) => `${key}: ${format(to)}`).join(`, `)} } }`; this.deprecationWarnings.push(` Your Mastra memory instance has the following implicit default options: new Memory(${currentDefaults}) In the next release these implicit defaults will be changed to the following default settings: new Memory(${upcomingDefaults}) To keep your defaults as they are, add them directly into your Memory configuration, otherwise please add the new settings to your memory config to prepare for the change. --> This breaking change will be released on May 20th <-- `); } } setStorage(storage) { if (storage instanceof DefaultProxyStorage) { this.deprecationWarnings.push(`Importing "DefaultStorage" from '@mastra/core/storage/libsql' is deprecated. Instead of: import { DefaultStorage } from '@mastra/core/storage/libsql'; Do: import { LibSQLStore } from '@mastra/libsql'; `); } this.storage = storage; } setVector(vector) { this.vector = vector; } setEmbedder(embedder) { this.embedder = embedder; } /** * Get a system message to inject into the conversation. * This will be called before each conversation turn. * Implementations can override this to inject custom system messages. */ async getSystemMessage(_input) { return null; } /** * Get tools that should be available to the agent. * This will be called when converting tools for the agent. * Implementations can override this to provide additional tools. */ getTools(_config) { return {}; } async createEmbeddingIndex(dimensions) { const defaultDimensions = 1536; const isDefault = dimensions === defaultDimensions; const usedDimensions = dimensions ?? defaultDimensions; const indexName = isDefault ? "memory_messages" : `memory_messages_${usedDimensions}`; if (typeof this.vector === `undefined`) { throw new Error(`Tried to create embedding index but no vector db is attached to this Memory instance.`); } await this.vector.createIndex({ indexName, dimension: usedDimensions }); return { indexName }; } getMergedThreadConfig(config) { return deepMerge(this.threadConfig, config || {}); } /** * Apply all configured message processors to a list of messages. * @param messages The messages to process * @returns The processed messages */ applyProcessors(messages, opts) { const processors = opts.processors || this.processors; if (!processors || processors.length === 0) { return messages; } let processedMessages = [...messages]; for (const processor of processors) { processedMessages = processor.process(processedMessages, { systemMessage: opts.systemMessage, newMessages: opts.newMessages, memorySystemMessage: opts.memorySystemMessage }); } return processedMessages; } processMessages({ messages, processors, ...opts }) { return this.applyProcessors(messages, { processors: processors || this.processors, ...opts }); } estimateTokens(text) { return Math.ceil(text.split(" ").length * 1.3); } parseMessages(messages) { return messages.map((msg) => { let content = msg.content; if (typeof content === "string" && (content.startsWith("[") || content.startsWith("{"))) { try { content = JSON.parse(content); } catch { } } else if (typeof content === "number") { content = String(content); } return { ...msg, content }; }); } convertToUIMessages(messages) { function addToolMessageToChat({ toolMessage, messages: messages2, toolResultContents }) { const chatMessages2 = messages2.map((message) => { if (message.toolInvocations) { return { ...message, toolInvocations: message.toolInvocations.map((toolInvocation) => { const toolResult = toolMessage.content.find((tool) => tool.toolCallId === toolInvocation.toolCallId); if (toolResult) { return { ...toolInvocation, state: "result", result: toolResult.result }; } return toolInvocation; }) }; } return message; }); const resultContents = [...toolResultContents, ...toolMessage.content]; return { chatMessages: chatMessages2, toolResultContents: resultContents }; } const { chatMessages } = messages.reduce( (obj, message) => { if (message.role === "tool") { return addToolMessageToChat({ toolMessage: message, messages: obj.chatMessages, toolResultContents: obj.toolResultContents }); } let textContent = ""; let toolInvocations = []; if (typeof message.content === "string") { textContent = message.content; } else if (typeof message.content === "number") { textContent = String(message.content); } else if (Array.isArray(message.content)) { for (const content of message.content) { if (content.type === "text") { textContent += content.text; } else if (content.type === "tool-call") { const toolResult = obj.toolResultContents.find((tool) => tool.toolCallId === content.toolCallId); toolInvocations.push({ state: toolResult ? "result" : "call", toolCallId: content.toolCallId, toolName: content.toolName, args: content.args, result: toolResult?.result }); } } } obj.chatMessages.push({ id: message.id, role: message.role, content: textContent, toolInvocations, createdAt: message.createdAt }); return obj; }, { chatMessages: [], toolResultContents: [] } ); return chatMessages; } /** * Helper method to create a new thread * @param title - Optional title for the thread * @param metadata - Optional metadata for the thread * @returns Promise resolving to the created thread */ async createThread({ threadId, resourceId, title, metadata, memoryConfig }) { const thread = { id: threadId || this.generateId(), title: title || `New Thread ${(/* @__PURE__ */ new Date()).toISOString()}`, resourceId, createdAt: /* @__PURE__ */ new Date(), updatedAt: /* @__PURE__ */ new Date(), metadata }; return this.saveThread({ thread, memoryConfig }); } /** * Helper method to add a single message to a thread * @param threadId - The thread to add the message to * @param content - The message content * @param role - The role of the message sender * @param type - The type of the message * @param toolNames - Optional array of tool names that were called * @param toolCallArgs - Optional array of tool call arguments * @param toolCallIds - Optional array of tool call ids * @returns Promise resolving to the saved message */ async addMessage({ threadId, resourceId, config, content, role, type, toolNames, toolCallArgs, toolCallIds }) { const message = { id: this.generateId(), content, role, createdAt: /* @__PURE__ */ new Date(), threadId, resourceId, type, toolNames, toolCallArgs, toolCallIds }; const savedMessages = await this.saveMessages({ messages: [message], memoryConfig: config }); return savedMessages[0]; } /** * Generates a unique identifier * @returns A unique string ID */ generateId() { return crypto.randomUUID(); } }; export { MastraMemory, MemoryProcessor, memoryDefaultOptions };