mem0ai
The Memory Layer For Your AI Apps
1 line • 274 kB
Source Map (JSON)
{"version":3,"sources":["../../src/oss/src/memory/index.ts","../../src/oss/src/types/index.ts","../../src/oss/src/embeddings/openai.ts","../../src/oss/src/embeddings/ollama.ts","../../src/oss/src/utils/logger.ts","../../src/oss/src/llms/openai.ts","../../src/oss/src/llms/openai_structured.ts","../../src/oss/src/llms/anthropic.ts","../../src/oss/src/llms/groq.ts","../../src/oss/src/llms/mistral.ts","../../src/oss/src/vector_stores/memory.ts","../../src/oss/src/vector_stores/qdrant.ts","../../src/oss/src/vector_stores/redis.ts","../../src/oss/src/llms/ollama.ts","../../src/oss/src/vector_stores/supabase.ts","../../src/oss/src/storage/SQLiteManager.ts","../../src/oss/src/storage/MemoryHistoryManager.ts","../../src/oss/src/storage/SupabaseHistoryManager.ts","../../src/oss/src/embeddings/google.ts","../../src/oss/src/llms/google.ts","../../src/oss/src/llms/azure.ts","../../src/oss/src/embeddings/azure.ts","../../src/oss/src/llms/langchain.ts","../../src/oss/src/prompts/index.ts","../../src/oss/src/graphs/tools.ts","../../src/oss/src/embeddings/langchain.ts","../../src/oss/src/vector_stores/langchain.ts","../../src/oss/src/utils/factory.ts","../../src/oss/src/storage/DummyHistoryManager.ts","../../src/oss/src/config/defaults.ts","../../src/oss/src/config/manager.ts","../../src/oss/src/memory/graph_memory.ts","../../src/oss/src/utils/bm25.ts","../../src/oss/src/graphs/utils.ts","../../src/oss/src/utils/memory.ts","../../src/oss/src/utils/telemetry.ts"],"sourcesContent":["import { v4 as uuidv4 } from \"uuid\";\nimport { createHash } from \"crypto\";\nimport {\n MemoryConfig,\n MemoryConfigSchema,\n MemoryItem,\n Message,\n SearchFilters,\n SearchResult,\n} from \"../types\";\nimport {\n EmbedderFactory,\n LLMFactory,\n VectorStoreFactory,\n HistoryManagerFactory,\n} from \"../utils/factory\";\nimport {\n getFactRetrievalMessages,\n getUpdateMemoryMessages,\n parseMessages,\n removeCodeBlocks,\n} from \"../prompts\";\nimport { DummyHistoryManager } from \"../storage/DummyHistoryManager\";\nimport { Embedder } from \"../embeddings/base\";\nimport { LLM } from \"../llms/base\";\nimport { VectorStore } from \"../vector_stores/base\";\nimport { ConfigManager } from \"../config/manager\";\nimport { MemoryGraph } from \"./graph_memory\";\nimport {\n AddMemoryOptions,\n SearchMemoryOptions,\n DeleteAllMemoryOptions,\n GetAllMemoryOptions,\n} from \"./memory.types\";\nimport { parse_vision_messages } from \"../utils/memory\";\nimport { HistoryManager } from \"../storage/base\";\nimport { captureClientEvent } from \"../utils/telemetry\";\n\nexport class Memory {\n private config: MemoryConfig;\n private customPrompt: string | undefined;\n private embedder: Embedder;\n private vectorStore: VectorStore;\n private llm: LLM;\n private db: HistoryManager;\n private collectionName: string | undefined;\n private apiVersion: string;\n private graphMemory?: MemoryGraph;\n private enableGraph: boolean;\n telemetryId: string;\n\n constructor(config: Partial<MemoryConfig> = {}) {\n // Merge and validate config\n this.config = ConfigManager.mergeConfig(config);\n\n this.customPrompt = this.config.customPrompt;\n this.embedder = EmbedderFactory.create(\n this.config.embedder.provider,\n this.config.embedder.config,\n );\n this.vectorStore = VectorStoreFactory.create(\n this.config.vectorStore.provider,\n this.config.vectorStore.config,\n );\n this.llm = LLMFactory.create(\n this.config.llm.provider,\n this.config.llm.config,\n );\n if (this.config.disableHistory) {\n this.db = new DummyHistoryManager();\n } else {\n 
const defaultConfig = {\n provider: \"sqlite\",\n config: {\n historyDbPath: this.config.historyDbPath || \":memory:\",\n },\n };\n\n this.db =\n this.config.historyStore && !this.config.disableHistory\n ? HistoryManagerFactory.create(\n this.config.historyStore.provider,\n this.config.historyStore,\n )\n : HistoryManagerFactory.create(\"sqlite\", defaultConfig);\n }\n\n this.collectionName = this.config.vectorStore.config.collectionName;\n this.apiVersion = this.config.version || \"v1.0\";\n this.enableGraph = this.config.enableGraph || false;\n this.telemetryId = \"anonymous\";\n\n // Initialize graph memory if configured\n if (this.enableGraph && this.config.graphStore) {\n this.graphMemory = new MemoryGraph(this.config);\n }\n\n // Initialize telemetry if vector store is initialized\n this._initializeTelemetry();\n }\n\n private async _initializeTelemetry() {\n try {\n await this._getTelemetryId();\n\n // Capture initialization event\n await captureClientEvent(\"init\", this, {\n api_version: this.apiVersion,\n client_type: \"Memory\",\n collection_name: this.collectionName,\n enable_graph: this.enableGraph,\n });\n } catch (error) {}\n }\n\n private async _getTelemetryId() {\n try {\n if (\n !this.telemetryId ||\n this.telemetryId === \"anonymous\" ||\n this.telemetryId === \"anonymous-supabase\"\n ) {\n this.telemetryId = await this.vectorStore.getUserId();\n }\n return this.telemetryId;\n } catch (error) {\n this.telemetryId = \"anonymous\";\n return this.telemetryId;\n }\n }\n\n private async _captureEvent(methodName: string, additionalData = {}) {\n try {\n await this._getTelemetryId();\n await captureClientEvent(methodName, this, {\n ...additionalData,\n api_version: this.apiVersion,\n collection_name: this.collectionName,\n });\n } catch (error) {\n console.error(`Failed to capture ${methodName} event:`, error);\n }\n }\n\n static fromConfig(configDict: Record<string, any>): Memory {\n try {\n const config = MemoryConfigSchema.parse(configDict);\n return new Memory(config);\n } catch (e) {\n console.error(\"Configuration validation error:\", e);\n throw e;\n }\n }\n\n async add(\n messages: string | Message[],\n config: AddMemoryOptions,\n ): Promise<SearchResult> {\n await this._captureEvent(\"add\", {\n message_count: Array.isArray(messages) ? messages.length : 1,\n has_metadata: !!config.metadata,\n has_filters: !!config.filters,\n infer: config.infer,\n });\n const {\n userId,\n agentId,\n runId,\n metadata = {},\n filters = {},\n infer = true,\n } = config;\n\n if (userId) filters.userId = metadata.userId = userId;\n if (agentId) filters.agentId = metadata.agentId = agentId;\n if (runId) filters.runId = metadata.runId = runId;\n\n if (!filters.userId && !filters.agentId && !filters.runId) {\n throw new Error(\n \"One of the filters: userId, agentId or runId is required!\",\n );\n }\n\n const parsedMessages = Array.isArray(messages)\n ? 
(messages as Message[])\n : [{ role: \"user\", content: messages }];\n\n const final_parsedMessages = await parse_vision_messages(parsedMessages);\n\n // Add to vector store\n const vectorStoreResult = await this.addToVectorStore(\n final_parsedMessages,\n metadata,\n filters,\n infer,\n );\n\n // Add to graph store if available\n let graphResult;\n if (this.graphMemory) {\n try {\n graphResult = await this.graphMemory.add(\n final_parsedMessages.map((m) => m.content).join(\"\\n\"),\n filters,\n );\n } catch (error) {\n console.error(\"Error adding to graph memory:\", error);\n }\n }\n\n return {\n results: vectorStoreResult,\n relations: graphResult?.relations,\n };\n }\n\n private async addToVectorStore(\n messages: Message[],\n metadata: Record<string, any>,\n filters: SearchFilters,\n infer: boolean,\n ): Promise<MemoryItem[]> {\n if (!infer) {\n const returnedMemories: MemoryItem[] = [];\n for (const message of messages) {\n if (message.content === \"system\") {\n continue;\n }\n const memoryId = await this.createMemory(\n message.content as string,\n {},\n metadata,\n );\n returnedMemories.push({\n id: memoryId,\n memory: message.content as string,\n metadata: { event: \"ADD\" },\n });\n }\n return returnedMemories;\n }\n const parsedMessages = messages.map((m) => m.content).join(\"\\n\");\n\n const [systemPrompt, userPrompt] = this.customPrompt\n ? [this.customPrompt, `Input:\\n${parsedMessages}`]\n : getFactRetrievalMessages(parsedMessages);\n\n const response = await this.llm.generateResponse(\n [\n { role: \"system\", content: systemPrompt },\n { role: \"user\", content: userPrompt },\n ],\n { type: \"json_object\" },\n );\n\n const cleanResponse = removeCodeBlocks(response as string);\n let facts: string[] = [];\n try {\n facts = JSON.parse(cleanResponse).facts || [];\n } catch (e) {\n console.error(\n \"Failed to parse facts from LLM response:\",\n cleanResponse,\n e,\n );\n facts = [];\n }\n\n // Get embeddings for new facts\n const newMessageEmbeddings: Record<string, number[]> = {};\n const retrievedOldMemory: Array<{ id: string; text: string }> = [];\n\n // Create embeddings and search for similar memories\n for (const fact of facts) {\n const embedding = await this.embedder.embed(fact);\n newMessageEmbeddings[fact] = embedding;\n\n const existingMemories = await this.vectorStore.search(\n embedding,\n 5,\n filters,\n );\n for (const mem of existingMemories) {\n retrievedOldMemory.push({ id: mem.id, text: mem.payload.data });\n }\n }\n\n // Remove duplicates from old memories\n const uniqueOldMemories = retrievedOldMemory.filter(\n (mem, index) =>\n retrievedOldMemory.findIndex((m) => m.id === mem.id) === index,\n );\n\n // Create UUID mapping for handling UUID hallucinations\n const tempUuidMapping: Record<string, string> = {};\n uniqueOldMemories.forEach((item, idx) => {\n tempUuidMapping[String(idx)] = item.id;\n uniqueOldMemories[idx].id = String(idx);\n });\n\n // Get memory update decisions\n const updatePrompt = getUpdateMemoryMessages(uniqueOldMemories, facts);\n\n const updateResponse = await this.llm.generateResponse(\n [{ role: \"user\", content: updatePrompt }],\n { type: \"json_object\" },\n );\n\n const cleanUpdateResponse = removeCodeBlocks(updateResponse as string);\n let memoryActions: any[] = [];\n try {\n memoryActions = JSON.parse(cleanUpdateResponse).memory || [];\n } catch (e) {\n console.error(\n \"Failed to parse memory actions from LLM response:\",\n cleanUpdateResponse,\n e,\n );\n memoryActions = [];\n }\n\n // Process memory actions\n const 
results: MemoryItem[] = [];\n for (const action of memoryActions) {\n try {\n switch (action.event) {\n case \"ADD\": {\n const memoryId = await this.createMemory(\n action.text,\n newMessageEmbeddings,\n metadata,\n );\n results.push({\n id: memoryId,\n memory: action.text,\n metadata: { event: action.event },\n });\n break;\n }\n case \"UPDATE\": {\n const realMemoryId = tempUuidMapping[action.id];\n await this.updateMemory(\n realMemoryId,\n action.text,\n newMessageEmbeddings,\n metadata,\n );\n results.push({\n id: realMemoryId,\n memory: action.text,\n metadata: {\n event: action.event,\n previousMemory: action.old_memory,\n },\n });\n break;\n }\n case \"DELETE\": {\n const realMemoryId = tempUuidMapping[action.id];\n await this.deleteMemory(realMemoryId);\n results.push({\n id: realMemoryId,\n memory: action.text,\n metadata: { event: action.event },\n });\n break;\n }\n }\n } catch (error) {\n console.error(`Error processing memory action: ${error}`);\n }\n }\n\n return results;\n }\n\n async get(memoryId: string): Promise<MemoryItem | null> {\n const memory = await this.vectorStore.get(memoryId);\n if (!memory) return null;\n\n const filters = {\n ...(memory.payload.userId && { userId: memory.payload.userId }),\n ...(memory.payload.agentId && { agentId: memory.payload.agentId }),\n ...(memory.payload.runId && { runId: memory.payload.runId }),\n };\n\n const memoryItem: MemoryItem = {\n id: memory.id,\n memory: memory.payload.data,\n hash: memory.payload.hash,\n createdAt: memory.payload.createdAt,\n updatedAt: memory.payload.updatedAt,\n metadata: {},\n };\n\n // Add additional metadata\n const excludedKeys = new Set([\n \"userId\",\n \"agentId\",\n \"runId\",\n \"hash\",\n \"data\",\n \"createdAt\",\n \"updatedAt\",\n ]);\n for (const [key, value] of Object.entries(memory.payload)) {\n if (!excludedKeys.has(key)) {\n memoryItem.metadata![key] = value;\n }\n }\n\n return { ...memoryItem, ...filters };\n }\n\n async search(\n query: string,\n config: SearchMemoryOptions,\n ): Promise<SearchResult> {\n await this._captureEvent(\"search\", {\n query_length: query.length,\n limit: config.limit,\n has_filters: !!config.filters,\n });\n const { userId, agentId, runId, limit = 100, filters = {} } = config;\n\n if (userId) filters.userId = userId;\n if (agentId) filters.agentId = agentId;\n if (runId) filters.runId = runId;\n\n if (!filters.userId && !filters.agentId && !filters.runId) {\n throw new Error(\n \"One of the filters: userId, agentId or runId is required!\",\n );\n }\n\n // Search vector store\n const queryEmbedding = await this.embedder.embed(query);\n const memories = await this.vectorStore.search(\n queryEmbedding,\n limit,\n filters,\n );\n\n // Search graph store if available\n let graphResults;\n if (this.graphMemory) {\n try {\n graphResults = await this.graphMemory.search(query, filters);\n } catch (error) {\n console.error(\"Error searching graph memory:\", error);\n }\n }\n\n const excludedKeys = new Set([\n \"userId\",\n \"agentId\",\n \"runId\",\n \"hash\",\n \"data\",\n \"createdAt\",\n \"updatedAt\",\n ]);\n const results = memories.map((mem) => ({\n id: mem.id,\n memory: mem.payload.data,\n hash: mem.payload.hash,\n createdAt: mem.payload.createdAt,\n updatedAt: mem.payload.updatedAt,\n score: mem.score,\n metadata: Object.entries(mem.payload)\n .filter(([key]) => !excludedKeys.has(key))\n .reduce((acc, [key, value]) => ({ ...acc, [key]: value }), {}),\n ...(mem.payload.userId && { userId: mem.payload.userId }),\n ...(mem.payload.agentId && { agentId: 
mem.payload.agentId }),\n ...(mem.payload.runId && { runId: mem.payload.runId }),\n }));\n\n return {\n results,\n relations: graphResults,\n };\n }\n\n async update(memoryId: string, data: string): Promise<{ message: string }> {\n await this._captureEvent(\"update\", { memory_id: memoryId });\n const embedding = await this.embedder.embed(data);\n await this.updateMemory(memoryId, data, { [data]: embedding });\n return { message: \"Memory updated successfully!\" };\n }\n\n async delete(memoryId: string): Promise<{ message: string }> {\n await this._captureEvent(\"delete\", { memory_id: memoryId });\n await this.deleteMemory(memoryId);\n return { message: \"Memory deleted successfully!\" };\n }\n\n async deleteAll(\n config: DeleteAllMemoryOptions,\n ): Promise<{ message: string }> {\n await this._captureEvent(\"delete_all\", {\n has_user_id: !!config.userId,\n has_agent_id: !!config.agentId,\n has_run_id: !!config.runId,\n });\n const { userId, agentId, runId } = config;\n\n const filters: SearchFilters = {};\n if (userId) filters.userId = userId;\n if (agentId) filters.agentId = agentId;\n if (runId) filters.runId = runId;\n\n if (!Object.keys(filters).length) {\n throw new Error(\n \"At least one filter is required to delete all memories. If you want to delete all memories, use the `reset()` method.\",\n );\n }\n\n const [memories] = await this.vectorStore.list(filters);\n for (const memory of memories) {\n await this.deleteMemory(memory.id);\n }\n\n return { message: \"Memories deleted successfully!\" };\n }\n\n async history(memoryId: string): Promise<any[]> {\n return this.db.getHistory(memoryId);\n }\n\n async reset(): Promise<void> {\n await this._captureEvent(\"reset\");\n await this.db.reset();\n\n // Check provider before attempting deleteCol\n if (this.config.vectorStore.provider.toLowerCase() !== \"langchain\") {\n try {\n await this.vectorStore.deleteCol();\n } catch (e) {\n console.error(\n `Failed to delete collection for provider '${this.config.vectorStore.provider}':`,\n e,\n );\n // Decide if you want to re-throw or just log\n }\n } else {\n console.warn(\n \"Memory.reset(): Skipping vector store collection deletion as 'langchain' provider is used. 
Underlying Langchain vector store data is not cleared by this operation.\",\n );\n }\n\n if (this.graphMemory) {\n await this.graphMemory.deleteAll({ userId: \"default\" }); // Assuming this is okay, or needs similar check?\n }\n\n // Re-initialize factories/clients based on the original config\n this.embedder = EmbedderFactory.create(\n this.config.embedder.provider,\n this.config.embedder.config,\n );\n // Re-create vector store instance - crucial for Langchain to reset wrapper state if needed\n this.vectorStore = VectorStoreFactory.create(\n this.config.vectorStore.provider,\n this.config.vectorStore.config, // This will pass the original client instance back\n );\n this.llm = LLMFactory.create(\n this.config.llm.provider,\n this.config.llm.config,\n );\n // Re-init DB if needed (though db.reset() likely handles its state)\n // Re-init Graph if needed\n\n // Re-initialize telemetry\n this._initializeTelemetry();\n }\n\n async getAll(config: GetAllMemoryOptions): Promise<SearchResult> {\n await this._captureEvent(\"get_all\", {\n limit: config.limit,\n has_user_id: !!config.userId,\n has_agent_id: !!config.agentId,\n has_run_id: !!config.runId,\n });\n const { userId, agentId, runId, limit = 100 } = config;\n\n const filters: SearchFilters = {};\n if (userId) filters.userId = userId;\n if (agentId) filters.agentId = agentId;\n if (runId) filters.runId = runId;\n\n const [memories] = await this.vectorStore.list(filters, limit);\n\n const excludedKeys = new Set([\n \"userId\",\n \"agentId\",\n \"runId\",\n \"hash\",\n \"data\",\n \"createdAt\",\n \"updatedAt\",\n ]);\n const results = memories.map((mem) => ({\n id: mem.id,\n memory: mem.payload.data,\n hash: mem.payload.hash,\n createdAt: mem.payload.createdAt,\n updatedAt: mem.payload.updatedAt,\n metadata: Object.entries(mem.payload)\n .filter(([key]) => !excludedKeys.has(key))\n .reduce((acc, [key, value]) => ({ ...acc, [key]: value }), {}),\n ...(mem.payload.userId && { userId: mem.payload.userId }),\n ...(mem.payload.agentId && { agentId: mem.payload.agentId }),\n ...(mem.payload.runId && { runId: mem.payload.runId }),\n }));\n\n return { results };\n }\n\n private async createMemory(\n data: string,\n existingEmbeddings: Record<string, number[]>,\n metadata: Record<string, any>,\n ): Promise<string> {\n const memoryId = uuidv4();\n const embedding =\n existingEmbeddings[data] || (await this.embedder.embed(data));\n\n const memoryMetadata = {\n ...metadata,\n data,\n hash: createHash(\"md5\").update(data).digest(\"hex\"),\n createdAt: new Date().toISOString(),\n };\n\n await this.vectorStore.insert([embedding], [memoryId], [memoryMetadata]);\n await this.db.addHistory(\n memoryId,\n null,\n data,\n \"ADD\",\n memoryMetadata.createdAt,\n );\n\n return memoryId;\n }\n\n private async updateMemory(\n memoryId: string,\n data: string,\n existingEmbeddings: Record<string, number[]>,\n metadata: Record<string, any> = {},\n ): Promise<string> {\n const existingMemory = await this.vectorStore.get(memoryId);\n if (!existingMemory) {\n throw new Error(`Memory with ID ${memoryId} not found`);\n }\n\n const prevValue = existingMemory.payload.data;\n const embedding =\n existingEmbeddings[data] || (await this.embedder.embed(data));\n\n const newMetadata = {\n ...metadata,\n data,\n hash: createHash(\"md5\").update(data).digest(\"hex\"),\n createdAt: existingMemory.payload.createdAt,\n updatedAt: new Date().toISOString(),\n ...(existingMemory.payload.userId && {\n userId: existingMemory.payload.userId,\n }),\n ...(existingMemory.payload.agentId && 
{\n agentId: existingMemory.payload.agentId,\n }),\n ...(existingMemory.payload.runId && {\n runId: existingMemory.payload.runId,\n }),\n };\n\n await this.vectorStore.update(memoryId, embedding, newMetadata);\n await this.db.addHistory(\n memoryId,\n prevValue,\n data,\n \"UPDATE\",\n newMetadata.createdAt,\n newMetadata.updatedAt,\n );\n\n return memoryId;\n }\n\n private async deleteMemory(memoryId: string): Promise<string> {\n const existingMemory = await this.vectorStore.get(memoryId);\n if (!existingMemory) {\n throw new Error(`Memory with ID ${memoryId} not found`);\n }\n\n const prevValue = existingMemory.payload.data;\n await this.vectorStore.delete(memoryId);\n await this.db.addHistory(\n memoryId,\n prevValue,\n null,\n \"DELETE\",\n undefined,\n undefined,\n 1,\n );\n\n return memoryId;\n }\n}\n","import { z } from \"zod\";\n\nexport interface MultiModalMessages {\n type: \"image_url\";\n image_url: {\n url: string;\n };\n}\n\nexport interface Message {\n role: string;\n content: string | MultiModalMessages;\n}\n\nexport interface EmbeddingConfig {\n apiKey?: string;\n model?: string | any;\n url?: string;\n modelProperties?: Record<string, any>;\n}\n\nexport interface VectorStoreConfig {\n collectionName?: string;\n dimension?: number;\n client?: any;\n instance?: any;\n [key: string]: any;\n}\n\nexport interface HistoryStoreConfig {\n provider: string;\n config: {\n historyDbPath?: string;\n supabaseUrl?: string;\n supabaseKey?: string;\n tableName?: string;\n };\n}\n\nexport interface LLMConfig {\n provider?: string;\n config?: Record<string, any>;\n apiKey?: string;\n model?: string | any;\n modelProperties?: Record<string, any>;\n}\n\nexport interface Neo4jConfig {\n url: string;\n username: string;\n password: string;\n}\n\nexport interface GraphStoreConfig {\n provider: string;\n config: Neo4jConfig;\n llm?: LLMConfig;\n customPrompt?: string;\n}\n\nexport interface MemoryConfig {\n version?: string;\n embedder: {\n provider: string;\n config: EmbeddingConfig;\n };\n vectorStore: {\n provider: string;\n config: VectorStoreConfig;\n };\n llm: {\n provider: string;\n config: LLMConfig;\n };\n historyStore?: HistoryStoreConfig;\n disableHistory?: boolean;\n historyDbPath?: string;\n customPrompt?: string;\n graphStore?: GraphStoreConfig;\n enableGraph?: boolean;\n}\n\nexport interface MemoryItem {\n id: string;\n memory: string;\n hash?: string;\n createdAt?: string;\n updatedAt?: string;\n score?: number;\n metadata?: Record<string, any>;\n}\n\nexport interface SearchFilters {\n userId?: string;\n agentId?: string;\n runId?: string;\n [key: string]: any;\n}\n\nexport interface SearchResult {\n results: MemoryItem[];\n relations?: any[];\n}\n\nexport interface VectorStoreResult {\n id: string;\n payload: Record<string, any>;\n score?: number;\n}\n\nexport const MemoryConfigSchema = z.object({\n version: z.string().optional(),\n embedder: z.object({\n provider: z.string(),\n config: z.object({\n modelProperties: z.record(z.string(), z.any()).optional(),\n apiKey: z.string().optional(),\n model: z.union([z.string(), z.any()]).optional(),\n }),\n }),\n vectorStore: z.object({\n provider: z.string(),\n config: z\n .object({\n collectionName: z.string().optional(),\n dimension: z.number().optional(),\n client: z.any().optional(),\n })\n .passthrough(),\n }),\n llm: z.object({\n provider: z.string(),\n config: z.object({\n apiKey: z.string().optional(),\n model: z.union([z.string(), z.any()]).optional(),\n modelProperties: z.record(z.string(), z.any()).optional(),\n }),\n }),\n 
historyDbPath: z.string().optional(),\n customPrompt: z.string().optional(),\n enableGraph: z.boolean().optional(),\n graphStore: z\n .object({\n provider: z.string(),\n config: z.object({\n url: z.string(),\n username: z.string(),\n password: z.string(),\n }),\n llm: z\n .object({\n provider: z.string(),\n config: z.record(z.string(), z.any()),\n })\n .optional(),\n customPrompt: z.string().optional(),\n })\n .optional(),\n historyStore: z\n .object({\n provider: z.string(),\n config: z.record(z.string(), z.any()),\n })\n .optional(),\n disableHistory: z.boolean().optional(),\n});\n","import OpenAI from \"openai\";\nimport { Embedder } from \"./base\";\nimport { EmbeddingConfig } from \"../types\";\n\nexport class OpenAIEmbedder implements Embedder {\n private openai: OpenAI;\n private model: string;\n\n constructor(config: EmbeddingConfig) {\n this.openai = new OpenAI({ apiKey: config.apiKey });\n this.model = config.model || \"text-embedding-3-small\";\n }\n\n async embed(text: string): Promise<number[]> {\n const response = await this.openai.embeddings.create({\n model: this.model,\n input: text,\n });\n return response.data[0].embedding;\n }\n\n async embedBatch(texts: string[]): Promise<number[][]> {\n const response = await this.openai.embeddings.create({\n model: this.model,\n input: texts,\n });\n return response.data.map((item) => item.embedding);\n }\n}\n","import { Ollama } from \"ollama\";\nimport { Embedder } from \"./base\";\nimport { EmbeddingConfig } from \"../types\";\nimport { logger } from \"../utils/logger\";\n\nexport class OllamaEmbedder implements Embedder {\n private ollama: Ollama;\n private model: string;\n // Using this variable to avoid calling the Ollama server multiple times\n private initialized: boolean = false;\n\n constructor(config: EmbeddingConfig) {\n this.ollama = new Ollama({\n host: config.url || \"http://localhost:11434\",\n });\n this.model = config.model || \"nomic-embed-text:latest\";\n this.ensureModelExists().catch((err) => {\n logger.error(`Error ensuring model exists: ${err}`);\n });\n }\n\n async embed(text: string): Promise<number[]> {\n try {\n await this.ensureModelExists();\n } catch (err) {\n logger.error(`Error ensuring model exists: ${err}`);\n }\n const response = await this.ollama.embeddings({\n model: this.model,\n prompt: text,\n });\n return response.embedding;\n }\n\n async embedBatch(texts: string[]): Promise<number[][]> {\n const response = await Promise.all(texts.map((text) => this.embed(text)));\n return response;\n }\n\n private async ensureModelExists(): Promise<boolean> {\n if (this.initialized) {\n return true;\n }\n const local_models = await this.ollama.list();\n if (!local_models.models.find((m: any) => m.name === this.model)) {\n logger.info(`Pulling model ${this.model}...`);\n await this.ollama.pull({ model: this.model });\n }\n this.initialized = true;\n return true;\n }\n}\n","export interface Logger {\n info: (message: string) => void;\n error: (message: string) => void;\n debug: (message: string) => void;\n warn: (message: string) => void;\n}\n\nexport const logger: Logger = {\n info: (message: string) => console.log(`[INFO] ${message}`),\n error: (message: string) => console.error(`[ERROR] ${message}`),\n debug: (message: string) => console.debug(`[DEBUG] ${message}`),\n warn: (message: string) => console.warn(`[WARN] ${message}`),\n};\n","import OpenAI from \"openai\";\nimport { LLM, LLMResponse } from \"./base\";\nimport { LLMConfig, Message } from \"../types\";\n\nexport class OpenAILLM implements LLM {\n 
private openai: OpenAI;\n private model: string;\n\n constructor(config: LLMConfig) {\n this.openai = new OpenAI({ apiKey: config.apiKey });\n this.model = config.model || \"gpt-4o-mini\";\n }\n\n async generateResponse(\n messages: Message[],\n responseFormat?: { type: string },\n tools?: any[],\n ): Promise<string | LLMResponse> {\n const completion = await this.openai.chat.completions.create({\n messages: messages.map((msg) => {\n const role = msg.role as \"system\" | \"user\" | \"assistant\";\n return {\n role,\n content:\n typeof msg.content === \"string\"\n ? msg.content\n : JSON.stringify(msg.content),\n };\n }),\n model: this.model,\n response_format: responseFormat as { type: \"text\" | \"json_object\" },\n ...(tools && { tools, tool_choice: \"auto\" }),\n });\n\n const response = completion.choices[0].message;\n\n if (response.tool_calls) {\n return {\n content: response.content || \"\",\n role: response.role,\n toolCalls: response.tool_calls.map((call) => ({\n name: call.function.name,\n arguments: call.function.arguments,\n })),\n };\n }\n\n return response.content || \"\";\n }\n\n async generateChat(messages: Message[]): Promise<LLMResponse> {\n const completion = await this.openai.chat.completions.create({\n messages: messages.map((msg) => {\n const role = msg.role as \"system\" | \"user\" | \"assistant\";\n return {\n role,\n content:\n typeof msg.content === \"string\"\n ? msg.content\n : JSON.stringify(msg.content),\n };\n }),\n model: this.model,\n });\n const response = completion.choices[0].message;\n return {\n content: response.content || \"\",\n role: response.role,\n };\n }\n}\n","import OpenAI from \"openai\";\nimport { LLM, LLMResponse } from \"./base\";\nimport { LLMConfig, Message } from \"../types\";\n\nexport class OpenAIStructuredLLM implements LLM {\n private openai: OpenAI;\n private model: string;\n\n constructor(config: LLMConfig) {\n this.openai = new OpenAI({ apiKey: config.apiKey });\n this.model = config.model || \"gpt-4-turbo-preview\";\n }\n\n async generateResponse(\n messages: Message[],\n responseFormat?: { type: string } | null,\n tools?: any[],\n ): Promise<string | LLMResponse> {\n const completion = await this.openai.chat.completions.create({\n messages: messages.map((msg) => ({\n role: msg.role as \"system\" | \"user\" | \"assistant\",\n content:\n typeof msg.content === \"string\"\n ? msg.content\n : JSON.stringify(msg.content),\n })),\n model: this.model,\n ...(tools\n ? {\n tools: tools.map((tool) => ({\n type: \"function\",\n function: {\n name: tool.function.name,\n description: tool.function.description,\n parameters: tool.function.parameters,\n },\n })),\n tool_choice: \"auto\" as const,\n }\n : responseFormat\n ? {\n response_format: {\n type: responseFormat.type as \"text\" | \"json_object\",\n },\n }\n : {}),\n });\n\n const response = completion.choices[0].message;\n\n if (response.tool_calls) {\n return {\n content: response.content || \"\",\n role: response.role,\n toolCalls: response.tool_calls.map((call) => ({\n name: call.function.name,\n arguments: call.function.arguments,\n })),\n };\n }\n\n return response.content || \"\";\n }\n\n async generateChat(messages: Message[]): Promise<LLMResponse> {\n const completion = await this.openai.chat.completions.create({\n messages: messages.map((msg) => ({\n role: msg.role as \"system\" | \"user\" | \"assistant\",\n content:\n typeof msg.content === \"string\"\n ? 
msg.content\n : JSON.stringify(msg.content),\n })),\n model: this.model,\n });\n const response = completion.choices[0].message;\n return {\n content: response.content || \"\",\n role: response.role,\n };\n }\n}\n","import Anthropic from \"@anthropic-ai/sdk\";\nimport { LLM, LLMResponse } from \"./base\";\nimport { LLMConfig, Message } from \"../types\";\n\nexport class AnthropicLLM implements LLM {\n private client: Anthropic;\n private model: string;\n\n constructor(config: LLMConfig) {\n const apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY;\n if (!apiKey) {\n throw new Error(\"Anthropic API key is required\");\n }\n this.client = new Anthropic({ apiKey });\n this.model = config.model || \"claude-3-sonnet-20240229\";\n }\n\n async generateResponse(\n messages: Message[],\n responseFormat?: { type: string },\n ): Promise<string> {\n // Extract system message if present\n const systemMessage = messages.find((msg) => msg.role === \"system\");\n const otherMessages = messages.filter((msg) => msg.role !== \"system\");\n\n const response = await this.client.messages.create({\n model: this.model,\n messages: otherMessages.map((msg) => ({\n role: msg.role as \"user\" | \"assistant\",\n content:\n typeof msg.content === \"string\"\n ? msg.content\n : msg.content.image_url.url,\n })),\n system:\n typeof systemMessage?.content === \"string\"\n ? systemMessage.content\n : undefined,\n max_tokens: 4096,\n });\n\n return response.content[0].text;\n }\n\n async generateChat(messages: Message[]): Promise<LLMResponse> {\n const response = await this.generateResponse(messages);\n return {\n content: response,\n role: \"assistant\",\n };\n }\n}\n","import { Groq } from \"groq-sdk\";\nimport { LLM, LLMResponse } from \"./base\";\nimport { LLMConfig, Message } from \"../types\";\n\nexport class GroqLLM implements LLM {\n private client: Groq;\n private model: string;\n\n constructor(config: LLMConfig) {\n const apiKey = config.apiKey || process.env.GROQ_API_KEY;\n if (!apiKey) {\n throw new Error(\"Groq API key is required\");\n }\n this.client = new Groq({ apiKey });\n this.model = config.model || \"llama3-70b-8192\";\n }\n\n async generateResponse(\n messages: Message[],\n responseFormat?: { type: string },\n ): Promise<string> {\n const response = await this.client.chat.completions.create({\n model: this.model,\n messages: messages.map((msg) => ({\n role: msg.role as \"system\" | \"user\" | \"assistant\",\n content:\n typeof msg.content === \"string\"\n ? msg.content\n : JSON.stringify(msg.content),\n })),\n response_format: responseFormat as { type: \"text\" | \"json_object\" },\n });\n\n return response.choices[0].message.content || \"\";\n }\n\n async generateChat(messages: Message[]): Promise<LLMResponse> {\n const response = await this.client.chat.completions.create({\n model: this.model,\n messages: messages.map((msg) => ({\n role: msg.role as \"system\" | \"user\" | \"assistant\",\n content:\n typeof msg.content === \"string\"\n ? 
msg.content\n : JSON.stringify(msg.content),\n })),\n });\n\n const message = response.choices[0].message;\n return {\n content: message.content || \"\",\n role: message.role,\n };\n }\n}\n","import { Mistral } from \"@mistralai/mistralai\";\nimport { LLM, LLMResponse } from \"./base\";\nimport { LLMConfig, Message } from \"../types\";\n\nexport class MistralLLM implements LLM {\n private client: Mistral;\n private model: string;\n\n constructor(config: LLMConfig) {\n if (!config.apiKey) {\n throw new Error(\"Mistral API key is required\");\n }\n this.client = new Mistral({\n apiKey: config.apiKey,\n });\n this.model = config.model || \"mistral-tiny-latest\";\n }\n\n // Helper function to convert content to string\n private contentToString(content: any): string {\n if (typeof content === \"string\") {\n return content;\n }\n if (Array.isArray(content)) {\n // Handle ContentChunk array - extract text content\n return content\n .map((chunk) => {\n if (chunk.type === \"text\") {\n return chunk.text;\n } else {\n return JSON.stringify(chunk);\n }\n })\n .join(\"\");\n }\n return String(content || \"\");\n }\n\n async generateResponse(\n messages: Message[],\n responseFormat?: { type: string },\n tools?: any[],\n ): Promise<string | LLMResponse> {\n const response = await this.client.chat.complete({\n model: this.model,\n messages: messages.map((msg) => ({\n role: msg.role as \"system\" | \"user\" | \"assistant\",\n content:\n typeof msg.content === \"string\"\n ? msg.content\n : JSON.stringify(msg.content),\n })),\n ...(tools && { tools }),\n ...(responseFormat && { response_format: responseFormat }),\n });\n\n if (!response || !response.choices || response.choices.length === 0) {\n return \"\";\n }\n\n const message = response.choices[0].message;\n\n if (!message) {\n return \"\";\n }\n\n if (message.toolCalls && message.toolCalls.length > 0) {\n return {\n content: this.contentToString(message.content),\n role: message.role || \"assistant\",\n toolCalls: message.toolCalls.map((call) => ({\n name: call.function.name,\n arguments:\n typeof call.function.arguments === \"string\"\n ? call.function.arguments\n : JSON.stringify(call.function.arguments),\n })),\n };\n }\n\n return this.contentToString(message.content);\n }\n\n async generateChat(messages: Message[]): Promise<LLMResponse> {\n const formattedMessages = messages.map((msg) => ({\n role: msg.role as \"system\" | \"user\" | \"assistant\",\n content:\n typeof msg.content === \"string\"\n ? 
msg.content\n : JSON.stringify(msg.content),\n }));\n\n const response = await this.client.chat.complete({\n model: this.model,\n messages: formattedMessages,\n });\n\n if (!response || !response.choices || response.choices.length === 0) {\n return {\n content: \"\",\n role: \"assistant\",\n };\n }\n\n const message = response.choices[0].message;\n\n return {\n content: this.contentToString(message.content),\n role: message.role || \"assistant\",\n };\n }\n}\n","import { VectorStore } from \"./base\";\nimport { SearchFilters, VectorStoreConfig, VectorStoreResult } from \"../types\";\nimport sqlite3 from \"sqlite3\";\nimport path from \"path\";\n\ninterface MemoryVector {\n id: string;\n vector: number[];\n payload: Record<string, any>;\n}\n\nexport class MemoryVectorStore implements VectorStore {\n private db: sqlite3.Database;\n private dimension: number;\n private dbPath: string;\n\n constructor(config: VectorStoreConfig) {\n this.dimension = config.dimension || 1536; // Default OpenAI dimension\n this.dbPath = path.join(process.cwd(), \"vector_store.db\");\n if (config.dbPath) {\n this.dbPath = config.dbPath;\n }\n this.db = new sqlite3.Database(this.dbPath);\n this.init().catch(console.error);\n }\n\n private async init() {\n await this.run(`\n CREATE TABLE IF NOT EXISTS vectors (\n id TEXT PRIMARY KEY,\n vector BLOB NOT NULL,\n payload TEXT NOT NULL\n )\n `);\n\n await this.run(`\n CREATE TABLE IF NOT EXISTS memory_migrations (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n user_id TEXT NOT NULL UNIQUE\n )\n `);\n }\n\n private async run(sql: string, params: any[] = []): Promise<void> {\n return new Promise((resolve, reject) => {\n this.db.run(sql, params, (err) => {\n if (err) reject(err);\n else resolve();\n });\n });\n }\n\n private async all(sql: string, params: any[] = []): Promise<any[]> {\n return new Promise((resolve, reject) => {\n this.db.all(sql, params, (err, rows) => {\n if (err) reject(err);\n else resolve(rows);\n });\n });\n }\n\n private async getOne(sql: string, params: any[] = []): Promise<any> {\n return new Promise((resolve, reject) => {\n this.db.get(sql, params, (err, row) => {\n if (err) reject(err);\n else resolve(row);\n });\n });\n }\n\n private cosineSimilarity(a: number[], b: number[]): number {\n let dotProduct = 0;\n let normA = 0;\n let normB = 0;\n for (let i = 0; i < a.length; i++) {\n dotProduct += a[i] * b[i];\n normA += a[i] * a[i];\n normB += b[i] * b[i];\n }\n return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB));\n }\n\n private filterVector(vector: MemoryVector, filters?: SearchFilters): boolean {\n if (!filters) return true;\n return Object.entries(filters).every(\n ([key, value]) => vector.payload[key] === value,\n );\n }\n\n async insert(\n vectors: number[][],\n ids: string[],\n payloads: Record<string, any>[],\n ): Promise<void> {\n for (let i = 0; i < vectors.length; i++) {\n if (vectors[i].length !== this.dimension) {\n throw new Error(\n `Vector dimension mismatch. Expected ${this.dimension}, got ${vectors[i].length}`,\n );\n }\n const vectorBuffer = Buffer.from(new Float32Array(vectors[i]).buffer);\n await this.run(\n `INSERT OR REPLACE INTO vectors (id, vector, payload) VALUES (?, ?, ?)`,\n [ids[i], vectorBuffer, JSON.stringify(payloads[i])],\n );\n }\n }\n\n async search(\n query: number[],\n limit: number = 10,\n filters?: SearchFilters,\n ): Promise<VectorStoreResult[]> {\n if (query.length !== this.dimension) {\n throw new Error(\n `Query dimension mismatch. 
Expected ${this.dimension}, got ${query.length}`,\n );\n }\n\n const rows = await this.all(`SELECT * FROM vectors`);\n const results: VectorStoreResult[] = [];\n\n for (const row of rows) {\n const vector = new Float32Array(row.vector.buffer);\n const payload = JSON.parse(row.payload);\n const memoryVector: MemoryVector = {\n id: row.id,\n vector: Array.from(vector),\n payload,\n };\n\n if (this.filterVector(memoryVector, filters)) {\n const score = this.cosineSimilarity(query, Array.from(vector));\n results.push({\n id: memoryVector.id,\n payload: memoryVector.payload,\n score,\n });\n }\n }\n\n results.sort((a, b) => (b.score || 0) - (a.score || 0));\n return results.slice(0, limit);\n }\n\n async get(vectorId: string): Promise<VectorStoreResult | null> {\n const row = await this.getOne(`SELECT * FROM vectors WHERE id = ?`, [\n vectorId,\n ]);\n if (!row) return null;\n\n const payload = JSON.parse(row.payload);\n return {\n id: row.id,\n payload,\n };\n }\n\n async update(\n vectorId: string,\n vector: number[],\n payload: Record<string, any>,\n ): Promise<void> {\n if (vector.length !== this.dimension) {\n throw new Error(\n `Vector dimension mismatch. Expected ${this.dimension}, got ${vector.length}`,\n );\n }\n const vectorBuffer = Buffer.from(new Float32Array(vector).buffer);\n await this.run(`UPDATE vectors SET vector = ?, payload = ? WHERE id = ?`, [\n vectorBuffer,\n JSON.stringify(payload),\n vectorId,\n ]);\n }\n\n async delete(vectorId: string): Promise<void> {\n await this.run(`DELETE FROM vectors WHERE id = ?`, [vectorId]);\n }\n\n async deleteCol(): Promise<void> {\n await this.run(`DROP TABLE IF EXISTS vectors`);\n await this.init();\n }\n\n async list(\n filters?: SearchFilters,\n limit: number = 100,\n ): Promise<[VectorStoreResult[], number]> {\n const rows = await this.all(`SELECT * FROM vectors`);\n const results: VectorStoreResult[] = [];\n\n for (const row of rows) {\n const payload = JSON.parse(row.payload);\n const memoryVector: MemoryVector = {\n id: row.id,\n vector: Array.from(new Float32Array(row.vector.buffer)),\n payload,\n };\n\n if (this.filterVector(memoryVector, filters)) {\n results.push({\n id: memoryVector.id,\n payload: memoryVector.payload,\n });\n }\n }\n\n return [results.slice(0, limit), results.length];\n }\n\n async getUserId(): Promise<string> {\n const row = await this.getOne(\n `SELECT user_id FROM memory_migrations LIMIT 1`,\n );\n if (row) {\n return row.user_id;\n }\n\n // Generate a random user_id if none exists\n const randomUserId =\n Math.random().toString(36).substring(2, 15) +\n Math.random().toString(36).substring(2, 15);\n await this.run(`INSERT INTO memory_migrations (user_id) VALUES (?)`, [\n randomUserId,\n ]);\n return randomUserId;\n }\n\n async setUserId(userId: string): Promise<void> {\n await this.run(`DELETE FROM memory_migrations`);\n await this.run(`INSERT INTO memory_migrations (user_id) VALUES (?)`, [\n userId,\n ]);\n }\n\n async initialize(): Promise<void> {\n await this.init();\n }\n}\n","import { QdrantClient } from \"@qdrant/js-client-rest\";\nimport { VectorStore } from \"./base\";\nimport { SearchFilters, VectorStoreConfig, VectorStoreResult } from \"../types\";\nimport * as fs from \"fs\";\n\ninterface QdrantConfig extends VectorStoreConfig {\n client?: QdrantClient;\n host?: string;\n port?: number;\n path?: string;\n url?: string;\n apiKey?: string;\n onDisk?: boolean;\n collectionName: string;\n embeddingModelDims: number;\n dimension?: number;\n}\n\ninterface QdrantFilter {\n must?: QdrantCondition[];\n 
must_not?: QdrantCondition[];\n should?: QdrantCondition[];\n}\n\ninterface QdrantCondition {\n key: string;\n match?: { value: any };\n range?: { gte?: number; gt?: number; lte?: number; lt?: number };\n}\n\nexport class Qdrant implements VectorStore {\n private client: QdrantClient;\n private readonly collectionName: string;\n private dimension: number;\n\n constructor(config: QdrantConfig) {\n if (config.client) {\n this.client = config.client;\n } else {\n const params: Record<string, any> = {};\n if (config.apiKey) {\n params.apiKey = config.apiKey;\n }\n if (config.url) {\n params.url = config.url;\n }\n if (config.host && config.port) {\n params.host = config.host;\n params.port = config.port;\n }\n if (!Object.keys(params).length) {\n params.path = config.path;\n if (!config.onDisk && config.path) {\n if (\n fs.existsSync(config.path) &&\n fs.statSync(config.path).isDirectory()\n ) {\n fs.rmSync(config.path, { recursive: true });\n }\n }\n }\n\n this.client = new QdrantClient(params);\n }\n\n this.collectionName = config.collectionName;\n this.dimension = config.dimension || 1536; // Default OpenAI dimension\n this.initialize().catch(console.error);\n }\n\n private createFilter(filters?: SearchFilters): QdrantFilter | undefined {\n if (!filters) return undefined;\n\n const conditions: QdrantCondition[] = [];\n for (const [key, value] of Object.entries(filters)) {\n if (\n typeof value === \"object\" &&\n value !== null &&\n \"gte\" in value &&\n \"lte\" in value\n ) {\n conditions.push({\n key,\n range: {\n gte: value.gte,\n lte: value.lte,\n },\n });\n } else {\n conditions.push({\n key,\n match: {\n value,\n
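For orientation, the sources embedded in the map above define the package's public Memory class: a constructor taking a Partial<MemoryConfig>, plus add, search, getAll, update, delete, and reset methods. The sketch below is written only against those visible signatures; the import path and the "openai"/"memory" provider names are assumptions about the factory wiring, which is not shown in this excerpt.

import { Memory } from "mem0ai/oss"; // assumed entry point; verify against the package's exports

// Config shape follows the MemoryConfig interface from the embedded types/index.ts.
const memory = new Memory({
  embedder: { provider: "openai", config: { apiKey: process.env.OPENAI_API_KEY } },
  vectorStore: { provider: "memory", config: { collectionName: "demo", dimension: 1536 } },
  llm: { provider: "openai", config: { apiKey: process.env.OPENAI_API_KEY } },
});

async function demo() {
  // add() throws unless at least one of userId, agentId, or runId is set
  // (see the filter check at the top of Memory.add in the embedded source).
  const added = await memory.add("I prefer TypeScript over JavaScript", {
    userId: "user-1",
  });
  console.log(added.results); // MemoryItem[] tagged with { event: "ADD" | "UPDATE" | "DELETE" }

  // search() embeds the query, then runs a filtered vector search (limit defaults to 100).
  const hits = await memory.search("Which languages does this user like?", {
    userId: "user-1",
  });
  console.log(hits.results);

  // getAll() lists memories under the same filters; delete() removes one by id.
  const all = await memory.getAll({ userId: "user-1" });
  if (all.results.length > 0) {
    await memory.delete(all.results[0].id);
  }
}

demo().catch(console.error);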