UNPKG

supermemory-ai-provider

Version: (not captured in this extract)

Vercel AI Provider for providing memory to LLMs using Supermemory

600 lines (590 loc) 19.8 kB
"use strict"; var __create = Object.create; var __defProp = Object.defineProperty; var __getOwnPropDesc = Object.getOwnPropertyDescriptor; var __getOwnPropNames = Object.getOwnPropertyNames; var __getProtoOf = Object.getPrototypeOf; var __hasOwnProp = Object.prototype.hasOwnProperty; var __export = (target, all) => { for (var name in all) __defProp(target, name, { get: all[name], enumerable: true }); }; var __copyProps = (to, from, except, desc) => { if (from && typeof from === "object" || typeof from === "function") { for (let key of __getOwnPropNames(from)) if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); } return to; }; var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps( // If the importer is in node compatibility mode or this is not an ESM // file that has been converted to a CommonJS file using a Babel- // compatible transform (i.e. "__esModule" has not been set), then set // "default" to the CommonJS "module.exports" for node compatibility. isNodeMode || !mod || !mod.__esModule ? 
__defProp(target, "default", { value: mod, enumerable: true }) : target, mod )); var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); // src/index.ts var index_exports = {}; __export(index_exports, { Supermemory: () => Supermemory2, addMemories: () => addMemories, createSupermemory: () => createSupermemory, getMemories: () => getMemories, retrieveMemories: () => retrieveMemories, searchMemories: () => searchMemories, supermemory: () => supermemory }); module.exports = __toCommonJS(index_exports); // src/supermemory-facade.ts var import_provider_utils2 = require("@ai-sdk/provider-utils"); // src/provider-response-provider.ts var import_openai = require("@ai-sdk/openai"); var import_cohere = require("@ai-sdk/cohere"); var import_anthropic = require("@ai-sdk/anthropic"); var import_groq = require("@ai-sdk/groq"); var SupermemoryAITextGenerator = class { specificationVersion = "v1"; defaultObjectGenerationMode = "json"; supportsImageUrls = false; modelId; provider; llmClient; llmClientConfig; config; constructor(modelId, config, llmClientConfigParam) { this.provider = config.provider || "openai"; switch (config.provider) { case "openai": { const openaiProvider = (0, import_openai.createOpenAI)({ apiKey: config?.apiKey, ...llmClientConfigParam }); if (config?.modelType === "completion") { this.llmClient = (id, cfg) => openaiProvider.completion(id, cfg); } else if (config?.modelType === "chat") { this.llmClient = (id, cfg) => openaiProvider.chat(id, cfg); } else { this.llmClient = (id, cfg) => openaiProvider.languageModel(id, cfg); } break; } case "cohere": { const cohereProvider = (0, import_cohere.createCohere)({ apiKey: config?.apiKey, ...llmClientConfigParam }); this.llmClient = (id, cfg) => cohereProvider(id, cfg); break; } case "anthropic": { const anthropicProvider = (0, import_anthropic.createAnthropic)({ apiKey: config?.apiKey, ...llmClientConfigParam }); this.llmClient = (id, cfg) => anthropicProvider.languageModel(id, 
cfg); break; } case "groq": { const groqProvider = (0, import_groq.createGroq)({ apiKey: config?.apiKey, ...llmClientConfigParam }); this.llmClient = (id, cfg) => groqProvider(id, cfg); break; } default: throw new Error("Invalid provider"); } this.modelId = modelId; this.llmClientConfig = llmClientConfigParam; this.config = config; } doGenerate(options) { const modelInstance = this.llmClient(this.modelId, this.llmClientConfig); return Promise.resolve(modelInstance.doGenerate(options)); } doStream(options) { const modelInstance = this.llmClient(this.modelId, this.llmClientConfig); return Promise.resolve(modelInstance.doStream(options)); } }; var provider_response_provider_default = SupermemoryAITextGenerator; // src/supermemory-provider-selector.ts var SupermemoryClassSelector = class _SupermemoryClassSelector { modelId; provider_wrapper; config; provider_config; static supportedProviders = ["openai", "anthropic", "cohere", "groq"]; constructor(modelId, config, provider_config) { this.modelId = modelId; this.provider_wrapper = config.provider || "openai"; this.provider_config = provider_config; this.config = config; if (!_SupermemoryClassSelector.supportedProviders.includes( this.provider_wrapper )) { throw new Error(`Model not supported: ${this.provider_wrapper}`); } } createProvider() { return new provider_response_provider_default( this.modelId, this.config, this.provider_config || {} ); } }; // src/supermemory-utils.ts var import_provider_utils = require("@ai-sdk/provider-utils"); var import_supermemory = __toESM(require("supermemory")); var flattenPrompt = (prompt) => { try { return prompt.map((part) => { if (part.role === "user") { return part.content.filter((obj) => obj.type === "text").map((obj) => obj.text).join(" "); } return ""; }).join(" "); } catch (error) { console.error("Error in flattenPrompt:", error); return ""; } }; var convertToSupermemoryFormat = (messages) => { try { return messages.flatMap((message) => { try { if (typeof message.content === 
"string") { return { role: message.role, content: message.content }; } return message.content.map((obj) => { try { if (obj.type === "text") { return { role: message.role, content: obj.text }; } return null; } catch (error) { console.error("Error processing content object:", error); return null; } }).filter((item) => item !== null); } catch (error) { console.error("Error processing message:", error); return []; } }); } catch (error) { console.error("Error in convertToSupermemoryFormat:", error); return []; } }; var getSupermemoryClient = (config) => { try { const apiKey = (0, import_provider_utils.loadApiKey)({ apiKey: config?.supermemoryApiKey, environmentVariableName: "SUPERMEMORY_API_KEY", description: "Supermemory" }); return new import_supermemory.default({ apiKey }); } catch (error) { console.error("Error initializing Supermemory client:", error); throw error; } }; var searchMemories = async (query, config, top_k = 5) => { try { const client = getSupermemoryClient(config); const filters = {}; if (config?.user_id) { filters.user_id = config.user_id; } const searchResponse = await client.search.execute({ q: query, ...Object.keys(filters).length > 0 ? { filters } : {} }); return searchResponse.results; } catch (error) { console.error("Error in searchMemories:", error); return []; } }; var addMemories = async (messages, config) => { try { const client = getSupermemoryClient(config); let finalMessages = []; if (typeof messages === "string") { finalMessages = [{ role: "user", content: messages }]; } else { finalMessages = convertToSupermemoryFormat(messages); } const addPromises = finalMessages.map(async (message) => { return await client.memories.add({ content: message.content, containerTags: config?.user_id ? 
[config.user_id] : void 0, metadata: config?.metadata }); }); const results = await Promise.all(addPromises); return results.filter(Boolean); } catch (error) { console.error("Error in addMemories:", error); throw error; } }; var getMemories = async (prompt, config) => { try { const message = typeof prompt === "string" ? prompt : flattenPrompt(prompt); const memories = await searchMemories(message, config); return memories; } catch (error) { console.error("Error in getMemories:", error); return []; } }; var retrieveMemories = async (prompt, config) => { try { const message = typeof prompt === "string" ? prompt : flattenPrompt(prompt); const systemPrompt = "These are the memories I have stored. Give more weightage to the question by users and try to answer that first. You have to modify your answer based on the memories I have provided. If the memories are irrelevant you can ignore them. Also don't reply to this section of the prompt, or the memories, they are only for your reference. The System prompt starts after text System Message: \n\n"; const memories = await searchMemories(message, config); if (!memories || memories.length === 0) { return ""; } const memoriesText = memories.map((memory) => { return `Memory: ${memory.content || memory.memory} `; }).join("\n\n"); return `System Message: ${systemPrompt} ${memoriesText}`; } catch (error) { console.error("Error in retrieveMemories:", error); throw error; } }; // src/supermemory-generic-language-model.ts var generateRandomId = () => { return `supermemory-${Math.random().toString(36).substring(2, 15)}${Math.random().toString(36).substring(2, 15)}`; }; var SupermemoryGenericLanguageModel = class { constructor(modelId, settings, config, provider_config) { this.modelId = modelId; this.settings = settings; this.config = config; this.provider_config = provider_config; this.provider = config.provider ?? 
"openai"; } specificationVersion = "v1"; defaultObjectGenerationMode = "json"; supportsImageUrls = false; provider; async processMemories(messagesPrompts, supermemoryConfig) { try { try { await addMemories(messagesPrompts, supermemoryConfig); } catch (e) { console.error("Error while adding memories:", e); } const memories = await getMemories(messagesPrompts, supermemoryConfig); const mySystemPrompt = "These are the memories I have stored. Give more weightage to the question by users and try to answer that first. You have to modify your answer based on the memories I have provided. If the memories are irrelevant you can ignore them. Also don't reply to this section of the prompt, or the memories, they are only for your reference. The System prompt starts after text System Message: \n\n"; let memoriesText = ""; try { memoriesText = memories?.map((memory) => { return `Memory: ${memory.memory} `; }).join("\n\n"); } catch (e) { console.error("Error while parsing memories"); } const memoriesPrompt = `System Message: ${mySystemPrompt} ${memoriesText}`; const systemPrompt = { role: "system", content: memoriesPrompt }; if (memories?.length > 0) { messagesPrompts.unshift(systemPrompt); } return { memories, messagesPrompts }; } catch (e) { console.error("Error while processing memories"); return { memories: [], messagesPrompts }; } } async doGenerate(options) { try { const provider = this.config.provider; const supermemory_api_key = this.config.supermemoryApiKey; const settings = { provider, supermemoryApiKey: supermemory_api_key, apiKey: this.config.apiKey }; const supermemoryConfig = { supermemoryApiKey: supermemory_api_key, ...this.config.supermemoryConfig, ...this.settings }; const selector = new SupermemoryClassSelector( this.modelId, settings, this.provider_config ); const messagesPrompts = options.prompt; const { memories, messagesPrompts: updatedPrompts } = await this.processMemories(messagesPrompts, supermemoryConfig); const model = selector.createProvider(); const 
ans = await model.doGenerate({ ...options, prompt: updatedPrompts }); if (!memories || memories?.length === 0) { return ans; } const sources = [...ans.sources || []]; const combinedSource = { title: "Supermemory Memories", sourceType: "url", id: generateRandomId(), url: "https://app.supermemory.ai", providerMetadata: { supermemory: { memories, memoriesText: memories.map((memory) => memory.memory).join("\n\n") } } }; sources.push(combinedSource); for (const memory of memories) { const memorySource = { title: memory.title || "Memory", sourceType: "url", id: generateRandomId(), url: "https://app.supermemory.ai", providerMetadata: { supermemory: { memory, memoryText: memory.memory } } }; sources.push(memorySource); } return { ...ans, sources }; } catch (error) { console.error("Error in doGenerate:", error); throw new Error("Failed to generate response."); } } async doStream(options) { try { const provider = this.config.provider; const supermemory_api_key = this.config.supermemoryApiKey; const settings = { provider, supermemoryApiKey: supermemory_api_key, apiKey: this.config.apiKey, modelType: this.config.modelType }; const supermemoryConfig = { supermemoryApiKey: supermemory_api_key, ...this.config.supermemoryConfig, ...this.settings }; const selector = new SupermemoryClassSelector( this.modelId, settings, this.provider_config ); const messagesPrompts = options.prompt; const { memories, messagesPrompts: updatedPrompts } = await this.processMemories(messagesPrompts, supermemoryConfig); const model = selector.createProvider(); const streamResponse = await model.doStream({ ...options, prompt: updatedPrompts }); if (!memories || memories?.length === 0) { return streamResponse; } const originalStream = streamResponse.stream; const transformStream = new TransformStream({ start(controller) { try { if (Array.isArray(memories) && memories?.length > 0) { controller.enqueue({ type: "source", source: { title: "Supermemory Memories", sourceType: "url", id: generateRandomId(), url: 
"https://app.supermemory.ai", providerMetadata: { supermemory: { memories, memoriesText: memories.map((memory) => memory.memory).join("\n\n") } } } }); for (const memory of memories) { controller.enqueue({ type: "source", source: { title: memory.title || "Memory", sourceType: "url", id: generateRandomId(), url: "https://app.supermemory.ai", providerMetadata: { supermemory: { memory, memoryText: memory.memory } } } }); } } } catch (error) { console.error("Error adding memory sources:", error); } }, transform(chunk, controller) { controller.enqueue(chunk); } }); const enhancedStream = originalStream.pipeThrough(transformStream); return { stream: enhancedStream, rawCall: streamResponse.rawCall, rawResponse: streamResponse.rawResponse, request: streamResponse.request, warnings: streamResponse.warnings }; } catch (error) { console.error("Error in doStream:", error); throw new Error("Streaming failed or method not implemented."); } } }; // src/supermemory-facade.ts var Supermemory2 = class { baseURL; headers; constructor(options = { provider: "openai" }) { this.baseURL = (0, import_provider_utils2.withoutTrailingSlash)(options.baseURL) ?? "https://api.supermemory.ai"; this.headers = options.headers; } get baseConfig() { return { baseURL: this.baseURL, headers: this.headers }; } chat(modelId, settings = {}) { return new SupermemoryGenericLanguageModel(modelId, settings, { provider: "openai", modelType: "chat", ...this.baseConfig }); } completion(modelId, settings = {}) { return new SupermemoryGenericLanguageModel(modelId, settings, { provider: "openai", modelType: "completion", ...this.baseConfig }); } }; // src/supermemory-provider.ts var import_provider_utils3 = require("@ai-sdk/provider-utils"); function createSupermemory(options = { provider: "openai" }) { const baseURL = (0, import_provider_utils3.withoutTrailingSlash)(options.baseURL) ?? 
"http://api.openai.com"; const getHeaders = () => ({ ...options.headers }); const createGenericModel = (modelId, settings = {}) => new SupermemoryGenericLanguageModel( modelId, settings, { baseURL, fetch: options.fetch, headers: getHeaders(), provider: options.provider || "openai", name: options.name, supermemoryApiKey: options.supermemoryApiKey, apiKey: options.apiKey, supermemoryConfig: options.supermemoryConfig }, options.config ); const createCompletionModel = (modelId, settings = {}) => new SupermemoryGenericLanguageModel( modelId, settings, { baseURL, fetch: options.fetch, headers: getHeaders(), provider: options.provider || "openai", name: options.name, supermemoryApiKey: options.supermemoryApiKey, apiKey: options.apiKey, supermemoryConfig: options.supermemoryConfig, modelType: "completion" }, options.config ); const createChatModel = (modelId, settings = {}) => new SupermemoryGenericLanguageModel( modelId, settings, { baseURL, fetch: options.fetch, headers: getHeaders(), provider: options.provider || "openai", name: options.name, supermemoryApiKey: options.supermemoryApiKey, apiKey: options.apiKey, supermemoryConfig: options.supermemoryConfig, modelType: "completion" }, options.config ); const provider = function(modelId, settings = {}) { if (new.target) { throw new Error( "The Supermemory model function cannot be called with the new keyword." ); } return createGenericModel(modelId, settings); }; provider.languageModel = createGenericModel; provider.completion = createCompletionModel; provider.chat = createChatModel; return provider; } var supermemory = createSupermemory(); // Annotate the CommonJS export names for ESM import in node: 0 && (module.exports = { Supermemory, addMemories, createSupermemory, getMemories, retrieveMemories, searchMemories, supermemory }); //# sourceMappingURL=index.js.map