// @mem0/vercel-ai-provider: Vercel AI Provider for providing memory to LLMs
// (bundled CommonJS build)
"use strict";
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
for (var name in all)
__defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
if (from && typeof from === "object" || typeof from === "function") {
for (let key of __getOwnPropNames(from))
if (!__hasOwnProp.call(to, key) && key !== except)
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
}
return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// src/index.ts
var index_exports = {};
__export(index_exports, {
Mem0: () => Mem0,
addMemories: () => addMemories,
createMem0: () => createMem0,
getMemories: () => getMemories,
mem0: () => mem0,
retrieveMemories: () => retrieveMemories,
searchMemories: () => searchMemories
});
module.exports = __toCommonJS(index_exports);
// src/mem0-facade.ts
var import_provider_utils2 = require("@ai-sdk/provider-utils");
// src/mem0-generic-language-model.ts
var import_ai = require("ai");
// src/provider-response-provider.ts
var import_openai = require("@ai-sdk/openai");
var import_cohere = require("@ai-sdk/cohere");
var import_anthropic = require("@ai-sdk/anthropic");
var import_google = require("@ai-sdk/google");
var import_groq = require("@ai-sdk/groq");
var Mem0AITextGenerator = class {
// Use any type to avoid version conflicts
constructor(modelId, config, provider_config) {
this.specificationVersion = "v2";
this.defaultObjectGenerationMode = "json";
this.supportsImageUrls = false;
this.provider = "mem0";
this.supportedUrls = {
"*": [/.*/]
};
this.modelId = modelId;
switch (config.provider) {
case "openai":
if ((config == null ? void 0 : config.modelType) === "completion") {
this.languageModel = (0, import_openai.createOpenAI)({
apiKey: config == null ? void 0 : config.apiKey,
...provider_config
}).completion(modelId);
} else if ((config == null ? void 0 : config.modelType) === "chat") {
this.languageModel = (0, import_openai.createOpenAI)({
apiKey: config == null ? void 0 : config.apiKey,
...provider_config
}).chat(modelId);
} else {
this.languageModel = (0, import_openai.createOpenAI)({
apiKey: config == null ? void 0 : config.apiKey,
...provider_config
}).languageModel(modelId);
}
break;
case "cohere":
this.languageModel = (0, import_cohere.createCohere)({
apiKey: config == null ? void 0 : config.apiKey,
...provider_config
})(modelId);
break;
case "anthropic":
this.languageModel = (0, import_anthropic.createAnthropic)({
apiKey: config == null ? void 0 : config.apiKey,
...provider_config
}).languageModel(modelId);
break;
case "groq":
this.languageModel = (0, import_groq.createGroq)({
apiKey: config == null ? void 0 : config.apiKey,
...provider_config
})(modelId);
break;
case "google":
case "gemini":
this.languageModel = (0, import_google.createGoogleGenerativeAI)({
apiKey: config == null ? void 0 : config.apiKey,
...provider_config
})(modelId);
break;
default:
throw new Error("Invalid provider");
}
}
async doGenerate(options) {
const result = await this.languageModel.doGenerate(options);
return result;
}
async doStream(options) {
const result = await this.languageModel.doStream(options);
return result;
}
};
var provider_response_provider_default = Mem0AITextGenerator;
// src/mem0-provider-selector.ts
var _Mem0ClassSelector = class _Mem0ClassSelector {
constructor(modelId, config, provider_config) {
this.modelId = modelId;
    this.provider_wrapper = (config == null ? void 0 : config.provider) || "openai";
    this.provider_config = provider_config;
    this.config = config || { provider: this.provider_wrapper };
if (!_Mem0ClassSelector.supportedProviders.includes(this.provider_wrapper)) {
      throw new Error(`Provider not supported: ${this.provider_wrapper}`);
}
}
createProvider() {
return new provider_response_provider_default(this.modelId, this.config, this.provider_config || {});
}
};
_Mem0ClassSelector.supportedProviders = ["openai", "anthropic", "cohere", "groq", "google"];
var Mem0ClassSelector = _Mem0ClassSelector;
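/*
 * Illustrative sketch of the selector flow (the model id and key are assumptions,
 * not values from this file):
 *
 *   const selector = new Mem0ClassSelector("gpt-4o", { provider: "openai", apiKey: "sk-..." }, {});
 *   const model = selector.createProvider(); // -> Mem0AITextGenerator wrapping @ai-sdk/openai
 */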
// src/mem0-utils.ts
var import_provider_utils = require("@ai-sdk/provider-utils");
var flattenPrompt = (prompt) => {
try {
return prompt.map((part) => {
if (part.role === "user") {
return part.content.filter((obj) => obj.type === "text").map((obj) => obj.text).join(" ");
}
return "";
}).join(" ");
} catch (error) {
console.error("Error in flattenPrompt:", error);
return "";
}
};
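/*
 * flattenPrompt collapses an AI SDK prompt into the plain text of its user turns
 * (non-user turns contribute empty strings to the join). Illustrative sketch:
 *
 *   flattenPrompt([
 *     { role: "user", content: [{ type: "text", text: "I love sushi" }] }
 *   ]); // => "I love sushi"
 */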
var convertToMem0Format = (messages) => {
try {
return messages.flatMap((message) => {
try {
if (typeof message.content === "string") {
return {
role: message.role,
content: message.content
};
} else {
return message.content.map((obj) => {
try {
if (obj.type === "text") {
return {
role: message.role,
content: obj.text
};
}
return null;
} catch (error) {
console.error("Error processing content object:", error);
return null;
}
}).filter((item) => item !== null);
}
} catch (error) {
console.error("Error processing message:", error);
return [];
}
});
} catch (error) {
console.error("Error in convertToMem0Format:", error);
return [];
}
};
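/*
 * convertToMem0Format normalizes AI SDK messages into Mem0's { role, content }
 * pairs, dropping non-text parts. Illustrative sketch:
 *
 *   convertToMem0Format([
 *     { role: "user", content: [{ type: "text", text: "Call me Alex" }, { type: "image", image: "..." }] }
 *   ]); // => [{ role: "user", content: "Call me Alex" }]
 */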
var searchInternalMemories = async (query, config, top_k = 5) => {
try {
const filters = {
AND: []
};
if (config == null ? void 0 : config.user_id) {
filters.AND.push({
user_id: config.user_id
});
}
if (config == null ? void 0 : config.app_id) {
filters.AND.push({
app_id: config.app_id
});
}
if (config == null ? void 0 : config.agent_id) {
filters.AND.push({
agent_id: config.agent_id
});
}
if (config == null ? void 0 : config.run_id) {
filters.AND.push({
run_id: config.run_id
});
}
const org_project_filters = {
org_id: config && config.org_id,
project_id: config && config.project_id,
org_name: !(config == null ? void 0 : config.org_id) ? config && config.org_name : void 0,
project_name: !(config == null ? void 0 : config.org_id) ? config && config.project_name : void 0
};
const apiKey = (0, import_provider_utils.loadApiKey)({
apiKey: config && config.mem0ApiKey,
environmentVariableName: "MEM0_API_KEY",
description: "Mem0"
});
const options = {
method: "POST",
headers: {
Authorization: `Token ${apiKey}`,
"Content-Type": "application/json"
},
body: JSON.stringify({
query,
filters,
...config,
top_k: config && config.top_k || top_k,
version: "v2",
output_format: "v1.1",
...org_project_filters
})
};
const response = await fetch("https://api.mem0.ai/v2/memories/search/", options);
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const data = await response.json();
return data;
} catch (error) {
console.error("Error in searchInternalMemories:", error);
throw error;
}
};
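/*
 * searchInternalMemories POSTs to Mem0's v2 search endpoint, building an AND
 * filter from whichever ids are present in the config. Illustrative request
 * body (the query and user id are hypothetical):
 *
 *   {
 *     "query": "what do I like to eat?",
 *     "filters": { "AND": [{ "user_id": "alice" }] },
 *     "top_k": 5, "version": "v2", "output_format": "v1.1"
 *   }
 */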
var addMemories = async (messages, config) => {
try {
let finalMessages = [];
if (typeof messages === "string") {
finalMessages = [{ role: "user", content: messages }];
} else {
finalMessages = convertToMem0Format(messages);
}
const response = await updateMemories(finalMessages, config);
return response;
} catch (error) {
console.error("Error in addMemories:", error);
throw error;
}
};
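/*
 * Illustrative usage of the exported addMemories (the user id is hypothetical):
 *
 *   await addMemories("I am vegetarian and I live in Berlin", { user_id: "alice" });
 *   // AI SDK message arrays are also accepted; they pass through convertToMem0Format first.
 */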
var updateMemories = async (messages, config) => {
try {
const apiKey = (0, import_provider_utils.loadApiKey)({
apiKey: config && config.mem0ApiKey,
environmentVariableName: "MEM0_API_KEY",
description: "Mem0"
});
const options = {
method: "POST",
headers: {
Authorization: `Token ${apiKey}`,
"Content-Type": "application/json"
},
body: JSON.stringify({ messages, ...config })
};
const response = await fetch("https://api.mem0.ai/v1/memories/", options);
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const data = await response.json();
return data;
} catch (error) {
console.error("Error in updateMemories:", error);
throw error;
}
};
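// updateMemories is the raw POST to https://api.mem0.ai/v1/memories/; addMemories
// above is the public wrapper that normalizes its input before calling it.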
var retrieveMemories = async (prompt, config) => {
var _a, _b;
try {
const message = typeof prompt === "string" ? prompt : flattenPrompt(prompt);
const systemPrompt = "These are the memories I have stored. Give more weightage to the question by users and try to answer that first. You have to modify your answer based on the memories I have provided. If the memories are irrelevant you can ignore them. Also don't reply to this section of the prompt, or the memories, they are only for your reference. The System prompt starts after text System Message: \n\n";
const memories = await searchInternalMemories(message, config);
let memoriesText1 = "";
let memoriesText2 = "";
let graphPrompt = "";
try {
memoriesText1 = (_a = memories == null ? void 0 : memories.results) == null ? void 0 : _a.map((memory) => {
return `Memory: ${memory.memory}
`;
}).join("\n\n");
if (config == null ? void 0 : config.enable_graph) {
memoriesText2 = (_b = memories == null ? void 0 : memories.relations) == null ? void 0 : _b.map((memory) => {
return `Relation: ${memory.source} -> ${memory.relationship} -> ${memory.target}
`;
}).join("\n\n");
graphPrompt = `HERE ARE THE GRAPHS RELATIONS FOR THE PREFERENCES OF THE USER:
${memoriesText2}`;
}
} catch (error) {
console.error("Error while parsing memories:", error);
}
    if (!memories || !memories.results || memories.results.length === 0) {
      return "";
    }
return `System Message: ${systemPrompt} ${memoriesText1} ${graphPrompt}`;
} catch (error) {
console.error("Error in retrieveMemories:", error);
throw error;
}
};
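/*
 * retrieveMemories returns a ready-made "System Message: ..." string, meant to be
 * prepended to the model's system context. Illustrative usage (hypothetical
 * prompt and user id; the output shown is a sketch):
 *
 *   const context = await retrieveMemories("What should I cook tonight?", { user_id: "alice" });
 *   // => "System Message: These are the memories I have stored. ... Memory: Is vegetarian ..."
 */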
var getMemories = async (prompt, config) => {
try {
const message = typeof prompt === "string" ? prompt : flattenPrompt(prompt);
const memories = await searchInternalMemories(message, config);
if (!(config == null ? void 0 : config.enable_graph)) {
return memories == null ? void 0 : memories.results;
}
return memories;
} catch (error) {
console.error("Error in getMemories:", error);
throw error;
}
};
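// Return shape note: without enable_graph, getMemories resolves to the raw results
// array; with enable_graph it resolves to the full { results, relations } object.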
var searchMemories = async (prompt, config) => {
try {
const message = typeof prompt === "string" ? prompt : flattenPrompt(prompt);
const memories = await searchInternalMemories(message, config);
return memories;
} catch (error) {
console.error("Error in searchMemories:", error);
return [];
}
};
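// searchMemories always returns the full search response and, unlike the other
// helpers, swallows errors and resolves to [] on failure.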
// src/mem0-generic-language-model.ts
var generateRandomId = () => {
return Math.random().toString(36).substring(2, 15) + Math.random().toString(36).substring(2, 15);
};
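// Note: ids built from Math.random are unique enough to tag stream chunks, but
// they are not cryptographically random.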
var Mem0GenericLanguageModel = class {
constructor(modelId, settings, config, provider_config) {
this.modelId = modelId;
this.settings = settings;
this.config = config;
this.provider_config = provider_config;
this.specificationVersion = "v2";
this.defaultObjectGenerationMode = "json";
// We don't support images for now
this.supportsImageUrls = false;
// Allow All Media Types for now
this.supportedUrls = {
"*": [/.*/]
};
var _a;
this.provider = (_a = config.provider) != null ? _a : "openai";
}
async processMemories(messagesPrompts, mem0Config) {
var _a, _b;
try {
      // Fire-and-forget: persist the incoming messages without blocking generation.
      addMemories(messagesPrompts, mem0Config).catch(() => {
        console.error("Error while adding memories");
      });
let memories = await getMemories(messagesPrompts, mem0Config);
const mySystemPrompt = "These are the memories I have stored. Give more weightage to the question by users and try to answer that first. You have to modify your answer based on the memories I have provided. If the memories are irrelevant you can ignore them. Also don't reply to this section of the prompt, or the memories, they are only for your reference. The System prompt starts after text System Message: \n\n";
const isGraphEnabled = mem0Config == null ? void 0 : mem0Config.enable_graph;
let memoriesText = "";
let memoriesText2 = "";
try {
if (isGraphEnabled) {
memoriesText = (_a = memories == null ? void 0 : memories.results) == null ? void 0 : _a.map((memory) => {
return `Memory: ${memory == null ? void 0 : memory.memory}
`;
}).join("\n\n");
memoriesText2 = (_b = memories == null ? void 0 : memories.relations) == null ? void 0 : _b.map((memory) => {
return `Relation: ${memory == null ? void 0 : memory.source} -> ${memory == null ? void 0 : memory.relationship} -> ${memory == null ? void 0 : memory.target}
`;
}).join("\n\n");
} else {
memoriesText = memories == null ? void 0 : memories.map((memory) => {
return `Memory: ${memory == null ? void 0 : memory.memory}
`;
}).join("\n\n");
}
} catch (e) {
console.error("Error while parsing memories");
}
let graphPrompt = "";
if (isGraphEnabled) {
graphPrompt = `HERE ARE THE GRAPHS RELATIONS FOR THE PREFERENCES OF THE USER:
${memoriesText2}`;
}
const memoriesPrompt = `System Message: ${mySystemPrompt} ${memoriesText} ${graphPrompt} `;
const systemPrompt = {
role: "system",
content: memoriesPrompt
};
      // `memories` is an array by default and { results, relations } when graph is
      // enabled, so handle both shapes before prepending the memory system prompt.
      const memoryCount = Array.isArray(memories) ? memories.length : memories && memories.results ? memories.results.length : 0;
      if (memoryCount > 0) {
        messagesPrompts.unshift(systemPrompt);
      }
if (isGraphEnabled) {
memories = memories == null ? void 0 : memories.results;
}
return { memories, messagesPrompts };
} catch (e) {
console.error("Error while processing memories");
return { memories: [], messagesPrompts };
}
}
async doGenerate(options) {
try {
const provider = this.config.provider;
const mem0_api_key = this.config.mem0ApiKey;
const settings = {
provider,
mem0ApiKey: mem0_api_key,
apiKey: this.config.apiKey
};
const mem0Config = {
mem0ApiKey: mem0_api_key,
...this.config.mem0Config,
...this.settings
};
const selector = new Mem0ClassSelector(this.modelId, settings, this.provider_config);
let messagesPrompts = options.prompt;
const { memories, messagesPrompts: updatedPrompts } = await this.processMemories(messagesPrompts, mem0Config);
const model = selector.createProvider();
const ans = await model.doGenerate({
...options,
prompt: updatedPrompts
});
if (!memories || (memories == null ? void 0 : memories.length) === 0) {
return ans;
}
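      // Build source metadata for the memories that informed this answer. It is
      // currently not attached to the result (see the commented-out "sources" below).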
try {
const sources = [
{
type: "source",
title: "Mem0 Memories",
sourceType: "url",
id: "mem0-" + generateRandomId(),
url: "https://app.mem0.ai",
providerMetadata: {
mem0: {
memories,
memoriesText: memories == null ? void 0 : memories.map((memory) => memory == null ? void 0 : memory.memory).join("\n\n")
}
}
}
];
} catch (e) {
console.error("Error while creating sources");
}
return {
...ans
// sources
};
} catch (error) {
console.error("Error in doGenerate:", error);
throw new Error("Failed to generate response.");
}
}
async doStream(options) {
try {
const provider = this.config.provider;
const mem0_api_key = this.config.mem0ApiKey;
const settings = {
provider,
mem0ApiKey: mem0_api_key,
apiKey: this.config.apiKey,
modelType: this.config.modelType
};
const mem0Config = {
mem0ApiKey: mem0_api_key,
...this.config.mem0Config,
...this.settings
};
const selector = new Mem0ClassSelector(this.modelId, settings, this.provider_config);
let messagesPrompts = options.prompt;
const { memories, messagesPrompts: updatedPrompts } = await this.processMemories(messagesPrompts, mem0Config);
const baseModel = selector.createProvider();
const model = (0, import_ai.wrapLanguageModel)({
model: baseModel,
middleware: (0, import_ai.simulateStreamingMiddleware)()
});
const streamResponse = await model.doStream({
...options,
prompt: updatedPrompts
});
if (!memories || (memories == null ? void 0 : memories.length) === 0) {
return streamResponse;
}
const originalStream = streamResponse.stream;
const transformStream = new TransformStream({
start(controller) {
try {
if (Array.isArray(memories) && (memories == null ? void 0 : memories.length) > 0) {
controller.enqueue({
type: "source",
title: "Mem0 Memories",
sourceType: "url",
id: "mem0-" + generateRandomId(),
url: "https://app.mem0.ai",
providerOptions: {
mem0: {
memories,
memoriesText: memories == null ? void 0 : memories.map((memory) => memory == null ? void 0 : memory.memory).join("\n\n")
}
}
});
memories == null ? void 0 : memories.forEach((memory) => {
controller.enqueue({
type: "source",
title: (memory == null ? void 0 : memory.title) || "Memory",
sourceType: "url",
id: "mem0-memory-" + generateRandomId(),
url: "https://app.mem0.ai",
providerOptions: {
mem0: {
memory,
memoryText: memory == null ? void 0 : memory.memory
}
}
});
});
}
} catch (error) {
console.error("Error adding memory sources:", error);
}
},
transform(chunk, controller) {
controller.enqueue(chunk);
}
});
const enhancedStream = originalStream.pipeThrough(transformStream);
return {
stream: enhancedStream,
request: streamResponse.request,
response: streamResponse.response
};
} catch (error) {
console.error("Error in doStream:", error);
throw new Error("Streaming failed or method not implemented.");
}
}
};
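// Mem0GenericLanguageModel is the LanguageModelV2 implementation that createMem0()
// and the Mem0 facade below hand to the Vercel AI SDK.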
// src/mem0-facade.ts
var Mem0 = class {
constructor(options = {
provider: "openai"
}) {
var _a;
this.baseURL = (_a = (0, import_provider_utils2.withoutTrailingSlash)(options.baseURL)) != null ? _a : "http://127.0.0.1:11434/api";
this.headers = options.headers;
}
get baseConfig() {
return {
baseURL: this.baseURL,
headers: this.headers
};
}
chat(modelId, settings = {}) {
return new Mem0GenericLanguageModel(modelId, settings, {
provider: "openai",
modelType: "chat",
...this.baseConfig
});
}
completion(modelId, settings = {}) {
return new Mem0GenericLanguageModel(modelId, settings, {
provider: "openai",
modelType: "completion",
...this.baseConfig
});
}
};
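/*
 * Illustrative facade usage (model ids are assumptions, not values from this file):
 *
 *   const m = new Mem0({ provider: "openai" });
 *   const chatModel = m.chat("gpt-4o");                         // modelType: "chat"
 *   const completionModel = m.completion("gpt-3.5-turbo-instruct"); // modelType: "completion"
 */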
// src/mem0-provider.ts
var import_provider_utils3 = require("@ai-sdk/provider-utils");
function createMem0(options = {
provider: "openai"
}) {
var _a;
  const baseURL = (_a = (0, import_provider_utils3.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com";
const getHeaders = () => ({
...options.headers
});
const createGenericModel = (modelId, settings = {}) => new Mem0GenericLanguageModel(
modelId,
settings,
{
baseURL,
fetch: options.fetch,
headers: getHeaders(),
provider: options.provider || "openai",
name: options.name,
mem0ApiKey: options.mem0ApiKey,
apiKey: options.apiKey,
mem0Config: options.mem0Config
},
options.config
);
const createCompletionModel = (modelId, settings = {}) => new Mem0GenericLanguageModel(
modelId,
settings,
{
baseURL,
fetch: options.fetch,
headers: getHeaders(),
provider: options.provider || "openai",
name: options.name,
mem0ApiKey: options.mem0ApiKey,
apiKey: options.apiKey,
mem0Config: options.mem0Config,
modelType: "completion"
},
options.config
);
const createChatModel = (modelId, settings = {}) => new Mem0GenericLanguageModel(
modelId,
settings,
{
baseURL,
fetch: options.fetch,
headers: getHeaders(),
provider: options.provider || "openai",
name: options.name,
mem0ApiKey: options.mem0ApiKey,
apiKey: options.apiKey,
mem0Config: options.mem0Config,
modelType: "completion"
},
options.config
);
const provider = function(modelId, settings = {}) {
if (new.target) {
throw new Error(
"The Mem0 model function cannot be called with the new keyword."
);
}
return createGenericModel(modelId, settings);
};
provider.languageModel = createGenericModel;
provider.completion = createCompletionModel;
provider.chat = createChatModel;
return provider;
}
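/*
 * Illustrative end-to-end usage with the Vercel AI SDK, inside an async function
 * (keys and the user id are hypothetical; generateText comes from the `ai` package):
 *
 *   const { generateText } = require("ai");
 *   const mem0 = createMem0({ provider: "openai", mem0ApiKey: "m0-...", apiKey: "sk-..." });
 *   const { text } = await generateText({
 *     model: mem0("gpt-4o", { user_id: "alice" }),
 *     prompt: "Suggest a restaurant for tonight."
 *   });
 */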
var mem0 = createMem0();
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
Mem0,
addMemories,
createMem0,
getMemories,
mem0,
retrieveMemories,
searchMemories
});
//# sourceMappingURL=index.js.map