// supermemory-ai-provider
// Version:
// Vercel AI Provider for providing memory to LLMs using Supermemory
// 101 lines (94 loc) • 4.83 kB
// TypeScript
import { ProviderV1, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt } from '@ai-sdk/provider';
import { OpenAIProviderSettings } from '@ai-sdk/openai';
import { OpenAIChatSettings } from '@ai-sdk/openai/internal';
import { AnthropicMessagesSettings } from '@ai-sdk/anthropic/internal';
import { AnthropicProviderSettings } from '@ai-sdk/anthropic';
import { CohereProviderSettings } from '@ai-sdk/cohere';
import { GroqProviderSettings } from '@ai-sdk/groq';
import Supermemory$1 from 'supermemory';
/**
 * Provider entry point for the Supermemory AI SDK integration.
 *
 * Extends the Vercel AI SDK `ProviderV1` contract. The provider is callable
 * directly (shorthand) and also exposes `chat`, `completion`, and
 * `languageModel` methods; every entry point takes a model id plus optional
 * settings and returns a `LanguageModelV1`.
 */
interface SupermemoryProvider extends ProviderV1 {
// Shorthand call signature — equivalent entry point to the named methods below.
(modelId: SupermemoryChatModelId, settings?: SupermemoryChatSettings): LanguageModelV1;
chat(modelId: SupermemoryChatModelId, settings?: SupermemoryChatSettings): LanguageModelV1;
completion(modelId: SupermemoryChatModelId, settings?: SupermemoryChatSettings): LanguageModelV1;
languageModel(modelId: SupermemoryChatModelId, settings?: SupermemoryChatSettings): LanguageModelV1;
}
/**
 * Settings accepted by `createSupermemory` when constructing a provider.
 *
 * Inherits all chat settings from both the OpenAI and Anthropic SDK
 * internals, so underlying-provider options can be passed straight through.
 */
interface SupermemoryProviderSettings extends OpenAIChatSettings, AnthropicMessagesSettings {
/**
 * Base URL for API requests. NOTE(review): default value is not visible in
 * this declaration file — confirm against the implementation.
 */
baseURL?: string;
/**
* Custom fetch implementation. You can use it as a middleware to intercept
* requests or to provide a custom fetch implementation for e.g. testing
*/
fetch?: typeof fetch;
/**
* @internal
*/
generateId?: () => string;
/**
* Custom headers to include in the requests.
*/
headers?: Record<string, string>;
/**
 * Provider name. NOTE(review): presumably used as the reported provider id
 * on created models — confirm against the implementation.
 */
name?: string;
/**
 * API key for the Supermemory service itself (distinct from `apiKey`, which
 * presumably targets the underlying LLM provider — verify against callers).
 */
supermemoryApiKey?: string;
apiKey?: string;
/**
 * Identifier of the underlying LLM provider. NOTE(review): accepted values
 * are not visible here — likely correspond to the `config` union members.
 */
provider?: string;
/** Which model kind to create: `"completion"` or `"chat"`. */
modelType?: "completion" | "chat";
/** Memory/search options forwarded to Supermemory (see `SupermemoryConfig`). */
supermemoryConfig?: SupermemoryConfig;
/**
* The configuration for the provider.
*/
config?: OpenAIProviderSettings | AnthropicProviderSettings | CohereProviderSettings | GroqProviderSettings;
}
declare function createSupermemory(options?: SupermemoryProviderSettings): SupermemoryProvider;
declare const supermemory: SupermemoryProvider;
type SupermemoryChatModelId = string & NonNullable<unknown>;
/**
 * A single memory returned by the search/retrieval helpers
 * (`searchMemories`, `getMemories`).
 */
interface MemoryResult {
// The memory text itself; the only required field.
memory: string;
title?: string;
content?: string;
// Arbitrary metadata attached to the memory.
metadata?: Record<string, unknown>;
}
/**
 * Options controlling how memories are stored and searched.
 *
 * Field names use snake_case to mirror the Supermemory API's wire format.
 * NOTE(review): individual semantics (e.g. `threshold` range, `rerank`
 * behavior) are not visible in this declaration file — confirm against the
 * Supermemory API documentation.
 */
interface SupermemoryConfigSettings {
// Scope memories to a particular end user.
user_id?: string;
metadata?: Record<string, unknown>;
filters?: Record<string, unknown>;
// Pagination controls for listing/search results.
page?: number;
page_size?: number;
supermemoryApiKey?: string;
// Maximum number of memories to retrieve per query.
top_k?: number;
threshold?: number;
rerank?: boolean;
enable_graph?: boolean;
}
/**
 * Combined per-model configuration: memory/search options plus the full set
 * of provider settings. Used as the `config` passed to
 * `SupermemoryGenericLanguageModel`.
 */
interface SupermemoryChatConfig extends SupermemoryConfigSettings, SupermemoryProviderSettings {
}
/**
 * Alias of {@link SupermemoryConfigSettings} kept as a named extension point
 * (empty `extends` interface) for the `supermemoryConfig` provider option.
 */
interface SupermemoryConfig extends SupermemoryConfigSettings {
}
/**
 * Per-call model settings: OpenAI and Anthropic chat settings merged with
 * Supermemory's own memory/search options.
 */
interface SupermemoryChatSettings extends OpenAIChatSettings, AnthropicMessagesSettings, SupermemoryConfigSettings {
}
/**
 * Language model implementing the Vercel AI SDK `LanguageModelV1` spec
 * (specification version "v1") that augments calls with Supermemory
 * memories before delegating to the configured underlying provider.
 */
declare class SupermemoryGenericLanguageModel implements LanguageModelV1 {
readonly modelId: SupermemoryChatModelId;
readonly settings: SupermemoryChatSettings;
readonly config: SupermemoryChatConfig;
// Optional provider-level settings; kept separate from per-model `config`.
readonly provider_config?: SupermemoryProviderSettings | undefined;
readonly specificationVersion = "v1";
// Object generation falls back to JSON mode; image URLs are not supported.
readonly defaultObjectGenerationMode = "json";
readonly supportsImageUrls = false;
constructor(modelId: SupermemoryChatModelId, settings: SupermemoryChatSettings, config: SupermemoryChatConfig, provider_config?: SupermemoryProviderSettings | undefined);
provider: string;
// Internal hook — presumably injects retrieved memories into the prompt;
// implementation not visible in this declaration file.
private processMemories;
// One-shot and streaming generation per the LanguageModelV1 contract.
doGenerate(options: LanguageModelV1CallOptions): Promise<Awaited<ReturnType<LanguageModelV1["doGenerate"]>>>;
doStream(options: LanguageModelV1CallOptions): Promise<Awaited<ReturnType<LanguageModelV1["doStream"]>>>;
}
/**
 * Class-based alternative to the callable provider: holds base URL and
 * headers, and constructs `SupermemoryGenericLanguageModel` instances for
 * chat or completion use.
 */
declare class Supermemory {
readonly baseURL: string;
readonly headers?: Record<string, string>;
constructor(options?: SupermemoryProviderSettings);
// Shared config assembled from constructor options; implementation not
// visible in this declaration file.
private get baseConfig();
chat(modelId: SupermemoryChatModelId, settings?: SupermemoryChatSettings): SupermemoryGenericLanguageModel;
completion(modelId: SupermemoryChatModelId, settings?: SupermemoryChatSettings): SupermemoryGenericLanguageModel;
}
declare const searchMemories: (query: string, config?: SupermemoryConfigSettings, top_k?: number) => Promise<MemoryResult[]>;
declare const addMemories: (messages: LanguageModelV1Prompt, config?: SupermemoryConfigSettings) => Promise<Supermemory$1.Memories.MemoryAddResponse[]>;
declare const getMemories: (prompt: LanguageModelV1Prompt | string, config?: SupermemoryConfigSettings) => Promise<MemoryResult[]>;
declare const retrieveMemories: (prompt: LanguageModelV1Prompt | string, config?: SupermemoryConfigSettings) => Promise<string>;
export { Supermemory, type SupermemoryChatConfig, type SupermemoryChatSettings, type SupermemoryConfigSettings, type SupermemoryProvider, type SupermemoryProviderSettings, addMemories, createSupermemory, getMemories, retrieveMemories, searchMemories, supermemory };