
agentdb


AgentDB - Frontier Memory Features with MCP Integration and Direct Vector Search: Causal reasoning, reflexion memory, skill library, automated learning, and raw vector similarity queries. 150x faster vector search. Full Claude Desktop support via Model Context Protocol.
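For reference, a minimal Claude Desktop registration for this server might look like the sketch below. This is illustrative only: the exact command depends on how the package's bin entry is installed; the AGENTDB_PATH environment variable is the one the server reads at startup (see the code below).

{
  "mcpServers": {
    "agentdb": {
      "command": "npx",
      "args": ["-y", "agentdb"],
      "env": { "AGENTDB_PATH": "/path/to/agentdb.db" }
    }
  }
}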

#!/usr/bin/env node
/**
 * AgentDB MCP Server
 * Production-ready MCP server for Claude Desktop integration
 * Exposes AgentDB frontier memory features + core vector DB operations via MCP protocol
 */

import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import {
  CallToolRequestSchema,
  ListToolsRequestSchema,
} from '@modelcontextprotocol/sdk/types.js';

// Database type from db-fallback
type Database = any;
import { createDatabase } from '../db-fallback.js';
import { CausalMemoryGraph } from '../controllers/CausalMemoryGraph.js';
import { CausalRecall } from '../controllers/CausalRecall.js';
import { ReflexionMemory } from '../controllers/ReflexionMemory.js';
import { SkillLibrary } from '../controllers/SkillLibrary.js';
import { NightlyLearner } from '../controllers/NightlyLearner.js';
import { LearningSystem } from '../controllers/LearningSystem.js';
import { EmbeddingService } from '../controllers/EmbeddingService.js';
import { BatchOperations } from '../optimizations/BatchOperations.js';
import { ReasoningBank } from '../controllers/ReasoningBank.js';
import {
  validateId,
  validateTimestamp,
  validateSessionId,
  ValidationError,
  handleSecurityError,
} from '../security/input-validation.js';
import * as path from 'path';
import * as fs from 'fs';
import { fileURLToPath } from 'url';
import { dirname } from 'path';

// ES module equivalent of __dirname
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

// ============================================================================
// Helper Functions for Core Vector DB Operations
// ============================================================================

/**
 * Initialize database schema
 */
function initializeSchema(database: any): void {
  const db = database;

  // Episodes table (vector store)
  db.exec(`
    CREATE TABLE IF NOT EXISTS episodes (
      id INTEGER PRIMARY KEY AUTOINCREMENT,
      ts INTEGER DEFAULT (strftime('%s', 'now')),
      session_id TEXT NOT NULL,
      task TEXT NOT NULL,
      input TEXT,
      output TEXT,
      critique TEXT,
      reward REAL NOT NULL,
      success INTEGER NOT NULL,
      latency_ms INTEGER,
      tokens_used INTEGER,
      tags TEXT,
      metadata TEXT
    );
    CREATE INDEX IF NOT EXISTS idx_episodes_session ON episodes(session_id);
    CREATE INDEX IF NOT EXISTS idx_episodes_task ON episodes(task);
    CREATE INDEX IF NOT EXISTS idx_episodes_reward ON episodes(reward);
    CREATE INDEX IF NOT EXISTS idx_episodes_success ON episodes(success);
  `);

  // Episode embeddings (vector storage)
  db.exec(`
    CREATE TABLE IF NOT EXISTS episode_embeddings (
      episode_id INTEGER PRIMARY KEY,
      embedding BLOB NOT NULL,
      FOREIGN KEY (episode_id) REFERENCES episodes(id) ON DELETE CASCADE
    );
  `);

  // Skills table
  db.exec(`
    CREATE TABLE IF NOT EXISTS skills (
      id INTEGER PRIMARY KEY AUTOINCREMENT,
      ts INTEGER DEFAULT (strftime('%s', 'now')),
      name TEXT UNIQUE NOT NULL,
      description TEXT NOT NULL,
      signature TEXT,
      code TEXT,
      success_rate REAL DEFAULT 0.0,
      uses INTEGER DEFAULT 0,
      avg_reward REAL DEFAULT 0.0,
      avg_latency_ms REAL DEFAULT 0.0,
      tags TEXT,
      metadata TEXT
    );
    CREATE INDEX IF NOT EXISTS idx_skills_success_rate ON skills(success_rate);
  `);

  // Skill embeddings
  db.exec(`
    CREATE TABLE IF NOT EXISTS skill_embeddings (
      skill_id INTEGER PRIMARY KEY,
      embedding BLOB NOT NULL,
      FOREIGN KEY (skill_id) REFERENCES skills(id) ON DELETE CASCADE
    );
  `);

  // Causal edges table
  db.exec(`
    CREATE TABLE IF NOT EXISTS causal_edges (
      id INTEGER PRIMARY KEY AUTOINCREMENT,
      ts INTEGER DEFAULT (strftime('%s', 'now')),
      from_memory_id INTEGER NOT NULL,
      from_memory_type TEXT NOT NULL,
      to_memory_id INTEGER NOT NULL,
      to_memory_type TEXT NOT NULL,
      similarity REAL DEFAULT 0.0,
      uplift REAL NOT NULL,
      confidence REAL DEFAULT 0.95,
      sample_size INTEGER DEFAULT 0,
      evidence_ids TEXT,
      experiment_ids TEXT,
      confounder_score REAL DEFAULT 0.0,
      mechanism TEXT,
      metadata TEXT
    );
    CREATE INDEX IF NOT EXISTS idx_causal_from ON causal_edges(from_memory_id, from_memory_type);
    CREATE INDEX IF NOT EXISTS idx_causal_to ON causal_edges(to_memory_id, to_memory_type);
    CREATE INDEX IF NOT EXISTS idx_causal_uplift ON causal_edges(uplift);
  `);

  // Causal experiments
  db.exec(`
    CREATE TABLE IF NOT EXISTS causal_experiments (
      id INTEGER PRIMARY KEY AUTOINCREMENT,
      ts INTEGER DEFAULT (strftime('%s', 'now')),
      intervention_id INTEGER NOT NULL,
      control_outcome REAL NOT NULL,
      treatment_outcome REAL NOT NULL,
      uplift REAL NOT NULL,
      sample_size INTEGER DEFAULT 1,
      metadata TEXT
    );
  `);

  // Causal observations
  db.exec(`
    CREATE TABLE IF NOT EXISTS causal_observations (
      id INTEGER PRIMARY KEY AUTOINCREMENT,
      ts INTEGER DEFAULT (strftime('%s', 'now')),
      action TEXT NOT NULL,
      outcome TEXT NOT NULL,
      reward REAL NOT NULL,
      session_id TEXT,
      metadata TEXT
    );
  `);

  // Provenance certificates
  db.exec(`
    CREATE TABLE IF NOT EXISTS provenance_certificates (
      id TEXT PRIMARY KEY,
      ts INTEGER DEFAULT (strftime('%s', 'now')),
      query_id TEXT NOT NULL,
      agent_id TEXT NOT NULL,
      query_text TEXT NOT NULL,
      retrieval_method TEXT NOT NULL,
      source_ids TEXT NOT NULL,
      certificate_hash TEXT NOT NULL,
      metadata TEXT
    );
  `);
}

/**
 * Serialize embedding to BLOB
 */
function serializeEmbedding(embedding: Float32Array): Buffer {
  return Buffer.from(embedding.buffer);
}

/**
 * Deserialize embedding from BLOB
 */
function deserializeEmbedding(blob: Buffer): Float32Array {
  return new Float32Array(blob.buffer, blob.byteOffset, blob.byteLength / 4);
}

/**
 * Calculate cosine similarity between two vectors
 */
function cosineSimilarity(a: Float32Array, b: Float32Array): number {
  if (a.length !== b.length) {
    throw new Error('Vectors must have same length');
  }
  let dotProduct = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dotProduct += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB));
}
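// Illustrative check of the helpers above (not executed): for a = [1, 0] and
// b = [0.6, 0.8], dot = 0.6 and both norms are 1, so cosineSimilarity(a, b) = 0.6.
// serializeEmbedding/deserializeEmbedding are a lossless round-trip over the same
// 4-byte little-endian floats: deserializeEmbedding(serializeEmbedding(v)) ≈ v.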
// ============================================================================
// Initialize Database and Controllers
// ============================================================================

const dbPath = process.env.AGENTDB_PATH || './agentdb.db';
const db = await createDatabase(dbPath);

// Configure for performance
db.pragma('journal_mode = WAL');
db.pragma('synchronous = NORMAL');
db.pragma('cache_size = -64000');
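// Note on the pragmas above: WAL lets readers proceed concurrently with a single
// writer, synchronous = NORMAL trades a small durability window for fewer fsyncs,
// and SQLite reads a negative cache_size as KiB, so -64000 is roughly 64 MB of
// page cache (matching the "cache_size=64MB" message printed by agentdb_init).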
// Initialize schema automatically on server start using SQL files
const schemaPath = path.join(__dirname, '../schemas/schema.sql');
const frontierSchemaPath = path.join(__dirname, '../schemas/frontier-schema.sql');

try {
  if (fs.existsSync(schemaPath)) {
    const schemaSQL = fs.readFileSync(schemaPath, 'utf-8');
    db.exec(schemaSQL);
    console.error('✅ Main schema loaded');
  }
  if (fs.existsSync(frontierSchemaPath)) {
    const frontierSQL = fs.readFileSync(frontierSchemaPath, 'utf-8');
    db.exec(frontierSQL);
    console.error('✅ Frontier schema loaded');
  }
  console.error('✅ Database schema initialized');
} catch (error) {
  console.error('⚠️ Schema initialization failed, using fallback:', (error as Error).message);
  // Fallback to initializeSchema function if SQL files not found
  initializeSchema(db);
}

// Initialize embedding service
const embeddingService = new EmbeddingService({
  model: 'Xenova/all-MiniLM-L6-v2',
  dimension: 384,
  provider: 'transformers',
});
await embeddingService.initialize();

// Initialize all controllers
const causalGraph = new CausalMemoryGraph(db);
const reflexion = new ReflexionMemory(db, embeddingService);
const skills = new SkillLibrary(db, embeddingService);
const causalRecall = new CausalRecall(db, embeddingService);
const learner = new NightlyLearner(db, embeddingService);
const learningSystem = new LearningSystem(db, embeddingService);
const batchOps = new BatchOperations(db, embeddingService);
const reasoningBank = new ReasoningBank(db, embeddingService);

// ============================================================================
// MCP Server Setup
// ============================================================================

const server = new Server(
  {
    name: 'agentdb',
    version: '1.3.0',
  },
  {
    capabilities: {
      tools: {},
    },
  }
);

// ============================================================================
// Tool Definitions
// ============================================================================

const tools = [
  // ==========================================================================
  // CORE VECTOR DB OPERATIONS (NEW)
  // ==========================================================================
  {
    name: 'agentdb_init',
    description: 'Initialize AgentDB database with schema and optimizations. Creates all required tables for vector storage, causal memory, skills, and provenance tracking.',
    inputSchema: {
      type: 'object',
      properties: {
        db_path: { type: 'string', description: 'Database file path (optional, defaults to ./agentdb.db)', default: './agentdb.db' },
        reset: { type: 'boolean', description: 'Reset database (delete existing)', default: false },
      },
    },
  },
  {
    name: 'agentdb_insert',
    description: 'Insert a single vector with metadata into AgentDB. Automatically generates embeddings for the provided text.',
    inputSchema: {
      type: 'object',
      properties: {
        text: { type: 'string', description: 'Text content to embed and store' },
        metadata: { type: 'object', description: 'Additional metadata (JSON object)' },
        session_id: { type: 'string', description: 'Session identifier', default: 'default' },
        tags: { type: 'array', items: { type: 'string' }, description: 'Tags for categorization' },
      },
      required: ['text'],
    },
  },
  {
    name: 'agentdb_insert_batch',
    description: 'Batch insert multiple vectors efficiently using transactions and parallel embedding generation. Optimized for large datasets.',
    inputSchema: {
      type: 'object',
      properties: {
        items: {
          type: 'array',
          items: {
            type: 'object',
            properties: {
              text: { type: 'string', description: 'Text content to embed' },
              metadata: { type: 'object', description: 'Metadata (JSON)' },
              session_id: { type: 'string', description: 'Session ID' },
              tags: { type: 'array', items: { type: 'string' } },
            },
            required: ['text'],
          },
          description: 'Array of items to insert',
        },
        batch_size: { type: 'number', description: 'Batch size for processing', default: 100 },
      },
      required: ['items'],
    },
  },
  {
    name: 'agentdb_search',
    description: 'Semantic k-NN vector search using cosine similarity. Returns the most relevant results ranked by similarity score.',
    inputSchema: {
      type: 'object',
      properties: {
        query: { type: 'string', description: 'Search query text' },
        k: { type: 'number', description: 'Number of results to return', default: 10 },
        min_similarity: { type: 'number', description: 'Minimum similarity threshold (0-1)', default: 0.0 },
        filters: {
          type: 'object',
          properties: {
            session_id: { type: 'string', description: 'Filter by session ID' },
            tags: { type: 'array', items: { type: 'string' }, description: 'Filter by tags' },
            min_reward: { type: 'number', description: 'Minimum reward threshold' },
          },
          description: 'Optional filters',
        },
      },
      required: ['query'],
    },
  },
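  // Illustrative MCP request for the agentdb_search tool above (shape follows the
  // schema; the argument values are made-up examples, not executed here):
  // { "method": "tools/call",
  //   "params": { "name": "agentdb_search",
  //               "arguments": { "query": "deploy failures", "k": 5, "min_similarity": 0.35 } } }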
  {
    name: 'agentdb_delete',
    description: 'Delete vector(s) from AgentDB by ID or filters. Supports single ID deletion or bulk deletion with conditions.',
    inputSchema: {
      type: 'object',
      properties: {
        id: { type: 'number', description: 'Specific vector ID to delete' },
        filters: {
          type: 'object',
          properties: {
            session_id: { type: 'string', description: 'Delete all vectors with this session ID' },
            before_timestamp: { type: 'number', description: 'Delete vectors before this Unix timestamp' },
          },
          description: 'Bulk deletion filters (used if id not provided)',
        },
      },
    },
  },

  // ==========================================================================
  // FRONTIER MEMORY FEATURES (EXISTING)
  // ==========================================================================
  {
    name: 'reflexion_store',
    description: 'Store an episode with self-critique for reflexion-based learning',
    inputSchema: {
      type: 'object',
      properties: {
        session_id: { type: 'string', description: 'Session identifier' },
        task: { type: 'string', description: 'Task description' },
        reward: { type: 'number', description: 'Task reward (0-1)' },
        success: { type: 'boolean', description: 'Whether task succeeded' },
        critique: { type: 'string', description: 'Self-critique reflection' },
        input: { type: 'string', description: 'Task input' },
        output: { type: 'string', description: 'Task output' },
        latency_ms: { type: 'number', description: 'Execution latency' },
        tokens: { type: 'number', description: 'Tokens used' },
      },
      required: ['session_id', 'task', 'reward', 'success'],
    },
  },
  {
    name: 'reflexion_retrieve',
    description: 'Retrieve relevant past episodes for learning from experience',
    inputSchema: {
      type: 'object',
      properties: {
        task: { type: 'string', description: 'Task to find similar episodes for' },
        k: { type: 'number', description: 'Number of episodes to retrieve', default: 5 },
        only_failures: { type: 'boolean', description: 'Only retrieve failures' },
        only_successes: { type: 'boolean', description: 'Only retrieve successes' },
        min_reward: { type: 'number', description: 'Minimum reward threshold' },
      },
      required: ['task'],
    },
  },
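  // Illustrative reflexion round-trip over the two tools above (example arguments,
  // not executed):
  // reflexion_store    → { "session_id": "s1", "task": "parse CSV", "reward": 0.2,
  //                        "success": false, "critique": "forgot quoted commas" }
  // reflexion_retrieve → { "task": "parse CSV", "k": 3, "only_failures": true }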
  {
    name: 'skill_create',
    description: 'Create a reusable skill in the skill library',
    inputSchema: {
      type: 'object',
      properties: {
        name: { type: 'string', description: 'Skill name' },
        description: { type: 'string', description: 'What the skill does' },
        code: { type: 'string', description: 'Skill implementation code' },
        success_rate: { type: 'number', description: 'Initial success rate' },
      },
      required: ['name', 'description'],
    },
  },
  {
    name: 'skill_search',
    description: 'Search for applicable skills by semantic similarity',
    inputSchema: {
      type: 'object',
      properties: {
        task: { type: 'string', description: 'Task to find skills for' },
        k: { type: 'number', description: 'Number of skills to return', default: 10 },
        min_success_rate: { type: 'number', description: 'Minimum success rate filter' },
      },
      required: ['task'],
    },
  },
  {
    name: 'causal_add_edge',
    description: 'Add a causal relationship between actions and outcomes',
    inputSchema: {
      type: 'object',
      properties: {
        cause: { type: 'string', description: 'Causal action/intervention' },
        effect: { type: 'string', description: 'Observed effect/outcome' },
        uplift: { type: 'number', description: 'Causal uplift magnitude' },
        confidence: { type: 'number', description: 'Confidence in causal claim (0-1)', default: 0.95 },
        sample_size: { type: 'number', description: 'Number of observations', default: 0 },
      },
      required: ['cause', 'effect', 'uplift'],
    },
  },
  {
    name: 'causal_query',
    description: 'Query causal effects to understand what actions cause what outcomes',
    inputSchema: {
      type: 'object',
      properties: {
        cause: { type: 'string', description: 'Filter by cause (optional)' },
        effect: { type: 'string', description: 'Filter by effect (optional)' },
        min_confidence: { type: 'number', description: 'Minimum confidence', default: 0.5 },
        min_uplift: { type: 'number', description: 'Minimum uplift', default: 0.0 },
        limit: { type: 'number', description: 'Maximum results', default: 10 },
      },
    },
  },
  {
    name: 'recall_with_certificate',
    description: 'Retrieve memories with causal utility scoring and provenance certificate',
    inputSchema: {
      type: 'object',
      properties: {
        query: { type: 'string', description: 'Query for memory retrieval' },
        k: { type: 'number', description: 'Number of results', default: 12 },
        alpha: { type: 'number', description: 'Similarity weight', default: 0.7 },
        beta: { type: 'number', description: 'Causal uplift weight', default: 0.2 },
        gamma: { type: 'number', description: 'Recency weight', default: 0.1 },
      },
      required: ['query'],
    },
  },
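  // Reading of recall_with_certificate's parameters (an interpretation of the schema,
  // not a guarantee): the descriptions suggest a blended ranking of roughly
  //   score ≈ alpha·similarity + beta·uplift + gamma·recency,
  // with the defaults 0.7 / 0.2 / 0.1 summing to 1. The actual scoring lives in the
  // CausalRecall controller.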
  {
    name: 'learner_discover',
    description: 'Automatically discover causal patterns from episode history',
    inputSchema: {
      type: 'object',
      properties: {
        min_attempts: { type: 'number', description: 'Minimum attempts required', default: 3 },
        min_success_rate: { type: 'number', description: 'Minimum success rate', default: 0.6 },
        min_confidence: { type: 'number', description: 'Minimum statistical confidence', default: 0.7 },
        dry_run: { type: 'boolean', description: 'Preview without storing', default: false },
      },
    },
  },
  {
    name: 'db_stats',
    description: 'Get database statistics showing record counts',
    inputSchema: {
      type: 'object',
      properties: {},
    },
  },

  // ==========================================================================
  // LEARNING SYSTEM TOOLS (v1.3.0 - Tools 1-5)
  // ==========================================================================
  {
    name: 'learning_start_session',
    description: 'Start a new reinforcement learning session with specified algorithm and configuration. Supports 9 RL algorithms: q-learning, sarsa, dqn, policy-gradient, actor-critic, ppo, decision-transformer, mcts, model-based.',
    inputSchema: {
      type: 'object',
      properties: {
        user_id: { type: 'string', description: 'User identifier for the learning session' },
        session_type: {
          type: 'string',
          description: 'RL algorithm type',
          enum: ['q-learning', 'sarsa', 'dqn', 'policy-gradient', 'actor-critic', 'ppo', 'decision-transformer', 'mcts', 'model-based'],
        },
        config: {
          type: 'object',
          description: 'Learning configuration parameters',
          properties: {
            learning_rate: { type: 'number', description: 'Learning rate (0-1)', default: 0.01 },
            discount_factor: { type: 'number', description: 'Discount factor gamma (0-1)', default: 0.99 },
            exploration_rate: { type: 'number', description: 'Epsilon for epsilon-greedy exploration (0-1)', default: 0.1 },
            batch_size: { type: 'number', description: 'Batch size for training', default: 32 },
            target_update_frequency: { type: 'number', description: 'Update frequency for target network', default: 100 },
          },
          required: ['learning_rate', 'discount_factor'],
        },
      },
      required: ['user_id', 'session_type', 'config'],
    },
  },
  {
    name: 'learning_end_session',
    description: 'End an active learning session and save the final trained policy to the database.',
    inputSchema: {
      type: 'object',
      properties: {
        session_id: { type: 'string', description: 'Session ID to end' },
      },
      required: ['session_id'],
    },
  },
  {
    name: 'learning_predict',
    description: 'Get AI-recommended action for a given state with confidence scores and alternative actions.',
    inputSchema: {
      type: 'object',
      properties: {
        session_id: { type: 'string', description: 'Learning session ID' },
        state: { type: 'string', description: 'Current state description' },
      },
      required: ['session_id', 'state'],
    },
  },
  {
    name: 'learning_feedback',
    description: 'Submit feedback on action quality to train the RL policy. Feedback includes reward signal and outcome state.',
    inputSchema: {
      type: 'object',
      properties: {
        session_id: { type: 'string', description: 'Learning session ID' },
        state: { type: 'string', description: 'State where action was taken' },
        action: { type: 'string', description: 'Action that was executed' },
        reward: { type: 'number', description: 'Reward received (higher is better)' },
        next_state: { type: 'string', description: 'Resulting state after action (optional)' },
        success: { type: 'boolean', description: 'Whether the action was successful' },
      },
      required: ['session_id', 'state', 'action', 'reward', 'success'],
    },
  },
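  // Illustrative RL loop over these tools plus learning_train (defined next); example
  // arguments only, not executed:
  // 1. learning_start_session → { "user_id": "u1", "session_type": "q-learning",
  //      "config": { "learning_rate": 0.05, "discount_factor": 0.9 } }
  // 2. learning_predict  → { "session_id": "<id>", "state": "queue depth high" }
  // 3. learning_feedback → { "session_id": "<id>", "state": "queue depth high",
  //      "action": "scale_out", "reward": 1.0, "success": true }
  // 4. learning_train    → { "session_id": "<id>", "epochs": 50 }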
  {
    name: 'learning_train',
    description: 'Train the RL policy using batch learning with collected experiences. Returns training metrics including loss, average reward, and convergence rate.',
    inputSchema: {
      type: 'object',
      properties: {
        session_id: { type: 'string', description: 'Learning session ID to train' },
        epochs: { type: 'number', description: 'Number of training epochs', default: 50 },
        batch_size: { type: 'number', description: 'Batch size for training', default: 32 },
        learning_rate: { type: 'number', description: 'Learning rate for this training run', default: 0.01 },
      },
      required: ['session_id'],
    },
  },

  // ==========================================================================
  // LEARNING SYSTEM TOOLS (v1.4.0 - Tools 6-10)
  // ==========================================================================
  {
    name: 'learning_metrics',
    description: 'Get learning performance metrics including success rates, rewards, and policy improvement',
    inputSchema: {
      type: 'object',
      properties: {
        session_id: { type: 'string', description: 'Optional session ID to filter metrics' },
        time_window_days: { type: 'number', description: 'Time window in days for metrics (default: 7)', default: 7 },
        include_trends: { type: 'boolean', description: 'Include trend analysis over time', default: true },
        group_by: { type: 'string', description: 'Group metrics by task/session/skill', enum: ['task', 'session', 'skill'], default: 'task' },
      },
    },
  },
  {
    name: 'learning_transfer',
    description: 'Transfer learning between sessions or tasks, enabling knowledge reuse across different contexts',
    inputSchema: {
      type: 'object',
      properties: {
        source_session: { type: 'string', description: 'Source session ID to transfer from' },
        target_session: { type: 'string', description: 'Target session ID to transfer to' },
        source_task: { type: 'string', description: 'Source task pattern to transfer from' },
        target_task: { type: 'string', description: 'Target task pattern to transfer to' },
        min_similarity: { type: 'number', description: 'Minimum similarity threshold (0-1)', default: 0.7, minimum: 0, maximum: 1 },
        transfer_type: { type: 'string', description: 'Type of transfer', enum: ['episodes', 'skills', 'causal_edges', 'all'], default: 'all' },
        max_transfers: { type: 'number', description: 'Maximum number of items to transfer', default: 10 },
      },
    },
  },
  {
    name: 'learning_explain',
    description: 'Explain action recommendations with confidence scores and supporting evidence from past experiences',
    inputSchema: {
      type: 'object',
      properties: {
        query: { type: 'string', description: 'Query or task description to get recommendations for' },
        k: { type: 'number', description: 'Number of recommendations to return', default: 5 },
        explain_depth: { type: 'string', description: 'Explanation detail level', enum: ['summary', 'detailed', 'full'], default: 'detailed' },
        include_confidence: { type: 'boolean', description: 'Include confidence scores', default: true },
        include_evidence: { type: 'boolean', description: 'Include supporting evidence from past episodes', default: true },
        include_causal: { type: 'boolean', description: 'Include causal reasoning chains', default: true },
      },
      required: ['query'],
    },
  },
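  // Illustrative learning_transfer call (example arguments, not executed): reuse
  // skills learned on one task family for a related one:
  // learning_transfer → { "source_task": "sql tuning", "target_task": "query planning",
  //                       "transfer_type": "skills", "min_similarity": 0.8 }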
  {
    name: 'experience_record',
    description: 'Record tool execution as experience for reinforcement learning and experience replay',
    inputSchema: {
      type: 'object',
      properties: {
        session_id: { type: 'string', description: 'Session identifier' },
        tool_name: { type: 'string', description: 'Name of the tool executed' },
        action: { type: 'string', description: 'Action taken or tool parameters' },
        state_before: { type: 'object', description: 'System state before action (JSON)' },
        state_after: { type: 'object', description: 'System state after action (JSON)' },
        outcome: { type: 'string', description: 'Outcome description' },
        reward: { type: 'number', description: 'Reward signal (0-1)', minimum: 0, maximum: 1 },
        success: { type: 'boolean', description: 'Whether the action succeeded' },
        latency_ms: { type: 'number', description: 'Execution latency in milliseconds' },
        metadata: { type: 'object', description: 'Additional metadata (JSON)' },
      },
      required: ['session_id', 'tool_name', 'action', 'outcome', 'reward', 'success'],
    },
  },
  {
    name: 'reward_signal',
    description: 'Calculate reward signal for outcomes based on success, efficiency, and causal impact',
    inputSchema: {
      type: 'object',
      properties: {
        episode_id: { type: 'number', description: 'Episode ID to calculate reward for' },
        success: { type: 'boolean', description: 'Whether the outcome was successful' },
        target_achieved: { type: 'boolean', description: 'Whether the target was achieved', default: true },
        efficiency_score: { type: 'number', description: 'Efficiency score (0-1)', default: 0.5, minimum: 0, maximum: 1 },
        quality_score: { type: 'number', description: 'Quality score (0-1)', default: 0.5, minimum: 0, maximum: 1 },
        time_taken_ms: { type: 'number', description: 'Time taken in milliseconds' },
        expected_time_ms: { type: 'number', description: 'Expected time in milliseconds' },
        include_causal: { type: 'boolean', description: 'Include causal impact in reward', default: true },
        reward_function: { type: 'string', description: 'Reward function to use', enum: ['standard', 'sparse', 'dense', 'shaped'], default: 'standard' },
      },
      required: ['success'],
    },
  },

  // ==========================================================================
  // CORE AGENTDB TOOLS (6-10) - v1.3.0
  // ==========================================================================
  {
    name: 'agentdb_stats',
    description: 'Get comprehensive database statistics including table counts, storage usage, and performance metrics',
    inputSchema: {
      type: 'object',
      properties: {
        detailed: { type: 'boolean', description: 'Include detailed statistics', default: false },
      },
    },
  },
  {
    name: 'agentdb_pattern_store',
    description: 'Store reasoning pattern with embedding, taskType, approach, and successRate',
    inputSchema: {
      type: 'object',
      properties: {
        taskType: { type: 'string', description: 'Type of task (e.g., "code_review", "data_analysis")' },
        approach: { type: 'string', description: 'Description of the reasoning approach' },
        successRate: { type: 'number', description: 'Success rate (0-1)', default: 0.0 },
        tags: { type: 'array', items: { type: 'string' }, description: 'Optional tags' },
        metadata: { type: 'object', description: 'Additional metadata' },
      },
      required: ['taskType', 'approach', 'successRate'],
    },
  },
  {
    name: 'agentdb_pattern_search',
    description: 'Search patterns with taskEmbedding, k, threshold, and filters',
    inputSchema: {
      type: 'object',
      properties: {
        task: { type: 'string', description: 'Task description to search for' },
        k: { type: 'number', description: 'Number of results to return', default: 10 },
        threshold: { type: 'number', description: 'Minimum similarity threshold (0-1)', default: 0.0 },
        filters: {
          type: 'object',
          properties: {
            taskType: { type: 'string', description: 'Filter by task type' },
            minSuccessRate: { type: 'number', description: 'Minimum success rate' },
            tags: { type: 'array', items: { type: 'string' }, description: 'Filter by tags' },
          },
          description: 'Optional filters',
        },
      },
      required: ['task'],
    },
  },
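  // Illustrative pattern usage for the two tools above (example arguments, not
  // executed):
  // agentdb_pattern_store  → { "taskType": "code_review", "successRate": 0.8,
  //     "approach": "diff-first scan, then test coverage check" }
  // agentdb_pattern_search → { "task": "review a Python PR", "k": 5,
  //     "filters": { "taskType": "code_review", "minSuccessRate": 0.5 } }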
  {
    name: 'agentdb_pattern_stats',
    description: 'Get pattern statistics including total patterns, success rates, and top task types',
    inputSchema: {
      type: 'object',
      properties: {},
    },
  },
  {
    name: 'agentdb_clear_cache',
    description: 'Clear query cache to refresh statistics and search results',
    inputSchema: {
      type: 'object',
      properties: {
        cache_type: { type: 'string', description: 'Type of cache to clear (all, patterns, stats)', enum: ['all', 'patterns', 'stats'], default: 'all' },
      },
    },
  },
];

// ============================================================================
// Tool Handlers
// ============================================================================

server.setRequestHandler(ListToolsRequestSchema, async () => {
  return { tools };
});

server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;

  try {
    switch (name) {
      // ======================================================================
      // CORE VECTOR DB OPERATIONS
      // ======================================================================
      case 'agentdb_init': {
        const targetDbPath = (args?.db_path as string) || dbPath;

        if (args?.reset && fs.existsSync(targetDbPath)) {
          fs.unlinkSync(targetDbPath);
        }

        // Initialize schema
        initializeSchema(db);

        // Note: SQL string literals use single quotes; double-quoted "table" would
        // rely on SQLite's identifier-fallback misfeature.
        const stats = db.prepare("SELECT COUNT(*) as count FROM sqlite_master WHERE type='table'").get() as any;

        return {
          content: [
            {
              type: 'text',
              text:
                `✅ AgentDB initialized successfully!\n` +
                `📍 Database: ${targetDbPath}\n` +
                `📊 Tables created: ${stats.count}\n` +
                `⚙️ Optimizations: WAL mode, cache_size=64MB\n` +
                `🧠 Embedding service: ${embeddingService.constructor.name} ready`,
            },
          ],
        };
      }

      case 'agentdb_insert': {
        const text = args?.text as string;
        const sessionId = (args?.session_id as string) || 'default';
        const tags = (args?.tags as string[]) || [];
        const metadata = (args?.metadata as Record<string, any>) || {};

        const episodeId = await reflexion.storeEpisode({
          sessionId,
          task: text,
          reward: 1.0,
          success: true,
          input: text,
          output: '',
          critique: '',
          latencyMs: 0,
          tokensUsed: 0,
          tags,
          metadata,
        });

        return {
          content: [
            {
              type: 'text',
              text:
                `✅ Vector inserted successfully!\n` +
                `🆔 ID: ${episodeId}\n` +
                `📝 Text: ${text.substring(0, 100)}${text.length > 100 ? '...' : ''}\n` +
                `🏷️ Tags: ${tags.join(', ') || 'none'}\n` +
                `🧠 Embedding: ${embeddingService.constructor.name}`,
            },
          ],
        };
      }
      case 'agentdb_insert_batch': {
        const items = (args?.items as any[]) || [];
        const batchSize = (args?.batch_size as number) || 100;

        const episodes = items.map((item: any) => ({
          sessionId: item.session_id || 'default',
          task: item.text,
          reward: 1.0,
          success: true,
          input: item.text,
          output: '',
          critique: '',
          latencyMs: 0,
          tokensUsed: 0,
          tags: item.tags || [],
          metadata: item.metadata || {},
        }));

        const batchOpsConfig = new BatchOperations(db, embeddingService, {
          batchSize,
          parallelism: 4,
        });

        const inserted = await batchOpsConfig.insertEpisodes(episodes);

        return {
          content: [
            {
              type: 'text',
              text:
                `✅ Batch insert completed!\n` +
                `📊 Inserted: ${inserted} vectors\n` +
                `⚡ Batch size: ${batchSize}\n` +
                `🧠 Embeddings generated in parallel\n` +
                `💾 Transaction committed`,
            },
          ],
        };
      }

      case 'agentdb_search': {
        const queryText = args?.query as string;
        const k = (args?.k as number) || 10;
        const minSimilarity = (args?.min_similarity as number) || 0.0;
        const filters = args?.filters as any;

        const query: any = {
          task: queryText,
          k,
        };

        if (filters) {
          if (filters.min_reward !== undefined) {
            query.minReward = filters.min_reward;
          }
          // Session ID filter would require custom query
        }

        const results = await reflexion.retrieveRelevant(query);

        // Filter by minimum similarity if specified
        let filteredResults = results;
        if (minSimilarity > 0) {
          filteredResults = results.filter(r => (r.similarity || 0) >= minSimilarity);
        }

        return {
          content: [
            {
              type: 'text',
              text:
                `🔍 Search completed!\n` +
                `📊 Found: ${filteredResults.length} results\n` +
                `🎯 Query: ${queryText}\n\n` +
                `Top Results:\n` +
                filteredResults.slice(0, 5).map((r, i) =>
                  `${i + 1}. [ID: ${r.id}] Similarity: ${(r.similarity || 0).toFixed(3)}\n` +
                  `   Task: ${r.task.substring(0, 80)}${r.task.length > 80 ? '...' : ''}\n` +
                  `   Reward: ${r.reward.toFixed(2)}`
                ).join('\n\n') +
                (filteredResults.length > 5 ? `\n\n... and ${filteredResults.length - 5} more results` : ''),
            },
          ],
        };
      }

      case 'agentdb_delete': {
        let deleted = 0;
        const id = args?.id as number | undefined;
        const filters = args?.filters as any;

        try {
          if (id !== undefined) {
            // Validate ID
            const validatedId = validateId(id, 'id');

            // Delete single vector using parameterized query
            const stmt = db.prepare('DELETE FROM episodes WHERE id = ?');
            const result = stmt.run(validatedId);
            deleted = result.changes;
          } else if (filters) {
            // Bulk delete with validated filters
            if (filters.session_id) {
              // Validate session_id
              const validatedSessionId = validateSessionId(filters.session_id);

              // Use parameterized query
              const stmt = db.prepare('DELETE FROM episodes WHERE session_id = ?');
              const result = stmt.run(validatedSessionId);
              deleted = result.changes;
            } else if (filters.before_timestamp) {
              // Validate timestamp
              const validatedTimestamp = validateTimestamp(filters.before_timestamp, 'before_timestamp');

              // Use parameterized query
              const stmt = db.prepare('DELETE FROM episodes WHERE ts < ?');
              const result = stmt.run(validatedTimestamp);
              deleted = result.changes;
            } else {
              throw new ValidationError('Invalid or missing filter criteria', 'INVALID_FILTER');
            }
          } else {
            throw new ValidationError('Either id or filters must be provided', 'MISSING_PARAMETER');
          }

          return {
            content: [
              {
                type: 'text',
                text:
                  `✅ Delete operation completed!\n` +
                  `📊 Deleted: ${deleted} vector(s)\n` +
                  `🗑️ ${id !== undefined ? `ID: ${id}` : 'Bulk deletion with filters'}`,
              },
            ],
          };
        } catch (error: any) {
          const safeMessage = handleSecurityError(error);
          return {
            content: [
              {
                type: 'text',
                text: `❌ Delete operation failed: ${safeMessage}`,
              },
            ],
            isError: true,
          };
        }
      }
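      // Note (descriptive): the delete handler above funnels every code path through
      // validateId/validateSessionId/validateTimestamp and parameterized statements,
      // so user-supplied ids and filters never reach the SQL string itself.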
      // ======================================================================
      // FRONTIER MEMORY FEATURES (EXISTING)
      // ======================================================================
      case 'reflexion_store': {
        const episodeId = await reflexion.storeEpisode({
          sessionId: args?.session_id as string,
          task: args?.task as string,
          reward: args?.reward as number,
          success: args?.success as boolean,
          critique: (args?.critique as string) || '',
          input: (args?.input as string) || '',
          output: (args?.output as string) || '',
          latencyMs: (args?.latency_ms as number) || 0,
          tokensUsed: (args?.tokens as number) || 0,
        });

        return {
          content: [
            {
              type: 'text',
              text: `✅ Stored episode #${episodeId}\nTask: ${args?.task}\nReward: ${args?.reward}\nSuccess: ${args?.success}`,
            },
          ],
        };
      }

      case 'reflexion_retrieve': {
        const episodes = await reflexion.retrieveRelevant({
          task: args?.task as string,
          k: (args?.k as number) || 5,
          onlyFailures: args?.only_failures as boolean | undefined,
          onlySuccesses: args?.only_successes as boolean | undefined,
          minReward: args?.min_reward as number | undefined,
        });

        return {
          content: [
            {
              type: 'text',
              text:
                `🔍 Retrieved ${episodes.length} episodes:\n\n` +
                episodes.map((ep, i) =>
                  `${i + 1}. Episode ${ep.id}\n   Task: ${ep.task}\n   Reward: ${ep.reward.toFixed(2)}\n   Similarity: ${ep.similarity?.toFixed(3) || 'N/A'}`
                ).join('\n\n'),
            },
          ],
        };
      }

      case 'skill_create': {
        const skillId = await skills.createSkill({
          name: args?.name as string,
          description: args?.description as string,
          signature: { inputs: {}, outputs: {} },
          code: (args?.code as string) || '',
          successRate: (args?.success_rate as number) || 0.0,
          uses: 0,
          avgReward: 0.0,
          avgLatencyMs: 0.0,
        });

        return {
          content: [
            {
              type: 'text',
              text: `✅ Created skill #${skillId}: ${args?.name}`,
            },
          ],
        };
      }

      case 'skill_search': {
        const foundSkills = await skills.searchSkills({
          task: args?.task as string,
          k: (args?.k as number) || 10,
          minSuccessRate: (args?.min_success_rate as number) || 0.0,
        });

        return {
          content: [
            {
              type: 'text',
              text:
                `🔍 Found ${foundSkills.length} skills:\n\n` +
                foundSkills.map((skill, i) =>
                  `${i + 1}. ${skill.name}\n   ${skill.description}\n   Success: ${(skill.successRate * 100).toFixed(1)}%`
                ).join('\n\n'),
            },
          ],
        };
      }

      case 'causal_add_edge': {
        const cause = args?.cause as string;
        const effect = args?.effect as string;
        const uplift = args?.uplift as number;
        const confidence = (args?.confidence as number) || 0.95;
        const sampleSize = (args?.sample_size as number) || 0;

        const edgeId = causalGraph.addCausalEdge({
          fromMemoryId: 0,
          fromMemoryType: cause as 'episode' | 'skill' | 'note' | 'fact',
          toMemoryId: 0,
          toMemoryType: effect as 'episode' | 'skill' | 'note' | 'fact',
          similarity: 0,
          uplift,
          confidence,
          sampleSize,
          evidenceIds: [],
        });

        return {
          content: [
            {
              type: 'text',
              text: `✅ Added causal edge #${edgeId}\n${cause} → ${effect}\nUplift: ${uplift}`,
            },
          ],
        };
      }

      case 'causal_query': {
        const cause = args?.cause as string | undefined;
        const effect = args?.effect as string | undefined;
        const minConfidence = (args?.min_confidence as number) || 0.5;
        const minUplift = (args?.min_uplift as number) || 0.0;
        const limit = (args?.limit as number) || 10;

        const edges = causalGraph.queryCausalEffects({
          interventionMemoryId: 0,
          interventionMemoryType: cause || '',
          outcomeMemoryId: effect ? 0 : undefined,
          minConfidence,
          minUplift,
        });

        const limited = edges.slice(0, limit);

        return {
          content: [
            {
              type: 'text',
              text:
                `🔍 Found ${edges.length} causal edges:\n\n` +
                limited.map((edge, i) =>
                  `${i + 1}. ${edge.fromMemoryType} → ${edge.toMemoryType}\n   Uplift: ${(edge.uplift || 0).toFixed(3)} (confidence: ${edge.confidence.toFixed(2)})`
                ).join('\n\n'),
            },
          ],
        };
      }
      case 'recall_with_certificate': {
        const query = args?.query as string;
        const k = (args?.k as number) || 12;

        const result = await causalRecall.recall(
          'mcp-' + Date.now(),
          query,
          k,
          undefined,
          'internal'
        );

        return {
          content: [
            {
              type: 'text',
              text:
                `🔍 Retrieved ${result.candidates.length} results:\n\n` +
                result.candidates.slice(0, 5).map((r, i) =>
                  `${i + 1}. ${r.type} ${r.id}\n   Similarity: ${r.similarity.toFixed(3)}\n   Uplift: ${r.uplift?.toFixed(3) || '0.000'}`
                ).join('\n\n') +
                `\n\n📜 Certificate ID: ${result.certificate.id}`,
            },
          ],
        };
      }

      case 'learner_discover': {
        const minAttempts = (args?.min_attempts as number) || 3;
        const minSuccessRate = (args?.min_success_rate as number) || 0.6;
        const minConfidence = (args?.min_confidence as number) || 0.7;
        const dryRun = (args?.dry_run as boolean) || false;

        const discovered = await learner.discover({
          minAttempts,
          minSuccessRate,
          minConfidence,
          dryRun,
        });

        return {
          content: [
            {
              type: 'text',
              text:
                `🌙 Discovered ${discovered.length} causal patterns:\n\n` +
                discovered.slice(0, 10).map((edge, i) =>
                  `${i + 1}. ${edge.fromMemoryType} → ${edge.toMemoryType}\n   Uplift: ${(edge.uplift || 0).toFixed(3)} (n=${edge.sampleSize || 0})`
                ).join('\n\n'),
            },
          ],
        };
      }

      case 'db_stats': {
        const stats: Record<string, number> = {
          causal_edges: (db.prepare('SELECT COUNT(*) as count FROM causal_edges').get() as any)?.count || 0,
          causal_experiments: (db.prepare('SELECT COUNT(*) as count FROM causal_experiments').get() as any)?.count || 0,
          causal_observations: (db.prepare('SELECT COUNT(*) as count FROM causal_observations').get() as any)?.count || 0,
          episodes: (db.prepare('SELECT COUNT(*) as count FROM episodes').get() as any)?.count || 0,
          episode_embeddings: (db.prepare('SELECT COUNT(*) as count FROM episode_embeddings').get() as any)?.count || 0,
          skills: (db.prepare('SELECT COUNT(*) as count FROM skills').get() as any)?.count || 0,
        };

        return {
          content: [
            {
              type: 'text',
              text:
                `📊 Database Statistics:\n\n` +
                `Causal Edges: ${stats.causal_edges}\n` +
                `Experiments: ${stats.causal_experiments}\n` +
                `Observations: ${stats.causal_observations}\n` +
                `Episodes (Vectors): ${stats.episodes}\n` +
                `Episode Embeddings: ${stats.episode_embeddings}\n` +
                `Skills: ${stats.skills}`,
            },
          ],
        };
      }

      // ======================================================================
      // CORE AGENTDB TOOLS (6-10)
      // ======================================================================
      case 'agentdb_stats': {
        const detailed = (args?.detailed as boolean) || false;

        // Helper to safely query table count
        const safeCount = (tableName: string): number => {
          try {
            return (db.prepare(`SELECT COUNT(*) as count FROM ${tableName}`).get() as any)?.count || 0;
          } catch {
            return 0; // Table doesn't exist
          }
        };

        const stats: Record<string, number> = {
          causal_edges: safeCount('causal_edges'),
          causal_experiments: safeCount('causal_experiments'),
          causal_observations: safeCount('causal_observations'),
          episodes: safeCount('episodes'),
          episode_embeddings: safeCount('episode_embeddings'),
          skills: safeCount('skills'),
          skill_embeddings: safeCount('skill_embeddings'),
          reasoning_patterns: safeCount('reasoning_patterns'),
          pattern_embeddings: safeCount('pattern_embeddings'),
          learning_sessions: safeCount('rl_sessions'),
        };

        let output =
          `📊 AgentDB Comprehensive Statistics\n\n` +
          `🧠 Memory & Learning:\n` +
          `  Episodes (Vectors): ${stats.episodes}\n` +
          `  Episode Embeddings: ${stats.episode_embeddings}\n` +
          `  Skills: ${stats.skills}\n` +
          `  Skill Embeddings: ${stats.skill_embeddings}\n` +
          `  Reasoning Patterns: ${stats.reasoning_patterns}\n` +
          `  Pattern Embeddings: ${stats.pattern_embeddings}\n` +
          `  Learning Sessions: ${stats.learning_sessions}\n\n` +
          `🔗 Causal Intelligence:\n` +
          `  Causal Edges: ${stats.causal_edges}\n` +
          `  Experiments: ${stats.causal_experiments}\n` +
          `  Observations: ${stats.causal_observations}\n`;

        if (detailed) {
          // Add storage statistics
          const dbStats = db.prepare(`
            SELECT page_count * page_size as total_bytes
            FROM pragma_page_count(), pragma_page_size()
          `).get() as any;
          const totalMB = (dbStats.total_bytes / (1024 * 1024)).toFixed(2);

          // Add recent activity stats
          const recentActivity = db.prepare(`
            SELECT COUNT(*) as count FROM episodes
            WHERE ts >= strftime('%s', 'now', '-7 days')
          `).get() as any;

          output +=
            `\n📦 Storage:\n` +
            `  Database Size: ${totalMB} MB\n` +
            `  Recent Activity (7d): ${recentActivity.count} episodes\n`;
        }

        return {
          content: [
            {
              type: 'text',
              text: output,
            },
          ],
        };
      }

      case 'agentdb_pattern_store': {
        const taskType = args?.taskType as string;
        const approach = args?.approach as string;
        const successRate = args?.successRate as number;
        const tags = (args?.tags as string[]) || [];
        const metadata = (args?.metadata as Record<string, any>) || {};

        const patternId = await reasoningBank.storePattern({
          taskType,
          approach,
          successRate,
          tags,
          metadata,
        });

        return {
          content: [
            {
              type: 'text',
              text:
                `✅ Reasoning pattern stored successfully!\n\n` +
                `🆔 Pattern ID: ${patternId}\n` +
                `📋 Task Type: ${taskType}\n` +
                `💡 Approach: ${approach.substring(0, 100)}${approach.length > 100 ? '...' : ''}\n` +
                `📊 Success Rate: ${(successRate * 100).toFixed(1)}%\n` +
                `🏷️ Tags: ${tags.join(', ') || 'none'}\n` +
                `🧠 Embedding generated and stored`,
            },
          ],
        };
      }

      case 'agentdb_pattern_search': {
        const task = args?.task as string;
        const k = (args?.k as number) || 10;
        const threshold = (args?.threshold as number) || 0.0;