@smartsamurai/krapi-sdk

KRAPI TypeScript SDK - an easy-to-use client SDK for connecting to self-hosted KRAPI servers (similar to the Appwrite SDK)
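
To try it, the package can be installed from npm; this is simply the standard npm workflow applied to the package name above, not a command taken from the package's own documentation:

    npm install @smartsamurai/krapi-sdk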

/**
 * MCP Service for BackendSDK
 *
 * Provides Model Context Protocol (MCP) functionality for AI model interactions.
 * Supports chat operations, model capabilities, and model management.
 *
 * @module mcp-service
 */

import { DatabaseConnection, Logger } from "./core";
import { normalizeError } from "./utils/error-handler";

export interface ChatMessage {
  role: "user" | "assistant" | "system";
  content: string;
  timestamp?: string;
}

export interface ChatResponse {
  message: string;
  model: string;
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
  finish_reason?: string;
}

export interface ModelCapabilities {
  models: string[];
  features: string[];
  max_tokens: number;
  supports_streaming: boolean;
  supports_functions: boolean;
}

export interface Model {
  id: string;
  name: string;
  provider: string;
  capabilities: string[];
  max_tokens: number;
  is_available: boolean;
}

/**
 * MCP Service for BackendSDK
 *
 * Provides MCP functionality for AI model interactions.
 *
 * @class MCPService
 * @example
 * const mcpService = new MCPService(dbConnection, logger);
 * const response = await mcpService.chat('project-id', [{ role: 'user', content: 'Hello' }]);
 */
export class MCPService {
  constructor(
    // @ts-expect-error - Reserved for future use
    private _dbConnection: DatabaseConnection,
    private logger: Logger = console
  ) {}

  /**
   * Get model capabilities
   *
   * @returns {Promise<ModelCapabilities>} Model capabilities
   */
  async getModelCapabilities(): Promise<ModelCapabilities> {
    // Default capabilities - in a real implementation, this would query the MCP server
    return {
      models: ["gpt-4", "gpt-3.5-turbo", "claude-3-opus"],
      features: ["chat", "completion", "streaming"],
      max_tokens: 4096,
      supports_streaming: true,
      supports_functions: true,
    };
  }

  /**
   * Send chat messages to MCP model
   *
   * @param {string} projectId - Project ID
   * @param {ChatMessage[]} messages - Chat messages
   * @returns {Promise<ChatResponse>} Chat response
   */
  async chat(projectId: string, messages: ChatMessage[]): Promise<ChatResponse> {
    try {
      // In a real implementation, this would call the MCP server
      // For now, return a mock response
      this.logger.info(`MCP chat for project ${projectId} with ${messages.length} messages`);

      return {
        message: "This is a mock MCP response. MCP integration needs to be implemented.",
        model: "gpt-4",
        usage: {
          prompt_tokens: 100,
          completion_tokens: 50,
          total_tokens: 150,
        },
        finish_reason: "stop",
      };
    } catch (error) {
      this.logger.error("Failed to send MCP chat:", error);
      throw normalizeError(error, "INTERNAL_ERROR", { operation: "chat", projectId });
    }
  }

  /**
   * Send chat messages to MCP model (admin context)
   *
   * @param {ChatMessage[]} messages - Chat messages
   * @returns {Promise<ChatResponse>} Chat response
   */
  async adminChat(messages: ChatMessage[]): Promise<ChatResponse> {
    try {
      // In a real implementation, this would call the MCP server with admin context
      this.logger.info(`MCP admin chat with ${messages.length} messages`);

      return {
        message: "This is a mock MCP admin response. MCP integration needs to be implemented.",
        model: "gpt-4",
        usage: {
          prompt_tokens: 100,
          completion_tokens: 50,
          total_tokens: 150,
        },
        finish_reason: "stop",
      };
    } catch (error) {
      this.logger.error("Failed to send MCP admin chat:", error);
      throw normalizeError(error, "INTERNAL_ERROR", { operation: "adminChat" });
    }
  }

  /**
   * List available MCP models
   *
   * @returns {Promise<Model[]>} List of available models
   */
  async listModels(): Promise<Model[]> {
    // In a real implementation, this would query the MCP server
    return [
      {
        id: "gpt-4",
        name: "GPT-4",
        provider: "openai",
        capabilities: ["chat", "completion", "streaming"],
        max_tokens: 8192,
        is_available: true,
      },
      {
        id: "gpt-3.5-turbo",
        name: "GPT-3.5 Turbo",
        provider: "openai",
        capabilities: ["chat", "completion", "streaming"],
        max_tokens: 4096,
        is_available: true,
      },
      {
        id: "claude-3-opus",
        name: "Claude 3 Opus",
        provider: "anthropic",
        capabilities: ["chat", "completion"],
        max_tokens: 200000,
        is_available: true,
      },
    ];
  }
}
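
For SDK consumers, here is a minimal usage sketch of the service above. It assumes a local import path matching the @module tag and an already-configured DatabaseConnection from ./core (how that connection is obtained is not shown in this file); otherwise it only exercises the methods defined above:

import { DatabaseConnection } from "./core";
import { MCPService } from "./mcp-service";

async function demo(dbConnection: DatabaseConnection): Promise<void> {
  // Logger argument omitted, so the service falls back to console.
  const mcp = new MCPService(dbConnection);

  // Both of these currently return the hard-coded defaults shown above.
  const caps = await mcp.getModelCapabilities();
  const models = await mcp.listModels();
  console.log(caps.models, models.map((m) => m.id));

  // chat() logs the call and returns the mock ChatResponse.
  const reply = await mcp.chat("project-id", [
    { role: "user", content: "Hello" },
  ]);
  console.log(reply.message, reply.usage?.total_tokens);
}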
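
Every method above returns hard-coded mock data, as the in-code comments note. As a purely illustrative sketch of one way the mock body of chat() could eventually be replaced, the service might POST to an MCP endpoint on the KRAPI server; the endpoint path and request shape here are invented for illustration and are not KRAPI's actual API:

// Hypothetical helper: the URL scheme and payload are assumptions, not KRAPI's real API.
async function chatViaHttp(
  baseUrl: string,
  projectId: string,
  messages: ChatMessage[]
): Promise<ChatResponse> {
  const res = await fetch(`${baseUrl}/v1/projects/${projectId}/mcp/chat`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ messages }),
  });
  if (!res.ok) {
    throw new Error(`MCP chat failed: ${res.status} ${res.statusText}`);
  }
  // Assumes the server responds with a JSON body shaped like ChatResponse.
  return (await res.json()) as ChatResponse;
}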