UNPKG

@restnfeel/agentc-starter-kit

Version:

한국어 기업용 CMS 모듈 - Task Master AI와 함께 빠르게 웹사이트를 구현할 수 있는 재사용 가능한 컴포넌트 시스템

442 lines (436 loc) 15.9 kB
import { LLMProvider } from '../contexts/ChatbotContext.js';

/**
 * Abstract base class shared by all LLM provider implementations.
 *
 * Tracks connection state, the provider configuration object, and the list
 * of models reported by the provider after a successful connect().
 * Concrete subclasses implement connect/disconnect/generateResponse/
 * listModels/validateApiKey/healthCheck.
 */
class BaseLLM {
  constructor(config) {
    this.isConnected = false;
    this.availableModels = [];
    this.config = config;
  }

  /** @returns {{isConnected: boolean, config: object, availableModels: string[]}} snapshot of current state. */
  getStatus() {
    return {
      isConnected: this.isConnected,
      config: this.config,
      availableModels: this.availableModels,
    };
  }

  /** Shallow-merges partial `updates` into the current config. */
  updateConfig(updates) {
    this.config = { ...this.config, ...updates };
  }
}

/**
 * OpenAI chat-completions backend.
 *
 * Lazily imports the `openai` SDK on connect() so the dependency is only
 * loaded when this provider is actually used.
 */
class OpenAILLM extends BaseLLM {
  constructor(config) {
    super(config);
    this.client = null;
  }

  /**
   * Creates the OpenAI client, verifies the API key, and loads the model list.
   * @throws {Error} if the key is missing or invalid, or the SDK cannot be loaded.
   */
  async connect() {
    try {
      if (!this.config.apiKey) {
        throw new Error("OpenAI API key is required");
      }
      // Dynamic import to avoid bundling issues.
      // FIX: import by package name — the previous deep
      // '../../../../node_modules/openai/index.js' path breaks under
      // pnpm, Yarn PnP, and any bundled deployment.
      const { OpenAI } = await import('openai');
      this.client = new OpenAI({
        apiKey: this.config.apiKey,
        dangerouslyAllowBrowser: true, // For client-side usage
      });
      // FIX: previously the boolean result was ignored, so an invalid key
      // still ended with isConnected = true (listModels silently fell back).
      // Now mirrors AnthropicLLM.connect() and fails fast.
      if (!(await this.validateApiKey())) {
        throw new Error("Invalid API key");
      }
      this.availableModels = await this.listModels();
      this.isConnected = true;
    } catch (error) {
      this.isConnected = false;
      throw new Error(`Failed to connect to OpenAI: ${error instanceof Error ? error.message : "Unknown error"}`);
    }
  }

  async disconnect() {
    this.client = null;
    this.isConnected = false;
    this.availableModels = [];
  }

  /**
   * Generates a chat completion, optionally grounded in retrieved documents.
   * @param {string} prompt - The user question.
   * @param {Array<{title: string, content: string}>} [context] - RAG documents to prepend.
   * @returns {Promise<{content: string, metadata: object}>}
   * @throws {Error} when not connected or the API call fails.
   */
  async generateResponse(prompt, context) {
    if (!this.isConnected || !this.client) {
      throw new Error("LLM is not connected");
    }
    try {
      // Build context if provided.
      let contextualPrompt = prompt;
      if (context && context.length > 0) {
        const contextText = context
          .map((doc) => `Document: ${doc.title}\nContent: ${doc.content}`)
          .join("\n\n");
        contextualPrompt = `Context:\n${contextText}\n\nQuestion: ${prompt}\n\nPlease answer the question using the provided context. 
If the context doesn't contain relevant information, say so.`;
      }
      const systemPrompt = this.config.systemPrompt || "You are a helpful AI assistant.";
      // FIX: use ?? instead of || for numeric options so explicit 0 values
      // (e.g. temperature: 0 for deterministic output) are honored.
      const response = await this.client.chat.completions.create({
        model: this.config.model,
        messages: [
          { role: "system", content: systemPrompt },
          { role: "user", content: contextualPrompt },
        ],
        temperature: this.config.temperature ?? 0.7,
        max_tokens: this.config.maxTokens ?? 1000,
        top_p: this.config.topP ?? 1,
        frequency_penalty: this.config.frequencyPenalty ?? 0,
        presence_penalty: this.config.presencePenalty ?? 0,
      });
      const choice = response.choices[0];
      if (!choice || !choice.message) {
        throw new Error("No response generated");
      }
      return {
        content: choice.message.content || "",
        metadata: {
          model: response.model,
          tokens: {
            prompt: response.usage?.prompt_tokens ?? 0,
            completion: response.usage?.completion_tokens ?? 0,
            total: response.usage?.total_tokens ?? 0,
          },
          finishReason: choice.finish_reason || undefined,
        },
      };
    } catch (error) {
      throw new Error(`Failed to generate response: ${error instanceof Error ? error.message : "Unknown error"}`);
    }
  }

  /**
   * Lists GPT models available to this API key; falls back to a static
   * list when the client is absent or the API call fails.
   */
  async listModels() {
    if (!this.client) {
      return ["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo"]; // Fallback models
    }
    try {
      const models = await this.client.models.list();
      return models.data
        .filter((model) => model.id.includes("gpt"))
        .map((model) => model.id)
        .sort();
    } catch (error) {
      console.warn("Failed to list OpenAI models, using fallback:", error);
      return ["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo"];
    }
  }

  /** @returns {Promise<boolean>} true if a cheap authenticated call succeeds. */
  async validateApiKey() {
    if (!this.client) return false;
    try {
      await this.client.models.list();
      return true;
    } catch (error) {
      return false;
    }
  }

  async healthCheck() {
    return this.isConnected && (await this.validateApiKey());
  }
}

/**
 * Anthropic Messages API backend.
 *
 * Lazily imports the `@anthropic-ai/sdk` package on connect().
 */
class AnthropicLLM extends BaseLLM {
  constructor(config) {
    super(config);
    this.client = null;
  }

  /**
   * Creates the Anthropic client, verifies the API key, and loads the model list.
   * @throws {Error} if the key is missing or invalid, or the SDK cannot be loaded.
   */
  async connect() {
    try {
      if (!this.config.apiKey) {
        throw new Error("Anthropic API key is required");
      }
      // Dynamic import to avoid bundling issues.
      // FIX: import by package name, not a deep node_modules path.
      const { Anthropic } = await import('@anthropic-ai/sdk');
      this.client = new Anthropic({
        apiKey: this.config.apiKey,
        dangerouslyAllowBrowser: true,
      });
      // Test connection before declaring the provider usable.
      if (await this.validateApiKey()) {
        this.availableModels = await this.listModels();
        this.isConnected = true;
      } else {
        throw new Error("Invalid API key");
      }
    } catch (error) {
      this.isConnected = false;
      throw new Error(`Failed to connect to Anthropic: ${error instanceof Error ? error.message : "Unknown error"}`);
    }
  }

  async disconnect() {
    this.client = null;
    this.isConnected = false;
    this.availableModels = [];
  }

  /**
   * Generates a message, optionally grounded in retrieved documents.
   * @param {string} prompt - The user question.
   * @param {Array<{title: string, content: string}>} [context] - RAG documents to prepend.
   * @returns {Promise<{content: string, metadata: object}>}
   * @throws {Error} when not connected or the API call fails.
   */
  async generateResponse(prompt, context) {
    if (!this.isConnected || !this.client) {
      throw new Error("LLM is not connected");
    }
    try {
      // Build context if provided.
      let contextualPrompt = prompt;
      if (context && context.length > 0) {
        const contextText = context
          .map((doc) => `Document: ${doc.title}\nContent: ${doc.content}`)
          .join("\n\n");
        contextualPrompt = `Context:\n${contextText}\n\nQuestion: ${prompt}\n\nPlease answer the question using the provided context.`;
      }
      const systemPrompt = this.config.systemPrompt || "You are a helpful AI assistant.";
      // FIX: ?? instead of || so explicit 0 temperature/topP are honored.
      const response = await this.client.messages.create({
        model: this.config.model,
        system: systemPrompt,
        messages: [{ role: "user", content: contextualPrompt }],
        temperature: this.config.temperature ?? 0.7,
        max_tokens: this.config.maxTokens ?? 1000,
        top_p: this.config.topP ?? 1,
      });
      if (!response.content || response.content.length === 0) {
        throw new Error("No response generated");
      }
      const content = response.content[0];
      if (content.type !== "text") {
        throw new Error("Unexpected response type");
      }
      return {
        content: content.text,
        metadata: {
          model: response.model,
          tokens: {
            prompt: response.usage.input_tokens,
            completion: response.usage.output_tokens,
            total: response.usage.input_tokens + response.usage.output_tokens,
          },
          finishReason: response.stop_reason || undefined,
        },
      };
    } catch (error) {
      throw new Error(`Failed to generate response: ${error instanceof Error ? error.message : "Unknown error"}`);
    }
  }

  /** Anthropic has no model-listing API, so return the known model IDs. */
  async listModels() {
    return [
      "claude-3-haiku-20240307",
      "claude-3-sonnet-20240229",
      "claude-3-opus-20240229",
      "claude-3-5-sonnet-20241022",
    ];
  }

  /**
   * Validates the key with a minimal 1-token request.
   * NOTE(review): this costs one billable token per check — confirm acceptable.
   * @returns {Promise<boolean>}
   */
  async validateApiKey() {
    if (!this.client) return false;
    try {
      await this.client.messages.create({
        model: "claude-3-haiku-20240307",
        messages: [{ role: "user", content: "test" }],
        max_tokens: 1,
      });
      return true;
    } catch (error) {
      return false;
    }
  }

  async healthCheck() {
    return this.isConnected && (await this.validateApiKey());
  }
}

/** Google LLM implementation (placeholder — every operation is a stub). */
class GoogleLLM extends BaseLLM {
  async connect() {
    throw new Error("Google LLM not implemented yet");
  }
  async disconnect() {
    this.isConnected = false;
  }
  async generateResponse(prompt, context) {
    throw new Error("Google LLM not implemented yet");
  }
  async listModels() {
    return [];
  }
  async validateApiKey() {
    return false;
  }
  async healthCheck() {
    return false;
  }
}

/** Mistral LLM implementation (placeholder — every operation is a stub). */
class MistralLLM extends BaseLLM {
  async connect() {
    throw new Error("Mistral LLM not implemented yet");
  }
  async disconnect() {
    this.isConnected = false;
  }
  async generateResponse(prompt, context) {
    throw new Error("Mistral LLM not implemented yet");
  }
  async listModels() {
    return [];
  }
  async validateApiKey() {
    return false;
  }
  async healthCheck() {
    return false;
  }
}

/**
 * Factory: creates the LLM implementation matching `config.provider`.
 * @throws {Error} for unknown providers.
 */
function createLLM(config) {
  switch (config.provider) {
    case LLMProvider.OPENAI:
      return new OpenAILLM(config);
    case LLMProvider.ANTHROPIC:
      return new AnthropicLLM(config);
    case LLMProvider.GOOGLE:
      return new GoogleLLM(config);
    case LLMProvider.MISTRAL:
      return new MistralLLM(config);
    default:
      throw new Error(`Unsupported LLM provider: ${config.provider}`);
  }
}

/**
 * Retrieval-augmented generation pipeline: retrieves documents from a
 * vector store and feeds them as context to an LLM.
 */
class RAGChainService {
  constructor(llm, vectorStore) {
    this.llm = llm;
    this.vectorStore = vectorStore;
  }

  /**
   * Runs retrieve → (filter) → generate for one query.
   * @param {string} query
   * @param {{maxRetrievedDocs?: number, similarityThreshold?: number}} [options]
   * @returns {Promise<{response: string, sources: object[], metadata: object}>}
   */
  async generateRAGResponse(query, options = {}) {
    const { maxRetrievedDocs = 5, similarityThreshold = 0.7 } = options;
    try {
      // Step 1: retrieve relevant documents.
      const retrievedDocs = await this.vectorStore.similaritySearch(query, maxRetrievedDocs);
      // Step 2: TODO — filter by similarityThreshold once the vector store
      // returns similarity scores; currently all retrieved documents pass.
      const filteredDocs = retrievedDocs;
      // Step 3: generate the answer using the documents as context.
      const llmResponse = await this.llm.generateResponse(query, filteredDocs);
      return {
        response: llmResponse.content,
        sources: filteredDocs,
        metadata: {
          retrievedDocCount: retrievedDocs.length,
          similarity: [], // would be populated with actual similarity scores
          model: llmResponse.metadata?.model,
          tokens: llmResponse.metadata?.tokens,
        },
      };
    } catch (error) {
      throw new Error(`Failed to generate RAG response: ${error instanceof Error ? error.message : "Unknown error"}`);
    }
  }

  updateLLM(llm) {
    this.llm = llm;
  }

  updateVectorStore(vectorStore) {
    this.vectorStore = vectorStore;
  }
}

/** Stateless helpers for LLM configuration and context handling. */
class LLMUtils {
  /** @returns {boolean} true when provider, model, and API key are all present. */
  static validateConfig(config) {
    if (!config.provider || !config.model) return false;
    switch (config.provider) {
      case LLMProvider.OPENAI:
      case LLMProvider.ANTHROPIC:
      case LLMProvider.GOOGLE:
      case LLMProvider.MISTRAL:
        return !!config.apiKey;
      default:
        return false;
    }
  }

  /** Builds a sensible default config (default model per provider). */
  static getDefaultConfig(provider) {
    const baseConfig = {
      provider,
      temperature: 0.7,
      maxTokens: 1000,
      topP: 1,
      frequencyPenalty: 0,
      presencePenalty: 0,
    };
    switch (provider) {
      case LLMProvider.OPENAI:
        return { ...baseConfig, model: "gpt-3.5-turbo" };
      case LLMProvider.ANTHROPIC:
        return { ...baseConfig, model: "claude-3-haiku-20240307" };
      case LLMProvider.GOOGLE:
        return { ...baseConfig, model: "gemini-pro" };
      case LLMProvider.MISTRAL:
        return { ...baseConfig, model: "mistral-small" };
      default:
        return baseConfig;
    }
  }

  /**
   * Formats numbered documents plus the query into a single grounded prompt.
   * @param {string} query
   * @param {Array<{title: string, content: string}>} documents
   * @param {string} [systemPrompt] - overrides the default instruction line.
   */
  static buildContextPrompt(query, documents, systemPrompt) {
    const contextText = documents
      .map((doc, index) => `[${index + 1}] ${doc.title}\n${doc.content}`)
      .join("\n\n");
    const prompt = systemPrompt ||
      "You are a helpful AI assistant. Use the provided context to answer questions accurately.";
    return `${prompt}

Context:
${contextText}

Question: ${query}

Please answer the question using the information from the provided context. 
If the context doesn't contain enough information to answer the question, please say so clearly.`;
  }

  /** Rough estimation: ~4 characters per token for English text. */
  static estimateTokens(text) {
    return Math.ceil(text.length / 4);
  }

  /**
   * Greedily packs documents into a token budget; the first document that
   * overflows is truncated (if >100 chars remain) and everything after it
   * is dropped.
   * @param {Array<{title: string, content: string}>} documents
   * @param {number} maxTokens
   */
  static truncateContext(documents, maxTokens) {
    let totalTokens = 0;
    const truncatedDocs = [];
    for (const doc of documents) {
      const docTokens = this.estimateTokens(doc.content);
      if (totalTokens + docTokens <= maxTokens) {
        truncatedDocs.push(doc);
        totalTokens += docTokens;
      } else {
        // Truncate the document content to fit the remaining budget.
        const remainingTokens = maxTokens - totalTokens;
        const remainingChars = remainingTokens * 4;
        if (remainingChars > 100) {
          // Only include if we have meaningful content.
          const truncatedDoc = {
            ...doc,
            content: doc.content.substring(0, remainingChars) + "...",
          };
          truncatedDocs.push(truncatedDoc);
        }
        break;
      }
    }
    return truncatedDocs;
  }
}

export { AnthropicLLM, BaseLLM, GoogleLLM, LLMUtils, MistralLLM, OpenAILLM, RAGChainService, createLLM };