// UNPKG
//
// @restnfeel/agentc-starter-kit
//
// Version:
//
// 한국어 기업용 CMS 모듈 - Task Master AI와 함께 빠르게 웹사이트를 구현할 수 있는 재사용 가능한 컴포넌트 시스템
//
// 264 lines (259 loc) 10.7 kB
import { ConversationContextManager } from './context-manager.js';
import { PromptManager } from './prompt-manager.js';

/**
 * Retrieval-augmented chatbot: retrieves documents through the configured
 * RAG engine, builds a prompt from a template, and calls an LLM (currently a
 * mock) to produce an assistant reply, tracking conversation state per id.
 */
class RAGChatbot {
  /**
   * @param {object} config - Expects `ragEngine`, `llmConfig` (with
   *   `modelName`), and optionally `conversationConfig`, `retrievalConfig`,
   *   `defaultPromptTemplate`, `languageDetection`.
   */
  constructor(config) {
    this.config = config;
    this.ragEngine = config.ragEngine;
    this.contextManager = new ConversationContextManager(config.conversationConfig);
    this.promptManager = new PromptManager();
  }

  /**
   * Run one full chat turn: record the user message, retrieve sources,
   * generate an assistant reply, and record it.
   *
   * @param {object} request - `{ message, conversationId?, promptTemplate?,
   *   retrievalOptions?, userContext?, systemPrompt? }`.
   * @returns {Promise<object>} `{ message, sources, conversationId, metadata }`.
   * @throws {Error} Wraps any retrieval/generation failure as "Chat failed: …"
   *   after logging an apology message into the conversation.
   */
  async chat(request) {
    // Reuse the caller's conversation or start a new one.
    const conversationId = request.conversationId || this.generateConversationId();
    this.contextManager.addMessage(conversationId, {
      role: "user",
      content: request.message,
    });
    try {
      // Retrieve relevant documents, timing the retrieval phase.
      const retrievalStart = Date.now();
      const sources = await this.retrieveDocuments(request);
      const retrievalTime = Date.now() - retrievalStart;

      // Generate the assistant response, timing the generation phase.
      const generationStart = Date.now();
      const assistantMessage = await this.generateResponse(conversationId, request, sources);
      const generationTime = Date.now() - generationStart;

      // metadata may be absent on the generated message.
      const assistantMeta = assistantMessage.metadata || {};

      // Persist the assistant message into the conversation history.
      const responseMessage = this.contextManager.addMessage(conversationId, {
        role: "assistant",
        content: assistantMessage.content,
        metadata: {
          sources,
          model: this.config.llmConfig.modelName,
          tokenCount: assistantMeta.tokenCount,
        },
      });

      return {
        message: responseMessage,
        sources,
        conversationId,
        metadata: {
          retrievalTime,
          generationTime,
          totalTokens: assistantMeta.tokenCount,
          model: this.config.llmConfig.modelName,
          template: request.promptTemplate ||
            this.config.defaultPromptTemplate ||
            "rag-default-en",
        },
      };
    } catch (error) {
      // Record a user-visible (Korean) error message in the conversation,
      // then surface the failure to the caller.
      this.contextManager.addMessage(conversationId, {
        role: "assistant",
        content: `죄송합니다. 
응답을 생성하는 중에 오류가 발생했습니다: ${error}`,
        metadata: {
          error: String(error),
        },
      });
      throw new Error(`Chat failed: ${error}`);
    }
  }

  /**
   * Retrieve candidate documents for the request message.
   * Defaults (topK=5, minScore=0.1, hybrid search) are overridden first by
   * config-level retrieval settings, then by per-request options.
   *
   * @param {object} request - `{ message, retrievalOptions? }`.
   * @returns {Promise<Array>} Scored source documents from the RAG engine.
   */
  async retrieveDocuments(request) {
    const retrievalOptions = {
      topK: 5,
      minScore: 0.1,
      searchMethod: "hybrid",
      ...this.config.retrievalConfig,
      ...request.retrievalOptions,
    };
    return await this.ragEngine.searchWithOptions(request.message, {
      method: retrievalOptions.searchMethod,
      k: retrievalOptions.topK,
      minScore: retrievalOptions.minScore,
    });
  }

  /**
   * Build a prompt from history + retrieved sources and call the LLM.
   *
   * @param {string} conversationId - Conversation to pull history from.
   * @param {object} request - The original chat request.
   * @param {Array} sources - Retrieved documents for grounding.
   * @returns {Promise<object>} A fully-formed assistant message object.
   */
  async generateResponse(conversationId, request, sources) {
    // Exclude the just-added user message from the history fed to the prompt.
    const conversationHistory = this.contextManager.getConversationHistory(conversationId);
    const previousMessages = conversationHistory.slice(0, -1);

    // Optionally detect language to pick a localized default template.
    let language = "en";
    if (this.config.languageDetection) {
      language = this.detectLanguage(request.message);
    }

    // Template priority: per-request > configured default > language default.
    const templateId = request.promptTemplate ||
      this.config.defaultPromptTemplate ||
      `rag-default-${language}`;

    const promptContext = {
      query: request.message,
      retrievedDocuments: sources,
      conversationHistory: previousMessages,
      userContext: request.userContext,
      language,
    };
    const prompt = this.promptManager.buildPrompt(templateId, promptContext);

    // Call LLM (mock implementation for now).
    const response = await this.callLLM(prompt, request.systemPrompt);

    return {
      id: this.generateMessageId(),
      role: "assistant",
      content: response.content,
      timestamp: new Date(),
      metadata: {
        sources,
        model: this.config.llmConfig.modelName,
        tokenCount: response.tokenCount,
      },
    };
  }

  /**
   * Mock LLM call — in production, this would integrate with actual LLM
   * providers (OpenAI, Anthropic, etc.).
   *
   * @param {string} prompt - Fully rendered prompt text.
   * @param {string} [systemPrompt] - Optional system prompt.
   * @returns {Promise<{content: string, tokenCount: number}>} Mocked reply;
   *   tokenCount is a rough chars/4 estimate.
   */
  async callLLM(prompt, systemPrompt) {
    const mockResponse = `Based on the provided documents, I can help answer your question. 
This is a mock response that would be replaced with actual LLM integration using the generated prompt. Prompt used: ${prompt.substring(0, 200)}... System prompt: ${systemPrompt || "None provided"}`;
    return {
      content: mockResponse,
      tokenCount: Math.ceil(mockResponse.length / 4),
    };
  }

  /**
   * Simple language detection — in production, use a proper language
   * detection library. Any Hangul character means "ko", otherwise "en".
   *
   * @param {string} text
   * @returns {string} "ko" or "en".
   */
  detectLanguage(text) {
    const koreanPattern = /[ㄱ-ㅎ가-힣]/;
    return koreanPattern.test(text) ? "ko" : "en";
  }

  // --- Conversation management (thin delegates to the context manager) ---

  /** @returns {object|undefined} The stored conversation, if any. */
  getConversation(conversationId) {
    return this.contextManager.getConversation(conversationId);
  }

  /** Remove a conversation's stored state. */
  clearConversation(conversationId) {
    return this.contextManager.clearConversation(conversationId);
  }

  /**
   * @param {string} conversationId
   * @param {number} [lastN] - Optionally limit to the N most recent messages.
   * @returns {Array} Message history.
   */
  getConversationHistory(conversationId, lastN) {
    return this.contextManager.getConversationHistory(conversationId, lastN);
  }

  /**
   * Streaming variant of {@link chat}: yields phase events
   * ("retrieval" → "generation" → "complete") so callers can render progress.
   * NOTE: generation itself is not yet token-streamed; it awaits the full
   * response and yields it once.
   *
   * @param {object} request - Same shape as {@link chat}'s request.
   * @yields {{type: string, data: object}} Progress events.
   */
  async *streamChat(request) {
    const conversationId = request.conversationId || this.generateConversationId();
    this.contextManager.addMessage(conversationId, {
      role: "user",
      content: request.message,
    });

    yield { type: "retrieval", data: { status: "starting" } };
    const sources = await this.retrieveDocuments(request);
    yield { type: "retrieval", data: { sources, status: "complete" } };

    yield { type: "generation", data: { status: "starting" } };
    // For true streaming, implement streaming LLM calls here.
    const response = await this.generateResponse(conversationId, request, sources);
    yield {
      type: "generation",
      data: { message: response, status: "complete" },
    };

    yield { type: "complete", data: { conversationId, sources } };
  }

  // --- Utility methods ---

  /** @returns {string} Time + random suffix id, e.g. "conv_1700000000000_ab12cd34e". */
  generateConversationId() {
    // slice(2, 11) replaces deprecated substr(2, 9): 9 random base-36 chars.
    return `conv_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
  }

  /** @returns {string} Time + random suffix id, e.g. "msg_1700000000000_ab12cd34e". */
  generateMessageId() {
    return `msg_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
  }

  // --- Configuration methods ---

  /** Shallow-merge updates over the current config. */
  updateConfig(updates) {
    this.config = { ...this.config, ...updates };
  }

  /** Register an additional prompt template with the prompt manager. */
  addPromptTemplate(template) {
    this.promptManager.addTemplate(template);
  }

  /**
   * Aggregate analytics from the context manager, RAG engine, and prompt
   * manager into one snapshot.
   *
   * @returns {object} Counts of conversations, messages, templates, plus
   *   raw RAG engine stats.
   */
  getStats() {
    const contextStats = this.contextManager.getManagerStats();
    const ragStats = this.ragEngine.getSearchStats();
    const promptStats = this.promptManager.getTemplateStats();
    return {
      conversations: contextStats.totalConversations,
      totalMessages: contextStats.totalMessages,
      ragEngineStats: ragStats,
      promptTemplates: promptStats.totalTemplates,
    };
  }

  /**
   * Summarize a conversation by repackaging its messages as pseudo-retrieved
   * documents and running them through the "summarize-documents" template.
   *
   * @param {string} conversationId
   * @returns {Promise<string>} Summary text, or a fixed notice when empty.
   */
  async summarizeConversation(conversationId) {
    const history = this.getConversationHistory(conversationId);
    if (history.length === 0) {
      return "No conversation to summarize.";
    }
    // Wrap each message in the retrieved-document shape the prompt expects.
    const documents = history.map((msg) => ({
      chunk: {
        id: msg.id,
        content: `${msg.role}: ${msg.content}`,
        metadata: {
          documentId: conversationId,
          chunkIndex: 0,
          startOffset: 0,
          endOffset: msg.content.length,
          tokens: Math.ceil(msg.content.length / 4),
          source: "conversation",
        },
      },
      score: 1.0,
      document: {
        id: conversationId,
        content: msg.content,
        metadata: {
          title: "Conversation History",
          createdAt: msg.timestamp,
          updatedAt: msg.timestamp,
          fileType: "conversation",
          fileSize: msg.content.length,
        },
        source: "conversation",
      },
    }));
    const prompt = this.promptManager.buildPrompt("summarize-documents", {
      query: "Summarize this conversation",
      retrievedDocuments: documents,
      conversationHistory: [],
    });
    const response = await this.callLLM(prompt);
    return response.content;
  }

  /**
   * Generate follow-up questions from the sources attached to the last
   * assistant message. Returns [] when the last message is not an assistant
   * reply or carries no sources.
   *
   * @param {string} conversationId
   * @returns {Promise<string[]>} Parsed question lines.
   */
  async suggestFollowUpQuestions(conversationId) {
    const lastMessage = this.getConversationHistory(conversationId, 1)[0];
    if (!lastMessage || lastMessage.role !== "assistant") {
      return [];
    }
    const sources = (lastMessage.metadata && lastMessage.metadata.sources) || [];
    if (sources.length === 0) {
      return [];
    }
    const prompt = this.promptManager.buildPrompt("generate-questions", {
      query: "Generate follow-up questions",
      retrievedDocuments: sources,
      conversationHistory: [],
    });
    const response = await this.callLLM(prompt);
    // Keep bullet ("- ...") and numbered ("1. ...") lines, then strip the
    // list marker. FIX: the old regex /^[-\d.]\s*/ consumed only a single
    // character (turning "12. Q" into "2. Q" and "1. Q" into ". Q") and ran
    // before trim(), so indented bullets kept their dash. Trim first, then
    // remove the whole "-" or "<digits>." marker.
    return response.content
      .split("\n")
      .filter((line) => line.trim().startsWith("-") || line.match(/^\d+\./))
      .map((line) => line.trim().replace(/^(?:-|\d+\.)\s*/, ""))
      .filter((q) => q.length > 0);
  }
}

export { RAGChatbot };
//# sourceMappingURL=chatbot.js.map