
flux-agent

FluxAgent - a flexible, pluggable AI agent system framework built with TypeScript, with support for streaming execution, an event system, a plugin system, knowledge base management, and more (Protected Release)

175 lines (171 loc) 6.23 kB
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.LLMOrchestrator = void 0; const types_1 = require("./types"); const Agent_1 = require("../Agent"); /** * LLM 协调器 * 负责与LLM的交互和响应处理 */ class LLMOrchestrator { constructor(agent // 暂时使用any,后续会优化类型 ) { this.agent = agent; } /** * 流式与LLM进行交互 */ async interactStream(phase, session, tagedPrompt, callbacks) { // 添加提示词到记忆中 this.agent.context.addUserMessage(tagedPrompt); // 获取当前阶段的工具集 const tools = this.getToolsForPhase(phase); // 发送流式请求给LLM Agent_1.AgentLogger.log('准备发送流式LLM请求', { messageCount: this.agent.context.getMessages().length, toolCount: tools.length, tools: tools.map(tool => tool.function.name), phase: phase }); const messages = this.agent.context.getMessages(); // 收集回复 const { content: originalContent, streamId: originalStreamId, isStop } = await Promise.race([ this.agent.llm.streamChat([ ...messages, ], tools, this.agent.recognizer ? undefined : callbacks, 'none'), this.createStopEventPromise() ]); let streamId = originalStreamId; let content = originalContent; Agent_1.AgentLogger.log('收到流式LLM响应', { content: content, streamId: streamId, }); if (isStop) { return this.createEmptyLLMResult(phase); } // embed处理 if (this.agent.recognizer) { try { // 使用 Recognizer 进行文本处理 const { content: processedContent, streamId: recognizerStreamId } = await Promise.race([ this.agent.recognizer.process(content, callbacks), this.createStopEventPromise() ]); if (processedContent.isStop) { return this.createEmptyLLMResult(phase); } if (processedContent !== content) { Agent_1.AgentLogger.log('Embed 处理完成', { originalLength: content.length, processedLength: processedContent.length, hasChanges: true }); } streamId = recognizerStreamId; content = processedContent; } catch (error) { Agent_1.AgentLogger.log('Embed 处理失败', { error: error instanceof Error ? error.message : String(error) }); // 处理失败时保持原始内容 } } // 收集工具调用 const { content: toolContent, toolCalls, isStop: isToolStop } = await Promise.race([ this.agent.toolcallLLM.streamChat([ ...messages, { role: 'assistant', content, }, { role: 'user', content: `<system>上面是你自己的回复,请根据你的回复调用和你意图一致的工具 ## 问题 比如你的回复是这种询问类:您看是否需要这样初步检索呢? 
你就需要调用 AskUserInputTool,而不是EndPhaseTool进入下一步 ## 简单问题 如果当前是 confirm 阶段,你已经针对用户的简单问题进行了回答,那么直接调用AskUserInputTool ${this.agent.toolcallRecognizePrompt} 特别注意:你的回复里不允许有工具详细信息,你必须选择一个工具进行调用!!</system>`, } ], tools, callbacks, 'required'), this.createStopEventPromise() ]); if (isToolStop) { return this.createEmptyLLMResult(phase); } Agent_1.AgentLogger.log('收到流式LLM 工具响应', { content: toolContent, toolCalls: toolCalls, phase: phase }); // 确保响应包含适当的工具调用 const enhancedResponse = { content: content, toolCalls: toolCalls, streamId: streamId }; return new types_1.LLMResultImpl(enhancedResponse, enhancedResponse, phase); } /** * 根据当前阶段获取工具集 */ getToolsForPhase(phase) { return this.agent.getToolsForPhase(phase); } /** * 验证LLM响应的有效性 */ validateResponse(llmResult) { const { enhancedResponse } = llmResult; // 检查是否有内容或工具调用 if (!enhancedResponse.content && (!enhancedResponse.toolCalls || enhancedResponse.toolCalls.length === 0)) { Agent_1.AgentLogger.log('LLM响应无效:既没有内容也没有工具调用'); return false; } return true; } /** * 记录LLM响应到上下文 */ recordResponse(llmResult) { if (llmResult.hasContent()) { this.agent.context.addAgentResponse(llmResult.enhancedResponse.content); Agent_1.AgentLogger.log('LLM回复', { content: llmResult.enhancedResponse.content, phase: llmResult.phase }); } } /** * 创建空的 LLM 返回结果 */ createEmptyLLMResult(phase) { return new types_1.LLMResultImpl({ content: '', toolCalls: [], streamId: '' }, { content: '', toolCalls: [], streamId: '' }, phase); } createStopEventPromise() { return new Promise((resolve, reject) => { try { this.agent.eventHub.registerType('StopEvent', () => { const result = { isStop: true }; resolve(result); }); } catch (error) { reject(error); } }); } } exports.LLMOrchestrator = LLMOrchestrator;
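
The compiled file only hints at how the orchestrator is meant to be driven: construct it with a configured agent, stream one round per phase, then validate and record the result. The TypeScript sketch below reconstructs that flow from the call sites visible above; it is a minimal, hypothetical example, not the package's documented API. The import path, the "confirm" phase name, the session object, and the callback shape are all assumptions.

// Hypothetical usage sketch reconstructed from the compiled code above.
// The import path and argument shapes are assumptions; consult the
// package's own type declarations for the real contracts.
import { LLMOrchestrator } from "flux-agent/dist/orchestrator/LLMOrchestrator"; // assumed path

async function runPhase(agent: any /* an already-configured FluxAgent instance */) {
  const orchestrator = new LLMOrchestrator(agent);

  // Stream one round of interaction for a given phase. The callbacks object
  // receives streamed output; its exact shape is defined by the package.
  const result = await orchestrator.interactStream(
    "confirm",                                                    // assumed phase name
    { id: "session-1" },                                          // assumed session object
    "<user>Please summarize the last report.</user>",             // tagged prompt
    { onChunk: (chunk: string) => process.stdout.write(chunk) }   // assumed callback shape
  );

  // Reject rounds that produced neither content nor tool calls, then
  // persist the reply into the agent's conversation context.
  if (orchestrator.validateResponse(result)) {
    orchestrator.recordResponse(result);
  }

  return result;
}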