generator-begcode
Version:
Spring Boot + Angular/React/Vue in one handy generator
124 lines (123 loc) • 5.62 kB
JavaScript
import { MessageChunker, ContextualizedChat, agentFunctionBaseToAgentFunction, tokensToChars, Prompt, } from '../../agent-core/index.js';
import { agentPrompts, prompts } from './prompts.js';
import { Agent, AgentConfig } from '../utils/index.js';
import { findBestAgent } from './findBestAgent.js';
/**
 * Returns a clone of `logs` with the given persona prepended as the very
 * first persistent user message. The original `logs` object is not mutated.
 *
 * @param persona   persona text to inject as a user-role message
 * @param logs      chat logs exposing `clone()` and `insert(type, msgs, sizes, index)`
 * @param tokenizer tokenizer exposing `encode()`; used to size the new message
 * @returns the cloned logs containing the persona message at index 0
 */
const insertPersonaAsFirstMsg = (persona, logs, tokenizer) => {
  const cloned = logs.clone();
  const personaMessage = { role: 'user', content: persona };
  const personaTokens = tokenizer.encode(persona).length;
  cloned.insert('persistent', [personaMessage], [personaTokens], 0);
  return cloned;
};
/**
 * Evo is the top-level orchestrating agent: each loop iteration it asks the
 * LLM to predict the best next step, finds the most suitable specialized
 * agent for that step via embedding similarity, and delegates execution with
 * a contextualized, persona-prefixed chat log.
 */
export class Evo extends Agent {
// When true, the prediction prompt may invite the model to answer with the
// termination marker ("SUCCESS") so the run can end early.
enableQuickTermination;
// Contextualized view over the chat; trims logs to fit the token budget.
_cChat;
// Splits messages larger than `saveThreshold` into chunks for _cChat.
_chunker;
// Cached step prediction, reused on odd iterations (see beforeLlmResponse).
previousPrediction;
// Count of beforeLlmResponse() iterations performed so far.
loopCounter = 0;
// Agent selected on the previous iteration; its persona seeds re-prediction.
previousAgent;
// Names of agents whose onFirstRun() has already fired during this run.
initializedAgents = new Set();
// The user's goal, set by initRun(); echoed into the termination prompt.
goal = '';
/**
 * @param context - agent context (chat, llm, logger, variables, ...)
 * @param timeout - run timeout, forwarded into AgentConfig
 * @param enableQuickTermination - allow early "SUCCESS" replies
 */
constructor(context, timeout, enableQuickTermination) {
super(new AgentConfig(agentPrompts, [], timeout), context);
this.enableQuickTermination = enableQuickTermination;
this._chunker = new MessageChunker({
maxChunkSize: context.variables.saveThreshold,
});
this._cChat = new ContextualizedChat(this.context, this.context.chat, this._chunker, context.variables);
}
// Clears all per-run state so this instance can be reused for a new goal.
reset() {
this.previousPrediction = undefined;
this.loopCounter = 0;
this.previousAgent = undefined;
this.initializedAgents = new Set();
this.goal = '';
}
// Seeds the persistent chat with Evo's standing instruction prompts.
async init() {
const { chat } = this.context;
const initialMessages = [
{ role: 'user', content: prompts.exhaustAllApproaches },
{ role: 'user', content: prompts.variablesExplainer },
{ role: 'user', content: prompts.evoExplainer },
];
await chat.persistent(initialMessages);
}
// Records the goal and appends it to the persistent chat for this run.
async initRun(args) {
this.goal = args.goal;
await this.context.chat.persistent([{ role: 'user', content: args.goal }]);
}
/**
 * Main loop hook: predicts the next step; short-circuits with a success
 * result when the prediction signals completion; otherwise selects the best
 * agent for the step and returns the logs/functions it should run with.
 */
async beforeLlmResponse() {
const { chat } = this.context;
const { messages } = chat.chatLogs;
await this.context.logger.notice('Predicting best next step...');
// Re-predict only on the first pass or on even iterations; odd iterations
// reuse the cached prediction, halving the number of extra LLM calls.
// The termination marker is only offered when quick termination is enabled
// AND at least one prediction has already been made.
const prediction = !this.previousPrediction || this.loopCounter % 2 === 0
? await this.predictBestNextStep(messages, this.previousAgent, this.enableQuickTermination ? (this.previousPrediction ? 'SUCCESS' : undefined) : undefined)
: this.previousPrediction;
this.loopCounter += 1;
// Heuristic completion check: exact marker, quoted marker embedded in a
// longer reply, or an explicit "goal has been achieved" phrase.
if (prediction === 'SUCCESS' || prediction.includes('"SUCCESS"') || prediction.includes('goal has been achieved')) {
return {
logs: chat.chatLogs,
agentFunctions: [],
allFunctions: [],
finalOutput: {
type: 'success',
title: 'SUCCESS',
},
};
}
// Embed the prediction so findBestAgent can match it against agent personas.
const predictionVector = await this.createEmbeddingVector(prediction);
await this.context.logger.info(`### Prediction:\n-> ${prediction}`);
await this.context.logger.notice('Finding best agent to execute step...');
const [agent, agentFunctions, persona, allFunctions] = await findBestAgent(predictionVector, this.context);
// Run each agent's one-time setup exactly once per run, keyed by its name.
if (!this.initializedAgents.has(agent.config.prompts.name)) {
this.initializedAgents.add(agent.config.prompts.name);
await agent.onFirstRun({ goal: this.goal }, chat);
}
this.previousPrediction = prediction;
this.previousAgent = agent;
// Contextualize against persona + prediction so the most relevant history
// survives the token-budget trim, then prepend the persona message.
const contextualizedChat = await this.contextualizeChat(await this.createEmbeddingVector(`${persona}\n${prediction}`));
const logs = insertPersonaAsFirstMsg(persona, contextualizedChat.chatLogs, chat.tokenizer);
return {
logs,
agentFunctions,
allFunctions: allFunctions.map(agentFunctionBaseToAgentFunction(agent)),
};
}
/**
 * Shrinks the chat to fit the model's context window: reserves room for the
 * response plus a 500-token safety margin, then splits the remainder 25%
 * persistent / 75% temporary.
 */
async contextualizeChat(predictionVector) {
const maxContextTokens = this.context.llm.getMaxContextTokens();
const maxResponseTokens = this.context.llm.getMaxResponseTokens();
const fuzz = 500;
const maxChatTokens = maxContextTokens - maxResponseTokens - fuzz;
return this._cChat.contextualize(predictionVector, {
persistent: maxChatTokens * 0.25,
temporary: maxChatTokens * 0.75,
});
}
/**
 * Asks the LLM for the best next step. The previous agent's persona (minus
 * its final message) primes the prediction when available; otherwise a
 * general persona is used. The zh-CN prompt below instructs the model to:
 * fetch missing info via web_search/fs_readFile, check whether all tasks are
 * done, and propose the assistant's best next step — optionally replying
 * with `terminationStr` if the goal is 100% achieved.
 */
async predictBestNextStep(messages, previousAgent, terminationStr) {
const agentPersona = previousAgent
?
previousAgent.config.prompts.initialMessages().slice(0, -1)
: [{ role: 'user', content: prompts.generalAgentPersona }];
// Early-termination clause is only offered for short goals (< 150 chars),
// presumably to keep the inlined goal from bloating the prompt — TODO confirm.
return this.askLlm(new Prompt().json([...agentPersona, ...messages]).line(`
如果需要使用信息,并且该信息不在对话中,则必须通过web_search搜索或fs_readFile读取该信息。
结合前面用户和助手之间的对话,认真检查是否所有的任务已经完成。
如果没有完成,根据您的专业能力,助手下一步最好的步骤是什么?
${terminationStr && this.goal.length < 150
? `如果您 100% 确定用户的目标已实现,只需回复"${terminationStr}". 用户的目标是:"${this.goal}"。`
: ''}`), {
model: this.context.llm.getModel(),
});
}
// Approximate character budget derived from the token budget.
maxContextChars() {
return tokensToChars(this.maxContextTokens());
}
// Model context size in tokens; falls back to 8000 when the LLM reports none.
maxContextTokens() {
return this.context.llm.getMaxContextTokens() ?? 8000;
}
// A message is "large" when it exceeds 1/16 (0.0625) of the context budget.
isLargeMsg = (message) => {
return !!message.content && message.content.length > this.maxContextChars() * 0.0625;
};
}