auto-gpt-ts
Version:
My take of Auto-GPT in TypeScript.
267 lines • 14.4 kB
JavaScript
;
// tslib-style helper: drives a generator of promises to completion,
// producing a single Promise for the generator's return value.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    // Wrap plain (non-promise) yielded values in the promise constructor P.
    function adopt(value) {
        return value instanceof P ? value : new P(function (resolve) { resolve(value); });
    }
    return new (P || (P = Promise))(function (resolve, reject) {
        // Advance the generator one tick; resolve when it is exhausted.
        function step(result) {
            if (result.done) {
                resolve(result.value);
                return;
            }
            adopt(result.value).then(fulfilled, rejected);
        }
        function fulfilled(value) {
            try { step(generator.next(value)); } catch (e) { reject(e); }
        }
        function rejected(value) {
            try { step(generator["throw"](value)); } catch (e) { reject(e); }
        }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
// tslib-style helper: normalises a CommonJS module so it can be consumed
// as if it had an ES-module default export.
var __importDefault = (this && this.__importDefault) || function (mod) {
    if (mod && mod.__esModule) {
        return mod;
    }
    return { "default": mod };
};
// CommonJS module boilerplate emitted by the TypeScript compiler.
Object.defineProperty(exports, "__esModule", { value: true });
exports.Agent = void 0;
const chalk_1 = __importDefault(require("chalk"));
const logging_1 = require("../logging");
const commands_1 = require("../commands");
const config_1 = require("../config/config");
const json_fix_llm_1 = require("../json-utils/json-fix-llm");
const utilities_1 = require("../json-utils/utilities");
const chat_1 = require("../llm/chat");
const llm_utils_1 = require("../llm/llm-utils");
const token_counter_1 = require("../llm/token-counter");
const log_cycle_1 = require("../logging/log-cycle");
const spinner_1 = require("../spinner");
const utils_1 = require("../utils");
const workspace_1 = require("../workspace/workspace");
// Module-level Config instance used for token limits and workspace policy.
// NOTE(review): each Agent also constructs its own `this.cfg` in the
// constructor — the two are separate Config objects; confirm that is intended.
const CFG = new config_1.Config();
/**
 * A base agent class that can be extended to create an agent with a specific
 * AI role. The agent runs an interaction loop: it asks the LLM for its next
 * command, optionally asks the user to authorise it, executes the command,
 * and appends the result to the running message history.
 */
class Agent extends logging_1.Loggable {
    /**
     * @param aiName - display name of the agent, used in prompts and logs.
     * @param memory - long-term memory backend handed to chat_with_ai.
     * @param fullMessageHistory - running chat transcript (mutated in place).
     * @param nextActionCount - number of pre-authorised commands remaining.
     * @param config - AI config object (aiName / aiRole are read from it).
     * @param systemPrompt - system prompt sent on every LLM call.
     * @param triggeringPrompt - prompt asking the LLM for its next command.
     * @param workspace_directory - root directory the agent may operate in.
     */
    constructor(aiName, memory, fullMessageHistory, nextActionCount, config, systemPrompt, triggeringPrompt, workspace_directory) {
        super();
        this.aiName = aiName;
        this.memory = memory;
        this.fullMessageHistory = fullMessageHistory;
        this.nextActionCount = nextActionCount;
        this.config = config;
        this.systemPrompt = systemPrompt;
        this.triggeringPrompt = triggeringPrompt;
        this.userInput = "";
        // Per-cycle state: the command the LLM chose and how the user
        // responded. Reset at the top of every interaction cycle.
        this.interactionState = {
            commandName: "",
            args: "",
            userInput: "",
            isDone: false,
        };
        this.cfg = new config_1.Config();
        this.summaryMemory = "I was created.";
        this.lastMemoryIndex = 0;
        this.workspace = new workspace_1.Workspace(workspace_directory, CFG.restrictToWorkspace);
        // Compact ISO timestamp (separators stripped, milliseconds+Z dropped)
        // used to group this run's log-cycle files.
        this.created_at = new Date()
            .toISOString()
            .replace(/[-T:]/g, "")
            .slice(0, -4);
        this.cycleCount = 0;
        this.logCycleHandler = new log_cycle_1.LogCycleHandler();
    }
    /**
     * Rewrites path-like command arguments so they resolve inside the agent's
     * workspace. An empty or "/" directory is mapped to the workspace root.
     * NOTE: mutates and returns the same object it is given.
     */
    _resolvePathlikeCommandArgs(commandArgs = {}) {
        if ("directory" in commandArgs &&
            (commandArgs["directory"] === "" || commandArgs["directory"] === "/")) {
            commandArgs["directory"] = this.workspace.root.toString();
        }
        else {
            for (const pathlike of [
                "filename",
                "directory",
                "clone_path",
                "destinationPath",
            ]) {
                if (pathlike in commandArgs) {
                    commandArgs[pathlike] = this.workspace
                        .getPath(commandArgs[pathlike])
                        .toString();
                }
            }
        }
        return commandArgs;
    }
    /**
     * Asks the LLM to critique the agent's own thoughts/reasoning/plan/criticism
     * and returns the critique text.
     * @param thoughts - the "thoughts" object from the assistant's JSON reply.
     * @param llm_model - model name to use for the feedback completion.
     */
    getSelfFeedback(thoughts, llm_model) {
        return __awaiter(this, void 0, void 0, function* () {
            const ai_role = this.config.aiRole;
            const feedback_prompt = `Below is a message from an AI agent with the role of ${ai_role}. Please review the provided Thought, Reasoning, Plan, and Criticism. If these elements accurately contribute to the successful execution of the assumed role, respond with the letter 'Y' followed by a space, and then explain why it is effective. If the provided information is not suitable for achieving the role's objectives, please provide one or more sentences addressing the issue and suggesting a resolution.`;
            const reasoning = thoughts.reasoning || "";
            const plan = thoughts.plan || "";
            const thought = thoughts.thoughts || "";
            const criticism = thoughts.criticism || "";
            const feedback_thoughts = thought + reasoning + plan + criticism;
            return (0, llm_utils_1.createChatCompletion)([{ role: "user", content: feedback_prompt + feedback_thoughts }], llm_model);
        });
    }
    /**
     * Whether the interaction loop should run another cycle.
     * Override to intercept; the default stops once isDone is set.
     */
    shouldContinue() {
        return __awaiter(this, void 0, void 0, function* () {
            return !this.interactionState.isDone;
        });
    }
    /** Hook invoked after each cycle. Override in subclasses. */
    onInteractionLoopEnd() {
        return __awaiter(this, void 0, void 0, function* () {
            // override
        });
    }
    /**
     * Runs interaction cycles until shouldContinue() returns false, resetting
     * the per-cycle state before each cycle.
     */
    startInteractionLoop() {
        return __awaiter(this, void 0, void 0, function* () {
            this.cycleCount = 0;
            while (yield this.shouldContinue()) {
                this.interactionState = {
                    commandName: "",
                    args: "",
                    userInput: "",
                    isDone: false,
                };
                yield this.loopInteraction();
                yield this.onInteractionLoopEnd();
                this.cycleCount += 1;
            }
        });
    }
    /**
     * One full cycle: log the history, query the LLM, execute (or report on)
     * the chosen command, and push the result into the message history.
     */
    loopInteraction() {
        return __awaiter(this, void 0, void 0, function* () {
            // Discontinue if continuous limit is reached
            this.logCycleHandler.logCountWithinCycle = 0;
            this.logCycleHandler.logCycle(this.config.aiName, this.created_at, this.cycleCount, this.fullMessageHistory, log_cycle_1.FULL_MESSAGE_HISTORY_FILE_NAME);
            if (this.cfg.continuousMode &&
                this.cfg.continuousLimit > 0 &&
                this.cycleCount > this.cfg.continuousLimit) {
                this.logger.info(`Continuous Limit Reached: ${this.cfg.continuousLimit}`);
                // FIX: previously this only returned, leaving isDone false, so
                // startInteractionLoop would spin forever re-logging this message.
                this.interactionState.isDone = true;
                return true;
            }
            // Send message to AI, get response
            yield this.talkToAI();
            if (this.interactionState.isDone) {
                return;
            }
            let result = "";
            // ### EXECUTE COMMAND ###
            if (this.interactionState.commandName &&
                this.interactionState.commandName.toLowerCase().startsWith("error")) {
                // FIX: interactionState.args is never assigned (talkToAI stores the
                // parsed arguments in parsedArgs), so the error detail was always empty.
                result = `Command ${this.interactionState.commandName} threw the following error: ${this.interactionState.parsedArgs}`;
            }
            else if (this.interactionState.commandName &&
                // FIX: the command names actually set are "humanFeedback" and
                // "self_feedback"; the original startsWith("feedback") matched
                // neither, so feedback was wrongly routed to executeCommand.
                this.interactionState.commandName.toLowerCase().endsWith("feedback")) {
                result = `Human feedback: ${this.interactionState.userInput}`;
            }
            else if (this.interactionState.commandName) {
                let commandResult = yield (0, commands_1.executeCommand)(this.interactionState.commandName, this.interactionState.parsedArgs);
                if (typeof commandResult === "object") {
                    commandResult = JSON.stringify(commandResult);
                }
                result = `Command ${this.interactionState.commandName} returned the following result: ${commandResult}`;
                // If the result would blow the fast-model context budget, replace the
                // summary memory with a warning instead of storing the oversized output.
                const resultLength = (0, token_counter_1.countStringTokens)(result, CFG.fastLlmModel);
                const memoryLength = (0, token_counter_1.countStringTokens)(this.summaryMemory, CFG.fastLlmModel);
                if (resultLength + memoryLength + 600 > CFG.fastTokenLimit) {
                    this.summaryMemory = `Failure: command ${this.interactionState.commandName} returned too much output. Do not execute this command again with the same arguments.`;
                }
                // Consume one pre-authorised action, if any remain.
                if (this.nextActionCount > 0) {
                    this.nextActionCount -= 1;
                }
            }
            if (result) {
                this.fullMessageHistory.push((0, chat_1.createChatMessage)("system", result));
                this.logger.info(chalk_1.default.cyan(`SYSTEM:`) + ` ${result}`);
            }
            else {
                this.fullMessageHistory.push((0, chat_1.createChatMessage)("system", "Unable to execute command"));
                this.logger.info(chalk_1.default.cyan(`SYSTEM:`) + ` Unable to execute command`);
            }
        });
    }
    /**
     * Queries the LLM for its next command, parses the JSON reply into
     * interactionState, and — unless running in continuous mode with
     * pre-authorised actions — prompts the user to authorise, give feedback,
     * request self-feedback, or exit.
     */
    talkToAI() {
        var _a, _b;
        return __awaiter(this, void 0, void 0, function* () {
            let assistantReply = yield (0, spinner_1.withSpinner)("Thinking... ", () => (0, chat_1.chat_with_ai)(this, this.systemPrompt, this.triggeringPrompt, this.fullMessageHistory, this.memory, this.cfg.fastTokenLimit)); // TODO: This hardcodes the model to use GPT3.5. Make this an argument
            let assistantReplyJson = (0, json_fix_llm_1.fixJsonUsingMultipleTechniques)(assistantReply);
            // Print Assistant thoughts
            if (JSON.stringify(assistantReplyJson) !== `{}`) {
                (0, utilities_1.validateJson)(assistantReplyJson, ""); //, LLM_DEFAULT_RESPONSE_FORMAT);
                // Get command name and arguments
                try {
                    (0, log_cycle_1.printAssistantThoughts)(this.aiName, assistantReplyJson, this.cfg.speakMode);
                    const res = (0, commands_1.getCommand)(assistantReplyJson);
                    this.interactionState.commandName = res.commandName;
                    this.interactionState.parsedArgs = res.args;
                    this.interactionState.parsedArgs = this._resolvePathlikeCommandArgs(this.interactionState.parsedArgs);
                }
                catch (e) {
                    this.logger.error("Error: \n", e);
                }
            }
            this.logCycleHandler.logCycle(this.config.aiName, this.created_at, this.cycleCount, assistantReplyJson, log_cycle_1.NEXT_ACTION_FILE_NAME);
            if (!this.cfg.continuousMode && this.nextActionCount === 0) {
                // ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                // Get key press: Prompt the user to press enter to continue or escape
                // to exit
                this.userInput = "";
                this.logger.info("NEXT ACTION: " +
                    chalk_1.default.cyan(`COMMAND = ${this.interactionState.commandName} ` +
                        `ARGUMENTS = ${JSON.stringify((_a = this.interactionState.parsedArgs) !== null && _a !== void 0 ? _a : {}, null, 2)}`));
                this.logger.info("Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands" +
                    "'n' to exit program, or enter feedback for " +
                    `${this.aiName}...`);
                while (true) {
                    let consoleInput = "";
                    if (this.cfg.chatMessagesEnabled) {
                        consoleInput = yield (0, utils_1.cleanInput)("Waiting for your response...");
                    }
                    else {
                        consoleInput = yield (0, utils_1.cleanInput)("\x1b[35mInput\x1b[0m");
                    }
                    if (consoleInput.toLowerCase().trim() === this.cfg.authoriseKey) {
                        this.interactionState.userInput = "GENERATE NEXT COMMAND JSON";
                        break;
                    }
                    else if (consoleInput.toLowerCase().trim() === "s") {
                        this.logger.info("-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=");
                        const thoughts = assistantReplyJson.thoughts || {};
                        const self_feedback_resp = yield this.getSelfFeedback(thoughts, this.cfg.fastLlmModel);
                        this.logger.info(`SELF FEEDBACK: ${self_feedback_resp}`);
                        this.interactionState.userInput = self_feedback_resp;
                        this.interactionState.commandName = "self_feedback";
                        break;
                    }
                    else if (consoleInput.toLowerCase().trim() === "") {
                        this.logger.warn("Invalid input format.");
                        continue;
                    }
                    else if (consoleInput.toLowerCase().startsWith(`${this.cfg.authoriseKey} -`)) {
                        // FIX: parseInt never throws, so the original try/catch was dead
                        // code and a malformed count silently set nextActionCount to NaN.
                        const count = Number.parseInt(consoleInput.split(" ")[1], 10);
                        if (Number.isNaN(count)) {
                            this.logger.warn("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.");
                            continue;
                        }
                        this.nextActionCount = Math.abs(count);
                        this.interactionState.userInput = "GENERATE NEXT COMMAND JSON";
                        break;
                    }
                    else if (consoleInput.toLowerCase().trim() === this.cfg.exitKey) {
                        // FIX: added .trim() for consistency with the sibling branches so
                        // trailing whitespace does not prevent exiting.
                        this.interactionState.userInput = "EXIT";
                        break;
                    }
                    else {
                        // Anything else is treated as free-form human feedback.
                        this.interactionState.userInput = consoleInput;
                        this.interactionState.commandName = "humanFeedback";
                        this.logCycleHandler.logCycle(this.config.aiName, this.created_at, this.cycleCount, this.interactionState.userInput, log_cycle_1.USER_INPUT_FILE_NAME);
                        break;
                    }
                }
                if (this.interactionState.userInput === "GENERATE NEXT COMMAND JSON") {
                    this.logger.info(chalk_1.default.green(`-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=`));
                }
                else if (this.interactionState.userInput === "EXIT") {
                    this.logger.info("Exiting...");
                    this.interactionState.isDone = true;
                    return;
                }
            }
            else {
                // FIX: removed the stray leading quote and trailing `}"` that the
                // original log string carried.
                this.logger.info(`NEXT ACTION: Command = ${this.interactionState.commandName} Arguments = ${JSON.stringify((_b = this.interactionState.parsedArgs) !== null && _b !== void 0 ? _b : {}, null, 2)}`);
            }
        });
    }
}
// Re-assign the hoisted export now that the class is defined.
exports.Agent = Agent;
//# sourceMappingURL=agent.js.map