UNPKG

auto-gpt-ts

Version:

My take on Auto-GPT in TypeScript

70 lines 3.3 kB
"use strict";
// TypeScript-emitted decorator shim: applies decorators (bottom-up) to a class,
// method, or property descriptor. Standard tsc output; do not hand-edit logic.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
// TypeScript-emitted async shim: drives a generator to completion, adapting each
// yielded value to a Promise. Standard tsc output for down-leveled async/await.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.AgentManager = void 0;
const config_1 = require("../config/config");
const llm_utils_1 = require("../llm/llm-utils");
// NOTE(review): "singelton" is a typo in the source module's filename — fix it
// at the source (rename the file) before correcting this require path.
const singelton_1 = require("../singelton");
/**
 * Registry of sub-agent conversations, keyed by an auto-incrementing integer.
 * Each registry entry is a tuple [task, messageHistory, model]. Decorated as a
 * process-wide singleton via the project's Singleton decorator.
 */
let AgentManager = class AgentManager {
    constructor() {
        this.nextKey = 0; // next key to hand out from createAgent()
        this.agents = {}; // key -> [task, messages, model]
        this.cfg = new config_1.Config();
    }
    // Create new GPT agent
    // TODO: Centralise use of create_chat_completion() to globally enforce token limit
    /**
     * Start a new agent conversation seeded with `prompt`, record its message
     * history, and register it under a fresh integer key.
     * @param task   Human-readable description of the agent's task.
     * @param prompt Initial user prompt sent to the model.
     * @param model  Model identifier forwarded to createChatCompletion.
     * @returns [key, firstAssistantReply]
     */
    createAgent(task, prompt, model) {
        return __awaiter(this, void 0, void 0, function* () {
            const messages = [{ role: "user", content: prompt }];
            // Start GPT instance
            const agentReply = yield (0, llm_utils_1.createChatCompletion)(messages, model);
            messages.push({ role: "assistant", content: agentReply });
            const key = this.nextKey;
            this.nextKey += 1;
            this.agents[key] = [task, messages, model];
            return [key, agentReply];
        });
    }
    /**
     * Append `message` to an existing agent's history, query the model, record
     * the assistant reply, and return it.
     * @throws {Error} when `key` does not refer to a registered agent
     *   (previously this surfaced as an opaque TypeError from destructuring
     *   undefined).
     */
    messageAgent(key, message) {
        return __awaiter(this, void 0, void 0, function* () {
            const entry = this.agents[Number(key)];
            if (entry === undefined) {
                throw new Error(`No agent registered under key ${key}`);
            }
            const [, messages, model] = entry;
            // Add user message to message history before sending to agent
            messages.push({ role: "user", content: message });
            // Start GPT instance
            const agentReply = yield (0, llm_utils_1.createChatCompletion)(messages, model);
            messages.push({ role: "assistant", content: agentReply });
            return agentReply;
        });
    }
    /** List all registered agents as [key, task] pairs (keys are strings here). */
    listAgents() {
        return Object.entries(this.agents).map(([key, [task]]) => [key, task]);
    }
    /**
     * Remove an agent from the registry.
     * BUG FIX: the `delete` operator on a plain object never throws for a
     * missing key, so the original try/catch was dead code and the method
     * returned true even for unknown keys. Check existence explicitly so the
     * documented false-on-missing contract actually holds.
     * @returns true when an agent was removed, false when `key` was unknown.
     */
    deleteAgent(key) {
        const k = Number(key);
        if (!Object.prototype.hasOwnProperty.call(this.agents, k)) {
            return false;
        }
        delete this.agents[k];
        return true;
    }
};
AgentManager = __decorate([
    singelton_1.Singleton
], AgentManager);
exports.AgentManager = AgentManager;
//# sourceMappingURL=agent-manager.js.map