taskforce-aiagent
TaskForce is a modular, open-source, production-ready TypeScript agent framework for orchestrating AI agents, LLM-powered autonomous agents, task pipelines, dynamic toolchains, RAG workflows, and memory/retrieval systems.
"use strict";
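// Interop helper emitted by the TypeScript compiler for default imports of CommonJS modules.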
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.aiConfig = void 0;
exports.getLLMModelByName = getLLMModelByName;
exports.getLLMRouteByModel = getLLMRouteByModel;
const enum_js_1 = require("./enum.js");
const fs_1 = __importDefault(require("fs"));
const fineTuner_js_1 = require("../fineTune/fineTuner.js");
const helper_js_1 = require("../helpers/helper.js");
const dotenv_1 = __importDefault(require("dotenv"));
dotenv_1.default.config();
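// Built-in model routes, keyed by model name. Each route carries the provider
// API key (read from the environment), an optional baseUrl for providers that
// need an explicit endpoint, and the model metadata (provider, tool support,
// and context-window size with a fallback default).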
const builtInModels = {
"gpt-4o-mini": {
apiKey: process.env.OPENAI_API_KEY,
model: {
name: "gpt-4o-mini",
provider: enum_js_1.SupportedModelProvider.OPENAI,
supportsTools: false,
maxContextTokens: helper_js_1.baseModelTokenLimits["gpt-4o-mini"] || 16000,
},
},
"gpt-4o": {
apiKey: process.env.OPENAI_API_KEY,
model: {
name: "gpt-4o",
provider: enum_js_1.SupportedModelProvider.OPENAI,
supportsTools: true,
maxContextTokens: helper_js_1.baseModelTokenLimits["gpt-4o"] || 64000,
},
},
"gpt-3.5-turbo": {
apiKey: process.env.OPENAI_API_KEY,
model: {
name: "gpt-3.5-turbo",
provider: enum_js_1.SupportedModelProvider.OPENAI,
supportsTools: true,
maxContextTokens: helper_js_1.baseModelTokenLimits["gpt-3.5-turbo"] || 16000,
},
},
"deepseek-chat": {
apiKey: process.env.DEEPSEEK_API_KEY,
baseUrl: "https://api.deepseek.com/v1/chat/completions",
model: {
name: "deepseek-chat",
provider: enum_js_1.SupportedModelProvider.DEEPSEEK,
supportsTools: false,
maxContextTokens: helper_js_1.baseModelTokenLimits["deepseek-chat"] || 16000,
},
},
"claude-3-haiku": {
apiKey: process.env.ANTHROPIC_API_KEY,
baseUrl: "https://api.anthropic.com/v1/messages",
model: {
name: "claude-3-haiku",
provider: enum_js_1.SupportedModelProvider.ANTHROPIC,
supportsTools: false,
maxContextTokens: helper_js_1.baseModelTokenLimits["claude-3-haiku"] || 200000,
},
},
"claude-3-opus": {
apiKey: process.env.ANTHROPIC_API_KEY,
baseUrl: "https://api.anthropic.com/v1/messages",
model: {
name: "claude-3-opus",
provider: enum_js_1.SupportedModelProvider.ANTHROPIC,
supportsTools: false,
maxContextTokens: helper_js_1.baseModelTokenLimits["claude-3-opus"] || 200000,
},
},
"gemini-1.5-pro": {
apiKey: process.env.GEMINI_API_KEY,
model: {
name: "gemini-1.5-pro",
provider: enum_js_1.SupportedModelProvider.GEMINI,
supportsTools: false,
maxContextTokens: helper_js_1.baseModelTokenLimits["gemini-1.5-pro"] || 1000000,
},
},
"gemini-1.5-flash": {
apiKey: process.env.GEMINI_API_KEY,
model: {
name: "gemini-1.5-flash",
provider: enum_js_1.SupportedModelProvider.GEMINI,
supportsTools: false,
maxContextTokens: helper_js_1.baseModelTokenLimits["gemini-1.5-flash"] || 2000000,
},
},
"local-meta-llama": {
baseUrl: "http://localhost:1234/v1/chat/completions",
model: {
name: "meta-llama-3.1-8b-instruct",
provider: enum_js_1.SupportedModelProvider.LOCAL,
supportsTools: false,
maxContextTokens: helper_js_1.baseModelTokenLimits["meta-llama-3.1-8b-instruct"] || 8192,
},
},
"local-hermes-writer": {
baseUrl: "http://localhost:1234/v1/chat/completions",
model: {
name: "nous-hermes-2-mistral-7b-dpo",
provider: enum_js_1.SupportedModelProvider.LOCAL,
supportsTools: false,
maxContextTokens: helper_js_1.baseModelTokenLimits["nous-hermes-2-mistral-7b-dpo"] || 8192,
},
},
};
// Load external fine-tuned models
let externalModels = {};
const externalPath = fineTuner_js_1.MODELS_PATH;
if (fs_1.default.existsSync(externalPath)) {
try {
const raw = fs_1.default.readFileSync(externalPath, "utf-8");
externalModels = JSON.parse(raw);
}
catch (err) {
console.warn("⚠️ Failed to load external fine-tuned models:", err);
}
}
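// Merged model registry: external fine-tuned models override built-in entries with the same name.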
exports.aiConfig = {
...builtInModels,
...externalModels,
};
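/**
 * Looks up the model metadata (name, provider, tool support, context window)
 * for a registered model name. Returns undefined if the name is not in aiConfig.
 */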
function getLLMModelByName(modelName) {
const route = getLLMRouteByModel(modelName);
return route?.model;
}
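/**
 * Returns the full route entry (API key, optional baseUrl, and model metadata)
 * for a model name, or undefined if the model is not registered.
 */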
function getLLMRouteByModel(modelName) {
return exports.aiConfig[modelName];
}
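// Illustrative usage sketch (not part of this module; the require path below is
// hypothetical and depends on where the compiled file lives in the package):
//   const { getLLMModelByName, getLLMRouteByModel } = require("./aiConfig.js");
//   const model = getLLMModelByName("gpt-4o");         // model metadata, or undefined if unregistered
//   const route = getLLMRouteByModel("deepseek-chat"); // full route: { apiKey, baseUrl, model }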