taskforce-aiagent
Version:
TaskForce is a modular, open-source, production-ready TypeScript agent framework for orchestrating AI agents, LLM-powered autonomous agents, task pipelines, dynamic toolchains, RAG workflows, and memory/retrieval systems.
309 lines (308 loc) • 15 kB
JavaScript
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.callAIModel = callAIModel;
const openai_1 = __importDefault(require("openai"));
const generative_ai_1 = require("@google/generative-ai");
const aiConfig_js_1 = require("../configs/aiConfig.js");
const toolAdapter_js_1 = require("../tools/toolWorker/toolAdapter.js");
const log_helper_js_1 = require("../helpers/log.helper.js");
const chalk_1 = __importDefault(require("chalk"));
const telemetry_helper_js_1 = require("../helpers/telemetry.helper.js");
const modelsDefaultOptions_js_1 = require("./modelsDefaultOptions.js");
/**
 * Builds the effective generation options for a model by layering the
 * caller's overrides on top of the model's registered defaults.
 * Caller-supplied values always win; neither input object is mutated.
 *
 * @param {string} modelName - Key into modelGenerationDefaults.
 * @param {object} userOptions - Per-call overrides supplied by the caller.
 * @returns {object} A new merged options object.
 */
function getMergedGenerationOptions(modelName, userOptions) {
    const baseline = modelsDefaultOptions_js_1.modelGenerationDefaults[modelName] || {};
    return Object.assign({}, baseline, userOptions);
}
/**
 * Public entry point for invoking an LLM. Merges the model's default
 * generation options with the caller's overrides, delegates to
 * callAIModelFunc, and — unless telemetry is disabled via
 * TELEMETRY_MODE === "none" — records call duration and an approximate
 * token count for the agent.
 *
 * @param {string} agentName - Agent name reported to telemetry.
 * @param {string} modelName - Key into aiConfig / modelGenerationDefaults.
 * @param {Array} messages - Chat messages to send to the model.
 * @param {boolean} [verbose=false] - Enables detailed tool-call logging.
 * @param {Array} [tools] - Optional tool definitions for tool-capable models.
 * @param {object} [modelOptions={}] - Generation option overrides.
 * @returns {Promise<string>} The model's text response.
 */
async function callAIModel(agentName, modelName, messages, verbose = false, tools, modelOptions = {}) {
    const options = getMergedGenerationOptions(modelName, modelOptions);
    if (process.env.TELEMETRY_MODE === "none") {
        return callAIModelFunc(modelName, messages, verbose, tools, options);
    }
    const started = Date.now();
    const answer = await callAIModelFunc(modelName, messages, verbose, tools, options);
    const elapsedMs = Date.now() - started;
    // Rough token estimate: ~4 characters of serialized input per token.
    const approxTokens = Math.round(JSON.stringify(messages).length / 4);
    (0, telemetry_helper_js_1.recordLLMCall)(agentName, approxTokens, elapsedMs, modelName);
    return answer;
}
/**
 * Routes a chat-completion request to the provider configured for
 * `modelName` in aiConfig and returns the model's text reply.
 *
 * Providers handled:
 *  - "openai":    SDK call; supports tool calling with a follow-up completion
 *                 that feeds tool outputs back to the model.
 *  - "deepseek":  OpenAI-compatible HTTP endpoint with bearer auth.
 *  - "local":     OpenAI-compatible HTTP endpoint, no auth.
 *  - "anthropic": raw Messages API HTTP call.
 *  - "gemini":    Google SDK; supports function calling, but tool outputs are
 *                 concatenated and returned directly (no follow-up call).
 *
 * Fixes vs. previous revision:
 *  - Generation-option fallbacks use `??` instead of `||`, so explicit zeros
 *    (e.g. temperature: 0) are honored rather than replaced by defaults.
 *  - The DeepSeek request sends the model name (config.model.name) instead of
 *    the whole model config object.
 *
 * @param {string} modelName - Key into aiConfig.
 * @param {Array} messages - Chat messages in OpenAI format.
 * @param {boolean} [verbose=false] - Log tool-call activity via TFLog.
 * @param {Array} [tools] - Tool definitions; only used when the model config
 *        has supportsTools set.
 * @param {object} [modelOptions={}] - temperature, top_p, max_tokens,
 *        presence_penalty, frequency_penalty.
 * @returns {Promise<string>} Model response text ("" when absent).
 * @throws {Error} When the model is not configured, the provider is
 *         unsupported, or an OpenAI tool call names an unknown tool.
 */
async function callAIModelFunc(modelName, messages, verbose = false, tools, modelOptions = {}) {
    const config = aiConfig_js_1.aiConfig[modelName];
    if (!config)
        throw new Error(`Model '${modelName}' not defined in aiConfig`);
    // `??` (not `||`) so explicit zero values are respected.
    const temperature = modelOptions.temperature ?? 0.7;
    const top_p = modelOptions.top_p ?? 1;
    const max_tokens = modelOptions.max_tokens ?? 2048;
    const presence_penalty = modelOptions.presence_penalty ?? 0;
    const frequency_penalty = modelOptions.frequency_penalty ?? 0;
    switch (config.model.provider) {
        case "openai": {
            const openai = new openai_1.default({ apiKey: config.apiKey });
            // Shared request fields for every OpenAI round-trip below.
            const baseRequest = {
                model: config.model.name,
                temperature,
                top_p,
                max_tokens,
                presence_penalty,
                frequency_penalty,
            };
            if (config.model.supportsTools && tools?.length) {
                const rawTools = (0, toolAdapter_js_1.toAIToolSchema)(config.model, tools) || [];
                // Strip the internal __originalTool__ marker before sending to the API.
                const openAITools = rawTools.map(({ __originalTool__, ...t }) => t);
                // Map tool name -> executable tool so returned tool_calls can be run.
                const toolMap = rawTools.reduce((acc, t) => {
                    if (t.function?.name && t.__originalTool__) {
                        acc[t.function.name] = t.__originalTool__;
                    }
                    return acc;
                }, {});
                const res = await withRetry(() => openai.chat.completions.create({
                    ...baseRequest,
                    messages,
                    tools: openAITools,
                    tool_choice: "auto",
                }), 3, 1000, verbose);
                const toolCalls = res.choices[0].message.tool_calls;
                const toolResult = [];
                if (toolCalls && toolCalls.length > 0 && toolMap) {
                    if (verbose) {
                        (0, log_helper_js_1.TFLog)(`🧠 [LLM] Total ${toolCalls.length} tool call${toolCalls.length > 1 ? "s" : ""} received.`, chalk_1.default.yellow);
                        const names = toolCalls.map((t) => t.function.name).join(", ");
                        (0, log_helper_js_1.TFLog)(`🧠 [LLM] Tool calls received: ${names}`, chalk_1.default.yellow);
                    }
                    // Execute each requested tool sequentially and collect its output
                    // as a "tool" role message keyed by the call id.
                    for (const toolCall of toolCalls) {
                        const toolName = toolCall.function.name;
                        const args = JSON.parse(toolCall.function.arguments);
                        const selectedTool = toolMap[toolName];
                        if (!selectedTool) {
                            throw new Error(`Tool not found: ${toolName}`);
                        }
                        if (verbose) {
                            (0, log_helper_js_1.TFLog)(`🧠 [LLM] Calling Tool: '${toolCall.function.name}' ${JSON.stringify(args, null, 2)}`, chalk_1.default.yellow);
                        }
                        const result = await selectedTool.handler(args);
                        if (verbose) {
                            (0, log_helper_js_1.TFLog)(`🧠 [LLM] Tool Result: '${toolCall.function.name}'`, chalk_1.default.yellow);
                            (0, log_helper_js_1.TFLog)(`Output:\n${result}\n`, chalk_1.default.white);
                        }
                        toolResult.push({
                            role: "tool",
                            tool_call_id: toolCall.id,
                            content: result,
                        });
                    }
                    // Second round-trip: replay the assistant's tool_calls plus each
                    // tool's output so the model can produce a final answer.
                    const followUp = await withRetry(() => openai.chat.completions.create({
                        ...baseRequest,
                        messages: [
                            ...messages,
                            {
                                role: "assistant",
                                content: null,
                                tool_calls: [...toolCalls],
                            },
                            ...toolResult,
                        ],
                    }), 3, 1000, verbose);
                    return followUp.choices[0].message.content || "";
                }
                return res.choices[0].message.content || "";
            }
            // No tools: single plain completion.
            const res = await withRetry(() => openai.chat.completions.create({
                ...baseRequest,
                messages,
            }), 3, 1000, verbose);
            return res.choices[0].message.content || "";
        }
        case "deepseek": {
            const res = await fetch(config.baseUrl, {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    Authorization: `Bearer ${config.apiKey}`,
                },
                body: JSON.stringify({
                    // Fix: send the model name, not the whole model config object.
                    model: config.model.name,
                    messages,
                    temperature,
                    top_p,
                    max_tokens,
                    presence_penalty,
                    frequency_penalty,
                }),
            });
            const json = await res.json();
            return json.choices?.[0]?.message?.content || "";
        }
        case "local": {
            // OpenAI-compatible local endpoint; no auth header.
            const res = await fetch(config.baseUrl, {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                },
                body: JSON.stringify({
                    model: config.model.name,
                    messages,
                    temperature,
                    top_p,
                    max_tokens,
                    presence_penalty,
                    frequency_penalty,
                }),
            });
            const json = await res.json();
            return json.choices?.[0]?.message?.content || "";
        }
        case "anthropic": {
            // NOTE(review): Anthropic's Messages API expects system prompts in a
            // top-level `system` field and does not document presence/frequency
            // penalties; passing role:"system" messages and these params through
            // may be rejected by the API — confirm against the Messages API docs.
            const res = await fetch(config.baseUrl, {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    "x-api-key": config.apiKey,
                    "anthropic-version": "2023-06-01",
                },
                body: JSON.stringify({
                    model: config.model.name,
                    messages: messages.map((m) => ({
                        role: m.role,
                        content: m.content,
                    })),
                    temperature,
                    top_p,
                    max_tokens,
                    presence_penalty,
                    frequency_penalty,
                }),
            });
            const json = await res.json();
            // Anthropic-native shape first, OpenAI-compatible shape as fallback.
            return (json?.content?.[0]?.text || json?.choices?.[0]?.message?.content || "");
        }
        case "gemini": {
            const genAI = new generative_ai_1.GoogleGenerativeAI(config.apiKey);
            const model = genAI.getGenerativeModel({ model: config.model.name });
            // Gemini has no "system" role: fold all system prompts into a prefix
            // on the first remaining message.
            // NOTE(review): other roles are passed through unchanged; Gemini
            // expects "user"/"model", so callers presumably never send
            // "assistant" here — verify.
            function mergeSystemAndUserMessages(messages) {
                let systemPrompt = "";
                const nonSystemMessages = messages.filter((msg) => {
                    if (msg.role === "system") {
                        systemPrompt += msg.content.trim() + "\n\n";
                        return false;
                    }
                    return true;
                });
                const mergedMessages = nonSystemMessages.map((msg, index) => {
                    const prefix = index === 0 && systemPrompt ? systemPrompt : "";
                    return {
                        role: msg.role,
                        parts: [{ text: prefix + msg.content }],
                    };
                });
                return mergedMessages;
            }
            const geminiMessages = mergeSystemAndUserMessages(messages);
            const generationConfig = {
                temperature,
                topP: top_p,
                maxOutputTokens: max_tokens,
            };
            // If tools are supported
            if (config.model.supportsTools && tools?.length) {
                const rawTools = (0, toolAdapter_js_1.toAIToolSchema)(config.model, tools) || [];
                const functionDeclarations = rawTools.map(({ __originalTool__, ...tool }) => {
                    const fn = tool.function;
                    return {
                        name: fn.name,
                        description: fn.description,
                        parameters: fn.parameters,
                    };
                });
                const toolMap = rawTools.reduce((acc, tool) => {
                    if (tool.function?.name && tool.__originalTool__) {
                        acc[tool.function.name] = tool.__originalTool__;
                    }
                    return acc;
                }, {});
                if (verbose) {
                    (0, log_helper_js_1.TFLog)(`🛠️ [LLM] Passing ${functionDeclarations.length} tools`, chalk_1.default.yellow);
                }
                const result = await model.generateContent({
                    contents: geminiMessages,
                    generationConfig,
                    tools: [{ functionDeclarations }],
                });
                const parts = result.response?.candidates?.[0]?.content?.parts || [];
                const functionCalls = parts
                    .map((p) => p.functionCall)
                    .filter((fc) => !!fc);
                const responseText = parts
                    .map((p) => p.text)
                    .filter(Boolean)
                    .join("\n");
                if (functionCalls.length && toolMap) {
                    const outputs = [];
                    for (const call of functionCalls) {
                        const tool = toolMap[call.name];
                        if (!tool)
                            continue;
                        const args = call.args;
                        if (verbose) {
                            (0, log_helper_js_1.TFLog)(`🧠 [LLM] Calling Tool '${call.name}'`, chalk_1.default.yellow);
                            (0, log_helper_js_1.TFLog)(`Args: ${JSON.stringify(args, null, 2)}`, chalk_1.default.white);
                        }
                        const output = await tool.handler(args);
                        if (verbose) {
                            (0, log_helper_js_1.TFLog)(`🧠 [LLM] Tool Result '${call.name}'`, chalk_1.default.yellow);
                            (0, log_helper_js_1.TFLog)(`Output: ${output}`, chalk_1.default.white);
                        }
                        outputs.push({
                            role: "tool",
                            tool_call_id: call.name,
                            content: output,
                        });
                    }
                    // Unlike the OpenAI branch, no follow-up completion is made:
                    // raw tool outputs are returned (falling back to any model text).
                    return outputs.map((o) => o.content).join("\n") || responseText;
                }
                return responseText;
            }
            const result = await model.generateContent({
                contents: geminiMessages,
                generationConfig,
            });
            return result.response?.candidates?.[0]?.content?.parts?.[0]?.text || "";
        }
        default:
            throw new Error(`Unsupported LLM provider: ${config.model.provider}`);
    }
}
/**
 * Runs `fn`, retrying up to `maxRetries` times when the failure looks like a
 * rate limit (HTTP status 429 or a message containing "rate limit"). Any
 * other error is rethrown immediately.
 *
 * Fixes vs. previous revision: no pointless sleep after the final failed
 * attempt; the exhaustion error preserves the last underlying error via
 * `cause`; messages no longer claim to be OpenAI-specific (this helper is
 * used for any provider).
 *
 * @param {() => Promise<any>} fn - Operation to attempt.
 * @param {number} [maxRetries=3] - Maximum number of attempts.
 * @param {number} [delayMs=1000] - Fixed wait between attempts, in ms.
 * @param {boolean} [verbose=false] - Log a warning before each retry.
 * @returns {Promise<any>} Result of the first successful attempt.
 * @throws The original non-rate-limit error unchanged, or an Error with
 *         `cause` set to the last rate-limit error once attempts are exhausted.
 */
async function withRetry(fn, maxRetries = 3, delayMs = 1000, verbose = false) {
    let lastErr;
    for (let attempt = 1; attempt <= maxRetries; attempt++) {
        try {
            return await fn();
        }
        catch (err) {
            const isRateLimit = err?.status === 429 ||
                err?.message?.toLowerCase().includes("rate limit");
            if (!isRateLimit) {
                throw err;
            }
            lastErr = err;
            // Only wait if another attempt will actually be made.
            if (attempt < maxRetries) {
                if (verbose) {
                    console.warn(`⏳ Rate limit hit. Retrying in ${delayMs}ms... (attempt ${attempt})`);
                }
                await new Promise((res) => setTimeout(res, delayMs));
            }
        }
    }
    throw new Error("❌ Retry failed after repeated rate limit (429) errors.", { cause: lastErr });
}