ai-agent-runtime
Version:
Runtime adapter that bridges agent frameworks (OpenAI SDK, LangChain) with production infrastructure
1,458 lines (1,421 loc) • 212 kB
JavaScript
// src/agent-runtime.ts
import { randomUUID as randomUUID2 } from "crypto";
import dotenv from "dotenv";
// src/agent-factory.ts
import { Agent, tool } from "@openai/agents";
// src/tool-registry.ts
// In-memory registry of tool definitions, keyed by unique tool name.
var ToolRegistry = class {
  tools = /* @__PURE__ */ new Map();
  /** Add a single tool definition; rejects duplicate names. */
  register(toolDef) {
    if (this.tools.has(toolDef.name)) {
      throw new Error(`Tool '${toolDef.name}' is already registered`);
    }
    this.tools.set(toolDef.name, toolDef);
  }
  /** Register a batch of tools; fails fast on the first duplicate name. */
  registerMany(toolDefs) {
    toolDefs.forEach((toolDef) => this.register(toolDef));
  }
  /** Remove a tool by name; returns true when something was removed. */
  unregister(name) {
    return this.tools.delete(name);
  }
  /** Look up a tool definition, or undefined when absent. */
  get(name) {
    return this.tools.get(name);
  }
  /** All registered tool definitions, in insertion order. */
  getAllTools() {
    return [...this.tools.values()];
  }
  /** Names of all registered tools, in insertion order. */
  getToolNames() {
    return [...this.tools.keys()];
  }
  /** Whether a tool with the given name is registered. */
  has(name) {
    return this.tools.has(name);
  }
  /** Drop every registered tool. */
  clear() {
    this.tools.clear();
  }
  /** Number of registered tools. */
  size() {
    return this.tools.size;
  }
};
// src/mcp-manager.ts
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
import { resolve } from "path";
// Manages Model Context Protocol (MCP) servers: registration, transport
// selection (stdio subprocess vs streamable HTTP), authentication header
// construction, tool discovery with JSON-schema patching, and teardown.
// Both maps are keyed by server.name.
var MCPManager = class {
// Registered server configs, keyed by server name.
servers = /* @__PURE__ */ new Map();
// Live MCP SDK clients, keyed by server name.
clients = /* @__PURE__ */ new Map();
// Registers a server config and eagerly connects. Throws on duplicate names.
async registerServer(server) {
if (this.servers.has(server.name)) {
throw new Error(`MCP server '${server.name}' is already registered`);
}
this.servers.set(server.name, server);
await this.connectToServer(server);
}
// Lists the server's tools and converts each into a local tool definition
// whose execute() proxies calls back through the MCP client.
// Returns [] (after logging) if tool listing fails.
async loadServerTools(server) {
const client = await this.getOrCreateClient(server);
const tools = [];
try {
const { tools: mcpTools } = await client.listTools();
for (const mcpTool of mcpTools) {
if (process.env.DEBUG_MCP_SCHEMA === "1") {
console.log(`[MCP Runtime] Tool: ${mcpTool.name}`);
console.log(`[MCP Runtime] Original schema:`, JSON.stringify(mcpTool.inputSchema));
}
// Normalizes the tool's JSON schema in place for OpenAI strict mode:
// defaults missing types to "string", forbids additionalProperties on
// objects, and marks every object property as required (recursively,
// including array item schemas).
const patchSchema = (node, depth = 0) => {
if (!node || typeof node !== "object") return;
if (typeof node.type === "undefined") {
node.type = "string";
}
if (node.type === "object") {
if (typeof node.additionalProperties === "undefined") {
node.additionalProperties = false;
}
if (node.properties) {
const props = node.properties;
const existingRequired = Array.isArray(node.required) ? node.required : [];
const allPropertyKeys = Object.keys(props);
const requiredSet = /* @__PURE__ */ new Set([...existingRequired, ...allPropertyKeys]);
node.required = Array.from(requiredSet);
if (process.env.DEBUG_MCP_SCHEMA === "1") {
console.log(`[MCP Runtime] Depth ${depth}: Setting required for all ${allPropertyKeys.length} properties`);
}
for (const child of Object.values(props)) {
patchSchema(child, depth + 1);
}
}
} else if (node.type === "array" && node.items) {
patchSchema(node.items, depth + 1);
}
};
patchSchema(mcpTool.inputSchema);
// Top-level schema may not be type "object"; ensure the flag regardless.
if (typeof mcpTool.inputSchema.additionalProperties === "undefined") {
mcpTool.inputSchema.additionalProperties = false;
}
if (process.env.DEBUG_MCP_SCHEMA === "1") {
console.log(`[MCP Runtime] Patched schema:`, JSON.stringify(mcpTool.inputSchema));
}
// Capture names in block-scoped locals for the execute() closure below.
const serverName = server.name;
const toolName = mcpTool.name;
const toolDef = {
name: toolName,
description: mcpTool.description || `Tool from ${serverName}`,
// Pass the patched JSON schema directly to OpenAI. Using Zod conversion can
// inadvertently mark nested object properties as required when converted back
// to JSON (OpenAI strict-mode bug May-2025).
parameters: mcpTool.inputSchema,
// Looks the client up at call time so a reconnect is picked up.
execute: async (input) => {
const currentClient = this.clients.get(serverName);
if (!currentClient) {
throw new Error(`No client found for server ${serverName}`);
}
try {
const result = await currentClient.callTool({
name: toolName,
arguments: input
});
return result.content;
} catch (error) {
throw error;
}
}
};
tools.push(toolDef);
}
} catch (error) {
console.error(`Failed to load tools from MCP server '${server.name}':`, error);
}
return tools;
}
// Dispatches on URL scheme: http(s) -> streamable HTTP transport, ws(s) ->
// unsupported, anything else is treated as a stdio command line.
async connectToServer(server) {
try {
if (server.url.startsWith("http://") || server.url.startsWith("https://")) {
await this.connectToHttpServer(server);
} else if (server.url.startsWith("ws://") || server.url.startsWith("wss://")) {
throw new Error("WebSocket transport not yet implemented");
} else {
await this.connectToStdioServer(server);
}
} catch (error) {
// Auth-looking failures on HTTP servers get a more actionable message.
if (this.isOAuthRelatedError(error, server)) {
const enhancedError = this.createOAuthErrorMessage(server, error);
console.error(`Failed to connect to MCP server '${server.name}':`, enhancedError);
throw enhancedError;
}
console.error(`Failed to connect to MCP server '${server.name}':`, error);
throw error;
}
}
// Spawns the server as a subprocess. server.url is treated as a whitespace-
// separated command line (NOTE(review): naive split — quoted arguments
// containing spaces would break; confirm callers never need them).
async connectToStdioServer(server) {
const commandParts = server.url.split(" ");
const command = commandParts[0];
const args = commandParts.slice(1);
// Start from a copy of the parent environment, skipping undefined entries.
const env = {};
for (const [key, value] of Object.entries(process.env)) {
if (value !== void 0) {
env[key] = value;
}
}
// Overlay manifest env vars; "required"/"optional" are placeholder markers
// (not values), and already-present parent values win.
if (server.env) {
for (const [key, value] of Object.entries(server.env)) {
if (value !== "required" && value !== "optional" && !env[key]) {
env[key] = value;
}
}
}
if (server.oauth?.service) {
this.injectOAuthTokens(env, server.oauth.service);
}
const cwd = process.cwd();
// Resolve relative command paths against cwd; bare command names are left
// alone so the OS PATH lookup applies.
let resolvedCommand = command;
if (command.includes("/") || command.startsWith(".")) {
resolvedCommand = command.startsWith("/") ? command : resolve(cwd, command);
}
const transport = new StdioClientTransport({
command: resolvedCommand,
args,
env,
cwd
// Important: set working directory for relative paths in args
});
const client = new Client({
name: `ai-agent-runtime-${server.name}`,
version: "1.0.0"
}, {
capabilities: {
tools: {}
}
});
await client.connect(transport);
this.clients.set(server.name, client);
}
// Connects over the streamable HTTP transport with auth headers attached.
async connectToHttpServer(server) {
const url = new URL(server.url);
const headers = this.buildHttpHeaders(server);
if (process.env.DEBUG === "1") {
console.log(`[MCP] Connecting to HTTP server ${server.name} at ${server.url}`);
console.log(`[MCP] Headers:`, JSON.stringify(headers, null, 2));
}
const transport = new StreamableHTTPClientTransport(url, {
requestInit: {
headers
}
});
const client = new Client({
name: `ai-agent-runtime-${server.name}`,
version: "1.0.0"
}, {
capabilities: {
tools: {}
}
});
await client.connect(transport);
this.clients.set(server.name, client);
}
// Builds HTTP headers, resolving an API key with this precedence:
// 1. explicit auth config (with "${ENV_VAR}" interpolation, then auth.envVar,
//    then <SERVER_NAME>_* conventions),
// 2. the shorthand server.apiKey,
// 3. known per-service env var patterns matched against name/URL,
// 4. <SERVER_NAME>_* conventions,
// 5. an OAuth <SERVICE>_ACCESS_TOKEN.
// server.env entries are additionally forwarded as X-* headers.
buildHttpHeaders(server) {
const headers = {
"Content-Type": "application/json",
"Accept": "application/json"
};
if (process.env.DEBUG === "1") {
console.log(`[MCP] Building headers for ${server.name}, auth:`, server.auth);
}
if (server.auth?.type === "bearer" || server.auth?.type === "api-key") {
let apiKey = server.auth.apiKey;
// "${VAR}" values are resolved from the environment.
if (apiKey?.startsWith("${") && apiKey.endsWith("}")) {
const envVarName = apiKey.slice(2, -1);
apiKey = process.env[envVarName];
if (!apiKey && process.env.DEBUG === "1") {
console.warn(`[MCP] Environment variable ${envVarName} not found for ${server.name}`);
}
}
if (!apiKey && server.auth.envVar) {
apiKey = process.env[server.auth.envVar];
}
// Last resort: probe conventional env var names derived from the server name.
if (!apiKey) {
const serverNameUpper = server.name.toUpperCase().replace(/-/g, "_");
const possibleEnvVars = [
`${serverNameUpper}_API_KEY`,
`${serverNameUpper}_TOKEN`,
`${serverNameUpper}_ACCESS_TOKEN`,
`${serverNameUpper}_SECRET_KEY`
];
for (const envVar of possibleEnvVars) {
apiKey = process.env[envVar];
if (apiKey) {
if (process.env.DEBUG === "1") {
console.log(`[MCP] Auto-detected API key from ${envVar} for ${server.name}`);
}
break;
}
}
}
if (apiKey) {
headers["Authorization"] = `Bearer ${apiKey}`;
if (process.env.DEBUG === "1") {
console.log(`[MCP] Using Bearer token authentication for ${server.name}`);
}
}
} else if (server.apiKey) {
// Shorthand config: apiKey directly on the server entry.
let apiKey = server.apiKey;
if (apiKey.startsWith("${") && apiKey.endsWith("}")) {
const envVarName = apiKey.slice(2, -1);
apiKey = process.env[envVarName] || "";
if (!apiKey && process.env.DEBUG === "1") {
console.warn(`[MCP] Environment variable ${envVarName} not found for ${server.name}`);
}
}
if (apiKey) {
headers["Authorization"] = `Bearer ${apiKey}`;
if (process.env.DEBUG === "1") {
console.log(`[MCP] Using Bearer token authentication for ${server.name} (shorthand)`);
}
}
}
// No explicit auth configured: try well-known service env vars matched
// against the server name or URL.
if (!headers["Authorization"] && !server.oauth?.service) {
const knownApiKeyPatterns = {
"stripe": ["STRIPE_API_KEY", "STRIPE_SECRET_KEY"],
"huggingface": ["HUGGING_FACE_TOKEN", "HF_TOKEN", "HUGGINGFACE_TOKEN"],
"openai": ["OPENAI_API_KEY"],
"anthropic": ["ANTHROPIC_API_KEY"],
"asana": ["ASANA_API_KEY", "ASANA_ACCESS_TOKEN"],
"intercom": ["INTERCOM_API_KEY", "INTERCOM_ACCESS_TOKEN"],
"paypal": ["PAYPAL_API_KEY", "PAYPAL_ACCESS_TOKEN"]
};
const serverNameLower = server.name.toLowerCase();
const serverUrlLower = server.url.toLowerCase();
for (const [service, envVars] of Object.entries(knownApiKeyPatterns)) {
if (serverNameLower.includes(service) || serverUrlLower.includes(service)) {
for (const envVar of envVars) {
const apiKey = process.env[envVar];
if (apiKey) {
headers["Authorization"] = `Bearer ${apiKey}`;
if (process.env.DEBUG === "1") {
console.log(`[MCP] Auto-detected ${service} API key from ${envVar} for ${server.name}`);
}
break;
}
}
if (headers["Authorization"]) break;
}
}
// Still nothing: fall back to name-derived conventions.
if (!headers["Authorization"]) {
const serverNameUpper = server.name.toUpperCase().replace(/-/g, "_");
const possibleEnvVars = [
`${serverNameUpper}_API_KEY`,
`${serverNameUpper}_TOKEN`,
`${serverNameUpper}_ACCESS_TOKEN`,
`${serverNameUpper}_SECRET_KEY`
];
for (const envVar of possibleEnvVars) {
const apiKey = process.env[envVar];
if (apiKey) {
headers["Authorization"] = `Bearer ${apiKey}`;
if (process.env.DEBUG === "1") {
console.log(`[MCP] Found API key from ${envVar} for ${server.name}`);
}
break;
}
}
}
}
// OAuth-configured servers fall back to a <SERVICE>_ACCESS_TOKEN env var.
if (server.oauth?.service && !headers["Authorization"]) {
const serviceUpper = server.oauth.service.toUpperCase();
const accessToken = process.env[`${serviceUpper}_ACCESS_TOKEN`];
if (accessToken) {
headers["Authorization"] = `Bearer ${accessToken}`;
if (process.env.DEBUG === "1") {
console.log(`[MCP] Using OAuth access token for ${server.name}`);
}
}
}
// Manifest env entries become X-Title-Cased headers: "required"/"optional"
// markers read the value from the environment; anything else is sent verbatim.
if (server.env) {
for (const [key, value] of Object.entries(server.env)) {
if (value === "required" || value === "optional") {
const envValue = process.env[key];
if (envValue) {
const headerName = `X-${key.split("_").map(
(part) => part.charAt(0).toUpperCase() + part.slice(1).toLowerCase()
).join("-")}`;
headers[headerName] = envValue;
}
} else {
const headerName = `X-${key.split("_").map(
(part) => part.charAt(0).toUpperCase() + part.slice(1).toLowerCase()
).join("-")}`;
headers[headerName] = value;
}
}
}
return headers;
}
// Returns the cached client for a server, connecting first if needed.
async getOrCreateClient(server) {
let client = this.clients.get(server.name);
if (!client) {
await this.connectToServer(server);
client = this.clients.get(server.name);
}
return client;
}
// Serializes <SERVICE>_* OAuth env vars into a single <SERVICE>_TOKENS_JSON
// entry (with Slack-specific extras) for the child process to consume.
// Mutates `env` in place; no-op when no access token is present.
injectOAuthTokens(env, service) {
const serviceUpper = service.toUpperCase();
const accessToken = env[`${serviceUpper}_ACCESS_TOKEN`];
const refreshToken = env[`${serviceUpper}_REFRESH_TOKEN`];
const tokenType = env[`${serviceUpper}_TOKEN_TYPE`];
const tokenExpiry = env[`${serviceUpper}_TOKEN_EXPIRY`];
const scope = env[`${serviceUpper}_SCOPE`];
if (accessToken) {
const tokensJson = {
access_token: accessToken,
token_type: tokenType || "Bearer"
};
if (refreshToken) {
tokensJson.refresh_token = refreshToken;
}
if (tokenExpiry) tokensJson.expiry_date = parseInt(tokenExpiry);
if (scope) tokensJson.scope = scope;
if (service === "slack") {
const botUserId = env[`${serviceUpper}_BOT_USER_ID`];
const appId = env[`${serviceUpper}_APP_ID`];
const teamId = env[`${serviceUpper}_TEAM_ID`];
const teamName = env[`${serviceUpper}_TEAM_NAME`];
if (botUserId) tokensJson.bot_user_id = botUserId;
if (appId) tokensJson.app_id = appId;
if (teamId || teamName) {
tokensJson.team = {
...teamId && { id: teamId },
...teamName && { name: teamName }
};
}
}
env[`${serviceUpper}_TOKENS_JSON`] = JSON.stringify(tokensJson);
}
}
// Heuristic: auth-flavored error text on an HTTP server suggests an OAuth issue.
isOAuthRelatedError(error, server) {
const errorMessage = error?.message || String(error);
const isHttpServer = server.url.startsWith("http://") || server.url.startsWith("https://");
const oauthIndicators = [
"invalid_token",
"No authorization provided",
"HTTP 401",
"Unauthorized",
"authentication required",
"access_denied"
];
return isHttpServer && oauthIndicators.some(
(indicator) => errorMessage.toLowerCase().includes(indicator.toLowerCase())
);
}
// Wraps an auth failure in a user-facing explanation. API-key-style failures
// get key-setup guidance; otherwise the OAuth limitation is explained.
createOAuthErrorMessage(server, originalError) {
const originalMessage = originalError?.message || String(originalError);
if (this.isApiKeyAuthError(originalMessage)) {
return this.createApiKeyErrorMessage(server, originalMessage);
}
const enhancedMessage = `
\u274C OAuth Authentication Required for Remote MCP Server
Server: ${server.name} (${server.url})
Original Error: ${originalMessage}
\u{1F6A7} Known Limitation:
The AI Agent Platform currently doesn't support browser-based OAuth flows for remote HTTP MCP servers.
OAuth currently only works for local/stdio MCP servers.
\u{1F4A1} Possible Solutions:
1. Check if the MCP server supports API key authentication instead of OAuth
2. Use a local MCP server that wraps the remote service
3. Wait for OAuth support for HTTP servers (see ROADMAP.md)
\u{1F4DA} For more details, see: https://github.com/anthropics/agent-fleet/blob/main/ROADMAP.md#2-remote-mcp-server-oauth-support
`.trim();
return new Error(enhancedMessage);
}
// True when the message looks like an API-key/credential failure.
isApiKeyAuthError(errorMessage) {
const apiKeyIndicators = [
"api key",
"api_key",
"invalid key",
"authentication failed",
"unauthorized",
"401",
"forbidden",
"403"
];
const lowerMessage = errorMessage.toLowerCase();
return apiKeyIndicators.some((indicator) => lowerMessage.includes(indicator));
}
// Builds a detailed API-key setup message listing likely env vars and
// example manifest configuration.
createApiKeyErrorMessage(server, originalMessage) {
const possibleEnvVars = this.getPossibleApiKeyEnvVars(server);
const envVarsList = possibleEnvVars.map((v) => ` - ${v}`).join("\n");
const enhancedMessage = `
\u274C API Key Authentication Failed for Remote MCP Server
Server: ${server.name} (${server.url})
Original Error: ${originalMessage}
\u{1F511} API Key Configuration:
This server requires API key authentication. Please ensure you have set the appropriate environment variable.
Possible environment variables to set:
${envVarsList}
\u{1F4A1} Solutions:
1. Set the API key in your environment:
export ${possibleEnvVars[0]}="your-api-key-here"
2. Or configure it in your agent manifest:
mcpServers:
- name: ${server.name}
url: ${server.url}
auth:
type: bearer
apiKey: \${${possibleEnvVars[0]}}
3. For shorthand configuration:
mcpServers:
- name: ${server.name}
url: ${server.url}
apiKey: \${${possibleEnvVars[0]}}
\u{1F4DA} Common API Key Formats:
- Stripe: sk_test_... or sk_live_...
- Hugging Face: hf_...
- OpenAI: sk-...
`.trim();
return new Error(enhancedMessage);
}
// Candidate env var names for a server's API key: known-service lists first,
// then name-derived conventions.
getPossibleApiKeyEnvVars(server) {
const serverNameUpper = server.name.toUpperCase().replace(/-/g, "_");
const serverNameLower = server.name.toLowerCase();
const knownServices = {
"stripe": ["STRIPE_API_KEY", "STRIPE_SECRET_KEY"],
"huggingface": ["HUGGING_FACE_TOKEN", "HF_TOKEN", "HUGGINGFACE_TOKEN"],
"openai": ["OPENAI_API_KEY"],
"anthropic": ["ANTHROPIC_API_KEY"],
"asana": ["ASANA_API_KEY", "ASANA_ACCESS_TOKEN"],
"intercom": ["INTERCOM_API_KEY", "INTERCOM_ACCESS_TOKEN"],
"paypal": ["PAYPAL_API_KEY", "PAYPAL_ACCESS_TOKEN"]
};
for (const [service, envVars] of Object.entries(knownServices)) {
if (serverNameLower.includes(service) || server.url.toLowerCase().includes(service)) {
return envVars;
}
}
return [
`${serverNameUpper}_API_KEY`,
`${serverNameUpper}_TOKEN`,
`${serverNameUpper}_ACCESS_TOKEN`,
`${serverNameUpper}_SECRET_KEY`
];
}
// Closes every client connection (logging, not rethrowing, failures) and
// clears all registrations.
async disconnectAll() {
for (const [name, client] of this.clients.entries()) {
try {
await client.close();
} catch (error) {
console.error(`Error disconnecting from MCP server '${name}':`, error);
}
}
this.clients.clear();
this.servers.clear();
}
// Looks up a registered server config by name.
getServer(name) {
return this.servers.get(name);
}
// All registered server configs.
getAllServers() {
return Array.from(this.servers.values());
}
};
// src/utils/token-utils.ts
import { get_encoding } from "tiktoken";
// Counts tokens using tiktoken, with a characters/4 heuristic fallback.
var TokenCounter = class {
  encoder;
  /**
   * @param model - OpenAI model name; maps to a tiktoken encoding
   *   (o-series models -> o200k_base, everything else -> cl100k_base).
   */
  constructor(model = "gpt-4") {
    const encodingMap = {
      "gpt-4": "cl100k_base",
      "gpt-5": "cl100k_base",
      "gpt-5-mini": "cl100k_base",
      "gpt-5-nano": "cl100k_base",
      "gpt-4.1": "cl100k_base",
      "gpt-4.1-mini": "cl100k_base",
      "gpt-4.1-nano": "cl100k_base",
      "gpt-4o": "cl100k_base",
      "o3": "o200k_base",
      "o4-mini": "o200k_base",
      "o3-mini": "o200k_base"
    };
    const encoding = encodingMap[model] || "cl100k_base";
    this.encoder = get_encoding(encoding);
  }
  /**
   * Count tokens in a text string. Empty/falsy input counts as 0.
   */
  countTokens(text) {
    if (!text) return 0;
    try {
      const tokens = this.encoder.encode(text);
      return tokens.length;
    } catch (error) {
      // Fallback heuristic (~4 chars per token) if the encoder fails,
      // e.g. after dispose() or on unsupported input.
      return Math.ceil(text.length / 4);
    }
  }
  /**
   * Estimate tokens for structured data (JSON, objects).
   */
  countTokensInData(data) {
    if (typeof data === "string") {
      return this.countTokens(data);
    }
    let jsonString;
    try {
      jsonString = JSON.stringify(data, null, 0);
    } catch (error) {
      // Bug fix: JSON.stringify throws on circular structures and BigInt
      // values; fall back to the default string form instead of crashing.
      jsonString = String(data);
    }
    // JSON.stringify(undefined) returns undefined, not a string; count as 0.
    return this.countTokens(jsonString ?? "");
  }
  /**
   * Check if text exceeds token limit.
   */
  exceedsLimit(text, limit) {
    return this.countTokens(text) > limit;
  }
  /**
   * Dispose of the encoder to free the underlying (WASM) allocation.
   */
  dispose() {
    if (this.encoder?.free) {
      this.encoder.free();
    }
  }
};
// Splits oversized text into token-bounded chunks, trying progressively
// cruder strategies (file boundaries -> functions -> paragraphs -> lines ->
// raw characters) and then adding overlap between adjacent chunks.
var TextChunker = class {
tokenCounter;
maxTokensPerChunk;
overlapPercentage;
// Defaults: 100k tokens per chunk with 10% overlap between neighbors.
constructor(model = "gpt-4", maxTokensPerChunk = 1e5, overlapPercentage = 0.1) {
this.tokenCounter = new TokenCounter(model);
this.maxTokensPerChunk = maxTokensPerChunk;
this.overlapPercentage = overlapPercentage;
}
/**
 * Split text into chunks with intelligent boundaries.
 * Text under the limit is returned as a single chunk; otherwise the first
 * strategy that yields more than one valid-sized chunk wins, with a
 * character-based split as the guaranteed fallback.
 */
chunkText(text) {
if (!this.tokenCounter.exceedsLimit(text, this.maxTokensPerChunk)) {
return [{
content: text,
tokenCount: this.tokenCounter.countTokens(text),
chunkIndex: 0,
totalChunks: 1,
hasOverlap: false
}];
}
const strategies = [
this.splitByFiles.bind(this),
this.splitByFunctions.bind(this),
this.splitByParagraphs.bind(this),
this.splitByLines.bind(this),
this.splitByCharacters.bind(this)
];
for (const strategy of strategies) {
const chunks = strategy(text);
if (chunks.length > 1 && this.validateChunks(chunks)) {
return this.addOverlap(chunks);
}
}
// No strategy produced valid multi-chunk output; force a character split.
return this.addOverlap(this.splitByCharacters(text));
}
/**
 * Split by file boundaries (for git diffs, multi-file content).
 * Returns [] when no pattern matches more than once.
 */
splitByFiles(text) {
const filePatterns = [
/^diff --git /gm,
// Git diff files
/^--- a\//gm,
// Unified diff
/^\+\+\+ b\//gm,
// Unified diff
/^Index: /gm,
// SVN-style
/^@@.*@@$/gm,
// Hunk headers
/^=== .* ===/gm
// Custom separators
];
for (const pattern of filePatterns) {
const matches2 = Array.from(text.matchAll(pattern));
if (matches2.length > 1) {
return this.splitAtPositions(text, matches2.map((m) => m.index));
}
}
// Fallback: "path/to/file.ext:" style headers at line start.
const filePathPattern = /^(?:[\w-]+\/)*[\w-]+\.[a-zA-Z]{1,4}:/gm;
const matches = Array.from(text.matchAll(filePathPattern));
if (matches.length > 1) {
return this.splitAtPositions(text, matches.map((m) => m.index));
}
return [];
}
/**
 * Split by function boundaries across several language syntaxes.
 * Returns [] when no pattern yields multiple valid-sized chunks.
 */
splitByFunctions(text) {
const functionPatterns = [
/^(export\s+)?(async\s+)?function\s+\w+/gm,
// JavaScript/TypeScript
/^(public|private|protected)?\s*(static\s+)?[\w<>]+\s+\w+\s*\(/gm,
// Java/C#
/^def\s+\w+\s*\(/gm,
// Python
/^fn\s+\w+\s*\(/gm,
// Rust
/^func\s+\w+\s*\(/gm,
// Go
/^\s*class\s+\w+/gm,
// Class definitions
/^\s*interface\s+\w+/gm
// Interface definitions
];
for (const pattern of functionPatterns) {
const matches = Array.from(text.matchAll(pattern));
if (matches.length > 1) {
const positions = matches.map((m) => m.index);
const chunks = this.splitAtPositions(text, positions);
if (this.validateChunks(chunks)) {
return chunks;
}
}
}
return [];
}
/**
 * Split by paragraph boundaries (blank-line separated).
 */
splitByParagraphs(text) {
const paragraphs = text.split(/\n\s*\n/);
if (paragraphs.length <= 1) return [];
return this.combineParagraphs(paragraphs);
}
/**
 * Split by line boundaries.
 */
splitByLines(text) {
const lines = text.split("\n");
if (lines.length <= 1) return [];
return this.combineLines(lines);
}
/**
 * Split by character boundaries (last resort).
 * Uses ~4 characters per token to size the slices.
 */
splitByCharacters(text) {
const chunks = [];
const targetSize = Math.floor(this.maxTokensPerChunk * 4);
for (let i = 0; i < text.length; i += targetSize) {
const chunk = text.slice(i, i + targetSize);
chunks.push({
content: chunk,
tokenCount: this.tokenCounter.countTokens(chunk),
chunkIndex: chunks.length,
totalChunks: 0,
// Will be set later
hasOverlap: false
});
}
chunks.forEach((chunk) => chunk.totalChunks = chunks.length);
return chunks;
}
/**
 * Split text at specific character offsets; whitespace-only segments are dropped.
 */
splitAtPositions(text, positions) {
if (positions.length === 0) return [];
const sortedPositions = [0, ...positions.sort((a, b) => a - b), text.length];
const chunks = [];
for (let i = 0; i < sortedPositions.length - 1; i++) {
const start = sortedPositions[i];
const end = sortedPositions[i + 1];
const content = text.slice(start, end);
if (content.trim()) {
chunks.push({
content,
tokenCount: this.tokenCounter.countTokens(content),
chunkIndex: chunks.length,
totalChunks: 0,
// Will be set later
hasOverlap: false
});
}
}
chunks.forEach((chunk) => chunk.totalChunks = chunks.length);
return chunks;
}
/**
 * Combine paragraphs greedily into chunks up to maxTokensPerChunk.
 */
combineParagraphs(paragraphs) {
const chunks = [];
let currentChunk = "";
let currentTokens = 0;
for (const paragraph of paragraphs) {
const paragraphTokens = this.tokenCounter.countTokens(paragraph);
// Flush the current chunk when adding this paragraph would overflow.
if (currentTokens + paragraphTokens > this.maxTokensPerChunk && currentChunk) {
chunks.push({
content: currentChunk.trim(),
tokenCount: currentTokens,
chunkIndex: chunks.length,
totalChunks: 0,
hasOverlap: false
});
currentChunk = paragraph;
currentTokens = paragraphTokens;
} else {
currentChunk += (currentChunk ? "\n\n" : "") + paragraph;
currentTokens += paragraphTokens;
}
}
if (currentChunk.trim()) {
chunks.push({
content: currentChunk.trim(),
tokenCount: currentTokens,
chunkIndex: chunks.length,
totalChunks: 0,
hasOverlap: false
});
}
chunks.forEach((chunk) => chunk.totalChunks = chunks.length);
return chunks;
}
/**
 * Combine lines greedily into chunks up to maxTokensPerChunk.
 */
combineLines(lines) {
const chunks = [];
let currentChunk = "";
let currentTokens = 0;
for (const line of lines) {
const lineTokens = this.tokenCounter.countTokens(line + "\n");
if (currentTokens + lineTokens > this.maxTokensPerChunk && currentChunk) {
chunks.push({
content: currentChunk.trim(),
tokenCount: currentTokens,
chunkIndex: chunks.length,
totalChunks: 0,
hasOverlap: false
});
currentChunk = line;
currentTokens = lineTokens;
} else {
currentChunk += (currentChunk ? "\n" : "") + line;
currentTokens += lineTokens;
}
}
if (currentChunk.trim()) {
chunks.push({
content: currentChunk.trim(),
tokenCount: currentTokens,
chunkIndex: chunks.length,
totalChunks: 0,
hasOverlap: false
});
}
chunks.forEach((chunk) => chunk.totalChunks = chunks.length);
return chunks;
}
/**
 * Add overlap between chunks for context preservation: each chunk gets the
 * tail of its predecessor and the head of its successor, separated by "---".
 */
addOverlap(chunks) {
if (chunks.length <= 1) return chunks;
const overlapTokens = Math.floor(this.maxTokensPerChunk * this.overlapPercentage);
const overlappedChunks = [];
for (let i = 0; i < chunks.length; i++) {
let content = chunks[i].content;
let hasOverlap = false;
if (i > 0 && overlapTokens > 0) {
const prevContent = chunks[i - 1].content;
const overlap = this.getOverlapContent(prevContent, overlapTokens, "end");
if (overlap) {
content = overlap + "\n---\n" + content;
hasOverlap = true;
}
}
if (i < chunks.length - 1 && overlapTokens > 0) {
const nextContent = chunks[i + 1].content;
const overlap = this.getOverlapContent(nextContent, overlapTokens, "start");
if (overlap) {
content = content + "\n---\n" + overlap;
hasOverlap = true;
}
}
overlappedChunks.push({
content,
tokenCount: this.tokenCounter.countTokens(content),
chunkIndex: i,
totalChunks: chunks.length,
hasOverlap
});
}
return overlappedChunks;
}
/**
 * Take whole lines from the start or end of `text` until maxTokens is reached.
 */
getOverlapContent(text, maxTokens, position) {
const lines = text.split("\n");
let content = "";
let tokens = 0;
if (position === "start") {
for (const line of lines) {
const lineTokens = this.tokenCounter.countTokens(line + "\n");
if (tokens + lineTokens > maxTokens) break;
content += (content ? "\n" : "") + line;
tokens += lineTokens;
}
} else {
for (let i = lines.length - 1; i >= 0; i--) {
const line = lines[i];
const lineTokens = this.tokenCounter.countTokens(line + "\n");
if (tokens + lineTokens > maxTokens) break;
content = line + (content ? "\n" + content : "");
tokens += lineTokens;
}
}
return content;
}
/**
 * Validate that chunks are reasonable sizes: nothing over the limit
 * (with 10% tolerance) and nothing trivially small.
 */
validateChunks(chunks) {
return chunks.every(
(chunk) => chunk.tokenCount <= this.maxTokensPerChunk * 1.1 && // Allow 10% tolerance
chunk.tokenCount > 100
// Minimum meaningful size
);
}
/**
 * Dispose of resources (frees the owned TokenCounter's encoder).
 */
dispose() {
this.tokenCounter.dispose();
}
};
var globalTokenCounter = null;
// Model the cached counter was built for; lets us detect model switches.
var globalTokenCounterModel = null;
/**
 * Returns a shared TokenCounter for the requested model (default "gpt-4"),
 * rebuilding the cache only when the model actually changes.
 * Bug fix: the original condition (`model && model !== "gpt-4"`) allocated a
 * NEW TokenCounter — and leaked its tiktoken encoder — on EVERY call for any
 * non-"gpt-4" model, and could hand back a counter built for a different
 * model's encoding.
 */
function getTokenCounter(model) {
  const requestedModel = model || "gpt-4";
  if (!globalTokenCounter || requestedModel !== globalTokenCounterModel) {
    // Free the previous encoder before replacing it. Stale references remain
    // usable: TokenCounter.countTokens falls back to a length/4 estimate if
    // its encoder has been freed.
    globalTokenCounter?.dispose();
    globalTokenCounter = new TokenCounter(requestedModel);
    globalTokenCounterModel = requestedModel;
  }
  return globalTokenCounter;
}
/**
 * Convenience wrapper: count tokens in `text` using the shared TokenCounter
 * for `model` (defaults to the gpt-4 encoding).
 */
function countTokens(text, model) {
  const counter = getTokenCounter(model);
  return counter.countTokens(text);
}
// src/agent-factory.ts
import OpenAI from "openai";
var AgentFactory = class {
toolRegistry;
mcpManager;
openai;
currentModel = "gpt-4";
// Wires up the tool registry, the MCP connection manager, and a raw OpenAI
// client (used for chunk analysis). Reads OPENAI_API_KEY from the environment.
constructor() {
this.toolRegistry = new ToolRegistry();
this.mcpManager = new MCPManager();
this.openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY
});
}
// Builds an @openai/agents Agent from a declarative config: gathers local,
// registered, and MCP tools; augments the instructions with tool guidance;
// and conditionally applies model/tool settings. Model defaults to "gpt-5".
// Side effect: records the chosen model on this.currentModel for tool wrappers.
async createAgent(config) {
this.currentModel = config.model || "gpt-5";
const tools = await this.loadTools(config);
const toolAwareInstructions = this.buildToolAwareInstructions(config.instructions, tools);
if (process.env.DEBUG_TOOLS) {
console.log("[AgentFactory] Tools loaded:", tools.map((t) => t.name));
console.log("[AgentFactory] Tool-aware instructions:", toolAwareInstructions);
}
const modelSettings = this.buildModelSettings(config);
const agent = new Agent({
name: config.name || "Assistant",
instructions: toolAwareInstructions,
model: config.model || "gpt-5",
tools: tools.map((t) => this.convertToAgentTool(t, config.toolConfigurations)),
// Only attach modelSettings when at least one setting was produced.
...Object.keys(modelSettings).length > 0 && { modelSettings },
// Additional new fields from organized structure (basic ones that we know work)
...config.toolSettings?.behavior && { toolUseBehavior: config.toolSettings.behavior },
...config.toolSettings?.resetChoice !== void 0 && { resetToolChoice: config.toolSettings.resetChoice }
// TODO: Add support for advanced features later:
// - prompt (needs proper Prompt type)
// - outputType/structured outputs (needs proper schema handling)
// - guardrails (needs proper guardrail function setup)
// - handoffs (needs actual Agent instances)
});
return agent;
}
buildModelSettings(config) {
const settings = {};
const temperature = config.modelSettings?.temperature ?? config.temperature;
const model = config.model || "gpt-5";
const supportsTemperature = !model.startsWith("gpt-5") && !model.includes("o1");
if (temperature !== void 0 && supportsTemperature) {
settings.temperature = temperature;
}
const reasoning = config.modelSettings?.reasoning ?? config.reasoning;
if (reasoning?.effort) {
settings.reasoning_effort = reasoning.effort;
}
const verbosity = config.modelSettings?.verbosity ?? config.verbosity;
if (verbosity) {
settings.verbosity = verbosity;
}
if (config.modelSettings) {
if (config.modelSettings.topP !== void 0 && supportsTemperature) {
settings.top_p = config.modelSettings.topP;
}
if (config.modelSettings.frequencyPenalty !== void 0) {
settings.frequency_penalty = config.modelSettings.frequencyPenalty;
}
if (config.modelSettings.presencePenalty !== void 0) {
settings.presence_penalty = config.modelSettings.presencePenalty;
}
if (config.modelSettings.maxTokens !== void 0) {
settings.max_tokens = config.modelSettings.maxTokens;
}
if (config.modelSettings.toolChoice !== void 0) {
settings.tool_choice = config.modelSettings.toolChoice;
}
if (config.modelSettings.parallelToolCalls !== void 0) {
settings.parallel_tool_calls = config.modelSettings.parallelToolCalls;
}
if (config.modelSettings.truncation !== void 0) {
settings.truncation = config.modelSettings.truncation;
}
if (config.modelSettings.store !== void 0) {
settings.store = config.modelSettings.store;
}
}
return settings;
}
async loadTools(config) {
const tools = [];
if (config.tools) {
tools.push(...config.tools);
}
const registeredTools = this.toolRegistry.getAllTools();
tools.push(...registeredTools);
if (config.mcpServers) {
for (const server of config.mcpServers) {
try {
await this.mcpManager.registerServer(server);
const mcpTools = await this.mcpManager.loadServerTools(server);
tools.push(...mcpTools);
} catch (error) {
console.error(`Failed to load MCP server '${server.name}':`, error);
}
}
}
return tools;
}
convertToAgentTool(toolDef, toolConfigurations) {
const toolConfig = {
// Pass agent context to tools
agentModel: this.currentModel,
// Merge with tool-specific configuration
...toolConfigurations?.[toolDef.name] || {}
};
return tool({
name: toolDef.name,
description: toolDef.description,
parameters: toolDef.parameters,
execute: this.wrapExecuteWithChunking(toolDef.execute, toolDef.name, toolConfig)
});
}
/**
 * Wrap tool execution with intelligent chunking for large responses.
 * The returned async function:
 *  - forwards toolConfig as a second argument to the original execute;
 *  - passes null/undefined and primitive (non-string/object) results through;
 *  - serializes object results to pretty JSON (String() fallback on failure);
 *  - when the serialized result exceeds 160k tokens, routes it through
 *    processLargeToolResponse for chunked analysis, otherwise returns the
 *    original result unchanged.
 */
wrapExecuteWithChunking(originalExecute, toolName, toolConfig) {
return async (params) => {
try {
const result = await originalExecute(params, toolConfig);
if (result === null || result === void 0) {
return result;
}
if (typeof result !== "string" && typeof result !== "object") {
return result;
}
let resultText;
try {
resultText = typeof result === "string" ? result : JSON.stringify(result, null, 2);
} catch (stringifyError) {
console.warn(`[Chunking] Failed to stringify result from ${toolName}, using string representation`);
resultText = String(result);
}
// Empty/whitespace-only serializations are never worth chunking.
if (!resultText || resultText.trim().length === 0) {
return result;
}
const TOKEN_LIMIT = 16e4;
const tokenCount = countTokens(resultText);
if (tokenCount <= TOKEN_LIMIT) {
return result;
}
console.log(`[Chunking] Tool '${toolName}' returned ${tokenCount} tokens, chunking into manageable pieces...`);
return await this.processLargeToolResponse(resultText, toolName, params);
} catch (error) {
console.error(`[Tool] Error executing ${toolName}:`, error);
// Surface token-related failures with extra context; rethrow the rest as-is.
if (error?.message?.includes("tiktoken") || error?.message?.includes("token")) {
throw new Error(`Token processing error in ${toolName}: ${error.message}. Tool result may be too large or contain unsupported characters.`);
}
throw error;
}
};
}
/**
* Process large tool responses by chunking and iterative analysis
*/
async processLargeToolResponse(resultText, toolName, originalParams) {
try {
const chunker = new TextChunker(this.currentModel, 1e5, 0.1);
const chunks = chunker.chunkText(resultText);
console.log(`[Chunking] Split into ${chunks.length} chunks for analysis`);
if (chunks.length === 1) {
const singleChunkTokens = chunks[0].tokenCount;
if (singleChunkTokens > 15e4) {
console.warn(`[Chunking] Single chunk still too large (${singleChunkTokens} tokens), using emergency truncation`);
chunker.dispose();
const emergencyTruncated = resultText.substring(0, 6e5);
return {
content: emergencyTruncated,
warning: `Content was extremely large (${countTokens(resultText)} tokens) and has been emergency truncated to fit context limits. Analysis may be incomplete.`,
originalSize: countTokens(resultText),
truncatedSize: countTokens(emergencyTruncated),
emergencyTruncation: true
};
}
chunker.dispose();
return resultText;
}
const chunkAnalyses = [];
for (let i = 0; i < chunks.length; i++) {
const chunk = chunks[i];
console.log(`[Chunking] Analyzing chunk ${i + 1}/${chunks.length} (${chunk.tokenCount} tokens)`);
try {
const chunkAnalysis = await this.analyzeChunkWithModel(chunk, toolName, originalParams);
chunkAnalyses.push(chunkAnalysis);
} catch (error) {
console.error(`[Chunking] Error analyzing chunk ${i + 1}:`, error);
chunkAnalyses.push(`[Error analyzing chunk ${i + 1}: ${error?.message || "Unknown error"}]`);
}
}
const aggregatedResult = await this.aggregateChunkAnalyses(chunkAnalyses, toolName, originalParams);
chunker.dispose();
return aggregatedResult;
} catch (error) {
console.error(`[Chunking] Error processing large response for ${toolName}:`, error);
const truncatedResult = resultText.substring(0, 4e5);
return {
content: truncatedResult,
warning: `Response was too large (${countTokens(resultText)} tokens) and has been truncated. Full analysis may be incomplete.`,
originalSize: countTokens(resultText),
truncatedSize: countTokens(truncatedResult)
};
}
}
/**
* Analyze a single chunk using the OpenAI model
*/
async analyzeChunkWithModel(chunk, toolName, originalParams) {
const analysisPrompt = this.createChunkAnalysisPrompt(chunk, toolName, originalParams);
try {
const response = await this.openai.responses.create({
model: this.currentModel,
input: [
{
role: "system",
content: `You are analyzing a chunk of data from a tool called "${toolName}".
Focus on extracting key insights, important details, and relevant information from this chunk.
Be concise but thorough. This chunk is part of a larger dataset that was split due to size constraints.`
},
{
role: "user",
content: analysisPrompt
}
],
max_output_tokens: 4e3
// Leave room for response
});
return response.output_text || "[No analysis generated]";
} catch (error) {
console.error(`[Chunking] OpenAI API error for chunk analysis:`, error);
return `Raw chunk content (analysis failed):
${chunk.content}`;
}
}
/**
* Create analysis prompt for a single chunk
*/
createChunkAnalysisPrompt(chunk, toolName, originalParams) {
let prompt = "";
prompt += `This is chunk ${chunk.chunkIndex + 1} of ${chunk.totalChunks} from tool "${toolName}".
`;
prompt += `Chunk size: ${chunk.tokenCount} tokens
`;
if (chunk.hasOverlap) {
prompt += `Note: This chunk includes overlap with adjacent chunks for context.
`;
}
if (toolName.includes("git") || toolName.includes("diff")) {
prompt += `
Focus on: code changes, file modifications, potential issues, and key improvements.
`;
} else if (toolName.includes("review") || toolName.includes("pr")) {
prompt += `
Focus on: code quality, potential bugs, best practices, and improvement suggestions.
`;
} else if (toolName.includes("search") || toolName.includes("find")) {
prompt += `
Focus on: relevant matches, key findings, and important results.
`;
} else {
prompt += `
Focus on: key information, important details, and relevant insights.
`;
}
prompt += `
Original tool parameters: ${JSON.stringify(originalParams, null, 2)}
`;
prompt += `
--- CHUNK CONTENT ---
`;
prompt += chunk.content;
return prompt;
}
/**
* Aggregate chunk analyses into a coherent final response
*/
async aggregateChunkAnalyses(chunkAnalyses, toolName, originalParams) {
if (chunkAnalyses.length === 1) {
return {
type: "chunked_response",
tool: toolName,
totalChunks: 1,
content: chunkAnalyses[0],
originalParams
};
}
try {
const synthesisPrompt = this.createSynthesisPrompt(chunkAnalyses, toolName, originalParams);
const response = await this.openai.responses.create({
model: this.currentModel,
input: [
{
role: "system",
content: `You are synthesizing analyses from multiple chunks of data from tool "${toolName}".
Create a coherent, comprehensive summary that combines insights from all chunks.
Avoid repetition and focus on the most important findings. Structure your response clearly.`
},
{
role: "user",
content: synthesisPrompt
}
],
max_output_tokens: 8e3
});
const synthesizedContent = response.output_text || "[Synthesis failed]";
return {
type: "chunked_response",
tool: toolName,
totalChunks: chunkAnalyses.length,
content: synthesizedContent,
tokenCount: countTokens(synthesizedContent),
originalParams,
synthesized: true
};
} catch (error) {
console.error(`[Chunking] Error synthesizing chunk analyses:`, error);
let fallbackContent = `## Analysis Results (${chunkAnalyses.length} chunks processed)
`;
fallbackContent += `*Note: Automatic synthesis failed, showing individual chunk analyses.*
`;
for (let i = 0; i < chunkAnalyses.length; i++) {
fallbackContent += `### Chunk ${i + 1} Analysis
`;
fallbackContent += chunkAnalyses[i];
if (i < chunkAnalyses.length - 1) {
fallbackContent += `
---
`;
}
}
return {
type: "chunked_response",
tool: toolName,
totalChunks: chunkAnalyses.length,
content: fallbackContent,
tokenCount: countTokens(fallbackContent),
originalParams,
synthesized: false,
fallback: true
};
}
}
/**
* Create synthesis prompt for combining chunk analyses
*/
createSynthesisPrompt(chunkAnalyses, toolName, originalParams) {
let prompt = "";
prompt += `Please synthesize the following ${chunkAnalyses.length} chunk analyses from tool "${toolName}":
`;
prompt += `Original parameters: ${JSON.stringify(originalParams, null, 2)}
`;
if (toolName.includes("git") || toolName.includes("diff")) {
prompt += `Create a comprehensive code review summary focusing on:
`;
prompt += `- Overall changes and their impact
`;
prompt += `- Key issues or concerns found
`;
prompt += `- Suggestions for improvement
`;
prompt += `- Summary of files/areas modified
`;
} else if (toolName.includes("search") || toolName.includes("find")) {
prompt += `Create a comprehensive search results summary focusing on:
`;
prompt += `- Most relevant findings
`;
prompt += `- Key patterns or themes
`;
prompt += `- Important matches or results
`;
} else {
prompt += `Create a comprehensive summary that:
`;
prompt += `- Combines key insights from all chunks
`;
prompt += `- Identifies important patterns or themes
`;
prompt += `- Provides actionable conclusions
`;
}
prompt += `--- CHUNK ANALYSES TO SYNTHESIZE ---
`;
for (let i = 0; i < chunkAnalyses.length; i++) {
prompt += `## Chunk ${i + 1} Analysis:
`;
prompt += chunkAnalyses[i];
prompt += `
`;
}
prompt += `
Please provide a well-structured, comprehensive synthesis of these analyses.`;
return prompt;
}
/**
 * Build the agent's system instructions, appending a formatted inventory of
 * the available tools. Uses `baseInstructions` when provided, otherwise a
 * default prompt that pushes the agent to actually invoke tools and to prefer
 * existing MCP servers over generating new ones.
 * Code is left byte-identical: the output is exact prompt text built from
 * multi-line template literals.
 */
buildToolAwareInstructions(baseInstructions, tools) {
let instructions = baseInstructions || `You are a helpful AI assistant with access to various tools. Always use the appropriate tools when asked to perform calculations, read/write files, execute commands, or search the web. Do not just describe what you would do - actually use the tools to complete the task.
**Important MCP Server Integration Guidelines:**
When a user asks to integrate with an external API or service:
1. FIRST check if an official MCP server already exists using web search and the mcp list/search commands
2. Look for existing MCP servers on GitHub, npm, or the official MCP registry
3. If an official server exists, prefer using it via \`npx\` command rather than reimplementing
4. Only use the generate_mcp_server tool when no suitable existing server is found
5. When generating, the tool will research the API documentation thoroughly using web search
Example workflow:
- User: "I need to integrate with Stripe API"
- You: Search for existing Stripe MCP servers first, then either configure the existing one or generate a new one if needed`;
// No tools registered: return the base/default instructions unchanged.
if (tools.length === 0) {
return instructions;
}
// Bucket tools into built-ins, MCP server tools (named "<server>__<tool>"),
// and everything else ("custom" tools).
const builtInTools = [];
const mcpTools = /* @__PURE__ */ new Map();
const customTools = [];
const builtInToolNames = ["web_search", "analyze_pdf", "generate_pdf", "calculator", "shell", "read_file", "write_file", "list_directory", "generate_mcp_server"];
const hasMCPGenerator = tools.some((t) => t.name === "generate_mcp_server");
for (const tool2 of tools) {
if (builtInToolNames.includes(tool2.name)) {
builtInTools.push(`${tool2.name}: ${tool2.description}`);
} else if (tool2.name.includes("__")) {
// "<server>__<tool>" convention: group by the server-name prefix.
const serverName = tool2.name.split("__")[0];
if (!mcpTools.has(serverName)) {
mcpTools.set(serverName, []);
}
mcpTools.get(serverName).push(`${tool2.name}: ${tool2.description}`);
} else {
customTools.push(`${tool2.name}: ${tool2.description}`);
}
}
// Render each non-empty bucket as a markdown-style section.
let toolInfo = "\n\nYou have access to the following tools:";
if (builtInTools.length > 0) {
toolInfo += "\n\n**Built-in Tools:**";
for (const tool2 of builtInTools) {
toolInfo += `
- ${tool2}`;
}
}
if (mcpTools.size > 0) {
for (const [serverName, serverTools] of mcpTools) {
// Friendlier display names for a couple of well-known servers.
let displayName = serverName;
if (serverName === "google-workspace") {
displayName = "Google Workspace (Gmail, Calendar, Meet)";
} else if (serverName === "slack") {
displayName = "Slack";
}
toolInfo += `
**${displayName} Tools:**`;
for (const tool2 of serverTools) {
toolInfo += `
- ${tool2}`;
}
}
}
if (customTools.length > 0) {
toolInfo += "\n\n**Custom Tools:**";
for (const tool2 of customTools) {
toolInfo += `
- ${tool2}`;
}
}
toolInfo += "\n\nWhen users ask about your capabilities or what tools you have, make sure to mention these available tools. Use them whenever appropriate to help users accomplish their tasks.";
// Only include the integration workflow when the generator tool is present.
if (hasMCPGenerator) {
toolInfo += `
**MCP Server Integration Workflow:**
1. When user mentions integrating with an API/service (e.g., "connect to Stripe", "use GitHub API"):
- First run: \`mcp list --search <service>\` to check existing servers
- Search web for "<service> MCP server" to find community implementations
- Check if an npm package exists: search for "mcp-<service>" on npmjs.com
2. If an existing MCP server is found:
- Prefer using it via \`npx -y <package-name>\` in the registry configuration
- Guide user through required environment variables and OAuth setup
3. Only if no suitable server exists:
- Use the generate_mcp_server tool to create a new one
- The tool will automatically research the API using web search
- It will generate a complete, production-ready implementation
Remember: Always prefer existing, maintained MCP servers over generating new ones to ensure compatibility and reduce maintenance burden.`;
}
return instructions + toolInfo;
}
registerTool(tool2) {
this.toolRegistry.register(tool2);
}
registerTools(tools) {
this.toolRegistry.registerMany(tools);
}
async registerMCPServer(server) {
await this.mcpManager.registerServer(server);
}
/** Expose the underlying tool registry (for inspection or direct use). */
getToolRegistry() {
return this.toolRegistry;
}
/** Expose the underlying MCP manager (for inspection or direct use). */
getMCPManager() {
return this.mcpManager;
}
/**
 * Release runtime resources: disconnect all MCP servers first, then clear
 * the tool registry (kept in this order so tools backed by live MCP
 * connections are torn down before their entries disappear).
 */
async cleanup() {
await this.mcpManager.disconnectAll();
this.toolRegistry.clear();
}
};
// src/conversation-manager.ts
import { run } from "@openai/agents";
import ora from "ora";
// src/execution-tracker.ts
import { randomUUID } from "crypto";
var ExecutionTracker = class {
// Executions currently tracked, keyed by execution id.
activeExecutions = /* @__PURE__ */ new Map();
// Finished executions; presumably trimmed to maxCompletedHistory entries by
// code outside this view — TODO confirm.
completedExecutions = [];
// Upper bound on how many completed executions are retained.
maxCompletedHistory = 100;
getActiveExecution(toolName) {
for (const execution of this.activeExecutions.values()) {
if (execution.toolName === toolName && execution.status === "running") {
return execution;
}
}
return void 0;
}
getAllActiveExecutions() {
return Array.from(this.activeExecutions.values());
}
startExecution(toolName, parameters) {
const executionId = random