openagentic
Version:
A TypeScript framework for building AI agents with self-contained tool orchestration capabilities
1,288 lines (1,276 loc) • 144 kB
JavaScript
"use strict";
// ---------------------------------------------------------------------------
// Generated CommonJS interop preamble (esbuild/tsup). These helpers emulate
// ES-module export/import semantics on top of module.exports. Do not hand-edit.
// ---------------------------------------------------------------------------
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Define each export on `target` as a live, enumerable getter so bindings
// declared later in the file still resolve when accessed.
var __export = (target, all) => {
for (var name in all)
__defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties from `from` onto `to` (skipping `except` and keys that
// already exist), preserving enumerability via getter indirection.
var __copyProps = (to, from, except, desc) => {
if (from && typeof from === "object" || typeof from === "function") {
for (let key of __getOwnPropNames(from))
if (!__hasOwnProp.call(to, key) && key !== except)
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
}
return to;
};
// Wrap a CommonJS module so it can be consumed as if it were an ES module.
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
// If the importer is in node compatibility mode or this is not an ESM
// file that has been converted to a CommonJS file using a Babel-
// compatible transform (i.e. "__esModule" has not been set), then set
// "default" to the CommonJS "module.exports" for node compatibility.
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
mod
));
// Mark the export object as an ES module and copy the live getters onto it.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// src/tools/index.ts
// Registry of every public symbol this bundle exports. Each entry is wired up
// as a live getter via __export, then exposed through module.exports below.
var tools_exports = {};
__export(tools_exports, {
aiTools: () => aiTools,
allToolDescriptions: () => allToolDescriptions,
allTools: () => allTools,
anthropicTool: () => anthropicTool,
elevenlabsTool: () => elevenlabsTool,
geminiTool: () => geminiTool,
geminiTtsTool: () => geminiTtsTool,
getToolDescription: () => getToolDescription,
getToolDescriptionsByCategory: () => getToolDescriptionsByCategory,
githubTool: () => githubTool,
grokTool: () => grokTool,
inceptionLabsTool: () => inceptionLabsTool,
llamaTool: () => llamaTool,
newsdataTool: () => newsdataTool,
openaiImageTool: () => openaiImageTool,
openaiTool: () => openaiTool,
perplexityTool: () => perplexityTool,
qrcodeTool: () => qrcodeTool,
searchToolDescriptions: () => searchToolDescriptions,
toOpenAgenticTool: () => toOpenAgenticTool,
toolDescriptionsByCategory: () => toolDescriptionsByCategory,
utilityTools: () => utilityTools,
videoGenerationTool: () => videoGenerationTool,
websearchTool: () => websearchTool
});
module.exports = __toCommonJS(tools_exports);
// src/tools/openai.ts
var import_ai3 = require("ai");
var import_zod2 = require("zod");
var import_ai4 = require("ai");
var import_openai = require("@ai-sdk/openai");
// src/tools/utils.ts
var import_ai2 = require("ai");
// src/types.ts
var import_zod = require("zod");
var import_ai = require("ai");
// Zod schema for an AI model configuration: provider selector, model name,
// optional credentials/endpoint, and sampling parameters. `temperature`
// defaults to 0.7 when omitted. NOTE(review): `project`/`location` are
// presumably for google-vertex deployments — confirm against callers.
var AIModelSchema = import_zod.z.object({
provider: import_zod.z.enum(["openai", "anthropic", "google", "google-vertex", "perplexity", "xai", "custom"]),
model: import_zod.z.string(),
apiKey: import_zod.z.string().optional(),
baseURL: import_zod.z.string().optional(),
temperature: import_zod.z.number().min(0).max(2).optional().default(0.7),
maxTokens: import_zod.z.number().positive().optional(),
topP: import_zod.z.number().min(0).max(1).optional(),
project: import_zod.z.string().optional(),
location: import_zod.z.string().optional()
});
// Zod schema for one chat message. `toolCallId` links a "tool" role message
// back to the call it answers; `toolCalls` carries the calls an assistant
// message requested (free-form args via z.record(z.any())).
var MessageSchema = import_zod.z.object({
role: import_zod.z.enum(["system", "user", "assistant", "tool"]),
content: import_zod.z.string(),
toolCallId: import_zod.z.string().optional(),
toolCalls: import_zod.z.array(import_zod.z.object({
toolCallId: import_zod.z.string(),
toolName: import_zod.z.string(),
args: import_zod.z.record(import_zod.z.any())
})).optional()
});
// Zod schema for the outcome of an agent run: success flag, optional
// result/error, the full message transcript, iteration count, names of tools
// invoked, and optional aggregate timing/usage statistics.
var ExecutionResultSchema = import_zod.z.object({
success: import_zod.z.boolean(),
result: import_zod.z.any().optional(),
error: import_zod.z.string().optional(),
messages: import_zod.z.array(MessageSchema),
iterations: import_zod.z.number(),
toolCallsUsed: import_zod.z.array(import_zod.z.string()),
executionStats: import_zod.z.object({
totalDuration: import_zod.z.number(),
stepsExecuted: import_zod.z.number(),
toolCallsExecuted: import_zod.z.number(),
tokensUsed: import_zod.z.number().optional(),
averageStepDuration: import_zod.z.number(),
averageToolCallDuration: import_zod.z.number()
}).optional()
});
// src/tools/utils.ts
/**
 * Combine an AI-SDK tool with OpenAgentic metadata (toolId, name, useCases,
 * logo, ...). Returns a new object; metadata keys win on collision.
 */
function toOpenAgenticTool(tool16, details) {
  const combined = { ...tool16 };
  return Object.assign(combined, details);
}
// src/tools/openai.ts
// Allow-list of OpenAI chat models accepted by the openai_text_generation
// tool; execute() rejects any model name not in this list.
var SUPPORTED_MODELS = [
"gpt-4o",
"gpt-4o-mini",
"gpt-4-turbo",
"gpt-4",
"gpt-3.5-turbo"
];
// AI-SDK tool definition: text generation via OpenAI chat models.
// Reads OPENAI_API_KEY from the environment, validates prompt length and
// model against SUPPORTED_MODELS before any network call, then invokes
// generateText and returns text plus usage/parameter metadata.
var rawOpenAITool = (0, import_ai3.tool)({
description: "Generate high-quality text responses using OpenAI GPT models with advanced parameter control",
parameters: import_zod2.z.object({
prompt: import_zod2.z.string().min(1).max(5e4).describe("The text prompt to send to OpenAI (required, max 50,000 characters)"),
model: import_zod2.z.string().optional().default("gpt-4o-mini").describe("OpenAI model to use (gpt-4o, gpt-4o-mini, gpt-4-turbo, gpt-4, gpt-3.5-turbo)"),
maxTokens: import_zod2.z.number().int().min(1).max(4096).optional().default(1e3).describe("Maximum number of tokens to generate (1-4096, default: 1000)"),
temperature: import_zod2.z.number().min(0).max(2).optional().default(0.7).describe("Controls randomness - lower values are more focused (0-2, default: 0.7)"),
topP: import_zod2.z.number().min(0).max(1).optional().describe("Controls diversity via nucleus sampling (0-1, optional)"),
presencePenalty: import_zod2.z.number().min(-2).max(2).optional().describe("Penalizes repeated tokens (-2 to 2, optional)"),
frequencyPenalty: import_zod2.z.number().min(-2).max(2).optional().describe("Penalizes frequent tokens (-2 to 2, optional)")
}),
// Performs a single generateText call and normalizes the result shape.
execute: async ({
prompt,
model = "gpt-4o-mini",
maxTokens = 1e3,
temperature = 0.7,
topP,
presencePenalty,
frequencyPenalty
}) => {
// Fail fast on missing credentials or invalid input before any API call.
const apiKey = process.env.OPENAI_API_KEY;
if (!apiKey) {
throw new Error("OPENAI_API_KEY environment variable is required");
}
if (!prompt || prompt.trim().length === 0) {
throw new Error("Prompt cannot be empty");
}
if (prompt.length > 5e4) {
throw new Error("Prompt exceeds maximum length of 50,000 characters");
}
if (!SUPPORTED_MODELS.includes(model)) {
throw new Error(`Model "${model}" not in supported list`);
}
console.log("\u{1F916} OpenAI Tool - Generation started:", {
model,
promptLength: prompt.length,
maxTokens,
temperature,
topP,
presencePenalty,
frequencyPenalty
});
try {
const openai = (0, import_openai.createOpenAI)({
apiKey
});
// Optional sampling params are only forwarded when explicitly provided,
// so the provider's own defaults apply otherwise.
const generateConfig = {
model: openai(model),
prompt: prompt.trim(),
maxTokens,
temperature
};
if (topP !== void 0) {
generateConfig.topP = topP;
}
if (presencePenalty !== void 0) {
generateConfig.presencePenalty = presencePenalty;
}
if (frequencyPenalty !== void 0) {
generateConfig.frequencyPenalty = frequencyPenalty;
}
const { text, usage, finishReason } = await (0, import_ai4.generateText)(generateConfig);
console.log("\u2705 OpenAI Tool - Generation completed:", {
model,
tokensUsed: usage?.totalTokens || 0,
responseLength: text.length,
finishReason
});
return {
success: true,
text,
model,
usage: {
promptTokens: usage?.promptTokens || 0,
completionTokens: usage?.completionTokens || 0,
totalTokens: usage?.totalTokens || 0
},
finishReason,
parameters: {
temperature,
maxTokens,
topP,
presencePenalty,
frequencyPenalty
},
metadata: {
generatedAt: (/* @__PURE__ */ new Date()).toISOString(),
promptLength: prompt.length,
responseLength: text.length
}
};
} catch (error) {
console.error("\u274C OpenAI Tool - Generation failed:", {
model,
promptLength: prompt.length,
error: error instanceof Error ? error.message : JSON.stringify(error)
});
// Map well-known provider failures (matched on message substrings) to
// actionable errors; anything unrecognized is re-wrapped with context below.
if (error instanceof Error) {
if (error.message.includes("rate limit") || error.message.includes("429")) {
throw new Error("OpenAI API rate limit exceeded. Please try again in a moment.");
}
if (error.message.includes("401") || error.message.includes("authentication")) {
throw new Error("OpenAI API authentication failed. Please check your API key.");
}
if (error.message.includes("token") && error.message.includes("limit")) {
throw new Error(`Token limit exceeded. Try reducing maxTokens or prompt length.`);
}
if (error.message.includes("model") && error.message.includes("not found")) {
throw new Error(`Invalid model "${model}". Please use a supported OpenAI model.`);
}
if (error.message.includes("network") || error.message.includes("timeout")) {
throw new Error("Network error connecting to OpenAI API. Please try again.");
}
}
throw new Error(`OpenAI text generation failed: ${error instanceof Error ? error.message : JSON.stringify(error)}`);
}
}
});
// Static OpenAgentic metadata for the OpenAI text tool (id, display name,
// suggested use cases shown to agents/users, logo URL).
var toolDetails = {
toolId: "openai_text_generation",
name: "OpenAI Text Generation",
useCases: [
"Generate creative content and stories",
"Answer questions and provide explanations",
"Summarize text and documents",
"Write code and technical documentation",
"Translate text between languages",
"Proofread and edit content",
"Generate marketing copy and descriptions",
"Create blog posts and articles",
"Brainstorm ideas and concepts",
"Generate product descriptions",
"Write emails and communications"
],
logo: "https://www.openagentic.org/tools/openai.svg"
};
// Public export: the AI-SDK tool merged with its OpenAgentic metadata.
var openaiTool = toOpenAgenticTool(rawOpenAITool, toolDetails);
// src/tools/openai-image.ts
var import_ai5 = require("ai");
var import_zod3 = require("zod");
// src/utils/s3.ts
var import_client_s3 = require("@aws-sdk/client-s3");
// Root key prefix for every object this library writes to S3.
var DIRECTORY_PREFIX = "openagentic/";
// String enum of S3 key prefixes per asset category (compiled TypeScript
// enum pattern; e.g. S3Directory.IMAGES === "openagentic/images").
var S3Directory = ((S3Directory2) => {
S3Directory2["IMAGES"] = DIRECTORY_PREFIX + "images";
S3Directory2["AUDIO"] = DIRECTORY_PREFIX + "audio";
S3Directory2["VIDEOS"] = DIRECTORY_PREFIX + "videos";
S3Directory2["DOCUMENTS"] = DIRECTORY_PREFIX + "documents";
S3Directory2["WEBSITES"] = DIRECTORY_PREFIX + "websites";
S3Directory2["UPLOADS"] = DIRECTORY_PREFIX + "uploads";
return S3Directory2;
})(S3Directory || {});
// Lazily-initialized module singletons, populated by validateS3Config().
var s3Client = null;
var s3Config = null;
// Read AWS credentials, region and bucket from the environment, failing fast
// with a descriptive error listing any missing variable, then (re)build the
// module-level s3Config and s3Client singletons.
function validateS3Config() {
const requiredVars = [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_REGION",
"S3_BUCKET_NAME"
];
const missing = requiredVars.filter((varName) => !process.env[varName]);
if (missing.length > 0) {
throw new Error(
`Missing required environment variables: ${missing.join(", ")}
Please ensure all AWS S3 configuration variables are set:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_REGION
- S3_BUCKET_NAME`
);
}
s3Config = {
accessKeyId: process.env.AWS_ACCESS_KEY_ID,
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
region: process.env.AWS_REGION,
bucketName: process.env.S3_BUCKET_NAME
};
s3Client = new import_client_s3.S3Client({
region: s3Config.region,
credentials: {
accessKeyId: s3Config.accessKeyId,
secretAccessKey: s3Config.secretAccessKey
}
});
}
// Return the shared S3Client, initializing it (and s3Config) on first use.
// Throws if required environment variables are missing.
function getS3Client() {
if (!s3Client || !s3Config) {
validateS3Config();
}
return s3Client;
}
// Return the shared S3 configuration, initializing it on first use.
// Throws if required environment variables are missing.
function getS3Config() {
if (!s3Config) {
validateS3Config();
}
return s3Config;
}
/**
 * Make a string safe for use as a filename: strip characters that are
 * illegal on common filesystems, collapse whitespace/underscores, trim
 * leading/trailing underscores, and cap the length at 200 characters.
 * Falls back to "unnamed_file" if nothing survives cleaning.
 * @throws {Error} when input is not a non-empty string.
 */
function sanitizeFilename(input) {
  if (typeof input !== "string" || !input) {
    throw new Error("Filename must be a non-empty string");
  }
  const cleaned = input
    .trim()
    .replace(/[<>:"/\\|?*\x00-\x1f]/g, "")
    .replace(/\s+/g, "_")
    .replace(/_+/g, "_")
    .replace(/^_+|_+$/g, "")
    .substring(0, 200);
  return cleaned || "unnamed_file";
}
/**
 * Build a collision-resistant file name of the form
 * `[prefix_]slug_timestamp_random.ext` from a prompt string.
 * @throws {Error} when prompt or extension is missing.
 */
function generateFileName(prompt, extension, prefix) {
  if (!prompt || !extension) {
    throw new Error("Prompt and extension are required");
  }
  // ISO timestamp made filesystem-safe: ':'/'.' -> '-', 'T' -> '_', 'Z' dropped.
  const isoNow = (/* @__PURE__ */ new Date()).toISOString();
  const timestamp = isoNow.replace(/[:.]/g, "-").replace(/T/, "_").replace(/Z/, "");
  // Lowercased, sanitized prompt slug capped at 50 characters.
  const slug = sanitizeFilename(prompt).toLowerCase().substring(0, 50);
  const ext = extension.startsWith(".") ? extension : `.${extension}`;
  const head = prefix ? `${sanitizeFilename(prefix)}_` : "";
  // Six base-36 characters to avoid collisions within the same millisecond.
  const randomSuffix = Math.random().toString(36).substring(2, 8);
  return `${head}${slug}_${timestamp}_${randomSuffix}${ext}`;
}
// Convenience wrappers around generateFileName with per-category prefixes
// ("img"/"audio"/"video") and sensible default extensions.
function generateImageFileName(prompt, extension = "png") {
return generateFileName(prompt, extension, "img");
}
function generateAudioFileName(prompt, extension = "mp3") {
return generateFileName(prompt, extension, "audio");
}
function generateVideoFileName(prompt, extension = "mp4") {
return generateFileName(prompt, extension, "video");
}
/**
 * Map a file extension (with or without a leading dot, any case) to its MIME
 * content type. Unknown extensions fall back to application/octet-stream.
 */
function getContentTypeFromExtension(extension) {
  const key = extension.toLowerCase().replace(".", "");
  const mimeByExtension = {
    // Images
    jpg: "image/jpeg",
    jpeg: "image/jpeg",
    png: "image/png",
    gif: "image/gif",
    webp: "image/webp",
    svg: "image/svg+xml",
    bmp: "image/bmp",
    ico: "image/x-icon",
    // Audio
    mp3: "audio/mpeg",
    wav: "audio/wav",
    ogg: "audio/ogg",
    flac: "audio/flac",
    aac: "audio/aac",
    m4a: "audio/mp4",
    // Video
    mp4: "video/mp4",
    avi: "video/x-msvideo",
    mov: "video/quicktime",
    wmv: "video/x-ms-wmv",
    flv: "video/x-flv",
    webm: "video/webm",
    mkv: "video/x-matroska",
    // Documents
    pdf: "application/pdf",
    doc: "application/msword",
    docx: "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    xls: "application/vnd.ms-excel",
    xlsx: "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    ppt: "application/vnd.ms-powerpoint",
    pptx: "application/vnd.openxmlformats-officedocument.presentationml.presentation",
    txt: "text/plain",
    rtf: "application/rtf",
    // Web
    html: "text/html",
    htm: "text/html",
    css: "text/css",
    js: "application/javascript",
    json: "application/json",
    xml: "application/xml",
    // Archives
    zip: "application/zip",
    tar: "application/x-tar",
    gz: "application/gzip",
    rar: "application/vnd.rar",
    "7z": "application/x-7z-compressed",
    // Other
    bin: "application/octet-stream"
  };
  return mimeByExtension[key] ?? "application/octet-stream";
}
/**
 * Ensure an upload does not exceed the size limit for its file category.
 * @param {number} size - Byte length of the buffer to upload.
 * @param {string} fileType - One of: image, audio, video, document, website, upload.
 * @throws {Error} when the size exceeds the category's limit.
 */
function validateFileSize(size, fileType) {
  const MB = 1024 * 1024;
  // Per-category limits in bytes.
  const sizeLimits = {
    image: 50 * MB,
    audio: 100 * MB,
    video: 500 * MB,
    document: 25 * MB,
    website: 10 * MB,
    upload: 100 * MB
  };
  // Fix: an unrecognized fileType used to resolve `limit` to `undefined`,
  // and `size > undefined` is always false, so validation was silently
  // skipped. Unknown categories now fall back to the generic upload limit.
  const limit = sizeLimits[fileType] ?? sizeLimits.upload;
  if (size > limit) {
    const limitMB = Math.round(limit / MB);
    const sizeMB = Math.round(size / MB);
    throw new Error(
      `File size (${sizeMB}MB) exceeds limit for ${fileType} files (${limitMB}MB)`
    );
  }
}
/**
 * Upload a buffer to S3 under the given directory prefix and return the
 * public HTTPS URL of the object.
 *
 * Fix: the size check previously always used the generic 100MB "upload"
 * limit regardless of destination, contradicting the category limits the
 * callers had already applied (e.g. uploadVideoToS3 permits 500MB, but any
 * video over 100MB was rejected here; documents were allowed 4x their 25MB
 * limit). The limit is now derived from the target directory.
 *
 * @param {Buffer} buffer - File contents (must be a non-empty Buffer).
 * @param {string} fileName - Desired name; sanitized before use as the key.
 * @param {string} contentType - MIME type stored on the object.
 * @param {string} [directory] - S3Directory prefix (defaults to uploads).
 * @param {string} [description] - Optional metadata description.
 * @returns {Promise<string>} Public URL of the uploaded object.
 * @throws {Error} wrapping any validation or AWS failure.
 */
async function uploadFileToS3(buffer, fileName, contentType, directory = S3Directory.UPLOADS, description) {
  try {
    if (!Buffer.isBuffer(buffer) || buffer.length === 0) {
      throw new Error("Invalid or empty buffer provided");
    }
    if (!fileName || typeof fileName !== "string") {
      throw new Error("Valid filename is required");
    }
    if (!contentType || typeof contentType !== "string") {
      throw new Error("Valid content type is required");
    }
    // Choose the size limit that matches the destination directory instead
    // of unconditionally applying the generic limit.
    const fileTypeByDirectory = {
      [S3Directory.IMAGES]: "image",
      [S3Directory.AUDIO]: "audio",
      [S3Directory.VIDEOS]: "video",
      [S3Directory.DOCUMENTS]: "document",
      [S3Directory.WEBSITES]: "website",
      [S3Directory.UPLOADS]: "upload"
    };
    validateFileSize(buffer.length, fileTypeByDirectory[directory] ?? "upload");
    const sanitizedFileName = sanitizeFilename(fileName);
    const key = `${directory}/${sanitizedFileName}`;
    const config = getS3Config();
    const client = getS3Client();
    const uploadParams = {
      Bucket: config.bucketName,
      Key: key,
      Body: buffer,
      ContentType: contentType,
      CacheControl: "public, max-age=31536000",
      // 1 year cache
      Metadata: {
        "upload-timestamp": (/* @__PURE__ */ new Date()).toISOString(),
        "original-name": fileName,
        "file-size": buffer.length.toString(),
        ...description && { description }
      }
    };
    console.log(`Uploading file to S3: ${key} (${buffer.length} bytes)`);
    const command = new import_client_s3.PutObjectCommand(uploadParams);
    await client.send(command);
    // NOTE(review): URL shape assumes public-read objects in the standard
    // AWS partition — confirm bucket policy matches.
    const publicUrl = `https://${config.bucketName}.s3.${config.region}.amazonaws.com/${key}`;
    console.log(`\u2705 File uploaded successfully: ${publicUrl}`);
    return publicUrl;
  } catch (error) {
    console.error("\u274C S3 upload failed:", error);
    throw new Error(
      `S3 upload failed: ${error instanceof Error ? error.message : String(error)}`
    );
  }
}
/**
 * Validate an image buffer, resolve its MIME type (defaulting to image/jpeg
 * when the extension does not map to an image type), and upload it to the
 * images directory on S3. Resolves to the public URL.
 */
async function uploadImageToS3(imageBuffer, fileName, contentType, description) {
  try {
    if (!Buffer.isBuffer(imageBuffer) || imageBuffer.length === 0) {
      throw new Error("Invalid or empty image buffer");
    }
    validateFileSize(imageBuffer.length, "image");
    let resolvedContentType = contentType;
    if (!resolvedContentType) {
      // Infer from the file extension; coerce non-image results to JPEG.
      const ext = fileName.split(".").pop()?.toLowerCase() || "";
      resolvedContentType = getContentTypeFromExtension(ext);
      if (!resolvedContentType.startsWith("image/")) {
        resolvedContentType = "image/jpeg";
      }
    }
    console.log(`Uploading image: ${fileName} (${imageBuffer.length} bytes)`);
    return await uploadFileToS3(
      imageBuffer,
      fileName,
      resolvedContentType,
      S3Directory.IMAGES,
      description || "Image upload"
    );
  } catch (error) {
    console.error("\u274C Image upload failed:", error);
    const reason = error instanceof Error ? error.message : String(error);
    throw new Error(`Image upload failed: ${reason}`);
  }
}
/**
 * Validate an audio buffer, resolve its MIME type (defaulting to audio/mpeg
 * when the extension does not map to an audio type), and upload it to the
 * audio directory on S3. Resolves to the public URL.
 */
async function uploadAudioToS3(audioBuffer, fileName, contentType, description) {
  try {
    if (!Buffer.isBuffer(audioBuffer) || audioBuffer.length === 0) {
      throw new Error("Invalid or empty audio buffer");
    }
    validateFileSize(audioBuffer.length, "audio");
    let resolvedContentType = contentType;
    if (!resolvedContentType) {
      // Infer from the file extension; coerce non-audio results to MP3.
      const ext = fileName.split(".").pop()?.toLowerCase() || "";
      resolvedContentType = getContentTypeFromExtension(ext);
      if (!resolvedContentType.startsWith("audio/")) {
        resolvedContentType = "audio/mpeg";
      }
    }
    console.log(`Uploading audio: ${fileName} (${audioBuffer.length} bytes)`);
    return await uploadFileToS3(
      audioBuffer,
      fileName,
      resolvedContentType,
      S3Directory.AUDIO,
      description || "Audio upload"
    );
  } catch (error) {
    console.error("\u274C Audio upload failed:", error);
    const reason = error instanceof Error ? error.message : String(error);
    throw new Error(`Audio upload failed: ${reason}`);
  }
}
/**
 * Validate a video buffer, resolve its MIME type (defaulting to video/mp4
 * when the extension does not map to a video type), and upload it to the
 * videos directory on S3. Resolves to the public URL.
 */
async function uploadVideoToS3(videoBuffer, fileName, contentType, description) {
  try {
    if (!Buffer.isBuffer(videoBuffer) || videoBuffer.length === 0) {
      throw new Error("Invalid or empty video buffer");
    }
    validateFileSize(videoBuffer.length, "video");
    let resolvedContentType = contentType;
    if (!resolvedContentType) {
      // Infer from the file extension; coerce non-video results to MP4.
      const ext = fileName.split(".").pop()?.toLowerCase() || "";
      resolvedContentType = getContentTypeFromExtension(ext);
      if (!resolvedContentType.startsWith("video/")) {
        resolvedContentType = "video/mp4";
      }
    }
    console.log(`Uploading video: ${fileName} (${videoBuffer.length} bytes)`);
    return await uploadFileToS3(
      videoBuffer,
      fileName,
      resolvedContentType,
      S3Directory.VIDEOS,
      description || "Video upload"
    );
  } catch (error) {
    console.error("\u274C Video upload failed:", error);
    const reason = error instanceof Error ? error.message : String(error);
    throw new Error(`Video upload failed: ${reason}`);
  }
}
// src/tools/openai-image.ts
// DALL-E models accepted by the image tool, and the exact image sizes each
// model supports (used to validate the `size` parameter per model).
var SUPPORTED_MODELS2 = [
"dall-e-3",
"dall-e-2"
];
var MODEL_SIZES = {
"dall-e-3": ["1024x1024", "1024x1792", "1792x1024"],
"dall-e-2": ["256x256", "512x512", "1024x1024"]
};
// AI-SDK tool definition: image generation via the OpenAI Images REST API
// (DALL-E 2/3). Validates prompt/model/size/quality/style up front, requests
// a base64 image, uploads it to S3, and returns the public URL plus metadata.
var rawOpenAIImageTool = (0, import_ai5.tool)({
description: "Generate high-quality images using OpenAI DALL-E models with automatic S3 upload and storage",
parameters: import_zod3.z.object({
prompt: import_zod3.z.string().min(1).max(4e3).describe("The text prompt to generate an image from (required, max 4000 characters)"),
model: import_zod3.z.string().optional().default("dall-e-3").describe("The DALL-E model to use (dall-e-3, dall-e-2, default: dall-e-3)"),
size: import_zod3.z.string().optional().default("1024x1024").describe("The size of the image - DALL-E 3: 1024x1024, 1024x1792, 1792x1024 | DALL-E 2: 256x256, 512x512, 1024x1024"),
quality: import_zod3.z.string().optional().default("standard").describe("The quality of the image (standard, hd) - DALL-E 3 only, default: standard"),
style: import_zod3.z.string().optional().default("vivid").describe("The style of the image (vivid, natural) - DALL-E 3 only, default: vivid")
}),
// Runs one generation request, persists the result to S3, and returns URLs.
execute: async ({
prompt,
model = "dall-e-3",
size = "1024x1024",
quality = "standard",
style = "vivid"
}) => {
// Environment and input validation — fail fast before hitting the API.
const apiKey = process.env.OPENAI_API_KEY;
if (!apiKey) {
throw new Error("OPENAI_API_KEY environment variable is required");
}
if (!prompt || prompt.trim().length === 0) {
throw new Error("Prompt cannot be empty");
}
if (prompt.length > 4e3) {
throw new Error("Prompt exceeds maximum length of 4000 characters");
}
if (!SUPPORTED_MODELS2.includes(model)) {
throw new Error(`Model "${model}" not in supported list`);
}
const validSizes = MODEL_SIZES[model] || MODEL_SIZES["dall-e-3"];
if (!validSizes.includes(size)) {
throw new Error(`Invalid size "${size}" for model "${model}". Supported sizes: ${validSizes.join(", ")}`);
}
if (quality !== "standard" && quality !== "hd") {
throw new Error('Quality must be either "standard" or "hd"');
}
// DALL-E 2 does not support quality/style; downgrade or warn rather than
// failing. NOTE(review): this reassigns the `quality` parameter in place.
if (model === "dall-e-2" && quality === "hd") {
console.warn('Quality parameter "hd" not supported for DALL-E 2, using "standard"');
quality = "standard";
}
if (style !== "vivid" && style !== "natural") {
throw new Error('Style must be either "vivid" or "natural"');
}
if (model === "dall-e-2" && style === "natural") {
console.warn("Style parameter not supported for DALL-E 2, ignoring");
}
console.log("\u{1F3A8} OpenAI Image Generation Tool - Generation started:", {
timestamp: (/* @__PURE__ */ new Date()).toISOString(),
prompt: prompt.substring(0, 100) + (prompt.length > 100 ? "..." : ""),
promptLength: prompt.length,
model,
size,
quality,
style
});
try {
// b64_json response avoids a second fetch of a temporary image URL.
const requestBody = {
model,
prompt: prompt.trim(),
size,
response_format: "b64_json",
// Get base64 for easier handling
n: 1
// Generate one image
};
// quality/style are only valid for DALL-E 3 requests.
if (model === "dall-e-3") {
requestBody.quality = quality;
requestBody.style = style;
}
const response = await fetch("https://api.openai.com/v1/images/generations", {
method: "POST",
headers: {
"Authorization": `Bearer ${apiKey}`,
"Content-Type": "application/json"
},
body: JSON.stringify(requestBody)
});
// Prefer the API's own error message when the error body is JSON.
if (!response.ok) {
const errorText = await response.text();
let errorMessage = `OpenAI Images API error: ${response.status} - ${response.statusText}`;
try {
const errorJson = JSON.parse(errorText);
if (errorJson.error && errorJson.error.message) {
errorMessage = errorJson.error.message;
}
} catch {
}
throw new Error(errorMessage);
}
let imageData;
try {
imageData = await response.json();
} catch (error) {
throw new Error(`Failed to parse OpenAI Images API response: ${error instanceof Error ? error.message : JSON.stringify(error)}`);
}
if (!imageData || !imageData.data || !Array.isArray(imageData.data) || imageData.data.length === 0) {
throw new Error("Invalid response structure from OpenAI Images API");
}
const generatedImage = imageData.data[0];
if (!generatedImage.b64_json) {
throw new Error("No base64 image data received from OpenAI Images API");
}
// Decode the base64 payload and persist it to S3 with a generated name.
const imageBuffer = Buffer.from(generatedImage.b64_json, "base64");
const fileName = generateImageFileName(prompt, "png");
console.log("\u{1F4E4} Uploading generated image to S3...");
const imageUrl = await uploadImageToS3(
imageBuffer,
fileName,
"image/png",
`DALL-E ${model} generated image: ${prompt.substring(0, 100)}`
);
console.log("\u2705 OpenAI Image Generation Tool - Generation completed:", {
model,
size,
quality,
style,
imageUrl,
fileName,
imageSize: imageBuffer.length,
revisedPrompt: generatedImage.revised_prompt || null
});
return {
success: true,
imageUrl,
fileName,
model,
size,
quality,
style,
originalPrompt: prompt.trim(),
revisedPrompt: generatedImage.revised_prompt || null,
metadata: {
generatedAt: (/* @__PURE__ */ new Date()).toISOString(),
promptLength: prompt.length,
fileSize: imageBuffer.length,
uploadedToS3: true
}
};
} catch (error) {
console.error("\u274C OpenAI Image Generation Tool - Generation failed:", {
model,
size,
quality,
style,
promptLength: prompt.length,
error: error instanceof Error ? error.message : JSON.stringify(error)
});
// Map well-known failure modes (matched on message substrings) to
// actionable errors; anything unrecognized is re-wrapped below.
if (error instanceof Error) {
if (error.message.includes("rate limit") || error.message.includes("429")) {
throw new Error("OpenAI API rate limit exceeded. Please try again in a moment.");
}
if (error.message.includes("401") || error.message.includes("authentication")) {
throw new Error("OpenAI API authentication failed. Please check your API key.");
}
if (error.message.includes("model") && error.message.includes("not found")) {
throw new Error(`Invalid model "${model}". Please use a supported DALL-E model.`);
}
if (error.message.includes("content policy") || error.message.includes("safety")) {
throw new Error("Image generation request violates OpenAI content policy. Please modify your prompt.");
}
if (error.message.includes("prompt") && error.message.includes("too long")) {
throw new Error("Prompt is too long. Please reduce the prompt length and try again.");
}
if (error.message.includes("size") || error.message.includes("dimensions")) {
throw new Error(`Invalid image size "${size}" for model "${model}". Please use a supported size.`);
}
if (error.message.includes("quality") || error.message.includes("style")) {
throw new Error("Invalid quality or style parameter. Please check the supported values for your model.");
}
if (error.message.includes("network") || error.message.includes("timeout") || error.message.includes("ECONNREFUSED") || error.message.includes("ETIMEDOUT")) {
throw new Error("Network error connecting to OpenAI API. Please try again.");
}
if (error.message.includes("S3") || error.message.includes("upload")) {
throw new Error("Failed to upload generated image to S3. Please check your S3 configuration.");
}
if (error.message.includes("base64") || error.message.includes("buffer")) {
throw new Error("Failed to process generated image data. Please try again.");
}
if (error.message.includes("502") || error.message.includes("503") || error.message.includes("504")) {
throw new Error("OpenAI service temporarily unavailable. Please try again later.");
}
}
throw new Error(`OpenAI image generation failed: ${error instanceof Error ? error.message : JSON.stringify(error)}`);
}
}
});
// Static OpenAgentic metadata for the DALL-E image tool (id, display name,
// suggested use cases, logo URL).
var toolDetails2 = {
toolId: "openai_image_generator",
name: "OpenAI Image Generator",
useCases: [
"Generate photorealistic images from text descriptions",
"Create artistic illustrations and digital art",
"Design logos and brand imagery",
"Generate product mockups and prototypes",
"Create concept art for creative projects",
"Generate marketing visuals and advertisements",
"Create custom artwork for presentations",
"Generate book covers and poster designs",
"Create social media content and graphics",
"Generate architectural and interior design concepts",
"Create character designs and illustrations",
"Generate landscape and nature imagery"
],
logo: "https://www.openagentic.org/tools/openai.svg"
};
// Public export: the AI-SDK tool merged with its OpenAgentic metadata.
var openaiImageTool = toOpenAgenticTool(rawOpenAIImageTool, toolDetails2);
// src/tools/anthropic.ts
var import_ai6 = require("ai");
var import_zod4 = require("zod");
var import_ai7 = require("ai");
var import_anthropic = require("@ai-sdk/anthropic");
// Allow-list of Anthropic Claude models accepted by the anthropic_chat tool;
// execute() rejects any model name not in this list.
var SUPPORTED_MODELS3 = [
"claude-opus-4-20250514",
"claude-sonnet-4-20250514",
"claude-3-7-sonnet-latest",
"claude-3-5-sonnet-latest",
"claude-3-5-haiku-latest"
];
// AI-SDK tool definition: text generation via Anthropic Claude models.
// Reads ANTHROPIC_API_KEY from the environment, validates prompt length and
// model against SUPPORTED_MODELS3 before any network call, then invokes
// generateText and returns text plus usage/parameter metadata.
var rawAnthropicTool = (0, import_ai6.tool)({
description: "Generate high-quality text responses using Anthropic Claude models with advanced parameter control",
parameters: import_zod4.z.object({
prompt: import_zod4.z.string().min(1).max(2e5).describe("The text prompt to send to Claude (required, max 200,000 characters)"),
model: import_zod4.z.string().optional().default("claude-sonnet-4-20250514").describe("Claude model to use (claude-opus-4-20250514, claude-sonnet-4-20250514, claude-3-7-sonnet-latest, claude-3-5-sonnet-latest, claude-3-5-haiku-latest)"),
maxTokens: import_zod4.z.number().int().min(1).max(4096).optional().default(1e3).describe("Maximum number of tokens to generate (1-4096, default: 1000)"),
temperature: import_zod4.z.number().min(0).max(1).optional().default(0.7).describe("Controls randomness - lower values are more focused (0-1, default: 0.7)"),
topP: import_zod4.z.number().min(0).max(1).optional().describe("Controls diversity via nucleus sampling (0-1, optional)"),
topK: import_zod4.z.number().int().min(1).max(40).optional().describe("Controls diversity by limiting token choices (1-40, optional)")
}),
// Performs a single generateText call and normalizes the result shape.
execute: async ({
prompt,
model = "claude-sonnet-4-20250514",
maxTokens = 1e3,
temperature = 0.7,
topP,
topK
}) => {
// Fail fast on missing credentials or invalid input before any API call.
const apiKey = process.env.ANTHROPIC_API_KEY;
if (!apiKey) {
throw new Error("ANTHROPIC_API_KEY environment variable is required");
}
if (!prompt || prompt.trim().length === 0) {
throw new Error("Prompt cannot be empty");
}
if (prompt.length > 2e5) {
throw new Error("Prompt exceeds maximum length of 200,000 characters");
}
if (!SUPPORTED_MODELS3.includes(model)) {
throw new Error(`Model "${model}" not in supported list`);
}
console.log("\u{1F916} Anthropic Tool - Generation started:", {
model,
promptLength: prompt.length,
maxTokens,
temperature,
topP,
topK
});
try {
const anthropic = (0, import_anthropic.createAnthropic)({
apiKey
});
// Optional sampling params are only forwarded when explicitly provided,
// so the provider's own defaults apply otherwise.
const generateConfig = {
model: anthropic(model),
prompt: prompt.trim(),
maxTokens,
temperature
};
if (topP !== void 0) {
generateConfig.topP = topP;
}
if (topK !== void 0) {
generateConfig.topK = topK;
}
const { text, usage, finishReason } = await (0, import_ai7.generateText)(generateConfig);
console.log("\u2705 Anthropic Tool - Generation completed:", {
model,
tokensUsed: usage?.totalTokens || 0,
responseLength: text.length,
finishReason
});
return {
success: true,
text,
model,
usage: {
promptTokens: usage?.promptTokens || 0,
completionTokens: usage?.completionTokens || 0,
totalTokens: usage?.totalTokens || 0
},
finishReason,
parameters: {
temperature,
maxTokens,
topP,
topK
},
metadata: {
generatedAt: (/* @__PURE__ */ new Date()).toISOString(),
promptLength: prompt.length,
responseLength: text.length
}
};
} catch (error) {
console.error("\u274C Anthropic Tool - Generation failed:", {
model,
promptLength: prompt.length,
error: error instanceof Error ? error.message : String(error)
});
// Map well-known provider failures (matched on message substrings) to
// actionable errors; anything unrecognized is re-wrapped with context below.
if (error instanceof Error) {
if (error.message.includes("Overloaded") || error.message.includes("overloaded") || error.message.includes("529")) {
throw new Error("Anthropic API is currently overloaded. Please try again in a few moments.");
}
if (error.message.includes("rate limit") || error.message.includes("429")) {
throw new Error("Anthropic API rate limit exceeded. Please try again in a moment.");
}
if (error.message.includes("401") || error.message.includes("authentication")) {
throw new Error("Anthropic API authentication failed. Please check your API key.");
}
if (error.message.includes("token") && error.message.includes("limit")) {
throw new Error(`Token limit exceeded. Try reducing maxTokens or prompt length.`);
}
if (error.message.includes("model") && error.message.includes("not found")) {
throw new Error(`Invalid model "${model}". Please use a supported Claude model.`);
}
if (error.message.includes("context length") || error.message.includes("too long")) {
throw new Error("Prompt is too long for the selected Claude model. Please reduce the prompt length.");
}
if (error.message.includes("network") || error.message.includes("timeout")) {
throw new Error("Network error connecting to Anthropic API. Please try again.");
}
if (error.message.includes("safety") || error.message.includes("harmful")) {
throw new Error("Request was rejected by Claude safety filters. Please modify your prompt.");
}
if (error.message.includes("502") || error.message.includes("503") || error.message.includes("504")) {
throw new Error("Anthropic service temporarily unavailable. Please try again later.");
}
}
throw new Error(`Anthropic text generation failed: ${error instanceof Error ? error.message : String(error)}`);
}
}
});
// Static OpenAgentic metadata for the Anthropic Claude tool (id, display
// name, suggested use cases, logo URL).
var toolDetails3 = {
toolId: "anthropic_chat",
name: "Anthropic Claude Chat",
useCases: [
"Generate thoughtful and nuanced creative content",
"Provide detailed analysis and explanations",
"Write technical documentation with clarity",
"Create research summaries and reports",
"Generate code with comprehensive comments",
"Perform complex reasoning tasks",
"Write professional emails and communications",
"Create educational content and tutorials",
"Generate marketing copy with brand voice",
"Conduct thorough text analysis and critique",
"Write structured articles and blog posts",
"Provide ethical AI assistance and guidance"
],
logo: "https://www.openagentic.org/tools/anthropic.svg"
};
// Public export: the AI-SDK tool merged with its OpenAgentic metadata.
var anthropicTool = toOpenAgenticTool(rawAnthropicTool, toolDetails3);
// src/tools/gemini.ts
var import_ai8 = require("ai");
var import_zod5 = require("zod");
var import_ai9 = require("ai");
var import_google = require("@ai-sdk/google");
// Gemini model IDs accepted by the gemini tool's runtime validation.
// The 2.5-series entries are intentionally commented out (not yet enabled).
var SUPPORTED_MODELS4 = [
"gemini-1.5-pro",
"gemini-1.5-flash"
// 'gemini-2.5-pro',
// 'gemini-2.5-flash',
// 'gemini-2.5-flash-lite-preview-06-17',
];
// Google Gemini text-generation tool built on the AI SDK's `tool()` helper.
// Validates its inputs, optionally attaches image URLs for multimodal prompts,
// calls `generateText` with the Google provider, and maps common provider
// failures onto user-friendly error messages.
// Requires: GOOGLE_API_KEY environment variable.
// Throws: Error on missing API key, invalid input, unsupported model, or any
// provider/network failure (rethrown with a descriptive message).
var rawGeminiTool = (0, import_ai8.tool)({
  description: "Generate high-quality text responses using Google Gemini models with multimodal capabilities",
  parameters: import_zod5.z.object({
    prompt: import_zod5.z.string().min(1).max(2e5).describe("The text prompt to send to Gemini (required, max 200,000 characters)"),
    model: import_zod5.z.string().optional().default("gemini-1.5-pro").describe("Gemini model to use (gemini-1.5-pro, gemini-1.5-flash)"),
    // .describe('Gemini model to use (gemini-1.5-pro, gemini-1.5-flash, gemini-2.5-pro, gemini-2.5-flash, gemini-2.5-flash-lite-preview-06-17)'),
    maxTokens: import_zod5.z.number().int().min(1).max(8192).optional().default(1e3).describe("Maximum number of tokens to generate (1-8192, default: 1000)"),
    temperature: import_zod5.z.number().min(0).max(1).optional().default(0.7).describe("Controls randomness - lower values are more focused (0-1, default: 0.7)"),
    topP: import_zod5.z.number().min(0).max(1).optional().describe("Controls diversity via nucleus sampling (0-1, optional)"),
    topK: import_zod5.z.number().int().min(1).max(40).optional().describe("Controls diversity by limiting token choices (1-40, optional)"),
    imageUrls: import_zod5.z.array(import_zod5.z.string().url()).optional().default([]).describe("Array of image URLs for multimodal analysis (optional)")
  }),
  execute: async ({
    prompt,
    // BUGFIX: the destructuring fallback was "gemini-2.5-pro", which is not in
    // SUPPORTED_MODELS4 and contradicted the schema default above — a direct
    // call to execute() without a model always failed the supported-model
    // check. Aligned with the schema default "gemini-1.5-pro".
    model = "gemini-1.5-pro",
    maxTokens = 1e3,
    temperature = 0.7,
    topP,
    topK,
    imageUrls = []
  }) => {
    const apiKey = process.env.GOOGLE_API_KEY;
    if (!apiKey) {
      throw new Error("GOOGLE_API_KEY environment variable is required");
    }
    // Input validation mirrors the Zod schema so direct execute() calls are
    // also guarded.
    if (!prompt || prompt.trim().length === 0) {
      throw new Error("Prompt cannot be empty");
    }
    if (prompt.length > 2e5) {
      throw new Error("Prompt exceeds maximum length of 200,000 characters");
    }
    if (!SUPPORTED_MODELS4.includes(model)) {
      throw new Error(`Model "${model}" not in supported list`);
    }
    // Each image URL must parse as a URL before we send it to the provider.
    if (imageUrls.length > 0) {
      for (const url of imageUrls) {
        try {
          new URL(url);
        } catch {
          throw new Error(`Invalid image URL: ${url}`);
        }
      }
    }
    console.log("\u{1F916} Gemini Tool - Generation started:", {
      model,
      promptLength: prompt.length,
      maxTokens,
      temperature,
      topP,
      topK,
      imageCount: imageUrls.length,
      hasImages: imageUrls.length > 0
    });
    try {
      const google = (0, import_google.createGoogleGenerativeAI)({
        apiKey
      });
      const generateConfig = {
        model: google(model),
        maxTokens: Math.min(maxTokens, 4e3),
        // Cap maxTokens at 4000 to avoid the empty response bug
        temperature
      };
      // Only forward sampling knobs the caller actually set.
      if (topP !== void 0) {
        generateConfig.topP = topP;
      }
      if (topK !== void 0) {
        generateConfig.topK = topK;
      }
      if (imageUrls.length > 0) {
        // Multimodal request: build a single user message combining the text
        // prompt with one image part per URL.
        const content = [
          { type: "text", text: prompt.trim() }
        ];
        for (const imageUrl of imageUrls) {
          content.push({
            type: "image",
            image: imageUrl
          });
        }
        generateConfig.messages = [
          {
            role: "user",
            content
          }
        ];
      } else {
        // Text-only request: the simple prompt form suffices.
        generateConfig.prompt = prompt.trim();
      }
      const { text, usage, finishReason } = await (0, import_ai9.generateText)(generateConfig);
      if (!text || text.trim().length === 0) {
        throw new Error(`Gemini model returned empty response. FinishReason: ${finishReason}. This may indicate insufficient maxTokens, content filtering, or model issues.`);
      }
      console.log("\u2705 Gemini Tool - Generation completed:", {
        model,
        tokensUsed: usage?.totalTokens || 0,
        responseLength: text.length,
        finishReason,
        imageCount: imageUrls.length
      });
      return {
        success: true,
        text,
        model,
        usage: {
          promptTokens: usage?.promptTokens || 0,
          completionTokens: usage?.completionTokens || 0,
          totalTokens: usage?.totalTokens || 0
        },
        finishReason,
        parameters: {
          temperature,
          maxTokens,
          topP,
          topK
        },
        multimodal: {
          imageCount: imageUrls.length,
          hasImages: imageUrls.length > 0
        },
        metadata: {
          generatedAt: (/* @__PURE__ */ new Date()).toISOString(),
          promptLength: prompt.length,
          responseLength: text.length
        }
      };
    } catch (error) {
      console.error("\u274C Gemini Tool - Generation failed:", {
        model,
        promptLength: prompt.length,
        imageCount: imageUrls.length,
        error: error instanceof Error ? error.message : String(error)
      });
      // Translate well-known provider error signatures into actionable
      // messages; anything unrecognized falls through to the generic rethrow.
      if (error instanceof Error) {
        if (error.message.includes("rate limit") || error.message.includes("429")) {
          throw new Error("Google API rate limit exceeded. Please try again in a moment.");
        }
        if (error.message.includes("401") || error.message.includes("authentication")) {
          throw new Error("Google API authentication failed. Please check your API key.");
        }
        if (error.message.includes("token") && error.message.includes("limit")) {
          throw new Error(`Token limit exceeded. Try reducing maxTokens or prompt length.`);
        }
        if (error.message.includes("model") && error.message.includes("not found")) {
          throw new Error(`Invalid model "${model}". Please use a supported Gemini model.`);
        }
        if (error.message.includes("image") || error.message.includes("multimodal")) {
          throw new Error("Image processing failed. Please check image URLs and try again.");
        }
        if (error.message.includes("context length") || error.message.includes("too long")) {
          throw new Error("Prompt is too long for the selected Gemini model. Please reduce the prompt length.");
        }
        if (error.message.includes("network") || error.message.includes("timeout")) {
          throw new Error("Network error connecting to Google API. Please try again.");
        }
        if (error.message.includes("safety") || error.message.includes("blocked")) {
          throw new Error("Request was blocked by Google safety filters. Please modify your prompt.");
        }
      }
      throw new Error(`Gemini text generation failed: ${error instanceof Error ? error.message : String(error)}`);
    }
  }
});
// Registry metadata for the Google Gemini chat tool: stable tool ID,
// human-readable name, example use cases (used for tool discovery/selection),
// and a logo URL for UI display.
var toolDetails4 = {
toolId: "gemini_chat",
name: "Google Gemini Chat",
useCases: [
"Generate creative content and stories",
"Answer questions with multimodal understanding",
"Analyze images and visual content",
"Summarize text and visual documents",
"Write code with contextual understanding",
"Translate text between languages",
"Generate descriptions of images",
"Create content from visual inputs",
"Multimodal reasoning and analysis",
"Visual question answering"
],
logo: "https://www.openagentic.org/tools/gemini.svg"
};
// Attach the metadata above to the raw AI-SDK tool definition to produce the
// exported OpenAgentic tool object.
var geminiTool = toOpenAgenticTool(rawGeminiTool, toolDetails4);
// src/tools/github.ts
var import_ai10 = require("ai");
var import_zod6 = require("zod");
var import_rest = require("@octokit/rest");
var rawGitHubTool = (0, import_ai10.tool)({
description: "Fetch file contents and directory listings from GitHub repositories with comprehensive error handling",
parameters: import_zod6.z.object({
owner: import_zod6.z.string().min(1).describe("The GitHub repository owner (username or organization)"),
repo: import_zod6.z.string().min(1).describe("The GitHub repository name"),
path: import_zod6.z.string().optional().default("").describe("The path to the file or directory (optional, defaults to root)"),
ref: import_zod6.z.string().optional().describe("The branch, tag, or commit SHA to fetch from (optional, defaults to default branch)")
}),
execute: async ({
owner,
repo,
path = "",
ref
}) => {
const apiKey = process.env.GITHUB_TOKEN || process.env.GITHUB_API_KEY;
if (!apiKey) {
throw new Error("GITHUB_TOKEN or GITHUB_API_KEY environment variable is required");
}
if (!owner || owner.trim().length === 0) {
throw new Error("Repository owner cannot be empty");
}
if (!repo || repo.trim().length === 0) {
throw new Error("Repository name cannot be empty");
}
const normalizedPath = path.trim().replace(/^\/+|\/+$/g, "");
console.log("\u{1F4C2} GitHub Tool - Fetching started:", {
timestamp: (/* @__PURE__ */ new Date()).toISOString(),
owner: owner.trim(),
repo: repo.trim(),
path: normalizedPath || "(root)",
ref: ref || "(default)",
hasApiKey: !!apiKey
});
try {
const octokit = new import_rest.Octokit({
auth: apiKey
});
const requestParams = {
owner: owner.trim(),
repo: repo.trim(),
path: normalizedPath
};
if (ref) {
requestParams.ref = ref;
}
const response = await octokit.rest.repos.getContent(requestParams);
if (Array.isArray(response.data)) {
console.log("\u2705 GitHub Tool - Directory fetch completed:", {
owner: owner.trim(),
repo: repo.trim(),
path: normalizedPath || "/",
itemCount: response.data.length,
ref: ref || "default"
});
return {
success: true,
type: "directory",
repository: {
owner: owner.trim(),
repo: repo.trim(),
ref: ref || "default"
},
directory: {
path: normalizedPath || "/",
contents: response.data.map((item) => ({
name: item.name,
type: item.type,
size: item.size || 0,
path: item.path,
downloadUrl: item.download_url || null,
htmlUrl: item.html_url,
sha: item.sha
}))
},
metadata: {
fetchedAt: (/* @__PURE__ */ new Date()).toISOString(),
itemCount: response.data.length
}
};
} else {
const fileData = response.data;
let decodedContent = null;
let isBinary = false;
if (fileData.content && fileData.encoding === "base64") {
try {
const buffer = Buffer.from(fileData.content, "base64");
if (buffer.includes(0)) {
isBinary = true;
} else {
decodedContent = buffer.toString("utf-8");
}
} catch (error) {
console.warn("\u26A0\uFE0F Failed to decode file content, treating as binary");
isBinary = true;
}
}
console.log("\u2705 GitHub Tool - File fetch completed:", {
owner: owner.trim(),
repo: repo.trim(),
path: normalizedPath,
fileName: fileData.name,
fileSize: fileData.size,
encoding: fileData.encoding,
isBinary,
hasContent: !!decodedContent,
ref: ref || "default"
});
return {
success: true,
type: "file",
repository: {
owner: owner.trim(),
repo: repo.trim(),
ref: ref || "default"
},
file: {
name: fileData.name,
path: fileData.path,
size: fileData.size,
content: decodedContent,
encoding: fileData.encoding,
sha: fileData.sha,
downloadUrl: fileData.download_url,
htmlUrl: fileData.html_url
},
metadata: {
fetchedAt: (/* @__PURE__ */ new Date()).toISOString(),
hasContent: !!decodedContent,
isBinary,
contentLength: decodedContent?.length || 0
}
};
}
} catch (error) {
console.error("\u274C GitHub Tool - Fetch failed:", {
owner: owner.trim(),
repo: repo.trim(),
path: normalizedPath,
ref: ref || "default",
error: error instanceof Error ? error.message : String(error),
status: error.status || "unknown"
});
if (error.status === 404) {
throw new Error(`Repository ${owner}/${repo} or path '${normalizedPath}' not found. Please check the repository name and path.`);
} else if (error.status === 401) {
throw new Error("GitHub authentication failed. Please check your GitHub token.");
} else if (error.status === 403) {
if (error.message && error.message.includes("rate limit")) {
throw new Error("GitHub API rate limit exceeded. Please try again later.");
} else if (error.message && error.message.includes("abuse")) {
throw new Error("Request was flagged as potential abuse. Please try again later.");
} else {
throw new Error("Access forbidden. The repository may be private or your token lacks sufficient permissions.");
}
} else if (error.status === 451) {
throw new Error("Repository unavailable due to legal reasons.");
} else if (error.message && error.message.includes("timeout")) {
throw new Error("GitHub API request timed ou