
openagentic

A TypeScript framework for building AI agents with self-contained tool orchestration capabilities

1,366 lines (1,355 loc) 227 kB
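The listing below is the bundled source for the package's provider and tool modules (src/types.ts, src/providers/manager.ts, src/tools/*, src/utils/s3.ts). For orientation, here is a minimal usage sketch, assuming `ProviderManager` and `openaiTool` are re-exported from the package entry point (the export statements fall outside this excerpt) and that an OPENAI_API_KEY is set in the environment; because the bundled tools are built with the Vercel AI SDK's tool() helper, they can be passed directly to generateText.

// Hypothetical usage sketch — not part of the bundle below.
// Assumes `ProviderManager` and `openaiTool` are exported by "openagentic"
// and that process.env.OPENAI_API_KEY is available.
import { generateText } from "ai";
import { ProviderManager, openaiTool } from "openagentic";

async function main() {
  // "gpt-4o-mini" is auto-detected as an OpenAI model and normalized
  // with defaults (temperature 0.7, API key from the environment).
  const model = ProviderManager.createModel("gpt-4o-mini");
  const provider = await ProviderManager.createProvider(model);

  // The tool is a standard AI SDK tool, so the model can call it as a step.
  const { text } = await generateText({
    model: provider(model.model),
    tools: { openai_text_generation: openaiTool },
    maxSteps: 3,
    prompt: "Summarize what the openagentic package does in one sentence.",
  });

  console.log(text);
}

main().catch(console.error);
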
// src/tools/openai.ts import { tool as tool2 } from "ai"; import { z as z3 } from "zod"; import { generateText } from "ai"; import { createOpenAI } from "@ai-sdk/openai"; // src/tools/utils.ts import "ai"; import { tool } from "ai"; import { z as z2 } from "zod"; // src/types.ts import { z } from "zod"; import "ai"; var AIModelSchema = z.object({ provider: z.enum(["openai", "anthropic", "google", "google-vertex", "perplexity", "xai", "custom"]), model: z.string(), apiKey: z.string().optional(), baseURL: z.string().optional(), temperature: z.number().min(0).max(2).optional().default(0.7), maxTokens: z.number().positive().optional(), topP: z.number().min(0).max(1).optional(), project: z.string().optional(), location: z.string().optional() }); var MessageSchema = z.object({ role: z.enum(["system", "user", "assistant", "tool"]), content: z.string(), toolCallId: z.string().optional(), toolCalls: z.array(z.object({ toolCallId: z.string(), toolName: z.string(), args: z.record(z.any()) })).optional() }); var ExecutionResultSchema = z.object({ success: z.boolean(), result: z.any().optional(), error: z.string().optional(), messages: z.array(MessageSchema), iterations: z.number(), toolCallsUsed: z.array(z.string()), executionStats: z.object({ totalDuration: z.number(), stepsExecuted: z.number(), toolCallsExecuted: z.number(), tokensUsed: z.number().optional(), averageStepDuration: z.number(), averageToolCallDuration: z.number() }).optional(), usage: z.object({ totalTokens: z.number(), promptTokens: z.number(), completionTokens: z.number() }).optional() }); // src/tools/utils.ts import { createAnthropic } from "@ai-sdk/anthropic"; import { createAmazonBedrock } from "@ai-sdk/amazon-bedrock"; // src/providers/manager.ts var providerConfigs = { openai: { baseURL: "https://api.openai.com/v1", models: { "gpt-4": { contextWindow: 8192, cost: { input: 0.03, output: 0.06 }, description: "Most capable GPT-4 model" }, "gpt-4-turbo": { contextWindow: 128e3, cost: { input: 0.01, output: 0.03 }, description: "GPT-4 Turbo with larger context window" }, "gpt-4o": { contextWindow: 128e3, cost: { input: 5e-3, output: 0.015 }, description: "GPT-4 Omni - fastest and most cost-effective" }, "gpt-4o-mini": { contextWindow: 128e3, cost: { input: 15e-5, output: 6e-4 }, description: "Smaller, faster GPT-4o variant" }, "o3": { contextWindow: 2e5, cost: { input: 0.06, output: 0.24 }, description: "Latest reasoning model" }, "o3-mini": { contextWindow: 2e5, cost: { input: 0.015, output: 0.06 }, description: "Smaller o3 variant with faster inference" } } }, anthropic: { baseURL: "https://api.anthropic.com", models: { "claude-opus-4-20250514": { contextWindow: 2e5, cost: { input: 0.015, output: 0.075 }, description: "Most capable Claude 4 model" }, "claude-sonnet-4-20250514": { contextWindow: 2e5, cost: { input: 3e-3, output: 0.015 }, description: "Balanced Claude 4 model for most use cases" }, "claude-3-7-sonnet-latest": { contextWindow: 2e5, cost: { input: 3e-3, output: 0.015 }, description: "Latest Claude 3.7 Sonnet model" }, "claude-3-5-sonnet-latest": { contextWindow: 2e5, cost: { input: 3e-3, output: 0.015 }, description: "Latest Claude 3.5 Sonnet model" } } }, google: { baseURL: "https://generativelanguage.googleapis.com/v1beta", models: { "gemini-2.5-pro": { contextWindow: 2e6, cost: { input: 1e-3, output: 2e-3 }, description: "Latest Gemini 2.5 Pro preview model" }, "gemini-2.5-flash": { contextWindow: 1e6, cost: { input: 5e-4, output: 1e-3 }, description: "Fast Gemini 2.5 Flash preview model" }, "gemini-1.5-pro": { 
contextWindow: 2e6, cost: { input: 125e-5, output: 5e-3 }, description: "Gemini 1.5 Pro with large context window" }, "gemini-1.5-flash": { contextWindow: 1e6, cost: { input: 75e-6, output: 3e-4 }, description: "Fast and efficient Gemini 1.5 model" }, "gemini-2.5-flash-lite-preview-06-17": { contextWindow: 1e6, cost: { input: 75e-6, output: 3e-4 }, description: "Fast and efficient Gemini 2.5 Flash Lite preview model" } } }, "google-vertex": { baseURL: "https://us-central1-aiplatform.googleapis.com", models: { "gemini-2.5-pro": { contextWindow: 2e6, cost: { input: 1e-3, output: 2e-3 }, description: "Latest Gemini 2.5 Pro preview model via Vertex AI" }, "gemini-2.5-flash": { contextWindow: 1e6, cost: { input: 5e-4, output: 1e-3 }, description: "Fast Gemini 2.5 Flash preview model via Vertex AI" }, "gemini-1.5-pro": { contextWindow: 2e6, cost: { input: 125e-5, output: 5e-3 }, description: "Gemini 1.5 Pro via Vertex AI" }, "gemini-1.5-flash": { contextWindow: 1e6, cost: { input: 75e-6, output: 3e-4 }, description: "Fast Gemini 1.5 model via Vertex AI" } } }, perplexity: { baseURL: "https://api.perplexity.ai", models: { "llama-3.1-sonar-small-128k-online": { contextWindow: 127072, cost: { input: 2e-4, output: 2e-4 }, description: "Small Llama 3.1 Sonar with online search" }, "llama-3.1-sonar-large-128k-online": { contextWindow: 127072, cost: { input: 1e-3, output: 1e-3 }, description: "Large Llama 3.1 Sonar with online search" }, "llama-3.1-sonar-huge-128k-online": { contextWindow: 127072, cost: { input: 5e-3, output: 5e-3 }, description: "Huge Llama 3.1 Sonar with online search" } } }, xai: { baseURL: "https://api.x.ai/v1", models: { "grok-beta": { contextWindow: 131072, cost: { input: 5e-3, output: 0.015 }, description: "Grok conversational AI model" } } } }; var ProviderManager = class { static userApiKeys; /** * Set user-provided API keys that take precedence over environment variables */ static setUserApiKeys(apiKeys) { this.userApiKeys = apiKeys; if (apiKeys && Object.keys(apiKeys).length > 0) { console.log("\u{1F511} User API keys configured for providers:", Object.keys(apiKeys).length); } } /** * Get AWS credentials from user API keys or environment variables */ static getAwsCredentials() { const userKeys = this.userApiKeys; return { accessKeyId: userKeys?.awsAccessKeyId || process.env.MY_AWS_ACCESS_KEY_ID || process.env.AWS_ACCESS_KEY_ID, secretAccessKey: userKeys?.awsSecretAccessKey || process.env.MY_AWS_SECRET_ACCESS_KEY || process.env.AWS_SECRET_ACCESS_KEY, region: userKeys?.awsRegion || process.env.MY_AWS_REGION || process.env.AWS_REGION, bucketName: userKeys?.awsS3Bucket || process.env.MY_S3_BUCKET_NAME || process.env.S3_BUCKET_NAME }; } /** * Get AWS Bedrock credentials from user API keys or environment variables */ static getBedrockCredentials() { const userKeys = this.userApiKeys; return { accessKeyId: userKeys?.bedrockAccessKeyId || process.env.BEDROCK_ACCESS_KEY_ID, secretAccessKey: userKeys?.bedrockSecretAccessKey || process.env.BEDROCK_SECRET_ACCESS_KEY, region: userKeys?.bedrockRegion || process.env.BEDROCK_REGION }; } /** * Create a model configuration from a string or AIModel object * Automatically detects provider from model name if string is provided */ static createModel(input) { if (typeof input === "string") { return this.autoDetectProvider(input); } return this.validateAndNormalizeModel(input); } /** * Create an AI SDK provider instance for the given model */ static async createProvider(model) { const apiKey = model.apiKey ?? 
this.getDefaultApiKey(model.provider); switch (model.provider) { case "openai": { const { createOpenAI: createOpenAI4 } = await import("@ai-sdk/openai"); const config = {}; if (apiKey !== void 0) config.apiKey = apiKey; if (model.baseURL !== void 0) config.baseURL = model.baseURL; return createOpenAI4(config); } case "anthropic": { const { provider } = getAnthropicModelInstance(model.model); return provider; } case "google": { const { createGoogleGenerativeAI: createGoogleGenerativeAI3 } = await import("@ai-sdk/google"); const config = {}; if (apiKey !== void 0) config.apiKey = apiKey; return createGoogleGenerativeAI3(config); } case "google-vertex": { const { createVertex } = await import("@ai-sdk/google-vertex"); const config = {}; if (model.project !== void 0) config.project = model.project; if (model.location !== void 0) config.location = model.location; return createVertex(config); } case "perplexity": { const { createPerplexity: createPerplexity2 } = await import("@ai-sdk/perplexity"); const config = {}; if (apiKey !== void 0) config.apiKey = apiKey; return createPerplexity2(config); } case "xai": { const { createXai: createXai2 } = await import("@ai-sdk/xai"); const config = {}; if (apiKey !== void 0) config.apiKey = apiKey; return createXai2(config); } case "custom": { if (!model.baseURL) { throw new Error("Custom provider requires baseURL"); } const { createOpenAI: createOpenAI4 } = await import("@ai-sdk/openai"); const config = { baseURL: model.baseURL }; if (apiKey !== void 0) config.apiKey = apiKey; return createOpenAI4(config); } default: throw new Error(`Unsupported provider: ${model.provider}`); } } /** * Create a provider for a specific provider name (for tool context) */ static async createProviderByName(providerName, apiKey) { const key = apiKey ?? this.getDefaultApiKey(providerName); switch (providerName) { case "openai": { const { createOpenAI: createOpenAI4 } = await import("@ai-sdk/openai"); if (!key) throw new Error("OpenAI API key not found"); return createOpenAI4({ apiKey: key }); } case "anthropic": { const { createAnthropic: createAnthropic3 } = await import("@ai-sdk/anthropic"); if (!key) throw new Error("Anthropic API key not found"); return createAnthropic3({ apiKey: key }); } case "google": { const { createGoogleGenerativeAI: createGoogleGenerativeAI3 } = await import("@ai-sdk/google"); if (!key) throw new Error("Google API key not found"); return createGoogleGenerativeAI3({ apiKey: key }); } case "perplexity": { const { createPerplexity: createPerplexity2 } = await import("@ai-sdk/perplexity"); if (!key) throw new Error("Perplexity API key not found"); return createPerplexity2({ apiKey: key }); } case "xai": { const { createXai: createXai2 } = await import("@ai-sdk/xai"); if (!key) throw new Error("xAI API key not found"); return createXai2({ apiKey: key }); } default: throw new Error(`Unsupported provider: ${providerName}`); } } /** * Get all available providers and their models */ static getAllProviders() { return Object.entries(providerConfigs).map(([provider, config]) => ({ provider, models: Object.keys(config.models) })); } /** * Get supported models for a provider */ static getProviderModels(provider) { const config = providerConfigs[provider]; return config ? 
Object.keys(config.models) : []; } /** * Check if a model is supported by a provider */ static isModelSupported(provider, model) { const models = this.getProviderModels(provider); return models.includes(model); } /** * Get model information (context window, cost, description) */ static getModelInfo(provider, model) { const config = providerConfigs[provider]; if (!config) { throw new Error(`Unknown provider: ${provider}`); } const modelInfo = config.models[model]; if (!modelInfo) { throw new Error(`Unknown model: ${model} for provider: ${provider}`); } return modelInfo; } // Private methods static autoDetectProvider(modelName) { let provider; let apiKey; if (modelName.includes("gpt") || modelName.includes("o3")) { provider = "openai"; apiKey = this.getDefaultApiKey("openai"); } else if (modelName.includes("claude")) { provider = "anthropic"; apiKey = this.getDefaultApiKey("anthropic"); } else if (modelName.includes("gemini")) { provider = "google"; apiKey = this.getDefaultApiKey("google"); } else if (modelName.includes("grok")) { provider = "xai"; apiKey = this.getDefaultApiKey("xai"); } else if (modelName.includes("llama") && modelName.includes("sonar")) { provider = "perplexity"; apiKey = this.getDefaultApiKey("perplexity"); } else { provider = "openai"; apiKey = this.getDefaultApiKey("openai"); console.warn(`Unknown model "${modelName}", defaulting to OpenAI provider`); } if (!this.isModelSupported(provider, modelName)) { throw new Error(`Model "${modelName}" not found in ${provider} configuration`); } return { provider, model: modelName, apiKey, temperature: 0.7 }; } static validateAndNormalizeModel(model) { if (!model.provider || !model.model) { throw new Error("AIModel must have provider and model fields"); } const normalizedModel = { ...model }; if (normalizedModel.apiKey === void 0) { normalizedModel.apiKey = this.getDefaultApiKey(normalizedModel.provider); } if (normalizedModel.temperature === void 0) { normalizedModel.temperature = 0.7; } return normalizedModel; } static getDefaultApiKey(provider) { if (this.userApiKeys && provider in this.userApiKeys) { return this.userApiKeys[provider]; } switch (provider) { case "openai": return process.env.OPENAI_API_KEY; case "anthropic": return process.env.ANTHROPIC_API_KEY; case "google": return process.env.GOOGLE_API_KEY; case "google-vertex": return void 0; // Vertex uses service account auth case "perplexity": return process.env.PERPLEXITY_API_KEY; case "xai": return process.env.XAI_API_KEY; case "custom": return void 0; // Custom providers handle their own auth default: return void 0; } } }; // src/tools/utils.ts function toOpenAgenticTool(tool26, details) { return { ...tool26, ...details }; } function getAnthropicModelInstance(model) { const bedrockCredentials = ProviderManager.getBedrockCredentials(); let modelInstance; let provider; if (bedrockCredentials.accessKeyId && bedrockCredentials.secretAccessKey) { console.log("Using Bedrock"); provider = createAmazonBedrock({ region: bedrockCredentials.region, accessKeyId: bedrockCredentials.accessKeyId, secretAccessKey: bedrockCredentials.secretAccessKey }); if (model.includes("sonnet")) { modelInstance = provider("us.anthropic.claude-sonnet-4-20250514-v1:0"); console.log("Model: Claude Sonnet 4"); } else if (model.includes("opus")) { modelInstance = provider("us.anthropic.claude-4-opus-20250514-v1:0"); console.log("Model: Claude Opus 4"); } else { throw new Error(`Model "${model}" not supported`); } } else { console.log("Using Anthropic"); const apiKey = process.env.ANTHROPIC_API_KEY; if 
(!apiKey) { throw new Error("ANTHROPIC_API_KEY environment variable is required"); } provider = createAnthropic({ apiKey }); modelInstance = provider(model); console.log("Model:", model); } return { provider, modelInstance }; } // src/tools/openai.ts var SUPPORTED_MODELS = [ "gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo" ]; var rawOpenAITool = tool2({ description: "Generate high-quality text responses using OpenAI GPT models with advanced parameter control", parameters: z3.object({ prompt: z3.string().min(1).max(5e4).describe("The text prompt to send to OpenAI (required, max 50,000 characters)"), model: z3.string().optional().default("gpt-4o-mini").describe("OpenAI model to use (gpt-4o, gpt-4o-mini, gpt-4-turbo, gpt-4, gpt-3.5-turbo)"), maxTokens: z3.number().int().min(1).max(4096).optional().default(1e3).describe("Maximum number of tokens to generate (1-4096, default: 1000)"), temperature: z3.number().min(0).max(2).optional().default(0.7).describe("Controls randomness - lower values are more focused (0-2, default: 0.7)"), topP: z3.number().min(0).max(1).optional().describe("Controls diversity via nucleus sampling (0-1, optional)"), presencePenalty: z3.number().min(-2).max(2).optional().describe("Penalizes repeated tokens (-2 to 2, optional)"), frequencyPenalty: z3.number().min(-2).max(2).optional().describe("Penalizes frequent tokens (-2 to 2, optional)") }), execute: async ({ prompt, model = "gpt-4o-mini", maxTokens = 1e3, temperature = 0.7, topP, presencePenalty, frequencyPenalty }) => { const apiKey = process.env.OPENAI_API_KEY; if (!apiKey) { throw new Error("OPENAI_API_KEY environment variable is required"); } if (!prompt || prompt.trim().length === 0) { throw new Error("Prompt cannot be empty"); } if (prompt.length > 5e4) { throw new Error("Prompt exceeds maximum length of 50,000 characters"); } if (!SUPPORTED_MODELS.includes(model)) { throw new Error(`Model "${model}" not in supported list`); } console.log("\u{1F916} OpenAI Tool - Generation started:", { model, promptLength: prompt.length, maxTokens, temperature, topP, presencePenalty, frequencyPenalty }); try { const openai2 = createOpenAI({ apiKey }); const generateConfig = { model: openai2(model), prompt: prompt.trim(), maxTokens, temperature }; if (topP !== void 0) { generateConfig.topP = topP; } if (presencePenalty !== void 0) { generateConfig.presencePenalty = presencePenalty; } if (frequencyPenalty !== void 0) { generateConfig.frequencyPenalty = frequencyPenalty; } const { text, usage, finishReason } = await generateText(generateConfig); console.log("\u2705 OpenAI Tool - Generation completed:", { model, tokensUsed: usage?.totalTokens || 0, responseLength: text.length, finishReason }); return { success: true, text, model, usage: { promptTokens: usage?.promptTokens || 0, completionTokens: usage?.completionTokens || 0, totalTokens: usage?.totalTokens || 0 }, finishReason, parameters: { temperature, maxTokens, topP, presencePenalty, frequencyPenalty }, metadata: { generatedAt: (/* @__PURE__ */ new Date()).toISOString(), promptLength: prompt.length, responseLength: text.length } }; } catch (error) { console.error("\u274C OpenAI Tool - Generation failed:", { model, promptLength: prompt.length, error: error instanceof Error ? error.message : JSON.stringify(error) }); if (error instanceof Error) { if (error.message.includes("rate limit") || error.message.includes("429")) { throw new Error("OpenAI API rate limit exceeded. 
Please try again in a moment."); } if (error.message.includes("401") || error.message.includes("authentication")) { throw new Error("OpenAI API authentication failed. Please check your API key."); } if (error.message.includes("token") && error.message.includes("limit")) { throw new Error(`Token limit exceeded. Try reducing maxTokens or prompt length.`); } if (error.message.includes("model") && error.message.includes("not found")) { throw new Error(`Invalid model "${model}". Please use a supported OpenAI model.`); } if (error.message.includes("network") || error.message.includes("timeout")) { throw new Error("Network error connecting to OpenAI API. Please try again."); } } throw new Error(`OpenAI text generation failed: ${error instanceof Error ? error.message : JSON.stringify(error)}`); } } }); var toolDetails = { toolId: "openai_text_generation", name: "OpenAI Text Generation", useCases: [ "Generate creative content and stories", "Answer questions and provide explanations", "Summarize text and documents", "Write code and technical documentation", "Translate text between languages", "Proofread and edit content", "Generate marketing copy and descriptions", "Create blog posts and articles", "Brainstorm ideas and concepts", "Generate product descriptions", "Write emails and communications" ], logo: "https://www.openagentic.org/tools/openai.svg" }; var openaiTool = toOpenAgenticTool(rawOpenAITool, toolDetails); // src/tools/openai-image.ts import { tool as tool3, experimental_generateImage as generateImage } from "ai"; import { z as z4 } from "zod"; // src/utils/s3.ts import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3"; import { randomUUID } from "crypto"; var DIRECTORY_PREFIX = "openagentic/"; var S3Directory = ((S3Directory2) => { S3Directory2["IMAGES"] = DIRECTORY_PREFIX + "images"; S3Directory2["AUDIO"] = DIRECTORY_PREFIX + "audio"; S3Directory2["VIDEOS"] = DIRECTORY_PREFIX + "videos"; S3Directory2["DOCUMENTS"] = DIRECTORY_PREFIX + "documents"; S3Directory2["WEBSITES"] = DIRECTORY_PREFIX + "websites"; S3Directory2["UPLOADS"] = DIRECTORY_PREFIX + "uploads"; return S3Directory2; })(S3Directory || {}); var s3Client = null; var s3Config = null; function validateS3Config() { const awsCredentials = ProviderManager.getAwsCredentials(); const requiredFields = [ { field: "accessKeyId", value: awsCredentials.accessKeyId }, { field: "secretAccessKey", value: awsCredentials.secretAccessKey }, { field: "region", value: awsCredentials.region }, { field: "bucketName", value: awsCredentials.bucketName } ]; const missing = requiredFields.filter(({ value }) => !value).map(({ field }) => field); if (missing.length > 0) { throw new Error( `Missing required AWS configuration: ${missing.join(", ")} Please ensure all AWS S3 configuration is provided via apiKeys or environment variables: - awsAccessKeyId (or AWS_ACCESS_KEY_ID) - awsSecretAccessKey (or AWS_SECRET_ACCESS_KEY) - awsRegion (or AWS_REGION) - awsS3Bucket (or S3_BUCKET_NAME)` ); } s3Config = { accessKeyId: awsCredentials.accessKeyId, secretAccessKey: awsCredentials.secretAccessKey, region: awsCredentials.region, bucketName: awsCredentials.bucketName }; s3Client = new S3Client({ region: s3Config.region, credentials: { accessKeyId: s3Config.accessKeyId, secretAccessKey: s3Config.secretAccessKey } }); } function getS3Client() { if (!s3Client || !s3Config) { validateS3Config(); } return s3Client; } function getS3Config() { if (!s3Config) { validateS3Config(); } return s3Config; } function sanitizeFilename(input) { if (!input || typeof input !== 
"string") { throw new Error("Filename must be a non-empty string"); } return input.trim().replace(/[<>:"/\\|?*\x00-\x1f]/g, "").replace(/\s+/g, "_").replace(/_+/g, "_").replace(/^_+|_+$/g, "").substring(0, 200) || "unnamed_file"; } function generateFileName(prompt, extension, prefix) { if (!prompt || !extension) { throw new Error("Prompt and extension are required"); } const timestamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-").replace(/T/, "_").replace(/Z/, ""); const sanitizedPrompt = sanitizeFilename(prompt).toLowerCase().substring(0, 50); const cleanExtension = extension.startsWith(".") ? extension : `.${extension}`; const prefixPart = prefix ? `${sanitizeFilename(prefix)}_` : ""; const randomSuffix = Math.random().toString(36).substring(2, 8); return `${prefixPart}${sanitizedPrompt}_${timestamp}_${randomSuffix}${cleanExtension}`; } function generateImageFileName(prompt, extension = "png") { return generateFileName(prompt, extension, "img"); } function generateAudioFileName(prompt, extension = "mp3") { return generateFileName(prompt, extension, "audio"); } function generateVideoFileName(prompt, extension = "mp4") { return generateFileName(prompt, extension, "video"); } function generateHtmlFileName(prompt, extension = "html") { return generateFileName(prompt, extension, "page"); } function getContentTypeFromExtension(extension) { const cleanExt = extension.toLowerCase().replace(".", ""); const contentTypes = { // Images "jpg": "image/jpeg", "jpeg": "image/jpeg", "png": "image/png", "gif": "image/gif", "webp": "image/webp", "svg": "image/svg+xml", "bmp": "image/bmp", "ico": "image/x-icon", // Audio "mp3": "audio/mpeg", "wav": "audio/wav", "ogg": "audio/ogg", "flac": "audio/flac", "aac": "audio/aac", "m4a": "audio/mp4", // Video "mp4": "video/mp4", "avi": "video/x-msvideo", "mov": "video/quicktime", "wmv": "video/x-ms-wmv", "flv": "video/x-flv", "webm": "video/webm", "mkv": "video/x-matroska", // Documents "pdf": "application/pdf", "doc": "application/msword", "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "xls": "application/vnd.ms-excel", "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "ppt": "application/vnd.ms-powerpoint", "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", "txt": "text/plain", "rtf": "application/rtf", // Web "html": "text/html", "htm": "text/html", "css": "text/css", "js": "application/javascript", "json": "application/json", "xml": "application/xml", // Archives "zip": "application/zip", "tar": "application/x-tar", "gz": "application/gzip", "rar": "application/vnd.rar", "7z": "application/x-7z-compressed", // Other "bin": "application/octet-stream" }; return contentTypes[cleanExt] || "application/octet-stream"; } function validateFileSize(size, fileType) { const sizeLimits = { ["image" /* IMAGE */]: 50 * 1024 * 1024, // 50MB ["audio" /* AUDIO */]: 100 * 1024 * 1024, // 100MB ["video" /* VIDEO */]: 500 * 1024 * 1024, // 500MB ["document" /* DOCUMENT */]: 25 * 1024 * 1024, // 25MB ["website" /* WEBSITE */]: 10 * 1024 * 1024, // 10MB ["upload" /* GENERIC */]: 100 * 1024 * 1024 // 100MB }; const limit = sizeLimits[fileType]; if (size > limit) { const limitMB = Math.round(limit / (1024 * 1024)); const sizeMB = Math.round(size / (1024 * 1024)); throw new Error( `File size (${sizeMB}MB) exceeds limit for ${fileType} files (${limitMB}MB)` ); } } async function uploadFileToS3(buffer, fileName, contentType, directory = S3Directory.UPLOADS, description) { 
try { if (!Buffer.isBuffer(buffer) || buffer.length === 0) { throw new Error("Invalid or empty buffer provided"); } if (!fileName || typeof fileName !== "string") { throw new Error("Valid filename is required"); } if (!contentType || typeof contentType !== "string") { throw new Error("Valid content type is required"); } validateFileSize(buffer.length, "upload" /* GENERIC */); const uuid = randomUUID(); fileName = uuid; const sanitizedFileName = sanitizeFilename(fileName); const key = `${directory}/${sanitizedFileName}`; const config = getS3Config(); const client = getS3Client(); const uploadParams = { Bucket: config.bucketName, Key: key, Body: buffer, ContentType: contentType, CacheControl: "public, max-age=31536000", // 1 year cache Metadata: { "upload-timestamp": (/* @__PURE__ */ new Date()).toISOString(), "original-name": fileName, "file-size": buffer.length.toString(), ...description && { description } } }; console.log(`Uploading file to S3: ${key} (${buffer.length} bytes)`); const command = new PutObjectCommand(uploadParams); await client.send(command); const publicUrl = `https://${config.bucketName}.s3.${config.region}.amazonaws.com/${key}`; console.log(`\u2705 File uploaded successfully: ${publicUrl}`); return publicUrl; } catch (error) { console.error("\u274C S3 upload failed:", error); throw new Error( `S3 upload failed: ${error instanceof Error ? error.message : String(error)}` ); } } async function uploadImageToS3(imageBuffer, fileName, contentType, description) { try { if (!Buffer.isBuffer(imageBuffer) || imageBuffer.length === 0) { throw new Error("Invalid or empty image buffer"); } validateFileSize(imageBuffer.length, "image" /* IMAGE */); let finalContentType = contentType; if (!finalContentType) { const extension = fileName.split(".").pop()?.toLowerCase() || ""; finalContentType = getContentTypeFromExtension(extension); if (!finalContentType.startsWith("image/")) { finalContentType = "image/jpeg"; } } console.log(`Uploading image: ${fileName} (${imageBuffer.length} bytes)`); return await uploadFileToS3( imageBuffer, fileName, finalContentType, S3Directory.IMAGES, "Image upload" ); } catch (error) { console.error("\u274C Image upload failed:", error); throw new Error( `Image upload failed: ${error instanceof Error ? error.message : String(error)}` ); } } async function uploadAudioToS3(audioBuffer, fileName, contentType, description) { try { if (!Buffer.isBuffer(audioBuffer) || audioBuffer.length === 0) { throw new Error("Invalid or empty audio buffer"); } validateFileSize(audioBuffer.length, "audio" /* AUDIO */); let finalContentType = contentType; if (!finalContentType) { const extension = fileName.split(".").pop()?.toLowerCase() || ""; finalContentType = getContentTypeFromExtension(extension); if (!finalContentType.startsWith("audio/")) { finalContentType = "audio/mpeg"; } } console.log(`Uploading audio: ${fileName} (${audioBuffer.length} bytes)`); return await uploadFileToS3( audioBuffer, fileName, finalContentType, S3Directory.AUDIO, "Audio upload" ); } catch (error) { console.error("\u274C Audio upload failed:", error); throw new Error( `Audio upload failed: ${error instanceof Error ? 
error.message : String(error)}` ); } } async function uploadVideoToS3(videoBuffer, fileName, contentType, description) { try { if (!Buffer.isBuffer(videoBuffer) || videoBuffer.length === 0) { throw new Error("Invalid or empty video buffer"); } validateFileSize(videoBuffer.length, "video" /* VIDEO */); let finalContentType = contentType; if (!finalContentType) { const extension = fileName.split(".").pop()?.toLowerCase() || ""; finalContentType = getContentTypeFromExtension(extension); if (!finalContentType.startsWith("video/")) { finalContentType = "video/mp4"; } } console.log(`Uploading video: ${fileName} (${videoBuffer.length} bytes)`); return await uploadFileToS3( videoBuffer, fileName, finalContentType, S3Directory.VIDEOS, "Video upload" ); } catch (error) { console.error("\u274C Video upload failed:", error); throw new Error( `Video upload failed: ${error instanceof Error ? error.message : String(error)}` ); } } async function uploadHtmlToS3(htmlContent, fileName, contentType = "text/html", description) { try { if (!htmlContent || typeof htmlContent !== "string") { throw new Error("Valid HTML content is required"); } if (htmlContent.trim().length === 0) { throw new Error("HTML content cannot be empty"); } const htmlBuffer = Buffer.from(htmlContent, "utf-8"); validateFileSize(htmlBuffer.length, "website" /* WEBSITE */); let finalFileName = fileName; if (!fileName.toLowerCase().endsWith(".html") && !fileName.toLowerCase().endsWith(".htm")) { finalFileName = `${fileName}.html`; } console.log(`Uploading HTML: ${finalFileName} (${htmlBuffer.length} bytes)`); return await uploadFileToS3( htmlBuffer, finalFileName, contentType, S3Directory.WEBSITES, "HTML website upload" ); } catch (error) { console.error("\u274C HTML upload failed:", error); throw new Error( `HTML upload failed: ${error instanceof Error ? 
error.message : String(error)}` ); } } // src/tools/openai-image.ts import { openai } from "@ai-sdk/openai"; var SUPPORTED_MODELS2 = [ // 'dall-e-3', // 'dall-e-2', "gpt-image-1" ]; var MODEL_SIZES = { // 'dall-e-3': ['1024x1024', '1024x1792', '1792x1024'], // 'dall-e-2': ['256x256', '512x512', '1024x1024'], "gpt-image-1": ["1024x1024", "1536x1024", "1024x1536"] }; var MODEL_QUALITY = { // 'dall-e-3': 'standard', // 'dall-e-2': 'standard', "gpt-image-1": "high" }; var rawOpenAIImageTool = tool3({ description: "Generate images using OpenAI models with automatic S3 upload and storage", parameters: z4.object({ prompt: z4.string().min(1).max(4e3).describe("The text prompt to generate an image from (required, max 4000 characters)"), model: z4.string().optional().default("gpt-image-1").describe("The model to use (gpt-image-1)"), size: z4.string().optional().default("1024x1024").describe("The size of the image - 1024x1024, 1536x1024, 1024x1536") // quality: z.string() // .optional() // .default('standard') // .describe('The quality of the image (auto, high, standard, hd) - DALL-E 3 only, default: high'), // style: z.string() // .optional() // .default('vivid') // .describe('The style of the image (vivid, natural) - DALL-E 3 only, default: vivid'), }), execute: async ({ prompt, model = "gpt-image-1", size = "1024x1024" // quality = 'high', // style = 'vivid' }) => { const apiKey = process.env.OPENAI_API_KEY; if (!apiKey) { throw new Error("OPENAI_API_KEY environment variable is required"); } if (!prompt || prompt.trim().length === 0) { throw new Error("Prompt cannot be empty"); } if (prompt.length > 4e3) { throw new Error("Prompt exceeds maximum length of 4000 characters"); } if (!SUPPORTED_MODELS2.includes(model)) { throw new Error(`Model "${model}" not in supported list`); } const validSizes = MODEL_SIZES[model] || MODEL_SIZES["gpt-image-1"]; if (!validSizes.includes(size)) { throw new Error(`Invalid size "${size}" for model "${model}". Supported sizes: ${validSizes.join(", ")}`); } console.log("\u{1F3A8} OpenAI Image Generation Tool - Generation started:", { timestamp: (/* @__PURE__ */ new Date()).toISOString(), prompt: prompt.substring(0, 100) + (prompt.length > 100 ? "..." 
: ""), promptLength: prompt.length, model, size, quality: MODEL_QUALITY[model] // style, }); try { const { image } = await generateImage({ model: openai.image(model), prompt: prompt.trim(), providerOptions: { openai: { quality: MODEL_QUALITY[model] } }, size, n: 1 // Generate one image }); if (!image) { throw new Error("Invalid response structure from OpenAI Images API"); } const generatedImageBase64 = image.base64; if (!generatedImageBase64) { throw new Error("No base64 image data received from OpenAI Images API"); } const imageBuffer = Buffer.from(generatedImageBase64, "base64"); const fileName = generateImageFileName(prompt, "png"); console.log("\u{1F4E4} Uploading generated image to S3..."); const imageUrl = await uploadImageToS3( imageBuffer, fileName, "image/png", `OpenAI ${model} generated image` ); console.log("\u2705 OpenAI Image Generation Tool - Generation completed:", { model, size, quality: MODEL_QUALITY[model], imageUrl, fileName, imageSize: imageBuffer.length }); return { success: true, imageUrl, fileName, model, size, quality: MODEL_QUALITY[model], originalPrompt: prompt.trim(), metadata: { generatedAt: (/* @__PURE__ */ new Date()).toISOString(), promptLength: prompt.length, fileSize: imageBuffer.length, uploadedToS3: true } }; } catch (error) { console.error("\u274C OpenAI Image Generation Tool - Generation failed:", { model, size, quality: MODEL_QUALITY[model], promptLength: prompt.length, error: error instanceof Error ? error.message : JSON.stringify(error) }); if (error instanceof Error) { if (error.message.includes("rate limit") || error.message.includes("429")) { throw new Error("OpenAI API rate limit exceeded. Please try again in a moment."); } if (error.message.includes("401") || error.message.includes("authentication")) { throw new Error("OpenAI API authentication failed. Please check your API key."); } if (error.message.includes("model") && error.message.includes("not found")) { throw new Error(`Invalid model "${model}". Please use a supported DALL-E model.`); } if (error.message.includes("content policy") || error.message.includes("safety")) { throw new Error("Image generation request violates OpenAI content policy. Please modify your prompt."); } if (error.message.includes("prompt") && error.message.includes("too long")) { throw new Error("Prompt is too long. Please reduce the prompt length and try again."); } if (error.message.includes("size") || error.message.includes("dimensions")) { throw new Error(`Invalid image size "${size}" for model "${model}". Please use a supported size.`); } if (error.message.includes("quality") || error.message.includes("style")) { throw new Error("Invalid quality or style parameter. Please check the supported values for your model."); } if (error.message.includes("network") || error.message.includes("timeout") || error.message.includes("ECONNREFUSED") || error.message.includes("ETIMEDOUT")) { throw new Error("Network error connecting to OpenAI API. Please try again."); } if (error.message.includes("S3") || error.message.includes("upload")) { throw new Error("Failed to upload generated image to S3. Please check your S3 configuration."); } if (error.message.includes("base64") || error.message.includes("buffer")) { throw new Error("Failed to process generated image data. Please try again."); } if (error.message.includes("502") || error.message.includes("503") || error.message.includes("504")) { throw new Error("OpenAI service temporarily unavailable. 
Please try again later."); } } throw new Error(`OpenAI image generation failed: ${error instanceof Error ? error.message : JSON.stringify(error)}`); } } }); var toolDetails2 = { toolId: "openai_image_generator", name: "OpenAI Image Generator", useCases: [ "Generate photorealistic images from text descriptions", "Create artistic illustrations and digital art", "Design logos and brand imagery", "Generate product mockups and prototypes", "Create concept art for creative projects", "Generate marketing visuals and advertisements", "Create custom artwork for presentations", "Generate book covers and poster designs", "Create social media content and graphics", "Generate architectural and interior design concepts", "Create character designs and illustrations", "Generate landscape and nature imagery" ], logo: "https://www.openagentic.org/tools/openai.svg" }; var openaiImageTool = toOpenAgenticTool(rawOpenAIImageTool, toolDetails2); // src/tools/openai-vector-store.ts import { tool as tool4 } from "ai"; import { z as z5 } from "zod"; import "@ai-sdk/openai"; var FilterSchema = z5.object({ type: z5.enum(["eq", "ne", "gt", "gte", "lt", "lte", "and", "or"]), key: z5.string().optional(), value: z5.union([z5.string(), z5.number(), z5.boolean()]).optional(), filters: z5.array(z5.lazy(() => FilterSchema)).optional() }); var rawOpenAIVectorStoreTool = tool4({ description: "Search through OpenAI vector stores for relevant document chunks using semantic search with optional metadata filtering", parameters: z5.object({ vectorStoreId: z5.string().min(1).describe("The ID of the OpenAI vector store to search (required, format: vs_abc123)"), query: z5.string().min(1).max(1e4).describe("The search query to find relevant documents (required, max 10,000 characters)"), maxNumResults: z5.number().int().min(1).max(50).optional().default(10).describe("Maximum number of search results to return (1-50, default: 10)"), filters: FilterSchema.optional().describe("Optional metadata filters to narrow search results (based on file attributes)"), rewriteQuery: z5.boolean().optional().default(false).describe("Whether to rewrite the natural language query for better vector search (default: false)"), rankingOptions: z5.object({ ranker: z5.enum(["auto", "default-2024-11-15"]).optional().default("auto"), scoreThreshold: z5.number().min(0).max(1).optional() }).optional().describe("Optional ranking configuration to fine-tune search results") }), execute: async ({ vectorStoreId, query, maxNumResults = 10, filters, rewriteQuery = false, rankingOptions }) => { const apiKey = process.env.OPENAI_API_KEY; if (!apiKey) { throw new Error("OPENAI_API_KEY environment variable is required"); } if (!vectorStoreId || vectorStoreId.trim().length === 0) { throw new Error("Vector store ID cannot be empty"); } if (!query || query.trim().length === 0) { throw new Error("Query cannot be empty"); } if (query.length > 1e4) { throw new Error("Query exceeds maximum length of 10,000 characters"); } if (!vectorStoreId.startsWith("vs_")) { console.warn('\u26A0\uFE0F Vector store ID should typically start with "vs_"'); } console.log("\u{1F50D} OpenAI Vector Store Tool - Search started:", { vectorStoreId, queryLength: query.length, maxNumResults, hasFilters: !!filters, rewriteQuery, hasRankingOptions: !!rankingOptions }); try { const searchBody = { query: query.trim(), max_num_results: maxNumResults, rewrite_query: rewriteQuery }; if (filters) { searchBody.filters = filters; } if (rankingOptions) { searchBody.ranking_options = rankingOptions; } const response = 
await fetch( `https://api.openai.com/v1/vector_stores/${vectorStoreId}/search`, { method: "POST", headers: { "Authorization": `Bearer ${apiKey}`, "Content-Type": "application/json" }, body: JSON.stringify(searchBody) } ); if (!response.ok) { const errorData = await response.text(); console.error("\u274C OpenAI Vector Store API error:", { status: response.status, statusText: response.statusText, body: errorData }); if (response.status === 401) { throw new Error("OpenAI API authentication failed. Please check your API key."); } if (response.status === 404) { throw new Error(`Vector store "${vectorStoreId}" not found. Please check the vector store ID.`); } if (response.status === 429) { throw new Error("OpenAI API rate limit exceeded. Please try again in a moment."); } if (response.status === 400) { throw new Error(`Bad request to OpenAI Vector Store API: ${errorData}`); } throw new Error(`OpenAI Vector Store API error (${response.status}): ${response.statusText}`); } const searchResults = await response.json(); console.log("\u2705 OpenAI Vector Store Tool - Search completed:", { vectorStoreId, resultsCount: searchResults.data?.length || 0, hasMore: searchResults.has_more || false, queryLength: query.length }); const results = searchResults.data || []; const processedResults = results.map((result) => ({ fileId: result.file_id, filename: result.filename, score: result.score, attributes: result.attributes || {}, content: result.content?.map((c) => c.text).join(" ") || "" })); return { success: true, query: searchResults.search_query || query, results: processedResults, totalResults: results.length, hasMore: searchResults.has_more || false, nextPage: searchResults.next_page || null, vectorStoreId, searchParameters: { maxNumResults, rewriteQuery, filtersApplied: !!filters, rankingOptionsApplied: !!rankingOptions }, metadata: { searchedAt: (/* @__PURE__ */ new Date()).toISOString(), queryLength: query.length, resultsReturned: results.length } }; } catch (error) { console.error("\u274C OpenAI Vector Store Tool - Search failed:", { vectorStoreId, queryLength: query.length, error: error instanceof Error ? error.message : JSON.stringify(error) }); if (error instanceof TypeError && error.message.includes("fetch")) { throw new Error("Network error connecting to OpenAI API. 
Please check your internet connection."); } if (error instanceof Error) { throw error; } throw new Error(`Vector store search failed: ${JSON.stringify(error)}`); } } }); var openaiVectorStoreTool = toOpenAgenticTool(rawOpenAIVectorStoreTool, { toolId: "openai_vector_store_search", name: "OpenAI Vector Store Search", useCases: [ "Search for relevant documents in OpenAI vector stores", "Retrieve contextual information for RAG applications", "Query knowledge bases with semantic search", "Filter documents by metadata attributes", "Build custom retrieval systems with OpenAI embeddings" ], logo: "https://www.openagentic.org/tools/openai.svg" }); // src/tools/gemini-image.ts import { tool as tool5 } from "ai"; import { z as z6 } from "zod"; import { generateText as generateText2 } from "ai"; import { createGoogleGenerativeAI } from "@ai-sdk/google"; var SUPPORTED_MODELS3 = [ "gemini-2.0-flash-preview-image-generation" // 'gemini-2.0-flash-thinking-exp', ]; var IMAGE_STYLES = [ "photorealistic", "artistic", "cartoon", "sketch", "digital-art", "oil-painting", "watercolor", "minimalist", "abstract", "cinematic" ]; var rawGeminiImageTool = tool5({ description: "Generate high-quality images using Google Gemini models with automatic S3 upload and storage. Supports both text prompts and messages with images for reference-based generation.", parameters: z6.object({ // Support either a string prompt OR message array prompt: z6.string().min(1).max(4e3).optional().describe("The text prompt to generate an image from (max 4000 characters). Use this OR messages, not both."), messages: z6.array(z6.object({ role: z6.enum(["system", "user", "assistant", "tool"]), content: z6.union([ z6.string(), z6.array(z6.object({ type: z6.string(), text: z6.string().optional(), image: z6.union([z6.string(), z6.any()]).optional(), mimeType: z6.string().optional() })) ]) })).optional().describe("Array of messages that may contain text and images. Use this OR prompt, not both."), model: z6.string().optional().default("gemini-2.0-flash-preview-image-generation").describe("The Gemini model to use (gemini-2.0-flash-preview-image-generation)"), style: z6.enum(IMAGE_STYLES).optional().describe("The style of the image (photorealistic, artistic, cartoon, sketch, digital-art, oil-painting, watercolor, minimalist, abstract, cinematic)"), aspectRatio: z6.enum(["1:1", "16:9", "9:16", "4:3", "3:4"]).optional().default("1:1").describe("The aspect ratio of the image (default: 1:1)"), quality: z6.enum(["s