@juspay/neurolink
Universal AI Development Platform with working MCP integration, multi-provider support, and a professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more.
/**
* NeuroLink AI Analysis Tools
* AI-focused MCP tools for usage analysis, performance benchmarking, and parameter optimization
* Tools: analyze-ai-usage, benchmark-provider-performance, optimize-prompt-parameters
*/
import { z } from "zod";
import { AIProviderFactory } from "../../../core/factory.js";
import { getBestProvider, getAvailableProviders } from "../../../utils/providerUtils.js";
import { logger } from "../../../utils/logger.js";
/**
* Input Schemas for AI Analysis Tools
*/
const AnalyzeUsageSchema = z.object({
sessionId: z.string().optional(),
timeRange: z.enum(["1h", "24h", "7d", "30d"]).default("24h"),
provider: z
.enum([
"openai",
"bedrock",
"vertex",
"anthropic",
"google-ai",
"azure",
"huggingface",
"ollama",
"mistral",
])
.optional(),
includeTokenBreakdown: z.boolean().default(true),
includeCostEstimation: z.boolean().default(true),
});
const BenchmarkSchema = z.object({
providers: z
.array(z.enum([
"openai",
"bedrock",
"vertex",
"anthropic",
"google-ai",
"azure",
"huggingface",
"ollama",
"mistral",
]))
.optional(),
testPrompts: z.array(z.string()).optional(),
iterations: z.number().min(1).max(5).default(2),
metrics: z
.array(z.enum(["latency", "quality", "cost", "tokens"]))
.default(["latency", "quality"]),
maxTokens: z.number().positive().default(100),
});
const OptimizeParametersSchema = z.object({
prompt: z.string().min(1, "Prompt is required for optimization"),
provider: z
.enum([
"openai",
"bedrock",
"vertex",
"anthropic",
"google-ai",
"azure",
"huggingface",
"ollama",
"mistral",
])
.optional(),
targetLength: z.number().positive().optional(),
style: z
.enum(["creative", "balanced", "precise", "factual"])
.default("balanced"),
optimizeFor: z
.enum(["speed", "quality", "cost", "tokens"])
.default("quality"),
iterations: z.number().min(1).max(3).default(2),
});
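/*
 * Illustrative only: zod's .parse() fills in the declared defaults, so the
 * tools below can rely on fields like timeRange, iterations, and metrics
 * always being present. (The input values here are hypothetical, not part
 * of this module.)
 *
 * AnalyzeUsageSchema.parse({ provider: "openai" });
 * // => { timeRange: "24h", provider: "openai",
 * //      includeTokenBreakdown: true, includeCostEstimation: true }
 */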
/**
* AI Usage Analysis Tool
* Analyzes AI usage patterns, token consumption, and cost optimization opportunities
*/
const _analyzeAIUsageTool = {
name: "analyze-ai-usage",
description: "Analyze AI usage patterns, token consumption, and cost optimization opportunities",
category: "ai-analysis",
inputSchema: AnalyzeUsageSchema,
isImplemented: true,
permissions: ["read", "analytics"],
version: "1.2.0", // Updated version with real AI
execute: async (params, context) => {
const typedParams = params;
const startTime = Date.now();
try {
logger.debug(`[AI-Analysis] Starting real AI-powered usage analysis for timeRange: ${typedParams.timeRange}`);
const providerName = await getBestProvider();
const provider = await AIProviderFactory.createProvider(providerName);
if (!provider) {
throw new Error(`Failed to create AI provider: ${providerName}`);
}
const analysisPrompt = `
Analyze hypothetical AI usage data for a project based on the following parameters.
Time Range: ${typedParams.timeRange}
Provider Focus: ${typedParams.provider || "all"}
Generate a realistic analysis including:
1. A summary of usage statistics (totalRequests, totalTokens).
2. A breakdown of usage by provider (OpenAI, Bedrock, Vertex, Google AI, Anthropic, Azure, Hugging Face, Ollama, Mistral).
3. Key insights and actionable recommendations for cost and performance optimization.
Return the result as a valid JSON object with keys: "analysis", "insights".
- "analysis" should contain: timeRange, totalRequests, totalTokens, and a "providers" object.
- "insights" should contain: mostUsedProvider, avgCostPerToken, peakUsageHours, costOptimizationPotential, and an array of "recommendations".
`;
const result = await provider.generate({
prompt: analysisPrompt,
maxTokens: 800,
temperature: 0.5,
});
if (!result || !result.content) {
throw new Error("AI provider returned no result for usage analysis.");
}
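// The prompt asks the model for raw JSON; JSON.parse throws on malformed
// output, which surfaces as a failed tool result via the catch block below.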
const parsedData = JSON.parse(result.content);
const executionTime = Date.now() - startTime;
return {
success: true,
data: {
...parsedData,
generatedAt: new Date().toISOString(),
sessionId: context.sessionId,
},
usage: {
...result.usage,
executionTime,
provider: providerName,
model: "analysis-engine",
},
metadata: {
toolName: "analyze-ai-usage",
serverId: "neurolink-ai-core",
sessionId: context.sessionId,
timestamp: Date.now(),
executionTime,
},
};
}
catch (error) {
const executionTime = Date.now() - startTime;
const errorMessage = error instanceof Error ? error.message : String(error);
return {
success: false,
error: errorMessage,
metadata: {
toolName: "analyze-ai-usage",
serverId: "neurolink-ai-core",
sessionId: context.sessionId,
timestamp: Date.now(),
executionTime,
},
};
}
},
};
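/*
 * Illustrative usage (a sketch, not part of the module): tool calls normally
 * arrive through the MCP tool registry, but a direct invocation would look
 * like this. The context object and its sessionId value are hypothetical.
 *
 * const result = await _analyzeAIUsageTool.execute(
 *     AnalyzeUsageSchema.parse({ timeRange: "7d" }),
 *     { sessionId: "demo-session" },
 * );
 * if (result.success) {
 *     console.log(result.data.insights); // "insights" per the prompt's JSON contract
 * }
 */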
/**
* Provider Performance Benchmarking Tool
* Benchmarks AI provider performance across latency, quality, and cost metrics
*/
const _benchmarkProviderPerformanceTool = {
name: "benchmark-provider-performance",
description: "Benchmark AI provider performance across latency, quality, and cost metrics",
category: "ai-analysis",
inputSchema: BenchmarkSchema,
isImplemented: true,
permissions: ["read", "benchmark"],
version: "1.1.0", // Updated version with real AI
execute: async (params, context) => {
const typedParams = params;
const startTime = Date.now();
try {
const providersToTest = typedParams.providers || getAvailableProviders();
const testPrompts = typedParams.testPrompts || [
"Explain quantum computing in simple terms",
];
const benchmarkResults = [];
for (const providerName of providersToTest) {
const provider = await AIProviderFactory.createProvider(providerName);
if (!provider) {
benchmarkResults.push({
provider: providerName,
error: "Failed to create provider.",
});
continue;
}
let totalLatency = 0, totalTokens = 0, successfulTests = 0;
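// Run each test prompt sequentially for the configured number of iterations,
// accumulating wall-clock latency and token counts for successful calls. Note
// that a provider that throws here aborts the whole benchmark via the outer catch.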
for (const prompt of testPrompts) {
for (let i = 0; i < typedParams.iterations; i++) {
const testStartTime = Date.now();
const result = await provider.generate({
prompt: prompt,
maxTokens: typedParams.maxTokens,
});
if (result && result.usage) {
totalLatency += Date.now() - testStartTime;
totalTokens += result.usage.total || 0;
successfulTests++;
}
}
}
benchmarkResults.push({
provider: providerName,
metrics: {
avgLatency: successfulTests > 0 ? Math.round(totalLatency / successfulTests) : 0,
total: totalTokens,
successRate: (successfulTests / (testPrompts.length * typedParams.iterations)) * 100,
},
});
}
const executionTime = Date.now() - startTime;
return {
success: true,
data: {
results: benchmarkResults,
benchmarkedAt: new Date().toISOString(),
},
usage: {
executionTime,
provider: "benchmark-engine",
model: "multi-provider",
},
metadata: {
toolName: "benchmark-provider-performance",
serverId: "neurolink-ai-core",
sessionId: context.sessionId,
timestamp: Date.now(),
executionTime,
},
};
}
catch (error) {
const executionTime = Date.now() - startTime;
const errorMessage = error instanceof Error ? error.message : String(error);
return {
success: false,
error: errorMessage,
metadata: {
toolName: "benchmark-provider-performance",
serverId: "neurolink-ai-core",
sessionId: context.sessionId,
timestamp: Date.now(),
executionTime,
},
};
}
},
};
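/*
 * Illustrative usage (hypothetical provider list; each listed provider needs
 * credentials configured, or the benchmark aborts via the outer catch):
 *
 * const bench = await _benchmarkProviderPerformanceTool.execute(
 *     BenchmarkSchema.parse({ providers: ["openai", "anthropic"], iterations: 1 }),
 *     { sessionId: "bench-session" },
 * );
 * // bench.data.results -> [{ provider, metrics: { avgLatency, total, successRate } }, ...]
 */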
/**
* Prompt Parameter Optimization Tool
* Optimizes prompt parameters (temperature, max tokens) for better AI output quality and efficiency
*/
const _optimizePromptParametersTool = {
name: "optimize-prompt-parameters",
description: "Optimize prompt parameters (temperature, max tokens) for better AI output quality and efficiency",
category: "ai-optimization",
inputSchema: OptimizeParametersSchema,
isImplemented: true,
permissions: ["read", "optimize"],
version: "1.1.0", // Updated version with real AI
execute: async (params, context) => {
const typedParams = params;
const startTime = Date.now();
try {
const providerName = typedParams.provider || (await getBestProvider());
const provider = await AIProviderFactory.createProvider(providerName);
if (!provider) {
throw new Error(`Failed to create provider: ${providerName}`);
}
const optimizationResults = [];
const temperatures = [0.2, 0.7, 1.0]; // Test a range of temperatures
for (const temp of temperatures) {
const result = await provider.generate({
prompt: typedParams.prompt,
temperature: temp,
maxTokens: typedParams.targetLength || 250,
});
if (result) {
optimizationResults.push({
parameters: { temperature: temp },
output: result.content,
usage: result.usage,
});
}
}
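// Second pass: ask the best available model to judge the candidate outputs
// against the requested optimization goal and return a JSON verdict.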
const analysisProvider = await AIProviderFactory.createProvider(await getBestProvider());
if (!analysisProvider) {
throw new Error("Failed to create analysis provider.");
}
const analysisPrompt = `
Analyze the following AI-generated responses for the prompt "${typedParams.prompt}" based on the optimization goal of "${typedParams.optimizeFor}".
Responses:
${optimizationResults.map((r, i) => `Response ${i + 1} (Temp: ${r.parameters.temperature}):\n${r.output}`).join("\n\n")}
Determine which set of parameters is optimal and provide a recommendation.
Return a valid JSON object with keys: "optimalParameters", "reasoning", "recommendations".
`;
const analysisResult = await analysisProvider.generate({
prompt: analysisPrompt,
maxTokens: 500,
});
if (!analysisResult || !analysisResult.content) {
throw new Error("Optimization analysis failed.");
}
const parsedAnalysis = JSON.parse(analysisResult.content);
const executionTime = Date.now() - startTime;
return {
success: true,
data: {
optimization: {
originalPrompt: typedParams.prompt,
optimizeFor: typedParams.optimizeFor,
provider: providerName,
},
results: optimizationResults,
recommendations: parsedAnalysis,
optimizedAt: new Date().toISOString(),
},
usage: {
executionTime,
provider: "optimization-engine",
model: "multi-provider",
},
metadata: {
toolName: "optimize-prompt-parameters",
serverId: "neurolink-ai-core",
sessionId: context.sessionId,
timestamp: Date.now(),
executionTime,
},
};
}
catch (error) {
const executionTime = Date.now() - startTime;
const errorMessage = error instanceof Error ? error.message : String(error);
return {
success: false,
error: errorMessage,
metadata: {
toolName: "optimize-prompt-parameters",
serverId: "neurolink-ai-core",
sessionId: context.sessionId,
timestamp: Date.now(),
executionTime,
},
};
}
},
};
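/*
 * Illustrative usage (hypothetical values): the tool generates the prompt at
 * temperatures 0.2, 0.7, and 1.0, then has a second model pick the best run.
 *
 * const opt = await _optimizePromptParametersTool.execute(
 *     OptimizeParametersSchema.parse({ prompt: "Summarize this quarter's incidents", optimizeFor: "cost" }),
 *     { sessionId: "opt-session" },
 * );
 * // opt.data.recommendations -> { optimalParameters, reasoning, recommendations }
 */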