@juspay/neurolink
Universal AI Development Platform with working MCP integration, multi-provider support, and a professional CLI. Built-in tools are operational and 58+ external MCP servers are discoverable. Connect to filesystem, GitHub, database operations, and more.
import { NeuroLink } from "../../lib/neurolink.js";
import { configManager } from "../commands/config.js";
import { ContextFactory } from "../../lib/types/contextTypes.js";
import { ModelsCommandFactory } from "../commands/models.js";
import { MCPCommandFactory } from "../commands/mcp.js";
import { OllamaCommandFactory } from "./ollamaCommandFactory.js";
import { SageMakerCommandFactory } from "./sagemakerCommandFactory.js";
import ora from "ora";
import chalk from "chalk";
import { logger } from "../../lib/utils/logger.js";
import fs from "fs";
/**
* CLI command factory for NeuroLink's generate, stream, batch, provider, config, and related commands
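*
* Minimal wiring sketch (assumes a typical yargs setup; illustrative, not part of this file):
* @example
* import yargs from "yargs";
* import { hideBin } from "yargs/helpers";
* yargs(hideBin(process.argv))
*   .command(CLICommandFactory.createGenerateCommand())
*   .command(CLICommandFactory.createStreamCommand())
*   .command(CLICommandFactory.createBatchCommand())
*   .demandCommand(1)
*   .parse();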
*/
export class CLICommandFactory {
// Common options available on all commands
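// Illustrative invocation (model name is hypothetical):
//   neurolink generate "Explain quantum computing" -p openai -m gpt-4o -t 0.2 --format json
// yargs parses these flags into argv.provider, argv.model, argv.temperature, argv.format, etc.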
static commonOptions = {
// Core generation options
provider: {
choices: [
"auto",
"openai",
"openai-compatible",
"bedrock",
"vertex",
"googleVertex",
"anthropic",
"azure",
"google-ai",
"huggingface",
"ollama",
"mistral",
"litellm",
"sagemaker",
],
default: "auto",
description: "AI provider to use (auto-selects best available)",
alias: "p",
},
model: {
type: "string",
description: "Specific model to use (e.g. gemini-2.5-pro, gemini-2.5-flash)",
alias: "m",
},
temperature: {
type: "number",
default: 0.7,
description: "Creativity level (0.0 = focused, 1.0 = creative)",
alias: "t",
},
maxTokens: {
type: "number",
default: 1000,
description: "Maximum tokens to generate",
alias: "max",
},
system: {
type: "string",
description: "System prompt to guide AI behavior",
alias: "s",
},
// Output control options
format: {
choices: ["text", "json", "table"],
default: "text",
alias: ["f", "output-format"],
description: "Output format",
},
output: {
type: "string",
description: "Save output to file",
alias: "o",
},
// Behavior control options
timeout: {
type: "number",
default: 120,
description: "Maximum execution time in seconds",
},
delay: {
type: "number",
description: "Delay between operations (ms)",
},
// Tools & features options
disableTools: {
type: "boolean",
default: false,
description: "Disable MCP tool integration (tools enabled by default)",
},
enableAnalytics: {
type: "boolean",
default: false,
description: "Enable usage analytics collection",
},
enableEvaluation: {
type: "boolean",
default: false,
description: "Enable AI response quality evaluation",
},
domain: {
type: "string",
choices: [
"healthcare",
"finance",
"analytics",
"ecommerce",
"education",
"legal",
"technology",
"generic",
"auto",
],
description: "Domain type for specialized processing and optimization",
alias: "d",
},
evaluationDomain: {
type: "string",
description: "Domain expertise for evaluation (e.g., 'AI coding assistant', 'Customer service expert')",
},
toolUsageContext: {
type: "string",
description: "Tool usage context for evaluation (e.g., 'Used sales-data MCP tools')",
},
domainAware: {
type: "boolean",
default: false,
description: "Use domain-aware evaluation",
},
context: {
type: "string",
description: "JSON context object for custom data",
},
// Debug & output options
debug: {
type: "boolean",
alias: ["v", "verbose"],
default: false,
description: "Enable debug mode with verbose output",
},
quiet: {
type: "boolean",
alias: "q",
default: false,
description: "Suppress non-essential output",
},
noColor: {
type: "boolean",
default: false,
description: "Disable colored output (useful for CI/scripts)",
},
configFile: {
type: "string",
description: "Path to custom configuration file",
},
dryRun: {
type: "boolean",
default: false,
description: "Test command without making actual API calls (for testing)",
},
};
// Helper method to build options for commands
static buildOptions(yargs, additionalOptions = {}) {
return yargs.options({
...this.commonOptions,
...additionalOptions,
});
}
// Helper method to process common options
static processOptions(argv) {
// Handle noColor option by disabling chalk
if (argv.noColor) {
process.env.FORCE_COLOR = "0";
}
// Process context using ContextFactory for type-safe integration
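// Illustrative value (hypothetical keys): --context '{"userId":"123","feature":"checkout"}'
// Anything that parses as JSON and passes ContextFactory.validateContext is accepted.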
let processedContext;
let contextConfig;
if (argv.context) {
let rawContext;
if (typeof argv.context === "string") {
try {
rawContext = JSON.parse(argv.context);
}
catch (err) {
const contextStr = argv.context;
const truncatedJson = contextStr.length > 100
? `${contextStr.slice(0, 100)}...`
: contextStr;
logger.error(`Invalid JSON in --context parameter: ${err.message}. Received: ${truncatedJson}`);
process.exit(1);
}
}
else {
rawContext = argv.context;
}
const validatedContext = ContextFactory.validateContext(rawContext);
if (validatedContext) {
processedContext = validatedContext;
// Configure context integration based on CLI usage
contextConfig = {
mode: "prompt_prefix", // Add context as prompt prefix for CLI usage
includeInPrompt: true,
includeInAnalytics: true,
includeInEvaluation: true,
maxLength: 500, // Reasonable limit for CLI context
};
}
else if (argv.debug) {
logger.debug("Invalid context provided, skipping context integration");
}
}
return {
provider: argv.provider === "auto" ? undefined : argv.provider,
model: argv.model,
temperature: argv.temperature,
maxTokens: argv.maxTokens,
systemPrompt: argv.system,
timeout: argv.timeout,
disableTools: argv.disableTools,
enableAnalytics: argv.enableAnalytics,
enableEvaluation: argv.enableEvaluation,
domain: argv.domain,
evaluationDomain: argv.evaluationDomain,
toolUsageContext: argv.toolUsageContext,
domainAware: argv.domainAware,
context: processedContext,
contextConfig,
debug: argv.debug,
quiet: argv.quiet,
format: argv.format,
output: argv.output,
delay: argv.delay,
noColor: argv.noColor,
configFile: argv.configFile,
dryRun: argv.dryRun,
};
}
// Helper method to handle output
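// Precedence: "json" stringifies any result; "table" applies only to arrays (non-arrays fall
// through to text handling); text mode unwraps .content or .text and stringifies anything else.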
static handleOutput(result, options) {
let output;
if (options.format === "json") {
output = JSON.stringify(result, null, 2);
}
else if (options.format === "table" && Array.isArray(result)) {
logger.table(result);
return;
}
else {
if (typeof result === "string") {
output = result;
}
else if (result && typeof result === "object" && "content" in result) {
const generateResult = result;
output = generateResult.content;
// Add analytics display for text mode when enabled
if (options.enableAnalytics && generateResult.analytics) {
output += this.formatAnalyticsForTextMode(generateResult);
}
}
else if (result && typeof result === "object" && "text" in result) {
output = result.text;
}
else {
output = JSON.stringify(result);
}
}
if (options.output) {
fs.writeFileSync(options.output, output);
if (!options.quiet) {
logger.always(`Output saved to ${options.output}`);
}
}
else {
logger.always(output);
}
}
// Helper method to validate token usage data
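// e.g. { input: 10, output: 15, total: 25 } passes; missing or non-numeric fields fail.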
static isValidTokenUsage(tokens) {
return !!(tokens &&
typeof tokens === "object" &&
tokens !== null &&
typeof tokens.input === "number" &&
typeof tokens.output === "number" &&
typeof tokens.total === "number");
}
// Helper method to format analytics for text mode display
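// Illustrative text-mode output (values are examples only):
//   📊 Analytics:
//   Provider: openai (gpt-4o-mini)
//   Tokens: 10 input + 15 output = 25 total
//   Cost: $0.00025
//   Time: 0.2s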
static formatAnalyticsForTextMode(result) {
if (!result.analytics) {
return "";
}
const analytics = result.analytics;
let analyticsText = "\n\n📊 Analytics:\n";
// Provider and model info
analyticsText += ` Provider: ${analytics.provider}`;
// Check for the model name in multiple locations: result.model, analytics.model, or analytics.modelName
const modelName = result.model ||
analytics.model ||
analytics.modelName;
if (modelName) {
analyticsText += ` (${modelName})`;
}
analyticsText += "\n";
// Token usage
if (this.isValidTokenUsage(analytics.tokens)) {
const tokens = analytics.tokens;
analyticsText += ` Tokens: ${tokens.input} input + ${tokens.output} output = ${tokens.total} total\n`;
}
// Cost information
if (analytics.cost !== undefined &&
analytics.cost !== null &&
typeof analytics.cost === "number") {
analyticsText += ` Cost: $${analytics.cost.toFixed(5)}\n`;
}
// Response time
if (analytics.responseTime && typeof analytics.responseTime === "number") {
const timeInSeconds = (analytics.responseTime / 1000).toFixed(1);
analyticsText += ` Time: ${timeInSeconds}s\n`;
}
// Tools used
if (result.toolsUsed && result.toolsUsed.length > 0) {
analyticsText += ` Tools: ${result.toolsUsed.join(", ")}\n`;
}
// Context information
if (analytics.context &&
typeof analytics.context === "object" &&
analytics.context !== null) {
const contextEntries = Object.entries(analytics.context);
if (contextEntries.length > 0) {
const contextItems = contextEntries.map(([key, value]) => `${key}=${value}`);
analyticsText += ` Context: ${contextItems.join(", ")}\n`;
}
}
return analyticsText;
}
/**
* Create the new primary 'generate' command
*/
static createGenerateCommand() {
return {
command: ["generate <input>", "gen <input>"],
describe: "Generate content using AI providers",
builder: (yargs) => {
return this.buildOptions(yargs
.positional("input", {
type: "string",
description: "Text prompt for AI generation (or read from stdin)",
})
.example('$0 generate "Explain quantum computing"', "Basic generation")
.example('$0 gen "Write a Python function" --provider openai', "Use specific provider")
.example('$0 generate "Code review" -m gpt-4 -t 0.3', "Use specific model and temperature")
.example('echo "Summarize this" | $0 generate', "Use stdin input")
.example('$0 generate "Analyze data" --enable-analytics', "Enable usage analytics"));
},
handler: async (argv) => await this.executeGenerate(argv),
};
}
/**
* Create stream command
*/
static createStreamCommand() {
return {
command: "stream <input>",
describe: "Stream generation in real-time",
builder: (yargs) => {
return this.buildOptions(yargs
.positional("input", {
type: "string",
description: "Text prompt for streaming (or read from stdin)",
})
.example('$0 stream "Write a story about space"', "Stream a creative story")
.example('$0 stream "Explain machine learning" -p anthropic', "Stream with specific provider")
.example('$0 stream "Code walkthrough" --output story.txt', "Stream to file")
.example('echo "Live demo" | $0 stream', "Stream from stdin"));
},
handler: async (argv) => await this.executeStream(argv),
};
}
/**
* Create batch command
*/
static createBatchCommand() {
return {
command: "batch <file>",
describe: "Process multiple prompts from a file",
builder: (yargs) => {
return this.buildOptions(yargs
.positional("file", {
type: "string",
description: "File with prompts (one per line)",
demandOption: true,
})
.example("$0 batch prompts.txt", "Process prompts from file")
.example("$0 batch questions.txt --format json", "Export results as JSON")
.example("$0 batch tasks.txt -p vertex --delay 2000", "Use Vertex AI with 2s delay")
.example("$0 batch batch.txt --output results.json", "Save results to file"));
},
handler: async (argv) => await this.executeBatch(argv),
};
}
/**
* Create provider commands
*/
static createProviderCommands() {
return {
command: "provider <subcommand>",
describe: "Manage AI provider configurations and status",
builder: (yargs) => {
return yargs
.command("status", "Check status of all configured AI providers", (y) => this.buildOptions(y)
.example("$0 provider status", "Check all provider status")
.example("$0 provider status --verbose", "Detailed provider diagnostics")
.example("$0 provider status --quiet", "Minimal status output"), (argv) => CLICommandFactory.executeProviderStatus(argv))
.demandCommand(1, "Please specify a provider subcommand");
},
handler: () => { }, // No-op handler as subcommands handle everything
};
}
/**
* Create status command (alias for provider status)
*/
static createStatusCommand() {
return {
command: "status",
describe: "Check AI provider connectivity and performance (alias for provider status)",
builder: (yargs) => this.buildOptions(yargs)
.example("$0 status", "Quick provider status check")
.example("$0 status --verbose", "Detailed connectivity diagnostics")
.example("$0 status --format json", "Export status as JSON"),
handler: async (argv) => await CLICommandFactory.executeProviderStatus(argv),
};
}
/**
* Create models commands
*/
static createModelsCommands() {
return ModelsCommandFactory.createModelsCommands();
}
/**
* Create MCP commands
*/
static createMCPCommands() {
return MCPCommandFactory.createMCPCommands();
}
/**
* Create discover command
*/
static createDiscoverCommand() {
return MCPCommandFactory.createDiscoverCommand();
}
/**
* Create config commands
*/
static createConfigCommands() {
return {
command: "config <subcommand>",
describe: "Manage NeuroLink configuration",
builder: (yargs) => {
return yargs
.command("init", "Interactive configuration setup wizard", (y) => this.buildOptions(y), async (_argv) => {
await configManager.initInteractive();
})
.command("show", "Display current configuration", (y) => this.buildOptions(y), async (_argv) => {
configManager.showConfig();
})
.command("validate", "Validate current configuration", (y) => this.buildOptions(y), async (_argv) => {
const result = configManager.validateConfig();
if (result.valid) {
logger.always(chalk.green("✅ Configuration is valid"));
}
else {
logger.always(chalk.red("❌ Configuration has errors:"));
result.errors.forEach((error) => logger.always(` • ${error}`));
process.exit(1);
}
})
.command("reset", "Reset configuration to defaults", (y) => this.buildOptions(y), async (_argv) => {
configManager.resetConfig();
})
.command("export", "Export current configuration", (y) => this.buildOptions(y), (argv) => this.executeConfigExport(argv))
.demandCommand(1, "");
},
handler: () => { }, // No-op handler as subcommands handle everything
};
}
/**
* Create validate command
*/
static createValidateCommand() {
return {
command: "validate",
describe: "Validate current configuration (alias for 'config validate')",
builder: (yargs) => this.buildOptions(yargs),
handler: async (_argv) => {
const result = configManager.validateConfig();
if (result.valid) {
logger.always(chalk.green("✅ Configuration is valid"));
}
else {
logger.always(chalk.red("❌ Configuration has errors:"));
result.errors.forEach((error) => logger.always(` • ${error}`));
throw new Error("Configuration is invalid. See errors above.");
}
},
};
}
/**
* Create get-best-provider command
*/
static createBestProviderCommand() {
return {
command: "get-best-provider",
describe: "Show the best available AI provider",
builder: (yargs) => this.buildOptions(yargs)
.example("$0 get-best-provider", "Get best available provider")
.example("$0 get-best-provider --format json", "Get provider as JSON")
.example("$0 get-best-provider --quiet", "Just the provider name"),
handler: async (argv) => await this.executeGetBestProvider(argv),
};
}
/**
* Create Ollama commands
*/
static createOllamaCommands() {
return OllamaCommandFactory.createOllamaCommands();
}
/**
* Create SageMaker commands
*/
static createSageMakerCommands() {
return SageMakerCommandFactory.createSageMakerCommands();
}
/**
* Create completion command
*/
static createCompletionCommand() {
return {
command: "completion",
describe: "Generate shell completion script",
builder: (yargs) => this.buildOptions(yargs)
.example("$0 completion", "Generate shell completion")
.example("$0 completion > ~/.neurolink-completion.sh", "Save completion script")
.example("source ~/.neurolink-completion.sh", "Enable completions (bash)")
.epilogue("Add the completion script to your shell profile for persistent completions"),
handler: async (argv) => await this.executeCompletion(argv),
};
}
/**
* Execute provider status command
*/
static async executeProviderStatus(argv) {
if (argv.verbose && !argv.quiet) {
logger.always(chalk.yellow("ℹ️ Verbose mode enabled. Displaying detailed status.\n"));
}
const spinner = argv.quiet
? null
: ora("🔍 Checking AI provider status...\n").start();
try {
// Handle dry-run mode for provider status
if (argv.dryRun) {
const mockResults = [
{
provider: "google-ai",
status: "working",
configured: true,
responseTime: 150,
model: "gemini-2.5-flash",
},
{
provider: "openai",
status: "working",
configured: true,
responseTime: 200,
model: "gpt-4o-mini",
},
{
provider: "anthropic",
status: "working",
configured: true,
responseTime: 180,
model: "claude-3-haiku",
},
{ provider: "bedrock", status: "not configured", configured: false },
{ provider: "vertex", status: "not configured", configured: false },
];
if (spinner) {
spinner.succeed("Provider check complete (dry-run): 3/3 providers working");
}
// Display mock results
for (const result of mockResults) {
const status = result.status === "working"
? chalk.green("✅ Working")
: result.status === "failed"
? chalk.red("❌ Failed")
: chalk.gray("⚪ Not configured");
const time = result.responseTime ? ` (${result.responseTime}ms)` : "";
const model = result.model ? ` [${result.model}]` : "";
logger.always(`${result.provider}: ${status}${time}${model}`);
}
if (argv.verbose && !argv.quiet) {
logger.always(chalk.blue("\n📋 Detailed Results (Dry-run):"));
logger.always(JSON.stringify(mockResults, null, 2));
}
return;
}
// Use SDK's provider diagnostic method instead of manual testing
const sdk = new NeuroLink();
const results = await sdk.getProviderStatus({ quiet: !!argv.quiet });
if (spinner) {
const working = results.filter((r) => r.status === "working").length;
const configured = results.filter((r) => r.configured).length;
spinner.succeed(`Provider check complete: ${working}/${configured} providers working`);
}
// Display results
for (const result of results) {
const status = result.status === "working"
? chalk.green("✅ Working")
: result.status === "failed"
? chalk.red("❌ Failed")
: chalk.gray("⚪ Not configured");
const time = result.responseTime ? ` (${result.responseTime}ms)` : "";
const model = result.model ? ` [${result.model}]` : "";
logger.always(`${result.provider}: ${status}${time}${model}`);
if (argv.verbose && result.error) {
logger.always(` Error: ${chalk.red(result.error)}`);
}
}
if (argv.verbose && !argv.quiet) {
logger.always(chalk.blue("\n📋 Detailed Results:"));
logger.always(JSON.stringify(results, null, 2));
}
}
catch (error) {
if (spinner) {
spinner.fail("Provider status check failed");
}
logger.error(chalk.red("Error checking provider status:"), error);
process.exit(1);
}
}
/**
* Execute the generate command
*/
static async executeGenerate(argv) {
// Handle stdin input if no input provided
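// e.g. `echo "Summarize this" | neurolink generate` (matches the stdin example above)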
if (!argv.input && !process.stdin.isTTY) {
let stdinData = "";
process.stdin.setEncoding("utf8");
for await (const chunk of process.stdin) {
stdinData += chunk;
}
argv.input = stdinData.trim();
if (!argv.input) {
throw new Error("No input received from stdin");
}
}
else if (!argv.input) {
throw new Error('Input required. Use: neurolink generate "your prompt" or echo "prompt" | neurolink generate');
}
const options = this.processOptions(argv);
const spinner = argv.quiet ? null : ora("🤖 Generating text...").start();
try {
// Add delay if specified
if (options.delay) {
await new Promise((resolve) => setTimeout(resolve, options.delay));
}
// Process context if provided
let inputText = argv.input;
let contextMetadata;
if (options.context && options.contextConfig) {
const processedContextResult = ContextFactory.processContext(options.context, options.contextConfig);
// Integrate context into prompt if configured
if (processedContextResult.processedContext) {
inputText = processedContextResult.processedContext + inputText;
}
// Add context metadata for analytics
contextMetadata = {
...ContextFactory.extractAnalyticsContext(options.context),
contextMode: processedContextResult.config.mode,
contextTruncated: processedContextResult.metadata.truncated,
};
if (options.debug) {
logger.debug("Context processed:", {
mode: processedContextResult.config.mode,
truncated: processedContextResult.metadata.truncated,
processingTime: processedContextResult.metadata.processingTime,
});
}
}
// Handle dry-run mode for testing
if (options.dryRun) {
const mockResult = {
content: "Mock response for testing purposes",
provider: options.provider || "auto",
model: options.model || "test-model",
usage: {
inputTokens: 10,
outputTokens: 15,
totalTokens: 25,
},
responseTime: 150,
analytics: options.enableAnalytics
? {
provider: options.provider || "auto",
model: options.model || "test-model",
tokens: { input: 10, output: 15, total: 25 },
cost: 0.00025,
responseTime: 150,
context: contextMetadata,
}
: undefined,
evaluation: options.enableEvaluation
? {
relevance: 8,
accuracy: 9,
completeness: 8,
overall: 8.3,
isOffTopic: false,
alertSeverity: "none",
reasoning: "Test evaluation response",
evaluationModel: "test-evaluator",
evaluationTime: 50,
}
: undefined,
};
if (spinner) {
spinner.succeed(chalk.green("✅ Dry-run completed successfully!"));
}
this.handleOutput(mockResult, options);
if (options.debug) {
logger.debug("\n" + chalk.yellow("Debug Information (Dry-run):"));
logger.debug("Provider:", mockResult.provider);
logger.debug("Model:", mockResult.model);
logger.debug("Mode: DRY-RUN (no actual API calls made)");
}
process.exit(0);
return;
}
const sdk = new NeuroLink();
if (options.debug) {
logger.debug("CLI Tools configuration:", {
disableTools: options.disableTools,
toolsEnabled: !options.disableTools,
});
}
const result = await sdk.generate({
input: { text: inputText },
provider: options.provider,
model: options.model,
temperature: options.temperature,
maxTokens: options.maxTokens,
systemPrompt: options.systemPrompt,
timeout: options.timeout,
disableTools: options.disableTools,
enableAnalytics: options.enableAnalytics,
enableEvaluation: options.enableEvaluation,
evaluationDomain: options.evaluationDomain,
toolUsageContext: options.toolUsageContext,
context: contextMetadata,
factoryConfig: options.domain
? {
domainType: options.domain,
enhancementType: "domain-configuration",
validateDomainData: true,
}
: undefined,
});
if (spinner) {
spinner.succeed(chalk.green("✅ Text generated successfully!"));
}
// Handle output with universal formatting
this.handleOutput(result, options);
if (options.debug) {
logger.debug("\n" + chalk.yellow("Debug Information:"));
logger.debug("Provider:", result.provider);
logger.debug("Model:", result.model);
if (result.analytics) {
logger.debug("Analytics:", JSON.stringify(result.analytics, null, 2));
}
if (result.evaluation) {
logger.debug("Evaluation:", JSON.stringify(result.evaluation, null, 2));
}
}
process.exit(0);
}
catch (error) {
if (spinner) {
spinner.fail();
}
logger.error(chalk.red(`❌ Generation failed: ${error.message}`));
if (options.debug) {
logger.error(chalk.gray(error.stack));
}
process.exit(1);
}
}
/**
* Execute the stream command
*/
static async executeStream(argv) {
// Handle stdin input if no input provided
if (!argv.input && !process.stdin.isTTY) {
let stdinData = "";
process.stdin.setEncoding("utf8");
for await (const chunk of process.stdin) {
stdinData += chunk;
}
argv.input = stdinData.trim();
if (!argv.input) {
throw new Error("No input received from stdin");
}
}
else if (!argv.input) {
throw new Error('Input required. Use: neurolink stream "your prompt" or echo "prompt" | neurolink stream');
}
const options = this.processOptions(argv);
if (!options.quiet) {
logger.always(chalk.blue("🌊 Streaming..."));
}
try {
// Add delay if specified
if (options.delay) {
await new Promise((resolve) => setTimeout(resolve, options.delay));
}
// Process context if provided (same as generate command)
let inputText = argv.input;
let contextMetadata;
if (options.context && options.contextConfig) {
const processedContextResult = ContextFactory.processContext(options.context, options.contextConfig);
// Integrate context into prompt if configured
if (processedContextResult.processedContext) {
inputText = processedContextResult.processedContext + inputText;
}
// Add context metadata for analytics
contextMetadata = {
...ContextFactory.extractAnalyticsContext(options.context),
contextMode: processedContextResult.config.mode,
contextTruncated: processedContextResult.metadata.truncated,
};
if (options.debug) {
logger.debug("Context processed for streaming:", {
mode: processedContextResult.config.mode,
truncated: processedContextResult.metadata.truncated,
processingTime: processedContextResult.metadata.processingTime,
});
}
}
// Handle dry-run mode for testing
if (options.dryRun) {
if (!options.quiet) {
logger.always(chalk.blue("🌊 Dry-run streaming..."));
}
// Simulate streaming output
const chunks = [
"Mock ",
"streaming ",
"response ",
"for ",
"testing ",
"purposes",
];
let fullContent = "";
for (const chunk of chunks) {
process.stdout.write(chunk);
fullContent += chunk;
await new Promise((resolve) => setTimeout(resolve, 50)); // Simulate streaming delay
}
if (!options.quiet) {
process.stdout.write("\n");
}
// Mock analytics and evaluation for dry-run
if (options.enableAnalytics) {
const mockAnalytics = {
provider: options.provider || "auto",
model: options.model || "test-model",
requestDuration: 300,
tokenUsage: {
inputTokens: 10,
outputTokens: 15,
totalTokens: 25,
},
timestamp: Date.now(),
context: contextMetadata,
};
const mockGenerateResult = {
success: true,
content: fullContent,
analytics: mockAnalytics,
model: mockAnalytics.model,
toolsUsed: [],
};
const analyticsDisplay = this.formatAnalyticsForTextMode(mockGenerateResult);
logger.always(analyticsDisplay);
}
if (options.enableEvaluation) {
logger.always(chalk.blue("\n📊 Response Evaluation (Dry-run):"));
logger.always(` Relevance: 8/10`);
logger.always(` Accuracy: 9/10`);
logger.always(` Completeness: 8/10`);
logger.always(` Overall: 8.3/10`);
logger.always(` Reasoning: Test evaluation response`);
}
if (options.output) {
fs.writeFileSync(options.output, fullContent);
if (!options.quiet) {
logger.always(`\nOutput saved to ${options.output}`);
}
}
if (options.debug) {
logger.debug("\n" + chalk.yellow("Debug Information (Dry-run Streaming):"));
logger.debug("Provider:", options.provider || "auto");
logger.debug("Model:", options.model || "test-model");
logger.debug("Mode: DRY-RUN (no actual API calls made)");
}
process.exit(0);
return;
}
const sdk = new NeuroLink();
const stream = await sdk.stream({
input: { text: inputText },
provider: options.provider,
model: options.model,
temperature: options.temperature,
maxTokens: options.maxTokens,
systemPrompt: options.systemPrompt,
timeout: options.timeout,
disableTools: options.disableTools,
enableAnalytics: options.enableAnalytics,
enableEvaluation: options.enableEvaluation,
context: contextMetadata,
factoryConfig: options.domain
? {
domainType: options.domain,
enhancementType: "domain-configuration",
validateDomainData: true,
}
: undefined,
});
let fullContent = "";
// Process the stream
for await (const chunk of stream.stream) {
if (options.delay && options.delay > 0) {
// Demo mode - add delay between chunks
await new Promise((resolve) => setTimeout(resolve, options.delay));
}
process.stdout.write(chunk.content);
fullContent += chunk.content;
}
if (!options.quiet) {
process.stdout.write("\n");
}
// 🔧 NEW: Display analytics and evaluation after streaming (similar to generate command)
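// stream.analytics may be a plain value or a Promise that settles only after the stream
// completes, so both cases are normalized before formatting.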
if (options.enableAnalytics && stream.analytics) {
const resolvedAnalytics = await (stream.analytics instanceof Promise
? stream.analytics
: Promise.resolve(stream.analytics));
const streamAnalytics = {
success: true,
content: fullContent,
analytics: resolvedAnalytics,
model: stream.model,
toolsUsed: stream.toolCalls?.map((tc) => tc.toolName) || [],
};
const analyticsDisplay = this.formatAnalyticsForTextMode(streamAnalytics);
logger.always(analyticsDisplay);
}
// 🔧 NEW: Display evaluation after streaming
if (options.enableEvaluation && stream.evaluation) {
const resolvedEvaluation = await (stream.evaluation instanceof Promise
? stream.evaluation
: Promise.resolve(stream.evaluation));
logger.always(chalk.blue("\n📊 Response Evaluation:"));
logger.always(` Relevance: ${resolvedEvaluation.relevance}/10`);
logger.always(` Accuracy: ${resolvedEvaluation.accuracy}/10`);
logger.always(` Completeness: ${resolvedEvaluation.completeness}/10`);
logger.always(` Overall: ${resolvedEvaluation.overall}/10`);
if (resolvedEvaluation.reasoning) {
logger.always(` Reasoning: ${resolvedEvaluation.reasoning}`);
}
}
// Handle output file if specified
if (options.output) {
fs.writeFileSync(options.output, fullContent);
if (!options.quiet) {
logger.always(`\nOutput saved to ${options.output}`);
}
}
// 🔧 NEW: Debug output for streaming (similar to generate command)
if (options.debug) {
logger.debug("\n" + chalk.yellow("Debug Information (Streaming):"));
logger.debug("Provider:", stream.provider);
logger.debug("Model:", stream.model);
if (stream.analytics) {
const resolvedAnalytics = await (stream.analytics instanceof Promise
? stream.analytics
: Promise.resolve(stream.analytics));
logger.debug("Analytics:", JSON.stringify(resolvedAnalytics, null, 2));
}
if (stream.evaluation) {
const resolvedEvaluation = await (stream.evaluation instanceof Promise
? stream.evaluation
: Promise.resolve(stream.evaluation));
logger.debug("Evaluation:", JSON.stringify(resolvedEvaluation, null, 2));
}
if (stream.metadata) {
logger.debug("Metadata:", JSON.stringify(stream.metadata, null, 2));
}
}
process.exit(0);
}
catch (error) {
logger.error(chalk.red(`❌ Streaming failed: ${error.message}`));
if (options.debug) {
logger.error(chalk.gray(error.stack));
}
process.exit(1);
}
}
/**
* Execute the batch command
*/
static async executeBatch(argv) {
const options = this.processOptions(argv);
const spinner = options.quiet ? null : ora().start();
try {
if (!argv.file) {
throw new Error("No file specified");
}
if (!fs.existsSync(argv.file)) {
throw new Error(`File not found: ${argv.file}`);
}
const buffer = fs.readFileSync(argv.file);
const prompts = buffer
.toString("utf8")
.split("\n")
.map((line) => line.trim())
.filter(Boolean);
if (prompts.length === 0) {
throw new Error("No prompts found in file");
}
if (spinner) {
spinner.text = `📦 Processing ${prompts.length} prompts...`;
}
else if (!options.quiet) {
logger.always(chalk.blue(`📦 Processing ${prompts.length} prompts...\n`));
}
const results = [];
const sdk = new NeuroLink();
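// Prompts are processed sequentially; the inter-request delay below (default 1000 ms) helps
// avoid provider rate limits.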
for (let i = 0; i < prompts.length; i++) {
if (spinner) {
spinner.text = `Processing ${i + 1}/${prompts.length}: ${prompts[i].substring(0, 30)}...`;
}
try {
// Handle dry-run mode for batch processing
if (options.dryRun) {
results.push({
prompt: prompts[i],
response: `Mock batch response ${i + 1} for testing purposes`,
});
if (spinner) {
spinner.render();
}
continue;
}
// Process context for each batch item
let inputText = prompts[i];
let contextMetadata;
if (options.context && options.contextConfig) {
const processedContextResult = ContextFactory.processContext(options.context, options.contextConfig);
if (processedContextResult.processedContext) {
inputText = processedContextResult.processedContext + inputText;
}
contextMetadata = {
...ContextFactory.extractAnalyticsContext(options.context),
contextMode: processedContextResult.config.mode,
contextTruncated: processedContextResult.metadata.truncated,
batchIndex: i,
};
}
const result = await sdk.generate({
input: { text: inputText },
provider: options.provider,
model: options.model,
temperature: options.temperature,
maxTokens: options.maxTokens,
systemPrompt: options.systemPrompt,
timeout: options.timeout,
disableTools: options.disableTools,
enableAnalytics: options.enableAnalytics,
enableEvaluation: options.enableEvaluation,
context: contextMetadata,
factoryConfig: options.domain
? {
domainType: options.domain,
enhancementType: "domain-configuration",
validateDomainData: true,
}
: undefined,
});
results.push({ prompt: prompts[i], response: result.content });
if (spinner) {
spinner.render();
}
}
catch (error) {
results.push({
prompt: prompts[i],
error: error.message,
});
if (spinner) {
spinner.render();
}
}
// Add delay between requests
if (i < prompts.length - 1) {
await new Promise((resolve) => setTimeout(resolve, options.delay || 1000));
}
}
if (spinner) {
spinner.succeed(chalk.green("✅ Batch processing complete!"));
}
// Handle output with universal formatting
this.handleOutput(results, options);
process.exit(0);
}
catch (error) {
if (spinner) {
spinner.fail();
}
logger.error(chalk.red(`❌ Batch processing failed: ${error.message}`));
if (options.debug) {
logger.error(chalk.gray(error.stack));
}
process.exit(1);
}
}
/**
* Execute config export command
*/
static async executeConfigExport(argv) {
const options = this.processOptions(argv);
try {
const config = {
providers: {
openai: !!process.env.OPENAI_API_KEY,
bedrock: !!(process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY),
vertex: !!(process.env.GOOGLE_APPLICATION_CREDENTIALS ||
process.env.GOOGLE_SERVICE_ACCOUNT_KEY),
anthropic: !!process.env.ANTHROPIC_API_KEY,
azure: !!(process.env.AZURE_OPENAI_API_KEY &&
process.env.AZURE_OPENAI_ENDPOINT),
"google-ai": !!process.env.GOOGLE_AI_API_KEY,
},
defaults: {
temperature: 0.7,
maxTokens: 500,
},
timestamp: new Date().toISOString(),
};
this.handleOutput(config, options);
}
catch (error) {
logger.error(chalk.red(`❌ Configuration export failed: ${error.message}`));
process.exit(1);
}
}
/**
* Execute get best provider command
*/
static async executeGetBestProvider(argv) {
const options = this.processOptions(argv);
try {
const { getBestProvider } = await import("../../lib/utils/providerUtils.js");
const bestProvider = await getBestProvider();
if (options.format === "json") {
this.handleOutput({ provider: bestProvider }, options);
}
else {
if (!options.quiet) {
logger.always(chalk.green(`🎯 Best available provider: ${bestProvider}`));
}
else {
this.handleOutput(bestProvider, options);
}
}
}
catch (error) {
logger.error(chalk.red(`❌ Provider selection failed: ${error.message}`));
process.exit(1);
}
}
/**