/**
* Models CLI Commands for NeuroLink
* Implements comprehensive model management commands
* Part of Phase 4.1 - Models Command System
*/
import { ModelResolver, formatSearchResults, formatRecommendation, formatComparison, } from "../../lib/models/modelResolver.js";
import { getAllModels, formatModelForDisplay, } from "../../lib/models/modelRegistry.js";
import chalk from "chalk";
import ora from "ora";
import { logger } from "../../lib/utils/logger.js";
/**
* Type-safe mapping functions to convert CLI types to SDK types
*/
// Map CLI capability queries to SDK ModelCapabilities keys
function mapQueryToCapability(query) {
const queryLower = query.toLowerCase();
const mappings = {
vision: "vision",
function: "functionCalling",
code: "codeGeneration",
reasoning: "reasoning",
multimodal: "multimodal",
streaming: "streaming",
json: "jsonMode",
};
// Find matching capability
for (const [key, capability] of Object.entries(mappings)) {
if (queryLower.includes(key)) {
return capability;
}
}
return undefined;
}
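// Illustrative behavior, derived directly from the mappings above:
//   mapQueryToCapability("vision model")  -> "vision"
//   mapQueryToCapability("function call") -> "functionCalling"
//   mapQueryToCapability("speed")         -> undefined (no matching capability)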
// Map CLI use cases to SDK UseCaseSuitability keys
function mapUseCaseToSuitability(useCase) {
const mappings = {
chat: "conversation",
coding: "coding",
creative: "creative",
analysis: "analysis",
reasoning: "reasoning",
translation: "translation",
summarization: "summarization",
};
return mappings[useCase.toLowerCase()];
}
// Map CLI requirement flags to SDK ModelCapabilities keys
function mapRequirementToCapability(requirement) {
const mappings = {
vision: "vision",
functionCalling: "functionCalling",
};
return mappings[requirement];
}
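// Illustrative behavior of the remaining mappers, derived from their mapping tables:
//   mapUseCaseToSuitability("chat")      -> "conversation"
//   mapUseCaseToSuitability("unknown")   -> undefined
//   mapRequirementToCapability("vision") -> "vision"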
/**
* Models CLI command factory
*/
export class ModelsCommandFactory {
/**
* Create the main models command with subcommands
*/
static createModelsCommands() {
return {
command: "models <subcommand>",
describe: "Manage and discover AI models",
builder: (yargs) => {
return yargs
.command("list", "List available models with filtering options", this.buildListOptions, this.executeList)
.command("search [query]", "Search models by capabilities, use case, or features", this.buildSearchOptions, this.executeSearch)
.command("best", "Get the best model recommendation for your use case", this.buildBestOptions, this.executeBest)
.command("resolve <model>", "Resolve model aliases and find exact model names", this.buildResolveOptions, this.executeResolve)
.command("compare <models..>", "Compare multiple models side by side", this.buildCompareOptions, this.executeCompare)
.command("stats", "Show model registry statistics and insights", this.buildStatsOptions, this.executeStats)
.option("format", {
choices: ["table", "json", "compact"],
default: "table",
description: "Output format",
})
.option("output", {
type: "string",
description: "Save output to file",
})
.option("quiet", {
type: "boolean",
alias: "q",
default: false,
description: "Suppress non-essential output",
})
.option("debug", {
type: "boolean",
default: false,
description: "Enable debug output",
})
.demandCommand(1, "Please specify a models subcommand")
.help();
},
handler: () => {
// No-op handler as subcommands handle everything
},
};
}
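// Usage sketch (illustrative only): the object returned above follows the standard
// yargs command-module shape (command/describe/builder/handler), so a CLI entry point
// could register it roughly as below. The actual NeuroLink entry-point wiring may
// differ; `hideBin` usage and the import path are assumptions made for this sketch.
//
//   import yargs from "yargs";
//   import { hideBin } from "yargs/helpers";
//   import { ModelsCommandFactory } from "./commands/models.js";
//
//   yargs(hideBin(process.argv))
//     .command(ModelsCommandFactory.createModelsCommands())
//     .parse();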
/**
* Build options for list command
*/
static buildListOptions(yargs) {
return yargs
.option("provider", {
choices: [
"openai",
"bedrock",
"vertex",
"anthropic",
"azure",
"google-ai",
"huggingface",
"ollama",
"mistral",
],
description: "Filter by AI provider",
})
.option("category", {
choices: ["general", "coding", "creative", "vision", "reasoning"],
description: "Filter by model category",
})
.option("capability", {
type: "array",
choices: [
"vision",
"functionCalling",
"codeGeneration",
"reasoning",
"multimodal",
"streaming",
"jsonMode",
],
description: "Filter by required capabilities",
})
.option("deprecated", {
type: "boolean",
default: false,
description: "Include deprecated models",
})
.example("neurolink models list", "List all available models")
.example("neurolink models list --provider openai", "List OpenAI models only")
.example("neurolink models list --capability vision", "List models with vision capability")
.example("neurolink models list --category coding", "List coding-focused models");
}
/**
* Build options for search command
*/
static buildSearchOptions(yargs) {
return yargs
.positional("query", {
type: "string",
description: "Search query (capability, use case, or model name)",
})
.option("use-case", {
choices: [
"coding",
"creative",
"analysis",
"conversation",
"reasoning",
"translation",
"summarization",
],
description: "Filter by primary use case",
})
.option("max-cost", {
type: "number",
description: "Maximum cost per 1K tokens (USD)",
})
.option("min-context", {
type: "number",
description: "Minimum context window size (tokens)",
})
.option("max-context", {
type: "number",
description: "Maximum context window size (tokens)",
})
.option("performance", {
choices: ["fast", "medium", "slow", "high", "low"],
description: "Required performance level (speed or quality)",
})
.example("neurolink models search vision", "Search for models with vision capabilities")
.example("neurolink models search --use-case coding --max-cost 0.01", "Find coding models under $0.01/1K tokens")
.example("neurolink models search --min-context 100000", "Find models with large context windows");
}
/**
* Build options for best command
*/
static buildBestOptions(yargs) {
return yargs
.option("coding", {
type: "boolean",
description: "Optimize for code generation and programming",
})
.option("creative", {
type: "boolean",
description: "Optimize for creative writing and content",
})
.option("analysis", {
type: "boolean",
description: "Optimize for data analysis and research",
})
.option("conversation", {
type: "boolean",
description: "Optimize for conversational interactions",
})
.option("reasoning", {
type: "boolean",
description: "Optimize for logical reasoning tasks",
})
.option("translation", {
type: "boolean",
description: "Optimize for language translation",
})
.option("summarization", {
type: "boolean",
description: "Optimize for text summarization",
})
.option("cost-effective", {
type: "boolean",
description: "Prioritize cost-effectiveness",
})
.option("high-quality", {
type: "boolean",
description: "Prioritize output quality over cost",
})
.option("fast", {
type: "boolean",
description: "Prioritize response speed",
})
.option("require-vision", {
type: "boolean",
description: "Require vision/image processing capability",
})
.option("require-function-calling", {
type: "boolean",
description: "Require function calling capability",
})
.option("exclude-providers", {
type: "array",
description: "Exclude specific providers",
})
.option("prefer-local", {
type: "boolean",
description: "Prefer local/offline models",
})
.example("neurolink models best --coding", "Get best model for coding tasks")
.example("neurolink models best --cost-effective --require-vision", "Get cheapest model with vision")
.example("neurolink models best --fast --exclude-providers ollama", "Get fastest non-local model");
}
/**
* Build options for resolve command
*/
static buildResolveOptions(yargs) {
return yargs
.positional("model", {
type: "string",
description: "Model name, alias, or partial match to resolve",
demandOption: true,
})
.option("fuzzy", {
type: "boolean",
default: true,
description: "Enable fuzzy matching for partial names",
})
.example("neurolink models resolve claude-latest", "Resolve claude-latest alias")
.example("neurolink models resolve gpt4", "Fuzzy match for GPT-4 models")
.example("neurolink models resolve fastest", "Resolve fastest model alias");
}
/**
* Build options for compare command
*/
static buildCompareOptions(yargs) {
return yargs
.positional("models", {
type: "string",
array: true,
description: "Model IDs or aliases to compare",
demandOption: true,
})
.example("neurolink models compare gpt-4o claude-3.5-sonnet gemini-2.5-pro", "Compare three flagship models")
.example("neurolink models compare fastest cheapest best-coding", "Compare models by alias");
}
/**
* Build options for stats command
*/
static buildStatsOptions(yargs) {
return yargs
.option("detailed", {
type: "boolean",
default: false,
description: "Show detailed statistics breakdown",
})
.example("neurolink models stats", "Show model registry statistics")
.example("neurolink models stats --detailed", "Show detailed statistics with breakdowns");
}
/**
* Execute list command
*/
static async executeList(argv) {
try {
const spinner = argv.quiet
? null
: ora("Loading model registry...").start();
let models = getAllModels();
// Apply filters
if (argv.provider) {
const providers = Array.isArray(argv.provider)
? argv.provider
: [argv.provider];
models = models.filter((model) => providers.includes(model.provider));
}
if (argv.category) {
models = models.filter((model) => model.category === argv.category);
}
if (argv.capability) {
models = models.filter((model) => {
return argv.capability.every((cap) => model.capabilities[cap]);
});
}
if (!argv.deprecated) {
models = models.filter((model) => !model.deprecated);
}
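// Illustrative: `neurolink models list --provider openai --capability vision functionCalling`
// keeps only OpenAI models whose capabilities object has both flags truthy; deprecated
// models are excluded unless --deprecated is passed.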
if (spinner) {
spinner.succeed(`Found ${models.length} models`);
}
// Format and display results
if (argv.format === "json") {
const output = models.map(formatModelForDisplay);
logger.always(JSON.stringify(output, null, 2));
}
else if (argv.format === "compact") {
models.forEach((model) => {
logger.always(`${model.id} (${model.provider}) - ${model.description}`);
});
}
else {
// Table format
logger.always(chalk.bold("\n📋 Available Models:\n"));
for (const model of models) {
const status = model.deprecated
? chalk.red("DEPRECATED")
: chalk.green("ACTIVE");
const cost = model.pricing.inputCostPer1K === 0
? chalk.green("FREE")
: `$${(model.pricing.inputCostPer1K + model.pricing.outputCostPer1K).toFixed(6)}/1K`;
logger.always(`${chalk.cyan(model.id)} ${status}`);
logger.always(` Provider: ${model.provider} | Category: ${model.category}`);
logger.always(` Cost: ${cost} | Context: ${(model.limits.maxContextTokens / 1000).toFixed(0)}K tokens`);
logger.always(` ${chalk.gray(model.description)}`);
logger.always();
}
}
}
catch (error) {
logger.error(chalk.red(`❌ List command failed: ${error.message}`));
process.exit(1);
}
}
/**
* Execute search command
*/
static async executeSearch(argv) {
try {
const spinner = argv.quiet ? null : ora("Searching models...").start();
// Build search filters
const filters = {};
if (argv.query) {
// Use type-safe capability mapping
const mappedCapability = mapQueryToCapability(argv.query);
if (mappedCapability) {
filters.capability = mappedCapability;
}
}
if (argv.useCase) {
// Use type-safe use case mapping
const mappedUseCase = mapUseCaseToSuitability(argv.useCase);
if (mappedUseCase) {
filters.useCase = mappedUseCase;
}
}
if (argv.maxCost) {
filters.maxCost = argv.maxCost;
}
if (argv.minContext) {
filters.minContextSize = argv.minContext;
}
if (argv.maxContext) {
filters.maxContextSize = argv.maxContext;
}
if (argv.performance) {
filters.performance = argv.performance;
}
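// Illustrative: `neurolink models search vision --max-cost 0.01 --min-context 100000`
// builds filters roughly like { capability: "vision", maxCost: 0.01, minContextSize: 100000 }
// before handing them to ModelResolver.searchModels below.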
const results = ModelResolver.searchModels(filters);
if (spinner) {
spinner.succeed(`Found ${results.length} matching models`);
}
if (results.length === 0) {
logger.always(chalk.yellow("No models found matching your criteria."));
return;
}
// Display results
if (argv.format === "json") {
logger.always(JSON.stringify(formatSearchResults(results), null, 2));
}
else {
logger.always(chalk.bold("\n🔍 Search Results:\n"));
results.slice(0, 10).forEach((result, index) => {
logger.always(`${index + 1}. ${chalk.cyan(result.model.id)} (Score: ${result.score})`);
logger.always(` ${result.model.description}`);
logger.always(` Matches: ${chalk.green(result.matchReasons.join(", "))}`);
logger.always();
});
if (results.length > 10) {
logger.always(chalk.gray(`... and ${results.length - 10} more results`));
}
}
}
catch (error) {
logger.error(chalk.red(`❌ Search command failed: ${error.message}`));
process.exit(1);
}
}
/**
* Execute best command
*/
static async executeBest(argv) {
try {
const spinner = argv.quiet ? null : ora("Finding best model...").start();
// Build recommendation context
const context = {};
// Determine use case from flags
if (argv.coding) {
context.useCase = "coding";
}
else if (argv.creative) {
context.useCase = "creative";
}
else if (argv.analysis) {
context.useCase = "analysis";
}
else if (argv.conversation) {
context.useCase = "conversation";
}
else if (argv.reasoning) {
context.useCase = "reasoning";
}
else if (argv.translation) {
context.useCase = "translation";
}
else if (argv.summarization) {
context.useCase = "summarization";
}
// Apply other preferences
if (argv.costEffective) {
context.maxCost = 0.01;
}
if (argv.highQuality) {
context.minQuality = "high";
}
if (argv.preferLocal) {
context.preferLocal = true;
}
// Required capabilities - build with correct type from start
const requiredCapabilities = [];
if (argv.requireVision) {
const mapped = mapRequirementToCapability("vision");
if (mapped) {
requiredCapabilities.push(mapped);
}
}
if (argv.requireFunctionCalling) {
const mapped = mapRequirementToCapability("functionCalling");
if (mapped) {
requiredCapabilities.push(mapped);
}
}
if (requiredCapabilities.length > 0) {
// Now we can assign safely since types match
context.requireCapabilities = requiredCapabilities;
}
// Excluded providers
if (argv.excludeProviders) {
context.excludeProviders = argv.excludeProviders;
}
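// Illustrative: `neurolink models best --coding --cost-effective --require-vision`
// produces a context roughly like { useCase: "coding", maxCost: 0.01, requireCapabilities: ["vision"] }
// before it is passed to ModelResolver.getBestModel below.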
const recommendation = ModelResolver.getBestModel(context);
if (spinner) {
spinner.succeed("Found best model recommendation");
}
// Display recommendation
if (argv.format === "json") {
logger.always(JSON.stringify(formatRecommendation(recommendation), null, 2));
}
else {
logger.always(chalk.bold("\n🎯 Best Model Recommendation:\n"));
const model = recommendation.model;
logger.always(`${chalk.green("✅")} ${chalk.cyan(model.id)} (${model.name})`);
logger.always(` Provider: ${model.provider} | Category: ${model.category}`);
logger.always(` Score: ${recommendation.score}/100`);
logger.always();
logger.always(chalk.bold("📝 Why this model:"));
recommendation.reasoning.forEach((reason) => {
logger.always(`  • ${reason}`);
});
logger.always();
if (recommendation.alternatives.length > 0) {
logger.always(chalk.bold("🔄 Alternatives to consider:"));
recommendation.alternatives.slice(0, 3).forEach((alt) => {
logger.always(`  • ${alt.id} (${alt.provider})`);
});
}
}
}
catch (error) {
logger.error(chalk.red(`❌ Best command failed: ${error.message}`));
process.exit(1);
}
}
/**
* Execute resolve command
*/
static async executeResolve(argv) {
try {
const query = argv.model;
const model = ModelResolver.resolveModel(query);
if (!model) {
logger.always(chalk.red(`❌ Could not resolve model: ${query}`));
// Suggest similar models
const allModels = getAllModels();
const similar = allModels
.filter((m) => m.id.toLowerCase().includes(query.toLowerCase()) ||
m.name.toLowerCase().includes(query.toLowerCase()))
.slice(0, 3);
if (similar.length > 0) {
logger.always(chalk.yellow("\n💡 Did you mean:"));
similar.forEach((m) => logger.always(`  • ${m.id}`));
}
process.exit(1);
}
// Display resolution
if (argv.format === "json") {
logger.always(JSON.stringify(formatModelForDisplay(model), null, 2));
}
else {
logger.always(chalk.bold("\n🔍 Model Resolution:\n"));
logger.always(`Query: ${chalk.yellow(query)}`);
logger.always(`Resolved: ${chalk.green(model.id)}`);
logger.always(`Name: ${model.name}`);
logger.always(`Provider: ${model.provider}`);
logger.always(`Description: ${chalk.gray(model.description)}`);
if (model.aliases.length > 0) {
logger.always(`Aliases: ${model.aliases.join(", ")}`);
}
}
}
catch (error) {
logger.error(chalk.red(`❌ Resolve command failed: ${error.message}`));
process.exit(1);
}
}
/**
* Execute compare command
*/
static async executeCompare(argv) {
try {
const modelIds = argv.models;
const comparison = ModelResolver.compareModels(modelIds);
if (argv.format === "json") {
logger.always(JSON.stringify(formatComparison(comparison), null, 2));
}
else {
logger.always(chalk.bold("\n⚖️ Model Comparison:\n"));
// Display models being compared
logger.always(chalk.bold("Models:"));
comparison.models.forEach((model, index) => {
logger.always(`${index + 1}. ${chalk.cyan(model.id)} (${model.provider})`);
});
logger.always();
// Pricing comparison
logger.always(chalk.bold("💰 Pricing:"));
logger.always(` Cheapest: ${chalk.green(comparison.comparison.pricing.cheapest.id)}`);
logger.always(` Most Expensive: ${chalk.red(comparison.comparison.pricing.mostExpensive.id)}`);
logger.always();
// Context size comparison
logger.always(chalk.bold("📏 Context Size:"));
logger.always(` Largest: ${chalk.green(comparison.comparison.contextSize.largest.id)}`);
logger.always(` Smallest: ${chalk.yellow(comparison.comparison.contextSize.smallest.id)}`);
logger.always();
// Capabilities comparison
logger.always(chalk.bold("🛠️ Capabilities:"));
Object.entries(comparison.comparison.capabilities).forEach(([capability, models]) => {
if (models.length > 0) {
logger.always(` ${capability}: ${models.map((m) => m.id).join(", ")}`);
}
});
}
}
catch (error) {
logger.error(chalk.red(`❌ Compare command failed: ${error.message}`));
process.exit(1);
}
}
/**
* Execute stats command
*/
static async executeStats(argv) {
try {
const stats = ModelResolver.getModelStatistics();
if (argv.format === "json") {
logger.always(JSON.stringify(stats, null, 2));
}
else {
logger.always(chalk.bold("\n📊 Model Registry Statistics:\n"));
if (typeof stats === "object" && stats !== null) {
const statsObj = stats;
logger.always(`Total Models: ${chalk.cyan(statsObj.total)}`);
logger.always(`Providers: ${chalk.cyan(statsObj.providers)}`);
logger.always(`Deprecated: ${chalk.yellow(statsObj.deprecated)}`);
logger.always();
logger.always(chalk.bold("By Provider:"));
Object.entries(statsObj.byProvider).forEach(([provider, count]) => {
logger.always(` ${provider}: ${count}`);
});
logger.always();
logger.always(chalk.bold("By Category:"));
Object.entries(statsObj.byCategory).forEach(([category, count]) => {
logger.always(` ${category}: ${count}`);
});
logger.always();
if (argv.detailed) {
logger.always(chalk.bold("Capability Distribution:"));
Object.entries(statsObj.capabilities).forEach(([capability, count]) => {
logger.always(` ${capability}: ${count} models`);
});
logger.always();
const pricing = statsObj.pricing;
logger.always(chalk.bold("Pricing Overview:"));
logger.always(` Average: $${(pricing.average || 0).toFixed(6)}/1K tokens`);
logger.always(` Range: $${(pricing.min || 0).toFixed(6)} - $${(pricing.max || 0).toFixed(6)}/1K`);
logger.always(` Free models: ${pricing.free || false}`);
}
}
}
}
catch (error) {
logger.error(chalk.red(`❌ Stats command failed: ${error.message}`));
process.exit(1);
}
}
}