llm-checker
Version: 2.1.0
Intelligent CLI tool that analyzes your hardware and uses AI-powered model selection to recommend the LLM models your system can run
1,202 lines (993 loc) • 57 kB
JavaScript
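Typical usage, as a sketch (assumes an install that puts the llm-checker bin on your PATH; all commands below are defined in the source):
llm-checker check
llm-checker recommend --category coding
llm-checker list-models --popular --limit 10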
#!/usr/bin/env node
const { Command } = require('commander');
const chalk = require('chalk');
const ora = require('ora');
const { table } = require('table');
const os = require('os');
const { spawn } = require('child_process');
const LLMChecker = require('../src/index');
const { getLogger } = require('../src/utils/logger');
const program = new Command();
program
.name('llm-checker')
.description('Check which LLM models your computer can run')
.version('2.1.0');
const logger = getLogger({ console: false });
// Ollama installation helper
function getOllamaInstallInstructions() {
const platform = os.platform();
const arch = os.arch();
const instructions = {
'darwin': {
name: 'macOS',
downloadUrl: 'https://ollama.com/download/mac',
instructions: [
'1. Download Ollama for macOS from the link above',
'2. Open the downloaded .pkg file and follow the installer',
'3. Once installed, open Terminal and run: ollama serve',
'4. In a new terminal window, test with: ollama run llama2:7b'
],
alternativeInstall: 'brew install ollama'
},
'win32': {
name: 'Windows',
downloadUrl: 'https://ollama.com/download/windows',
instructions: [
'1. Download Ollama for Windows from the link above',
'2. Run the downloaded installer (.exe file)',
'3. Open Command Prompt or PowerShell',
'4. Test with: ollama run llama2:7b'
],
alternativeInstall: 'winget install Ollama.Ollama'
},
'linux': {
name: 'Linux',
downloadUrl: 'https://ollama.com/download/linux',
instructions: [
'1. Run the installation script:',
' curl -fsSL https://ollama.com/install.sh | sh',
'2. Start Ollama service:',
' sudo systemctl start ollama',
'3. Test with: ollama run llama2:7b'
],
alternativeInstall: 'Manual install: https://github.com/ollama/ollama/blob/main/docs/linux.md'
}
};
return instructions[platform] || instructions['linux'];
}
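// Illustrative result (keys per the table above), e.g. on macOS:
// getOllamaInstallInstructions()
// // => { name: 'macOS', downloadUrl: 'https://ollama.com/download/mac',
// //      instructions: [...], alternativeInstall: 'brew install ollama' }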
function displayOllamaInstallHelp() {
const installInfo = getOllamaInstallInstructions();
console.log(chalk.red.bold('\n❌ Ollama is not installed or not running!'));
console.log(chalk.yellow('\n⚠️ LLM Checker requires Ollama to function properly.'));
console.log(chalk.cyan.bold(`\n📥 Install Ollama for ${installInfo.name}:`));
console.log(chalk.blue(`\n🔗 Download: ${installInfo.downloadUrl}`));
console.log(chalk.green.bold('\n📋 Installation Steps:'));
installInfo.instructions.forEach(step => {
console.log(chalk.gray(` ${step}`));
});
if (installInfo.alternativeInstall) {
console.log(chalk.magenta.bold('\n⚡ Quick Install (if available):'));
console.log(chalk.white(` ${installInfo.alternativeInstall}`));
}
console.log(chalk.yellow.bold('\n✅ After installation:'));
console.log(chalk.gray(' 1. Restart your terminal'));
console.log(chalk.gray(' 2. Run: llm-checker check'));
console.log(chalk.gray(' 3. Start using the AI model selector!'));
console.log(chalk.cyan('\n💡 Need help? Visit: https://github.com/ollama/ollama'));
}
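// Probes for the `ollama` binary on PATH using the platform lookup command
// (`where` on Windows, `which` elsewhere); on failure it prints the install
// help above and exits with code 1.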
async function checkOllamaAndExit() {
const spinner = ora('Checking Ollama availability...').start();
try {
// Quick check if ollama command exists
const checkCommand = os.platform() === 'win32' ? 'where' : 'which';
return new Promise((resolve) => {
const proc = spawn(checkCommand, ['ollama'], { stdio: 'pipe' });
proc.on('close', (code) => {
spinner.stop();
if (code !== 0) {
displayOllamaInstallHelp();
process.exit(1);
}
resolve(true);
});
proc.on('error', () => {
spinner.stop();
displayOllamaInstallHelp();
process.exit(1);
});
});
} catch (error) {
spinner.stop();
displayOllamaInstallHelp();
process.exit(1);
}
}
function getStatusIcon(model, ollamaModels) {
const ollamaModel = ollamaModels?.find(om => om.matchedModel?.name === model.name);
if (ollamaModel?.isRunning) return '🟢';
if (ollamaModel?.isInstalled) return '📦';
if (model.specialization === 'code') return '💻';
if (model.specialization === 'multimodal' || model.multimodal) return '🖼️';
if (model.specialization === 'embeddings') return '🧲';
if (model.category === 'ultra_small') return '🐣';
if (model.category === 'small') return '🐤';
if (model.category === 'medium') return '🐦';
if (model.category === 'large') return '🦅';
return '-';
}
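// Example: a model matched to a running Ollama entry returns '🟢', an
// installed-but-idle one '📦'; otherwise the icon falls back to the model's
// specialization, then its size category.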
function formatSize(size) {
if (!size) return 'Unknown';
const cleanSize = size.replace(/[^\d.BMK]/gi, '');
const numMatch = cleanSize.match(/(\d+\.?\d*)/);
const unitMatch = cleanSize.match(/[BMK]/i);
if (numMatch && unitMatch) {
const num = parseFloat(numMatch[1]);
const unit = unitMatch[0].toUpperCase();
return `${num}${unit}`;
}
return size;
}
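// Examples: formatSize('7b') => '7B'; formatSize('3.8 B') => '3.8B'.
// Only B/M/K units are recognized; anything else is returned unchanged.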
function formatSpeed(speed) {
// Speed labels ('very_fast' ... 'very_slow') are already display-ready
// strings, so pass them through and fall back to 'unknown'.
return speed || 'unknown';
}
function getScoreColor(score) {
if (score >= 90) return chalk.green;
if (score >= 75) return chalk.yellow;
if (score >= 60) return chalk.hex('#FFA500');
return chalk.red;
}
function getOllamaCommand(modelName) {
const mapping = {
'TinyLlama 1.1B': 'tinyllama:1.1b',
'Qwen 0.5B': 'qwen:0.5b',
'Gemma 2B': 'gemma2:2b',
'Phi-3 Mini 3.8B': 'phi3:mini',
'Llama 3.2 3B': 'llama3.2:3b',
'Llama 3.1 8B': 'llama3.1:8b',
'Mistral 7B v0.3': 'mistral:7b',
'CodeLlama 7B': 'codellama:7b',
'Qwen 2.5 7B': 'qwen2.5:7b'
};
return mapping[modelName] || '-';
}
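// Example: getOllamaCommand('Phi-3 Mini 3.8B') => 'phi3:mini';
// names outside the mapping fall back to '-'.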
function displaySystemInfo(hardware, analysis) {
const cpuColor = hardware.cpu.cores >= 8 ? chalk.green : hardware.cpu.cores >= 4 ? chalk.yellow : chalk.red;
const ramColor = hardware.memory.total >= 32 ? chalk.green : hardware.memory.total >= 16 ? chalk.yellow : chalk.red;
const gpuColor = hardware.gpu.dedicated ? chalk.green : chalk.hex('#FFA500');
const lines = [
`${chalk.cyan('CPU:')} ${cpuColor(hardware.cpu.brand)} ${chalk.gray(`(${hardware.cpu.cores} cores, ${hardware.cpu.speed}GHz)`)}`,
`${chalk.cyan('Architecture:')} ${hardware.cpu.architecture}`,
`${chalk.cyan('RAM:')} ${ramColor(hardware.memory.total + 'GB')} total ${chalk.gray(`(${hardware.memory.free}GB free)`)}`,
`${chalk.cyan('GPU:')} ${gpuColor(hardware.gpu.model || 'Not detected')}`,
`${chalk.cyan('VRAM:')} ${hardware.gpu.vram === 0 && hardware.gpu.model && hardware.gpu.model.toLowerCase().includes('apple') ? 'Unified Memory' : `${hardware.gpu.vram || 'N/A'}GB`}${hardware.gpu.dedicated ? chalk.green(' (Dedicated)') : chalk.hex('#FFA500')(' (Integrated)')}`,
];
const tier = analysis.summary.hardwareTier?.replace('_', ' ').toUpperCase() || 'UNKNOWN';
const tierColor = tier.includes('HIGH') ? chalk.green : tier.includes('MEDIUM') ? chalk.yellow : chalk.red;
lines.push(`📊 ${chalk.bold('Hardware Tier:')} ${tierColor.bold(tier)}`);
console.log('\n' + chalk.bgBlue.white.bold(' 🖥️ SYSTEM INFORMATION '));
console.log(chalk.blue('╭' + '─'.repeat(50)));
lines.forEach(line => {
console.log(chalk.blue('│') + ' ' + line);
});
console.log(chalk.blue('╰'));
}
function displayOllamaIntegration(ollamaInfo, ollamaModels) {
const lines = [];
if (ollamaInfo.available) {
lines.push(`${chalk.green('✅ Status:')} Running ${chalk.gray(`(v${ollamaInfo.version || 'unknown'})`)}`);
if (ollamaModels && ollamaModels.length > 0) {
const compatibleCount = ollamaModels.filter(m => {
return m.canRun === true ||
m.compatibilityScore >= 60 ||
Boolean(m.matchedModel);
}).length;
const runningCount = ollamaModels.filter(m => m.isRunning).length;
lines.push(`📦 ${chalk.cyan('Installed:')} ${ollamaModels.length} total, ${chalk.green(compatibleCount)} compatible`);
if (runningCount > 0) {
lines.push(`🟢 ${chalk.cyan('Running:')} ${chalk.green(runningCount)} models`);
}
} else {
lines.push(`📦 ${chalk.gray('No models installed yet')}`);
}
} else {
lines.push(`${chalk.red('❌ Status:')} Not available`);
}
console.log('\n' + chalk.bgMagenta.white.bold(' 📦 OLLAMA INTEGRATION '));
console.log(chalk.hex('#a259ff')('╭' + '─'.repeat(50)));
lines.forEach(line => {
console.log(chalk.hex('#a259ff')('│') + ' ' + line);
});
console.log(chalk.hex('#a259ff')('╰'));
}
function displayEnhancedCompatibleModels(compatible, ollamaModels) {
if (compatible.length === 0) {
console.log('\n' + chalk.yellow('No compatible models found.'));
return;
}
console.log('\n' + chalk.green.bold(' ✅ Compatible Models (Score ≥ 75)'));
const data = [
[
chalk.bgGreen.white.bold(' Model '),
chalk.bgGreen.white.bold(' Size '),
chalk.bgGreen.white.bold(' Score '),
chalk.bgGreen.white.bold(' RAM '),
chalk.bgGreen.white.bold(' VRAM '),
chalk.bgGreen.white.bold(' Speed '),
chalk.bgGreen.white.bold(' Status ')
]
];
compatible.slice(0, 15).forEach(model => {
const tokensPerSec = model.performanceEstimate?.estimatedTokensPerSecond || 'N/A';
const ramReq = model.requirements?.ram || 1;
const vramReq = model.requirements?.vram || 0;
const speedFormatted = formatSpeed(model.performance?.speed || 'medium');
const scoreColor = getScoreColor(model.score || 0);
const scoreDisplay = scoreColor(`${model.score || 0}/100`);
let statusDisplay = `${tokensPerSec}t/s`;
if (model.isOllamaInstalled) {
const ollamaInfo = model.ollamaInfo || {};
if (ollamaInfo.isRunning) {
statusDisplay = '🟢 Running';
} else {
statusDisplay = '📦 Installed';
}
}
let modelName = model.name;
if (model.isOllamaInstalled) {
modelName = `${model.name} 📦`;
}
const row = [
modelName,
formatSize(model.size || 'Unknown'),
scoreDisplay,
`${ramReq}GB`,
`${vramReq}GB`,
speedFormatted,
statusDisplay
];
data.push(row);
});
console.log(table(data));
if (compatible.length > 15) {
console.log(chalk.gray(`\n... and ${compatible.length - 15} more compatible models`));
}
displayCompatibleModelsSummary(compatible.length);
}
function displayCompatibleModelsSummary(count) {
console.log('\n' + chalk.bgMagenta.white.bold(' COMPATIBLE MODELS '));
console.log(chalk.hex('#a259ff')('╭' + '─'.repeat(40)));
console.log(chalk.hex('#a259ff')('│') + ` Total compatible models: ${chalk.green.bold(count)}`);
console.log(chalk.hex('#a259ff')('╰'));
}
function displayMarginalModels(marginal) {
if (marginal.length === 0) return;
console.log('\n' + chalk.yellow.bold('⚠️ Marginal Performance (Score 60-74)'));
const data = [
[
chalk.bgYellow.white.bold(' Model '),
chalk.bgYellow.white.bold(' Size '),
chalk.bgYellow.white.bold(' Score '),
chalk.bgYellow.white.bold(' RAM '),
chalk.bgYellow.white.bold(' VRAM '),
chalk.bgYellow.white.bold(' Issue ')
]
];
marginal.slice(0, 6).forEach(model => {
const mainIssue = model.issues?.[0] || 'Performance limitations';
const scoreColor = getScoreColor(model.score || 0);
const scoreDisplay = scoreColor(`${model.score || 0}/100`);
const ramReq = model.requirements?.ram || 1;
const vramReq = model.requirements?.vram || 0;
const truncatedIssue = mainIssue.length > 30 ? mainIssue.substring(0, 27) + '...' : mainIssue;
const row = [
model.name,
formatSize(model.size || 'Unknown'),
scoreDisplay,
`${ramReq}GB`,
`${vramReq}GB`,
truncatedIssue
];
data.push(row);
});
console.log(table(data));
if (marginal.length > 6) {
console.log(chalk.gray(`\n... and ${marginal.length - 6} more marginal models`));
}
}
function displayIncompatibleModels(incompatible) {
if (incompatible.length === 0) return;
console.log('\n' + chalk.red.bold('❌ Incompatible Models (showing top 5)'));
const data = [
[
chalk.bgRed.white.bold(' Model '),
chalk.bgRed.white.bold(' Size '),
chalk.bgRed.white.bold(' Score '),
chalk.bgRed.white.bold(' Required RAM '),
chalk.bgRed.white.bold(' Reason ')
]
];
incompatible.slice(0, 5).forEach(model => {
const reason = model.issues?.[0] || 'Hardware insufficient';
const required = `${model.requirements?.ram || '?'}GB`;
const scoreColor = getScoreColor(model.score || 0);
const scoreDisplay = scoreColor(`${model.score || 0}/100`);
const truncatedReason = reason.length > 50 ? reason.substring(0, 47) + '...' : reason;
const row = [
model.name,
formatSize(model.size || 'Unknown'),
scoreDisplay,
required,
truncatedReason
];
data.push(row);
});
console.log(table(data));
}
function displayStructuredRecommendations(recommendations) {
if (!recommendations) return;
if (Array.isArray(recommendations)) {
displayLegacyRecommendations(recommendations);
return;
}
console.log('\n' + chalk.bgCyan.white.bold(' 🎯 SMART RECOMMENDATIONS '));
console.log(chalk.cyan('╭' + '─'.repeat(50)));
if (recommendations.general && recommendations.general.length > 0) {
console.log(chalk.cyan('│') + ` ${chalk.bold.white('💡 General Recommendations:')}`);
recommendations.general.slice(0, 4).forEach((rec, index) => {
console.log(chalk.cyan('│') + ` ${index + 1}. ${chalk.white(rec)}`);
});
console.log(chalk.cyan('│'));
}
if (recommendations.installedModels && recommendations.installedModels.length > 0) {
console.log(chalk.cyan('│') + ` ${chalk.bold.green('📦 Your Installed Ollama Models:')}`);
recommendations.installedModels.forEach(rec => {
console.log(chalk.cyan('│') + ` ${chalk.green(rec)}`);
});
console.log(chalk.cyan('│'));
}
if (recommendations.cloudSuggestions && recommendations.cloudSuggestions.length > 0) {
console.log(chalk.cyan('│') + ` ${chalk.bold.blue('☁️ Recommended from Ollama Cloud:')}`);
recommendations.cloudSuggestions.forEach(rec => {
if (rec.includes('ollama pull')) {
console.log(chalk.cyan('│') + ` 📥 ${chalk.cyan.bold(rec)}`);
} else {
console.log(chalk.cyan('│') + ` ${chalk.blue(rec)}`);
}
});
console.log(chalk.cyan('│'));
}
if (recommendations.quickCommands && recommendations.quickCommands.length > 0) {
console.log(chalk.cyan('│') + ` ${chalk.bold.yellow('⚡ Quick Commands:')}`);
const uniqueCommands = [...new Set(recommendations.quickCommands)];
uniqueCommands.slice(0, 3).forEach(cmd => {
console.log(chalk.cyan('│') + ` > ${chalk.yellow.bold(cmd)}`);
});
}
console.log(chalk.cyan('╰'));
}
function displayLegacyRecommendations(recommendations) {
if (!recommendations || recommendations.length === 0) return;
const generalRecs = [];
const ollamaFoundRecs = [];
const quickInstallRecs = [];
recommendations.forEach(rec => {
if (rec.includes('📦') && rec.includes('Score:')) {
ollamaFoundRecs.push(rec);
} else if (rec.includes('ollama pull')) {
quickInstallRecs.push(rec);
} else if (rec.includes('ollama run')) {
quickInstallRecs.push(rec);
} else {
generalRecs.push(rec);
}
});
console.log('\n' + chalk.bgCyan.white.bold(' SMART RECOMMENDATIONS '));
console.log(chalk.cyan('╭' + '─'.repeat(40)));
generalRecs.slice(0, 8).forEach((rec, index) => {
const number = chalk.green.bold(`${index + 1}.`);
console.log(chalk.cyan('│') + ` ${number} ${chalk.white(rec)}`);
});
if (ollamaFoundRecs.length > 0) {
console.log(chalk.cyan('│'));
console.log(chalk.cyan('│') + ` ${chalk.bold.green('📦 Your Installed Ollama Models:')}`);
ollamaFoundRecs.forEach(rec => {
console.log(chalk.cyan('│') + ` ${chalk.green(rec)}`);
});
}
if (quickInstallRecs.length > 0) {
console.log(chalk.cyan('│'));
console.log(chalk.cyan('│') + ` ${chalk.bold.blue('🚀 Quick Commands:')}`);
quickInstallRecs.slice(0, 3).forEach(cmd => {
console.log(chalk.cyan('│') + ` > ${chalk.cyan.bold(cmd)}`);
});
}
console.log(chalk.cyan('╰'));
}
function displayIntelligentRecommendations(intelligentData) {
if (!intelligentData || !intelligentData.summary) return;
const { summary, recommendations } = intelligentData;
const tier = summary.hardware_tier.replace('_', ' ').toUpperCase();
const tierColor = tier.includes('HIGH') ? chalk.green : tier.includes('MEDIUM') ? chalk.yellow : chalk.red;
console.log('\n' + chalk.bgRed.white.bold(' 🧠 INTELLIGENT RECOMMENDATIONS BY CATEGORY '));
console.log(chalk.red('╭' + '─'.repeat(65)));
console.log(chalk.red('│') + ` 📊 Hardware Tier: ${tierColor.bold(tier)} | Models Analyzed: ${chalk.cyan.bold(intelligentData.totalModelsAnalyzed)}`);
console.log(chalk.red('│'));
// Show the best overall model
if (summary.best_overall) {
const best = summary.best_overall;
console.log(chalk.red('│') + ` 🏆 ${chalk.bold.yellow('BEST OVERALL:')} ${chalk.green.bold(best.name)}`);
console.log(chalk.red('│') + ` 📦 Command: ${chalk.cyan.bold(best.command)}`);
console.log(chalk.red('│') + ` 📊 Score: ${chalk.yellow.bold(best.score)}/100 | Category: ${chalk.magenta(best.category)}`);
console.log(chalk.red('│'));
}
// Show the top model per category
const categories = {
coding: '💻',
talking: '💬',
reading: '📖',
reasoning: '🧮',
multimodal: '🖼️',
creative: '🎨',
general: '🤖'
};
Object.entries(summary.by_category).forEach(([category, model]) => {
const icon = categories[category] || '📋';
const categoryName = category.charAt(0).toUpperCase() + category.slice(1);
const scoreColor = getScoreColor(model.score);
console.log(chalk.red('│') + ` ${icon} ${chalk.bold.white(categoryName)}:`);
console.log(chalk.red('│') + ` 📌 ${chalk.green(model.name)} (${model.size})`);
console.log(chalk.red('│') + ` 📊 Score: ${scoreColor.bold(model.score)}/100 | Pulls: ${chalk.gray(model.pulls?.toLocaleString() || 'N/A')}`);
console.log(chalk.red('│') + ` 📦 ${chalk.cyan.bold(model.command)}`);
console.log(chalk.red('│'));
});
console.log(chalk.red('╰'));
}
function displayModelsStats(originalCount, filteredCount, options) {
console.log('\n' + chalk.bgGreen.white.bold(' 📊 DATABASE STATS '));
console.log(chalk.green('╭' + '─'.repeat(60)));
console.log(chalk.green('│') + ` Total models in database: ${chalk.cyan.bold(originalCount)}`);
console.log(chalk.green('│') + ` After filters: ${chalk.yellow.bold(filteredCount)}`);
if (options.category) {
console.log(chalk.green('│') + ` Category filter: ${chalk.magenta.bold(options.category)}`);
}
if (options.size) {
console.log(chalk.green('│') + ` Size filter: ${chalk.magenta.bold(options.size)}`);
}
if (options.popular) {
console.log(chalk.green('│') + ` Filter: ${chalk.magenta.bold('Popular models only (>100k pulls)')}`);
}
if (options.recent) {
console.log(chalk.green('│') + ` Filter: ${chalk.magenta.bold('Recent models only')}`);
}
console.log(chalk.green('╰'));
}
function displayCompactModelsList(models) {
console.log('\n' + chalk.bgBlue.white.bold(' 📋 MODELS LIST '));
const data = [
[
chalk.bgBlue.white.bold(' # '),
chalk.bgBlue.white.bold(' Model '),
chalk.bgBlue.white.bold(' Size '),
chalk.bgBlue.white.bold(' Context '),
chalk.bgBlue.white.bold(' Input '),
chalk.bgBlue.white.bold(' Category '),
chalk.bgBlue.white.bold(' Variants ')
]
];
models.forEach((model, index) => {
const category = model.category || 'general';
const categoryColor = getCategoryColor(category);
// Use the most representative size available
const mainSize = model.main_size ||
(model.model_sizes && model.model_sizes[0]) ||
extractSizeFromIdentifier(model.model_identifier) ||
'Unknown';
// Context length
const contextLength = model.context_length || 'Unknown';
// Input types
const inputTypes = (model.input_types && model.input_types.length > 0) ?
model.input_types.slice(0, 2).join(',') : 'text';
// Number of variants
const variantCount = (model.tags && model.tags.length > 0) ?
model.tags.length : 0;
const row = [
chalk.gray(`${index + 1}`),
model.model_name || 'Unknown',
chalk.green(mainSize),
chalk.blue(contextLength),
chalk.magenta(inputTypes),
categoryColor(category),
chalk.yellow(`${variantCount} tags`)
];
data.push(row);
});
console.log(table(data));
}
function extractSizeFromIdentifier(identifier) {
if (!identifier) return null;
const sizeMatch = identifier.match(/(\d+\.?\d*[bg])/i);
return sizeMatch ? sizeMatch[1].toLowerCase() : null;
}
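// Examples: extractSizeFromIdentifier('llama3.1:8b') => '8b';
// extractSizeFromIdentifier('gemma2') => null (no size suffix).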
function displayFullModelsList(models) {
console.log('\n' + chalk.bgBlue.white.bold(' 📋 DETAILED MODELS LIST '));
models.forEach((model, index) => {
console.log(`\n${chalk.cyan.bold(`${index + 1}. ${model.model_name}`)}`);
console.log(` ${chalk.gray('Identifier:')} ${chalk.yellow(model.model_identifier)}`);
console.log(` ${chalk.gray('Size:')} ${chalk.green(model.main_size || 'Unknown')}`);
console.log(` ${chalk.gray('Context:')} ${chalk.blue(model.context_length || 'Unknown')}`);
console.log(` ${chalk.gray('Input types:')} ${chalk.magenta((model.input_types || ['text']).join(', '))}`);
console.log(` ${chalk.gray('Category:')} ${getCategoryColor(model.category || 'general')(model.category || 'general')}`);
console.log(` ${chalk.gray('Pulls:')} ${chalk.green((model.pulls || 0).toLocaleString())}`);
console.log(` ${chalk.gray('Description:')} ${model.description || model.detailed_description || 'No description'}`);
if (model.use_cases && model.use_cases.length > 0) {
console.log(` ${chalk.gray('Use cases:')} ${model.use_cases.map(uc => chalk.magenta(uc)).join(', ')}`);
}
if (model.tags && model.tags.length > 0) {
console.log(` ${chalk.gray(`Available variants (${model.tags.length}):`)} `);
// Show up to the first 15 variants, grouped 5 per line
const tagsToShow = model.tags.slice(0, 15);
for (let i = 0; i < tagsToShow.length; i += 5) {
const batch = tagsToShow.slice(i, i + 5);
console.log(` ${batch.map(tag => chalk.blue(tag)).join(', ')}`);
}
if (model.tags.length > 15) {
console.log(` ${chalk.gray(`... and ${model.tags.length - 15} more variants`)}`);
}
}
if (model.quantizations && model.quantizations.length > 0) {
console.log(` ${chalk.gray('Quantizations found:')} ${model.quantizations.map(q => chalk.green(q)).join(', ')}`);
}
console.log(` ${chalk.gray('Base command:')} ${chalk.cyan.bold(`ollama pull ${model.model_identifier}`)}`);
console.log(` ${chalk.gray('Example variant:')} ${chalk.cyan.bold(`ollama pull ${model.tags && model.tags.length > 0 ? model.tags[0] : model.model_identifier}`)}`);
console.log(` ${chalk.gray('Updated:')} ${model.last_updated || 'Unknown'}`);
});
}
function getCategoryColor(category) {
const colors = {
coding: chalk.blue,
talking: chalk.green,
reading: chalk.yellow,
reasoning: chalk.red,
multimodal: chalk.magenta,
creative: chalk.cyan,
general: chalk.gray,
chat: chalk.green,
embeddings: chalk.blue
};
return colors[category] || chalk.gray;
}
function displaySampleCommands(topModels) {
console.log('\n' + chalk.bgYellow.black.bold(' ⚡ SAMPLE COMMANDS '));
console.log(chalk.yellow('╭' + '─'.repeat(60)));
console.log(chalk.yellow('│') + ` ${chalk.bold.white('Try these popular models:')}`);
topModels.forEach((model, index) => {
const command = `ollama pull ${model.model_identifier}`;
console.log(chalk.yellow('│') + ` ${index + 1}. ${chalk.cyan.bold(command)}`);
});
console.log(chalk.yellow('│'));
console.log(chalk.yellow('│') + ` ${chalk.bold.white('More commands:')}`);
console.log(chalk.yellow('│') + ` 🔍 ${chalk.gray('llm-checker list-models --category coding')}`);
console.log(chalk.yellow('│') + ` 📊 ${chalk.gray('llm-checker list-models --popular --limit 10')}`);
console.log(chalk.yellow('│') + ` 💾 ${chalk.gray('llm-checker list-models --json > models.json')}`);
console.log(chalk.yellow('╰'));
}
function displayNextSteps(analysis) {
const stepsRaw = [];
if (!analysis.ollamaInfo.available) {
stepsRaw.push(['📥', chalk.white(`Install Ollama:`) + ' ' + chalk.underline('https://ollama.ai')]);
stepsRaw.push(['🔄', `Return here to see compatible models`]);
} else if (!analysis.ollamaModels || analysis.ollamaModels.length === 0) {
stepsRaw.push(['📦', `Install a recommended model from above`]);
stepsRaw.push(['💬', chalk.yellow(`Start chatting: ollama run <model-name>`)]);
} else {
stepsRaw.push(['🔍', chalk.yellow(`Analyze: llm-checker analyze-model <model>`)]);
stepsRaw.push(['📊', chalk.yellow(`See status: llm-checker ollama --list`)]);
stepsRaw.push(['🚀', chalk.yellow(`Try: ollama run <your-best-model>`)]);
}
console.log('\n' + chalk.bgMagenta.white.bold(' 🎯 NEXT STEPS ') + '\n' + chalk.hex('#a259ff')('╭' + '─'.repeat(40)));
stepsRaw.forEach(([icon, text], index) => {
const num = chalk.green.bold(`${index + 1}.`);
console.log(chalk.hex('#a259ff')('│') + ` ${num} ${icon} ${text}`);
});
console.log(chalk.hex('#a259ff')('╰'));
}
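// Commands registered below: check, ollama, update-db, recommend,
// list-models, and ai-check. Commander generates --help output for each.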
program
.command('check')
.description('Analyze your system and show compatible LLM models')
.option('-d, --detailed', 'Show detailed hardware information')
.option('-f, --filter <type>', 'Filter by model type')
.option('-u, --use-case <case>', 'Specify use case', 'general')
.option('--include-cloud', 'Include cloud models in analysis')
.option('--ollama-only', 'Only show models available in Ollama')
.option('--performance-test', 'Run performance benchmarks')
.option('--show-ollama-analysis', 'Show detailed Ollama model analysis')
.action(async (options) => {
const spinner = ora('🔍 Analyzing your system...').start();
try {
const checker = new LLMChecker();
spinner.text = '🖥️ Detecting hardware...';
const hardware = await checker.getSystemInfo();
spinner.text = '📦 Integrating with Ollama...';
const analysis = await checker.analyze({
filter: options.filter,
useCase: options.useCase,
includeCloud: options.includeCloud,
performanceTest: options.performanceTest
});
spinner.succeed('✅ Analysis complete!');
displaySystemInfo(hardware, analysis);
displayOllamaIntegration(analysis.ollamaInfo, analysis.ollamaModels);
displayEnhancedCompatibleModels(analysis.compatible, analysis.ollamaModels);
if (analysis.marginal.length > 0) {
displayMarginalModels(analysis.marginal);
}
if (analysis.incompatible.length > 0) {
displayIncompatibleModels(analysis.incompatible);
}
displayStructuredRecommendations(analysis.recommendations);
// Show intelligent per-category recommendations
if (analysis.intelligentRecommendations) {
displayIntelligentRecommendations(analysis.intelligentRecommendations);
}
displayNextSteps(analysis);
} catch (error) {
spinner.fail('Failed to analyze system');
console.error(chalk.red('Error:'), error.message);
if (process.env.DEBUG) {
console.error(error.stack);
}
process.exit(1);
}
});
program
.command('ollama')
.description('Manage Ollama integration with hardware compatibility')
.option('-l, --list', 'List installed models with compatibility scores')
.option('-r, --running', 'Show running models with performance data')
.option('-c, --compatible', 'Show only hardware-compatible installed models')
.option('--recommendations', 'Show installation recommendations')
.action(async (options) => {
const spinner = ora('Checking Ollama integration...').start();
try {
const checker = new LLMChecker();
const analysis = await checker.analyze();
if (!analysis.ollamaInfo.available) {
spinner.fail(`Ollama not available`);
console.log('\n💡 To install Ollama:');
console.log('🔗 Visit: https://ollama.ai');
return;
}
spinner.succeed(`Ollama integration active`);
if (options.list) {
console.log('Ollama models list feature coming soon...');
}
} catch (error) {
spinner.fail('Error with Ollama integration');
console.error(chalk.red('Error:'), error.message);
}
});
program
.command('update-db')
.description('Update Ollama models database')
.option('-f, --force', 'Force update, ignore cache')
.action(async (options) => {
const spinner = ora('Updating Ollama models database...').start();
try {
const checker = new LLMChecker();
if (options.force) {
spinner.text = '🔄 Force updating database (this may take a while)...';
const data = await checker.forceUpdateOllamaDatabase();
spinner.succeed(`✅ Database force updated! Found ${data.total_count} models`);
} else {
spinner.text = '📡 Checking for database updates...';
const data = await checker.updateOllamaDatabase();
if (data) {
spinner.succeed(`✅ Database updated! Found ${data.total_count} models`);
} else {
spinner.succeed('👍 Database is up to date');
}
}
const stats = await checker.getOllamaModelStats();
if (stats) {
console.log('\n' + chalk.bgBlue.white.bold(' 📊 DATABASE STATS '));
console.log(chalk.blue('╭' + '─'.repeat(40)));
console.log(chalk.blue('│') + ` Total models: ${chalk.green.bold(stats.total_models || 'N/A')}`);
console.log(chalk.blue('│') + ` Last updated: ${chalk.yellow(stats.last_updated || 'Unknown')}`);
console.log(chalk.blue('╰'));
}
} catch (error) {
spinner.fail('Failed to update database');
console.error(chalk.red('Error:'), error.message);
if (process.env.DEBUG) {
console.error(error.stack);
}
process.exit(1);
}
});
program
.command('recommend')
.description('Get intelligent model recommendations for your hardware')
.option('-c, --category <category>', 'Get recommendations for specific category (coding, talking, reading, etc.)')
.action(async (options) => {
const spinner = ora('🧠 Analyzing your hardware and generating recommendations...').start();
try {
const checker = new LLMChecker();
const hardware = await checker.getSystemInfo();
spinner.text = '🔍 Analyzing thousands of models...';
const intelligentRecommendations = await checker.generateIntelligentRecommendations(hardware);
if (!intelligentRecommendations) {
spinner.fail('Failed to generate recommendations');
return;
}
spinner.succeed('✅ Smart recommendations generated!');
// Show system information
displaySystemInfo(hardware, { summary: { hardwareTier: intelligentRecommendations.summary.hardware_tier } });
// Show the recommendations
displayIntelligentRecommendations(intelligentRecommendations);
} catch (error) {
spinner.fail('Failed to generate recommendations');
console.error(chalk.red('Error:'), error.message);
if (process.env.DEBUG) {
console.error(error.stack);
}
process.exit(1);
}
});
program
.command('list-models')
.description('List all models from Ollama database')
.option('-c, --category <category>', 'Filter by category (coding, talking, reading, reasoning, multimodal, creative, general)')
.option('-s, --size <size>', 'Filter by size (small, medium, large, e.g., "7b", "13b")')
.option('-p, --popular', 'Show only popular models (>100k pulls)')
.option('-r, --recent', 'Show only recent models (updated in last 30 days)')
.option('--limit <number>', 'Limit number of results (default: 50)', '50')
.option('--full', 'Show full details including variants and tags')
.option('--json', 'Output in JSON format')
.action(async (options) => {
const spinner = ora('🔍 Loading models database...').start();
try {
const checker = new LLMChecker();
const data = await checker.ollamaScraper.scrapeAllModels(false);
if (!data || !data.models) {
spinner.fail('No models found in database');
return;
}
let models = data.models;
let originalCount = models.length;
// Apply filters
if (options.category) {
models = models.filter(model =>
model.category === options.category.toLowerCase() ||
(model.use_cases && model.use_cases.includes(options.category.toLowerCase()))
);
}
if (options.size) {
const sizeFilter = options.size.toLowerCase();
models = models.filter(model =>
model.model_identifier.toLowerCase().includes(sizeFilter) ||
(model.model_sizes && model.model_sizes.some(size => size.includes(sizeFilter)))
);
}
if (options.popular) {
models = models.filter(model => (model.pulls || 0) > 100000);
}
if (options.recent) {
models = models.filter(model =>
model.last_updated && model.last_updated.includes('day')
);
}
// Sort by popularity
models.sort((a, b) => (b.pulls || 0) - (a.pulls || 0));
// Limit the results
const limit = parseInt(options.limit) || 50;
const displayModels = models.slice(0, limit);
spinner.succeed(`✅ Found ${models.length} models (showing ${displayModels.length})`);
if (options.json) {
console.log(JSON.stringify(displayModels, null, 2));
return;
}
// Show stats
displayModelsStats(originalCount, models.length, options);
// Show the models
if (options.full) {
displayFullModelsList(displayModels);
} else {
displayCompactModelsList(displayModels);
}
// Show example commands
if (displayModels.length > 0) {
displaySampleCommands(displayModels.slice(0, 3));
}
} catch (error) {
spinner.fail('Failed to load models');
console.error(chalk.red('Error:'), error.message);
if (process.env.DEBUG) {
console.error(error.stack);
}
process.exit(1);
}
});
program
.command('ai-check')
.description('AI-powered model selection for optimal hardware compatibility')
.option('-m, --models <models...>', 'Specific models to choose from')
.option('--prompt <prompt>', 'Show command to run selected model with this prompt')
.option('--benchmark', 'Benchmark available models for training data')
.option('--train', 'Train the AI selector model')
.option('--status', 'Show AI model training status')
.action(async (options) => {
// Check if Ollama is installed first
await checkOllamaAndExit();
const AIModelSelector = require('../src/ai/model-selector');
try {
const aiSelector = new AIModelSelector();
if (options.status) {
const status = aiSelector.getTrainingStatus();
console.log('\n' + chalk.bgMagenta.white.bold(' 🧠 AI MODEL STATUS '));
console.log(chalk.magenta('╭' + '─'.repeat(50)));
console.log(chalk.magenta('│') + ` Status: ${getStatusColor(status.status)(status.status.replace('_', ' ').toUpperCase())}`);
if (status.status === 'trained') {
console.log(chalk.magenta('│') + ` Model size: ${chalk.green.bold(status.modelSize + ' KB')}`);
console.log(chalk.magenta('│') + ` Version: ${chalk.cyan(status.version)}`);
console.log(chalk.magenta('│') + ` Features: ${chalk.yellow(status.features)}`);
console.log(chalk.magenta('│') + ` Last updated: ${chalk.gray(new Date(status.lastUpdated).toLocaleDateString())}`);
console.log(chalk.magenta('│') + ` Ready for use: ${chalk.green.bold('YES')}`);
} else if (status.status === 'not_trained') {
console.log(chalk.magenta('│') + ` To get started:`);
console.log(chalk.magenta('│') + ` 1. ${chalk.cyan.bold('npm run benchmark')} - Collect performance data`);
console.log(chalk.magenta('│') + ` 2. ${chalk.cyan.bold('npm run train-ai')} - Train the AI model`);
console.log(chalk.magenta('│') + ` 3. ${chalk.cyan.bold('npm run ai-check')} - Use AI selection`);
} else {
console.log(chalk.magenta('│') + ` Issue: ${chalk.red('Model files corrupted or incomplete')}`);
console.log(chalk.magenta('│') + ` Solution: ${chalk.cyan.bold('npm run train-ai')}`);
}
console.log(chalk.magenta('╰'));
return;
}
if (options.benchmark) {
const spinner = ora('🔬 Collecting benchmark data...').start();
try {
const benchmarkProcess = spawn('python', [
'ml-model/python/benchmark_collector.py'
], { stdio: 'inherit' });
benchmarkProcess.on('close', (code) => {
if (code === 0) {
spinner.succeed('✅ Benchmark data collected!');
console.log('\nNext steps:');
console.log(` 1. ${chalk.cyan.bold('npm run train-ai')} - Train the AI model`);
console.log(` 2. ${chalk.cyan.bold('npm run ai-check')} - Use AI selection`);
} else {
spinner.fail('❌ Benchmark collection failed');
}
});
return;
} catch (error) {
spinner.fail('❌ Failed to start benchmark collection');
console.error(chalk.red('Error:'), error.message);
return;
}
}
if (options.train) {
const spinner = ora('🧠 Training AI model...').start();
try {
const trainProcess = spawn('python', [
'ml-model/python/train_model.py'
], { stdio: 'inherit' });
trainProcess.on('close', (code) => {
if (code === 0) {
spinner.succeed('✅ AI model trained successfully!');
console.log('\nYou can now use AI-powered selection:');
console.log(` ${chalk.cyan.bold('npm run ai-check')}`);
} else {
spinner.fail('❌ AI model training failed');
}
});
return;
} catch (error) {
spinner.fail('❌ Failed to start training');
console.error(chalk.red('Error:'), error.message);
return;
}
}
// Main AI selection logic
const spinner = ora('🧠 AI-powered model selection...').start();
// Get system info
const checker = new LLMChecker();
const systemInfo = await checker.getSystemInfo();
// Get available models or use provided ones
let candidateModels = options.models;
if (!candidateModels) {
spinner.text = '🔍 Getting available Ollama models...';
const OllamaClient = require('../src/ollama/client');
const client = new OllamaClient();
try {
const models = await client.getLocalModels();
candidateModels = models.map(m => m.name || m.model);
if (candidateModels.length === 0) {
spinner.fail('❌ No Ollama models found');
console.log('\nInstall some models first:');
console.log(' ollama pull llama2:7b');
console.log(' ollama pull mistral:7b');
console.log(' ollama pull phi3:mini');
return;
}
} catch (error) {
spinner.fail('❌ Failed to get Ollama models');
console.error(chalk.red('Error:'), error.message);
return;
}
}
spinner.text = '🤖 Analyzing optimal model selection...';
// Build the systemSpecs feature vector the AI selector expects
const systemSpecs = {
cpu_cores: systemInfo.cpu?.cores || 4,
cpu_freq_max: systemInfo.cpu?.speed || 3.0,
total_ram_gb: systemInfo.memory?.total || 8,
gpu_vram_gb: systemInfo.gpu?.vram || 0,
gpu_model_normalized: systemInfo.gpu?.model ||
(systemInfo.cpu?.manufacturer === 'Apple' ? 'apple_silicon' : 'cpu_only')
};
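// Hypothetical example of the resulting shape on an Apple Silicon laptop:
// { cpu_cores: 10, cpu_freq_max: 3.2, total_ram_gb: 16, gpu_vram_gb: 0,
//   gpu_model_normalized: 'apple_silicon' }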
const result = await aiSelector.selectBestModel(candidateModels, systemSpecs);
spinner.succeed('✅ AI selection completed!');
// Display results
console.log('\n' + chalk.bgMagenta.white.bold(' 🧠 INTELLIGENT MODEL SELECTION '));
console.log(chalk.magenta('╭' + '─'.repeat(65)));
console.log(chalk.magenta('│') + ` 🏆 Selected Model: ${chalk.green.bold(result.bestModel)}`);
console.log(chalk.magenta('│') + ` 🎯 Selection Method: ${chalk.cyan(result.method.replace(/_/g, ' ').toUpperCase())}`);
console.log(chalk.magenta('│') + ` 📊 Confidence: ${getConfidenceColor(result.confidence)(Math.round(result.confidence * 100) + '%')}`);
if (result.score) {
console.log(chalk.magenta('│') + ` 🔢 Intelligence Score: ${getScoreColor(result.score)(Math.round(result.score))}/100`);
}
if (result.reasoning || result.reason) {
const reasoning = result.reasoning || result.reason;
console.log(chalk.magenta('│') + ` 💡 AI Analysis: ${chalk.yellow(reasoning)}`);
}
if (result.allPredictions && result.allPredictions.length > 1) {
console.log(chalk.magenta('│'));
console.log(chalk.magenta('│') + ` ${chalk.bold.white('Top Candidates:')}`);
result.allPredictions.slice(0, 5).forEach((pred, i) => {
const score = Math.round(pred.score * 100);
const icon = i === 0 ? '🥇' : i === 1 ? '🥈' : i === 2 ? '🥉' : ' ';
console.log(chalk.magenta('│') + ` ${icon} ${pred.model}: ${getConfidenceColor(pred.score)(score + '%')}`);
});
}
console.log(chalk.magenta('╰'));
// Display system info
const specs = result.systemSpecs || systemSpecs;
const hwAnalysis = result.hardware_analysis;
console.log('\n' + chalk.bgBlue.white.bold(' 💻 INTELLIGENT HARDWARE ANALYSIS '));
console.log(chalk.blue('╭' + '─'.repeat(55)));
console.log(chalk.blue('│') + ` CPU: ${chalk.green(specs.cpu_cores + ' cores')} @ ${chalk.cyan(specs.cpu_freq_max?.toFixed(1) + ' GHz')}`);
// Show appropriate memory info based on platform and GPU type
const platform = process.platform;
const isAppleSilicon = specs.gpu_model_normalized === 'apple_silicon' ||
(platform === 'darwin' && specs.gpu_model_normalized?.toLowerCase().includes('apple'));
const hasDedicatedGPU = specs.gpu_vram_gb > 0;
console.log(chalk.blue('│') + ` RAM: ${chalk.green(specs.total_ram_gb?.toFixed(1) + ' GB')}`);
if (isAppleSilicon) {
// macOS with Apple Silicon - Unified Memory
console.log(chalk.blue('│') + ` GPU: ${chalk.yellow('Apple Silicon')} ${chalk.gray('(Unified Memory)')}`);
} else if (hasDedicatedGPU) {
// Windows/Linux with dedicated GPU - Show VRAM
const gpuName = specs.gpu_model_normalized || 'Dedicated GPU';
console.log(chalk.blue('│') + ` GPU: ${chalk.yellow(gpuName)}`);
console.log(chalk.blue('│') + ` VRAM: ${chalk.green(specs.gpu_vram_gb?.toFixed(1) + ' GB')} ${chalk.gray('(Dedicated)')}`);
} else {
// Integrated GPU (Intel/AMD) or CPU-only
const gpuName = specs.gpu_model_normalized === 'cpu_only' ? 'CPU Only' :
specs.gpu_model_normalized || 'Integrated GPU';
console.log(chalk.blue('│') + ` GPU: ${chalk.yellow(gpuName)} ${chalk.gray('(Integrated)')}`);
}
if (hwAnalysis) {
console.log(chalk.blue('│'));
console.log(chalk.blue('│') + ` ${chalk.bold.white('Hardware Classification:')}`);
console.log(chalk.blue('│') + ` Overall Tier: ${getTierColor(hwAnalysis.overall_tier)(hwAnalysis.overall_tier.replace('_', ' ').toUpperCase())}`);
console.log(chalk.blue('│') + ` Available Memory: ${chalk.green(hwAnalysis.available_memory?.total?.toFixed(1) + ' GB')}`);
console.log(chalk.blue('│') + ` Performance Index: ${chalk.cyan('×' + hwAnalysis.performance_multiplier?.toFixed(1))}`);
}