// NOTE: the five lines below are npm registry page-scrape residue, not part of
// the original source file (which begins at the shebang). Commented out so the
// file remains parseable; the shebang below will only take effect if these
// leading lines are removed entirely.
// llmverify
// Version:
// AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
// 1,044 lines (1,038 loc) • 187 kB
// JavaScript
#!/usr/bin/env node
"use strict";
/**
* llmverify CLI
*
* Command-line interface for AI output verification.
*
* @module cli
* @author Haiec
* @license MIT
*/
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
var desc = Object.getOwnPropertyDescriptor(m, k);
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
desc = { enumerable: true, get: function() { return m[k]; } };
}
Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
var ownKeys = function(o) {
ownKeys = Object.getOwnPropertyNames || function (o) {
var ar = [];
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
return ar;
};
return ownKeys(o);
};
return function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
__setModuleDefault(result, mod);
return result;
};
})();
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const commander_1 = require("commander");
const chalk_1 = __importDefault(require("chalk"));
const cli_table3_1 = __importDefault(require("cli-table3"));
const fs = __importStar(require("fs"));
const path = __importStar(require("path"));
const verify_1 = require("./verify");
const constants_1 = require("./constants");
const config_1 = require("./types/config");
const program = new commander_1.Command();
program
.name('llmverify')
.description('AI Output Verification Toolkit - Local-first, privacy-preserving')
.version(constants_1.VERSION, '-V, --version', 'Output the version number')
.addHelpText('beforeAll', chalk_1.default.cyan(`
╔══════════════════════════════════════════════════════════════════════════════╗
║ llmverify v${constants_1.VERSION} — AI Output Verification Toolkit ║
║ Local-first • Zero telemetry • Privacy-preserving ║
╚══════════════════════════════════════════════════════════════════════════════╝
`))
.addHelpText('after', `
${chalk_1.default.bold('Core Commands:')}
${chalk_1.default.cyan('run')} ${chalk_1.default.yellow('★')} Master command - run all engines with presets (dev/prod/strict/fast/ci)
${chalk_1.default.cyan('verify')} Run multi-engine verification on AI output (default)
${chalk_1.default.cyan('engines')} List all verification engines with status
${chalk_1.default.cyan('explain')} Explain how a specific engine works
${chalk_1.default.cyan('adapters')} List available LLM provider adapters
${chalk_1.default.bold('Setup & Config:')}
${chalk_1.default.cyan('wizard')} ${chalk_1.default.yellow('★')} Interactive setup wizard for first-time users
${chalk_1.default.cyan('presets')} List available preset configurations
${chalk_1.default.cyan('init')} Initialize llmverify.config.json
${chalk_1.default.cyan('doctor')} Check system health and configuration
${chalk_1.default.cyan('privacy')} Show privacy guarantees
${chalk_1.default.bold('Help & Info:')}
${chalk_1.default.cyan('info')} Show package info, docs, and funding links
${chalk_1.default.cyan('tutorial')} Show usage examples and quick start guide
${chalk_1.default.bold('Quick Examples:')}
${chalk_1.default.green('$ npx llmverify run "AI output" --preset dev')} ${chalk_1.default.dim('# Master command')}
${chalk_1.default.green('$ npx llmverify run "AI output" --preset prod')} ${chalk_1.default.dim('# Production mode')}
${chalk_1.default.green('$ npx llmverify wizard')} ${chalk_1.default.dim('# First-time setup')}
${chalk_1.default.green('$ npx llmverify verify "Your AI response here"')}
${chalk_1.default.green('$ npx llmverify doctor')}
${chalk_1.default.bold('Exit Codes (CI/CD):')}
${chalk_1.default.green('0')} = Low risk (allow)
${chalk_1.default.yellow('1')} = Moderate risk (review)
${chalk_1.default.red('2')} = High/Critical risk (block)
${chalk_1.default.bold('Documentation:')}
README: ${chalk_1.default.blue('https://github.com/subodhkc/llmverify-npm#readme')}
CLI Reference: ${chalk_1.default.blue('docs/CLI-REFERENCE.md')}
Troubleshooting: ${chalk_1.default.blue('docs/TROUBLESHOOTING.md')}
${chalk_1.default.yellow('☕ Support development:')} npm fund or https://www.buymeacoffee.com/subodhkc
`);
program
.command('verify', { isDefault: true })
.description('Verify AI output for risks')
.argument('[content]', 'Content to verify (or use --file)')
.option('-f, --file <path>', 'Read content from file')
.option('-j, --json', 'Content is JSON')
.option('-c, --config <path>', 'Path to config file')
.option('-v, --verbose', 'Verbose output')
.option('-o, --output <format>', 'Output format: text, json', 'text')
.action(async (content, options) => {
try {
// Get content
let inputContent = content;
if (options.file) {
const filePath = path.resolve(options.file);
if (!fs.existsSync(filePath)) {
console.error(chalk_1.default.red(`File not found: ${filePath}`));
process.exit(1);
}
inputContent = fs.readFileSync(filePath, 'utf-8');
}
if (!inputContent) {
console.error(chalk_1.default.red('No content provided. Use --file or provide content as argument.'));
program.help();
process.exit(1);
}
// Load config
let config = {};
if (options.config) {
const configPath = path.resolve(options.config);
if (fs.existsSync(configPath)) {
config = JSON.parse(fs.readFileSync(configPath, 'utf-8'));
}
}
if (options.verbose) {
config.output = {
verbose: true,
includeEvidence: true,
includeMethodology: true,
includeLimitations: true
};
}
// Run verification
console.log(chalk_1.default.blue('\n🔍 Running llmverify...\n'));
const result = await (0, verify_1.verify)({
content: inputContent,
config,
context: {
isJSON: options.json
}
});
// Output results
if (options.output === 'json') {
console.log(JSON.stringify(result, null, 2));
}
else {
printTextResult(result, options.verbose);
}
// Exit code based on risk level
const exitCodes = {
low: 0,
moderate: 1,
high: 2,
critical: 2
};
process.exit(exitCodes[result.risk.level] || 0);
}
catch (error) {
console.error(chalk_1.default.red(`\nError: ${error.message}`));
process.exit(1);
}
});
program
.command('init')
.description('Initialize llmverify config file')
.action(() => {
const configPath = path.resolve('llmverify.config.json');
if (fs.existsSync(configPath)) {
console.log(chalk_1.default.yellow('Config file already exists: llmverify.config.json'));
return;
}
try {
const { createDefaultConfigFile } = require('./config');
createDefaultConfigFile();
console.log(chalk_1.default.green('✓ Created llmverify.config.json'));
console.log(chalk_1.default.dim(' Edit this file to customize your verification settings'));
}
catch (error) {
// Fallback to inline config creation
const config = {
tier: 'free',
engines: config_1.DEFAULT_CONFIG.engines,
performance: config_1.DEFAULT_CONFIG.performance,
output: config_1.DEFAULT_CONFIG.output
};
fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
console.log(chalk_1.default.green('✓ Created llmverify.config.json'));
}
});
program
.command('privacy')
.description('Show privacy guarantees')
.action(() => {
console.log(chalk_1.default.blue('\n📋 llmverify Privacy Guarantees\n'));
console.log(chalk_1.default.green('Free Tier:'));
console.log(` • Network Traffic: ${constants_1.PRIVACY_GUARANTEE.freeTier.networkTraffic}`);
console.log(` • Data Transmission: ${constants_1.PRIVACY_GUARANTEE.freeTier.dataTransmission}`);
console.log(` • Telemetry: ${constants_1.PRIVACY_GUARANTEE.freeTier.telemetry}`);
console.log(` • Verification: ${constants_1.PRIVACY_GUARANTEE.freeTier.verification}`);
console.log(chalk_1.default.yellow('\nPaid Tiers:'));
console.log(` • Default: ${constants_1.PRIVACY_GUARANTEE.paidTiers.defaultBehavior}`);
console.log(` • API Calls: ${constants_1.PRIVACY_GUARANTEE.paidTiers.apiCalls}`);
console.log(` • Requires: ${constants_1.PRIVACY_GUARANTEE.paidTiers.requires}`);
console.log(chalk_1.default.red('\nWe NEVER:'));
constants_1.PRIVACY_GUARANTEE.neverEver.forEach(item => {
console.log(` ✗ ${item}`);
});
console.log();
});
// ============================================================================
// COMMAND: info
// ============================================================================
program
.command('info')
.description('Show package info, docs, privacy, and funding options')
.option('--json', 'Output as JSON')
.action((options) => {
const info = {
name: 'llmverify',
version: constants_1.VERSION,
maintainer: 'Subodh KC (KingCaliber Labs)',
engines: [
'classification (intent, hallucination, reasoning)',
'CSM6 (security, PII, harmful content, injection)',
'hallucination detection',
'drift analysis',
'latency monitoring',
'token-rate tracking'
],
docs: {
readme: 'README.md',
cli: 'docs/CLI.md',
engines: 'docs/ENGINES.md',
api: 'docs/API.md'
},
privacy: 'No telemetry, no remote logging. All analysis local.',
funding: 'https://www.buymeacoffee.com/subodhkc'
};
if (options.json) {
console.log(JSON.stringify(info, null, 2));
return;
}
console.log(chalk_1.default.blue('\n📦 llmverify Package Information\n'));
console.log(chalk_1.default.bold('Package'));
console.log(chalk_1.default.gray('─'.repeat(50)));
console.log(` ${chalk_1.default.cyan('Name:')} ${info.name}`);
console.log(` ${chalk_1.default.cyan('Version:')} ${info.version}`);
console.log(` ${chalk_1.default.cyan('Maintainer:')} ${info.maintainer}`);
console.log();
console.log(chalk_1.default.bold('Engines Included'));
console.log(chalk_1.default.gray('─'.repeat(50)));
info.engines.forEach(engine => {
console.log(` ${chalk_1.default.green('✓')} ${engine}`);
});
console.log();
console.log(chalk_1.default.bold('Documentation'));
console.log(chalk_1.default.gray('─'.repeat(50)));
Object.entries(info.docs).forEach(([key, value]) => {
console.log(` ${chalk_1.default.cyan(key.toUpperCase().padEnd(10))} ${value}`);
});
console.log();
console.log(chalk_1.default.bold('Privacy'));
console.log(chalk_1.default.gray('─'.repeat(50)));
console.log(` ${chalk_1.default.green('🔒')} ${info.privacy}`);
console.log();
console.log(chalk_1.default.bold('Support Development'));
console.log(chalk_1.default.gray('─'.repeat(50)));
console.log(` ${chalk_1.default.yellow('☕')} ${info.funding}`);
console.log();
});
// ============================================================================
// COMMAND: engines
// ============================================================================
program
.command('engines')
.description('List all verification engines with status')
.option('--json', 'Output as JSON')
.action((options) => {
const engines = [
{ name: 'classification', status: 'enabled', description: 'Intent, hallucination, reasoning detection' },
{ name: 'csm6', status: 'enabled', description: 'Security checks (PII, harmful content, injection)' },
{ name: 'hallucination', status: 'enabled', description: 'Hallucination and factuality detection' },
{ name: 'drift', status: 'enabled', description: 'Fingerprint drift analysis' },
{ name: 'token-rate', status: 'disabled', description: 'Token rate monitoring (static mode)' },
{ name: 'latency', status: 'disabled', description: 'Latency tracking (no wrapping client)' }
];
if (options.json) {
console.log(JSON.stringify(engines, null, 2));
return;
}
console.log(chalk_1.default.blue('\n🔧 Verification Engines\n'));
engines.forEach(engine => {
const statusIcon = engine.status === 'enabled'
? chalk_1.default.green('●')
: chalk_1.default.gray('○');
const statusText = engine.status === 'enabled'
? chalk_1.default.green('enabled')
: chalk_1.default.gray('disabled');
console.log(` ${statusIcon} ${chalk_1.default.cyan(engine.name.padEnd(16))} ${statusText.padEnd(18)} ${chalk_1.default.gray(engine.description)}`);
});
console.log();
});
// ============================================================================
// COMMAND: explain
// ============================================================================
program
.command('explain <engine>')
.description('Explain how a verification engine works')
.action((engine) => {
const explanations = {
'hallucination': {
description: 'Detects AI-generated content that may be factually incorrect or fabricated.',
signals: [
'contradiction signal - conflicting statements within response',
'low-confidence signal - hedging language patterns',
'compression signal - information density anomalies',
'domain mismatch signal - out-of-context claims',
'pattern mismatch signal - structural inconsistencies'
]
},
'classification': {
description: 'Classifies AI output by intent, reasoning quality, and potential issues.',
signals: [
'intent classification - what the AI is trying to do',
'reasoning quality - logical consistency check',
'confidence scoring - certainty of classification',
'category mapping - maps to risk categories'
]
},
'csm6': {
description: 'CSM6 security framework for comprehensive content safety.',
signals: [
'PII detection - personal identifiable information',
'harmful content - violence, hate, self-harm',
'prompt injection - manipulation attempts',
'jailbreak detection - bypass attempts',
'data leakage - sensitive information exposure'
]
},
'drift': {
description: 'Monitors changes in AI behavior over time.',
signals: [
'fingerprint comparison - baseline vs current',
'distribution shift - output pattern changes',
'vocabulary drift - language changes',
'confidence drift - certainty changes'
]
}
};
const info = explanations[engine];
if (!info) {
console.log(chalk_1.default.red(`\nUnknown engine: ${engine}`));
console.log(chalk_1.default.gray('Available engines: ' + Object.keys(explanations).join(', ')));
console.log();
return;
}
console.log(chalk_1.default.blue(`\n🔍 Engine: ${engine}\n`));
console.log(chalk_1.default.gray('─'.repeat(50)));
console.log(info.description);
console.log();
console.log(chalk_1.default.bold('Detection Signals:'));
info.signals.forEach(signal => {
console.log(` ${chalk_1.default.cyan('•')} ${signal}`);
});
console.log();
});
// ============================================================================
// COMMAND: doctor (hidden)
// ============================================================================
program
.command('doctor')
.description('Check system health and configuration')
.action(() => {
console.log(chalk_1.default.blue('\n🩺 llmverify Doctor\n'));
console.log(chalk_1.default.gray('─'.repeat(50)));
// Node version check
const nodeVersion = process.version;
const nodeMajor = parseInt(nodeVersion.slice(1).split('.')[0]);
const nodeOk = nodeMajor >= 18;
console.log(` ${nodeOk ? chalk_1.default.green('✓') : chalk_1.default.red('✗')} Node.js Version: ${nodeVersion} ${nodeOk ? '' : chalk_1.default.red('(requires >=18)')}`);
// Config file check
const configPath = path.resolve('llmverify.config.json');
const configExists = fs.existsSync(configPath);
console.log(` ${configExists ? chalk_1.default.green('✓') : chalk_1.default.yellow('○')} Config File: ${configExists ? 'Found' : 'Not found (optional)'}`);
// Environment variables
const envVars = ['OPENAI_API_KEY', 'ANTHROPIC_API_KEY'];
envVars.forEach(envVar => {
const exists = !!process.env[envVar];
console.log(` ${exists ? chalk_1.default.green('✓') : chalk_1.default.gray('○')} ${envVar}: ${exists ? 'Set' : 'Not set'}`);
});
// Postinstall check
const postinstallPath = path.resolve(__dirname, 'postinstall.js');
const postinstallExists = fs.existsSync(postinstallPath);
console.log(` ${postinstallExists ? chalk_1.default.green('✓') : chalk_1.default.yellow('○')} Postinstall: ${postinstallExists ? 'Present' : 'Not found'}`);
console.log();
console.log(chalk_1.default.dim('Run "llmverify init" to create a config file.'));
console.log();
});
// ============================================================================
// COMMAND: version (detailed)
// ============================================================================
program
.command('version')
.description('Show detailed version information')
.option('--detailed', 'Show detailed system information')
.option('--json', 'Output as JSON')
.action((options) => {
const versionInfo = {
package: {
name: 'llmverify',
version: constants_1.VERSION,
description: 'AI Output Verification Toolkit'
},
system: {
node: process.version,
platform: process.platform,
arch: process.arch,
cwd: process.cwd()
},
engines: {
classification: 'enabled',
csm6: 'enabled',
hallucination: 'enabled',
drift: 'enabled',
'token-rate': 'disabled (static mode)',
latency: 'disabled (no client)'
},
adapters: ['openai', 'anthropic', 'groq', 'google', 'deepseek', 'mistral', 'cohere', 'local', 'custom'],
compliance: ['OWASP LLM Top 10', 'NIST AI RMF', 'EU AI Act', 'ISO 42001'],
privacy: 'Zero telemetry, 100% local processing',
links: {
repository: 'https://github.com/subodhkc/llmverify-npm',
issues: 'https://github.com/subodhkc/llmverify-npm/issues',
funding: 'https://www.buymeacoffee.com/subodhkc'
}
};
if (options.json) {
console.log(JSON.stringify(versionInfo, null, 2));
return;
}
if (options.detailed) {
console.log(chalk_1.default.blue('\n📦 llmverify Detailed Version Information\n'));
console.log(chalk_1.default.gray('═'.repeat(60)));
console.log(chalk_1.default.bold('\nPackage'));
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log(` ${chalk_1.default.cyan('Name:')} ${versionInfo.package.name}`);
console.log(` ${chalk_1.default.cyan('Version:')} ${versionInfo.package.version}`);
console.log(` ${chalk_1.default.cyan('Description:')} ${versionInfo.package.description}`);
console.log(chalk_1.default.bold('\nSystem'));
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log(` ${chalk_1.default.cyan('Node.js:')} ${versionInfo.system.node}`);
console.log(` ${chalk_1.default.cyan('Platform:')} ${versionInfo.system.platform}`);
console.log(` ${chalk_1.default.cyan('Architecture:')} ${versionInfo.system.arch}`);
console.log(` ${chalk_1.default.cyan('Working Dir:')} ${versionInfo.system.cwd}`);
console.log(chalk_1.default.bold('\nEngines'));
console.log(chalk_1.default.gray('─'.repeat(60)));
Object.entries(versionInfo.engines).forEach(([engine, status]) => {
const icon = status === 'enabled' ? chalk_1.default.green('●') : chalk_1.default.gray('○');
console.log(` ${icon} ${chalk_1.default.cyan(engine.padEnd(16))} ${status}`);
});
console.log(chalk_1.default.bold('\nAdapters'));
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log(` ${versionInfo.adapters.join(', ')}`);
console.log(chalk_1.default.bold('\nCompliance Frameworks'));
console.log(chalk_1.default.gray('─'.repeat(60)));
versionInfo.compliance.forEach(framework => {
console.log(` ${chalk_1.default.green('✓')} ${framework}`);
});
console.log(chalk_1.default.bold('\nPrivacy'));
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log(` ${chalk_1.default.green('🔒')} ${versionInfo.privacy}`);
console.log(chalk_1.default.bold('\nLinks'));
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log(` ${chalk_1.default.cyan('Repository:')} ${versionInfo.links.repository}`);
console.log(` ${chalk_1.default.cyan('Issues:')} ${versionInfo.links.issues}`);
console.log(` ${chalk_1.default.cyan('Funding:')} ${versionInfo.links.funding}`);
console.log();
}
else {
console.log(`llmverify v${constants_1.VERSION}`);
}
});
// ============================================================================
// COMMAND: tutorial
// ============================================================================
program
.command('tutorial')
.description('Show usage examples and quick start guide')
.action(() => {
console.log(chalk_1.default.blue('\n📚 llmverify Quick Start Guide\n'));
console.log(chalk_1.default.gray('═'.repeat(60)));
console.log();
console.log(chalk_1.default.bold('1. Basic Verification'));
console.log(chalk_1.default.gray(' Verify AI output directly:'));
console.log(chalk_1.default.cyan(' $ npx llmverify verify "Your AI response here"'));
console.log();
console.log(chalk_1.default.bold('2. Verify from File'));
console.log(chalk_1.default.gray(' Verify content from a file:'));
console.log(chalk_1.default.cyan(' $ npx llmverify verify --file response.txt'));
console.log();
console.log(chalk_1.default.bold('3. JSON Output'));
console.log(chalk_1.default.gray(' Get results as JSON for programmatic use:'));
console.log(chalk_1.default.cyan(' $ npx llmverify verify "content" --output json'));
console.log();
console.log(chalk_1.default.bold('4. Initialize Config'));
console.log(chalk_1.default.gray(' Create a config file for your project:'));
console.log(chalk_1.default.cyan(' $ npx llmverify init'));
console.log();
console.log(chalk_1.default.bold('5. Check Engines'));
console.log(chalk_1.default.gray(' See available verification engines:'));
console.log(chalk_1.default.cyan(' $ npx llmverify engines'));
console.log();
console.log(chalk_1.default.bold('6. Learn About Engines'));
console.log(chalk_1.default.gray(' Understand how detection works:'));
console.log(chalk_1.default.cyan(' $ npx llmverify explain hallucination'));
console.log();
console.log(chalk_1.default.bold('7. System Health'));
console.log(chalk_1.default.gray(' Verify your setup:'));
console.log(chalk_1.default.cyan(' $ npx llmverify doctor'));
console.log();
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log(`For more help: ${chalk_1.default.cyan('npx llmverify --help')}`);
console.log(`Documentation: ${chalk_1.default.cyan('https://github.com/subodhkc/llmverify-npm')}`);
console.log();
});
// ============================================================================
// COMMAND: run (Master command with presets)
// ============================================================================
const core_1 = require("./core");
program
.command('run')
.description('Run all verification engines with preset configuration (dev/prod/strict/fast/ci)')
.argument('[content]', 'Content to verify (or use --file)')
.option('-f, --file <path>', 'Read content from file')
.option('-p, --preset <mode>', 'Preset mode: dev, prod, strict, fast, ci', 'dev')
.option('--prompt <text>', 'Original prompt for classification')
.option('--input <text>', 'User input to check for injection')
.option('-o, --output <format>', 'Output format: text, json, summary', 'text')
.option('--parallel', 'Run engines in parallel (default: true)', true)
.option('--no-parallel', 'Run engines sequentially')
.action(async (content, options) => {
try {
// Get content
let inputContent = content;
if (options.file) {
const filePath = path.resolve(options.file);
if (!fs.existsSync(filePath)) {
console.error(chalk_1.default.red(`File not found: ${filePath}`));
process.exit(1);
}
inputContent = fs.readFileSync(filePath, 'utf-8');
}
if (!inputContent) {
console.error(chalk_1.default.red('No content provided. Use --file or provide content as argument.'));
process.exit(1);
}
const preset = options.preset;
if (!['dev', 'prod', 'strict', 'fast', 'ci'].includes(preset)) {
console.error(chalk_1.default.red(`Invalid preset: ${preset}. Use: dev, prod, strict, fast, ci`));
process.exit(1);
}
console.log(chalk_1.default.blue(`\n🚀 Running llmverify with ${chalk_1.default.bold(preset.toUpperCase())} preset...\n`));
const startTime = Date.now();
const result = await (0, core_1.run)({
content: inputContent,
prompt: options.prompt,
userInput: options.input,
preset,
parallel: options.parallel
});
if (options.output === 'json') {
console.log(JSON.stringify(result, null, 2));
}
else if (options.output === 'summary') {
printRunSummary(result);
}
else {
printRunResult(result);
}
// Exit code based on risk level
const exitCodes = {
low: 0,
moderate: 1,
high: 2,
critical: 2
};
process.exit(exitCodes[result.verification.risk.level] || 0);
}
catch (error) {
console.error(chalk_1.default.red(`\nError: ${error.message}`));
process.exit(1);
}
});
function printRunResult(result) {
const riskColors = {
low: chalk_1.default.green,
moderate: chalk_1.default.yellow,
high: chalk_1.default.red,
critical: chalk_1.default.bgRed.white
};
// Header
console.log(chalk_1.default.gray('═'.repeat(60)));
console.log(chalk_1.default.bold('📊 VERIFICATION RESULTS'));
console.log(chalk_1.default.gray('═'.repeat(60)));
console.log();
// Risk Assessment
const riskColor = riskColors[result.verification.risk.level] || chalk_1.default.white;
console.log(chalk_1.default.bold('Risk Assessment'));
console.log(chalk_1.default.gray('─'.repeat(40)));
console.log(` Level: ${riskColor(result.verification.risk.level.toUpperCase())}`);
console.log(` Score: ${(result.verification.risk.overall * 100).toFixed(1)}%`);
console.log(` Action: ${result.verification.risk.action}`);
console.log();
// Classification (if available)
if (result.classification) {
console.log(chalk_1.default.bold('Classification'));
console.log(chalk_1.default.gray('─'.repeat(40)));
console.log(` Intent: ${chalk_1.default.cyan(result.classification.intent)}`);
console.log(` Hallucination Risk: ${getHallucinationColor(result.classification.hallucinationLabel)(result.classification.hallucinationLabel)} (${(result.classification.hallucinationRisk * 100).toFixed(0)}%)`);
if (result.classification.isJson) {
console.log(` JSON Valid: ${chalk_1.default.green('✓')}`);
}
console.log();
}
// Input Safety (if checked)
if (result.inputSafety) {
console.log(chalk_1.default.bold('Input Safety'));
console.log(chalk_1.default.gray('─'.repeat(40)));
const safeIcon = result.inputSafety.safe ? chalk_1.default.green('✓ Safe') : chalk_1.default.red('✗ Unsafe');
console.log(` Status: ${safeIcon}`);
console.log(` Findings: ${result.inputSafety.injectionFindings.length}`);
console.log();
}
// PII Check
if (result.piiCheck) {
console.log(chalk_1.default.bold('PII Detection'));
console.log(chalk_1.default.gray('─'.repeat(40)));
const piiIcon = result.piiCheck.hasPII ? chalk_1.default.yellow('⚠ Found') : chalk_1.default.green('✓ None');
console.log(` Status: ${piiIcon}`);
console.log(` Count: ${result.piiCheck.piiCount}`);
console.log();
}
// Harmful Content
if (result.harmfulCheck) {
console.log(chalk_1.default.bold('Harmful Content'));
console.log(chalk_1.default.gray('─'.repeat(40)));
const harmIcon = result.harmfulCheck.hasHarmful ? chalk_1.default.red('✗ Found') : chalk_1.default.green('✓ None');
console.log(` Status: ${harmIcon}`);
console.log(` Findings: ${result.harmfulCheck.findings.length}`);
console.log();
}
// Meta
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log(chalk_1.default.dim(`Preset: ${result.meta.preset} | Engines: ${result.meta.enginesRun.join(', ')}`));
console.log(chalk_1.default.dim(`Latency: ${result.meta.totalLatencyMs}ms | ${result.meta.timestamp}`));
console.log();
}
function printRunSummary(result) {
const riskColors = {
low: chalk_1.default.green,
moderate: chalk_1.default.yellow,
high: chalk_1.default.red,
critical: chalk_1.default.bgRed.white
};
const riskColor = riskColors[result.verification.risk.level] || chalk_1.default.white;
console.log(`${riskColor('●')} Risk: ${riskColor(result.verification.risk.level.toUpperCase())} | Action: ${result.verification.risk.action} | ${result.meta.totalLatencyMs}ms`);
const checks = [];
if (result.inputSafety)
checks.push(result.inputSafety.safe ? '✓input' : '✗input');
if (result.piiCheck)
checks.push(result.piiCheck.hasPII ? '⚠pii' : '✓pii');
if (result.harmfulCheck)
checks.push(result.harmfulCheck.hasHarmful ? '✗harm' : '✓harm');
if (result.classification)
checks.push(`intent:${result.classification.intent}`);
if (checks.length > 0) {
console.log(chalk_1.default.dim(` ${checks.join(' | ')}`));
}
}
function getHallucinationColor(label) {
switch (label) {
case 'low': return chalk_1.default.green;
case 'medium': return chalk_1.default.yellow;
case 'high': return chalk_1.default.red;
default: return chalk_1.default.white;
}
}
// ============================================================================
// COMMAND: wizard (Interactive setup)
// ============================================================================
program
.command('wizard')
.description('Interactive setup wizard for first-time configuration')
.action(async () => {
console.log(chalk_1.default.blue(`
╔══════════════════════════════════════════════════════════════════════════════╗
║ ║
║ ${chalk_1.default.bold('🧙 llmverify Setup Wizard')} ║
║ ║
║ This wizard will help you configure llmverify for your project. ║
║ ║
╚══════════════════════════════════════════════════════════════════════════════╝
`));
// Since we can't use readline in a simple way, provide guided instructions
console.log(chalk_1.default.bold('\n📋 Step 1: Choose Your Preset\n'));
console.log(chalk_1.default.gray('─'.repeat(60)));
const presetTable = new cli_table3_1.default({
head: ['Preset', 'Use Case', 'Speed', 'Thoroughness'],
style: { head: ['cyan'] }
});
presetTable.push([chalk_1.default.green('dev'), 'Local development & testing', '●●●○○', '●●●●○'], [chalk_1.default.yellow('prod'), 'Production APIs (low latency)', '●●●●●', '●●●○○'], [chalk_1.default.red('strict'), 'High-stakes, compliance', '●●○○○', '●●●●●'], [chalk_1.default.cyan('fast'), 'High-throughput pipelines', '●●●●●', '●●○○○'], [chalk_1.default.magenta('ci'), 'CI/CD pipelines', '●●●●○', '●●●●○']);
console.log(presetTable.toString());
console.log();
console.log(chalk_1.default.bold('\n📋 Step 2: Quick Start Commands\n'));
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log();
console.log(chalk_1.default.dim(' # Run with dev preset (recommended for starting)'));
console.log(chalk_1.default.green(' npx llmverify run "Your AI output" --preset dev'));
console.log();
console.log(chalk_1.default.dim(' # Run with production preset'));
console.log(chalk_1.default.green(' npx llmverify run "Your AI output" --preset prod'));
console.log();
console.log(chalk_1.default.dim(' # Run with classification (provide original prompt)'));
console.log(chalk_1.default.green(' npx llmverify run "AI response" --prompt "Original question" --preset dev'));
console.log();
console.log(chalk_1.default.dim(' # Check user input for injection attacks'));
console.log(chalk_1.default.green(' npx llmverify run "AI response" --input "User message" --preset strict'));
console.log();
console.log(chalk_1.default.dim(' # Output as JSON for programmatic use'));
console.log(chalk_1.default.green(' npx llmverify run "Your AI output" --preset ci --output json'));
console.log();
console.log(chalk_1.default.bold('\n📋 Step 3: Initialize Config File\n'));
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log();
console.log(chalk_1.default.dim(' Create a config file for persistent settings:'));
console.log(chalk_1.default.green(' npx llmverify init'));
console.log();
console.log(chalk_1.default.dim(' This creates llmverify.config.json in your project root.'));
console.log();
console.log(chalk_1.default.bold('\n📋 Step 4: Programmatic Usage\n'));
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log();
console.log(chalk_1.default.dim(' // Quick verification with preset'));
console.log(chalk_1.default.cyan(` import { run, devVerify, prodVerify } from 'llmverify';`));
console.log();
console.log(chalk_1.default.dim(' // Using the run function with options'));
console.log(chalk_1.default.white(` const result = await run({`));
console.log(chalk_1.default.white(` content: aiOutput,`));
console.log(chalk_1.default.white(` prompt: originalPrompt,`));
console.log(chalk_1.default.white(` preset: 'dev'`));
console.log(chalk_1.default.white(` });`));
console.log();
console.log(chalk_1.default.dim(' // Quick helpers'));
console.log(chalk_1.default.white(` const result = await devVerify(aiOutput, prompt);`));
console.log(chalk_1.default.white(` const result = await prodVerify(aiOutput);`));
console.log(chalk_1.default.white(` const result = await strictVerify(aiOutput, prompt);`));
console.log();
console.log(chalk_1.default.bold('\n📋 Step 5: Verify Setup\n'));
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log();
console.log(chalk_1.default.dim(' Run the doctor command to verify your setup:'));
console.log(chalk_1.default.green(' npx llmverify doctor'));
console.log();
console.log(chalk_1.default.bold('\n📚 Additional Resources\n'));
console.log(chalk_1.default.gray('─'.repeat(60)));
console.log(` ${chalk_1.default.cyan('Documentation:')} https://github.com/subodhkc/llmverify-npm#readme`);
console.log(` ${chalk_1.default.cyan('CLI Reference:')} docs/CLI-REFERENCE.md`);
console.log(` ${chalk_1.default.cyan('Troubleshooting:')} docs/TROUBLESHOOTING.md`);
console.log(` ${chalk_1.default.cyan('Getting Started:')} docs/GETTING-STARTED.md`);
console.log();
console.log(chalk_1.default.green.bold('\n✓ Wizard complete! You\'re ready to use llmverify.\n'));
console.log(chalk_1.default.dim('Run "npx llmverify run --help" for more options.\n'));
});
// ============================================================================
// COMMAND: presets (List available presets)
// ============================================================================
program
    .command('presets')
    .description('List available preset configurations')
    .option('--json', 'Output as JSON')
    .action((options) => {
    // --json emits the raw preset configuration objects for scripting;
    // the human-readable listing below is informational only.
    if (options.json) {
        console.log(JSON.stringify(core_1.PRESETS, null, 2));
        return;
    }
    console.log(chalk_1.default.blue('\n⚙️ Available Presets\n'));
    console.log(chalk_1.default.gray('═'.repeat(60)));
    console.log();
    // Display metadata for each preset. The authoritative runtime configs
    // live in core_1.PRESETS (see the --json branch above).
    const presetInfo = [
        {
            name: 'dev',
            description: 'Development mode - balanced, informative output',
            useCase: 'Local development and testing',
            engines: ['hallucination', 'consistency', 'jsonValidator', 'csm6'],
            speed: '●●●○○',
            thoroughness: '●●●●○'
        },
        {
            name: 'prod',
            description: 'Production mode - optimized for speed',
            useCase: 'Production APIs with latency requirements',
            engines: ['jsonValidator', 'csm6'],
            speed: '●●●●●',
            thoroughness: '●●●○○'
        },
        {
            name: 'strict',
            description: 'Strict mode - all engines, maximum scrutiny',
            useCase: 'High-stakes content, compliance requirements',
            engines: ['hallucination', 'consistency', 'jsonValidator', 'csm6 (all checks)'],
            speed: '●●○○○',
            thoroughness: '●●●●●'
        },
        {
            name: 'fast',
            description: 'Fast mode - minimal checks, maximum speed',
            useCase: 'High-throughput scenarios',
            engines: ['csm6 (security only)'],
            speed: '●●●●●',
            thoroughness: '●●○○○'
        },
        {
            name: 'ci',
            description: 'CI mode - optimized for CI/CD pipelines',
            useCase: 'Automated testing and deployment',
            engines: ['hallucination', 'consistency', 'jsonValidator', 'csm6'],
            speed: '●●●●○',
            thoroughness: '●●●●○'
        }
    ];
    // Hoisted out of the render loop (it was rebuilt per iteration):
    // one chalk color per preset name, defaulting to white for unknowns.
    const nameColors = {
        dev: chalk_1.default.green,
        prod: chalk_1.default.yellow,
        strict: chalk_1.default.red,
        fast: chalk_1.default.cyan,
        ci: chalk_1.default.magenta
    };
    presetInfo.forEach((preset) => {
        const nameColor = nameColors[preset.name] || chalk_1.default.white;
        console.log(nameColor.bold(preset.name.toUpperCase()));
        console.log(chalk_1.default.gray('─'.repeat(40)));
        console.log(` ${chalk_1.default.dim('Description:')} ${preset.description}`);
        console.log(` ${chalk_1.default.dim('Use Case:')} ${preset.useCase}`);
        console.log(` ${chalk_1.default.dim('Speed:')} ${preset.speed}`);
        console.log(` ${chalk_1.default.dim('Thoroughness:')} ${preset.thoroughness}`);
        console.log(` ${chalk_1.default.dim('Engines:')} ${preset.engines.join(', ')}`);
        console.log();
    });
    console.log(chalk_1.default.dim('Usage: npx llmverify run "content" --preset <name>'));
    console.log();
});
// ============================================================================
// COMMAND: benchmark
// ============================================================================
program
    .command('benchmark')
    .description('Benchmark verification latency across all presets')
    .option('-i, --iterations <n>', 'Number of iterations per preset', '3')
    .option('-c, --content <text>', 'Custom content to benchmark', 'The capital of France is Paris. This is a test response from an AI assistant.')
    .option('--json', 'Output results as JSON')
    .action(async (options) => {
    const iterations = parseInt(options.iterations, 10);
    const content = options.content;
    // Guard: a non-numeric or non-positive count would skip the timing loop
    // entirely and produce NaN avg / Infinity min-max in the report below.
    if (!Number.isInteger(iterations) || iterations < 1) {
        console.error(chalk_1.default.red(`Invalid --iterations value: ${options.iterations}`));
        process.exitCode = 1;
        return;
    }
    // Ordered roughly fastest-to-slowest so quick results appear first.
    const presetNames = ['fast', 'prod', 'dev', 'ci', 'strict'];
    console.log(chalk_1.default.blue(`\n⏱️ Benchmarking llmverify (${iterations} iterations per preset)\n`));
    console.log(chalk_1.default.gray('─'.repeat(60)));
    console.log(chalk_1.default.dim(`Content: "${content.substring(0, 50)}${content.length > 50 ? '...' : ''}"`));
    console.log(chalk_1.default.gray('─'.repeat(60)));
    console.log();
    const results = [];
    for (const preset of presetNames) {
        const times = [];
        process.stdout.write(chalk_1.default.cyan(` ${preset.padEnd(8)}`));
        for (let i = 0; i < iterations; i++) {
            // Wall-clock each run; a dot per iteration gives live progress.
            const start = Date.now();
            await (0, core_1.run)({ content, preset });
            const elapsed = Date.now() - start;
            times.push(elapsed);
            process.stdout.write(chalk_1.default.dim('.'));
        }
        const avg = times.reduce((a, b) => a + b, 0) / times.length;
        const min = Math.min(...times);
        const max = Math.max(...times);
        results.push({ preset, avgMs: avg, minMs: min, maxMs: max, iterations });
        // Color based on speed
        const avgColor = avg < 20 ? chalk_1.default.green : avg < 50 ? chalk_1.default.yellow : chalk_1.default.red;
        console.log(` ${avgColor(`${avg.toFixed(1)}ms`)} avg (${min}-${max}ms)`);
    }
    console.log();
    if (options.json) {
        console.log(JSON.stringify({ benchmarks: results, content: content.substring(0, 100) }, null, 2));
    }
    else {
        // Summary table
        console.log(chalk_1.default.bold('📊 Summary'));
        console.log(chalk_1.default.gray('─'.repeat(60)));
        const table = new cli_table3_1.default({
            head: ['Preset', 'Avg (ms)', 'Min (ms)', 'Max (ms)', 'Speed'],
            style: { head: ['cyan'] }
        });
        results.forEach(r => {
            // Bucket the average latency into a 5-dot speed rating.
            const speedBars = r.avgMs < 15 ? '●●●●●' : r.avgMs < 25 ? '●●●●○' : r.avgMs < 40 ? '●●●○○' : r.avgMs < 60 ? '●●○○○' : '●○○○○';
            const avgColor = r.avgMs < 20 ? chalk_1.default.green : r.avgMs < 50 ? chalk_1.default.yellow : chalk_1.default.red;
            table.push([
                r.preset,
                avgColor(r.avgMs.toFixed(1)),
                r.minMs.toString(),
                r.maxMs.toString(),
                speedBars
            ]);
        });
        console.log(table.toString());
        console.log();
        console.log(chalk_1.default.dim('Tip: Use --preset fast for high-throughput, --preset strict for compliance'));
        console.log();
    }
});
// ============================================================================
// COMMAND: baseline (baseline metrics and drift detection)
// ============================================================================
program
    .command('baseline')
    .description('Manage baseline metrics and drift detection')
    .action(() => {
    // Umbrella command: only lists the available baseline:* subcommands.
    console.log(chalk_1.default.blue('\n📊 Baseline Management\n'));
    console.log('Available subcommands:');
    const subcommands = [
        ['baseline stats', 'Show baseline statistics'],
        ['baseline reset', 'Reset baseline metrics'],
        ['baseline drift', 'Show recent drift records']
    ];
    for (const [cmd, desc] of subcommands) {
        console.log(` ${chalk_1.default.cyan(cmd)} - ${desc}`);
    }
    console.log('\nRun with --help for more information');
});
program
    .command('baseline:stats')
    .description('Show baseline statistics')
    .action(() => {
    try {
        // Lazy require so the storage module is only loaded when needed.
        const { getBaselineStorage } = require('./baseline/storage');
        const stats = getBaselineStorage().getStatistics();
        console.log(chalk_1.default.blue('\n📊 Baseline Statistics\n'));
        // No data yet: the baseline accrues automatically during normal use.
        if (!stats.hasBaseline) {
            console.log(chalk_1.default.yellow('No baseline data available yet.'));
            console.log(chalk_1.default.dim('Baseline will be created automatically as you use llmverify.'));
            return;
        }
        const label = chalk_1.default.green;
        console.log(label('Status:'), 'Active');
        console.log(label('Sample Count:'), stats.sampleCount);
        console.log(label('Created:'), stats.createdAt);
        console.log(label('Updated:'), stats.updatedAt);
        console.log(label('Drift Records:'), stats.driftRecordCount);
        if (stats.recentDrifts.length === 0) {
            console.log(chalk_1.default.green('\n✓ No significant drift detected'));
            return;
        }
        console.log(chalk_1.default.yellow('\n⚠️ Recent Drift Detected:\n'));
        for (const drift of stats.recentDrifts) {
            console.log(` ${chalk_1.default.cyan(drift.metric)}: ${drift.driftPercent.toFixed(2)}% (${drift.severity})`);
        }
    }
    catch (error) {
        console.error(chalk_1.default.red('Failed to load baseline stats:', error.message));
    }
});
program
    .command('baseline:reset')
    .description('Reset baseline metrics')
    .action(() => {
    try {
        // Lazy require keeps startup cheap for unrelated commands.
        const { getBaselineStorage } = require('./baseline/storage');
        getBaselineStorage().resetBaseline();
        console.log(chalk_1.default.green('\n✓ Baseline reset successfully'));
        console.log(chalk_1.default.dim('New baseline will be created on next verification'));
    }
    catch (error) {
        console.error(chalk_1.default.red('Failed to reset baseline:', error.message));
    }
});
program
    .command('baseline:drift')
    .description('Show recent drift records')
    .option('-n, --limit <number>', 'Number of records to show', '20')
    .action((options) => {
    try {
        const { getBaselineStorage } = require('./baseline/storage');
        const storage = getBaselineStorage();
        // Always parse with an explicit radix; fall back to the documented
        // default of 20 when the flag value is not a usable positive integer
        // (previously NaN was passed straight through to readDriftHistory).
        let limit = parseInt(options.limit, 10);
        if (!Number.isInteger(limit) || limit < 1) {
            limit = 20;
        }
        const drifts = storage.readDriftHistory(limit);
        console.log(chalk_1.default.blue(`\n📈 Recent Drift Records (${drifts.length})\n`));
        if (drifts.length === 0) {
            console.log(chalk_1.default.green('No drift records found'));
            return;
        }
        drifts.forEach((drift) => {
            // Severity drives the color: significant=red, moderate=yellow,
            // anything else is dimmed.
            const color = drift.severity === 'significant' ? chalk_1.default.red :
                drift.severity === 'moderate' ? chalk_1.default.yellow : chalk_1.default.dim;
            console.log(color(`[${drift.timestamp}] ${drift.metric}: ${drift.driftPercent.toFixed(2)}% (${drift.severity})`));
        });
    }
    catch (error) {
        console.error(chalk_1.default.red('Failed to read drift history:', error.message));
    }
});
program
.command('badge')
.description('Generate "Built with llmverify" badge for your project')
.option('-n, --name <name>', 'Project name')
.option('-u, --url <url>', 'Project URL')
.option('-o, --output <path>', 'Output file path')
.action((options) => {
try {
const { generateBadgeForProject, saveBadgeToFile } = require('./badge/generator');
if (!options.name) {
console.error(chalk_1.default.red('Error: Project name is required'));
console.log(chalk_1.default.dim('Usage: npx llmverify badge --name "My Project" --url "https://example.com"'));
return;
}
if (options.output) {
saveBadgeToFile(options.output, options.name, options.url);
console.log(chalk_1.default.green(`\n✓ Badge saved to: ${options.output}\n`));
}
else {
const { markdown, html, signature } = generateBadgeForProject(options.name, options.url);
console.log(chalk_1.default.blue('\n📛 Built with llmverify Badge\n'));
console.log(chalk_1.default.green('Markdown:'));
console.log(chalk_1.default.dim(markdown));
console.log('\n' + chalk_1.default.green('HTML:'));
console.log(chalk_1.default.dim(html));
console.log('\n' + chalk_1.de