vaultace-cli
AI-powered security scanner that detects vulnerabilities in AI-generated code. Proactive scanning, autonomous fixing, and emergency response for modern development teams.
787 lines (672 loc) • 28.4 kB
JavaScript
/**
* Fix Simulation Command - Preview and validate fixes before applying
* Enterprise-grade confidence building through fix simulation
*/
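//
// Illustrative usage (assumed invocations; command and option names are
// taken from the definitions below):
//   vaultace simulate fixes . --severity high --output table
//   vaultace simulate apply <simulation-id> --fix-ids vuln_001,vuln_002
//   vaultace simulate list --status active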
const { Command } = require('commander')
const chalk = require('chalk')
const inquirer = require('inquirer')
const ora = require('ora')
const fs = require('fs-extra')
const path = require('path')
const { table } = require('table')
const diff = require('diff')
const APIClient = require('../services/api-client')
const ConfigManager = require('../utils/config-manager')
const LocalScanner = require('../services/local-scanner')
const simulateCommand = new Command('simulate')
.description('🔍 Preview and validate AI-generated fixes before applying them')
// Main simulation command
simulateCommand
.command('fixes')
.description('Generate and preview fix simulations')
.argument('[path]', 'repository path to simulate fixes for', '.')
.option('--vulnerability-ids <ids>', 'comma-separated vulnerability IDs to simulate')
.option('--severity <level>', 'simulate fixes for vulnerabilities of this severity and below', 'medium')
.option('--simulation-mode <mode>', 'simulation mode (preview|validate|test)', 'preview')
.option('--model <model>', 'preferred AI model', 'auto')
.option('--include-diff', 'include code diffs in preview', true)
.option('--include-risk', 'include risk analysis', true)
.option('--interactive', 'interactive preview mode', true)
.option('--output <format>', 'output format (table|json|detailed)', 'detailed')
.action(async (repoPath, options) => {
console.log(chalk.bold.cyan('🔬 VAULTACE FIX SIMULATION CENTER'))
console.log(chalk.cyan('═'.repeat(50)))
console.log(chalk.magenta('Enterprise-grade fix preview and validation'))
const config = ConfigManager.getConfig()
if (!config.apiKey) {
console.error(chalk.red('❌ API key required. Run: vaultace auth login'))
process.exit(1)
}
const apiClient = new APIClient(config)
const simulator = new FixSimulator(apiClient, repoPath)
try {
// Get vulnerabilities to simulate
let vulnerabilityIds = []
if (options.vulnerabilityIds) {
vulnerabilityIds = options.vulnerabilityIds.split(',').map(id => id.trim())
} else {
vulnerabilityIds = await simulator.discoverVulnerabilities(options.severity)
}
if (vulnerabilityIds.length === 0) {
console.log(chalk.yellow('⚠️ No vulnerabilities found to simulate fixes for'))
return
}
console.log(chalk.blue(`🔍 Found ${vulnerabilityIds.length} vulnerabilities to simulate`))
// Interactive selection if not specified
if (options.interactive && !options.vulnerabilityIds) {
const selectedIds = await simulator.selectVulnerabilitiesInteractively(vulnerabilityIds)
vulnerabilityIds = selectedIds
}
// Generate simulation
const simulationResults = await simulator.generateSimulation({
vulnerabilityIds,
simulationMode: options.simulationMode,
modelPreference: options.model === 'auto' ? undefined : options.model,
includeDiff: options.includeDiff,
includeRisk: options.includeRisk
})
// Display results
await simulator.displaySimulationResults(simulationResults, options.output)
// Interactive actions
if (options.interactive) {
await simulator.handleInteractiveActions(simulationResults)
}
} catch (error) {
console.error(chalk.red(`❌ Simulation failed: ${error.message}`))
process.exit(1)
}
})
// Apply simulated fixes command
simulateCommand
.command('apply')
.description('Apply previously simulated fixes')
.argument('<simulation-id>', 'simulation ID to apply')
.option('--fix-ids <ids>', 'comma-separated fix IDs to apply (leave empty to apply all)')
.option('--apply-mode <mode>', 'application mode (safe|aggressive|custom)', 'safe')
.option('--backup', 'create backups before applying', true)
.option('--run-tests', 'run tests after applying fixes', true)
.option('--interactive', 'interactive application mode', true)
.action(async (simulationId, options) => {
console.log(chalk.bold.green('✅ APPLYING SIMULATED FIXES'))
console.log(chalk.green('═'.repeat(50)))
const config = ConfigManager.getConfig()
const apiClient = new APIClient(config)
const applicator = new SimulatedFixApplicator(apiClient, simulationId)
try {
// Get simulation details
const simulation = await applicator.getSimulation()
if (!simulation) {
console.error(chalk.red('❌ Simulation not found or expired'))
process.exit(1)
}
console.log(chalk.blue(`📋 Simulation: ${simulation.simulation_id}`))
console.log(chalk.gray(` Total fixes: ${simulation.total_fixes}`))
console.log(chalk.gray(` Average confidence: ${simulation.summary.average_confidence}%`))
// Select fixes to apply
let fixIds = []
if (options.fixIds) {
fixIds = options.fixIds.split(',').map(id => id.trim())
} else if (options.interactive) {
fixIds = await applicator.selectFixesInteractively(simulation.previews)
} else {
fixIds = simulation.previews.map(p => p.vulnerability_id)
}
if (fixIds.length === 0) {
console.log(chalk.yellow('⚠️ No fixes selected for application'))
return
}
// Confirm application
if (options.interactive) {
const confirmed = await applicator.confirmApplication(fixIds, options)
if (!confirmed) {
console.log(chalk.gray('❌ Application cancelled'))
return
}
}
// Apply fixes
const results = await applicator.applyFixes(fixIds, {
applyMode: options.applyMode,
createBackup: options.backup,
runTests: options.runTests
})
// Display results
applicator.displayApplicationResults(results)
} catch (error) {
console.error(chalk.red(`❌ Application failed: ${error.message}`))
process.exit(1)
}
})
// List simulations command
simulateCommand
.command('list')
.description('List previous simulations')
.option('--limit <number>', 'limit number of results', '10')
.option('--status <status>', 'filter by status (active|expired|applied)')
.action(async (options) => {
console.log(chalk.bold.blue('📋 SIMULATION HISTORY'))
console.log(chalk.blue('═'.repeat(50)))
const config = ConfigManager.getConfig()
const apiClient = new APIClient(config)
try {
// Mock simulation history - in production, fetch from API
const simulations = [
{
simulation_id: 'sim_20241231_143022_a1b2c3d4',
created_at: '2024-12-31T14:30:22Z',
total_fixes: 8,
average_confidence: 92,
status: 'active',
expires_at: '2024-12-31T23:59:59Z'
},
{
simulation_id: 'sim_20241230_095014_e5f6g7h8',
created_at: '2024-12-30T09:50:14Z',
total_fixes: 5,
average_confidence: 87,
status: 'applied',
applied_at: '2024-12-30T10:15:33Z'
}
]
if (simulations.length === 0) {
console.log(chalk.yellow('⚠️ No simulations found'))
return
}
// Display as table
const tableData = [
['Simulation ID', 'Created', 'Fixes', 'Confidence', 'Status', 'Expires/Applied']
]
simulations.slice(0, parseInt(options.limit, 10)).forEach(sim => {
const statusColor = sim.status === 'active' ? chalk.green :
sim.status === 'applied' ? chalk.blue : chalk.gray
tableData.push([
chalk.cyan(sim.simulation_id.substring(0, 20) + '...'),
chalk.gray(new Date(sim.created_at).toLocaleDateString()),
chalk.yellow(sim.total_fixes.toString()),
chalk.magenta(`${sim.average_confidence}%`),
statusColor(sim.status),
chalk.gray(sim.status === 'applied' ?
new Date(sim.applied_at).toLocaleDateString() :
new Date(sim.expires_at).toLocaleDateString())
])
})
console.log(table(tableData, {
border: {
topBody: chalk.gray('─'),
topJoin: chalk.gray('┬'),
topLeft: chalk.gray('┌'),
topRight: chalk.gray('┐'),
bottomBody: chalk.gray('─'),
bottomJoin: chalk.gray('┴'),
bottomLeft: chalk.gray('└'),
bottomRight: chalk.gray('┘'),
bodyLeft: chalk.gray('│'),
bodyRight: chalk.gray('│'),
bodyJoin: chalk.gray('│'),
joinBody: chalk.gray('─'),
joinLeft: chalk.gray('├'),
joinRight: chalk.gray('┤'),
joinJoin: chalk.gray('┼')
}
}))
} catch (error) {
console.error(chalk.red(`❌ Failed to list simulations: ${error.message}`))
process.exit(1)
}
})
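/**
 * FixSimulator drives the preview workflow: discover vulnerabilities,
 * let the user select a subset, generate per-fix previews, and render
 * the results. Network calls are mocked where noted below.
 */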
class FixSimulator {
constructor(apiClient, repoPath) {
this.apiClient = apiClient
this.repoPath = path.resolve(repoPath)
this.scanner = new LocalScanner()
}
async discoverVulnerabilities(severityFilter) {
const spinner = ora('🔍 Discovering vulnerabilities...').start()
try {
// Mock vulnerability discovery
const mockVulnerabilities = [
{ id: 'vuln_001', type: 'sql_injection', severity: 'high', file: 'src/database/queries.py' },
{ id: 'vuln_002', type: 'xss_vulnerability', severity: 'medium', file: 'src/templates/user.html' },
{ id: 'vuln_003', type: 'exposed_secret', severity: 'critical', file: 'src/config/database.py' },
{ id: 'vuln_004', type: 'path_traversal', severity: 'high', file: 'src/files/upload.py' }
]
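// Severity levels are ordered least-to-most severe; the filter keeps
// vulnerabilities at or below the requested level, matching the
// --severity option's "this severity and below" contract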
const severityLevels = ['low', 'medium', 'high', 'critical']
const maxSeverityIndex = severityLevels.indexOf(severityFilter)
const filteredVulns = mockVulnerabilities.filter(vuln => {
const vulnSeverityIndex = severityLevels.indexOf(vuln.severity)
return vulnSeverityIndex >= 0 && vulnSeverityIndex <= maxSeverityIndex
})
spinner.succeed(`Found ${filteredVulns.length} vulnerabilities`)
return filteredVulns.map(v => v.id)
} catch (error) {
spinner.fail('Vulnerability discovery failed')
throw error
}
}
async selectVulnerabilitiesInteractively(vulnerabilityIds) {
// Mock vulnerability details
const vulnerabilityDetails = {
'vuln_001': { type: 'sql_injection', severity: 'high', file: 'src/database/queries.py', description: 'SQL injection in user query' },
'vuln_002': { type: 'xss_vulnerability', severity: 'medium', file: 'src/templates/user.html', description: 'XSS in user profile display' },
'vuln_003': { type: 'exposed_secret', severity: 'critical', file: 'src/config/database.py', description: 'Database credentials exposed' },
'vuln_004': { type: 'path_traversal', severity: 'high', file: 'src/files/upload.py', description: 'Path traversal in file upload' }
}
const choices = vulnerabilityIds.map(id => {
const details = vulnerabilityDetails[id] || { type: 'unknown', severity: 'medium', file: 'unknown', description: 'Unknown vulnerability' }
const severityColor = {
'critical': chalk.red,
'high': chalk.magenta,
'medium': chalk.yellow,
'low': chalk.gray
}[details.severity] || chalk.gray
return {
name: `${severityColor(details.severity.toUpperCase())} ${chalk.cyan(details.type)} - ${chalk.gray(details.file)}\n ${details.description}`,
value: id,
checked: true
}
})
const answers = await inquirer.prompt([{
type: 'checkbox',
name: 'selectedVulnerabilities',
message: 'Select vulnerabilities to simulate fixes for:',
choices: choices,
pageSize: 10,
validate: (selections) => {
if (selections.length === 0) {
return 'Please select at least one vulnerability'
}
return true
}
}])
return answers.selectedVulnerabilities
}
async generateSimulation(options) {
const spinner = ora('🤖 Generating fix simulation...').start()
try {
const simulationRequest = {
vulnerability_ids: options.vulnerabilityIds,
simulation_mode: options.simulationMode,
model_preference: options.modelPreference,
include_diff: options.includeDiff,
include_risk_analysis: options.includeRisk,
create_temp_branch: false
}
// Mock API call - replace with actual API call
await new Promise(resolve => setTimeout(resolve, 2000))
const mockSimulation = {
simulation_id: `sim_${Date.now()}_${Math.random().toString(36).slice(2, 10)}`,
status: 'completed',
total_fixes: options.vulnerabilityIds.length,
previews: await Promise.all(options.vulnerabilityIds.map(id => this.generateMockPreview(id))),
summary: {
average_confidence: 89,
risk_breakdown: { low: 2, medium: 1, high: 0 },
severity_breakdown: { critical: 1, high: 2, medium: 0, low: 0 },
recommendations: [
'All fixes appear safe for automatic application',
'Create full system backup before applying any fixes',
'Apply fixes in staging environment first'
],
estimated_application_time: `${options.vulnerabilityIds.length * 2} minutes`
},
expires_at: new Date(Date.now() + 24 * 60 * 60 * 1000).toISOString(),
actions_available: ['apply_selected_fixes', 'apply_all_fixes', 'export_simulation_report']
}
spinner.succeed(`Generated ${mockSimulation.total_fixes} fix previews`)
return mockSimulation
} catch (error) {
spinner.fail('Simulation generation failed')
throw error
}
}
async generateMockPreview(vulnerabilityId) {
// Fetch actual vulnerability data from the API; assumes the endpoint
// returns a preview object in the shape consumed by displaySinglePreview()
try {
const response = await this.apiClient.get(`/fix-simulation/preview/${vulnerabilityId}`)
return response.data
} catch (error) {
// Fallback to representative mock data if API is unavailable
console.log(chalk.yellow('⚠️ Using mock data - API unavailable'))
const originalCode = `// Vulnerable code example
function authenticate(username, password) {
const query = "SELECT * FROM users WHERE username = '" + username + "' AND password = '" + password + "'";
return db.query(query);
}`
const proposedFix = `// Fixed code with parameterized query
function authenticate(username, password) {
const query = "SELECT * FROM users WHERE username = ? AND password = ?";
return db.query(query, [username, password]);
}`
return {
vulnerability_id: vulnerabilityId,
vulnerability_type: 'sql_injection',
severity: 'high',
file_path: 'src/database/queries.py',
confidence_score: (Math.floor(Math.random() * 30) + 70) / 100, // 0.70-0.99
risk_assessment: {
risk_level: ['low', 'medium', 'high'][Math.floor(Math.random() * 3)],
risk_factors: []
},
original_code: originalCode,
proposed_fix: proposedFix,
diff_summary: this.generateDiffSummary(originalCode, proposedFix),
testing_suggestions: [
'Verify authentication still succeeds with valid credentials',
'Confirm SQL injection payloads are rejected'
],
explanation: 'SQL injection vulnerability fixed by using parameterized queries',
impact: 'Prevents SQL injection attacks that could compromise database security'
}
}
}
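// Summarize added/removed line counts using the diff package's line-level differ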
generateDiffSummary(original, fixed) {
const changes = diff.diffLines(original, fixed)
let addedLines = 0
let removedLines = 0
changes.forEach(change => {
if (change.added) {addedLines += change.count}
if (change.removed) {removedLines += change.count}
})
return `${removedLines} lines removed, ${addedLines} lines added`
}
async displaySimulationResults(simulation, outputFormat) {
console.log(chalk.bold.green('\n✅ SIMULATION COMPLETED'))
console.log(chalk.green('═'.repeat(50)))
// Summary section
console.log(chalk.blue('📊 SIMULATION SUMMARY'))
console.log(chalk.gray(` Simulation ID: ${simulation.simulation_id}`))
console.log(chalk.gray(` Total Fixes: ${simulation.total_fixes}`))
console.log(chalk.gray(` Average Confidence: ${simulation.summary.average_confidence}%`))
console.log(chalk.gray(` Estimated Time: ${simulation.summary.estimated_application_time}`))
// Risk breakdown
const risk = simulation.summary.risk_breakdown
console.log(chalk.yellow('\n⚠️ RISK ASSESSMENT'))
console.log(chalk.green(` Low Risk: ${risk.low} fixes`))
console.log(chalk.yellow(` Medium Risk: ${risk.medium} fixes`))
console.log(chalk.red(` High Risk: ${risk.high} fixes`))
if (outputFormat === 'detailed') {
await this.displayDetailedPreviews(simulation.previews)
} else if (outputFormat === 'table') {
this.displayTablePreviews(simulation.previews)
} else if (outputFormat === 'json') {
console.log(JSON.stringify(simulation, null, 2))
}
// Recommendations
console.log(chalk.magenta('\n💡 RECOMMENDATIONS'))
simulation.summary.recommendations.forEach(rec => {
console.log(chalk.gray(` โข ${rec}`))
})
}
async displayDetailedPreviews(previews) {
for (const preview of previews) {
await this.displaySinglePreview(preview)
}
}
async displaySinglePreview(preview) {
console.log(chalk.bold.cyan(`\n🔧 ${preview.vulnerability_type.toUpperCase()}`))
console.log(chalk.cyan('─'.repeat(40)))
// Header info
const severityColor = {
'critical': chalk.red,
'high': chalk.magenta,
'medium': chalk.yellow,
'low': chalk.gray
}[preview.severity] || chalk.gray
const riskColor = {
'high': chalk.red,
'medium': chalk.yellow,
'low': chalk.green
}[preview.risk_assessment.risk_level] || chalk.gray
console.log(chalk.gray(`File: ${preview.file_path}`))
console.log(`Severity: ${severityColor(preview.severity.toUpperCase())}`)
console.log(`Risk: ${riskColor(preview.risk_assessment.risk_level.toUpperCase())}`)
console.log(`Confidence: ${chalk.magenta(Math.round(preview.confidence_score * 100))}%`)
console.log(chalk.gray(`Diff: ${preview.diff_summary}`))
// Code diff
console.log(chalk.yellow('\n📝 CODE CHANGES:'))
console.log(chalk.red('- ' + preview.original_code))
console.log(chalk.green('+ ' + preview.proposed_fix))
// Risk factors
if (preview.risk_assessment.risk_factors.length > 0) {
console.log(chalk.yellow('\n⚠️ RISK FACTORS:'))
preview.risk_assessment.risk_factors.forEach(factor => {
console.log(chalk.gray(` โข ${factor}`))
})
}
// Testing suggestions
console.log(chalk.blue('\n🧪 TESTING SUGGESTIONS:'))
preview.testing_suggestions.forEach(suggestion => {
console.log(chalk.gray(` โข ${suggestion}`))
})
}
displayTablePreviews(previews) {
console.log(chalk.blue('\n📋 FIX PREVIEWS'))
const tableData = [
['ID', 'Type', 'File', 'Severity', 'Risk', 'Confidence']
]
previews.forEach(preview => {
const severityColor = {
'critical': chalk.red,
'high': chalk.magenta,
'medium': chalk.yellow,
'low': chalk.gray
}[preview.severity] || chalk.gray
const riskColor = {
'high': chalk.red,
'medium': chalk.yellow,
'low': chalk.green
}[preview.risk_assessment.risk_level] || chalk.gray
tableData.push([
chalk.cyan(preview.vulnerability_id.slice(-8)),
chalk.blue(preview.vulnerability_type),
chalk.gray(path.basename(preview.file_path)),
severityColor(preview.severity),
riskColor(preview.risk_assessment.risk_level),
chalk.magenta(`${Math.round(preview.confidence_score * 100)}%`)
])
})
console.log(table(tableData))
}
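// Post-simulation menu: apply fixes now, export a report, or drill into
// individual previews; actual application defers to `vaultace simulate apply`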
async handleInteractiveActions(simulation) {
const actions = [
{ name: '✅ Apply All Fixes', value: 'apply_all' },
{ name: '🎯 Apply Selected Fixes', value: 'apply_selected' },
{ name: '📄 Export Simulation Report', value: 'export_report' },
{ name: '🔍 View Individual Fix Details', value: 'view_details' },
{ name: '❌ Exit', value: 'exit' }
]
const answer = await inquirer.prompt([{
type: 'list',
name: 'action',
message: 'What would you like to do next?',
choices: actions
}])
switch (answer.action) {
case 'apply_all':
await this.applyAllFixes(simulation)
break
case 'apply_selected':
await this.applySelectedFixes(simulation)
break
case 'export_report':
await this.exportSimulationReport(simulation)
break
case 'view_details':
await this.viewFixDetails(simulation)
break
case 'exit':
console.log(chalk.gray('Simulation saved. Use the simulation ID to apply fixes later.'))
break
}
}
async applyAllFixes(simulation) {
const confirmed = await inquirer.prompt([{
type: 'confirm',
name: 'proceed',
message: `Apply all ${simulation.total_fixes} fixes?`,
default: false
}])
if (confirmed.proceed) {
console.log(chalk.green(`🚀 Applying all fixes from simulation ${simulation.simulation_id}`))
console.log(chalk.gray('Use: vaultace simulate apply ' + simulation.simulation_id))
}
}
async applySelectedFixes(simulation) {
const choices = simulation.previews.map(preview => ({
name: `${preview.vulnerability_type} - ${preview.file_path} (${Math.round(preview.confidence_score * 100)}% confidence)`,
value: preview.vulnerability_id,
checked: preview.risk_assessment.risk_level === 'low'
}))
const answers = await inquirer.prompt([{
type: 'checkbox',
name: 'selectedFixes',
message: 'Select fixes to apply:',
choices: choices,
validate: (selections) => {
if (selections.length === 0) {
return 'Please select at least one fix'
}
return true
}
}])
if (answers.selectedFixes.length > 0) {
console.log(chalk.green(`🎯 Applying ${answers.selectedFixes.length} selected fixes`))
console.log(chalk.gray(`Use: vaultace simulate apply ${simulation.simulation_id} --fix-ids ${answers.selectedFixes.join(',')}`))
}
}
async exportSimulationReport(simulation) {
const reportPath = `simulation-report-${simulation.simulation_id}.json`
const report = {
simulation_id: simulation.simulation_id,
generated_at: new Date().toISOString(),
summary: simulation.summary,
total_fixes: simulation.total_fixes,
previews: simulation.previews.map(preview => ({
vulnerability_id: preview.vulnerability_id,
vulnerability_type: preview.vulnerability_type,
severity: preview.severity,
file_path: preview.file_path,
confidence_score: preview.confidence_score,
risk_level: preview.risk_assessment.risk_level,
risk_factors: preview.risk_assessment.risk_factors,
ai_metadata: preview.ai_metadata
})),
recommendations: simulation.summary.recommendations
}
await fs.writeJson(reportPath, report, { spaces: 2 })
console.log(chalk.green(`📄 Simulation report exported to: ${reportPath}`))
}
async viewFixDetails(simulation) {
const choices = simulation.previews.map(preview => ({
name: `${preview.vulnerability_type} - ${preview.file_path}`,
value: preview.vulnerability_id
}))
const answer = await inquirer.prompt([{
type: 'list',
name: 'selectedFix',
message: 'Select fix to view details:',
choices: choices
}])
const selectedPreview = simulation.previews.find(p => p.vulnerability_id === answer.selectedFix)
if (selectedPreview) {
await this.displaySinglePreview(selectedPreview)
// Ask if user wants to continue
const continueAnswer = await inquirer.prompt([{
type: 'confirm',
name: 'continue',
message: 'View another fix or perform other actions?',
default: true
}])
if (continueAnswer.continue) {
await this.handleInteractiveActions(simulation)
}
}
}
}
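/**
 * SimulatedFixApplicator applies fixes from a previously generated
 * simulation: fetch the simulation, confirm the selection, apply with
 * optional backups and tests per the chosen mode, and report results.
 */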
class SimulatedFixApplicator {
constructor(apiClient, simulationId) {
this.apiClient = apiClient
this.simulationId = simulationId
}
async getSimulation() {
// Mock API call - replace with actual call
const mockSimulation = {
simulation_id: this.simulationId,
total_fixes: 3,
summary: { average_confidence: 89 },
previews: [
{ vulnerability_id: 'vuln_001', vulnerability_type: 'sql_injection', confidence_score: 0.95 },
{ vulnerability_id: 'vuln_002', vulnerability_type: 'xss_vulnerability', confidence_score: 0.88 },
{ vulnerability_id: 'vuln_003', vulnerability_type: 'exposed_secret', confidence_score: 0.92 }
]
}
return mockSimulation
}
async selectFixesInteractively(previews) {
const choices = previews.map(preview => ({
name: `${preview.vulnerability_type} (${Math.round(preview.confidence_score * 100)}% confidence)`,
value: preview.vulnerability_id,
checked: true
}))
const answers = await inquirer.prompt([{
type: 'checkbox',
name: 'selectedFixes',
message: 'Select fixes to apply:',
choices: choices,
validate: (selections) => {
if (selections.length === 0) {
return 'Please select at least one fix'
}
return true
}
}])
return answers.selectedFixes
}
async confirmApplication(fixIds, options) {
console.log(chalk.yellow('\n⚠️ APPLICATION CONFIRMATION'))
console.log(chalk.gray(` Fixes to apply: ${fixIds.length}`))
console.log(chalk.gray(` Application mode: ${options.applyMode}`))
console.log(chalk.gray(` Create backups: ${options.backup ? 'Yes' : 'No'}`))
console.log(chalk.gray(` Run tests: ${options.runTests ? 'Yes' : 'No'}`))
const answer = await inquirer.prompt([{
type: 'confirm',
name: 'confirmed',
message: 'Proceed with applying these fixes?',
default: false
}])
return answer.confirmed
}
async applyFixes(fixIds, options) {
const spinner = ora(`Applying ${fixIds.length} fixes...`).start()
try {
// Mock API call - replace with actual call
await new Promise(resolve => setTimeout(resolve, 3000))
const mockResults = {
applied_fixes: fixIds.length,
results: {
applied_fixes: fixIds.map(id => ({ vulnerability_id: id, status: 'applied' })),
failed_fixes: [],
rollback_info: fixIds.map(id => ({
vulnerability_id: id,
backup_path: `backup_${id}.bak`
}))
},
simulation_id: this.simulationId
}
spinner.succeed(`Successfully applied ${fixIds.length} fixes`)
return mockResults
} catch (error) {
spinner.fail('Fix application failed')
throw error
}
}
displayApplicationResults(results) {
console.log(chalk.bold.green('\n✅ APPLICATION COMPLETED'))
console.log(chalk.green('═'.repeat(50)))
console.log(chalk.blue('📊 RESULTS SUMMARY'))
console.log(chalk.green(` ✅ Applied: ${results.results.applied_fixes.length}`))
console.log(chalk.red(` ❌ Failed: ${results.results.failed_fixes.length}`))
console.log(chalk.yellow(` 💾 Backups: ${results.results.rollback_info.length}`))
if (results.results.rollback_info.length > 0) {
console.log(chalk.magenta('\n🔄 ROLLBACK AVAILABLE'))
console.log(chalk.gray(' Use: vaultace simulate rollback ' + results.simulation_id))
}
console.log(chalk.cyan('\n🎉 Fixes have been successfully applied to your codebase!'))
}
}
module.exports = simulateCommand