vaultace-cli
AI-powered security scanner that detects vulnerabilities in AI-generated code. Proactive scanning, autonomous fixing, and emergency response for modern development teams.
764 lines (640 loc) • 26.4 kB
JavaScript
/**
* Autonomous Fix Command - AI-powered vulnerability fixing
* Premium feature using Claude Sonnet 4 for autonomous code fixes
*/
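// Illustrative usage (assumes the published binary is invoked as `vaultace`;
// the subcommands and flags below are the ones registered in this file):
//   vaultace fix auto . --severity high --approve-policy hybrid --dry-run
//   vaultace fix single --vuln-id <id>
//   vaultace fix capabilities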
const { Command } = require('commander')
const chalk = require('chalk')
const inquirer = require('inquirer')
const ora = require('ora')
const fs = require('fs-extra')
const path = require('path')
const os = require('os')
const { table } = require('table')
const fetch = require('node-fetch')
const LocalScanner = require('../services/local-scanner')
const { BackendFixService, FixResultFormatter } = require('../services/backend-fix-service')
const { AIFixService } = require('../services/ai-fix-service')
const GitManager = require('../services/git-manager')
const { ApprovalWorkflow } = require('../services/approval-workflow')
const APIClient = require('../services/api-client')
const ConfigManager = require('../utils/config-manager')
const fixCommand = new Command('fix')
.description('🤖 Autonomous AI-powered vulnerability fixing (Premium)')
// Main autonomous fix command
fixCommand
.command('auto')
.description('Autonomously fix vulnerabilities using Claude Sonnet 4')
.argument('[path]', 'repository path to fix', '.')
.option('--severity <level>', 'fix vulnerabilities of this severity and below (low|medium|high|critical)', 'medium')
.option('--approve-policy <policy>', 'approval policy (manual|automatic|hybrid)', 'hybrid')
.option('--max-fixes <number>', 'maximum number of fixes to apply', '50')
.option('--model <model>', 'AI model to use (claude-sonnet-4|claude-sonnet-3)', 'claude-sonnet-4')
.option('--dry-run', 'show what would be fixed without making changes')
.option('--skip-tests', 'skip running tests after fixes')
.option('--branch <name>', 'create fixes on specific branch', 'vaultace-auto-fixes')
.action(async (repoPath, options) => {
console.log(chalk.bold.cyan('🤖 VAULTACE AUTONOMOUS FIXING AGENT'))
console.log(chalk.cyan('─'.repeat(50)))
console.log(chalk.magenta(`🧠 Powered by ${options.model.toUpperCase()}`))
// Check premium access
const config = ConfigManager.getConfig()
if (!await validatePremiumAccess(config)) {
console.error(chalk.red('❌ Autonomous fixing requires Professional or Enterprise tier'))
console.log(chalk.gray('Upgrade at: https://vaultace.com/pricing'))
process.exit(1)
}
const spinner = ora('Initializing autonomous fixing agent...').start()
try {
// Initialize services
const gitManager = new GitManager(repoPath)
const backendFixService = new BackendFixService(config)
const approvalWorkflow = new ApprovalWorkflow(config)
const scanner = new LocalScanner({ path: repoPath })
// Pre-flight checks
await performPreflightChecks(gitManager, repoPath, options)
// Check fix access and limits
spinner.text = 'Checking fix access and limits...'
const fixAccess = await backendFixService.checkFixAccess()
if (!fixAccess.hasAccess) {
spinner.fail('Autonomous fixing requires premium subscription')
console.log(chalk.red('❌ Upgrade required: https://vaultace.com/pricing'))
if (fixAccess.error) {
console.log(chalk.gray(`Error: ${fixAccess.error}`))
}
process.exit(1)
}
if (fixAccess.remaining <= 0) {
spinner.fail(`Monthly fix limit reached (${fixAccess.used}/${fixAccess.monthlyLimit})`)
console.log(chalk.yellow('Upgrade to increase your fix limit'))
process.exit(1)
}
spinner.succeed(`Fix access verified (${fixAccess.remaining} fixes remaining)`)
// Get user confirmation for manual approval policy
if (options.approvePolicy === 'manual') {
const { proceed } = await inquirer.prompt([{
type: 'confirm',
name: 'proceed',
message: 'Proceed with autonomous fixing using Python backend?',
default: true
}])
if (!proceed) {
console.log(chalk.gray('Autonomous fixing cancelled'))
return
}
}
// Execute autonomous fixes via Python backend
spinner.start('Executing autonomous fixes via Python backend...')
const fixResults = await backendFixService.executeAutonomousFix(repoPath, options)
spinner.succeed('Autonomous fixing completed')
// Run tests if not skipped
if (!options.skipTests) {
await runPostFixTests(repoPath, spinner)
}
// Rescan to verify fixes
spinner.start('Rescanning to verify fixes...')
const postFixScan = await scanner.scan()
const remainingVulns = postFixScan.vulnerabilities.length
spinner.succeed(`Rescan complete: ${remainingVulns} vulnerabilities remaining`)
// Present results and approval options
await presentFixResults(fixResults, remainingVulns, options)
// Handle approval workflow
await handleApprovalWorkflow(gitManager, fixResults, options)
} catch (error) {
spinner.fail(`Autonomous fixing failed: ${error.message}`)
console.error(chalk.red(error.stack))
process.exit(1)
}
})
// Fix specific vulnerability by ID
fixCommand
.command('single')
.description('Fix a specific vulnerability by ID')
.requiredOption('--vuln-id <id>', 'vulnerability ID to fix')
.option('--model <model>', 'AI model to use', 'claude-sonnet-4')
.action(async (options) => {
console.log(chalk.cyan(`🔧 Fixing vulnerability: ${options.vulnId}`))
const config = ConfigManager.getConfig()
if (!await validatePremiumAccess(config)) {
console.error(chalk.red('❌ Vulnerability fixing requires premium access'))
process.exit(1)
}
const spinner = ora('Generating fix with AI...').start()
try {
const aiFixService = new AIFixService(options.model, config)
const fix = await aiFixService.generateSingleFix(options.vulnId)
spinner.succeed('Fix generated successfully')
// Show the fix
console.log(chalk.bold('\n🔧 Proposed Fix:'))
console.log(chalk.gray('─'.repeat(50)))
console.log(fix.description)
console.log('\n' + chalk.bold('Code Changes:'))
console.log(fix.diff)
const { apply } = await inquirer.prompt([{
type: 'confirm',
name: 'apply',
message: 'Apply this fix?',
default: true
}])
if (apply) {
await aiFixService.applyFix(fix)
console.log(chalk.green('✅ Fix applied successfully'))
}
} catch (error) {
spinner.fail(`Fix generation failed: ${error.message}`)
process.exit(1)
}
})
// Show fixing capabilities
fixCommand
.command('capabilities')
.description('Show autonomous fixing capabilities and limitations')
.action(() => {
console.log(chalk.bold.cyan('🤖 VAULTACE AUTONOMOUS FIXING CAPABILITIES'))
console.log(chalk.cyan('─'.repeat(55)))
console.log(chalk.bold('\n✅ Can Automatically Fix:'))
console.log('  • Exposed secrets → Environment variables')
console.log('  • SQL injection → Parameterized queries')
console.log('  • XSS vulnerabilities → Input sanitization')
console.log('  • Path traversal → Path validation')
console.log('  • Insecure imports → Secure alternatives')
console.log('  • Missing input validation → Validation logic')
console.log('  • Hardcoded credentials → Config-based auth')
console.log(chalk.bold('\n⚠️ Requires Manual Review:'))
console.log('  • Authentication bypass fixes')
console.log('  • Authorization logic changes')
console.log('  • Complex business logic modifications')
console.log('  • Database schema changes')
console.log(chalk.bold('\n🧠 AI Models Available:'))
console.log('  • Claude Sonnet 4 (Latest, most capable)')
console.log('  • Claude Sonnet 3 (Stable, proven)')
console.log(chalk.bold('\n🎯 Approval Policies:'))
console.log('  • manual - Review all fixes before applying')
console.log('  • automatic - Auto-apply low-risk fixes')
console.log('  • hybrid - Auto-apply safe fixes, review risky ones')
console.log(chalk.bold('\n💰 Premium Feature:'))
console.log(' Professional Ace: 50 auto-fixes/month')
console.log(' Enterprise Ace: Unlimited auto-fixes')
})
async function validatePremiumAccess(config) {
// Check if user has premium access
if (!config.auth?.accessToken) {
return false
}
try {
const apiClient = new APIClient(config)
const subscription = await apiClient.getSubscription()
return subscription.tier !== 'free'
} catch (error) {
return false
}
}
async function performPreflightChecks(gitManager, repoPath, options) {
// Check if it's a git repository
if (!await gitManager.isGitRepo()) {
throw new Error('Not a git repository - autonomous fixing requires git for safety')
}
// Check for uncommitted changes
if (await gitManager.hasUncommittedChanges()) {
throw new Error('Repository has uncommitted changes - commit or stash before fixing')
}
// Check if branch already exists
if (await gitManager.branchExists(options.branch)) {
const { overwrite } = await inquirer.prompt([{
type: 'confirm',
name: 'overwrite',
message: `Branch ${options.branch} already exists. Overwrite?`,
default: false
}])
if (!overwrite) {
throw new Error('Fixing cancelled - branch already exists')
}
await gitManager.deleteBranch(options.branch)
}
}
function filterVulnerabilitiesBySeverity(vulnerabilities, maxSeverity) {
const severityOrder = ['info', 'low', 'medium', 'high', 'critical']
const maxIndex = severityOrder.indexOf(maxSeverity)
return vulnerabilities.filter(vuln => {
const vulnIndex = severityOrder.indexOf(vuln.severity)
return vulnIndex >= 0 && vulnIndex <= maxIndex
})
}
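// Example (illustrative): with maxSeverity 'medium', info/low/medium findings
// are kept and high/critical findings are excluded:
//   filterVulnerabilitiesBySeverity(
//     [{ severity: 'low' }, { severity: 'critical' }],
//     'medium'
//   ) // => [{ severity: 'low' }]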
async function showFixingPlan(vulnerabilities, options) {
console.log(chalk.bold('\n🎯 AUTONOMOUS FIXING PLAN'))
console.log('─'.repeat(40))
const severityBreakdown = vulnerabilities.reduce((acc, vuln) => {
acc[vuln.severity] = (acc[vuln.severity] || 0) + 1
return acc
}, {})
console.log(chalk.bold('Vulnerabilities to Fix:'))
Object.entries(severityBreakdown).forEach(([severity, count]) => {
const color = getSeverityColor(severity)
console.log(`  ${color(severity)}: ${count}`)
})
console.log(chalk.bold(`\nApproval Policy: ${options.approvePolicy}`))
console.log(chalk.bold(`AI Model: ${options.model}`))
console.log(chalk.bold(`Max Fixes: ${options.maxFixes}`))
if (options.dryRun) {
console.log(chalk.yellow('\n🔍 DRY RUN MODE - No changes will be made'))
}
}
async function executeAutonomousFixes(aiFixService, approvalWorkflow, vulnerabilities, repoPath, options) {
const fixes = []
const maxFixes = parseInt(options.maxFixes)
console.log(chalk.bold('\n🤖 Starting Autonomous Fixing...'))
console.log('─'.repeat(40))
for (let i = 0; i < Math.min(vulnerabilities.length, maxFixes); i++) {
const vuln = vulnerabilities[i]
const fixSpinner = ora(`Fixing: ${vuln.type} in ${path.basename(vuln.file)}`).start()
try {
// Generate fix using AI
const fix = await aiFixService.generateFix(vuln, repoPath)
// Apply fix based on approval policy
const shouldApply = await approvalWorkflow.shouldAutoApprove(fix, options.approvePolicy)
if (shouldApply) {
await aiFixService.applyFix(fix)
fixes.push({ ...fix, status: 'applied', auto_applied: true })
fixSpinner.succeed(`Fixed: ${vuln.type} (auto-applied)`)
} else {
fixes.push({ ...fix, status: 'pending_approval', auto_applied: false })
fixSpinner.warn(`Generated fix for: ${vuln.type} (pending approval)`)
}
} catch (error) {
fixes.push({
vulnerability: vuln,
status: 'failed',
error: error.message,
auto_applied: false
})
fixSpinner.fail(`Failed to fix: ${vuln.type} - ${error.message}`)
}
// Brief pause to avoid overwhelming the AI API
await new Promise(resolve => setTimeout(resolve, 1000))
}
return fixes
}
function shouldAutoApplyFix(fix, approvalPolicy) {
if (approvalPolicy === 'manual') {return false}
if (approvalPolicy === 'automatic') {return true}
// Hybrid policy logic
const safeFixTypes = [
'exposed_secret',
'hardcoded_password',
'insecure_import',
'debug_statement',
'console_log_exposure'
]
const isHighRiskFile = fix.file_path.includes('auth') ||
fix.file_path.includes('admin') ||
fix.file_path.includes('security')
return safeFixTypes.includes(fix.vulnerability_type) && !isHighRiskFile
}
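// Example (illustrative): under the hybrid policy, a safe fix type in an
// ordinary file is auto-applied, while the same fix type in an auth-related
// path is held for manual review:
//   shouldAutoApplyFix({ vulnerability_type: 'exposed_secret', file_path: 'src/utils/config.js' }, 'hybrid') // => true
//   shouldAutoApplyFix({ vulnerability_type: 'exposed_secret', file_path: 'src/auth/login.js' }, 'hybrid')   // => false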
async function runPostFixTests(repoPath, spinner) {
spinner.start('Running tests to verify fixes...')
try {
// Check for common test commands
const packageJson = path.join(repoPath, 'package.json')
const hasPackageJson = await fs.pathExists(packageJson)
let testCommand = null
if (hasPackageJson) {
const pkg = await fs.readJson(packageJson)
if (pkg.scripts?.test) {
testCommand = 'npm test'
} else if (pkg.scripts?.jest) {
testCommand = 'npm run jest'
}
}
// Check for Python tests
const hasPytest = await fs.pathExists(path.join(repoPath, 'pytest.ini')) ||
await fs.pathExists(path.join(repoPath, 'setup.cfg'))
if (hasPytest) {
testCommand = 'pytest'
}
if (testCommand) {
const { exec } = require('child_process')
const { promisify } = require('util')
const execAsync = promisify(exec)
const { stdout, stderr } = await execAsync(testCommand, {
cwd: repoPath,
timeout: 300000 // 5 minute timeout
})
spinner.succeed('Tests passed - fixes verified')
} else {
spinner.warn('No test command found - manual verification recommended')
}
} catch (error) {
spinner.fail('Tests failed - some fixes may have broken functionality')
console.log(chalk.yellow('⚠️ Manual review required for failing tests'))
}
}
async function presentFixResults(fixResults, remainingVulns, options) {
console.log(chalk.bold('\n📊 AUTONOMOUS FIXING RESULTS'))
console.log('─'.repeat(45))
const appliedFixes = fixResults.filter(f => f.status === 'applied')
const pendingFixes = fixResults.filter(f => f.status === 'pending_approval')
const failedFixes = fixResults.filter(f => f.status === 'failed')
console.log(`${chalk.green('✅ Auto-Applied:')} ${appliedFixes.length}`)
console.log(`${chalk.yellow('⏳ Pending Approval:')} ${pendingFixes.length}`)
console.log(`${chalk.red('❌ Failed:')} ${failedFixes.length}`)
console.log(`${chalk.blue('🔍 Remaining Vulns:')} ${remainingVulns}`)
// Show applied fixes
if (appliedFixes.length > 0) {
console.log(chalk.bold('\n✅ Successfully Applied Fixes:'))
const appliedTable = [
[chalk.bold('File'), chalk.bold('Type'), chalk.bold('Severity'), chalk.bold('Status')]
]
appliedFixes.forEach(fix => {
appliedTable.push([
path.basename(fix.file_path),
fix.vulnerability_type,
getSeverityColor(fix.severity)(fix.severity),
chalk.green('Applied')
])
})
console.log(table(appliedTable))
}
// Show pending fixes
if (pendingFixes.length > 0) {
console.log(chalk.bold('\n⏳ Fixes Pending Your Approval:'))
pendingFixes.forEach((fix, index) => {
console.log(`\n${index + 1}. ${chalk.yellow(fix.vulnerability_type)} in ${fix.file_path}`)
console.log(` ${chalk.gray('Fix: ' + fix.description)}`)
})
}
// Offer feedback collection for learning
await offerFeedbackCollection(fixResults, options)
}
async function handleApprovalWorkflow(gitManager, fixResults, options) {
const pendingFixes = fixResults.filter(f => f.status === 'pending_approval')
if (pendingFixes.length === 0) {
// All fixes applied, commit automatically
await gitManager.commitChanges('feat: autonomous vulnerability fixes applied by Vaultace AI')
console.log(chalk.green('\n✅ All fixes committed to branch'))
return
}
// Handle pending approvals
console.log(chalk.bold('\n🔍 REVIEW PENDING FIXES'))
console.log('─'.repeat(30))
for (const fix of pendingFixes) {
console.log(chalk.bold(`\n🔍 Fix: ${fix.vulnerability_type}`))
console.log(`File: ${fix.file_path}`)
console.log(`Severity: ${getSeverityColor(fix.severity)(fix.severity)}`)
console.log('\nProposed Changes:')
console.log(chalk.gray(fix.diff))
const { action } = await inquirer.prompt([{
type: 'list',
name: 'action',
message: 'What would you like to do with this fix?',
choices: [
{ name: '✅ Apply fix', value: 'apply' },
{ name: '✏️ Edit fix before applying', value: 'edit' },
{ name: '❌ Skip this fix', value: 'skip' },
{ name: '🛑 Cancel remaining fixes', value: 'cancel' }
]
}])
if (action === 'apply') {
const aiFixService = new AIFixService(options.model, ConfigManager.getConfig())
await aiFixService.applyFix(fix)
console.log(chalk.green('✅ Fix applied'))
} else if (action === 'edit') {
// Interactive fix editing
const { editAction } = await inquirer.prompt([{
type: 'list',
name: 'editAction',
message: 'Edit fix:',
choices: [
'Modify fix description',
'Edit fix code',
'Change severity level',
'Add custom notes',
'Cancel'
]
}])
switch (editAction) {
case 'Modify fix description': {
const { newDescription } = await inquirer.prompt([{
type: 'input',
name: 'newDescription',
message: 'New description:',
default: fix.description
}])
fix.description = newDescription
console.log(chalk.green('✅ Description updated'))
break
}
case 'Edit fix code': {
const { editCode } = await inquirer.prompt([{
type: 'confirm',
name: 'editCode',
message: 'Open external editor for code changes?',
default: false
}])
if (editCode) {
console.log(chalk.blue('💡 Opening temporary file for editing...'))
const tempFile = path.join(os.tmpdir(), `vaultace-fix-${Date.now()}.${fix.language || 'txt'}`)
await fs.writeFile(tempFile, fix.fixed_code || fix.original_code)
// Open in default editor
const { spawn } = require('child_process')
const editor = process.env.EDITOR || 'nano'
await new Promise((resolve, reject) => {
const child = spawn(editor, [tempFile], { stdio: 'inherit' })
child.on('close', resolve)
child.on('error', reject)
})
// Read back the edited content
fix.fixed_code = await fs.readFile(tempFile, 'utf8')
await fs.unlink(tempFile)
console.log(chalk.green('✅ Code updated'))
}
break
}
case 'Change severity level': {
const { newSeverity } = await inquirer.prompt([{
type: 'list',
name: 'newSeverity',
message: 'New severity:',
choices: ['low', 'medium', 'high', 'critical'],
default: fix.severity
}])
fix.severity = newSeverity
console.log(chalk.green('✅ Severity updated'))
break
}
case 'Add custom notes': {
const { notes } = await inquirer.prompt([{
type: 'input',
name: 'notes',
message: 'Custom notes:'
}])
fix.custom_notes = notes
console.log(chalk.green('✅ Notes added'))
break
}
default:
console.log(chalk.yellow('Edit cancelled'))
}
} else if (action === 'cancel') {
break
}
}
// Final commit
const { commit } = await inquirer.prompt([{
type: 'confirm',
name: 'commit',
message: 'Commit all applied fixes?',
default: true
}])
if (commit) {
await gitManager.commitChanges('feat: reviewed and approved vulnerability fixes by Vaultace AI')
console.log(chalk.green('✅ Fixes committed successfully'))
const { createPR } = await inquirer.prompt([{
type: 'confirm',
name: 'createPR',
message: 'Create pull request for review?',
default: true
}])
if (createPR) {
console.log(chalk.blue('🔗 PR creation guide: https://docs.vaultace.co/cli/pull-requests'))
}
}
}
async function offerFeedbackCollection(fixResults, options) {
// Only offer feedback if there are applied fixes
const appliedFixes = fixResults.filter(f => f.status === 'applied')
if (appliedFixes.length === 0) {return}
console.log(chalk.bold('\n🌟 HELP IMPROVE VAULTACE AI'))
console.log('─'.repeat(35))
console.log(chalk.blue('Your feedback helps train our AI to provide better fixes!'))
const { provideFeedback } = await inquirer.prompt([{
type: 'confirm',
name: 'provideFeedback',
message: 'Would you like to rate the AI fixes? (takes 2-3 minutes)',
default: false
}])
if (!provideFeedback) {
console.log(chalk.gray('No problem! You can provide feedback anytime at: https://feedback.vaultace.com'))
return
}
// Collect feedback for each applied fix
console.log(chalk.bold('\n📝 Please rate each fix (1=Poor, 5=Excellent):'))
const feedbackData = []
for (let i = 0; i < Math.min(appliedFixes.length, 5); i++) {
const fix = appliedFixes[i]
console.log(chalk.bold(`\n${i + 1}. ${fix.vulnerability_type}`))
console.log(` File: ${path.basename(fix.file_path)}`)
console.log(` Severity: ${getSeverityColor(fix.severity)(fix.severity)}`)
const { rating } = await inquirer.prompt([{
type: 'list',
name: 'rating',
message: 'How would you rate this fix?',
choices: [
{ name: '⭐⭐⭐⭐⭐ Excellent - Perfect fix', value: 5 },
{ name: '⭐⭐⭐⭐ Good - Minor improvements needed', value: 4 },
{ name: '⭐⭐⭐ Average - Acceptable but could be better', value: 3 },
{ name: '⭐⭐ Below Average - Significant issues', value: 2 },
{ name: '⭐ Poor - Incorrect or harmful fix', value: 1 }
]
}])
const { feedbackType } = await inquirer.prompt([{
type: 'list',
name: 'feedbackType',
message: 'What best describes this fix?',
choices: [
{ name: '🎯 Perfect - exactly what I needed', value: 'perfect' },
{ name: '👍 Helpful - mostly good with minor issues', value: 'helpful' },
{ name: '⚠️ Incomplete - missed some important aspects', value: 'incomplete' },
{ name: '❌ Incorrect - doesn\'t solve the problem', value: 'incorrect' }
],
when: () => rating <= 3 // Only ask for type if rating is 3 or below
}])
const { comments } = await inquirer.prompt([{
type: 'input',
name: 'comments',
message: 'Any specific feedback? (optional)',
when: () => rating <= 3
}])
// Collect feedback data
const metadata = fix.ai_metadata || {}
feedbackData.push({
fix_session_id: fix.session_id || `cli_${Date.now()}`,
vulnerability_id: fix.vulnerability_id || fix.id,
rating: rating,
feedback_type: feedbackType || (rating >= 4 ? 'helpful' : 'incomplete'),
comments: comments || null,
vulnerability_type: fix.vulnerability_type,
language: metadata.language || detectLanguageFromFile(fix.file_path),
framework: metadata.framework || null,
complexity_level: getSeverityComplexity(fix.severity),
model_used: metadata.model_used || options.model || 'claude-sonnet-4',
prompt_version: metadata.prompt_version || 'cli_v1',
tokens_used: metadata.tokens_used || estimateTokens(fix.description),
fix_applied: true,
tests_passed: null, // Could be determined from test results
manual_changes_needed: false
})
}
// Submit feedback to API
const spinner = ora('Submitting feedback...').start()
try {
const config = ConfigManager.getConfig()
const apiBaseURL = config.api_url || 'https://api.vaultace.com'
const submitPromises = feedbackData.map(feedback =>
fetch(`${apiBaseURL}/api/v1/ai-learning/feedback`, {
method: 'POST',
headers: {
'Authorization': `Bearer ${config.api_key}`,
'Content-Type': 'application/json',
},
body: JSON.stringify(feedback)
})
)
await Promise.all(submitPromises)
spinner.succeed(chalk.green('🎉 Thank you! Your feedback helps improve Vaultace AI'))
console.log(chalk.blue(' Your input will help train better security fixes for everyone!'))
} catch (error) {
spinner.fail('Failed to submit feedback')
console.log(chalk.yellow('You can submit feedback later at: https://feedback.vaultace.com'))
}
}
function detectLanguageFromFile(filePath) {
const ext = path.extname(filePath).toLowerCase()
const languageMap = {
'.js': 'javascript',
'.ts': 'typescript',
'.py': 'python',
'.java': 'java',
'.go': 'go',
'.rs': 'rust',
'.cpp': 'cpp',
'.c': 'c',
'.cs': 'csharp',
'.php': 'php',
'.rb': 'ruby'
}
return languageMap[ext] || 'unknown'
}
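// Example: detectLanguageFromFile('src/app/models.py') // => 'python'
//          detectLanguageFromFile('README.md')         // => 'unknown'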
function getSeverityComplexity(severity) {
const complexityMap = {
'critical': 'complex',
'high': 'complex',
'medium': 'medium',
'low': 'simple',
'info': 'simple'
}
return complexityMap[severity] || 'medium'
}
function estimateTokens(text) {
// Rough token estimation: ~4 characters per token
return Math.ceil((text || '').length / 4)
}
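// Example: estimateTokens('Use parameterized queries') // 25 chars => 7 tokens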
function getSeverityColor(severity) {
const colors = {
critical: chalk.red,
high: chalk.yellow,
medium: chalk.blue,
low: chalk.gray,
info: chalk.gray
}
return colors[severity] || chalk.gray
}
module.exports = fixCommand