// claude-git-hooks — Git hooks with Claude CLI for code analysis and
// automatic commit messages.
// (Package listing metadata: 495 lines (419 loc) • 18.7 kB • JavaScript)
/**
* File: pre-commit.js
* Purpose: Pre-commit hook for code quality analysis with Claude CLI
*
* Flow:
* 1. Get staged files (filtered by extension and size)
* 2. Build analysis prompt with file diffs
* 3. Send to Claude CLI for analysis
* 4. Parse JSON response
* 5. Display structured analysis results
* 6. Generate resolution prompt if issues found
* 7. Block commit if quality gate fails
*
* Dependencies:
* - All utils modules
*/
import {
getStagedFiles,
getFileDiff,
getFileContentFromStaging,
isNewFile,
getRepoName,
getCurrentBranch
} from '../utils/git-operations.js';
import {
filterFiles,
} from '../utils/file-operations.js';
import { analyzeCode, analyzeCodeParallel, chunkArray } from '../utils/claude-client.js';
import { buildAnalysisPrompt } from '../utils/prompt-builder.js';
import {
generateResolutionPrompt,
shouldGeneratePrompt
} from '../utils/resolution-prompt.js';
import { loadPreset } from '../utils/preset-loader.js';
import { getVersion, calculateBatches } from '../utils/package-info.js';
import logger from '../utils/logger.js';
import { getConfig } from '../config.js';
/**
* Configuration loaded from lib/config.js
* Override via .claude/config.json
*
* TOKEN USAGE ANALYSIS (200k context window):
* - 1 token ≈ 4 chars, 1KB ≈ 256 tokens
* - Overhead: template(512) + guidelines(512) + metadata(50) ≈ 1,074 tokens
* - 10 modified files (diffs): ~11,314 tokens (5.6%) ✅
* - 10 new files (100KB each): ~257,074 tokens (128%) ⚠️ EXCEEDS
* - With subagents (batch=3): ~26,265 tokens per batch (13%) ✅
*/
/**
 * Renders the structured analysis result to the console.
 * Why: Provides clear, visual feedback about code quality before the
 * quality-gate decision is made.
 *
 * @param {Object} result - Analysis result from Claude
 */
const displayResults = (result) => {
  // Small helper so every output line goes through one place.
  const line = (text) => (text === undefined ? console.log() : console.log(text));

  line();
  line('╔════════════════════════════════════════════════════════════════════╗');
  line('║ CODE QUALITY ANALYSIS ║');
  line('╚════════════════════════════════════════════════════════════════════╝');
  line();

  // Quality gate banner: anything other than an explicit PASSED is a failure.
  const gate = result.QUALITY_GATE || 'UNKNOWN';
  if (gate === 'PASSED') {
    logger.success('Quality Gate: PASSED');
  } else {
    logger.error('pre-commit - displayResults', 'Quality Gate: FAILED');
  }
  line();

  const detailList = Array.isArray(result.details) ? result.details : [];

  // Headline: total issue count and number of distinct files affected.
  if (detailList.length > 0) {
    const affectedFiles = new Set(detailList.map(i => i.file)).size;
    line(`📊 ${detailList.length} issue(s) found across ${affectedFiles} file(s)`);
  } else {
    line('✅ No issues found!');
  }
  line();

  // Per-severity breakdown, only showing non-zero severities.
  if (result.issues && typeof result.issues === 'object') {
    line('📋 ISSUES SUMMARY');
    const { blocker = 0, critical = 0, major = 0, minor = 0, info = 0 } = result.issues;
    line(`Total: ${blocker + critical + major + minor + info} issues found`);
    const severityRows = [
      ['🔴 Blocker', blocker],
      ['🟠 Critical', critical],
      ['🟡 Major', major],
      ['🔵 Minor', minor],
      ['⚪ Info', info],
    ];
    for (const [label, count] of severityRows) {
      if (count > 0) line(` ${label}: ${count}`);
    }
    line();
  }

  // One entry per detected issue, with location when known.
  if (detailList.length > 0) {
    line('🔍 DETAILED ISSUES');
    for (const detail of detailList) {
      line(`[${detail.severity}] ${detail.type} in ${detail.file}:${detail.line || '?'}`);
      line(` ${detail.message}`);
      line();
    }
  }

  // Security hotspots are surfaced separately from regular issues.
  if (result.securityHotspots && result.securityHotspots > 0) {
    line(`🔥 SECURITY HOTSPOTS: ${result.securityHotspots} found`);
    line(' Review security-sensitive code carefully');
    line();
  }
};
/**
 * Consolidates multiple batch analysis results into a single worst-case view.
 * Why: parallel execution returns one result per batch, but the commit
 * decision needs one quality gate — so we take the most pessimistic value of
 * every field (worst gate, lowest score, worst letter grade, lowest coverage)
 * and sum the per-severity issue counts.
 *
 * @param {Array<Object>} results - Array of analysis results, one per batch
 * @returns {Object} - Consolidated result with the same shape as a single result
 */
const consolidateResults = (results) => {
  // Letter-grade ranking (A best, E worst). Hoisted out of the loop below —
  // it is loop-invariant and was previously rebuilt on every iteration.
  const metricOrder = { A: 5, B: 4, C: 3, D: 2, E: 1 };

  // Start from the best possible outcome; each batch can only make it worse.
  const consolidated = {
    QUALITY_GATE: 'PASSED',
    approved: true,
    score: 10,
    metrics: { reliability: 'A', security: 'A', maintainability: 'A', coverage: 100, duplications: 0, complexity: 0 },
    issues: { blocker: 0, critical: 0, major: 0, minor: 0, info: 0 },
    details: [],
    blockingIssues: [],
    securityHotspots: 0
  };

  for (const result of results) {
    // Worst-case gate/approval/score: one failing batch fails the commit.
    if (result.QUALITY_GATE === 'FAILED') consolidated.QUALITY_GATE = 'FAILED';
    if (result.approved === false) consolidated.approved = false;
    if (result.score < consolidated.score) consolidated.score = result.score;

    // Worst-case metrics: lowest letter grade, lowest coverage,
    // highest duplications and complexity.
    if (result.metrics) {
      ['reliability', 'security', 'maintainability'].forEach(m => {
        const current = metricOrder[consolidated.metrics[m]] || 5;
        const incoming = metricOrder[result.metrics[m]] || 5;
        if (incoming < current) consolidated.metrics[m] = result.metrics[m];
      });
      if (result.metrics.coverage !== undefined) consolidated.metrics.coverage = Math.min(consolidated.metrics.coverage, result.metrics.coverage);
      if (result.metrics.duplications !== undefined) consolidated.metrics.duplications = Math.max(consolidated.metrics.duplications, result.metrics.duplications);
      if (result.metrics.complexity !== undefined) consolidated.metrics.complexity = Math.max(consolidated.metrics.complexity, result.metrics.complexity);
    }

    // Sum per-severity issue counts across batches.
    if (result.issues) {
      Object.keys(consolidated.issues).forEach(s => consolidated.issues[s] += (result.issues[s] || 0));
    }

    // Merge list-like fields.
    if (Array.isArray(result.details)) consolidated.details.push(...result.details);
    if (Array.isArray(result.blockingIssues)) consolidated.blockingIssues.push(...result.blockingIssues);
    if (result.securityHotspots) consolidated.securityHotspots += result.securityHotspots;
  }

  return consolidated;
};
/**
 * Main pre-commit hook execution.
 *
 * Steps:
 *   1. Load config + active preset
 *   2. Collect staged files and filter by extension/size
 *   3. Build per-file diff/content data
 *   4. Analyze with Claude (parallel batches or a single prompt)
 *   5. Display results and enforce the quality gate
 *
 * Exit codes: 0 = commit allowed, 1 = commit blocked or hook error.
 */
const main = async () => {
  const startTime = Date.now();
  try {
    // Load configuration (defaults merged with .claude/config.json overrides)
    const config = await getConfig();

    // Enable debug mode from config
    if (config.system.debug) {
      logger.setDebugMode(true);
    }

    // Display configuration info
    const version = await getVersion();
    console.log(`\n🤖 claude-git-hooks v${version}`);
    logger.info('Starting code quality analysis...');

    // DEBUG: log working directories to diagnose cwd-vs-repo-root mismatches.
    // Fix: getRepoRoot was previously dynamically imported twice and the
    // second destructured binding was never used; one import suffices.
    const { getRepoRoot } = await import('../utils/git-operations.js');
    const repoRoot = getRepoRoot();

    // Normalize paths for comparison (handle Windows backslash vs forward slash)
    const normalizePath = (p) => p.replace(/\\/g, '/').toLowerCase();
    const cwdNormalized = normalizePath(process.cwd());
    const repoRootNormalized = normalizePath(repoRoot);
    logger.debug(
      'pre-commit - main',
      'Working directory info',
      {
        'process.cwd()': process.cwd(),
        'repo root': repoRoot,
        'cwd (normalized)': cwdNormalized,
        'repo root (normalized)': repoRootNormalized,
        'match': cwdNormalized === repoRootNormalized
      }
    );
    logger.debug(
      'pre-commit - main',
      'Configuration',
      { ...config }
    );

    // Load active preset (determines file extensions and analysis focus)
    const presetName = config.preset || 'default';
    const { metadata } = await loadPreset(presetName);
    logger.info(`🎯 Analyzing with '${metadata.displayName}' preset`);
    logger.debug(
      'pre-commit - main',
      'Preset loaded',
      {
        preset: presetName,
        fileExtensions: metadata.fileExtensions,
        techStack: metadata.techStack
      }
    );

    // Use preset's file extensions
    const allowedExtensions = metadata.fileExtensions;

    // Step 1: Get staged files with preset extensions
    logger.debug('pre-commit - main', 'Getting staged files');
    const stagedFiles = getStagedFiles({
      extensions: allowedExtensions
    });

    if (stagedFiles.length === 0) {
      logger.info('No files to review');
      process.exit(0);
    }

    logger.info(`Files to review: ${stagedFiles.length}`);
    logger.debug(
      'pre-commit - main',
      'Files matched by preset',
      { count: stagedFiles.length, extensions: allowedExtensions }
    );

    // Step 2: Filter files by size
    logger.debug('pre-commit - main', 'Filtering files by size');
    const filteredFiles = await filterFiles(stagedFiles, {
      maxSize: config.analysis.maxFileSize,
      extensions: allowedExtensions
    });

    const validFiles = filteredFiles.filter(f => f.valid);
    const invalidFiles = filteredFiles.filter(f => !f.valid);

    // Show user-facing warnings for rejected files
    if (invalidFiles.length > 0) {
      invalidFiles.forEach(file => {
        logger.warning(`Skipping ${file.path}: ${file.reason}`);
      });
    }

    if (validFiles.length === 0) {
      logger.warning('No valid files found to review');
      process.exit(0);
    }

    if (validFiles.length > config.analysis.maxFiles) {
      logger.warning(`Too many files to review (${validFiles.length})`);
      logger.warning('Consider splitting the commit into smaller parts');
      process.exit(0);
    }

    // Step 3: Build file data for prompt (diff for modified files, full
    // staged content for new files). One async task per file.
    // Fix: removed leftover debug statement logger.info("test log").
    logger.debug('pre-commit - main', 'Building file data for analysis');
    const fileDataPromises = validFiles.map(async ({ path: filePath }) => {
      try {
        const diff = getFileDiff(filePath);
        const isNew = isNewFile(filePath);

        // New files have no meaningful diff; send their full staged content.
        let content = null;
        if (isNew) {
          content = await getFileContentFromStaging(filePath);
        }

        return {
          path: filePath,
          diff,
          content,
          isNew
        };
      } catch (error) {
        logger.error(
          'pre-commit - main',
          `Failed to process file: ${filePath}`,
          error
        );
        // Return minimal data on error so one bad file doesn't abort the run.
        return {
          path: filePath,
          diff: `Error getting diff: ${error.message}`,
          content: null,
          isNew: false
        };
      }
    });

    const filesData = await Promise.all(fileDataPromises);

    // Step 4: Build analysis prompt
    logger.info(`Sending ${filesData.length} files for review...`);

    // Subagent (parallel batching) configuration. The mode decision is
    // hoisted into one flag so the condition is not duplicated below
    // (resolves the previous TODO about the repeated conditional).
    const subagentsEnabled = config.subagents?.enabled || false;
    const subagentModel = config.subagents?.model || 'haiku';
    const batchSize = config.subagents?.batchSize || 3;
    const useParallel = subagentsEnabled && filesData.length >= 3;

    if (useParallel) {
      const { numBatches, shouldShowBatches } = calculateBatches(filesData.length, batchSize);
      console.log(`⚡ Batch optimization: ${subagentModel} model, ${batchSize} files per batch`);
      if (shouldShowBatches) {
        console.log(`📊 Analyzing ${filesData.length} files in ${numBatches} batch${numBatches > 1 ? 'es' : ''}`);
      }
    }

    // Step 5: Analyze with Claude (parallel batches or a single prompt)
    let result;
    if (useParallel) {
      // Parallel execution: split files into batches
      logger.info(`Using parallel execution with batch size ${batchSize}`);
      const fileBatches = chunkArray(filesData, batchSize);
      logger.debug('pre-commit - main', `Split into ${fileBatches.length} batches`);

      // Build one prompt per batch
      const prompts = await Promise.all(
        fileBatches.map(async (batch) => {
          return await buildAnalysisPrompt({
            templateName: config.templates.analysis,
            guidelinesName: config.templates.guidelines,
            files: batch,
            metadata: {
              REPO_NAME: getRepoName(),
              BRANCH_NAME: getCurrentBranch()
            },
            subagentConfig: null // Don't add subagent instruction for parallel
          });
        })
      );

      // Build telemetry context
      const telemetryContext = {
        fileCount: filesData.length,
        batchSize: batchSize,
        model: subagentModel,
        hook: 'pre-commit'
      };

      // Execute in parallel
      const results = await analyzeCodeParallel(prompts, {
        timeout: config.analysis.timeout,
        saveDebug: false, // Don't save debug for individual batches
        telemetryContext
      });

      // Merge per-batch results into one worst-case result
      result = consolidateResults(results);

      // Save consolidated debug if enabled
      if (config.system.debug) {
        const { saveDebugResponse } = await import('../utils/claude-client.js');
        await saveDebugResponse(
          `PARALLEL ANALYSIS: ${fileBatches.length} batches`,
          JSON.stringify(result, null, 2)
        );
      }
    } else {
      // Single execution: one prompt covering all files
      logger.debug('pre-commit - main', 'Building analysis prompt');
      const prompt = await buildAnalysisPrompt({
        templateName: config.templates.analysis,
        guidelinesName: config.templates.guidelines,
        files: filesData,
        metadata: {
          REPO_NAME: getRepoName(),
          BRANCH_NAME: getCurrentBranch()
        },
        subagentConfig: config.subagents
      });

      logger.debug(
        'pre-commit - main',
        'Sending prompt to Claude',
        { promptLength: prompt.length }
      );

      // Build telemetry context for single execution
      const telemetryContext = {
        fileCount: filesData.length,
        batchSize: filesData.length, // Single batch = all files
        totalBatches: 1,
        model: config.subagents?.model || 'haiku',
        hook: 'pre-commit'
      };

      result = await analyzeCode(prompt, {
        timeout: config.analysis.timeout,
        saveDebug: config.system.debug,
        telemetryContext
      });
    }

    // Step 6: Display results
    displayResults(result);

    // Step 7: Enforce the quality gate — block the commit on failure
    const qualityGatePassed = result.QUALITY_GATE === 'PASSED';
    const approved = result.approved !== false;

    if (!qualityGatePassed || !approved) {
      const duration = ((Date.now() - startTime) / 1000).toFixed(2);
      console.log(`\n⏱️ Analysis time: ${duration}s\n`);
      logger.error('pre-commit - main', 'Commit blocked due to quality gate failure');

      // Show blocking issues
      if (Array.isArray(result.blockingIssues) && result.blockingIssues.length > 0) {
        console.log('=== CRITICAL ISSUES ===');
        result.blockingIssues.forEach(issue => {
          console.log(`- ${issue.description || 'Unknown issue'}`);
        });
        console.log();
      }

      // Generate resolution prompt if needed
      if (shouldGeneratePrompt(result)) {
        const resolutionPath = await generateResolutionPrompt(result, {
          fileCount: filesData.length
        });
        console.log('═══ AI RESOLUTION PROMPT GENERATED ═══');
        logger.info(`An AI-friendly prompt has been generated at: ${resolutionPath}`);
        logger.warning('Copy this file to a new Claude instance to resolve problems automatically.');
        console.log();
      }

      process.exit(1);
    }

    // Success
    const duration = ((Date.now() - startTime) / 1000).toFixed(2);
    console.log(`\n⏱️ Analysis time: ${duration}s`);
    logger.success('Code analysis completed. Quality gate passed.');
    process.exit(0);
  } catch (error) {
    const duration = ((Date.now() - startTime) / 1000).toFixed(2);
    console.log(`\n⏱️ Analysis time: ${duration}s`);
    logger.error('pre-commit - main', 'Pre-commit hook failed', error);
    console.error('\nError executing Claude CLI');
    console.error('Check that Claude CLI is configured correctly');
    if (error.output) {
      console.error('\nClaude CLI output:');
      console.error(error.output);
    }
    process.exit(1);
  }
};

// Execute main
main();