agentsqripts

Comprehensive static code analysis toolkit for identifying technical debt, security vulnerabilities, performance issues, and code quality problems

/**
 * @file Single Responsibility Principle file scorer with context-aware violation assessment
 * @description Single responsibility: Provide intelligent SRP scoring with architectural context awareness
 *
 * This scoring module analyzes individual files for Single Responsibility Principle violations
 * using context-aware thresholds, AST-based analysis, and intelligent architectural assessment.
 * It distinguishes between legitimate multi-function modules and genuine architectural issues
 * requiring refactoring attention through nuanced scoring algorithms.
 *
 * Design rationale:
 * - Context-aware scoring prevents false positives on utility modules and CLI tools
 * - AST-based analysis provides accurate function and export counting beyond regex patterns
 * - Multiple violation indicators create comprehensive assessment beyond simple metrics
 * - Threshold customization enables appropriate assessment for different file types
 * - Conservative scoring approach minimizes unnecessary refactoring recommendations
 *
 * Scoring methodology:
 * - File context detection determines appropriate thresholds (CLI, library, test, config)
 * - Multiple metrics combine to provide holistic SRP assessment
 * - Violation severity adjusts based on file purpose and architectural role
 * - Keyword clustering analysis identifies responsibility distribution patterns
 * - Balanced approach weighs mathematical metrics against architectural wisdom
 */
const { DEFAULT_KEYWORD_CLUSTERS } = require('../../config/localVars');
const { stripStringContent, stripComments, getFileType } = require('./utils/stringStripper');
const { countRealExports, countRealFunctions } = require('./utils/astAnalyzer');
const { getThresholds, exceedsThreshold } = require('./utils/thresholdManager');
const { detectFileContext, getContextualThresholds, getContextualSeverityAdjustment } = require('./contextualThresholdProvider');

/**
 * Score a file for SRP violations based on multiple indicators
 * @param {string} content - File content to analyze
 * @param {string} filePath - File path for context
 * @param {Array<string[]>} keywordClusters - Clusters of related keywords
 * @returns {Object} Analysis results with score and details
 */
function scoreFileForSRP(content, filePath = '', keywordClusters = DEFAULT_KEYWORD_CLUSTERS) {
  let score = 0;
  const violations = [];
  const details = {};

  const { getClusterType } = require('./clusterTypeProvider');
  const { getSeverityLevel } = require('./severityLevelProvider');

  // Determine file type and context for appropriate thresholds
  const fileType = getFileType(filePath);
  const fileContext = detectFileContext(filePath, content);
  const thresholds = getContextualThresholds(fileContext);
  details.fileType = fileType;
  details.fileContext = fileContext;

  // Strip strings and comments for more accurate analysis
  const strippedContent = stripStringContent(content);
  const codeContent = stripComments(strippedContent);

  const lines = content.split('\n');
  const lineCount = lines.length;

  // Use AST-based counting for exports (more accurate)
  const exportCount = countRealExports(strippedContent);
  if (exportCount > 1) {
    score += 2;
    violations.push(`Multiple exports detected (${exportCount})`);
  }
  if (exceedsThreshold(exportCount, thresholds.exports)) {
    score += 2;
    violations.push(`Excessive exports (${exportCount} exports)`);
  }
  details.exportCount = exportCount;

  // Check file length with context-aware penalties
  const linePenalty = thresholds.linePenalty || 1.0;
  if (lineCount > thresholds.lines) {
    const penalty = Math.round(2 * linePenalty);
    if (penalty > 0) {
      score += penalty;
      violations.push(`Long ${fileContext} file (${lineCount} lines)`);
    }
  }
  if (lineCount > thresholds.lines * 1.5) {
    const penalty = Math.round(2 * linePenalty);
    if (penalty > 0) {
      score += penalty;
      violations.push(`Very long ${fileContext} file (${lineCount} lines)`);
    }
  }
  details.lineCount = lineCount;

  // Check for mixed keyword clusters with smarter detection
  const foundClusters = [];
  keywordClusters.forEach((cluster, index) => {
    const foundKeywords = cluster.filter(keyword => {
      // Check if keyword appears in actual code (not strings/comments)
      return isKeywordInCode(codeContent, keyword);
    });
    if (foundKeywords.length >= 1) {
      foundClusters.push({
        index,
        keywords: foundKeywords,
        clusterType: getClusterType(cluster)
      });
    }
  });

  if (foundClusters.length >= thresholds.concerns) {
    score += foundClusters.length;
    violations.push(`Mixed concerns detected (${foundClusters.length} different responsibility areas)`);
  }
  details.foundClusters = foundClusters;

  // Use AST-based counting for functions with context-aware thresholds
  const functionCount = countRealFunctions(strippedContent);

  // Context-aware function penalties
  if (functionCount > thresholds.functions) {
    const excess = functionCount - thresholds.functions;
    if (fileContext === 'cli' || fileContext === 'demo') {
      score += Math.min(excess, 2); // Lenient for CLI and demo files
      violations.push(`Multiple functions in ${fileContext} (${functionCount} functions - acceptable for ${fileContext} context)`);
    } else if (fileContext === 'test') {
      // Very lenient for test files
      if (functionCount > thresholds.functions * 2) {
        score += 1;
        violations.push(`Many test functions (${functionCount} functions - consider grouping related tests)`);
      }
    } else {
      score += Math.min(excess * 2, 6); // Standard penalty for library/general files
      violations.push(`Multiple functions affect AI efficiency (${functionCount} functions - consider splitting)`);
    }
  }

  // Additional penalty for excessive functions in non-test contexts
  if (functionCount > thresholds.functions * 2 && fileContext !== 'test' && fileContext !== 'demo') {
    score += 2;
    violations.push(`Excessive functions impact AI token efficiency (${functionCount} functions - high refactoring cost)`);
  }
  details.functionCount = functionCount;

  // Check for excessive class methods (using stripped content)
  const methodMatches = codeContent.match(/^\s*\w+\s*\([^)]*\)\s*{/gm) || [];
  const methodCount = methodMatches.length;
  if (exceedsThreshold(methodCount, thresholds.methods)) {
    score += 1;
    violations.push(`Many methods (${methodCount} methods)`);
  }
  details.methodCount = methodCount;

  // Add file type context to details
  details.responsibilityAreas = foundClusters.map(c => c.clusterType);

  // Apply contextual severity adjustment
  const adjustedScore = getContextualSeverityAdjustment(fileContext, score);

  return {
    score: adjustedScore,
    rawScore: score,
    violations,
    details,
    severity: getSeverityLevel(adjustedScore),
    fileType,
    fileContext
  };
}

/**
 * Check if keyword is in actual code, not comments or strings
 * @param {string} content - Code content (already stripped of strings/comments)
 * @param {string} keyword - Keyword to search for
 * @returns {boolean} True if keyword is found in code
 */
function isKeywordInCode(content, keyword) {
  // Use word boundaries for more accurate matching
  const wordBoundaryPattern = new RegExp(`\\b${escapeRegExp(keyword)}\\b`, 'i');
  return wordBoundaryPattern.test(content);
}

/**
 * Escape special regex characters
 * @param {string} string - String to escape
 * @returns {string} Escaped string
 */
function escapeRegExp(string) {
  return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}

module.exports = {
  scoreFileForSRP,
  DEFAULT_KEYWORD_CLUSTERS
};
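/*
 * Illustrative usage sketch (not part of the module above). It assumes the scorer
 * can be required directly from a sibling script; the './srpFileScorer' path, the
 * sample source, the file path, and the keyword clusters below are all hypothetical,
 * since the package's real file name and public entry point are not shown here.
 */
const { scoreFileForSRP } = require('./srpFileScorer'); // hypothetical path

// A small in-memory source that mixes file reading with HTML rendering.
const sampleSource = [
  "const fs = require('fs');",
  "function loadConfig(path) { return JSON.parse(fs.readFileSync(path, 'utf8')); }",
  "function renderReport(data) { return `<html>${JSON.stringify(data)}</html>`; }",
  "module.exports = { loadConfig, renderReport };"
].join('\n');

// Custom clusters follow the documented Array<string[]> shape; these keyword
// groups are illustrative stand-ins for the package's DEFAULT_KEYWORD_CLUSTERS.
const customClusters = [
  ['fs', 'readFileSync', 'writeFile'],
  ['render', 'html', 'template']
];

const result = scoreFileForSRP(sampleSource, 'src/configService.js', customClusters);
console.log(result.score);      // contextually adjusted score
console.log(result.rawScore);   // score before getContextualSeverityAdjustment
console.log(result.severity);   // label from getSeverityLevel(adjustedScore)
console.log(result.violations); // human-readable messages (exports, length, mixed concerns, ...)
console.log(result.details);    // exportCount, lineCount, functionCount, foundClusters, etc.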