
codedetective


AI-powered tool to analyze codebases, reconstruct development timelines, and suggest where to resume work

import fetch from 'node-fetch';
import { Document, Packer, Paragraph, TextRun, HeadingLevel } from 'docx';
import fs from 'fs';

/**
 * Format the repository data for the AI
 */
function formatRepositoryData(repoData) {
  // Simplified file tree
  function buildTree(dir, depth = 0, maxDepth = 3, prefix = '') {
    if (depth > maxDepth) return '';

    let result = '';

    // Add files
    for (const file of dir.files) {
      result += `${prefix}├── ${file.name} (${file.type}, ${formatSize(file.size)})\n`;
    }

    // Add subdirectories
    for (let i = 0; i < dir.subdirectories.length; i++) {
      const subdir = dir.subdirectories[i];
      const isLast = i === dir.subdirectories.length - 1;
      const newPrefix = prefix + (isLast ? '    ' : '│   ');

      result += `${prefix}${isLast ? '└── ' : '├── '}${subdir.name}/\n`;
      result += buildTree(subdir, depth + 1, maxDepth, newPrefix);
    }

    return result;
  }

  // Format file size
  function formatSize(bytes) {
    const units = ['B', 'KB', 'MB', 'GB'];
    let size = bytes;
    let unitIndex = 0;

    while (size >= 1024 && unitIndex < units.length - 1) {
      size /= 1024;
      unitIndex++;
    }

    return `${size.toFixed(1)} ${units[unitIndex]}`;
  }

  // Format file type distribution
  function formatFileTypeDistribution(distribution) {
    return Object.entries(distribution)
      .sort((a, b) => b[1] - a[1])
      .map(([type, count]) => `- ${type}: ${count} files`)
      .join('\n');
  }

  // Format file category distribution
  function formatFileCategoryDistribution(distribution) {
    return Object.entries(distribution)
      .sort((a, b) => b[1] - a[1])
      .map(([category, count]) => `- ${category}: ${count} files`)
      .join('\n');
  }

  // Format recently modified files
  function formatRecentlyModifiedFiles(files) {
    return files
      .map(file => `- ${file.path} (${new Date(file.lastModified).toISOString().split('T')[0]})`)
      .join('\n');
  }

  // Create the formatted output
  return `
# Repository Analysis: ${repoData.name}

## Overview
- Total Files: ${repoData.totalFiles}
- Total Size: ${formatSize(repoData.totalSize)}
- Repository Path: ${repoData.rootPath}

## File Type Distribution
${formatFileTypeDistribution(repoData.statistics.fileTypeDistribution)}

## File Category Distribution
${formatFileCategoryDistribution(repoData.statistics.fileCategoryDistribution)}

## Recently Modified Files
${formatRecentlyModifiedFiles(repoData.statistics.recentlyModifiedFiles)}

## Directory Structure
${buildTree(repoData.structure)}
`.trim();
}
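/*
 * Shape of `repoData` as consumed by formatRepositoryData above. This typedef
 * is inferred from the property accesses in this file rather than taken from
 * the package's documentation, so treat it as a sketch of the expected input.
 *
 * @typedef {Object} RepoData
 * @property {string} name
 * @property {number} totalFiles
 * @property {number} totalSize   // bytes
 * @property {string} rootPath
 * @property {Object} statistics
 * @property {Object<string, number>} statistics.fileTypeDistribution      // e.g. { ".js": 12 }
 * @property {Object<string, number>} statistics.fileCategoryDistribution  // e.g. { "source": 20 }
 * @property {Array<{path: string, lastModified: string|number|Date}>} statistics.recentlyModifiedFiles
 * @property {{files: Array<{name: string, type: string, size: number}>, subdirectories: Array}} structure  // recursive
 */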
/**
 * Generate a DOCX document from the provided content
 */
async function generateDocx(content, outputPath) {
  // Create paragraphs array
  const paragraphs = [];

  // Convert markdown headings to DOCX headings
  // Split content by lines
  const lines = content.split('\n');

  for (let i = 0; i < lines.length; i++) {
    const line = lines[i];

    // Handle headings
    if (line.startsWith('# ')) {
      paragraphs.push(new Paragraph({
        children: [
          new TextRun({ text: line.replace('# ', ''), size: 40 })
        ],
        heading: HeadingLevel.HEADING_1
      }));
    } else if (line.startsWith('## ')) {
      paragraphs.push(new Paragraph({
        children: [
          new TextRun({ text: line.replace('## ', ''), size: 32 })
        ],
        heading: HeadingLevel.HEADING_2
      }));
    } else if (line.startsWith('### ')) {
      paragraphs.push(new Paragraph({
        children: [
          new TextRun({ text: line.replace('### ', ''), size: 28 })
        ],
        heading: HeadingLevel.HEADING_3
      }));
    } else if (line.startsWith('- ')) {
      // Handle bullet points
      paragraphs.push(new Paragraph({
        children: [
          new TextRun({ text: line.replace('- ', '') })
        ],
        bullet: { level: 0 }
      }));
    } else if (line.startsWith('```')) {
      // Handle code blocks: skip the opening fence (with or without a
      // language tag), then collect everything up to the closing fence
      let codeBlock = '';
      i++;
      while (i < lines.length && !lines[i].startsWith('```')) {
        codeBlock += lines[i] + '\n';
        i++;
      }

      // Add code block
      paragraphs.push(new Paragraph({
        children: [
          new TextRun({ text: codeBlock, font: "Courier New" })
        ]
      }));
    } else if (line.trim() !== '') {
      // Regular paragraph (skip empty lines)
      paragraphs.push(new Paragraph({
        children: [
          new TextRun({ text: line })
        ]
      }));
    }
  }

  // Create document with paragraphs
  const doc = new Document({
    sections: [{
      children: paragraphs
    }]
  });

  // Save the document
  const buffer = await Packer.toBuffer(doc);
  fs.writeFileSync(outputPath, buffer);
}
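/*
 * Line-by-line mapping applied by generateDocx above. Note that the docx
 * library measures `size` in half-points, so these headings render at
 * 20pt, 16pt, and 14pt respectively:
 *
 *   "# Title"        -> HEADING_1 paragraph, size 40 (20pt)
 *   "## Section"     -> HEADING_2 paragraph, size 32 (16pt)
 *   "### Subsection" -> HEADING_3 paragraph, size 28 (14pt)
 *   "- item"         -> level-0 bulleted paragraph
 *   fenced code      -> one Courier New paragraph holding the block's body
 *   anything else    -> plain paragraph; blank lines are dropped
 */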
/**
 * Generate a project report using OpenAI's API
 */
export async function generateReport(options) {
  const {
    repoData,
    apiKey,
    model = 'gpt-4-turbo',
    format = 'docx',
    outputPath
  } = options;

  // Format repository data
  const formattedData = formatRepositoryData(repoData);

  // Create system message
  const systemMessage = `You are a meticulous code detective tasked with analyzing repository data to reconstruct the developer's timeline and suggest where they should resume work. Your specialty is identifying work patterns, dependent files, and logical next steps based on file modification timestamps.`;

  // Create user message
  const userMessage = `
# Code Detective: Reconstruct My Development Timeline & Tell Me Where to Resume

## Context
I've generated a markdown inventory of all my code files using a script that captures filenames, file types, and most importantly, the timestamp when each file was last modified. I'd like you to analyze this data to help me understand my past work and determine exactly where I should pick up.

## Your Task
Based on the script inventory data I'm sharing, please:

1. Create a chronological timeline of my coding activity
2. Identify the most recently modified files as my "last work session"
3. Clearly specify the exact filenames and paths I should focus on next
4. Determine any potential dependencies or prerequisites for these files
5. Suggest the most logical "next steps" based on my previous work patterns
6. Highlight any abandoned or incomplete work threads I might have forgotten

## How to Analyze
- Group files with similar timestamps (within hours of each other)
- Identify the most recent clusters of activity across different directories
- Look for incomplete sequences or patterns that suggest unfinished work
- Analyze file names, paths, and project structure to determine logical workflows
- Check for files that appear to be in progress based on their modification pattern
- Examine the context around the most recently modified files

Please provide a section titled "RESUME HERE" that gives me specific, actionable guidance with:

1. The exact filenames I should open first
2. What these files were likely being used for
3. Which related files I might need to reference
4. Specific suggestions on what aspects of these files I was likely working on
5. A clear "next task" recommendation based on the timeline evidence

When suggesting where I should resume work, be as specific as possible with exact filenames, paths, and concrete tasks rather than general directions.

Repository data:
${formattedData}

Generate a comprehensive ${format === 'json' ? 'JSON' : 'Markdown'} format report following the instructions above.
`.trim();

  try {
    // Call OpenAI's responses API
    const response = await fetch('https://api.openai.com/v1/responses', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`,
        'Accept': 'application/json'
      },
      body: JSON.stringify({
        model,
        input: [
          {
            "role": "system",
            "content": [
              { "type": "input_text", "text": systemMessage }
            ]
          },
          {
            "role": "user",
            "content": [
              { "type": "input_text", "text": userMessage }
            ]
          }
        ],
        max_output_tokens: 4000,
        temperature: 0.2,
        metadata: {
          domain: "code_analysis",
          user_id: "",
          user_region: "US",
          request_id: `codedetective-${Date.now()}`
        }
      })
    });

    if (!response.ok) {
      const errorData = await response.json();
      throw new Error(`OpenAI API error: ${errorData.error?.message || response.statusText}`);
    }

    const data = await response.json();

    // Extract response from the OpenAI API format based on new API structure
    if (!data.output || !data.output.length) {
      throw new Error('No content in OpenAI response');
    }

    let content = '';

    // Process based on the new API response structure
    if (data.output && Array.isArray(data.output)) {
      for (const outputItem of data.output) {
        if (outputItem.role === 'assistant' && outputItem.content && Array.isArray(outputItem.content)) {
          for (const contentItem of outputItem.content) {
            if (contentItem.type === 'output_text' && contentItem.text) {
              content += contentItem.text;
            }
          }
        }
      }
    }

    if (!content) {
      throw new Error('Unable to extract content from OpenAI response');
    }

    // If DOCX format is requested, generate a DOCX file
    if (format === 'docx' && outputPath) {
      await generateDocx(content, outputPath);
      return `Report saved as DOCX: ${outputPath}`;
    }

    return content;
  } catch (error) {
    throw new Error(`Error generating report: ${error.message}`);
  }
}
//# sourceMappingURL=generator.js.map
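/*
 * Example usage (an illustrative sketch: `scanRepository` stands in for
 * whatever produces the repoData object and is not defined in this file,
 * and the import path assumes generateReport is re-exported from the
 * package root):
 *
 *   import { generateReport } from 'codedetective';
 *
 *   const repoData = await scanRepository('./my-project'); // hypothetical helper
 *   const report = await generateReport({
 *     repoData,
 *     apiKey: process.env.OPENAI_API_KEY,
 *     model: 'gpt-4-turbo',   // default
 *     format: 'docx',         // 'docx' + outputPath writes a file; otherwise the text is returned
 *     outputPath: './resume-report.docx'
 *   });
 *   console.log(report);      // "Report saved as DOCX: ./resume-report.docx"
 */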