UNPKG

scai

Version:

> **AI-powered CLI for local code analysis, commit message suggestions, and natural-language queries.** 100% local, private, GDPR-friendly, made in Denmark/EU with ❤️.

53 lines (49 loc) 1.93 kB
import { generate } from "../../lib/generate.js";
import { logInputOutput } from "../../utils/promptLogHelper.js";

/**
 * Analysis module that reviews file summaries and global context notes,
 * then decides whether full file contents must be read ("needFullRead")
 * or the summaries alone suffice ("enoughForAnswer") before answering.
 */
export const contextReviewModule = {
  name: "contextReview",
  description:
    "Analyzes summaries and global context to decide if reading full files is needed before answering.",
  groups: ["analysis"],

  /**
   * Run the context review.
   *
   * @param {{ query: string, content?: unknown }} input - ModuleIO input;
   *   `content` may be a string or any JSON-serializable structure.
   * @returns {Promise<{ query: string, data: { decision: "needFullRead" | "enoughForAnswer", understanding: string } }>}
   *   ModuleIO-compliant output; `understanding` is the raw model text.
   */
  run: async (input) => {
    const query = input.query;

    // Convert content to a text blob for the model (pretty-printed JSON
    // when it is not already a string).
    const contextText =
      typeof input.content === "string"
        ? input.content
        : JSON.stringify(input.content ?? "", null, 2);

    // NOTE(review): reconstructed as multi-line — the original paste had this
    // template collapsed onto one line; the numbered list and leading .trim()
    // indicate it was authored with newlines.
    const promptText = `
You are an AI reasoning agent reviewing file summaries and contextual notes for a software repository.

User query: ${query}

Context:
${contextText}

Your task:
1. Summarize what this context reveals about the codebase.
2. Identify what is still missing or unclear.
3. Decide if reading full file contents is required ("needFullRead") or if current summaries are enough ("enoughForAnswer").

Respond concisely but analytically.
`.trim();

    // ✅ Call generate() with ModuleIO-compliant input.
    const aiResponse = await generate({
      query,
      content: promptText,
    });

    // ✅ Model output is in aiResponse.data.
    // Guard against a nullish response/data: JSON.stringify(undefined)
    // returns the value `undefined` (not a string), which previously made
    // the .toLowerCase() call below throw a TypeError.
    const modelText =
      typeof aiResponse?.data === "string"
        ? aiResponse.data.trim()
        : JSON.stringify(aiResponse?.data ?? "");

    // Keyword heuristic over the model's free-form answer. NOTE(review):
    // a negated phrase like "no need to read full files" still matches
    // "read full" and yields needFullRead — acceptable as a conservative
    // (read-more) bias, but worth confirming with prompt tuning.
    const lower = modelText.toLowerCase();
    const decision =
      lower.includes("needfullread") || lower.includes("read full")
        ? "needFullRead"
        : "enoughForAnswer";

    // ✅ Return fully ModuleIO-compliant output.
    const output = {
      query,
      data: {
        decision,
        understanding: modelText,
      },
    };

    logInputOutput("contextReview", "output", output.data);
    return output;
  },
};