// scai — a local-first AI CLI for understanding, querying, and iterating
// on large codebases. 100% local • no token costs • no cloud • no prompt
// injection • private by design.
import { logInputOutput } from '../utils/promptLogHelper.js';
import { generate } from '../lib/generate.js';
import { cleanupModule } from '../pipeline/modules/cleanupModule.js';
/**
 * VALIDATE CHANGES STEP
 *
 * Compares the code-transformation artifact produced for the current
 * step's target file against the proposed changes recorded in
 * `analysis.fileAnalysis`, optionally asking the LLM to grade whether
 * the proposal was actually applied, and writes a single validation
 * record to `context.analysis.executionOutcome.validations`.
 */
export const validateChangesStep = {
  name: 'validateChangesStep',
  description: 'Validates executed changes against expected changes and produces iteration reasoning.',
  requires: ['analysis.executionOutcome', 'analysis.fileAnalysis'],
  produces: ['analysis.executionOutcome', 'analysis.iterationReasoning'],

  /**
   * @param {object} context - Pipeline context. Reads
   *   `task.currentStep.filePath`, `analysis.fileAnalysis`, and
   *   `execution.codeTransformArtifacts.files`; writes
   *   `analysis.executionOutcome`.
   * @returns {Promise<void>}
   */
  async run(context) {
    context.analysis ??= {};

    // Ensure an execution outcome exists even when we bail out early,
    // so steps that `require` analysis.executionOutcome never see undefined.
    const exec = context.analysis.executionOutcome ?? {
      executedStepIds: [],
      validations: [],
    };
    context.analysis.executionOutcome = exec;

    const targetFile = context.task?.currentStep?.filePath;
    if (!targetFile) {
      console.warn('validateChangesStep: no currentStep or filePath — skipping validation');
      return;
    }

    const fileAnalysis = context.analysis.fileAnalysis?.[targetFile];
    if (!fileAnalysis) {
      // Nothing was analyzed for this file, so there is no expectation
      // to validate against. Warn instead of returning silently (matches
      // the guard above).
      console.warn(`validateChangesStep: no fileAnalysis for ${targetFile} — skipping validation`);
      return;
    }

    // 🔹 Normalize artifacts
    const artifacts = context.execution?.codeTransformArtifacts?.files ?? [];
    const artifact = artifacts.find((f) => f.filePath === targetFile);

    // 🔹 Baseline verdict from artifact presence alone.
    let status = 'invalid';
    let rationale = 'Expected changes were not executed or artifact missing.';
    let actualChange = 'No transformation artifact produced';
    if (artifact) {
      actualChange = artifact.notes ?? artifact.content ?? 'No changes detected';
      status = 'valid';
      rationale = 'Transformation artifact exists for target file.';
    }

    // 🔹 When both an artifact and a proposal exist, ask the LLM for a
    // finer-grained verdict. Any failure here is non-fatal: the baseline
    // verdict above is kept.
    if (artifact && fileAnalysis.proposedChanges) {
      try {
        const prompt = `
You are validating a code transformation for a single file.
File path: ${targetFile}
Proposed changes:
${JSON.stringify(fileAnalysis.proposedChanges, null, 2)}
Actual transformed content:
${artifact.content ?? "[no content]"}
Task:
- Determine whether the proposed changes have been properly applied.
- Consider the proposed targets and rationale.
- Return STRICT JSON:
{
"status": "valid" | "partial" | "invalid",
"rationale": "Explain why the changes are valid/partial/invalid"
}
`.trim();

        const ai = await generate({
          query: context.initContext?.userQuery ?? `validate ${targetFile}`,
          content: prompt,
        });

        let cleaned;
        try {
          cleaned = await cleanupModule.run({
            query: context.initContext?.userQuery ?? `validate ${targetFile}`,
            content: ai.data,
          });
        } catch (cleanupErr) {
          // Best-effort cleanup: fall back to the raw model output.
          console.warn(`[validateChangesStep] cleanupModule failed for ${targetFile}`, cleanupErr);
          cleaned = { data: ai.data, content: ai.data };
        }

        let responseData;
        if (typeof cleaned.data === 'object' && cleaned.data !== null) {
          responseData = cleaned.data;
        } else if (typeof cleaned.content === 'string') {
          try {
            responseData = JSON.parse(cleaned.content);
          } catch {
            responseData = undefined; // non-JSON output; keep baseline verdict
          }
        }

        // The model's reply is untrusted — only accept a well-formed status,
        // otherwise an arbitrary string would corrupt the `requiresRedo`
        // flag computed below.
        const allowedStatuses = new Set(['valid', 'partial', 'invalid']);
        if (typeof responseData?.status === 'string' && allowedStatuses.has(responseData.status)) {
          status = responseData.status;
        }
        if (typeof responseData?.rationale === 'string' && responseData.rationale !== '') {
          rationale = responseData.rationale;
        }
      } catch (err) {
        console.warn(`[validateChangesStep] LLM validation failed for ${targetFile}:`, err);
      }
    }

    const validation = {
      filePath: targetFile,
      expectedChange: fileAnalysis.proposedChanges?.summary ?? 'No expected change defined',
      actualChange,
      status,
      rationale,
      requiresRedo: status !== 'valid',
    };

    // NOTE(review): this replaces any prior validations rather than
    // appending — presumably one validation per iteration; confirm.
    exec.validations = [validation];

    logInputOutput('validateChangesStep', 'output', {
      executionOutcome: exec,
    });
  },
};