// prompt-validation — Version 0.1.1
// Validate and standardize AI prompt outputs, detect hallucinations,
// enforce patterns, and auto-correct issues.
// Bundled JavaScript output: 420 lines (407 loc), 15.4 kB.
// src/cli.ts
import { Command } from "commander";
// src/validator.ts
import fs from "fs/promises";
// src/patterns.ts
import Ajv from "ajv";
var ajv = new Ajv();
// Compile the schema once at module load and reuse it; Ajv's documentation
// recommends caching compiled validators instead of recompiling per call
// (the original recompiled on every "json-schema" rule check).
// NOTE(review): the schema only asserts "is a JSON object" — presumably a
// placeholder for user-supplied schemas; confirm before extending.
var objectSchemaValidator = ajv.compile({ type: "object" });

/**
 * Validates `text` against a list of named rules.
 * @param {string} text - Raw output text to validate.
 * @param {string[]} rules - Rule names: "email", "json-schema",
 *   "length-limit", "no-hallucination". Unknown names produce an error entry.
 * @returns {string[]} Human-readable error messages; empty when all rules pass.
 */
function validateWithRules(text, rules) {
  const errors = [];
  for (const rule of rules) {
    switch (rule) {
      case "email":
        // Requires at least one email-shaped token anywhere in the text.
        if (!/\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,}\b/i.test(text)) {
          errors.push("Text does not contain a valid email address.");
        }
        break;
      case "json-schema":
        try {
          const json = JSON.parse(text);
          if (!objectSchemaValidator(json)) {
            errors.push("JSON does not match schema.");
          }
        } catch {
          errors.push("Invalid JSON format.");
        }
        break;
      case "length-limit":
        if (text.length > 500) {
          errors.push("Text exceeds length limit (500 chars).");
        }
        break;
      case "no-hallucination":
        // Demo heuristic: flags the marker word "unicorn".
        if (/unicorn/i.test(text)) {
          errors.push("Hallucination detected: contains 'unicorn'.");
        }
        break;
      default:
        errors.push(`Unknown rule: ${rule}`);
    }
  }
  return errors;
}
// src/autocorrect.ts
/**
 * Best-effort normalization of AI output text.
 * JSON-looking input (starts with "{" or "[") is pretty-printed; a common LLM
 * artifact — trailing commas before "}" or "]" — is stripped and the text is
 * re-parsed. Non-JSON text gets sentence fixes: capitalized first letter and
 * a terminal period when no sentence-ending punctuation is present.
 *
 * Bug fix: the original never re-parsed after stripping trailing commas, so
 * the repaired (now valid) JSON fell through to the sentence fixer, which
 * appended a trailing "." and corrupted it.
 *
 * @param {string} text - Raw text to correct.
 * @returns {string} The corrected text.
 */
function autoCorrect(text) {
  let corrected = text;
  if (/^\s*[{[]/.test(text)) {
    try {
      // Already valid JSON: just pretty-print it.
      return JSON.stringify(JSON.parse(corrected), null, 2);
    } catch {
      // Strip trailing commas before closing braces/brackets.
      corrected = corrected.replace(/,\s*}/g, "}").replace(/,\s*]/g, "]");
    }
    try {
      // Retry after the repair; on success return clean JSON untouched by
      // the sentence fixes below.
      return JSON.stringify(JSON.parse(corrected), null, 2);
    } catch {
      // Still not parseable — fall through to the sentence-style fixes.
    }
  }
  if (corrected.length > 0) {
    corrected = corrected.charAt(0).toUpperCase() + corrected.slice(1);
  }
  if (!/[.?!]$/.test(corrected)) {
    corrected += ".";
  }
  return corrected;
}
// src/validator.ts
/**
 * Reads `file`, validates it against `options.rules` (when provided), and —
 * when `options.fix` is set and errors were found — writes an auto-corrected
 * copy to `<file>.fixed`.
 * @param {string} file - Path of the file to validate.
 * @param {{rules?: string[], fix?: boolean}} options
 * @returns {Promise<{valid: boolean, errors: string[], fixed: boolean}>}
 */
async function validateFile(file, options) {
  const content = await fs.readFile(file, "utf-8");
  const errors = options.rules ? validateWithRules(content, options.rules) : [];
  let fixed = false;
  // Only attempt a fix when requested AND something actually failed.
  if (options.fix && errors.length > 0) {
    const corrected = autoCorrect(content);
    await fs.writeFile(`${file}.fixed`, corrected, "utf-8");
    fixed = true;
  }
  return {
    valid: errors.length === 0,
    errors,
    fixed
  };
}
// src/cli.ts
import chalk from "chalk";
import fs2 from "fs/promises";
import path from "path";
// src/promptImprover.ts
/**
 * Produces an improved version of `input`, optionally via an LLM provider.
 * Falls back to the local template when no provider applies, the provider's
 * API key is missing, or the provider call fails/returns nothing.
 * @param {string} input - Raw user prompt.
 * @param {{targetFormat?: string, temperature?: number, llm?: string, model?: string}} [options]
 * @returns {Promise<{improved: string, meta: {used: string, model?: string}}>}
 */
async function improvePrompt(input, options = {}) {
  const targetFormat = options.targetFormat ?? "text";
  const temperature = options.temperature ?? 0.2;
  // Provider registry: each entry knows its env var, default model, and impl.
  const providers = {
    openai: { envVar: "OPENAI_API_KEY", defaultModel: "gpt-4o-mini", run: improveWithOpenAI },
    gemini: { envVar: "GEMINI_API_KEY", defaultModel: "gemini-1.5-pro", run: improveWithGemini }
  };
  const provider = options.llm ? providers[options.llm] : undefined;
  // A provider is only used when selected AND its API key is configured.
  if (provider && process.env[provider.envVar]) {
    const model = options.model ?? provider.defaultModel;
    const fromLlm = await provider.run(input, { model, temperature, targetFormat });
    if (fromLlm) {
      return { improved: fromLlm, meta: { used: options.llm, model } };
    }
  }
  // Local fallback: deterministic template-based restructuring, no network.
  return { improved: locallyStructurePrompt(input, targetFormat), meta: { used: "local" } };
}
/**
 * Builds the locally-generated "improved prompt" wrapper around the user's
 * input, with format-specific output guidance appended.
 * @param {string} input - Raw user prompt; trimmed before insertion.
 * @param {string} targetFormat - "json", "markdown", or anything else
 *   (treated as plain text).
 * @returns {string} The structured prompt text.
 */
function locallyStructurePrompt(input, targetFormat) {
  // Pick the format-specific guidance block.
  let formatTip;
  if (targetFormat === "json") {
    formatTip = `
Output Format:
- JSON object only, no prose.
- Include only the required keys. No additional keys.
- Use lowercase snake_case keys.
Example:
{
"summary": "...",
"steps": ["...", "..."],
"constraints": ["..."],
"final_answer": "..."
}`;
  } else if (targetFormat === "markdown") {
    formatTip = `
Output Format:
- Markdown only.
- Use headings, bullet lists, and code blocks where helpful.
- End with a short "Summary" section.`;
  } else {
    formatTip = `
Output Format:
- Clear, concise textual answer.
- Use numbered steps where applicable.
- End with a one-line TL;DR.`;
  }
  const intent = input.trim();
  return `You are an expert assistant.
Objective:
- Rewrite the user's prompt into a high-quality, unambiguous instruction that maximizes answer quality.
- Preserve the original intent exactly.
User Intent:
"""
${intent}
"""
Requirements:
- Disambiguate vague terms (timeframes, data ranges, success criteria).
- Specify constraints: time, tools, data sources, formatting, tone.
- Add step-by-step guidance (decompose complex tasks).
- Include evaluation criteria (what makes a good answer).
- Add edge cases and non-goals to avoid scope creep.
- Keep it as short as possible while precise.
${formatTip}
Deliverable:
- Return ONLY the improved prompt text.
- Do not include explanations or preambles.
`;
}
/**
 * Asks the OpenAI chat-completions API to improve the prompt.
 * Best-effort: returns null on any HTTP or network failure so the caller can
 * fall back to the local improver.
 * @param {string} input - Raw user prompt.
 * @param {{model: string, temperature: number, targetFormat: string}} params
 * @returns {Promise<string|null>} Improved prompt text, or null on failure.
 */
async function improveWithOpenAI(input, params) {
  try {
    const payload = {
      model: params.model,
      temperature: params.temperature,
      messages: [
        { role: "system", content: "You rewrite prompts to be clearer, more specific, and better structured." },
        { role: "user", content: locallyStructurePrompt(input, params.targetFormat) }
      ]
    };
    const response = await globalThis.fetch("https://api.openai.com/v1/chat/completions", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${process.env.OPENAI_API_KEY}`
      },
      body: JSON.stringify(payload)
    });
    if (!response.ok) {
      return null;
    }
    const data = await response.json();
    const content = data?.choices?.[0]?.message?.content;
    return typeof content === "string" ? content.trim() : null;
  } catch {
    // Network/parse errors are swallowed by design; caller falls back.
    return null;
  }
}
/**
 * Asks the Google Gemini generateContent API to improve the prompt.
 * Best-effort: returns null on any HTTP or network failure so the caller can
 * fall back to the local improver.
 * @param {string} input - Raw user prompt.
 * @param {{model: string, temperature: number, targetFormat: string}} params
 * @returns {Promise<string|null>} Improved prompt text, or null on failure.
 */
async function improveWithGemini(input, params) {
  try {
    const endpoint = new URL(
      `https://generativelanguage.googleapis.com/v1beta/models/${params.model}:generateContent`
    );
    // Fix: URL-encode the API key via searchParams instead of splicing it
    // raw into the URL string, so keys containing reserved characters
    // (e.g. "&", "#") cannot corrupt the query string.
    endpoint.searchParams.set("key", process.env.GEMINI_API_KEY ?? "");
    const res = await globalThis.fetch(endpoint, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        contents: [
          {
            role: "user",
            parts: [{ text: locallyStructurePrompt(input, params.targetFormat) }]
          }
        ],
        generationConfig: { temperature: params.temperature }
      })
    });
    if (!res.ok)
      return null;
    const data = await res.json();
    const text = data?.candidates?.[0]?.content?.parts?.[0]?.text ?? null;
    return typeof text === "string" ? text.trim() : null;
  } catch {
    // Network/parse errors are swallowed by design; caller falls back.
    return null;
  }
}
// src/cli.ts
import boxen from "boxen";
import gradient from "gradient-string";
import ora from "ora";
import inquirer from "inquirer";
import figlet from "figlet";
import clipboardy from "clipboardy";
// Single top-level commander program instance shared by all subcommands.
var program = new Command();

/**
 * Prints the gradient ASCII-art banner inside a rounded cyan box.
 * Purely cosmetic; writes to stdout and returns nothing.
 */
function showBanner() {
  const asciiArt = figlet.textSync("Prompt\nValidation", { horizontalLayout: "fitted" });
  const coloredTitle = gradient.atlas.multiline(asciiArt);
  const tagline = chalk.dim("\u{1F50E} Validate \u2022 \u2728 Improve \u2022 \u{1F680} Ship better prompts");
  const banner = boxen(`${coloredTitle}
${tagline}`, { padding: 1, borderColor: "cyan", borderStyle: "round" });
  console.log(banner);
}
/**
 * Loads validation rules from `.promptvalidationrc.json` in `cwd`.
 * Best-effort: any failure (missing file, bad JSON, wrong shape) yields
 * undefined so callers can fall back to defaults.
 * @param {string} cwd - Directory to look in.
 * @returns {Promise<string[]|undefined>} The `rules` array, or undefined.
 */
async function loadConfigRules(cwd) {
  const configPath = path.join(cwd, ".promptvalidationrc.json");
  try {
    const parsed = JSON.parse(await fs2.readFile(configPath, "utf-8"));
    return Array.isArray(parsed.rules) ? parsed.rules : undefined;
  } catch {
    // Missing or malformed config is not an error; caller uses defaults.
    return undefined;
  }
}
/**
 * Normalizes the CLI `--rules` option into a flat array of rule names.
 * Accepts a single comma-separated string or an array of such strings
 * (commander's variadic form); blank tokens are dropped.
 * @param {string|string[]|undefined} rulesOpt
 * @returns {string[]|undefined} Normalized names, or undefined when absent.
 */
function normalizeRulesOption(rulesOpt) {
  if (!rulesOpt) {
    return undefined;
  }
  const pieces = Array.isArray(rulesOpt) ? rulesOpt : [rulesOpt];
  return pieces
    .flatMap((piece) => piece.split(","))
    .map((token) => token.trim())
    .filter(Boolean);
}
/**
 * Reads all of stdin as a UTF-8 string.
 * Resolves when the stream ends; rejects on a stream error.
 * @returns {Promise<string>} Everything piped into the process.
 */
async function readStdin() {
  return await new Promise((resolve, reject) => {
    const chunks = [];
    process.stdin.setEncoding("utf-8");
    process.stdin.on("data", (chunk) => {
      chunks.push(chunk);
    });
    process.stdin.on("end", () => {
      resolve(chunks.join(""));
    });
    process.stdin.on("error", reject);
  });
}
// Built-in rule names understood by validateWithRules.
var AVAILABLE_RULES = ["email", "json-schema", "length-limit", "no-hallucination"];

// Top-level CLI metadata.
program
  .name("prompt-validation")
  .description("Validate and fix AI prompt outputs")
  .version("0.1.1");
// `check <file>`: validate a file against configured rules, optionally
// writing an auto-corrected copy next to it.
program
  .command("check <file>")
  .description("\u{1F50E} Validate a file with rules")
  .option("-r, --rules <rules...>", "Validation rules to apply (comma or space separated)")
  .option("--fix", "Automatically fix common issues")
  .action(async (file, options) => {
    // Declared outside the try so the catch handler can always stop it.
    // Fix: previously an exception after spinner.start() left the spinner
    // running (and the terminal cursor hidden).
    let spinner;
    try {
      showBanner();
      spinner = ora({ text: "Validating...", color: "cyan" }).start();
      const cwdRules = await loadConfigRules(process.cwd());
      const cliRules = normalizeRulesOption(options.rules);
      // CLI-provided rules take precedence over the config file.
      const rules = cliRules ?? cwdRules;
      const result = await validateFile(file, { ...options, rules });
      spinner.stop();
      if (result.valid) {
        console.log(chalk.green("\u2705 Validation passed!"));
      } else {
        console.log(chalk.red("\u274C Validation failed:"));
        result.errors.forEach((e) => console.log(" - " + e));
      }
      if (options.fix && result.fixed) {
        console.log(chalk.yellow(`\u{1F527} Auto-fixed issues and saved to ${file}.fixed`));
      }
    } catch (err) {
      spinner?.stop();
      console.error(chalk.red("Error:"), err);
    }
  });
// `improve <file>`: rewrite a prompt for better LLM outputs; '-' reads stdin.
program
  .command("improve <file>")
  .description("\u2728 Improve a prompt for better LLM outputs. Use '-' to read from stdin.")
  .option("--out <file>", "Write improved prompt to a file")
  .option("--format <format>", "Target output guidance (json|markdown|text)", "text")
  .option("--llm <provider>", "Use an LLM to improve (openai|gemini)")
  .option("--model <name>", "Model name (e.g., gpt-4o-mini, gemini-1.5-pro)")
  .option("--temperature <num>", "LLM temperature", (v) => {
    // Fix: `--temperature abc` previously coerced to NaN and was forwarded
    // to the provider APIs; fall back to the default instead.
    const n = Number(v);
    return Number.isFinite(n) ? n : 0.2;
  }, 0.2)
  .option("--copy", "Copy improved prompt to clipboard")
  .action(async (file, options) => {
    // Declared outside the try so the catch handler can always stop it.
    // Fix: previously an exception after spinner.start() left the spinner
    // running (and the terminal cursor hidden).
    let spinner;
    try {
      showBanner();
      spinner = ora({ text: "Crafting a better prompt... \u2728", color: "cyan" }).start();
      const input = file === "-" ? await readStdin() : await fs2.readFile(file, "utf-8");
      const { improved, meta } = await improvePrompt(input, {
        targetFormat: options.format,
        llm: options.llm,
        model: options.model,
        temperature: options.temperature
      });
      spinner.stop();
      if (options.out) {
        await fs2.writeFile(options.out, improved, "utf-8");
        console.log(boxen(chalk.green(`\u{1F4BE} Saved improved prompt to ${options.out}${meta.model ? ` (via ${meta.used}:${meta.model})` : ""}`), { padding: 1, borderColor: "green" }));
      } else {
        console.log(boxen(improved, { padding: 1, borderColor: "magenta" }));
      }
      if (options.copy) {
        await clipboardy.write(improved);
        console.log(chalk.cyan("\u{1F4CB} Copied improved prompt to clipboard"));
      }
    } catch (err) {
      spinner?.stop();
      console.error(chalk.red("Error:"), err);
    }
  });
/**
 * Interactive menu mode. Loops until the user selects Quit or presses
 * Ctrl+C. Each branch mirrors one of the non-interactive subcommands
 * (`check` / `improve`) or shows informational output.
 */
async function interactiveLoop() {
  showBanner();
  // Exit cleanly on Ctrl+C even while an inquirer prompt is active.
  process.on("SIGINT", () => {
    console.log("\n\u{1F44B} Goodbye!");
    process.exit(0);
  });
  while (true) {
    // Main menu: one entry per supported action.
    const { action } = await inquirer.prompt([
      {
        name: "action",
        type: "list",
        message: "What would you like to do?",
        choices: [
          { name: "\u{1F50E} Validate a file", value: "check" },
          { name: "\u2728 Improve a prompt", value: "improve" },
          { name: "\u{1F4CB} Paste prompt from clipboard and improve", value: "improve-clipboard" },
          { name: "\u2699\uFE0F View current rules", value: "rules" },
          { name: "\u2753 Help", value: "help" },
          { name: "\u{1F6AA} Quit", value: "quit" }
        ]
      }
    ]);
    if (action === "quit") {
      console.log("\u{1F44B} Bye!");
      break;
    }
    if (action === "help") {
      console.log(chalk.cyan("Use arrow keys to navigate. This menu loops until you choose Quit or press Ctrl+C."));
      continue;
    }
    if (action === "rules") {
      // Rules come from .promptvalidationrc.json in the cwd when present,
      // otherwise the built-in list is shown.
      const rules = await loadConfigRules(process.cwd()) ?? AVAILABLE_RULES;
      console.log(boxen(`Current rules:
- ${rules.join("\n- ")}`, { padding: 1, borderColor: "cyan" }));
      continue;
    }
    if (action === "check") {
      // Mirrors the `check` subcommand: choose file, rules, and auto-fix.
      const answers = await inquirer.prompt([
        { name: "file", message: "Path to file:", type: "input", validate: (v) => !!v || "Required" },
        { name: "rules", message: "Select rules:", type: "checkbox", choices: AVAILABLE_RULES, default: await loadConfigRules(process.cwd()) ?? ["json-schema", "length-limit"] },
        { name: "fix", message: "Auto-fix issues?", type: "confirm", default: true }
      ]);
      const spinner = ora({ text: "Validating...", color: "cyan" }).start();
      const result = await validateFile(answers.file, { rules: answers.rules, fix: answers.fix });
      spinner.stop();
      if (result.valid) {
        console.log(chalk.green("\u2705 Validation passed!"));
      } else {
        console.log(chalk.red("\u274C Validation failed:"));
        result.errors.forEach((e) => console.log(" - " + e));
      }
      if (answers.fix && result.fixed) {
        console.log(chalk.yellow(`\u{1F527} Auto-fixed issues and saved to ${answers.file}.fixed`));
      }
      continue;
    }
    if (action === "improve" || action === "improve-clipboard") {
      // Input comes from the clipboard, a file path, or stdin ('-').
      let input = "";
      if (action === "improve-clipboard") {
        try {
          input = await clipboardy.read();
        } catch {
          // Clipboard access can fail (e.g. headless env); treat as empty.
          input = "";
        }
        if (!input) {
          console.log(chalk.red("Clipboard is empty. Please copy some text and try again."));
          continue;
        }
      } else {
        const { file } = await inquirer.prompt([{ name: "file", message: "Path to prompt file (or '-' for stdin):", type: "input", validate: (v) => !!v || "Required" }]);
        input = file === "-" ? await readStdin() : await fs2.readFile(file, "utf-8");
      }
      const { format, llm, model, temperature, shouldCopy, out } = await inquirer.prompt([
        { name: "format", message: "Target format:", type: "list", choices: ["text", "markdown", "json"], default: "text" },
        { name: "llm", message: "Use LLM?", type: "list", choices: [
          { name: "No (local improver)", value: void 0 },
          { name: "OpenAI", value: "openai" },
          { name: "Gemini", value: "gemini" }
        ], default: void 0 },
        // Only ask for a model when an LLM provider was selected.
        { name: "model", message: "Model (optional):", type: "input", when: (a) => !!a.llm },
        { name: "temperature", message: "Temperature (0-1):", type: "number", default: 0.2 },
        { name: "out", message: "Write to file (optional):", type: "input" },
        { name: "shouldCopy", message: "Copy improved prompt to clipboard?", type: "confirm", default: true }
      ]);
      const spinner = ora({ text: "Crafting a better prompt... \u2728", color: "cyan" }).start();
      const { improved, meta } = await improvePrompt(input, {
        targetFormat: format,
        llm,
        model,
        // NaN (cleared number prompt) or 0 falls back to the 0.2 default.
        temperature: Number(temperature) || 0.2
      });
      spinner.stop();
      if (out) {
        await fs2.writeFile(out, improved, "utf-8");
        console.log(boxen(chalk.green(`\u{1F4BE} Saved improved prompt to ${out}${meta.model ? ` (via ${meta.used}:${meta.model})` : ""}`), { padding: 1, borderColor: "green" }));
      } else {
        console.log(boxen(improved, { padding: 1, borderColor: "magenta" }));
      }
      if (shouldCopy) {
        await clipboardy.write(improved);
        console.log(chalk.cyan("\u{1F4CB} Copied improved prompt to clipboard"));
      }
      continue;
    }
  }
}
// `interactive`: menu-driven mode that loops until the user quits.
program
  .command("interactive")
  .description("\u{1F9ED} Incredible interactive menu (loops until you quit)")
  .action(() => interactiveLoop());

// Dispatch argv to the registered subcommands.
program.parse();