@robinson_ai_systems/free-agent-mcp
Version:
Free Agent MCP - Portable, workspace-agnostic code generation using FREE models (Ollama)
1,446 lines (1,415 loc) • 51.4 kB
JavaScript
import { fileURLToPath } from 'url';
import { dirname } from 'path';
// ESM shim: reconstruct CommonJS-style __filename/__dirname for this module.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// src/core/patch/ops.ts
// Type guard: true when `x` looks like a PatchOps payload ({ ops: EditOp[] }).
// Every op must at minimum carry string `type` and `path` fields.
function isPatchOps(x) {
  if (!x || !Array.isArray(x.ops)) return false;
  for (const op of x.ops) {
    if (typeof op.type !== "string" || typeof op.path !== "string") return false;
  }
  return true;
}
// src/core/patch/applyOps.ts
import fs from "fs";
import path from "path";
// Read a file and return its contents as UTF-8 text.
function read(p) {
  const text = fs.readFileSync(p, "utf8");
  return text;
}
// Write text to a file, creating any missing parent directories first.
function write(p, s) {
  const dir = path.dirname(p);
  fs.mkdirSync(dir, { recursive: true });
  fs.writeFileSync(p, s);
}
// Index of the `occur`-th occurrence of `needle` in `hay` (1-based).
// Returns -1 when there are fewer than `occur` occurrences (or occur <= 0).
function nthIndexOf(hay, needle, occur = 1) {
  let at = -1;
  let searchFrom = 0;
  let remaining = occur;
  while (remaining > 0) {
    at = hay.indexOf(needle, searchFrom);
    if (at < 0) return -1;
    searchFrom = at + needle.length;
    remaining -= 1;
  }
  return at;
}
// Collapse all whitespace runs to single spaces and trim both ends.
function norm(s) {
  const collapsed = s.replace(/\s+/g, " ");
  return collapsed.trim();
}
// Locate the `occur`-th occurrence (1-based) of `anchor` in `content`.
// Falls back to a whitespace-insensitive comparison when the anchor's exact
// bytes are absent (e.g. the file was reformatted). Returns -1 if the
// anchor cannot be found at all.
//
// Bug fix: the fuzzy fallback previously mapped the normalized hit back via
// `content.indexOf(head)`, which ALWAYS returned the first occurrence even
// when occur > 1 was requested. It now re-locates the occur-th prefix
// occurrence, falling back to the first only when that fails.
function findAnchor(content, anchor, occur = 1) {
  // Fast path: exact byte match.
  let idx = nthIndexOf(content, anchor, occur);
  if (idx >= 0) return idx;
  // Tolerant path: compare with all whitespace collapsed.
  const N = norm(content);
  const A = norm(anchor);
  idx = nthIndexOf(N, A, occur);
  if (idx >= 0) {
    // A normalized index does not map back to `content` directly, so
    // re-locate via a short literal prefix of the anchor, honoring `occur`.
    const head = anchor.slice(0, Math.min(12, anchor.length));
    const rough = nthIndexOf(content, head, occur);
    // Fewer prefix occurrences than `occur`: degrade to the first hit
    // (indexOf returns -1 when the prefix is absent entirely).
    return rough >= 0 ? rough : content.indexOf(head);
  }
  return -1;
}
// Insert `import <spec> from '<from>';` after any leading comment header,
// unless an import from the same module already exists (idempotent).
//
// Bug fix: the previous version returned `importLine + content.slice(headerEnd)`,
// which silently DELETED everything before the first non-comment line
// (license banners, file-header comments). The prefix is now preserved and
// the import is inserted at `headerEnd` instead.
function upsertImport(content, spec, from) {
  // Escape regex metacharacters in the module specifier.
  const escaped = from.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
  const importRe = new RegExp(`^import\\s+[^;]*\\s+from\\s+['"]${escaped}['"];?\\s*$`, "m");
  // Already imported from this module: nothing to do.
  if (importRe.test(content)) {
    return content;
  }
  // First line that is not a line comment, block-comment line, or blank.
  const firstNonComment = content.search(/^(?!\s*\/\/|\s*\/\*|\s*\*|\s*$)/m);
  const headerEnd = firstNonComment > -1 ? firstNonComment : 0;
  const importLine = `import ${spec} from '${from}';\n`;
  return content.slice(0, headerEnd) + importLine + content.slice(headerEnd);
}
// Escape regex metacharacters so `s` can be embedded in a RegExp literally.
function escapeReg(s) {
  const meta = /[.*+?^${}()|[\]\\]/g;
  return s.replace(meta, "\\$&");
}
// Apply a sequence of edit ops to `content`, returning the edited text.
// Ops are applied in order against the progressively-edited string.
// Throws when an anchor cannot be located or the op type is unknown.
function applyOpsToContent(filePath, content, ops) {
  let text = content;
  for (const op of ops) {
    if (op.type === "insert_after") {
      const at = findAnchor(text, op.anchor, op.occur ?? 1);
      if (at < 0) throw new Error(`anchor not found (after): ${op.anchor}`);
      const cut = at + op.anchor.length;
      text = text.slice(0, cut) + op.code + text.slice(cut);
    } else if (op.type === "insert_before") {
      const at = findAnchor(text, op.anchor, op.occur ?? 1);
      if (at < 0) throw new Error(`anchor not found (before): ${op.anchor}`);
      text = text.slice(0, at) + op.code + text.slice(at);
    } else if (op.type === "replace_between") {
      // Keep both anchors; only the span between them is replaced.
      const startAt = text.indexOf(op.start);
      if (startAt < 0) throw new Error(`start anchor not found: ${op.start}`);
      const endAt = text.indexOf(op.end, startAt + op.start.length);
      if (endAt < 0) throw new Error(`end anchor not found: ${op.end}`);
      text = text.slice(0, startAt + op.start.length) + op.code + text.slice(endAt);
    } else if (op.type === "append_if_missing") {
      // Append `code` at EOF only when the sentinel text is absent.
      if (!text.includes(op.mustContain)) {
        text = text.trimEnd() + "\n" + op.code + (op.code.endsWith("\n") ? "" : "\n");
      }
    } else if (op.type === "upsert_import") {
      text = upsertImport(text, op.spec, op.from);
    } else {
      throw new Error(`Unknown op: ${op.type}`);
    }
  }
  return text;
}
// Group ops by target file, apply each group, and write results to disk.
// Returns { path, before, after } for every file whose content changed.
function applyOpsInPlace(repoRoot, allOps) {
  const perFile = new Map();
  for (const op of allOps) {
    const bucket = perFile.get(op.path) ?? [];
    bucket.push(op);
    perFile.set(op.path, bucket);
  }
  const changed = [];
  for (const [rel, ops] of perFile) {
    const abs = path.join(repoRoot, rel);
    // Missing files start from empty content (ops may create them).
    const before = fs.existsSync(abs) ? read(abs) : "";
    const after = applyOpsToContent(abs, before, ops);
    if (after === before) continue;
    write(abs, after);
    changed.push({ path: rel, before, after });
  }
  return changed;
}
// src/core/patch/unified.ts
import { createTwoFilesPatch } from "diff";
import { createHash } from "crypto";
// Build a git-style unified diff for one file, prepending fabricated
// `diff --git` / `index` headers so `git apply` accepts the patch.
// Returns "" when the diff body has no `---` header (i.e. no change).
function toUnified(relPath, before, after) {
  // Normalize CRLF so hunks are stable across platforms.
  const oldText = before.replace(/\r\n/g, "\n");
  const newText = after.replace(/\r\n/g, "\n");
  const diffBody = createTwoFilesPatch(`a/${relPath}`, `b/${relPath}`, oldText, newText, "", "");
  // Short pseudo blob hashes for the `index` line (not real git OIDs;
  // git apply does not verify them for plain patches).
  const shortHash = (text) =>
    createHash("sha1").update(`blob ${text.length}\0${text}`).digest("hex").substring(0, 7);
  const gitHeaders = [
    `diff --git a/${relPath} b/${relPath}`,
    `index ${shortHash(oldText)}..${shortHash(newText)} 100644`
  ].join("\n");
  const lines = diffBody.split("\n");
  const diffStart = lines.findIndex((line) => line.startsWith("---"));
  if (diffStart === -1) return "";
  const patchBody = lines.slice(diffStart).join("\n");
  return `${gitHeaders}\n${patchBody}`;
}
// Concatenate per-file unified diffs into one multi-file patch string.
function bundleUnified(changes) {
  const patches = changes.map((c) => toUnified(c.path, c.before, c.after));
  return patches.join("\n");
}
// src/core/patch/validate.ts
import { spawnSync } from "child_process";
// Dry-run `git apply --check` in `cwd` to validate a patch before use.
// Logs diagnostics when CODEGEN_VERBOSE=1 or DEBUG is set; throws with
// git's stderr/stdout when the patch would not apply cleanly.
function gitApplyCheck(patch, cwd) {
  // Pull the a/<path> names out of each file header for diagnostics.
  const fileMatches = patch.match(/^diff --git a\/(.+?) b\/.+?$/gm);
  const files = fileMatches
    ? fileMatches.map((m) => m.match(/^diff --git a\/(.+?) b\/.+?$/)?.[1]).filter(Boolean)
    : [];
  const verbose = process.env.CODEGEN_VERBOSE === "1" || process.env.DEBUG;
  if (verbose) {
    console.log(`[gitApplyCheck] Target repo: ${cwd}`);
    console.log(`[gitApplyCheck] Files being patched: ${files.join(", ") || "(none detected)"}`);
    console.log(`[gitApplyCheck] Patch size: ${patch.length} chars`);
  }
  // "-" makes git read the patch from stdin.
  const p = spawnSync("git", ["apply", "--check", "-"], { input: patch, cwd, encoding: "utf8" });
  if (p.status !== 0) {
    const errorMsg = p.stderr || p.stdout || "(no error message)";
    console.error(`[gitApplyCheck] FAILED in repo: ${cwd}`);
    console.error(`[gitApplyCheck] Files attempted: ${files.join(", ") || "(none detected)"}`);
    console.error(`[gitApplyCheck] Git error: ${errorMsg}`);
    throw new Error(`git apply --check failed: ${errorMsg}`);
  }
  if (verbose) {
    console.log(`[gitApplyCheck] \u2713 PASS`);
  }
}
// src/core/patterns/enforce.ts
// Validate a generated unified diff against the repo contract.
// Collects every violation and throws one aggregated Error listing them.
//
// Checks, per changed file:
//   - the file must live under contract.layout.baseDir (test*/... and *.md exempt)
//   - added lines must not match any contract.forbid pattern (case-insensitive)
//   - added lines must not introduce TypeScript `any` (except `any =>`)
//   - raw fetch/http/axios calls must go through a mustUse wrapper
//
// Fixes: the diff was previously split twice (once for headers, once for
// chunks) — it is now split once; a dead `wantsClass` computation with an
// empty if-body has been removed (it had no effect).
function enforceContractOnDiff(diff, contract) {
  const violations = [];
  // Each chunk is one file's diff; the split consumes the header prefix.
  const chunks = diff.split(/^diff --git/m).slice(1);
  const fileOf = (chunk) => (chunk.match(/\+\+\+ b\/(.+)\n/) || [])[1];
  // Layout check: every touched file must live in the contract's baseDir.
  for (const chunk of chunks) {
    const f = fileOf(chunk);
    if (!f) continue;
    if (!f.startsWith(contract.layout.baseDir + "/") && !f.startsWith("test") && !f.endsWith(".md")) {
      violations.push(`Contract violation: file outside baseDir (${contract.layout.baseDir}): ${f}`);
    }
  }
  for (const c of chunks) {
    const file = fileOf(c);
    if (!file) continue;
    // Only inspect added lines (strip leading '+', skip the '+++' header).
    const added2 = c.split("\n").filter((l) => l.startsWith("+") && !l.startsWith("+++")).map((l) => l.slice(1)).join("\n");
    if (!added2.trim()) continue;
    for (const bad of contract.forbid) {
      if (new RegExp(bad, "i").test(added2)) {
        violations.push(`Contract violation: contains "${bad}" in ${file}`);
      }
    }
    // `any` is banned except as an arrow parameter `any =>` (lookahead).
    if (/\bany\b(?!\s*=>)/.test(added2)) {
      violations.push(`Contract violation: TypeScript 'any' in ${file}`);
    }
    const usesHttp = /\b(fetch|http|axios)\s*\(/.test(added2);
    const must = contract.wrappers.filter((w) => w.mustUse);
    if (usesHttp && must.length) {
      const ok = must.some((w) => new RegExp(`\\b${w.name}\\s*\\(`).test(added2));
      if (!ok) {
        violations.push(
          `Contract violation: must use wrapper ${must.map((w) => w.name).join(" or ")} instead of raw fetch/http in ${file}`
        );
      }
    }
  }
  if (violations.length > 0) {
    throw new Error("Contract violations:\n" + violations.join("\n"));
  }
}
// src/core/shared/gates.ts
// Gate mode from FREE_AGENT_GATE_MODE: "strict" | "migrate" | "lenient".
// Case-insensitive; unknown or unset values fall back to "migrate".
function getGateMode() {
  const raw = (process.env.FREE_AGENT_GATE_MODE || "migrate").toLowerCase();
  const valid = ["strict", "migrate", "lenient"];
  return valid.includes(raw) ? raw : "migrate";
}
// src/core/shared/patchGuard.ts
// True when any added line ("+...") of the diff matches `re`.
var added = (re, diff) => {
  const lines = diff.split("\n");
  return lines.some((line) => line.startsWith("+") && re.test(line));
};
// Heuristic: does the diff ADD a ` any`/`: any` type where the preceding
// 12 lines of diff context did not already contain one? The context check
// avoids flagging lines that merely moved existing `any` usage around.
function introducedAny(diff) {
  const anyRe = /:?\sany(\W|$)/;
  const lines = diff.split("\n");
  return lines.some((line, i) => {
    if (!line.startsWith("+") || !anyRe.test(line)) return false;
    const context = lines.slice(Math.max(0, i - 12), i).join("\n");
    return !anyRe.test(context);
  });
}
// Gate an assembled unified diff before it is returned to the caller.
// Behavior depends on the gate mode (strict | migrate | lenient):
//   - when the contract names containers, reject patches that create a new
//     .ts file containing an exported class (should edit existing files)
//   - added TODO comments: rejected (strict) or rewritten to NOTE (migrate)
//   - newly introduced `any` types: rejected (strict) or rewritten to
//     `unknown` (migrate)
// Finally delegates to enforceContractOnDiff when a contract is present.
// Returns the (possibly rewritten) diff string.
function validatePatchUnifiedDiff(diff, contract) {
const mode = getGateMode();
if (contract?.containers?.length) {
// Heuristic: "new file mode" + an added `export class` in a .ts file
// suggests the model created a new container instead of editing one.
const newTs = /new file mode 100644\n\+\+\+ b\/.*\.ts/m.test(diff);
const addsClass = /(^|\n)\+.*export\s+class\s+/m.test(diff);
if (newTs && addsClass) {
throw new Error("Patch rejected: should modify existing container, not create a new class/file.");
}
}
if (mode !== "lenient" && added(/\bTODO\b/i, diff)) {
if (mode === "strict") throw new Error("Patch rejected: TODO comments not allowed.");
// migrate: soften added TODO markers to NOTE in place.
diff = diff.replace(/(^|\n)\+([^\n]*)(TODO)/gi, (_m, a, pre) => `${a}+${pre}NOTE`);
}
if (mode === "strict" && introducedAny(diff)) {
throw new Error("Patch rejected: new 'any' types are not allowed.");
}
if (mode === "migrate" && introducedAny(diff)) {
// migrate: rewrite added `: any` annotations to the safer `: unknown`.
diff = diff.replace(/(^|\n)\+([^\n]*):\s*any(\W)/g, (_m, a, pre, tail) => `${a}+${pre}: unknown${tail}`);
}
if (contract) {
enforceContractOnDiff(diff, contract);
}
return diff;
}
// src/core/anchors/indexer.ts
import fs2 from "fs";
import path2 from "path";
import fg from "fast-glob";
// src/core/anchors/extract.ts
// Deduplicate by key. When keys collide the LAST element wins, while the
// FIRST occurrence's position is kept (Map preserves insertion order).
function dedupe(xs, key) {
  const byKey = new Map();
  for (const x of xs) byKey.set(key(x), x);
  return Array.from(byKey.values());
}
// Scan a source file for stable "anchor" lines (imports, class/function
// signatures, method headers, switch cases, //region markers) that edit
// ops can target. Returns { path, anchors } with duplicates removed.
function extractAnchors(path5, content) {
  const anchors = [];
  const record = (text, kind, line) => {
    // Skip empty or very short matches; they make useless anchors.
    if (text && text.length >= 3) anchors.push({ text, kind, line });
  };
  const lines = content.replace(/\r\n/g, "\n").split("\n");
  lines.forEach((l, i) => {
    const lineNo = i + 1;
    if (/^\s*import\s.+from\s+['"].+['"]/.test(l)) record(l.trim(), "import", lineNo);
    const mClass = l.match(/^\s*export\s+class\s+([A-Za-z0-9_]+)/) || l.match(/^\s*class\s+([A-Za-z0-9_]+)/);
    if (mClass) record(mClass[0].trim(), "class", lineNo);
    const mMethod = l.match(/^\s*(public|private|protected|async|\s)*\s*[A-Za-z0-9_]+\s*\([^)]*\)\s*{/);
    if (mMethod) record(mMethod[0].trim(), "method", lineNo);
    const mFn = l.match(/^\s*export\s+function\s+[A-Za-z0-9_]+\s*\(|^\s*function\s+[A-Za-z0-9_]+\s*\(/);
    if (mFn) record(mFn[0].trim(), "function", lineNo);
    const mCase = l.match(/^\s*case\s+['"`]?[A-Za-z0-9_.-]+['"`]?\s*:/);
    if (mCase) record(mCase[0].trim(), "switchCase", lineNo);
    if (/^\s*\/\/\s*region\b/i.test(l) || /^\s*\/\/\s*endregion\b/i.test(l)) record(l.trim(), "region", lineNo);
  });
  return { path: path5, anchors: dedupe(anchors, (a) => `${a.kind}:${a.text}`) };
}
// src/core/anchors/indexer.ts
// Collect the allowed-anchor list for each candidate file (targets plus
// exemplars). Entries ending in "/*" or "/**" are treated as globs and
// expanded via fast-glob relative to `repoRoot`; non-file paths are
// skipped. Returns { byFile: { [rel]: { allowed: string[] } } }.
async function buildAnchorHints(repoRoot, targetPaths, exemplarPaths) {
  const byFile = {};
  const candidates = new Set([...targetPaths, ...exemplarPaths]);
  // Expand glob patterns into concrete paths, dropping the pattern entry.
  const expanded = [];
  for (const p of [...candidates]) {
    if (!p.endsWith("/**") && !p.endsWith("/*")) continue;
    const matches = await fg(p, { cwd: repoRoot, dot: false });
    expanded.push(...matches);
    candidates.delete(p);
  }
  for (const e of expanded) candidates.add(e);
  for (const rel of candidates) {
    const abs = path2.join(repoRoot, rel);
    // Skip anything that is not an existing regular file.
    if (!fs2.existsSync(abs) || !fs2.statSync(abs).isFile()) continue;
    const content = fs2.readFileSync(abs, "utf8");
    const fa = extractAnchors(rel, content);
    byFile[rel] = { allowed: fa.anchors.map((a) => a.text) };
  }
  return { byFile };
}
// src/core/anchors/suggest.ts
// Normalize an anchor for fuzzy comparison: collapse whitespace runs,
// strip trailing ';' / '{' characters, trim, and lowercase.
function norm2(s) {
  const squashed = s.replace(/\s+/g, " ");
  const noTail = squashed.replace(/[;{]+$/, "");
  return noTail.trim().toLowerCase();
}
// Jaccard similarity (0..1) between the normalized token sets of two
// strings: |intersection| / |union|, or 0 when both are empty.
function similarity(a, b) {
  const tokensA = new Set(norm2(a).split(" "));
  const tokensB = new Set(norm2(b).split(" "));
  let shared = 0;
  for (const t of tokensA) {
    if (tokensB.has(t)) shared += 1;
  }
  const unionSize = new Set([...tokensA, ...tokensB]).size;
  return unionSize ? shared / unionSize : 0;
}
// Resolve a requested anchor to the closest allowed anchor for `file`.
// Preference order: exact match, normalized match, then the best fuzzy
// match with similarity >= `min`. Returns null when nothing qualifies.
function nearestAllowed(file, want, hints, min = 0.45) {
  const allowed = hints.byFile[file]?.allowed ?? [];
  if (allowed.length === 0) return null;
  const exact = allowed.find((a) => a === want) || allowed.find((a) => norm2(a) === norm2(want));
  if (exact) return exact;
  let bestAnchor = "";
  let bestScore = 0;
  for (const candidate of allowed) {
    const score = similarity(candidate, want);
    if (score > bestScore) {
      bestAnchor = candidate;
      bestScore = score;
    }
  }
  return bestScore >= min ? bestAnchor : null;
}
// src/core/generation/ops-generator.ts
// Code-generation strategy: asks the LLM for STRUCTURED edit ops (strict
// JSON), normalizes their anchors against a per-file allow-list, applies
// them to the working tree, and returns a git-applyable unified diff.
var OpsGenerator = class {
name = "ops-generator";
llmClient;
// Will be injected
constructor(llmClient) {
this.llmClient = llmClient;
}
// Main entry point. Pipeline:
//   1. derive target/exemplar paths from the task text + contract
//   2. build per-file anchor hints (allowed anchor strings)
//   3. prompt the LLM for ops, escalating quality once on failure
//   4. normalize anchors, apply ops in place, bundle a unified diff
//   5. gate the diff (patch guard + contract) and `git apply --check` it
// Returns the unified diff string; throws on any validation failure.
async generate(input) {
const { repo, task, contract, examples, tier, quality } = input;
console.log(`[OpsGenerator] Generating ops for task: ${task.slice(0, 60)}...`);
const targetPaths = this.extractTargetPaths(task, contract);
// Exemplars may be raw strings (fenced code whose first line is a
// `// path` comment) or objects with a `path` property.
const exemplarPaths = (examples || []).map((ex) => {
if (typeof ex === "string") {
const match = ex.match(/^```(?:\w+)?\s*\n\/\/\s*(.+?)\n/);
return match ? match[1] : null;
}
return ex.path || null;
}).filter(Boolean);
const hints = await buildAnchorHints(repo, targetPaths, exemplarPaths);
for (const [file, data] of Object.entries(hints.byFile)) {
console.log(`[Anchors] ${file}: ${data.allowed.length} allowed`);
}
const prompt = this.buildOpsPrompt(input, hints);
let ops;
try {
ops = await this.llmSynthesizeOps(prompt, quality || "auto", tier || "free");
} catch (e) {
// Single retry at the next quality tier ("best" stays "best").
console.warn(`[OpsGenerator] Initial synthesis failed, escalating quality...`);
const nextQuality = quality === "fast" ? "balanced" : quality === "balanced" ? "best" : "best";
ops = await this.llmSynthesizeOps(prompt, nextQuality, tier || "free");
}
console.log(`[OpsGenerator] Synthesized ${ops.ops.length} ops`);
ops = this.validateAnchors(ops, hints);
console.log(`[Ops] ${ops.ops.length} ops returned; anchors normalized to allowed set`);
// NOTE: this mutates the working tree BEFORE validation; the checks
// below run against the already-modified repo.
const changes = applyOpsInPlace(repo, ops.ops);
console.log(`[OpsGenerator] Applied ops to ${changes.length} files`);
let unified = bundleUnified(changes);
unified = validatePatchUnifiedDiff(unified, contract);
console.log(`[OpsGenerator] Generated patch (${unified.length} chars):`);
console.log("--- PATCH START ---");
console.log(unified);
console.log("--- PATCH END ---");
gitApplyCheck(unified, repo);
console.log(`[OpsGenerator] git apply --check OK`);
return unified;
}
// Collect candidate file paths: contract containers plus any path-looking
// tokens (*.ts/js/tsx/jsx/json) mentioned in the task text. Deduplicated.
extractTargetPaths(task, contract) {
const paths = [];
if (contract?.containers) {
paths.push(...contract.containers);
}
const fileMatches = task.match(/[\w\-./]+\.(?:ts|js|tsx|jsx|json)/g);
if (fileMatches) {
paths.push(...fileMatches);
}
return [...new Set(paths)];
}
// Snap each op's `anchor` (and, best-effort, `start`/`end`) to the nearest
// entry in the allowed anchor list. Throws when an `anchor` cannot be
// resolved at all; unresolvable start/end values are left unchanged.
validateAnchors(ops, hints) {
for (const op of ops.ops) {
if ("anchor" in op && typeof op.anchor === "string") {
const a = op.anchor;
const fixed = nearestAllowed(op.path, a, hints);
if (!fixed) {
const examples = (hints.byFile[op.path]?.allowed ?? []).slice(0, 5).join(" | ");
throw new Error(`Invalid anchor for ${op.path}: "${a}". Allowed examples: ${examples}`);
}
op.anchor = fixed;
}
if (op.start && typeof op.start === "string") {
const fixedStart = nearestAllowed(op.path, op.start, hints);
if (fixedStart) op.start = fixedStart;
}
if (op.end && typeof op.end === "string") {
const fixedEnd = nearestAllowed(op.path, op.end, hints);
if (fixedEnd) op.end = fixedEnd;
}
}
return ops;
}
// Assemble the LLM prompt: task, contract patterns, repo exemplars, the
// per-file anchor allow-lists (first 20 shown per file), and the strict
// JSON output-format fragment.
buildOpsPrompt(input, hints) {
const { task, contract, examples } = input;
const parts = [];
parts.push(`# TASK`);
parts.push(task);
parts.push("");
if (contract) {
parts.push(`# REPO PATTERNS (MUST FOLLOW)`);
if (contract.containers?.length) {
parts.push(`## Containers (modify these, don't create new files):`);
contract.containers.forEach((c) => parts.push(`- ${c}`));
}
if (contract.wrappers?.length) {
parts.push(`## Wrappers (use these patterns):`);
contract.wrappers.forEach((w) => parts.push(`- ${w}`));
}
parts.push("");
}
if (examples?.length) {
parts.push(`# EXAMPLES FROM THIS REPO (Mirror These Patterns)`);
examples.forEach((ex, i) => {
parts.push(`## Example ${i + 1}: ${ex.path}`);
parts.push("```");
parts.push(ex.content);
parts.push("```");
});
parts.push("");
}
if (Object.keys(hints.byFile).length > 0) {
parts.push(`# ANCHOR HINTS`);
parts.push(`For each file, you MUST choose anchors from the allowed list below:`);
parts.push("");
for (const [file, data] of Object.entries(hints.byFile)) {
parts.push(`## ${file}`);
parts.push(`Allowed anchors (choose from these):`);
data.allowed.slice(0, 20).forEach((a) => parts.push(`- ${a}`));
if (data.allowed.length > 20) {
parts.push(`... and ${data.allowed.length - 20} more`);
}
parts.push("");
}
}
parts.push(this.getOpsOnlyFragment());
return parts.join("\n");
}
// Static prompt fragment: the EditOp JSON schema plus output rules.
getOpsOnlyFragment() {
return `
# OUTPUT FORMAT (STRICT JSON ONLY)
You must output STRICT JSON that conforms to this TypeScript type:
type EditOp =
| { type: "insert_after", path: string, anchor: string, code: string, occur?: number }
| { type: "insert_before", path: string, anchor: string, code: string, occur?: number }
| { type: "replace_between", path: string, start: string, end: string, code: string }
| { type: "append_if_missing", path: string, code: string, mustContain: string }
| { type: "upsert_import", path: string, spec: string, from: string };
Return: { "ops": EditOp[] }
Rules:
- Use existing repo patterns and containers learned from exemplars.
- Prefer modifying the existing container file; do NOT create new files or classes if a container exists.
- For any op with an "anchor" field, the value MUST be exactly one string from that file's allowed anchor list (see ANCHOR HINTS above).
- If anchors are provided for a file, you MUST choose from them; do not invent new anchors.
- Prefer the most semantically close anchor (e.g., switch case label for that endpoint, or the containing class/method signature).
- "code" must be complete, compile-ready TypeScript (no placeholders, no TODO).
- No prose. No markdown. JSON ONLY.
`.trim();
}
// Ask the LLM for ops (format: "json") and parse/validate the payload
// against the PatchOps shape. Throws on invalid JSON or schema.
async llmSynthesizeOps(prompt, quality, tier) {
const model = this.selectModel(quality, tier);
console.log(`[OpsGenerator] Calling ${model} for ops synthesis...`);
const raw = await this.llmClient.generate({
model,
prompt,
format: "json",
timeoutMs: 3e5
// 5 min for Ollama cold start
});
let parsed;
try {
// The client may return { text } or the raw string itself.
parsed = JSON.parse(raw.text || raw);
} catch (e) {
throw new Error(`LLM returned invalid JSON: ${raw.text?.slice(0, 200)}`);
}
if (!isPatchOps(parsed)) {
throw new Error(`LLM returned invalid PatchOps schema: ${JSON.stringify(parsed).slice(0, 200)}`);
}
return parsed;
}
// Map (quality, tier) to a model id. The free tier currently uses the
// same local model for every quality level.
selectModel(quality, tier) {
if (tier === "paid") {
return quality === "best" ? "gpt-4o" : "gpt-4o-mini";
}
return quality === "best" ? "qwen2.5-coder:7b" : "qwen2.5-coder:7b";
}
};
// src/ollama-client.ts
import { Ollama as Ollama2 } from "ollama";
import { spawn } from "child_process";
// src/shared/shared-llm/ollama-client.ts
// Ollama endpoint (trailing slashes stripped); override via OLLAMA_BASE_URL.
var BASE = (process.env.OLLAMA_BASE_URL || "http://localhost:11434").replace(/\/+$/, "");
// Promise-based delay helper used for retry backoff.
var sleep = (ms) => new Promise((r) => setTimeout(r, ms));
// Probe the Ollama HTTP API (GET /api/tags) with a short timeout.
// Tries the configured host first, then a 127.0.0.1 variant as a fallback.
// Returns true on the first OK response; never throws (failures are logged).
async function pingOllama(timeoutMs = 1e3) {
  const urls = [
    `${BASE}/api/tags`,
    `${BASE.replace("localhost", "127.0.0.1")}/api/tags`
  ];
  for (const url of urls) {
    try {
      const res = await fetch(url, {
        method: "GET",
        signal: AbortSignal.timeout(timeoutMs),
        headers: { "Accept": "application/json" }
      });
      if (res.ok) return true;
    } catch (error) {
      // Log and fall through to the next candidate URL.
      console.error(`[pingOllama] Failed to ping ${url}: ${error?.message || error}`);
    }
  }
  return false;
}
// POST to Ollama's /api/generate (non-streaming) with retries and linear
// backoff (500ms * attempt number). Resolves to the generated text ("" when
// the response carries none); throws once every attempt has failed.
async function ollamaGenerate(opts) {
  const { model, prompt, format, timeoutMs = 12e4, retries = 2 } = opts;
  console.error(`[sharedGenerate] Starting generation with model: ${model}, timeout: ${timeoutMs}ms`);
  // One round-trip to the generate endpoint; throws on HTTP/parse errors.
  const attemptOnce = async () => {
    const body = { model, prompt, stream: false };
    // Ask Ollama for constrained JSON output when requested.
    if (format === "json") {
      body.format = "json";
    }
    console.error(`[sharedGenerate] Sending fetch to ${BASE}/api/generate`);
    const r = await fetch(`${BASE}/api/generate`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(body),
      signal: AbortSignal.timeout(timeoutMs)
    });
    console.error(`[sharedGenerate] Fetch completed with status: ${r.status}`);
    if (!r.ok) throw new Error(`HTTP ${r.status}`);
    console.error("[sharedGenerate] Parsing JSON response...");
    const json = await r.json();
    console.error("[sharedGenerate] JSON parsed successfully");
    return json.response || "";
  };
  let lastErr;
  for (let attempt = 0; attempt <= retries; attempt++) {
    try {
      console.error(`[sharedGenerate] Attempt ${attempt + 1}/${retries + 1}`);
      return await attemptOnce();
    } catch (e) {
      console.error(`[sharedGenerate] Error on attempt ${attempt + 1}:`, e);
      lastErr = e;
      if (attempt < retries) {
        console.error(`[sharedGenerate] Retrying in ${500 * (attempt + 1)}ms...`);
        await sleep(500 * (attempt + 1));
      }
    }
  }
  throw new Error(`Ollama generate failed after ${retries + 1} attempt(s): ${lastErr?.message || lastErr}`);
}
// src/utils/model-manager.ts
import { Ollama } from "ollama";
// Runtime model registry: discovers models from a local Ollama server,
// classifies them (family / speed / quality / capabilities), and picks the
// best match for a task. Discovery results are cached for 60 seconds.
var ModelManager = class {
ollama;
// name -> ModelInfo produced by parseModelInfo()
models = /* @__PURE__ */ new Map();
// Epoch ms of the last successful discovery (0 = never).
lastDiscovery = 0;
discoveryInterval = 6e4;
// Re-discover every 60 seconds
baseUrl;
constructor(baseUrl = "http://localhost:11434") {
this.baseUrl = baseUrl;
this.ollama = new Ollama({ host: baseUrl });
}
/**
* Discover all available models from Ollama.
* Cached: returns the in-memory list when it is non-empty and younger than
* `discoveryInterval`, unless `force` is set.
* Returns [] (after logging) when Ollama is unreachable — callers must
* handle an empty list.
*/
async discoverModels(force = false) {
const now = Date.now();
if (!force && this.models.size > 0 && now - this.lastDiscovery < this.discoveryInterval) {
return Array.from(this.models.values());
}
try {
const response = await this.ollama.list();
this.models.clear();
for (const model of response.models) {
const info = this.parseModelInfo(model);
this.models.set(info.name, info);
}
this.lastDiscovery = now;
console.error(`[ModelManager] Discovered ${this.models.size} models`);
return Array.from(this.models.values());
} catch (error) {
console.error("[ModelManager] Failed to discover models:", error);
return [];
}
}
/**
* Parse model information from an Ollama list() entry.
* Derives the family from the base name, speed from on-disk size, and a
* coarse quality tier from the parameter-count tag (e.g. "7b").
*/
parseModelInfo(model) {
const name = model.name;
const size = model.size || 0;
const sizeGB = size / (1024 * 1024 * 1024);
// Ollama names look like "qwen2.5-coder:7b" => base name + param tag.
const parts = name.split(":");
const baseName = parts[0] || name;
const paramSize = parts[1] || "unknown";
let family = "unknown";
if (baseName.includes("qwen")) family = "qwen";
else if (baseName.includes("deepseek")) family = "deepseek";
else if (baseName.includes("llama")) family = "llama";
else if (baseName.includes("codellama")) family = "codellama";
else if (baseName.includes("mistral")) family = "mistral";
else if (baseName.includes("phi")) family = "phi";
else if (baseName.includes("gemma")) family = "gemma";
const capabilities = ["chat"];
const isCodeCapable = baseName.includes("coder") || baseName.includes("code") || family === "codellama" || family === "mistral" || // Mistral is excellent for code
family === "deepseek" || // DeepSeek is code-focused
family === "qwen";
if (isCodeCapable) {
capabilities.push("code");
}
if (baseName.includes("vision") || baseName.includes("llava")) {
capabilities.push("vision");
}
if (baseName.includes("embed")) {
capabilities.push("embedding");
}
// Speed is inferred purely from the model's on-disk size.
let speed;
if (sizeGB < 2) speed = "fast";
else if (sizeGB < 5) speed = "medium";
else speed = "slow";
// Quality tier from the parameter-count tag; non-numeric tags such as
// "latest" default to "good".
let quality;
const paramNum = parseInt(paramSize);
if (isNaN(paramNum)) quality = "good";
else if (paramNum < 7) quality = "good";
else if (paramNum < 20) quality = "better";
else quality = "best";
return {
name,
size,
sizeGB,
family,
parameter_size: paramSize,
capabilities,
speed,
quality,
modified_at: model.modified_at || (/* @__PURE__ */ new Date()).toISOString()
};
}
/**
* Select the best model for a task.
* Filters by required capabilities (and prefers "code"-capable models for
* code-ish tasks when any exist), then sorts by: speed (when preferSpeed),
* quality match for the requested complexity, then size — bigger for
* "complex", smaller for "simple", closest to ~5GB otherwise.
* Returns a model name, or null when no models are available.
*/
async selectModel(criteria) {
const models = await this.discoverModels();
if (models.length === 0) {
console.error("[ModelManager] No models available");
return null;
}
let candidates = models;
if (criteria.requiredCapabilities && criteria.requiredCapabilities.length > 0) {
candidates = models.filter(
(m) => criteria.requiredCapabilities.every((cap) => m.capabilities.includes(cap))
);
}
if (criteria.task === "code" || criteria.task === "refactor" || criteria.task === "test") {
const codeModels = candidates.filter((m) => m.capabilities.includes("code"));
if (codeModels.length > 0) {
candidates = codeModels;
}
}
candidates.sort((a, b) => {
if (criteria.preferSpeed) {
if (a.speed !== b.speed) {
const speedOrder = { fast: 0, medium: 1, slow: 2 };
return speedOrder[a.speed] - speedOrder[b.speed];
}
}
const targetQuality = this.getTargetQuality(criteria.complexity);
const aMatch = a.quality === targetQuality ? 0 : 1;
const bMatch = b.quality === targetQuality ? 0 : 1;
if (aMatch !== bMatch) return aMatch - bMatch;
if (criteria.complexity === "complex") {
return b.size - a.size;
}
if (criteria.complexity === "simple") {
return a.size - b.size;
}
// Default: prefer models whose size is closest to ~5GB.
return Math.abs(a.sizeGB - 5) - Math.abs(b.sizeGB - 5);
});
return candidates[0]?.name || models[0]?.name || null;
}
/**
* Get fallback chain for a model: the primary followed by up to two of the
* smallest other models satisfying the required capabilities.
*/
async getFallbackChain(primaryModel, criteria) {
const models = await this.discoverModels();
const chain = [primaryModel];
let candidates = models.filter(
(m) => m.name !== primaryModel && (!criteria.requiredCapabilities || criteria.requiredCapabilities.every((cap) => m.capabilities.includes(cap)))
);
candidates.sort((a, b) => a.size - b.size);
chain.push(...candidates.slice(0, 2).map((m) => m.name));
return chain;
}
/**
* Get adaptive timeout (ms) for a model, scaled by its size tier and
* doubled on cold start. Unknown models get 60s (180s when cold).
*/
async getAdaptiveTimeout(modelName, isColdStart = false) {
const model = this.models.get(modelName);
if (!model) {
return isColdStart ? 18e4 : 6e4;
}
let baseTimeout;
if (model.sizeGB < 2) {
baseTimeout = 3e4;
} else if (model.sizeGB < 5) {
baseTimeout = 6e4;
} else if (model.sizeGB < 10) {
baseTimeout = 12e4;
} else {
baseTimeout = 18e4;
}
if (isColdStart) {
baseTimeout *= 2;
}
return baseTimeout;
}
/**
* Get model info from the cache (undefined when not yet discovered).
*/
getModelInfo(modelName) {
return this.models.get(modelName);
}
/**
* List all cached models (does not trigger discovery).
*/
listModels() {
return Array.from(this.models.values());
}
/**
* List available model names (for quick lookup); triggers discovery.
*/
async listAvailableModels() {
await this.discoverModels();
return Array.from(this.models.keys());
}
/**
* Get cached models that advertise a given capability.
*/
getModelsByCapability(capability) {
return Array.from(this.models.values()).filter(
(m) => m.capabilities.includes(capability)
);
}
/**
* Map task complexity to the quality tier selectModel aims for.
*/
getTargetQuality(complexity) {
switch (complexity) {
case "simple":
return "good";
case "medium":
return "better";
case "complex":
return "best";
default:
return "good";
}
}
/**
* Check if Ollama is running via a list() round-trip; never throws.
*/
async isOllamaRunning() {
try {
await this.ollama.list();
return true;
} catch {
return false;
}
}
};
// Lazily-created process-wide ModelManager singleton.
var modelManager = null;
// Get (or create on first call) the shared ModelManager.
// NOTE: `baseUrl` is only honored on the first call; subsequent calls
// reuse the existing instance regardless of the argument.
function getModelManager(baseUrl) {
if (!modelManager) {
modelManager = new ModelManager(baseUrl);
}
return modelManager;
}
// src/ollama-client.ts
var OllamaClient = class {
ollama;
models;
baseUrl;
autoStart;
ollamaProcess = null;
startedByUs = false;
constructor(autoStart = true) {
this.baseUrl = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
this.ollama = new Ollama2({ host: this.baseUrl });
this.models = this.initializeModels();
this.autoStart = autoStart;
}
/**
* Get the current Ollama base URL
*/
getBaseUrl() {
return this.baseUrl;
}
initializeModels() {
return /* @__PURE__ */ new Map([
// PRIMARY: Qwen 2.5 Coder 7B - BEST 7B code model available
[
"qwen2.5-coder:7b",
{
name: "qwen2.5-coder:7b",
speed: "medium",
quality: "best",
useCase: ["code", "medium", "refactoring", "tests", "features", "complex"]
}
],
// ANALYSIS: Mistral 7B - Excellent for API integration, analysis, planning
[
"mistral:7b",
{
name: "mistral:7b",
speed: "medium",
quality: "better",
useCase: ["analysis", "api-integration", "research", "planning", "tool-setup", "configuration"]
}
],
// FALLBACK: DeepSeek Coder 1.3B - Small but code-specific
[
"deepseek-coder:1.3b",
{
name: "deepseek-coder:1.3b",
speed: "fast",
quality: "good",
useCase: ["code", "simple", "quick-generation"]
}
],
// ROUTER: Qwen 3B - Fast for routing/intent detection only
[
"qwen2.5:3b",
{
name: "qwen2.5:3b",
speed: "fast",
quality: "good",
useCase: ["router", "intent", "scaffolding", "simple"]
}
],
// EMBEDDINGS: Nomic Embed Text - For semantic search
[
"nomic-embed-text",
{
name: "nomic-embed-text",
speed: "fast",
quality: "good",
useCase: ["embeddings", "semantic-search"]
}
]
]);
}
/**
* Select the best model for the task
*/
async selectModel(options) {
const modelManager2 = getModelManager(this.baseUrl);
if (options.model && options.model !== "auto") {
const modelMap = {
"primary": "qwen2.5-coder:7b",
"fallback": "deepseek-coder:1.3b",
"router": "qwen2.5:3b",
"qwen-coder": "qwen2.5-coder:7b",
"deepseek": "deepseek-coder:1.3b"
};
const requestedModel = modelMap[options.model] || options.model;
const modelInfo = modelManager2.getModelInfo(requestedModel);
if (modelInfo) {
return requestedModel;
}
console.warn(`[OllamaClient] Requested model ${requestedModel} not found, auto-selecting...`);
}
const modelManager_models = await modelManager2.listAvailableModels();
if (options.complexity === "simple" && (options.model?.includes("analysis") || options.model?.includes("research"))) {
if (modelManager_models.includes("mistral:7b")) {
return "mistral:7b";
}
}
if (modelManager_models.includes("qwen2.5-coder:7b")) {
return "qwen2.5-coder:7b";
}
if (modelManager_models.includes("mistral:7b")) {
return "mistral:7b";
}
if (modelManager_models.includes("deepseek-coder:1.3b")) {
return "deepseek-coder:1.3b";
}
const criteria = {
task: "code",
// Default to code task
complexity: options.complexity || "medium",
preferSpeed: false,
// Prefer quality over speed for code
requiredCapabilities: ["code"]
};
const selectedModel = await modelManager2.selectModel(criteria);
if (!selectedModel) {
throw new Error("No Ollama models available. Please pull mistral:7b or qwen2.5-coder:7b.");
}
return selectedModel;
}
/**
* Generate text using Ollama (with auto-start, dynamic model selection, and adaptive timeouts!)
*/
async generate(prompt, options = {}) {
await this.ensureRunning();
const modelManager2 = getModelManager(this.baseUrl);
await modelManager2.discoverModels();
const model = await this.selectModel(options);
const startTime = Date.now();
const isColdStart = false;
let timeout = await modelManager2.getAdaptiveTimeout(model, isColdStart);
if (process.env.OLLAMA_REQUEST_TIMEOUT) {
timeout = parseInt(process.env.OLLAMA_REQUEST_TIMEOUT, 10) * 1e3;
} else if (isColdStart && process.env.OLLAMA_STARTUP_TIMEOUT) {
timeout = parseInt(process.env.OLLAMA_STARTUP_TIMEOUT, 10) * 1e3;
} else if (!isColdStart && process.env.OLLAMA_WARMUP_TIMEOUT) {
timeout = parseInt(process.env.OLLAMA_WARMUP_TIMEOUT, 10) * 1e3;
}
console.error(`[OllamaClient] Using model: ${model} (timeout: ${timeout}ms, cold_start: ${isColdStart})`);
console.error(`[OllamaClient] Prompt length: ${prompt.length} chars`);
try {
console.error("[OllamaClient] Calling sharedGenerate...");
const text = await ollamaGenerate({
model,
prompt,
format: "text",
timeoutMs: timeout,
retries: 2
});
const timeMs = Date.now() - startTime;
console.error(`[OllamaClient] sharedGenerate completed in ${timeMs}ms`);
const tokensInput = Math.ceil(prompt.length / 4);
const tokensGenerated = Math.ceil(text.length / 4);
return {
text,
model,
tokensGenerated,
tokensInput,
tokensTotal: tokensInput + tokensGenerated,
timeMs
};
} catch (error) {
if (error.message?.includes("not found") || error.message?.includes("404")) {
console.warn(`[OllamaClient] Model ${model} failed, trying fallback...`);
const criteria = {
task: "code",
complexity: options.complexity || "simple"
};
const fallbackChain = await modelManager2.getFallbackChain(model, criteria);
if (fallbackChain.length > 1) {
const fallbackModel = fallbackChain[1];
console.error(`[OllamaClient] Retrying with fallback model: ${fallbackModel}`);
const fallbackTimeout = await modelManager2.getAdaptiveTimeout(fallbackModel, isColdStart);
const text = await ollamaGenerate({
model: fallbackModel,
prompt,
format: "text",
timeoutMs: fallbackTimeout,
retries: 1
});
const timeMs = Date.now() - startTime;
const tokensInput = Math.ceil(prompt.length / 4);
const tokensGenerated = Math.ceil(text.length / 4);
return {
text,
model: fallbackModel,
tokensGenerated,
tokensInput,
tokensTotal: tokensInput + tokensGenerated,
timeMs
};
}
throw new Error(
`No available models found. Please pull at least one model: ollama pull qwen2.5:3b`
);
}
throw error;
}
}
/**
* Check if Ollama is running and models are available
*/
async checkHealth() {
const errors = [];
let running = false;
let availableModels = [];
try {
const response = await this.ollama.list();
running = true;
availableModels = response.models.map((m) => m.name);
const recommendedModels = [
"qwen2.5-coder:7b",
// PRIMARY: Best 7B code model
"mistral:7b",
// ANALYSIS: API integration, research, planning
"deepseek-coder:1.3b",
// FALLBACK: Code-specific, small
"qwen2.5:3b",
// ROUTER: Fast for routing
"nomic-embed-text"
// EMBEDDINGS: For semantic search
];
for (const model of recommendedModels) {
if (!availableModels.includes(model)) {
errors.push(`Model ${model} not found. Run: ollama pull ${model}`);
}
}
} catch (error) {
errors.push(`Ollama not running. Please start Ollama.`);
}
return {
running,
models: availableModels,
errors
};
}
/**
* Get model info
*/
getModelInfo(modelName) {
return this.models.get(modelName);
}
/**
* List all configured models
*/
listModels() {
return Array.from(this.models.values());
}
/**
* Auto-start Ollama if not running (saves Augment credits!)
* Enhanced with better detection, configurable timeout, and exponential backoff
*/
async startOllama() {
console.error("\u{1F680} Auto-starting Ollama...");
const timeoutSeconds = parseInt(process.env.OLLAMA_STARTUP_TIMEOUT || "180", 10);
console.error(`\u23F1\uFE0F Using startup timeout: ${timeoutSeconds} seconds`);
const ollamaPath = process.env.OLLAMA_PATH || (process.platform === "win32" ? "C:\\Users\\chris\\AppData\\Local\\Programs\\Ollama\\ollama.exe" : "ollama");
try {
console.error("\u{1F50D} Checking if Ollama is already running...");
console.error(`\u{1F517} Base URL: ${this.baseUrl}`);
const isRunning = await pingOllama(5e3);
if (isRunning) {
console.error("\u2705 Ollama is already running!");
return;
}
console.error("\u274C Ollama not responding, attempting to start...");
console.error(`\u{1F680} Spawning Ollama process: ${ollamaPath}`);
this.ollamaProcess = spawn(ollamaPath, ["serve"], {
detached: true,
stdio: "ignore",
windowsHide: true
});
this.ollamaProcess.unref();
this.startedByUs = true;
console.error(`\u23F3 Waiting for Ollama to be ready (timeout: ${timeoutSeconds}s)...`);
const delays = [1e3, 2e3, 4e3, 8e3];
let totalWait = 0;
let attemptCount = 0;
while (totalWait < timeoutSeconds * 1e3) {
const delay = attemptCount < delays.length ? delays[attemptCount] : 1e3;
await new Promise((resolve2) => setTimeout(resolve2, delay));
totalWait += delay;
attemptCount++;
try {
const ready = await pingOllama(2e3);
if (ready) {
console.error(`\u2705 Ollama ready after ${totalWait}ms!`);
return;
}
} catch {
console.error(`\u23F3 Still waiting... (${Math.floor(totalWait / 1e3)}s / ${timeoutSeconds}s)`);
}
}
throw new Error(`Ollama started but not ready within ${timeoutSeconds} seconds. Try increasing OLLAMA_START_TIMEOUT.`);
} catch (error) {
if (error.code === "ENOENT") {
throw new Error(
`Ollama not found at: ${ollamaPath}
Please install Ollama from https://ollama.com or set OLLAMA_PATH environment variable.`
);
}
if (error.code === "EADDRINUSE" || error.message?.includes("address already in use")) {
throw new Error(
`Port 11434 is already in use. Another Ollama instance may be running.
Try: pkill ollama (Linux/Mac) or taskkill /F /IM ollama.exe (Windows)`
);
}
throw new Error(`Failed to auto-start Ollama: ${error.message}`);
}
}
/**
 * Ensure Ollama is running (auto-start if needed)
 * Enhanced with better health checking using pingOllama
 *
 * Flow: ping the server; if healthy, return. Otherwise, when autoStart is
 * enabled, spawn it via startOllama() and re-ping; when autoStart is off,
 * throw with manual-start instructions. The catch block retries
 * startOllama() once more, but only when the failure message does not
 * indicate a previous auto-start attempt already failed (matched by the
 * "auto-start" / "started but still not responding" substrings), which
 * prevents a second spawn after a failed one.
 *
 * @throws {Error} When Ollama cannot be reached and auto-start is disabled
 *   or has already failed.
 */
async ensureRunning() {
console.error(`[OllamaClient] Ensuring Ollama is running at ${this.baseUrl}...`);
try {
console.error("[OllamaClient] Checking Ollama health...");
// 10-second budget for the initial health probe.
const isRunning = await pingOllama(1e4);
if (isRunning) {
console.error("[OllamaClient] \u2705 Ollama is running and healthy!");
return;
}
console.error("[OllamaClient] \u274C Ollama not responding");
if (this.autoStart) {
console.error("[OllamaClient] Auto-start enabled, attempting to start Ollama...");
await this.startOllama();
// Shorter 5-second probe to confirm the freshly spawned server answers.
const isNowRunning = await pingOllama(5e3);
if (!isNowRunning) {
throw new Error("Ollama started but still not responding to health checks");
}
console.error("[OllamaClient] \u2705 Ollama started successfully!");
} else {
throw new Error(
"Ollama is not running. Please start Ollama with: ollama serve\nOr enable auto-start by setting autoStart=true in constructor."
);
}
} catch (error) {
console.error(`[OllamaClient] Error in ensureRunning: ${error.message}`);
// Fallback path: only retry the spawn when the error did not itself come
// from an auto-start attempt (detected via message substrings above).
if (this.autoStart && !error.message?.includes("auto-start") && !error.message?.includes("started but still not responding")) {
console.error("[OllamaClient] Ping failed, trying auto-start as fallback...");
// NOTE(review): success of this fallback spawn is not re-verified with a
// ping before returning — confirm callers tolerate that.
await this.startOllama();
} else {
throw error;
}
}
}
/**
* Cleanup spawned Ollama process on shutdown
*/
async cleanup() {
if (this.ollamaProcess && this.startedByUs) {
console.error("\u{1F9F9} Cleaning up spawned Ollama process...");
try {
this.ollamaProcess.kill();
console.error("\u2705 Ollama process terminated");
} catch (error) {
console.error(`\u26A0\uFE0F Failed to kill Ollama process: ${error.message}`);
}
}
}
};
// src/shared/shared-llm/llm-client.ts
/**
 * Generate text through one of the supported LLM providers.
 *
 * Dispatches on `options.provider` ("ollama" | "openai" | "claude" |
 * "voyage") and normalizes every backend's response into a single shape.
 *
 * @param {object} options
 * @param {string} options.provider - Backend to use.
 * @param {string} options.model - Provider-specific model identifier.
 * @param {string} options.prompt - Prompt, sent as a single user message.
 * @param {string} [options.format] - "json" requests JSON output where the
 *   backend supports it (openai, voyage).
 * @param {number} [options.timeoutMs] - Forwarded to Ollama only.
 * @param {number} [options.retries] - Forwarded to Ollama only.
 * @param {number} [options.temperature] - Defaults to 0.2 for hosted providers.
 * @param {number} [options.maxTokens] - Defaults to 4096 for hosted providers.
 * @returns {Promise<{text: string, model: string, provider: string,
 *   tokensInput: number, tokensOutput: number, tokensTotal: number, cost: number}>}
 * @throws {Error} For an unsupported provider or a failed backend call.
 */
async function llmGenerate(options) {
  const { provider, model } = options;
  console.error(`[LLM Client] Using provider: ${provider}, model: ${model}`);
  switch (provider) {
    case "ollama":
      return generateViaOllama(options);
    case "openai":
      return generateViaOpenAI(options);
    case "claude":
      return generateViaClaude(options);
    case "voyage":
      return generateViaVoyage(options);
    default:
      throw new Error(`Unsupported provider: ${provider}`);
  }
}
// Local Ollama backend: free; token counts estimated at ~4 chars/token
// because the shared client does not report usage.
async function generateViaOllama({ model, prompt, format, timeoutMs, retries }) {
  const response = await ollamaGenerate({
    model,
    prompt,
    format,
    timeoutMs,
    retries
  });
  const tokensInput = Math.ceil(prompt.length / 4);
  const tokensOutput = Math.ceil(response.length / 4);
  return {
    text: response,
    model,
    provider: "ollama",
    tokensInput,
    tokensOutput,
    tokensTotal: tokensInput + tokensOutput,
    cost: 0
  };
}
// OpenAI chat-completions backend (SDK imported lazily to keep startup light).
async function generateViaOpenAI({ model, prompt, format, temperature, maxTokens }) {
  const OpenAI = (await import("openai")).default;
  const client = new OpenAI({
    apiKey: process.env.OPENAI_API_KEY
  });
  const response = await client.chat.completions.create({
    model,
    messages: [{ role: "user", content: prompt }],
    temperature: temperature ?? 0.2,
    max_tokens: maxTokens ?? 4096,
    response_format: format === "json" ? { type: "json_object" } : void 0
  });
  const text = response.choices[0]?.message?.content || "";
  const tokensInput = response.usage?.prompt_tokens || 0;
  const tokensOutput = response.usage?.completion_tokens || 0;
  const tokensTotal = response.usage?.total_tokens || 0;
  // NOTE(review): a flat gpt-4o rate ($2.50/M in, $10/M out) is applied to
  // every OpenAI model, so cost is inaccurate for e.g. gpt-4o-mini — confirm
  // against a real pricing table.
  const costPerInputToken = 25e-4 / 1e3;
  const costPerOutputToken = 0.01 / 1e3;
  const cost = tokensInput * costPerInputToken + tokensOutput * costPerOutputToken;
  return {
    text,
    model,
    provider: "openai",
    tokensInput,
    tokensOutput,
    tokensTotal,
    cost
  };
}
// Anthropic Claude backend. `format` is not used on this path.
async function generateViaClaude({ model, prompt, temperature, maxTokens }) {
  const { Anthropic } = await import("@anthropic-ai/sdk");
  const client = new Anthropic({
    apiKey: process.env.ANTHROPIC_API_KEY
  });
  const response = await client.messages.create({
    model,
    max_tokens: maxTokens ?? 4096,
    temperature: temperature ?? 0.2,
    messages: [{ role: "user", content: prompt }]
  });
  const content = response.content[0];
  const text = content.type === "text" ? content.text : "";
  const tokensInput = response.usage.input_tokens;
  const tokensOutput = response.usage.output_tokens;
  // NOTE(review): flat Claude 3.5 Sonnet rate ($3/M in, $15/M out) applied
  // to every Claude model — confirm against a real pricing table.
  const costPerInputToken = 3e-3 / 1e3;
  const costPerOutputToken = 0.015 / 1e3;
  const cost = tokensInput * costPerInputToken + tokensOutput * costPerOutputToken;
  return {
    text,
    model,
    provider: "claude",
    tokensInput,
    tokensOutput,
    tokensTotal: tokensInput + tokensOutput,
    cost
  };
}
// Voyage backend via an OpenAI-compatible chat/completions HTTP endpoint.
async function generateViaVoyage({ model, prompt, format, temperature, maxTokens }) {
  const apiKey = process.env.VOYAGE_API_KEY || process.env.ANTHROPIC_API_KEY;
  if (!apiKey) {
    throw new Error("Voyage API key missing. Set VOYAGE_API_KEY or reuse ANTHROPIC_API_KEY.");
  }
  const baseUrl = (process.env.VOYAGE_BASE_URL || "https://api.voyageai.com/v1").replace(/\/$/, "");
  const response = await fetch(`${baseUrl}/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`
    },
    body: JSON.stringify({
      model,
      messages: [{ role: "user", content: prompt }],
      temperature: temperature ?? 0.2,
      max_output_tokens: maxTokens ?? 4096,
      response_format: format === "json" ? { type: "json_object" } : void 0
    })
  });
  if (!response.ok) {
    const errorText = await response.text().catch(() => "Unknown error");
    throw new Error(`Voyage request failed: HTTP ${response.status} ${errorText}`);
  }
  const data = await response.json();
  const text = data.choices?.[0]?.message?.content ?? "";
  const usage = data.usage ?? {};
  const tokensInput = usage.prompt_tokens ?? 0;
  const tokensOutput = usage.completion_tokens ?? 0;
  const tokensTotal = usage.total_tokens ?? tokensInput + tokensOutput;
  // Same flat rate applied to input and output tokens.
  const costPerInputToken = 12e-5;
  const costPerOutputToken = 12e-5;
  const cost = tokensInput * costPerInputToken + tokensOutput * costPerOutputToken;
  return {
    text,
    model,
    provider: "voyage",
    tokensInput,
    tokensOutput,
    tokensTotal,
    cost
  };
}
// src/shared/shared-llm/toolkit-client.ts
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
import { createRequire } from "module";
import * as fs3 from "fs";
import * as path3 from "path";
// Module-level singleton for the toolkit MCP client; created lazily elsewhere.
var sharedToolkitClient = null;
// Best-effort teardown when the process exits.
// NOTE(review): 'exit' handlers run synchronously — the async disconnect()
// started here may not complete before the process dies; confirm whether a
// 'beforeExit' hook or explicit shutdown call is needed.
process.on("exit", () => {
if (sharedToolkitClient) {
sharedToolkitClient.disconnect().catch(console.error);
}
});
// src/shared/shared-llm/thinking-client.ts
import { Client as Client2 } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport as StdioClientTransport2 } from "@modelcontextprotocol/sdk/client/stdio.js";
// Module-level singleton for the thinking MCP client; created lazily elsewhere.
var sharedThinkingClient = null;
// Best-effort teardown when the process exits.
// NOTE(review): 'exit' handlers run synchronously — the async disconnect()
// started here may not complete before the process dies; confirm whether a
// 'beforeExit' hook or explicit shutdown call is needed.
process.on("exit", () => {
if (sharedThinkingClient) {
sharedThinkingClient.disconnect().catch(console.error);
}
});
// src/shared/shared-llm/file-editor.ts
import * as fs4 from "fs";
import * as path4 from "path";
// src/shared/shared-llm/workspace.ts
import { resolve, dirname as dirname3, join as join3 } from "path";
import { existsSync } from "fs";
// src/shared/shared-llm/llm-router.ts
import http from "http";
// src/shared/shared-llm/metrics/openai-adapter.ts
// Hard-coded OpenAI price fallbacks (USD per 1K tokens), used when live
// pricing data is unavailable.
var FALLBACK_PRICING = (() => {
  const entry = (inputPer1k, outputPer1k) => ({
    cost_per_1k_input: inputPer1k,
    cost_per_1k_output: outputPer1k,
    last_updated: Date.now(),
    source: "fallback"
  });
  return {
    "gpt-4o-mini": entry(15e-5, 6e-4),
    "gpt-4o": entry(25e-4, 0.01),
    "o1-mini": entry(3e-3, 0.012),
    "o1": entry(0.015, 0.06)
  };
})();
// src/shared/shared-llm/metrics/anthropic-adapter.ts
// Hard-coded Anthropic price fallbacks (USD per 1K tokens), used when live
// pricing data is unavailable.
var FALLBACK_PRICING2 = (() => {
  const entry = (inputPer1k, outputPer1k) => ({
    cost_per_1k_input: inputPer1k,
    cost_per_1k_output: outputPer1k,
    last_updated: Date.now(),
    source: "fallback"
  });
  return {
    "claude-3-5-sonnet-20241022": entry(3e-3, 0.015),
    "claude-3-5-haiku-20241022": entry(1e-3, 5e-3),
    "claude-3-opus-20240229": entry(0.015, 0.075)
  };
})();
// src/shared/shared-llm/metrics/moonshot-adapter.ts
// Hard-coded Moonshot price fallbacks (USD per 1K tokens), converted from
// published CNY rates; input and output share the same rate.
var FALLBACK_PRICING3 = (() => {
  const entry = (perThousand) => ({
    cost_per_1k_input: perThousand,
    cost_per_1k_output: perThousand,
    last_updated: Date.now(),
    source: "fallback"
  });
  return {
    "moonshot-v1-8k": entry(12e-5),   // ¥0.012/1K tokens ≈ $0.00012
    "moonshot-v1-32k": entry(24e-5),  // ¥0.024/1K tokens ≈ $0.00024
    "moonshot-v1-128k": entry(6e-4)   // ¥0.06/1K tokens ≈ $0.00060
  };
})();
// src/shared/shared-llm/metrics/voyage-adapter.ts
// Hard-coded Voyage price fallbacks (USD per 1K tokens), used when live
// pricing data is unavailable.
var FALLBACK_PRICING4 = (() => {
  const entry = (inputPer1k, outputPer1k) => ({
    cost_per_1k_input: inputPer1k,
    cost_per_1k_output: outputPer1k,
    last_updated: Date.now(),
    source: "fallback"
  });
  return {
    "voyage-3": entry(6e-4, 12e-4),
    "voyage-3-lite": entry(1e-4, 2e-4)
  };
})();
// src/generation/ops-mcp-generator.ts
/**
 * Adapts the generic llmGenerate() entry point to the minimal
 * `{ generate(opts) -> { text } }` interface the ops generator expects.
 */
var LLMClientAdapter = class {
  ollama;
  constructor() {
    // NOTE(review): this OllamaClient instance is never read by generate();
    // kept to preserve construction side effects — confirm it can be dropped.
    this.ollama = new OllamaClient();
  }
  /**
   * Route a request to OpenAI when the model name begins with "gpt-" or
   * "o1-", otherwise to local Ollama, and return only the generated text.
   * @param {{model: string, prompt: string, format?: string, timeoutMs?: number}} opts
   * @returns {Promise<{text: string}>}
   */
  async generate(opts) {
    const isOpenAIModel = opts.model.startsWith("gpt-") || opts.model.startsWith("o1-");
    const outcome = await llmGenerate({
      provider: isOpenAIModel ? "openai" : "ollama",
      model: opts.model,
      prompt: opts.prompt,
      format: opts.format,
      timeoutMs: opts.timeoutMs
    });
    return { text: outcome.text };
  }
};
var OpsMCPGenerator = class {
name = "ops-mcp-generator";
opsGen;
constructor() {
const llmClient = new LLMClientAdapter();
this.