arela
AI-powered CTO with multi-agent orchestration, code summarization, and visual testing (web + mobile) for blazing-fast development.
207 lines • 7.51 kB
JavaScript
import OpenAI from "openai";
import ollama from "ollama";
import { TechnicalSummarySchema } from "./types.js";
import { buildSynthesizerPrompt } from "./prompts.js";
import { checkAPIKeys, showAPIKeyGuide } from "../../utils/api-key-helper.js";
export class LLMSynthesizer {
options;
openaiModel = "gpt-4o-mini";
ollamaModel = "qwen2.5:3b";
openai;
backend = "local";
initialized = false;
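/**
 * @param {object} [options]
 * @param {boolean} [options.forceLocal] Always use the deterministic
 * local summarizer and skip remote backends.
 * @param {(prompt: string) => Promise<string>} [options.llmCaller]
 * Test/advanced hook: receives the built prompt and returns raw model
 * text (JSON, optionally wrapped in a Markdown code fence).
 */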
constructor(options = {}) {
this.options = options;
}
/**
* Synthesize semantic contract into technical summary.
*
* The method:
* - Builds a structured prompt from SemanticContract
* - Uses the best available backend (OpenAI, Ollama, or local)
* - Enforces the TechnicalSummary schema with zod
* - Computes basic metadata (token count, compression ratio, timestamp)
*/
async synthesize(contract) {
if (!this.initialized) {
await this.initBackend();
}
const prompt = buildSynthesizerPrompt(contract);
let rawSummary;
if (this.options.llmCaller) {
// Test/advanced mode: external caller fully controls LLM interaction.
const text = await this.options.llmCaller(prompt);
rawSummary = this.safeParseJson(text);
}
else if (this.backend === "openai" && this.openai) {
rawSummary = await this.callOpenAI(prompt);
}
else if (this.backend === "ollama") {
rawSummary = await this.callOllama(prompt);
}
else {
rawSummary = this.buildLocalSummary(contract);
}
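// Validate and normalize the raw output; zod throws on schema mismatch.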
const parsed = TechnicalSummarySchema.parse(rawSummary);
const base = {
filePath: contract.filePath,
mainResponsibility: parsed.mainResponsibility,
publicAPI: parsed.publicAPI,
ioContracts: parsed.ioContracts,
dependencies: parsed.dependencies,
sideEffects: parsed.sideEffects,
keyAlgorithms: parsed.keyAlgorithms,
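// Placeholder counts; the real values are computed below once the
// summary object exists.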
metadata: {
tokenCount: 0,
compressionRatio: 0,
synthesizedAt: parsed.metadata?.synthesizedAt ?? new Date().toISOString(),
},
};
const tokenCount = this.countTokens(base);
const compressionRatio = this.calculateCompression(contract, tokenCount);
return {
...base,
metadata: {
...base.metadata,
tokenCount,
compressionRatio,
},
};
}
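/**
 * Choose a backend once, in order of preference: OpenAI (key present
 * and reachable), then Ollama (local daemon responding), then the
 * deterministic local summarizer. Called lazily from synthesize().
 */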
async initBackend() {
this.initialized = true;
if (this.options.forceLocal) {
this.backend = "local";
return;
}
const status = checkAPIKeys();
// Prefer OpenAI (cheap + already used elsewhere in Arela)
if (status.hasOpenAIKey) {
try {
this.openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
await this.openai.models.list();
this.backend = "openai";
return;
}
catch {
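// Key present but unusable; fall through and try Ollama below.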
this.backend = "local";
}
}
// Try Ollama as a free local fallback
try {
await ollama.list();
this.backend = "ollama";
return;
}
catch {
this.backend = "local";
}
if (!status.hasAnyKey) {
// No remote keys and no local model; show user-friendly guide.
showAPIKeyGuide("error");
}
}
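/**
 * One-shot chat completion in JSON mode; throws if the client was
 * never initialized or the API returns an empty message.
 */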
async callOpenAI(prompt) {
if (!this.openai) {
throw new Error("OpenAI client not initialized");
}
const response = await this.openai.chat.completions.create({
model: this.openaiModel,
messages: [{ role: "user", content: prompt }],
response_format: { type: "json_object" },
temperature: 0.3,
max_tokens: 512,
});
const content = response.choices[0]?.message?.content;
if (!content) {
throw new Error("No response from OpenAI synthesizer");
}
return this.safeParseJson(content);
}
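/**
 * Generate with a local Ollama model in JSON mode. keep_alive: -1
 * keeps the model loaded between calls to avoid reload latency.
 */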
async callOllama(prompt) {
const response = await ollama.generate({
model: this.ollamaModel,
prompt,
format: "json",
keep_alive: -1,
options: {
temperature: 0.2,
num_predict: 512,
},
});
return this.safeParseJson(response.response);
}
/**
* Local, deterministic summarizer used when no LLM backend
* is available or when forceLocal is enabled. This keeps
* tests fast and avoids mandatory network calls.
*/
buildLocalSummary(contract) {
const exportedNames = contract.exports.map((e) => e.name);
const mainResponsibility = contract.description ??
`Technical summary for ${contract.filePath} with ${exportedNames.length} exported symbol${exportedNames.length === 1 ? "" : "s"}.`;
const ioContracts = contract.exports
.flatMap((exp) => {
if (exp.signature) {
const params = exp.signature.params
.map((p) => `${p.name}: ${p.type ?? "unknown"}`)
.join(", ");
const returnType = exp.signature.returnType ?? "void";
return [
{
name: exp.name,
definition: `${exp.name}(${params}): ${returnType}`,
},
];
}
if (exp.methods) {
return exp.methods.map((m) => {
const params = m.signature.params
.map((p) => `${p.name}: ${p.type ?? "unknown"}`)
.join(", ");
const returnType = m.signature.returnType ?? "void";
return {
name: `${exp.name}.${m.name}`,
definition: `${m.name}(${params}): ${returnType}`,
};
});
}
return [];
});
const dependencies = contract.imports.length === 0
? "None"
: `Imports ${contract.imports.length} module(s): ${contract.imports
.map((i) => i.module)
.join(", ")}`;
const sideEffects = "Unknown - local summarizer cannot infer side effects";
return {
mainResponsibility,
publicAPI: exportedNames,
ioContracts,
dependencies,
sideEffects,
keyAlgorithms: contract.description
? "Inferred from file-level description"
: undefined,
};
}
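/**
 * Parse model output as JSON, stripping the Markdown code fences
 * that some models wrap around a response.
 */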
safeParseJson(text) {
const cleaned = text
.trim()
.replace(/^[`\s]*```json\s*/i, "")
.replace(/^[`\s]*```/, "")
.replace(/```[\s`]*$/, "");
return JSON.parse(cleaned || "{}");
}
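/**
 * Rough token estimate via the common ~4 characters-per-token
 * heuristic; exact counts are not needed for metadata.
 */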
countTokens(summary) {
const text = JSON.stringify(summary);
return Math.max(1, Math.ceil(text.length / 4));
}
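/**
 * Ratio of estimated contract tokens to summary tokens; values above
 * 1 mean the summary is smaller than the original contract.
 */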
calculateCompression(contract, summaryTokens) {
const originalText = JSON.stringify(contract);
const originalTokens = Math.max(1, Math.ceil(originalText.length / 4));
return originalTokens / summaryTokens;
}
}
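// Example usage (a minimal sketch; the SemanticContract fields shown are
// inferred from this file's local summarizer, not from a published spec):
//
//   const synthesizer = new LLMSynthesizer({ forceLocal: true });
//   const summary = await synthesizer.synthesize({
//     filePath: "src/math.js",
//     description: "Small arithmetic helpers",
//     exports: [{
//       name: "add",
//       signature: {
//         params: [{ name: "a", type: "number" }, { name: "b", type: "number" }],
//         returnType: "number",
//       },
//     }],
//     imports: [],
//   });
//   console.log(summary.metadata.compressionRatio);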
//# sourceMappingURL=llm-synthesizer.js.map