arela

AI-powered CTO with multi-agent orchestration, code summarization, and visual testing (web + mobile) for blazing-fast development.

import type { SemanticContract } from "../extractor/types.js";
import { type TechnicalSummary } from "./types.js";
export type LLMCaller = (prompt: string) => Promise<string>;
export interface SynthesizerOptions {
    /**
     * Custom LLM caller for testing or advanced usage.
     * Receives a full prompt and must return a raw JSON string.
     */
    llmCaller?: LLMCaller;
    /**
     * Force local summarization without any remote LLM calls.
     * Useful for tests or fully offline environments.
     */
    forceLocal?: boolean;
}
export declare class LLMSynthesizer {
    private readonly options;
    private readonly openaiModel;
    private readonly ollamaModel;
    private openai?;
    private backend;
    private initialized;
    constructor(options?: SynthesizerOptions);
    /**
     * Synthesize semantic contract into technical summary.
     *
     * The method:
     * - Builds a structured prompt from SemanticContract
     * - Uses best-available backend (OpenAI, Ollama, or local)
     * - Enforces TechnicalSummary schema with zod
     * - Computes basic metadata (token count, compression ratio, timestamp)
     */
    synthesize(contract: SemanticContract): Promise<TechnicalSummary>;
    private initBackend;
    private callOpenAI;
    private callOllama;
    /**
     * Local, deterministic summarizer used when no LLM backend
     * is available or when forceLocal is enabled. This keeps
     * tests fast and avoids mandatory network calls.
     */
    private buildLocalSummary;
    private safeParseJson;
    private countTokens;
    private calculateCompression;
}
//# sourceMappingURL=llm-synthesizer.d.ts.map
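
A minimal usage sketch based on this declaration only. The relative import path, the use of a `declare`d placeholder contract, and the top-level-await module context are assumptions, not part of the package's documented API; the `forceLocal` and `llmCaller` options come directly from `SynthesizerOptions` above.

// Usage sketch (assumed paths, relative to this declaration file):
import { LLMSynthesizer, type SynthesizerOptions } from "./llm-synthesizer.js";
import type { SemanticContract } from "../extractor/types.js";

// forceLocal keeps summarization deterministic and offline, per the
// SynthesizerOptions doc comment; no OpenAI or Ollama call is made.
const options: SynthesizerOptions = { forceLocal: true };
const synthesizer = new LLMSynthesizer(options);

// Hypothetical contract value; the real SemanticContract shape lives in
// ../extractor/types.js and is not shown in this file.
declare const contract: SemanticContract;

// Returns a TechnicalSummary validated against the zod schema, with
// token count, compression ratio, and timestamp metadata attached.
const summary = await synthesizer.synthesize(contract);
console.log(summary);

// Alternatively, inject a custom backend; the caller receives the full
// prompt and must return a raw JSON string:
// new LLMSynthesizer({ llmCaller: async (prompt) => fetchMyModel(prompt) });

Passing `forceLocal: true` routes synthesis through the private `buildLocalSummary` path described in the comments, which is the intended way to keep tests fast and avoid mandatory network calls.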