// hikma-engine — Code Knowledge Graph Indexer: a TypeScript-based indexer that
// transforms Git repositories into multi-dimensional knowledge stores for AI agents.
// (Compiled JavaScript output.)
"use strict";
/**
* @file Module responsible for synthesizing search results into coherent answers using local LLM.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.AnswerSynthesizer = void 0;
const transformers_1 = require("@xenova/transformers");
const logger_1 = require("../utils/logger");
const error_handling_1 = require("../utils/error-handling");
/**
 * Synthesizes semantic-search results into coherent natural-language answers
 * using a local transformers.js text-generation model (RAG pattern).
 */
class AnswerSynthesizer {
    /**
     * Initializes the Answer Synthesizer.
     * @param {ConfigManager} config - Configuration manager instance.
     */
    constructor(config) {
        this.logger = (0, logger_1.getLogger)('AnswerSynthesizer');
        this.model = null;
        // Caches an in-flight load so concurrent callers share one pipeline
        // load instead of starting two downloads.
        this.modelLoadPromise = null;
        this.config = config;
        this.logger.info('Initializing AnswerSynthesizer');
        // Let transformers.js resolve models from the local cache or remotely.
        transformers_1.env.allowRemoteModels = true;
        transformers_1.env.allowLocalModels = true;
    }
    /**
     * Loads the text generation model for answer synthesis.
     * Idempotent: if the model is already loaded this is a no-op, and
     * concurrent callers await the same in-flight load.
     * @returns {Promise<void>}
     * @throws {Error} If the transformers.js pipeline fails to load.
     */
    async loadModel() {
        if (this.model) {
            this.logger.debug('Model already loaded, skipping');
            return;
        }
        if (this.modelLoadPromise) {
            // A load is already in progress; join it rather than starting another.
            return this.modelLoadPromise;
        }
        const operation = this.logger.operation('Loading answer synthesis model');
        this.modelLoadPromise = (async () => {
            try {
                const aiConfig = this.config.getAIConfig();
                // Load the transformers.js pipeline for text generation.
                this.logger.info('Loading transformers.js text generation model', {
                    model: aiConfig.summary.model,
                });
                this.model = await (0, transformers_1.pipeline)('text-generation', aiConfig.summary.model);
                this.logger.info('Answer synthesis model loaded successfully');
            }
            catch (error) {
                this.logger.error('Failed to load answer synthesis model', {
                    error: (0, error_handling_1.getErrorMessage)(error),
                });
                throw error;
            }
            finally {
                // End the timed operation on both success and failure, and clear
                // the in-flight marker so a failed load can be retried.
                this.modelLoadPromise = null;
                operation();
            }
        })();
        return this.modelLoadPromise;
    }
    /**
     * Synthesizes search results into a coherent answer to the user's question using RAG approach.
     * @param {string} question - The user's original query/question
     * @param {Array<{node: any, similarity: number}>} results - Search results with nodes and similarity scores
     * @returns {Promise<string>} The synthesized answer
     * @throws {Error} If model loading or generation fails, or the model
     *   returns an unexpected result shape.
     */
    async synthesizeAnswer(question, results) {
        // Hard cap on the joined context passed to the model (characters);
        // keeps the prompt within a small model's context window.
        const maxContextChars = 4000;
        const operation = this.logger.operation('Synthesizing answer');
        try {
            this.logger.info(`Starting answer synthesis for question: "${question}"`);
            if (results.length === 0) {
                this.logger.info('No results to synthesize');
                return "I couldn't find any relevant information to answer your question.";
            }
            // Ensure model is loaded
            if (!this.model) {
                await this.loadModel();
            }
            // Extract source text from nodes, dropping empty entries so the
            // prompt is not padded with blank lines.
            const docs = results
                .map((result) => result.node.sourceText || '')
                .filter((text) => text.length > 0);
            // Log only the count at debug level; dumping full document bodies
            // at info level is noisy and may leak source content into logs.
            this.logger.debug('Synthesizing from documents', { documentCount: docs.length });
            // Join documents and truncate if needed (to fit within model's context window).
            let context = docs.join('\n');
            if (context.length > maxContextChars) {
                this.logger.warn('Context truncated to fit model context window', {
                    originalLength: context.length,
                    truncatedLength: maxContextChars,
                });
                context = context.slice(0, maxContextChars);
            }
            // Create the prompt for the model following the RAG pattern
            const prompt = `Answer the question based only on the context below. If the question is not related to the context, say "I don't know".
${context}
Q: ${question}
A:`;
            this.logger.debug('Generating answer with prompt', {
                promptLength: prompt.length,
            });
            // Generate answer using the loaded pipeline
            const result = await this.model(prompt, {
                max_new_tokens: 256,
                temperature: 0.7,
                repetition_penalty: 1.2,
                do_sample: true,
            });
            if (!(result && result[0] && result[0].generated_text)) {
                throw new Error('Unexpected result format from text generation model');
            }
            // The pipeline echoes the prompt; keep only the newly generated text.
            const answer = result[0].generated_text.substring(prompt.length).trim();
            this.logger.debug('Answer generated successfully', {
                answerLength: answer.length,
            });
            return answer;
        }
        catch (error) {
            this.logger.error('Answer synthesis failed', {
                error: (0, error_handling_1.getErrorMessage)(error),
            });
            throw error;
        }
        finally {
            // Always end the timed operation, on success and failure alike.
            operation();
        }
    }
}
exports.AnswerSynthesizer = AnswerSynthesizer;