// @roadiehq/rag-ai-backend (compiled JavaScript)
'use strict';
var prompts = require('./prompts.cjs.js');
class LlmService {
  logger;
  model;
  prompts;

  constructor({ logger, model, configuredPrompts }) {
    this.logger = logger;
    this.model = model;
    // Build the prefix/suffix prompt templates, applying any user-configured overrides.
    this.prompts = prompts.createPromptTemplates(configuredPrompts);
  }

  async query(embeddings, query) {
    this.logger.info("Starting to prompt LLM.");
    // Join the retrieved embedding snippets into one context block for the prompt.
    const promptEmbeddings = embeddings
      .map((embedding) => embedding.content)
      .join("\n");
    // Human/Assistant framing: context prefix, a separator, then the suffix
    // prompt wrapping the user's query. The template lines stay unindented so
    // the rendered prompt text is unchanged.
    const prompt = `Human:
${this.prompts.prefixPrompt(promptEmbeddings)}
---
${this.prompts.suffixPrompt(query)}
Assistant:`;
    // Return the model's streaming response rather than awaiting a full completion.
    return this.model.stream(prompt);
  }
}
exports.LlmService = LlmService;
//# sourceMappingURL=LlmService.cjs.js.map
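// Usage sketch (a comment-only illustration, not part of the shipped file).
// Assumptions: `logger` exposes `.info()`, `model` is a LangChain-style LLM
// exposing `.stream(prompt)` that returns an async iterable, and retrieved
// embeddings are `{ content: string }` objects; the variable names below are
// hypothetical, and the calls would run inside an async function.
//
//   const { LlmService } = require('./LlmService.cjs.js');
//   const service = new LlmService({ logger, model, configuredPrompts });
//   const stream = await service.query(
//     [{ content: 'Relevant snippet retrieved from the vector store.' }],
//     'How do I configure this plugin?',
//   );
//   for await (const chunk of stream) {
//     process.stdout.write(String(chunk));
//   }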