UNPKG

ludmi

Version:

LU (Layer Understanding) is a lightweight framework for controlled chatbot interactions with LLMs, action orchestration, and retrieval-augmented generation (RAG).

54 lines (51 loc) 2.09 kB
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.rag = exports.retriever = void 0;
const openai_1 = require("./llm/openai");
/**
 * Cosine similarity between two equal-length numeric vectors.
 *
 * FIX: the original returned NaN when either vector had zero magnitude
 * (division by zero), and NaN scores make the retriever's sort comparator
 * unreliable (NaN compares as unordered). A zero vector now scores 0.
 *
 * @param {number[]} vecA
 * @param {number[]} vecB - assumed same length as vecA (as in the original)
 * @returns {number} similarity in [-1, 1], or 0 for zero-magnitude input
 */
const cosineSimilarity = (vecA, vecB) => {
    const dotProduct = vecA.reduce((acc, cur, idx) => acc + cur * vecB[idx], 0);
    const normA = Math.sqrt(vecA.reduce((acc, cur) => acc + cur * cur, 0));
    const normB = Math.sqrt(vecB.reduce((acc, cur) => acc + cur * cur, 0));
    const denom = normA * normB;
    // Guard: avoid NaN/Infinity when a vector is all zeros.
    return denom === 0 ? 0 : dotProduct / denom;
};
/**
 * Return the `size` chunks whose stored embeddings are most similar
 * (by cosine similarity) to the embedding of `revised_prompt`.
 *
 * @param {Object} args
 * @param {string} args.revised_prompt - query text to embed
 * @param {Array<{embedding: number[]}>} args.data - candidate chunks, each carrying a precomputed embedding
 * @param {number} [args.size=4] - how many top-scoring chunks to return
 * @returns {Promise<Array<Object>>} top chunks, each augmented with a `score` field, sorted descending
 */
const retriever = async ({ revised_prompt, data, size = 4 }) => {
    const embedding = await (0, openai_1.getEmbeddings)(revised_prompt);
    // Score every chunk against the query embedding (non-mutating copy per item).
    const chunks = data.map(item => ({ ...item, score: cosineSimilarity(embedding, item.embedding) }));
    // Highest similarity first.
    chunks.sort((a, b) => b.score - a.score);
    return chunks.slice(0, size);
};
exports.retriever = retriever;
/**
 * Run retrieval-augmented generation: build a developer instruction that
 * restricts the model to the retrieved `chunks`, append either the revised
 * prompt or the full conversation, and ask the model for a response.
 *
 * @param {Object} args
 * @param {Array<Object>} args.chunks - retrieved context chunks (serialized into the instruction)
 * @param {Array<{role: string, content: string}>} [args.conversation] - prior messages; used when revised_prompt is absent
 * @param {string} [args.revised_prompt] - single user prompt; takes precedence over conversation
 * @param {Object} [args.userData={}] - known user facts, serialized into the instruction
 * @param {string} [args.model="gpt-4o-mini"] - model identifier passed through to the LLM wrapper
 * @param {string} [args.prompt] - optional override for the default developer instruction
 * @returns {Promise<*>} whatever `getAIResponse` resolves to
 * @throws {Error} when neither revised_prompt nor a non-empty conversation is given
 */
const rag = async ({ chunks, conversation, revised_prompt, userData = {}, model = "gpt-4o-mini", prompt }) => {
    // Either conversation or revised_prompt must be provided.
    if (!conversation?.length && !revised_prompt)
        throw new Error("Debe enviar revised_prompt o conversation");
    const developerInstruction = prompt || `Responder solo usando la información extraída de la BD: \`${JSON.stringify(chunks)}\` REGLAS DE RESPUESTA: - Mismo idioma del usuario. - Estilo whatsapp, ejemplo para destacar palabra clave: *palabra* (No usar doble asteriscos; **MAL**). - Emojis con coherencia. Datos que conocemos del usuario: \`${JSON.stringify(userData)}\` `;
    const messages = [
        { role: "developer", content: developerInstruction }
    ];
    // A revised prompt wins over the raw conversation when both are present.
    if (revised_prompt)
        messages.push({ role: "user", content: revised_prompt });
    else if (conversation)
        messages.push(...conversation);
    const AIresponse = await (0, openai_1.getAIResponse)({ messages, model, temperature: 1 });
    return AIresponse;
};
exports.rag = rag;