@jackhua/mini-langchain
A lightweight TypeScript implementation of LangChain with cost optimization features
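A minimal import sketch (assuming the package root re-exports these classes; the actual entry point is not shown on this page):

const { VectorStoreRetriever, RetrievalQAChain, ConversationalRetrievalChain } = require('@jackhua/mini-langchain');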
"use strict";
/**
 * Retrieval-based chains for Retrieval-Augmented Generation (RAG).
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.ConversationalRetrievalChain = exports.RetrievalQAChain = exports.VectorStoreRetriever = void 0;
const base_1 = require("./base");
const prompt_1 = require("../prompts/prompt");
/**
 * Vector store retriever.
 *
 * Wraps a vector store and exposes `getRelevantDocuments`. Supports plain
 * similarity search (the default) and maximal marginal relevance ('mmr');
 * `k` defaults to 4, and `searchKwargs` are forwarded to the MMR search.
 */
class VectorStoreRetriever {
    constructor(config) {
        this.vectorStore = config.vectorStore;
        this.k = config.k || 4;
        this.filter = config.filter;
        this.searchType = config.searchType || 'similarity';
        this.searchKwargs = config.searchKwargs || {};
    }
    async getRelevantDocuments(query) {
        if (this.searchType === 'mmr') {
            return this.vectorStore.maxMarginalRelevanceSearch(query, {
                k: this.k,
                filter: this.filter,
                ...this.searchKwargs
            });
        }
        else {
            return this.vectorStore.similaritySearch(query, this.k, this.filter);
        }
    }
}
exports.VectorStoreRetriever = VectorStoreRetriever;
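/**
 * Example usage (an illustrative sketch, not taken from the package docs).
 * Assumes a vector store exposing `similaritySearch(query, k, filter)` and
 * `maxMarginalRelevanceSearch(query, options)`, both resolving to documents
 * with a `pageContent` string; `myVectorStore` below is hypothetical.
 *
 * @example
 * const retriever = new VectorStoreRetriever({
 *     vectorStore: myVectorStore,
 *     k: 3,
 *     searchType: 'mmr', // extra searchKwargs are spread into the MMR options
 *     searchKwargs: { lambda: 0.5 } // assumed option; forwarded verbatim to the store
 * });
 * const docs = await retriever.getRelevantDocuments('How does retrieval work?');
 */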
/**
 * Retrieval QA Chain.
 *
 * Retrieves the top documents for a query, joins their `pageContent` into a
 * single context block, fills the QA prompt, and asks the LLM. Returns the
 * answer under `outputKey` along with the retrieved `source_documents`.
 */
class RetrievalQAChain extends base_1.BaseChain {
    constructor(config) {
        super();
        this.inputKey = 'query';
        this.outputKey = 'answer';
        this.llm = config.llm;
        this.retriever = config.retriever;
        this.inputKey = config.inputKey || 'query';
        this.outputKey = config.outputKey || 'answer';
        // Default QA prompt
        this.prompt = config.prompt || prompt_1.PromptTemplate.fromTemplate(`Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
Context:
{context}
Question: {question}
Answer:`);
    }
    get inputKeys() {
        return [this.inputKey];
    }
    get outputKeys() {
        return [this.outputKey];
    }
    async call(inputs) {
        const query = inputs[this.inputKey];
        // Retrieve relevant documents
        const docs = await this.retriever.getRelevantDocuments(query);
        // Format context
        const context = docs
            .map(doc => doc.pageContent)
            .join('\n\n');
        // Create prompt
        const prompt = await this.prompt.format({
            context,
            question: query
        });
        // Get answer from LLM
        const answer = await this.llm.call(prompt);
        return {
            [this.outputKey]: answer,
            source_documents: docs
        };
    }
    static fromLLM(llm, retriever, options) {
        return new RetrievalQAChain({
            llm,
            retriever,
            ...options
        });
    }
}
exports.RetrievalQAChain = RetrievalQAChain;
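/**
 * Example usage (an illustrative sketch). Assumes an `llm` object with an
 * async `call(prompt)` method returning a string, which is how the chain
 * invokes it above; `retriever` is a VectorStoreRetriever as sketched
 * earlier. Run inside an async function.
 *
 * @example
 * const chain = RetrievalQAChain.fromLLM(llm, retriever);
 * const result = await chain.call({ query: 'What does this package do?' });
 * console.log(result.answer);           // text produced by the LLM
 * console.log(result.source_documents); // documents the answer was grounded in
 */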
/**
 * Conversational Retrieval Chain.
 *
 * When chat history is present, first condenses the follow-up question into
 * a standalone question, then runs retrieval QA on it. Returns the answer,
 * the `source_documents`, and the `standalone_question` used for retrieval.
 */
class ConversationalRetrievalChain extends base_1.BaseChain {
    constructor(config) {
        super();
        this.inputKey = 'question';
        this.outputKey = 'answer';
        this.chatHistoryKey = 'chat_history';
        this.llm = config.llm;
        this.retriever = config.retriever;
        this.inputKey = config.inputKey || 'question';
        this.outputKey = config.outputKey || 'answer';
        this.chatHistoryKey = config.chatHistoryKey || 'chat_history';
        // Default question generator prompt
        this.questionGeneratorPrompt = config.questionGeneratorPrompt || prompt_1.PromptTemplate.fromTemplate(`Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:`);
        // Default QA prompt
        this.qaPrompt = config.qaPrompt || prompt_1.PromptTemplate.fromTemplate(`Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
Context:
{context}
Question: {question}
Answer:`);
    }
    get inputKeys() {
        return [this.inputKey, this.chatHistoryKey];
    }
    get outputKeys() {
        return [this.outputKey];
    }
    async call(inputs) {
        const question = inputs[this.inputKey];
        const chatHistory = inputs[this.chatHistoryKey] || '';
        // Generate standalone question if there's chat history
        let standaloneQuestion = question;
        if (chatHistory) {
            const questionPrompt = await this.questionGeneratorPrompt.format({
                chat_history: chatHistory,
                question
            });
            standaloneQuestion = await this.llm.call(questionPrompt);
        }
        // Retrieve relevant documents
        const docs = await this.retriever.getRelevantDocuments(standaloneQuestion);
        // Format context
        const context = docs
            .map(doc => doc.pageContent)
            .join('\n\n');
        // Create QA prompt
        const qaPrompt = await this.qaPrompt.format({
            context,
            question: standaloneQuestion
        });
        // Get answer from LLM
        const answer = await this.llm.call(qaPrompt);
        return {
            [this.outputKey]: answer,
            source_documents: docs,
            standalone_question: standaloneQuestion
        };
    }
    static fromLLM(llm, retriever, options) {
        return new ConversationalRetrievalChain({
            llm,
            retriever,
            ...options
        });
    }
}
exports.ConversationalRetrievalChain = ConversationalRetrievalChain;
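/**
 * Example usage (an illustrative sketch, same assumed `llm` and `retriever`
 * as above). Chat history is passed as a plain string; when it is non-empty,
 * the chain first condenses the follow-up into a standalone question before
 * retrieving.
 *
 * @example
 * const chain = ConversationalRetrievalChain.fromLLM(llm, retriever);
 * const result = await chain.call({
 *     question: 'And how does it handle follow-ups?',
 *     chat_history: 'Human: What is this chain for?\nAI: It answers questions over retrieved documents.'
 * });
 * console.log(result.standalone_question); // the condensed query used for retrieval
 * console.log(result.answer);
 */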
//# sourceMappingURL=retrieval.js.map