@jackhua/mini-langchain
A lightweight TypeScript implementation of LangChain with cost optimization features
209 lines (206 loc) • 7.44 kB
JavaScript
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.QAChain = exports.ConversationChain = exports.LLMChain = void 0;
const base_1 = require("./base");
const prompt_1 = require("../prompts/prompt");
/**
* Chain to run a prompt through an LLM
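*
* A minimal usage sketch, assuming `LLMChain` is re-exported from the package root
* and `llm` is any model wrapper from this package that implements `call`:
* @example
* // inside an async function
* const { LLMChain } = require('@jackhua/mini-langchain');
* const chain = LLMChain.fromLLM(llm, 'Summarize in one sentence: {input}');
* const result = await chain.call({ input: 'Chains wire prompts to model calls.' });
* // result.text holds the model output (outputKey defaults to 'text')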
*/
class LLMChain extends base_1.BaseChain {
constructor(config) {
super(config);
this.llm = config.llm;
this.prompt = config.prompt;
this.outputKey = config.outputKey || 'text';
}
get inputKeys() {
// Get input variables from prompt, excluding memory variables
if (this.prompt instanceof prompt_1.PromptTemplate) {
const promptInputs = this.prompt.inputVariables;
if (this.memory) {
const memoryVars = this.memory.memoryVariables;
return promptInputs.filter(v => !memoryVars.includes(v));
}
return promptInputs;
}
else if (this.prompt instanceof prompt_1.ChatPromptTemplate) {
// For ChatPromptTemplate we would need to collect input variables from every message template.
// Simplified for now: assume a single 'input' variable.
return ['input'];
}
return [];
}
get outputKeys() {
return [this.outputKey];
}
async call(inputs, callbacks) {
// Merge chain-level and per-call callbacks (collected here but not yet forwarded to the model)
const allCallbacks = [...this.callbacks, ...(callbacks || [])];
// Format the prompt (a string for PromptTemplate, a Message[] for ChatPromptTemplate)
const formattedPrompt = await this.prepPrompt(inputs);
// Call the LLM
let result;
if (typeof formattedPrompt === 'string') {
result = await this.llm.call(formattedPrompt);
}
else {
const llmResult = await this.llm.generate(formattedPrompt);
result = llmResult.text;
}
return { [this.outputKey]: result };
}
/**
* Format the prompt with the inputs
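*
* Sketch of the return shape (assuming simple {variable} substitution in PromptTemplate):
* @example
* // With a PromptTemplate of 'Hello {name}':
* //   await chain.prepPrompt({ name: 'Ada' })  // -> 'Hello Ada'
* // With a ChatPromptTemplate it resolves to an array of { type, content } messages.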
*/
async prepPrompt(inputs) {
if (this.prompt instanceof prompt_1.PromptTemplate) {
return this.prompt.format(inputs);
}
else if (this.prompt instanceof prompt_1.ChatPromptTemplate) {
const chatValue = this.prompt.format(inputs);
return chatValue.messages.map(msg => {
if (msg.role === 'system') {
return { type: 'system', content: msg.content };
}
else if (msg.role === 'user' || msg.role === 'human') {
return { type: 'human', content: msg.content };
}
else if (msg.role === 'assistant' || msg.role === 'ai') {
return { type: 'ai', content: msg.content };
}
else {
return { type: 'human', content: msg.content };
}
});
}
throw new Error('Invalid prompt type');
}
/**
* Stream the response
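*
* Usage sketch, inside an async function, assuming a chain is already constructed
* and its model wrapper implements `stream`:
* @example
* for await (const token of chain.streamResponse({ input: 'Tell me a joke' })) {
*   process.stdout.write(token);
* }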
*/
async *streamResponse(inputs) {
const formattedPrompt = await this.prepPrompt(inputs);
if (typeof formattedPrompt === 'string') {
const messages = [{ type: 'human', content: formattedPrompt }];
for await (const chunk of this.llm.stream(messages)) {
yield chunk.text;
}
}
else {
for await (const chunk of this.llm.stream(formattedPrompt)) {
yield chunk.text;
}
}
}
/**
* Create an LLMChain from a prompt template string
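*
* Example (`llm` stands for a model wrapper defined elsewhere in this package):
* @example
* const chain = LLMChain.fromLLM(llm, 'Translate to French: {input}', { outputKey: 'translation' });
* // chain.call({ input: 'hello' }) then resolves to { translation: '...' }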
*/
static fromLLM(llm, prompt, config) {
const promptTemplate = prompt_1.PromptTemplate.fromTemplate(prompt);
return new LLMChain({
llm,
prompt: promptTemplate,
...config
});
}
}
exports.LLMChain = LLMChain;
/**
* ConversationChain - an LLMChain preconfigured for multi-turn conversation,
* with a default prompt that expects {history} and {input}
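*
* Usage sketch (inside an async function; `llm` is a model wrapper from this package;
* without a memory attached, the history is passed explicitly):
* @example
* const chain = new ConversationChain({ llm });
* const res = await chain.call({
*   input: 'What is my name?',
*   history: 'Human: Hi, I am Jack.\nAI: Hello Jack!'
* });
* // res.text holds the AI reply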
*/
class ConversationChain extends LLMChain {
constructor(config) {
// Default conversation prompt
const defaultPrompt = new prompt_1.PromptTemplate({
template: `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI:`,
inputVariables: ['history', 'input']
});
super({
...config,
prompt: config.prompt || defaultPrompt
});
}
get inputKeys() {
// For conversation chain, we typically just need 'input'
// The history comes from memory
return ['input'];
}
}
exports.ConversationChain = ConversationChain;
/**
* Question-Answering Chain
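*
* Usage sketch (inside an async function; `llm` is any model wrapper with a `call` method):
* @example
* const qa = new QAChain({ llm });
* const res = await qa.call({
*   question: 'What does this chain return?',
*   context: 'QAChain answers a question from a supplied context string.'
* });
* // res.answer holds the model output (outputKey defaults to 'answer')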
*/
class QAChain extends base_1.BaseChain {
constructor(config) {
super(config);
this.llm = config.llm;
this.inputKey = config.inputKey || 'question';
this.documentVariableName = config.documentVariableName || 'context';
this.outputKey = config.outputKey || 'answer';
// Default QA prompt
const defaultPrompt = new prompt_1.PromptTemplate({
template: `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Answer:`,
inputVariables: ['context', 'question']
});
this.prompt = config.prompt || defaultPrompt;
}
get inputKeys() {
return [this.inputKey, this.documentVariableName];
}
get outputKeys() {
return [this.outputKey];
}
async call(inputs, callbacks) {
const question = inputs[this.inputKey];
const context = inputs[this.documentVariableName];
const formattedPrompt = this.prompt.format({
[this.inputKey]: question,
[this.documentVariableName]: context
});
const answer = await this.llm.call(formattedPrompt);
return { [this.outputKey]: answer };
}
/**
* Create a QA chain from documents
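*
* Sketch of intended use. Note that, as implemented below, the joined context is not
* stored on the chain, so it must still be supplied at call time:
* @example
* const docs = [{ pageContent: 'Mini-langchain is a lightweight LangChain port.' }];
* const qa = QAChain.fromDocuments(llm, docs);
* const res = await qa.call({
*   question: 'What is mini-langchain?',
*   context: docs.map(d => d.pageContent).join('\n\n')
* });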
*/
static fromDocuments(llm, documents, config) {
// Combine the documents into a single context string.
// Note: as written, this combined context is not retained by the chain, so callers
// must still pass it as the context input (documentVariableName) when calling.
const context = documents.map(doc => doc.pageContent).join('\n\n');
return new QAChain({
llm,
...config
});
}
}
exports.QAChain = QAChain;
//# sourceMappingURL=llm.js.map