@n8n/n8n-nodes-langchain: ChainRetrievalQa.node.js (compiled output)
JavaScript
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ChainRetrievalQa = void 0;
const n8n_workflow_1 = require("n8n-workflow");
const descriptions_1 = require("../../../utils/descriptions");
const ai_utilities_1 = require("@n8n/ai-utilities");
const constants_1 = require("./constants");
const processItem_1 = require("./processItem");
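// Root node implementing a Retrieval QA chain: it accepts a main input plus a
// required language model and retriever connection, and answers questions
// about the documents the retriever returns.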
class ChainRetrievalQa {
constructor() {
this.description = {
displayName: 'Question and Answer Chain',
name: 'chainRetrievalQa',
icon: 'node:question-and-answer-chain',
iconColor: 'black',
group: ['transform'],
version: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
description: 'Answer questions about retrieved documents',
defaults: {
name: 'Question and Answer Chain',
},
codex: {
alias: ['LangChain'],
categories: ['AI'],
subcategories: {
AI: ['Chains', 'Root Nodes'],
},
resources: {
primaryDocumentation: [
{
url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainretrievalqa/',
},
],
},
},
inputs: [
n8n_workflow_1.NodeConnectionTypes.Main,
{
displayName: 'Model',
maxConnections: 1,
type: n8n_workflow_1.NodeConnectionTypes.AiLanguageModel,
required: true,
},
{
displayName: 'Retriever',
maxConnections: 1,
type: n8n_workflow_1.NodeConnectionTypes.AiRetriever,
required: true,
},
],
outputs: [n8n_workflow_1.NodeConnectionTypes.Main],
builderHint: {
inputs: {
ai_languageModel: { required: true },
ai_retriever: { required: true },
},
},
credentials: [],
properties: [
(0, ai_utilities_1.getTemplateNoticeField)(1960),
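// The default query expression changed across node versions:
// v1 reads $json.input, v1.1 reads $json.chat_input, v1.2 reads $json.chatInput.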
{
displayName: 'Query',
name: 'query',
type: 'string',
required: true,
default: '={{ $json.input }}',
displayOptions: {
show: {
'@version': [1],
},
},
},
{
displayName: 'Query',
name: 'query',
type: 'string',
required: true,
default: '={{ $json.chat_input }}',
displayOptions: {
show: {
'@version': [1.1],
},
},
},
{
displayName: 'Query',
name: 'query',
type: 'string',
required: true,
default: '={{ $json.chatInput }}',
displayOptions: {
show: {
'@version': [1.2],
},
},
},
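// From v1.3 onward the query is configured through a promptType selector instead
// of a bare Query field: the deprecated selector applies to v1.3 through v1.6,
// the current one to v1.7 and later.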
{
...descriptions_1.promptTypeOptionsDeprecated,
displayOptions: {
hide: {
'@version': [{ _cnd: { lte: 1.2 } }, { _cnd: { gte: 1.7 } }],
},
},
},
{
...descriptions_1.promptTypeOptions,
displayOptions: {
show: {
'@version': [{ _cnd: { gte: 1.7 } }],
},
},
},
{
...descriptions_1.textFromGuardrailsNode,
displayOptions: {
show: { promptType: ['guardrails'], '@version': [{ _cnd: { gte: 1.4 } }] },
},
},
{
...descriptions_1.textFromPreviousNode,
displayOptions: { show: { promptType: ['auto'], '@version': [{ _cnd: { gte: 1.4 } }] } },
},
{
displayName: 'Prompt (User Message)',
name: 'text',
type: 'string',
required: true,
default: '',
placeholder: 'e.g. Hello, how can you help me?',
typeOptions: {
rows: 2,
},
displayOptions: {
show: {
promptType: ['define'],
},
},
},
{
displayName: 'Options',
name: 'options',
type: 'collection',
default: {},
placeholder: 'Add Option',
options: [
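// Two version-gated variants of the system prompt option: the template key for
// the user's query was renamed in v1.5 (LEGACY_INPUT_TEMPLATE_KEY before,
// INPUT_TEMPLATE_KEY after).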
{
...constants_1.systemPromptOption,
description: `Template string used for the system prompt. This should include the variable \`{context}\` for the provided context. For text completion models, you should also include the variable \`{${constants_1.LEGACY_INPUT_TEMPLATE_KEY}}\` for the user’s query.`,
displayOptions: {
show: {
'@version': [{ _cnd: { lt: 1.5 } }],
},
},
},
{
...constants_1.systemPromptOption,
description: `Template string used for the system prompt. This should include the variable \`{context}\` for the provided context. For text completion models, you should also include the variable \`{${constants_1.INPUT_TEMPLATE_KEY}}\` for the user’s query.`,
displayOptions: {
show: {
'@version': [{ _cnd: { gte: 1.5 } }],
},
},
},
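// Batching controls (batch size, delay between batches) are available from v1.6.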
(0, ai_utilities_1.getBatchingOptionFields)({
show: {
'@version': [{ _cnd: { gte: 1.6 } }],
},
}),
],
},
],
};
}
async execute() {
this.logger.debug('Executing Retrieval QA Chain');
const items = this.getInputData();
const returnData = [];
const batchSize = this.getNodeParameter('options.batching.batchSize', 0, 5);
const delayBetweenBatches = this.getNodeParameter('options.batching.delayBetweenBatches', 0, 0);
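// Defaults: batches of 5 items with no delay. The dotted paths read the values
// nested under the 'options' collection declared in the node properties above.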
if (this.getNode().typeVersion >= 1.6 && batchSize >= 1) {
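// v1.6+ with a valid batch size: process each batch of items in parallel.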
for (let i = 0; i < items.length; i += batchSize) {
const batch = items.slice(i, i + batchSize);
const batchPromises = batch.map(async (_item, batchItemIndex) => {
return await (0, processItem_1.processItem)(this, i + batchItemIndex);
});
const batchResults = await Promise.allSettled(batchPromises);
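// allSettled resolves even when individual items fail, so per-item errors can
// be surfaced without discarding the rest of the batch.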
batchResults.forEach((response, index) => {
if (response.status === 'rejected') {
const error = response.reason;
if (this.continueOnFail()) {
const metadata = (0, n8n_workflow_1.parseErrorMetadata)(error);
returnData.push({
json: { error: error.message },
pairedItem: { item: i + index }, // map back to the item's global index, not its offset within the batch
metadata,
});
return;
}
else {
throw error;
}
}
const output = response.value;
const answer = output.answer;
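// v1.5+ returns the answer directly; older versions wrap it as { text: answer }.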
if (this.getNode().typeVersion >= 1.5) {
returnData.push({ json: { response: answer } });
}
else {
returnData.push({ json: { response: { text: answer } } });
}
});
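// Optional throttle between batches.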
if (i + batchSize < items.length && delayBetweenBatches > 0) {
await (0, n8n_workflow_1.sleep)(delayBetweenBatches);
}
}
}
else {
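// Pre-1.6 versions, or a batch size below 1, fall back to sequential per-item processing.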
for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
try {
const response = await (0, processItem_1.processItem)(this, itemIndex);
const answer = response.answer;
if (this.getNode().typeVersion >= 1.5) {
returnData.push({ json: { response: answer } });
}
else {
returnData.push({ json: { response: { text: answer } } });
}
}
catch (error) {
if (this.continueOnFail()) {
const metadata = (0, n8n_workflow_1.parseErrorMetadata)(error);
returnData.push({
json: { error: error.message },
pairedItem: { item: itemIndex },
metadata,
});
continue;
}
throw error;
}
}
}
return [returnData];
}
}
exports.ChainRetrievalQa = ChainRetrievalQa;
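// For reference, a minimal sketch of the contract this file assumes from
// ./processItem (a hypothetical shape inferred from the call sites above, not
// the actual implementation):
//
//   // Runs the retrieval QA chain for one input item and resolves to an
//   // object whose `answer` property holds the model's reply.
//   async function processItem(ctx, itemIndex) {
//     /* resolve the query, retrieve context, invoke the model */
//     return { answer: '...' };
//   }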
//# sourceMappingURL=ChainRetrievalQa.node.js.map