@n8n/n8n-nodes-langchain
Version:

270 lines • 12.8 kB
JavaScript
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.TextClassifier = void 0;
const output_parsers_1 = require("@langchain/classic/output_parsers");
const n8n_workflow_1 = require("n8n-workflow");
const zod_1 = require("zod");
const ai_utilities_1 = require("@n8n/ai-utilities");
const processItem_1 = require("./processItem");
// Default system prompt; "{categories}" is interpolated with the category list
// and formatting instructions are appended downstream.
const SYSTEM_PROMPT_TEMPLATE = "Please classify the text provided by the user into one of the following categories: {categories}, and use the provided formatting instructions below. Don't explain, and only output the json.";
// Builds the node's output ports from its parameters: one "main" output per
// configured category, plus a trailing 'Other' port when the fallback option
// is set to 'other'. NOTE: this function is stringified into the `outputs`
// expression below, so it must remain fully self-contained.
const configuredOutputs = (parameters) => {
const categoryList = parameters.categories?.categories ?? [];
const outputs = categoryList.map(({ category }) => ({ type: 'main', displayName: category }));
if (parameters.options?.fallback === 'other') {
outputs.push({ type: 'main', displayName: 'Other' });
}
return outputs;
};
// n8n node that asks a connected LLM to classify each input item's text and
// routes the item to one output branch per configured category (plus an
// optional 'Other' branch when the fallback option is 'other').
class TextClassifier {
constructor() {
// Static node description consumed by the n8n editor/runtime: UI metadata,
// connection layout, and all user-configurable properties.
this.description = {
displayName: 'Text Classifier',
name: 'textClassifier',
icon: 'node:text-classifier',
iconColor: 'black',
group: ['transform'],
// Two versions: 1.1 adds the batching option (see the @version display
// condition on the batching fields below).
version: [1, 1.1],
description: 'Classify your text into distinct categories',
codex: {
categories: ['AI'],
subcategories: {
AI: ['Chains', 'Root Nodes'],
},
resources: {
primaryDocumentation: [
{
url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.text-classifier/',
},
],
},
},
defaults: {
name: 'Text Classifier',
},
// One main data input plus a required language-model sub-node connection.
inputs: [
{ displayName: '', type: n8n_workflow_1.NodeConnectionTypes.Main },
{
displayName: 'Model',
maxConnections: 1,
type: n8n_workflow_1.NodeConnectionTypes.AiLanguageModel,
required: true,
},
],
// Dynamic outputs: the configuredOutputs function is stringified into an
// n8n expression and re-evaluated against the current $parameter values,
// so the number of output ports tracks the configured categories.
outputs: `={{(${configuredOutputs})($parameter)}}`,
builderHint: {
inputs: {
ai_languageModel: { required: true },
},
message: 'Each category defined creates a separate output branch. Output 0 corresponds to the first category, output 1 to the second, and so on. Use .output(index).to() to connect from a specific category. @example textClassifier.output(0).to(nodeA) and textClassifier.output(1).to(nodeB)',
},
properties: [
{
displayName: 'Text to Classify',
name: 'inputText',
type: 'string',
required: true,
default: '',
description: 'Use an expression to reference data in previous nodes or enter static text',
typeOptions: {
rows: 2,
},
},
// Repeatable category list; each entry becomes one output branch and one
// boolean field in the structured-output schema built in execute().
{
displayName: 'Categories',
name: 'categories',
placeholder: 'Add Category',
type: 'fixedCollection',
default: {},
typeOptions: {
multipleValues: true,
},
options: [
{
name: 'categories',
displayName: 'Categories',
values: [
{
displayName: 'Category',
name: 'category',
type: 'string',
default: '',
description: 'Category to add',
required: true,
},
{
displayName: 'Description',
name: 'description',
type: 'string',
default: '',
description: "Describe your category if it's not obvious",
},
],
},
],
},
{
displayName: 'Options',
name: 'options',
type: 'collection',
default: {},
placeholder: 'Add Option',
options: [
{
displayName: 'Allow Multiple Classes To Be True',
name: 'multiClass',
type: 'boolean',
default: false,
},
{
displayName: 'When No Clear Match',
name: 'fallback',
type: 'options',
default: 'discard',
description: 'What to do with items that don’t match the categories exactly',
options: [
{
name: 'Discard Item',
value: 'discard',
description: 'Ignore the item and drop it from the output',
},
{
name: "Output on Extra, 'Other' Branch",
value: 'other',
description: "Create a separate output branch called 'Other'",
},
],
},
{
displayName: 'System Prompt Template',
name: 'systemPromptTemplate',
type: 'string',
default: SYSTEM_PROMPT_TEMPLATE,
description: 'String to use directly as the system prompt template',
typeOptions: {
rows: 6,
},
},
// NOTE(review): declared default is true, but execute() reads this via the
// 'options' collection, which only contains values the user explicitly set
// — see the hedged note in execute(); confirm against n8n's parameter
// resolution before relying on the default.
{
displayName: 'Enable Auto-Fixing',
name: 'enableAutoFixing',
type: 'boolean',
default: true,
description: 'Whether to enable auto-fixing (may trigger an additional LLM call if output is broken)',
},
// Batching fields are only shown from node version 1.1 onward.
(0, ai_utilities_1.getBatchingOptionFields)({
show: {
'@version': [{ _cnd: { gte: 1.1 } }],
},
}),
],
},
],
};
}
// Classifies every input item via the connected LLM and returns one item
// array per output branch (categories in order, then the optional 'Other'
// bucket). With multiClass enabled a single item may appear on several
// branches. Throws NodeOperationError when no categories are configured.
async execute() {
const items = this.getInputData();
// Batching parameters fall back to 5 / 0ms when not set (or on version < 1.1,
// where the fields are hidden).
const batchSize = this.getNodeParameter('options.batching.batchSize', 0, 5);
const delayBetweenBatches = this.getNodeParameter('options.batching.delayBetweenBatches', 0, 0);
const llm = (await this.getInputConnectionData(n8n_workflow_1.NodeConnectionTypes.AiLanguageModel, 0));
const categories = this.getNodeParameter('categories.categories', 0, []);
if (categories.length === 0) {
throw new n8n_workflow_1.NodeOperationError(this.getNode(), 'At least one category must be defined');
}
const options = this.getNodeParameter('options', 0, {});
const multiClass = options?.multiClass ?? false;
const fallback = options?.fallback ?? 'discard';
// Build a zod schema with one boolean per category; the category description
// is embedded in the field description to guide the model.
const schemaEntries = categories.map((cat) => [
cat.category,
zod_1.z
.boolean()
.describe(`Should be true if the input has category "${cat.category}" (description: ${cat.description})`),
]);
// The 'fallback' flag is only part of the schema when an 'Other' branch exists.
if (fallback === 'other')
schemaEntries.push([
'fallback',
zod_1.z.boolean().describe('Should be true if none of the other categories apply'),
]);
const schema = zod_1.z.object(Object.fromEntries(schemaEntries));
const structuredParser = output_parsers_1.StructuredOutputParser.fromZodSchema(schema);
// NOTE(review): options.enableAutoFixing is undefined unless the user opened
// the Options collection, so despite the UI default of true this appears to
// pick the plain structured parser by default — confirm intended behavior.
const parser = options.enableAutoFixing
? output_parsers_1.OutputFixingParser.fromLLM(llm, structuredParser)
: structuredParser;
const multiClassPrompt = multiClass
? 'Categories are not mutually exclusive, and multiple can be true'
: 'Categories are mutually exclusive, and only one can be true';
const fallbackPrompt = {
other: 'If no categories apply, select the "fallback" option.',
discard: 'If there is not a very fitting category, select none of the categories.',
}[fallback];
// One result bucket per category output, plus a trailing bucket for 'Other'.
const returnData = Array.from({ length: categories.length + (fallback === 'other' ? 1 : 0) }, (_) => []);
// Version >= 1.1 with batchSize > 1: classify items in parallel batches.
if (this.getNode().typeVersion >= 1.1 && batchSize > 1) {
for (let i = 0; i < items.length; i += batchSize) {
const batch = items.slice(i, i + batchSize);
const batchPromises = batch.map(async (_item, batchItemIndex) => {
const itemIndex = i + batchItemIndex;
const item = items[itemIndex];
return await (0, processItem_1.processItem)(this, itemIndex, item, llm, parser, categories, multiClassPrompt, fallbackPrompt);
});
// allSettled so one failed item doesn't abort the batch when continueOnFail is on.
const batchResults = await Promise.allSettled(batchPromises);
batchResults.forEach((response, batchItemIndex) => {
const index = i + batchItemIndex;
if (response.status === 'rejected') {
const error = response.reason;
if (this.continueOnFail()) {
// Failed items are emitted on the FIRST category output with an error payload.
returnData[0].push({
json: { error: error.message },
pairedItem: { item: index },
});
return;
}
else {
throw new n8n_workflow_1.NodeOperationError(this.getNode(), error.message);
}
}
else {
// Route the item to every category the model flagged true.
const output = response.value;
const item = items[index];
categories.forEach((cat, idx) => {
if (output[cat.category])
returnData[idx].push(item);
});
if (fallback === 'other' && output.fallback)
returnData[returnData.length - 1].push(item);
}
});
// Optional throttle between batches (skipped after the last batch).
if (i + batchSize < items.length && delayBetweenBatches > 0) {
await (0, n8n_workflow_1.sleep)(delayBetweenBatches);
}
}
}
else {
// Sequential path: version 1 nodes, or batching disabled.
for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
const item = items[itemIndex];
try {
const output = await (0, processItem_1.processItem)(this, itemIndex, item, llm, parser, categories, multiClassPrompt, fallbackPrompt);
categories.forEach((cat, idx) => {
if (output[cat.category])
returnData[idx].push(item);
});
if (fallback === 'other' && output.fallback)
returnData[returnData.length - 1].push(item);
}
catch (error) {
if (this.continueOnFail()) {
// Same convention as the batched path: error item goes to output 0.
returnData[0].push({
json: { error: error.message },
pairedItem: { item: itemIndex },
});
continue;
}
throw error;
}
}
}
return returnData;
}
}
exports.TextClassifier = TextClassifier;
//# sourceMappingURL=TextClassifier.node.js.map