UNPKG

@n8n/n8n-nodes-langchain

Version:

![Banner image](https://user-images.githubusercontent.com/10284570/173569848-c624317f-42b1-45a6-ab09-f0ea3c247648.png)

341 lines 17.4 kB
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.SentimentAnalysis = void 0;
const messages_1 = require("@langchain/core/messages");
const prompts_1 = require("@langchain/core/prompts");
const output_parsers_1 = require("@langchain/classic/output_parsers");
const n8n_workflow_1 = require("n8n-workflow");
const zod_1 = require("zod");
const ai_utilities_1 = require("@n8n/ai-utilities");
const tracing_1 = require("../../../utils/tracing");

// System prompt used when the user supplies none. `{categories}` and
// `{format_instructions}` are LangChain prompt variables filled at runtime.
const DEFAULT_SYSTEM_PROMPT_TEMPLATE = 'You are highly intelligent and accurate sentiment analyzer. Analyze the sentiment of the provided text. Categorize it into one of the following: {categories}. Use the provided formatting instructions. Only output the JSON.';
const DEFAULT_CATEGORIES = 'Positive, Neutral, Negative';

/**
 * Builds the node's output port list: one "main" output per configured
 * sentiment category.
 *
 * NOTE: this function's source text is serialized into the node description's
 * `outputs` expression (see below), so it must remain a self-contained arrow
 * function with no references to surrounding module scope.
 */
const configuredOutputs = (parameters, defaultCategories) => {
    const options = (parameters?.options ?? {});
    const categories = options?.categories ?? defaultCategories;
    const categoriesArray = categories.split(',').map((cat) => cat.trim());
    const ret = categoriesArray.map((cat) => ({ type: 'main', displayName: cat }));
    return ret;
};

/**
 * Analyzes the sentiment of a single input item. Shared by the batched and
 * the sequential execution paths of `execute` (previously duplicated inline).
 *
 * @param ctx        execution context (the `this` of `execute`)
 * @param llm        connected language model
 * @param item       the input item being processed
 * @param itemIndex  absolute index of the item in the run
 * @param returnData shared output buckets; lazily initialized here with one
 *                   empty bucket per category on the first validated item
 * @returns `{ result, error }` where `result` is
 *   - `{ resultItem, sentimentIndex }` when the LLM returned a known category,
 *   - `{}` when the LLM returned a category outside the configured list
 *     (the item is then dropped, as before),
 *   - `null` when an error occurred (`error` is then a NodeOperationError).
 */
async function analyzeItemSentiment(ctx, llm, item, itemIndex, returnData) {
    const sentimentCategories = ctx.getNodeParameter('options.categories', itemIndex, DEFAULT_CATEGORIES);
    const categories = sentimentCategories
        .split(',')
        .map((cat) => cat.trim())
        .filter(Boolean);
    if (categories.length === 0) {
        return {
            result: null,
            error: new n8n_workflow_1.NodeOperationError(ctx.getNode(), 'No sentiment categories provided', {
                itemIndex,
            }),
        };
    }
    // One output bucket per category, created on the first item that passes
    // validation ("categories" is noDataExpression, so the list is identical
    // for every item of the run).
    if (returnData.length === 0) {
        returnData.push(...Array.from({ length: categories.length }, () => []));
    }
    const options = ctx.getNodeParameter('options', itemIndex, {});
    const schema = zod_1.z.object({
        sentiment: zod_1.z.enum(categories),
        strength: zod_1.z
            .number()
            .min(0)
            .max(1)
            .describe('Strength score for sentiment in relation to the category'),
        confidence: zod_1.z.number().min(0).max(1),
    });
    const structuredParser = output_parsers_1.StructuredOutputParser.fromZodSchema(schema);
    // Optionally wrap the parser so malformed LLM output triggers one extra
    // repair call instead of failing outright.
    const parser = options.enableAutoFixing
        ? output_parsers_1.OutputFixingParser.fromLLM(llm, structuredParser)
        : structuredParser;
    // Double every brace so user-provided template text cannot inject prompt
    // variables, then restore the one `{categories}` placeholder it may use.
    const escapedTemplate = (options.systemPromptTemplate ?? DEFAULT_SYSTEM_PROMPT_TEMPLATE)
        .replace(/[{}]/g, (match) => match + match)
        .replaceAll('{{categories}}', '{categories}');
    const systemPromptTemplate = prompts_1.SystemMessagePromptTemplate.fromTemplate(`${escapedTemplate} {format_instructions}`);
    const input = ctx.getNodeParameter('inputText', itemIndex);
    const inputPrompt = new messages_1.HumanMessage(input);
    const messages = [
        await systemPromptTemplate.format({
            categories: sentimentCategories,
            format_instructions: parser.getFormatInstructions(),
        }),
        inputPrompt,
    ];
    const prompt = prompts_1.ChatPromptTemplate.fromMessages(messages);
    const chain = prompt.pipe(llm).pipe(parser).withConfig((0, tracing_1.getTracingConfig)(ctx));
    try {
        const output = await chain.invoke(messages);
        // Match case-insensitively so e.g. "positive" still maps to "Positive".
        const sentimentIndex = categories.findIndex((s) => s.toLowerCase() === output.sentiment.toLowerCase());
        if (sentimentIndex === -1) {
            return { result: {} };
        }
        const resultItem = { ...item };
        const sentimentAnalysis = {
            category: output.sentiment,
        };
        if (options.includeDetailedResults) {
            sentimentAnalysis.strength = output.strength;
            sentimentAnalysis.confidence = output.confidence;
        }
        resultItem.json = {
            ...resultItem.json,
            sentimentAnalysis,
        };
        return { result: { resultItem, sentimentIndex } };
    }
    catch (error) {
        return {
            result: null,
            error: new n8n_workflow_1.NodeOperationError(ctx.getNode(), 'Error during parsing of LLM output, please check your LLM model and configuration', {
                itemIndex,
            }),
        };
    }
}

/**
 * n8n root node that classifies input text into configurable sentiment
 * categories using a connected chat model, routing each item to the output
 * port that matches its category.
 */
class SentimentAnalysis {
    constructor() {
        this.description = {
            displayName: 'Sentiment Analysis',
            name: 'sentimentAnalysis',
            icon: 'node:sentiment-analysis',
            iconColor: 'black',
            group: ['transform'],
            version: [1, 1.1],
            description: 'Analyze the sentiment of your text',
            codex: {
                categories: ['AI'],
                subcategories: {
                    AI: ['Chains', 'Root Nodes'],
                },
                resources: {
                    primaryDocumentation: [
                        {
                            url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.sentimentanalysis/',
                        },
                    ],
                },
            },
            defaults: {
                name: 'Sentiment Analysis',
            },
            inputs: [
                { displayName: '', type: n8n_workflow_1.NodeConnectionTypes.Main },
                {
                    displayName: 'Model',
                    maxConnections: 1,
                    type: n8n_workflow_1.NodeConnectionTypes.AiLanguageModel,
                    required: true,
                },
            ],
            // The output ports depend on the configured categories, so the
            // `configuredOutputs` arrow function is serialized into this n8n
            // expression and re-evaluated against the node's parameters.
            outputs: `={{(${configuredOutputs})($parameter, "${DEFAULT_CATEGORIES}")}}`,
            builderHint: {
                inputs: {
                    ai_languageModel: { required: true },
                },
            },
            properties: [
                {
                    displayName: 'Text to Analyze',
                    name: 'inputText',
                    type: 'string',
                    required: true,
                    default: '',
                    description: 'Use an expression to reference data in previous nodes or enter static text',
                    typeOptions: {
                        rows: 2,
                    },
                },
                {
                    displayName: 'Sentiment scores are LLM-generated estimates, not statistically rigorous measurements. They may be inconsistent across runs and should be used as rough indicators only.',
                    name: 'detailedResultsNotice',
                    type: 'notice',
                    default: '',
                    displayOptions: {
                        show: {
                            '/options.includeDetailedResults': [true],
                        },
                    },
                },
                {
                    displayName: 'Options',
                    name: 'options',
                    type: 'collection',
                    default: {},
                    placeholder: 'Add Option',
                    options: [
                        {
                            displayName: 'Sentiment Categories',
                            name: 'categories',
                            type: 'string',
                            default: DEFAULT_CATEGORIES,
                            description: 'A comma-separated list of categories to analyze',
                            noDataExpression: true,
                            typeOptions: {
                                rows: 2,
                            },
                        },
                        {
                            displayName: 'System Prompt Template',
                            name: 'systemPromptTemplate',
                            type: 'string',
                            default: DEFAULT_SYSTEM_PROMPT_TEMPLATE,
                            description: 'String to use directly as the system prompt template',
                            typeOptions: {
                                rows: 6,
                            },
                        },
                        {
                            displayName: 'Include Detailed Results',
                            name: 'includeDetailedResults',
                            type: 'boolean',
                            default: false,
                            description: 'Whether to include sentiment strength and confidence scores in the output',
                        },
                        {
                            displayName: 'Enable Auto-Fixing',
                            name: 'enableAutoFixing',
                            type: 'boolean',
                            default: true,
                            description: 'Whether to enable auto-fixing (may trigger an additional LLM call if output is broken)',
                        },
                        (0, ai_utilities_1.getBatchingOptionFields)({
                            show: {
                                '@version': [{ _cnd: { gte: 1.1 } }],
                            },
                        }),
                    ],
                },
            ],
        };
    }
    /**
     * Runs sentiment analysis over all input items. From typeVersion 1.1 the
     * node processes items in parallel batches with an optional delay between
     * batches; earlier versions (or batchSize <= 1) run strictly sequentially.
     *
     * @returns one array of items per configured category; with
     *          continueOnFail, failed items are reported on the first output
     */
    async execute() {
        const items = this.getInputData();
        const llm = (await this.getInputConnectionData(n8n_workflow_1.NodeConnectionTypes.AiLanguageModel, 0));
        const returnData = [];
        const batchSize = this.getNodeParameter('options.batching.batchSize', 0, 5);
        const delayBetweenBatches = this.getNodeParameter('options.batching.delayBetweenBatches', 0, 0);
        // Routes one per-item outcome into `returnData`, honoring continueOnFail.
        const collect = ({ result, itemIndex, error }) => {
            if (error) {
                if (!this.continueOnFail()) {
                    throw error;
                }
                // Fix: the first item can fail before any output bucket exists
                // (e.g. empty category list), in which case `returnData[0]` was
                // undefined and the push below crashed with a TypeError.
                if (returnData.length === 0) {
                    returnData.push([]);
                }
                const executionErrorData = this.helpers.constructExecutionMetaData(this.helpers.returnJsonArray({ error: error.message }), { itemData: { item: itemIndex } });
                returnData[0].push(...executionErrorData);
                return;
            }
            if (result?.resultItem && result.sentimentIndex !== -1) {
                returnData[result.sentimentIndex].push(result.resultItem);
            }
        };
        if (this.getNode().typeVersion >= 1.1 && batchSize > 1) {
            // Batched path: analyze up to `batchSize` items concurrently.
            for (let i = 0; i < items.length; i += batchSize) {
                const batch = items.slice(i, i + batchSize);
                const outcomes = await Promise.all(batch.map(async (item, batchItemIndex) => {
                    const itemIndex = i + batchItemIndex;
                    const outcome = await analyzeItemSentiment(this, llm, item, itemIndex, returnData);
                    return { ...outcome, itemIndex };
                }));
                for (const outcome of outcomes) {
                    collect(outcome);
                }
                if (i + batchSize < items.length && delayBetweenBatches > 0) {
                    await (0, n8n_workflow_1.sleep)(delayBetweenBatches);
                }
            }
        }
        else {
            // Sequential path (typeVersion 1, or batching disabled).
            for (let i = 0; i < items.length; i++) {
                try {
                    const outcome = await analyzeItemSentiment(this, llm, items[i], i, returnData);
                    collect({ ...outcome, itemIndex: i });
                }
                catch (error) {
                    // Unexpected failures (e.g. parameter resolution) get the
                    // same continueOnFail treatment the sequential path always
                    // had; `collect` rethrows when continueOnFail is off.
                    collect({ result: null, itemIndex: i, error });
                }
            }
        }
        return returnData;
    }
}
exports.SentimentAnalysis = SentimentAnalysis;
//# sourceMappingURL=SentimentAnalysis.node.js.map