UNPKG

n8n-nodes-sap-ai-core

Version:

n8n nodes for SAP AI Core LLM and embeddings integration

405 lines 21.8 kB
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.logWrapper = logWrapper; exports.createSapAiCoreLogger = createSapAiCoreLogger; exports.withPerformanceLogging = withPerformanceLogging; const n8n_workflow_1 = require("n8n-workflow"); const helpers_1 = require("./helpers"); // Import embeddings and other LangChain types // Note: Adjust imports based on your LangChain version const embeddings_1 = require("@langchain/core/embeddings"); const retrievers_1 = require("@langchain/core/retrievers"); const vectorstores_1 = require("@langchain/core/vectorstores"); const textsplitters_1 = require("@langchain/textsplitters"); /** * Call method asynchronously with error handling */ async function callMethodAsync(parameters) { const { method, arguments: args, executeFunctions, connectionType, currentNodeRunIndex, target } = parameters; try { // Use target as 'this' context if provided, otherwise use null return await method.apply(target || null, args); } catch (e) { const connectedNode = executeFunctions.getNode(); const error = new n8n_workflow_1.NodeOperationError(connectedNode, e, { functionality: 'configuration-node' }); if ('addOutputData' in executeFunctions) { executeFunctions.addOutputData(connectionType, currentNodeRunIndex, error); } if (error.message) { if (!error.description) { error.description = error.message; } throw error; } throw new n8n_workflow_1.NodeOperationError(connectedNode, `Error on node "${connectedNode.name}" which is connected via input "${connectionType}"`); } } /** * Call method synchronously with error handling */ function callMethodSync(parameters) { const { method, arguments: args, executeFunctions, connectionType, currentNodeRunIndex } = parameters; try { return method.apply(null, args); } catch (e) { const connectedNode = executeFunctions.getNode(); const error = new n8n_workflow_1.NodeOperationError(connectedNode, e); if ('addOutputData' in executeFunctions) { executeFunctions.addOutputData(connectionType, currentNodeRunIndex, error); } throw new n8n_workflow_1.NodeOperationError(connectedNode, `Error on node "${connectedNode.name}" which is connected via input "${connectionType}"`); } } /** * Enhanced log wrapper with comprehensive monitoring */ function logWrapper(originalInstance, executeFunctions) { return new Proxy(originalInstance, { get: (target, prop) => { let connectionType; // Memory handling if ((0, helpers_1.isBaseChatMemory)(originalInstance)) { if (prop === 'loadMemoryVariables' && 'loadMemoryVariables' in target) { return async (values) => { var _a; connectionType = "ai_memory" /* NodeConnectionType.AiMemory */; if ('addInputData' in executeFunctions) { const { index } = executeFunctions.addInputData(connectionType, [ [{ json: { action: 'loadMemoryVariables', values } }] ]); const response = await callMethodAsync.call(target, { executeFunctions: executeFunctions, connectionType, currentNodeRunIndex: index, method: target[prop], arguments: [values] }); const chatHistory = (_a = response === null || response === void 0 ? void 0 : response.chat_history) !== null && _a !== void 0 ? _a : response; executeFunctions.addOutputData(connectionType, index, [ [{ json: { action: 'loadMemoryVariables', chatHistory } }] ]); return response; } // Fallback without logging return await target[prop].call(target, values); }; } else if (prop === 'saveContext' && 'saveContext' in target) { return async (input, output) => { connectionType = "ai_memory" /* NodeConnectionType.AiMemory */; if ('addInputData' in executeFunctions) { const { index } = executeFunctions.addInputData(connectionType, [ [{ json: { action: 'saveContext', input, output } }] ]); const response = await callMethodAsync.call(target, { executeFunctions: executeFunctions, connectionType, currentNodeRunIndex: index, method: target[prop], arguments: [input, output] }); const chatHistory = await target.chatHistory.getMessages(); executeFunctions.addOutputData(connectionType, index, [ [{ json: { action: 'saveContext', chatHistory } }] ]); return response; } // Fallback without logging return await target[prop].call(target, input, output); }; } } // Chat message history handling if ((0, helpers_1.isBaseChatMessageHistory)(originalInstance)) { if (prop === 'getMessages' && 'getMessages' in target) { return async () => { connectionType = "ai_memory" /* NodeConnectionType.AiMemory */; if ('addInputData' in executeFunctions) { const { index } = executeFunctions.addInputData(connectionType, [ [{ json: { action: 'getMessages' } }] ]); const response = await callMethodAsync.call(target, { executeFunctions: executeFunctions, connectionType, currentNodeRunIndex: index, method: target[prop], arguments: [] }); const payload = { action: 'getMessages', response }; executeFunctions.addOutputData(connectionType, index, [[{ json: payload }]]); (0, helpers_1.logAiEvent)(executeFunctions, 'ai-messages-retrieved-from-memory', { response }); return response; } // Fallback without logging return await target[prop].call(target); }; } else if (prop === 'addMessage' && 'addMessage' in target) { return async (message) => { connectionType = "ai_memory" /* NodeConnectionType.AiMemory */; const payload = { action: 'addMessage', message }; if ('addInputData' in executeFunctions) { const { index } = executeFunctions.addInputData(connectionType, [[{ json: payload }]]); await callMethodAsync.call(target, { executeFunctions: executeFunctions, connectionType, currentNodeRunIndex: index, method: target[prop], arguments: [message] }); (0, helpers_1.logAiEvent)(executeFunctions, 'ai-message-added-to-memory', { message }); executeFunctions.addOutputData(connectionType, index, [[{ json: payload }]]); } else { // Fallback without logging await target[prop].call(target, message); } }; } } // Retriever handling if (originalInstance instanceof retrievers_1.BaseRetriever) { if (prop === 'getRelevantDocuments' && 'getRelevantDocuments' in target) { return async (query, config) => { var _a, _b, _c, _d; connectionType = "ai_retriever" /* NodeConnectionType.AiRetriever */; if ('addInputData' in executeFunctions) { const { index } = executeFunctions.addInputData(connectionType, [ [{ json: { query, config } }] ]); const response = await callMethodAsync.call(target, { executeFunctions: executeFunctions, connectionType, currentNodeRunIndex: index, method: target[prop], arguments: [query, config] }); const executionId = (_b = (_a = response[0]) === null || _a === void 0 ? void 0 : _a.metadata) === null || _b === void 0 ? void 0 : _b.executionId; const workflowId = (_d = (_c = response[0]) === null || _c === void 0 ? void 0 : _c.metadata) === null || _d === void 0 ? void 0 : _d.workflowId; const metadata = {}; if (executionId && workflowId) { metadata.subExecution = { executionId, workflowId }; } (0, helpers_1.logAiEvent)(executeFunctions, 'ai-documents-retrieved', { query }); executeFunctions.addOutputData(connectionType, index, [[{ json: { response } }]], metadata); return response; } // Fallback without logging return await target[prop].call(target, query, config); }; } } // Embeddings handling if (originalInstance instanceof embeddings_1.Embeddings) { if (prop === 'embedDocuments' && 'embedDocuments' in target) { return async (documents) => { connectionType = "ai_embedding" /* NodeConnectionType.AiEmbedding */; if ('addInputData' in executeFunctions) { const { index } = executeFunctions.addInputData(connectionType, [ [{ json: { documents } }] ]); const response = await callMethodAsync({ executeFunctions: executeFunctions, connectionType, currentNodeRunIndex: index, method: target[prop], arguments: [documents], target: target }); (0, helpers_1.logAiEvent)(executeFunctions, 'ai-document-embedded'); executeFunctions.addOutputData(connectionType, index, [[{ json: { response } }]]); return response; } // Fallback without logging return await target[prop].call(target, documents); }; } if (prop === 'embedQuery' && 'embedQuery' in target) { return async (query) => { connectionType = "ai_embedding" /* NodeConnectionType.AiEmbedding */; if ('addInputData' in executeFunctions) { const { index } = executeFunctions.addInputData(connectionType, [ [{ json: { query } }] ]); const response = await callMethodAsync({ executeFunctions: executeFunctions, connectionType, currentNodeRunIndex: index, method: target[prop], arguments: [query], target: target }); (0, helpers_1.logAiEvent)(executeFunctions, 'ai-query-embedded'); executeFunctions.addOutputData(connectionType, index, [[{ json: { response } }]]); return response; } // Fallback without logging return await target[prop].call(target, query); }; } } // Document compressor handling if ('compressDocuments' in originalInstance) { if (prop === 'compressDocuments' && 'compressDocuments' in target) { return async (documents, query) => { // Use a custom connection type since AiReranker might not exist connectionType = 'ai-document-compressor'; if ('addInputData' in executeFunctions) { const { index } = executeFunctions.addInputData(connectionType, [ [{ json: { query, documents } }] ]); const response = await callMethodAsync.call(target, { executeFunctions: executeFunctions, connectionType, currentNodeRunIndex: index, method: target[prop], arguments: [(0, n8n_workflow_1.deepCopy)(documents), query] }); (0, helpers_1.logAiEvent)(executeFunctions, 'ai-document-reranked', { query }); executeFunctions.addOutputData(connectionType, index, [[{ json: { response } }]]); return response; } // Fallback without logging return await target[prop].call(target, (0, n8n_workflow_1.deepCopy)(documents), query); }; } } // Text splitter handling if (originalInstance instanceof textsplitters_1.TextSplitter) { if (prop === 'splitText' && 'splitText' in target) { return async (text) => { connectionType = "ai_textSplitter" /* NodeConnectionType.AiTextSplitter */; if ('addInputData' in executeFunctions) { const { index } = executeFunctions.addInputData(connectionType, [ [{ json: { textSplitter: text } }] ]); const response = await callMethodAsync.call(target, { executeFunctions: executeFunctions, connectionType, currentNodeRunIndex: index, method: target[prop], arguments: [text] }); (0, helpers_1.logAiEvent)(executeFunctions, 'ai-text-split'); executeFunctions.addOutputData(connectionType, index, [[{ json: { response } }]]); return response; } // Fallback without logging return await target[prop].call(target, text); }; } } // Tool execution handling if ((0, helpers_1.isToolsInstance)(originalInstance)) { if (prop === '_call' && '_call' in target) { return async (query) => { var _a; connectionType = "ai_tool" /* NodeConnectionType.AiTool */; const inputData = { query }; if ((_a = target.metadata) === null || _a === void 0 ? void 0 : _a.isFromToolkit) { inputData.tool = { name: target.name, description: target.description }; } if ('addInputData' in executeFunctions) { const { index } = executeFunctions.addInputData(connectionType, [ [{ json: inputData }] ]); const response = await callMethodAsync.call(target, { executeFunctions: executeFunctions, connectionType, currentNodeRunIndex: index, method: target[prop], arguments: [query] }); (0, helpers_1.logAiEvent)(executeFunctions, 'ai-tool-called', { ...inputData, response }); executeFunctions.addOutputData(connectionType, index, [[{ json: { response } }]]); return typeof response === 'string' ? response : JSON.stringify(response); } // Fallback without logging const response = await target[prop].call(target, query); return typeof response === 'string' ? response : JSON.stringify(response); }; } } // Vector store handling if (originalInstance instanceof vectorstores_1.VectorStore) { if (prop === 'similaritySearch' && 'similaritySearch' in target) { return async (query, k, filter, _callbacks) => { connectionType = "ai_vectorStore" /* NodeConnectionType.AiVectorStore */; if ('addInputData' in executeFunctions) { const { index } = executeFunctions.addInputData(connectionType, [ [{ json: { query, k, filter } }] ]); const response = await callMethodAsync.call(target, { executeFunctions: executeFunctions, connectionType, currentNodeRunIndex: index, method: target[prop], arguments: [query, k, filter, _callbacks] }); (0, helpers_1.logAiEvent)(executeFunctions, 'ai-vector-store-searched', { query }); executeFunctions.addOutputData(connectionType, index, [[{ json: { response } }]]); return response; } // Fallback without logging return await target[prop].call(target, query, k, filter, _callbacks); }; } } return target[prop]; } }); } /** * Create a logging wrapper specifically for SAP AI Core components */ function createSapAiCoreLogger(component, context) { return logWrapper(component, context); } /** * Performance logging utility for SAP AI Core */ function withPerformanceLogging(fn, context, operationName) { return ((...args) => { const startTime = Date.now(); const nodeName = context.getNode().name; try { const result = fn(...args); // Handle promises if (result && typeof result.then === 'function') { return result.finally(() => { const duration = Date.now() - startTime; (0, helpers_1.logAiEvent)(context, 'sap-ai-core-performance', { operation: operationName, duration, node: nodeName }); }); } const duration = Date.now() - startTime; (0, helpers_1.logAiEvent)(context, 'sap-ai-core-performance', { operation: operationName, duration, node: nodeName }); return result; } catch (error) { const duration = Date.now() - startTime; (0, helpers_1.logAiEvent)(context, 'sap-ai-core-error', { operation: operationName, duration, error: error instanceof Error ? error.message : String(error), node: nodeName }); throw error; } }); } //# sourceMappingURL=logWrapper.js.map