n8n-nodes-aimlapi

A custom n8n node for integrating with the AI/ML API platform (AIMLAPI). It exposes LLMs and multimodal models through a set of operations: chat completion, image, audio, and video generation, speech synthesis and transcription, and embedding generation.
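For orientation, here is a minimal sketch of the kind of request the node's chatCompletion operation ultimately issues. It assumes AIMLAPI's OpenAI-compatible REST interface; the base URL, endpoint path, and bearer-token header are assumptions in this sketch (in the node itself, the URL and key come from the 'aimlApi' credentials).

// Hedged sketch, not the package's code: a direct chat completion call
// against an assumed OpenAI-compatible AIMLAPI endpoint.
const baseUrl = 'https://api.aimlapi.com/v1'; // assumed default; configurable in the node's credentials
const apiKey = process.env.AIMLAPI_KEY ?? '';

async function chatCompletion(model: string, prompt: string): Promise<string> {
    const response = await fetch(`${baseUrl}/chat/completions`, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            Authorization: `Bearer ${apiKey}`,
        },
        body: JSON.stringify({
            model,
            messages: [{ role: 'user', content: prompt }],
        }),
    });
    if (!response.ok) {
        throw new Error(`AIMLAPI request failed: ${response.status}`);
    }
    const data = await response.json();
    return data.choices[0].message.content;
}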

AimlApi.node.js:
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.AimlApi = void 0; const n8n_workflow_1 = require("n8n-workflow"); const chatCompletion_description_1 = require("./operations/chatCompletion.description"); const imageGeneration_description_1 = require("./operations/imageGeneration.description"); const audioGeneration_description_1 = require("./operations/audioGeneration.description"); const videoGeneration_description_1 = require("./operations/videoGeneration.description"); const speechSynthesis_description_1 = require("./operations/speechSynthesis.description"); const speechTranscription_description_1 = require("./operations/speechTranscription.description"); const embeddingGeneration_description_1 = require("./operations/embeddingGeneration.description"); const chatCompletion_execute_1 = require("./operations/chatCompletion.execute"); const imageGeneration_execute_1 = require("./operations/imageGeneration.execute"); const audioGeneration_execute_1 = require("./operations/audioGeneration.execute"); const videoGeneration_execute_1 = require("./operations/videoGeneration.execute"); const speechSynthesis_execute_1 = require("./operations/speechSynthesis.execute"); const speechTranscription_execute_1 = require("./operations/speechTranscription.execute"); const embeddingGeneration_execute_1 = require("./operations/embeddingGeneration.execute"); const models_1 = require("./utils/models"); const operationLabels = { chatCompletion: 'Chat Completion', imageGeneration: 'Image Generation', audioGeneration: 'Audio Generation', videoGeneration: 'Video Generation', speechSynthesis: 'Speech Synthesis', speechTranscription: 'Speech Transcription', embeddingGeneration: 'Embedding Generation', }; const operationExecutors = { chatCompletion: chatCompletion_execute_1.executeChatCompletion, imageGeneration: imageGeneration_execute_1.executeImageGeneration, audioGeneration: audioGeneration_execute_1.executeAudioGeneration, videoGeneration: videoGeneration_execute_1.executeVideoGeneration, speechSynthesis: speechSynthesis_execute_1.executeSpeechSynthesis, speechTranscription: speechTranscription_execute_1.executeSpeechTranscription, embeddingGeneration: embeddingGeneration_execute_1.executeEmbeddingGeneration, }; const operationSpecificProperties = [ ...chatCompletion_description_1.chatCompletionProperties, ...imageGeneration_description_1.imageGenerationProperties, ...audioGeneration_description_1.audioGenerationProperties, ...videoGeneration_description_1.videoGenerationProperties, ...speechSynthesis_description_1.speechSynthesisProperties, ...speechTranscription_description_1.speechTranscriptionProperties, ...embeddingGeneration_description_1.embeddingGenerationProperties, ]; const baseProperties = [ { displayName: 'Operation', name: 'operation', type: 'options', noDataExpression: true, options: [ { name: operationLabels.chatCompletion, value: 'chatCompletion', action: 'Generate text using chat completion models', description: 'Generate text using chat completion models', }, { name: operationLabels.imageGeneration, value: 'imageGeneration', action: 'Generate images from text prompts', description: 'Generate images from text prompts', }, { name: operationLabels.audioGeneration, value: 'audioGeneration', action: 'Generate music or sound effects from text prompts', description: 'Generate music or sound effects from text prompts', }, { name: operationLabels.videoGeneration, value: 'videoGeneration', action: 'Generate videos from prompts or reference media', description: 
'Generate videos from prompts or reference media', }, { name: operationLabels.speechSynthesis, value: 'speechSynthesis', action: 'Convert text into speech audio', description: 'Convert text into speech audio', }, { name: operationLabels.speechTranscription, value: 'speechTranscription', action: 'Transcribe audio files into text', description: 'Transcribe audio files into text', }, { name: operationLabels.embeddingGeneration, value: 'embeddingGeneration', action: 'Create vector embeddings from text', description: 'Create vector embeddings from text', }, ], default: 'chatCompletion', }, { displayName: 'Model Name or ID', name: 'model', type: 'options', typeOptions: { loadOptionsMethod: 'getModels', loadOptionsDependsOn: ['operation'], }, default: '', required: true, description: 'Choose model or specify an ID using an expression. Choose from the list, or specify an ID using an <a href="https://docs.n8n.io/code/expressions/">expression</a>.', }, ]; class AimlApi { constructor() { this.description = { displayName: 'AI/ML API', name: 'aimlApi', icon: 'file:aimlapi.svg', group: ['transform'], version: 1, subtitle: '={{ ({ chatCompletion: "Chat Completion", imageGeneration: "Image Generation", audioGeneration: "Audio Generation", videoGeneration: "Video Generation", speechSynthesis: "Speech Synthesis", speechTranscription: "Speech Transcription", embeddingGeneration: "Embedding Generation" })[$parameter["operation"]] }}', description: 'Choose from 300+ AI models, from Gemini and ChatGPT to DeepSeek and Llama.', defaults: { name: 'AI/ML API', }, inputs: ["main"], outputs: ["main"], credentials: [ { name: 'aimlApi', required: true, }, ], requestDefaults: { baseURL: '={{ $credentials.url.endsWith("/") ? $credentials.url.slice(0, -1) : $credentials.url }}', headers: { 'Content-Type': 'application/json', 'X-Title': `n8n AIMLAPI Node`, }, }, properties: [...baseProperties, ...operationSpecificProperties], }; this.methods = { loadOptions: { async getModels() { const credentials = await this.getCredentials('aimlApi'); const apiUrl = credentials.url; const endpoint = apiUrl.endsWith('/') ? `${apiUrl}models` : `${apiUrl}/models`; const response = await this.helpers.httpRequestWithAuthentication.call(this, 'aimlApi', { method: 'GET', url: endpoint, json: true, }); const models = response?.models ?? response?.data ?? response; const operation = this.getCurrentNodeParameter('operation') ?? 'chatCompletion'; return (0, models_1.toModelOptions)(models, operation); }, }, }; } async execute() { const items = this.getInputData(); const returnItems = []; const credentials = await this.getCredentials('aimlApi'); const rawBaseUrl = credentials.url ?? ''; const baseURL = rawBaseUrl.endsWith('/') ? rawBaseUrl.slice(0, -1) : rawBaseUrl; for (let itemIndex = 0; itemIndex < items.length; itemIndex++) { try { const operation = this.getNodeParameter('operation', itemIndex); const model = this.getNodeParameter('model', itemIndex); const context = { context: this, itemIndex, baseURL, model, }; const executor = operationExecutors[operation]; if (!executor) { throw new n8n_workflow_1.NodeOperationError(this.getNode(), `Unsupported operation: ${operation}`); } const result = await executor(context); returnItems.push(result); } catch (error) { if (this.continueOnFail()) { returnItems.push({ json: { error: error instanceof Error ? error.message : error, }, }); continue; } throw error; } } return [returnItems]; } } exports.AimlApi = AimlApi; //# sourceMappingURL=AimlApi.node.js.map
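The seven operation executors imported at the top of the file live in separate modules that are not shown here. The contract they satisfy can be inferred from execute(): each receives an object carrying context, itemIndex, baseURL, and model, and returns one item of node output. The sketch below illustrates that contract for chat completion; the request path, the 'messages' parameter name, and the response handling are assumptions for illustration, not the package's actual implementation.

import type { IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';

// Shape inferred from the context object built in AimlApi.execute().
interface OperationContext {
    context: IExecuteFunctions;
    itemIndex: number;
    baseURL: string;
    model: string;
}

// Hypothetical executor sketch; the real chatCompletion.execute module may differ.
export async function executeChatCompletion(ctx: OperationContext): Promise<INodeExecutionData> {
    const { context, itemIndex, baseURL, model } = ctx;
    // 'messages' is assumed to be one of the parameters defined in chatCompletionProperties.
    const messages = context.getNodeParameter('messages', itemIndex);
    const response = await context.helpers.httpRequestWithAuthentication.call(context, 'aimlApi', {
        method: 'POST',
        url: `${baseURL}/chat/completions`, // assumed OpenAI-compatible path
        body: { model, messages },
        json: true,
    });
    return { json: response, pairedItem: { item: itemIndex } };
}

Splitting each operation into its own description/execute module pair keeps this main node file down to routing and shared plumbing, which is why execute() reduces to a lookup in operationExecutors.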