n8n-nodes-aimlapi

A custom n8n node for integrating with the AI/ML API platform (AIMLAPI), exposing LLMs and multimodal AI models through endpoints such as chat completions and audio generation.
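For orientation, here is a minimal sketch of the kind of call the node wraps, assuming AIMLAPI's OpenAI-compatible chat completions route; the base URL, model id, and environment variable name are illustrative assumptions, not values taken from this package:

// Sketch only: a direct chat completion request against AIMLAPI.
// The endpoint, model id, and AIMLAPI_KEY variable are assumptions.
const response = await fetch('https://api.aimlapi.com/v1/chat/completions', {
    method: 'POST',
    headers: {
        Authorization: `Bearer ${process.env.AIMLAPI_KEY}`,
        'Content-Type': 'application/json',
    },
    body: JSON.stringify({
        model: 'gpt-4o-mini', // illustrative model id
        messages: [{ role: 'user', content: 'Hello' }],
    }),
});
const completion = await response.json();
console.log(completion.choices?.[0]?.message?.content);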

audioGeneration.execute.js
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.executeAudioGeneration = executeAudioGeneration; const request_1 = require("../utils/request"); const object_1 = require("../utils/object"); const models_1 = require("../utils/models"); const generation_1 = require("../utils/generation"); function resolveAudioUrl(entry) { return (entry.url || entry.audio_url || entry.audioUrl); } function resolveAudioBase64(entry) { return (entry.b64_json || entry.base64 || entry.audio_base64 || entry.audio || entry.data); } async function executeAudioGeneration({ context, itemIndex, baseURL, model, }) { const prompt = context.getNodeParameter('audioPrompt', itemIndex); const extract = context.getNodeParameter('audioExtract', itemIndex); const options = context.getNodeParameter('audioOptions', itemIndex, {}); const endpoints = await (0, models_1.getModelEndpoints)(context, baseURL, model); const generationPath = endpoints.find((endpoint) => endpoint.includes('/v2/generate/audio')) ?? '/v2/generate/audio'; const requestOptions = (0, request_1.createRequestOptions)(baseURL, generationPath); const body = { model, prompt, }; (0, object_1.setIfDefined)(body, 'mode', options.mode); (0, object_1.setIfDefined)(body, 'duration', options.duration); (0, object_1.setIfDefined)(body, 'audio_format', options.audioFormat); (0, object_1.setIfDefined)(body, 'cfg_scale', options.cfgScale); (0, object_1.setIfDefined)(body, 'seed', options.seed); (0, object_1.setIfDefined)(body, 'negative_prompt', options.negativePrompt); (0, object_1.setIfDefined)(body, 'instrument', options.instrument); (0, object_1.setIfDefined)(body, 'reference_audio_url', options.referenceAudioUrl); (0, object_1.setIfDefined)(body, 'prompt_strength', options.promptStrength); requestOptions.body = body; const initialResponse = (await context.helpers.httpRequestWithAuthentication.call(context, 'aimlApi', requestOptions)); const rawResponse = await (0, generation_1.resolveGenerationResponse)(context, baseURL, generationPath, initialResponse, { mediaType: 'audio', }); const data = (0, generation_1.extractAudioOutputs)(rawResponse); switch (extract) { case 'firstUrl': { const url = data.map(resolveAudioUrl).find((value) => Boolean(value)) ?? ''; return { json: { url }, pairedItem: { item: itemIndex } }; } case 'allUrls': { const urls = data.map(resolveAudioUrl).filter((value) => Boolean(value)); return { json: { urls }, pairedItem: { item: itemIndex } }; } case 'firstBase64': { const base64 = data.map(resolveAudioBase64).find((value) => Boolean(value)) ?? ''; return { json: { base64 }, pairedItem: { item: itemIndex } }; } case 'allBase64': { const base64 = data .map(resolveAudioBase64) .filter((value) => Boolean(value)); return { json: { base64 }, pairedItem: { item: itemIndex } }; } default: return { json: { result: rawResponse }, pairedItem: { item: itemIndex }, }; } } //# sourceMappingURL=audioGeneration.execute.js.map