n8n-nodes-aimlapi
Version: (unspecified)
Custom n8n node for integrating with the AI/ML API platform (AIMLAPI) to interact with LLMs and multimodal AI models such as chat completion endpoints.
117 lines • 4.28 kB
JavaScript
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.executeSpeechSynthesis = executeSpeechSynthesis;
const request_1 = require("../utils/request");
const object_1 = require("../utils/object");
/**
 * Canonicalize a model id so provider checks are whitespace- and
 * case-insensitive.
 * @param {string} model - Raw model identifier.
 * @returns {string} Trimmed, lower-cased model identifier.
 */
function normalizeModelId(model) {
    const trimmed = model.trim();
    return trimmed.toLowerCase();
}
/**
 * True when the model belongs to the Aura family (ids starting with
 * "#g1_aura").
 * @param {string} model - Raw model identifier.
 * @returns {boolean}
 */
function isAuraModel(model) {
    return normalizeModelId(model).startsWith('#g1_aura');
}
/**
 * True when the model is an ElevenLabs model (ids prefixed "elevenlabs/").
 * @param {string} model - Raw model identifier.
 * @returns {boolean}
 */
function isElevenLabsModel(model) {
    return normalizeModelId(model).startsWith('elevenlabs/');
}
/**
 * True when the model is a Microsoft (VALL-E) model (ids prefixed
 * "microsoft/").
 * @param {string} model - Raw model identifier.
 * @returns {boolean}
 */
function isMicrosoftVallEModel(model) {
    return normalizeModelId(model).startsWith('microsoft/');
}
/**
 * Extract the first audio URL from a TTS response payload.
 * Prefers the top-level `audio_url`/`url` fields; otherwise scans the
 * `data` array and returns the first entry carrying a truthy URL.
 * @param {object} payload - Parsed API response body.
 * @returns {string|undefined} First URL found, or undefined when absent.
 */
function pickFirstUrl(payload) {
    const topLevel = payload.audio_url ?? payload.url;
    if (topLevel) {
        return topLevel;
    }
    for (const entry of payload.data ?? []) {
        const candidate = entry.audio_url ?? entry.url;
        if (candidate) {
            return candidate;
        }
    }
    return undefined;
}
/**
 * Extract the first base64-encoded audio blob from a TTS response payload.
 * Prefers the top-level `audio`/`base64` fields; otherwise scans the
 * `data` array for the first entry with a truthy `b64_json` or `audio`.
 * @param {object} payload - Parsed API response body.
 * @returns {string|undefined} First base64 payload found, or undefined.
 */
function pickFirstBase64(payload) {
    const topLevel = payload.audio ?? payload.base64;
    if (topLevel) {
        return topLevel;
    }
    for (const entry of payload.data ?? []) {
        const candidate = entry.b64_json ?? entry.audio;
        if (candidate) {
            return candidate;
        }
    }
    return undefined;
}
/**
 * Execute one text-to-speech request against the AIMLAPI `/v1/tts` endpoint
 * for a single input item.
 *
 * Reads the `ttsInput`, `ttsExtract` and `ttsOptions` node parameters,
 * builds a provider-specific request body (Microsoft VALL-E models use a
 * `script`/`speakers` payload; all others send `text`), performs the
 * authenticated HTTP call, and shapes the output according to `ttsExtract`
 * ('audioUrl', 'audioBase64', or the raw response otherwise).
 *
 * @param {{context: object, itemIndex: number, baseURL: string, model: string}} args
 *   n8n execution context, item index, API base URL and selected model id.
 * @returns {Promise<object>} An n8n item: `{ json, pairedItem }`.
 */
async function executeSpeechSynthesis({ context, itemIndex, baseURL, model, }) {
    const input = context.getNodeParameter('ttsInput', itemIndex);
    const extract = context.getNodeParameter('ttsExtract', itemIndex);
    const options = context.getNodeParameter('ttsOptions', itemIndex, {});
    const requestOptions = (0, request_1.createRequestOptions)(baseURL, '/v1/tts');
    // Classify the model once; the variant drives which body fields apply.
    let variant = 'generic';
    if (isMicrosoftVallEModel(model)) {
        variant = 'microsoft';
    }
    else if (isAuraModel(model)) {
        variant = 'aura';
    }
    else if (isElevenLabsModel(model)) {
        variant = 'elevenlabs';
    }
    const body = { model };
    if (variant === 'microsoft') {
        // VALL-E payloads use `script` (override wins over the node input).
        const scriptOverride = options.scriptOverride ?? '';
        body.script = scriptOverride ? scriptOverride : input;
        // `speakers` may arrive as a JSON string from the UI; parse it
        // best-effort and silently keep the raw value on invalid JSON.
        let speakers = options.speakers;
        if (typeof speakers === 'string') {
            try {
                speakers = JSON.parse(speakers);
            }
            catch { }
        }
        if (Array.isArray(speakers)) {
            body.speakers = speakers;
        }
        else if (speakers && typeof speakers === 'object') {
            // A single speaker object is wrapped into a one-element array.
            body.speakers = [speakers];
        }
        (0, object_1.setIfDefined)(body, 'seed', options.seed);
        (0, object_1.setIfDefined)(body, 'cfg_scale', options.cfgScale);
    }
    else {
        body.text = input;
    }
    // Audio-format options shared by every variant. `audioFormat` is the
    // legacy alias for `container`.
    const container = options.container ?? options.audioFormat;
    if (container) {
        body.container = container;
    }
    if (options.encoding) {
        body.encoding = options.encoding;
    }
    const sampleRate = options.sampleRate;
    if (sampleRate !== undefined && sampleRate !== null && sampleRate !== '') {
        body.sample_rate = sampleRate;
    }
    if (variant === 'elevenlabs' || variant === 'generic') {
        (0, object_1.setIfDefined)(body, 'voice', options.voice);
        (0, object_1.setIfDefined)(body, 'output_format', options.outputFormat);
        // Several historical option names map onto `subtitle_enable`.
        const subtitleOption = options.subtitleEnable ??
            options.subtitle ??
            options.subtitle_enable ??
            options.enableSubtitles;
        if (subtitleOption !== undefined) {
            body.subtitle_enable = Boolean(subtitleOption);
        }
    }
    requestOptions.body = body;
    const response = (await context.helpers.httpRequestWithAuthentication.call(context, 'aimlApi', requestOptions));
    const pairedItem = { item: itemIndex };
    if (extract === 'audioUrl') {
        return { json: { url: pickFirstUrl(response) ?? '' }, pairedItem };
    }
    if (extract === 'audioBase64') {
        return { json: { base64: pickFirstBase64(response) ?? '' }, pairedItem };
    }
    return { json: { result: response }, pairedItem };
}
//# sourceMappingURL=speechSynthesis.execute.js.map