n8n-nodes-aimlapi
Custom n8n node for integrating with the AI/ML API platform (AIMLAPI), giving workflows access to LLMs and multimodal AI models through endpoints such as chat completions.
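At a high level, the handler below builds and posts a request body of the following shape to AIMLAPI's /v1/chat/completions endpoint; the model ID and option values in this sketch are illustrative, not defaults:

// Illustrative request body; model ID and option values are examples only.
const body = {
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Hello!' }],
    temperature: 0.7, // included only when set in the node's Options
    max_tokens: 256,  // likewise optional
};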
JavaScript
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.executeChatCompletion = executeChatCompletion;
const n8n_workflow_1 = require("n8n-workflow");
const request_1 = require("../utils/request");
const object_1 = require("../utils/object");
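/**
 * Recursively walks an arbitrary content value (string, array, or object),
 * collecting every non-empty text fragment into `segments`.
 * Audio content parts are skipped on purpose.
 */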
function collectTextSegments(value, segments) {
    if (typeof value === 'string') {
        const trimmed = value.trim();
        if (trimmed) {
            segments.push(trimmed);
        }
        return;
    }
    if (!value || typeof value !== 'object') {
        return;
    }
    if (Array.isArray(value)) {
        for (const entry of value) {
            collectTextSegments(entry, segments);
        }
        return;
    }
    const objectValue = value;
    const type = typeof objectValue.type === 'string' ? objectValue.type.toLowerCase() : '';
    if (type.includes('audio')) {
        return;
    }
    const candidates = [
        objectValue.text,
        objectValue.content,
        objectValue.value,
        objectValue.output_text,
    ];
    for (const candidate of candidates) {
        if (typeof candidate === 'string') {
            const trimmed = candidate.trim();
            if (trimmed) {
                segments.push(trimmed);
            }
        }
    }
    const nestedCollections = [objectValue.content, objectValue.parts, objectValue.messages];
    for (const collection of nestedCollections) {
        if (Array.isArray(collection)) {
            for (const entry of collection) {
                collectTextSegments(entry, segments);
            }
        }
    }
}
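/**
 * Flattens a chat message `content` value into one newline-joined string.
 */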
function extractTextFromContent(content) {
    const segments = [];
    collectTextSegments(content, segments);
    return segments.join('\n');
}
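/**
 * Builds a /v1/chat/completions request from the node's parameters, sends it
 * with the `aimlApi` credentials, and shapes the response according to the
 * `extract` parameter ('text', 'messages', 'choices', or the raw response).
 */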
async function executeChatCompletion({ context, itemIndex, baseURL, model, }) {
    const useStructuredMessages = context.getNodeParameter('useStructuredMessages', itemIndex, false);
    const extract = context.getNodeParameter('extract', itemIndex);
    const options = context.getNodeParameter('options', itemIndex, {});
    const requestOptions = (0, request_1.createRequestOptions)(baseURL, '/v1/chat/completions');
    const body = { model };
    const messages = [];
    if (useStructuredMessages) {
        const messagesUi = context.getNodeParameter('messagesUi', itemIndex, {});
        const structuredMessages = messagesUi.message ?? [];
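        // messagesUi is the node's fixedCollection value, shaped roughly as
        // { message: [{ role, customRole?, content, tool_call_id? }, ...] }.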
        for (const entry of structuredMessages) {
            const rawRoleSelection = typeof entry.role === 'string' ? entry.role.trim() : '';
            const normalizedSelection = rawRoleSelection.toLowerCase();
            let role;
            if (normalizedSelection === 'custom') {
                const customRoleRaw = typeof entry.customRole === 'string' ? entry.customRole.trim() : '';
                role = customRoleRaw !== '' ? customRoleRaw : 'user';
            }
            else if (rawRoleSelection !== '') {
                role = rawRoleSelection;
            }
            else {
                role = 'user';
            }
            const normalizedRole = role.toLowerCase();
            const rawContent = typeof entry.content === 'string' ? entry.content : '';
            const content = rawContent.trim();
            if (!content) {
                continue;
            }
            const message = {
                role,
                content: rawContent,
            };
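            // Tool results must reference the tool call they answer.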
            if (normalizedRole === 'tool') {
                const toolCallId = typeof entry.tool_call_id === 'string' ? entry.tool_call_id.trim() : '';
                if (!toolCallId) {
                    throw new n8n_workflow_1.NodeOperationError(context.getNode(), 'Tool messages must include the Tool Call ID returned with the assistant tool invocation.');
                }
                message.tool_call_id = toolCallId;
            }
            messages.push(message);
        }
        if (messages.length === 0) {
            throw new n8n_workflow_1.NodeOperationError(context.getNode(), 'At least one message with content is required when using the message list.');
        }
    }
    else {
        const prompt = context.getNodeParameter('prompt', itemIndex);
        messages.push({
            role: 'user',
            content: prompt,
        });
    }
    body.messages = messages;
    (0, object_1.setIfDefined)(body, 'temperature', options.temperature);
    (0, object_1.setIfDefined)(body, 'top_p', options.topP);
    (0, object_1.setIfDefined)(body, 'max_tokens', options.maxTokens);
    (0, object_1.setIfDefined)(body, 'frequency_penalty', options.frequencyPenalty);
    (0, object_1.setIfDefined)(body, 'presence_penalty', options.presencePenalty);
    if (options.responseFormat === 'text') {
        body.response_format = { type: 'text' };
    }
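    // Optional audio output: request both text and audio modalities and
    // fall back to the 'alloy' voice when none is configured.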
    const audioOutput = options.audioOutput === true;
    const audioVoice = typeof options.audioVoice === 'string' && options.audioVoice.trim() !== ''
        ? options.audioVoice
        : undefined;
    const audioFormat = typeof options.audioFormat === 'string' && options.audioFormat.trim() !== ''
        ? options.audioFormat
        : undefined;
    if (audioOutput) {
        body.modalities = ['text', 'audio'];
        const audioConfig = {
            voice: audioVoice ?? 'alloy',
        };
        if (audioFormat) {
            audioConfig.format = audioFormat;
        }
        body.audio = audioConfig;
    }
    requestOptions.body = body;
    const response = (await context.helpers.httpRequestWithAuthentication.call(context, 'aimlApi', requestOptions));
    const choices = response.choices ?? [];
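    // Shape the node's output according to the requested extraction mode.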
    switch (extract) {
        case 'text': {
            const firstMessage = choices[0]?.message ?? {};
            let content = extractTextFromContent(firstMessage.content);
            if (!content) {
                const firstChoice = choices[0] ?? {};
                if (typeof firstChoice.text === 'string') {
                    content = firstChoice.text;
                }
            }
            return { json: { content }, pairedItem: { item: itemIndex } };
        }
        case 'messages': {
            const messages = choices
                .map((choice) => choice.message)
                .filter((message) => Boolean(message));
            return { json: { result: messages }, pairedItem: { item: itemIndex } };
        }
        case 'choices': {
            return { json: { result: choices }, pairedItem: { item: itemIndex } };
        }
        default:
            return { json: { result: response }, pairedItem: { item: itemIndex } };
    }
}
//# sourceMappingURL=chatCompletion.execute.js.map
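A minimal sketch of how a node's execute() method might invoke this handler; the require path, base URL, and surrounding node code are assumptions for illustration:

// Usage sketch (assumed surrounding node code, not part of this file):
const { executeChatCompletion } = require('./chatCompletion.execute');

async function execute() {
    // `this` is the n8n execution context inside a node's execute() method.
    const items = this.getInputData();
    const returnData = [];
    for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
        returnData.push(await executeChatCompletion({
            context: this,
            itemIndex,
            baseURL: 'https://api.aimlapi.com', // assumed AIMLAPI base URL
            model: this.getNodeParameter('model', itemIndex),
        }));
    }
    return [returnData];
}
// Depending on the Extract parameter, each returned item's json is:
//   'text'     -> { content: '<first choice text>' }
//   'messages' -> { result: [ ...message objects ] }
//   'choices'  -> { result: [ ...choice objects ] }
//   otherwise  -> { result: <full API response> }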