n8n-nodes-aimlapi
Custom n8n node for integrating with the AI/ML API platform (AIMLAPI), giving workflows access to LLMs and multimodal AI models through endpoints such as chat completion.
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.toModelOptions = toModelOptions;
exports.getModelEndpoints = getModelEndpoints;
const request_1 = require("./request");
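// Maps each node operation to the normalized `type` values a model may report
// for that operation (e.g. a model whose type is "chat-completion" supports chatCompletion).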
const OPERATION_TYPE_ALIASES = {
chatCompletion: new Set([
'chat-completion',
'chat',
'completion',
'text-generation',
'language-completion',
'responses',
]),
imageGeneration: new Set(['image', 'image-generation', 'images', 'vision']),
audioGeneration: new Set([
'audio',
'audio-generation',
'audio_generation',
'music',
'music-generation',
'music_generation',
'sound',
'sound-generation',
'sound_generation',
]),
videoGeneration: new Set(['video', 'video-generation', 'video_generation']),
speechSynthesis: new Set(['tts', 'text-to-speech', 'speech']),
speechTranscription: new Set(['stt', 'speech-to-text', 'transcription']),
embeddingGeneration: new Set(['embedding', 'embeddings']),
};
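// Maps each node operation to the capability keywords that also indicate support,
// used when a model advertises capabilities instead of (or in addition to) a type.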
const OPERATION_CAPABILITY_ALIASES = {
chatCompletion: new Set(['chat', 'completion', 'text', 'language', 'llm']),
imageGeneration: new Set(['image', 'images', 'image-generation', 'vision']),
audioGeneration: new Set([
'audio',
'audio-generation',
'audio_generation',
'music',
'music-generation',
'music_generation',
'sound',
'sound-generation',
'sound_generation',
]),
videoGeneration: new Set(['video', 'video-generation', 'video_generation']),
speechSynthesis: new Set(['tts', 'speech', 'voice']),
speechTranscription: new Set(['stt', 'speech-to-text', 'transcription']),
embeddingGeneration: new Set(['embedding', 'vector']),
};
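// Collapses runs of whitespace to a single space; normalize() then trims and
// lowercases so type and capability strings compare case-insensitively.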
const WHITESPACE_RUN_REGEX = /\s+/g;
function normalize(value) {
if (typeof value !== 'string') {
return '';
}
return value.trim().replace(WHITESPACE_RUN_REGEX, ' ').toLowerCase();
}
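// Reads model.capabilities, which may be an array of capability names or an
// object map of capability -> boolean, and returns the normalized names as a Set.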
function extractCapabilities(model) {
const capabilities = model.capabilities;
if (Array.isArray(capabilities)) {
return new Set(capabilities.map(normalize).filter(Boolean));
}
if (capabilities && typeof capabilities === 'object') {
return new Set(Object.entries(capabilities)
.filter(([, supported]) => Boolean(supported))
.map(([key]) => normalize(key))
.filter(Boolean));
}
return new Set();
}
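// True when at least one of the model's capabilities appears in the supported set.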
function hasAnyCapability(capabilities, supported) {
if (!supported) {
return false;
}
for (const capability of capabilities) {
if (supported.has(capability)) {
return true;
}
}
return false;
}
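// True when the model's type is explicitly claimed by a different operation's alias set.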
function isTypeClaimedByOtherOperation(type, operation) {
return Object.entries(OPERATION_TYPE_ALIASES).some(([key, set]) => key !== operation && set.has(type));
}
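// Decides whether a model supports the requested operation: a model matches when its
// type or one of its capabilities maps to that operation. chatCompletion additionally
// acts as the fallback for models that declare no type and no capabilities, or whose
// type is not claimed by any other operation.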
function supportsOperation(model, operation) {
const type = normalize(model.type);
const capabilities = extractCapabilities(model);
const supportedTypes = OPERATION_TYPE_ALIASES[operation];
const supportedCapabilities = OPERATION_CAPABILITY_ALIASES[operation];
if (type && supportedTypes.has(type)) {
return true;
}
if (hasAnyCapability(capabilities, supportedCapabilities)) {
return true;
}
if (operation === 'chatCompletion') {
if (!type && capabilities.size === 0) {
return true;
}
if (type && !isTypeClaimedByOtherOperation(type, operation)) {
return true;
}
}
return false;
}
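// Converts the raw /models response into n8n dropdown options ({ name, value }),
// keeping only models that support the requested operation and have a string id.
// The display name falls back from info.name to model.name to the id.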
function toModelOptions(models, operation) {
if (!Array.isArray(models)) {
return [];
}
return models
.filter((model) => Boolean(model) && typeof model === 'object')
.filter((model) => supportsOperation(model, operation))
.map((model) => {
const info = model.info ?? {};
const name = (typeof info.name === 'string' && info.name) ||
(typeof model.name === 'string' && model.name) ||
(typeof model.id === 'string' && model.id) ||
'';
const value = typeof model.id === 'string' ? model.id : '';
return {
name,
value,
};
})
.filter((option) => option.value !== '');
}
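// Caches endpoint lists per execution context. Keying the WeakMap on the context
// lets each cache be garbage-collected together with its workflow execution.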
const MODEL_ENDPOINT_CACHE = new WeakMap();
function toStringArray(value) {
if (Array.isArray(value)) {
return value.filter((entry) => typeof entry === 'string');
}
return [];
}
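// Lazily creates the per-context Map of model id -> endpoint list.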
function ensureEndpointCache(context) {
let cache = MODEL_ENDPOINT_CACHE.get(context);
if (!cache) {
cache = new Map();
MODEL_ENDPOINT_CACHE.set(context, cache);
}
return cache;
}
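// Returns the endpoints advertised for a model. On a cache miss it fetches GET /models
// once with the aimlApi credential, caches the endpoints of every model in the response,
// and caches an empty list for the requested id if the request fails.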
async function getModelEndpoints(context, baseURL, modelId) {
const cache = ensureEndpointCache(context);
if (cache.has(modelId)) {
return cache.get(modelId) ?? [];
}
try {
const requestOptions = (0, request_1.createRequestOptions)(baseURL, '/models', 'GET');
const response = (await context.helpers.httpRequestWithAuthentication.call(context, 'aimlApi', requestOptions));
const models = (response.models ?? response.data) ?? [];
for (const model of models) {
if (!model || typeof model !== 'object') {
continue;
}
const id = typeof model.id === 'string' ? model.id : undefined;
if (!id || cache.has(id)) {
continue;
}
cache.set(id, toStringArray(model.endpoints));
}
}
catch (error) {
cache.set(modelId, []);
return [];
}
return cache.get(modelId) ?? [];
}
//# sourceMappingURL=models.js.map