n8n-nodes-nim
NVIDIA NIM node for n8n
LmChatNIM.node.js (JavaScript)
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.LmChatNIM = void 0;
const openai_1 = require("@langchain/openai");
const n8n_workflow_1 = require("n8n-workflow");
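// n8n sub-node that supplies an NVIDIA NIM chat model (via its OpenAI-compatible API) to AI chains and agents.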
class LmChatNIM {
constructor() {
this.description = {
displayName: 'NVIDIA NIM Chat Model',
name: 'lmChatNIM',
icon: { light: 'file:nvidia-nim-dark.svg', dark: 'file:nvidia-nim-light.svg' },
group: ['transform'],
version: [1],
description: 'For advanced usage with an AI chain',
defaults: {
name: 'NIM Chat Model',
},
codex: {
categories: ['AI'],
subcategories: {
AI: ['Language Models', 'Root Nodes'],
'Language Models': ['Chat Models (Recommended)'],
},
resources: {
primaryDocumentation: [
{
url: 'https://build.nvidia.com/search?q=chat/',
},
],
},
},
inputs: [],
outputs: [n8n_workflow_1.NodeConnectionTypes.AiLanguageModel],
outputNames: ['Model'],
credentials: [
{
name: 'nimApi',
required: true,
},
],
requestDefaults: {
ignoreHttpStatusErrors: true,
baseURL: '={{ $credentials?.url }}',
},
properties: [
{
displayName: 'If using JSON response format, you must include the word "json" in the prompt in your chain or agent. Also, make sure to select a model released after November 2023.',
name: 'notice',
type: 'notice',
default: '',
displayOptions: {
show: {
'/options.responseFormat': ['json_object'],
},
},
},
{
displayName: 'Model',
name: 'model',
type: 'string',
required: true,
default: 'meta/llama-3.1-70b-instruct',
description: 'The NIM model to use, e.g. meta/llama-3.1-70b-instruct',
noDataExpression: false,
},
{
displayName: 'Options',
name: 'options',
placeholder: 'Add Option',
description: 'Additional options to add',
type: 'collection',
default: {},
options: [
{
displayName: 'Max Retries',
name: 'maxRetries',
default: 2,
description: 'Maximum number of retries to attempt',
type: 'number',
},
{
displayName: 'Maximum Number of Tokens',
name: 'max_tokens',
default: -1,
description: 'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',
type: 'number',
typeOptions: {
maxValue: 32768,
},
},
{
displayName: 'Response Format',
name: 'responseFormat',
default: 'text',
type: 'options',
options: [
{
name: 'Text',
value: 'text',
description: 'Regular text response',
},
{
name: 'JSON',
value: 'json_object',
description: 'Enables JSON mode, which should guarantee the message the model generates is valid JSON',
},
],
},
{
displayName: 'Sampling Temperature',
name: 'temperature',
default: 0.7,
typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },
description: 'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
type: 'number',
},
{
displayName: 'Timeout',
name: 'timeout',
default: 360000,
description: 'Maximum amount of time a request is allowed to take in milliseconds',
type: 'number',
},
{
displayName: 'Top P',
name: 'top_p',
default: 1,
typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
description: 'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
type: 'number',
},
],
},
],
};
}
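// Called by n8n to supply the configured chat model to the connected chain or agent node.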
async supplyData(itemIndex) {
var _a, _b;
const credentials = await this.getCredentials('nimApi');
const modelName = this.getNodeParameter('model', 0);
const options = this.getNodeParameter('options', itemIndex, {});
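// Point the OpenAI-compatible LangChain client at the NIM server URL stored in the nimApi credentials.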
const configuration = {
baseURL: credentials.url,
};
const model = new openai_1.ChatOpenAI({
apiKey: credentials.apiKey,
modelName,
...options,
timeout: (_a = options.timeout) !== null && _a !== void 0 ? _a : 60000,
maxRetries: (_b = options.maxRetries) !== null && _b !== void 0 ? _b : 2,
configuration,
modelKwargs: options.responseFormat
? {
response_format: { type: options.responseFormat },
}
: undefined,
});
return {
response: model,
};
}
}
exports.LmChatNIM = LmChatNIM;
//# sourceMappingURL=LmChatNIM.node.js.map
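For reference, a minimal standalone sketch of what supplyData builds: a @langchain/openai ChatOpenAI client pointed at a NIM OpenAI-compatible endpoint. The base URL, environment variable, and JSON-mode setting below are illustrative assumptions, not values the node hard-codes; the node reads the URL and API key from the nimApi credentials and the model name from the Model parameter.

// Hypothetical standalone equivalent of the model this node supplies (not part of the package).
const { ChatOpenAI } = require("@langchain/openai");

const model = new ChatOpenAI({
    apiKey: process.env.NIM_API_KEY, // assumption: NIM API key kept in an environment variable
    modelName: "meta/llama-3.1-70b-instruct", // the node's default model
    temperature: 0.7,
    maxRetries: 2,
    timeout: 60000,
    configuration: {
        // assumption: hosted NIM endpoint; the node takes this from credentials.url instead
        baseURL: "https://integrate.api.nvidia.com/v1",
    },
    // Forwarded only when the Response Format option is set to JSON in the node UI.
    modelKwargs: { response_format: { type: "json_object" } },
});

// Example call (JSON mode requires the word "json" in the prompt):
// const reply = await model.invoke('Return {"ok": true} as json');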