@n8n/n8n-nodes-langchain
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.LmChatGroq = void 0;
const groq_1 = require("@langchain/groq");
const ai_utilities_1 = require("@n8n/ai-utilities");
const n8n_workflow_1 = require("n8n-workflow");
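/**
 * Groq Chat Model sub-node.
 *
 * It has no inputs and a single AiLanguageModel output: root nodes (chains, agents)
 * connect to it to obtain a LangChain ChatGroq instance configured from the node's
 * credentials and parameters (see supplyData below).
 */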
class LmChatGroq {
    constructor() {
        this.description = {
            displayName: 'Groq Chat Model',
            name: 'lmChatGroq',
            icon: 'file:groq.svg',
            group: ['transform'],
            version: 1,
            description: 'Language Model Groq',
            defaults: {
                name: 'Groq Chat Model',
            },
            codex: {
                categories: ['AI'],
                subcategories: {
                    AI: ['Language Models', 'Root Nodes'],
                    'Language Models': ['Chat Models (Recommended)'],
                },
                resources: {
                    primaryDocumentation: [
                        {
                            url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgroq/',
                        },
                    ],
                },
            },
            inputs: [],
            outputs: [n8n_workflow_1.NodeConnectionTypes.AiLanguageModel],
            outputNames: ['Model'],
            credentials: [
                {
                    name: 'groqApi',
                    required: true,
                },
            ],
            requestDefaults: {
                baseURL: 'https://api.groq.com/openai/v1',
            },
            properties: [
                (0, ai_utilities_1.getConnectionHintNoticeField)([n8n_workflow_1.NodeConnectionTypes.AiChain, n8n_workflow_1.NodeConnectionTypes.AiAgent]),
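                // The Model dropdown below is populated dynamically: the declarative loadOptions
                // routing issues GET /models against requestDefaults.baseURL, unwraps the `data`
                // array, keeps only entries with active === true and object === "model", and maps
                // each model id to an option name/value pair.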
                {
                    displayName: 'Model',
                    name: 'model',
                    type: 'options',
                    typeOptions: {
                        loadOptions: {
                            routing: {
                                request: {
                                    method: 'GET',
                                    url: '/models',
                                },
                                output: {
                                    postReceive: [
                                        {
                                            type: 'rootProperty',
                                            properties: {
                                                property: 'data',
                                            },
                                        },
                                        {
                                            type: 'filter',
                                            properties: {
                                                pass: '={{ $responseItem.active === true && $responseItem.object === "model" }}',
                                            },
                                        },
                                        {
                                            type: 'setKeyValue',
                                            properties: {
                                                name: '={{$responseItem.id}}',
                                                value: '={{$responseItem.id}}',
                                            },
                                        },
                                    ],
                                },
                            },
                        },
                    },
                    routing: {
                        send: {
                            type: 'body',
                            property: 'model',
                        },
                    },
                    description: 'The model which will generate the completion. <a href="https://console.groq.com/docs/models">Learn more</a>.',
                    default: 'llama3-8b-8192',
                    builderHint: {
                        message: 'Default to a flagship model on Groq (openai/gpt-oss-120b, llama-3.3-70b-versatile, or moonshotai/kimi-k2-instruct-0905). Avoid the legacy llama3-8b-8192 default and older llama3/llama-2 variants.',
                    },
                },
                {
                    displayName: 'Options',
                    name: 'options',
                    placeholder: 'Add Option',
                    description: 'Additional options to add',
                    type: 'collection',
                    default: {},
                    options: [
                        {
                            displayName: 'Maximum Number of Tokens',
                            name: 'maxTokensToSample',
                            default: 4096,
                            description: 'The maximum number of tokens to generate in the completion',
                            type: 'number',
                        },
                        {
                            displayName: 'Sampling Temperature',
                            name: 'temperature',
                            default: 0.7,
                            typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
                            description: 'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
                            type: 'number',
                        },
                    ],
                },
            ],
        };
    }
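    // supplyData is invoked by n8n when a connected root node (e.g. an agent or chain)
    // needs the language model. It reads the Groq credentials and node parameters, then
    // returns a configured LangChain ChatGroq instance as this sub-node's output.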
    async supplyData(itemIndex) {
        const credentials = await this.getCredentials('groqApi');
        const modelName = this.getNodeParameter('model', itemIndex);
        const options = this.getNodeParameter('options', itemIndex, {});
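        // The extra callbacks wire the model into n8n: N8nLlmTracing logs the LLM calls and
        // token usage to the execution, getProxyAgent respects HTTP(S) proxy environment
        // variables, and makeN8nLlmFailedAttemptHandler surfaces retry failures as node errors.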
        const model = new groq_1.ChatGroq({
            apiKey: credentials.apiKey,
            model: modelName,
            maxTokens: options.maxTokensToSample,
            temperature: options.temperature,
            callbacks: [new ai_utilities_1.N8nLlmTracing(this)],
            httpAgent: (0, ai_utilities_1.getProxyAgent)('https://api.groq.com/openai/v1'),
            onFailedAttempt: (0, ai_utilities_1.makeN8nLlmFailedAttemptHandler)(this),
        });
        return {
            response: model,
        };
    }
}
exports.LmChatGroq = LmChatGroq;
//# sourceMappingURL=LmChatGroq.node.js.map
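// ---------------------------------------------------------------------------
// Sketch (not part of the compiled node): roughly what the returned model can do
// once a root node receives it. This bypasses n8n entirely and calls Groq through
// @langchain/groq directly; the API key source, model id, and prompt are placeholders.
//
//   const { ChatGroq } = require('@langchain/groq');
//
//   const model = new ChatGroq({
//       apiKey: process.env.GROQ_API_KEY,    // assumed to be set in the environment
//       model: 'llama-3.3-70b-versatile',    // any model id returned by GET /models
//       maxTokens: 4096,
//       temperature: 0.7,
//   });
//
//   model.invoke('Say hello from Groq').then((message) => {
//       console.log(message.content);
//   });
// ---------------------------------------------------------------------------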