// @n8n/n8n-nodes-langchain: LmOpenAi.node.js (compiled CommonJS output)
"use strict";
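// esbuild CommonJS interop helpers: __export defines getters for the module's
// named exports, __copyProps copies them onto a target object, and __toCommonJS
// wraps the exports with an __esModule marker so both require() and `import`
// consumers resolve LmOpenAi correctly.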
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
for (var name in all)
__defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
if (from && typeof from === "object" || typeof from === "function") {
for (let key of __getOwnPropNames(from))
if (!__hasOwnProp.call(to, key) && key !== except)
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
}
return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
var LmOpenAi_node_exports = {};
__export(LmOpenAi_node_exports, {
LmOpenAi: () => LmOpenAi
});
module.exports = __toCommonJS(LmOpenAi_node_exports);
var import_openai = require("@langchain/openai");
var import_n8n_workflow = require("n8n-workflow");
var import_httpProxyAgent = require("../../../utils/httpProxyAgent");
var import_di = require("@n8n/di");
var import_config = require("@n8n/config");
var import_n8nLlmFailedAttemptHandler = require("../n8nLlmFailedAttemptHandler");
var import_N8nLlmTracing = require("../N8nLlmTracing");
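// Runtime dependencies: LangChain's OpenAI completions wrapper, n8n workflow
// types, a proxy-aware fetch agent helper, the DI container plus AI config
// (source of default headers), and n8n's LLM failed-attempt handler and
// tracing callback.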
class LmOpenAi {
constructor() {
this.description = {
displayName: "OpenAI Model",
name: "lmOpenAi",
hidden: true,
icon: { light: "file:openAiLight.svg", dark: "file:openAiLight.dark.svg" },
group: ["transform"],
version: 1,
description: "For advanced usage with an AI chain",
defaults: {
name: "OpenAI Model"
},
codex: {
categories: ["AI"],
subcategories: {
AI: ["Language Models", "Root Nodes"],
"Language Models": ["Text Completion Models"]
},
resources: {
primaryDocumentation: [
{
url: "https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmopenai/"
}
]
}
},
inputs: [],
outputs: [import_n8n_workflow.NodeConnectionTypes.AiLanguageModel],
outputNames: ["Model"],
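// This is a hidden sub-node: it has no main inputs and exposes a single
// AiLanguageModel output ("Model") that a root chain node consumes.
// `hidden: true` keeps this deprecated node out of the nodes panel while
// workflows that already use it keep working.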
credentials: [
{
name: "openAiApi",
required: true
}
],
requestDefaults: {
ignoreHttpStatusErrors: true,
baseURL: '={{ $parameter.options?.baseURL?.split("/").slice(0,-1).join("/") || "https://api.openai.com" }}'
},
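// requestDefaults.baseURL strips the last path segment from options.baseURL
// (e.g. "https://example.com/v1" becomes "https://example.com") and falls back
// to "https://api.openai.com" when no override is set.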
properties: [
{
displayName: "This node is using OpenAI completions which are now deprecated. Please use the OpenAI Chat Model node instead.",
name: "deprecated",
type: "notice",
default: ""
},
{
displayName: "Model",
name: "model",
type: "resourceLocator",
default: { mode: "list", value: "gpt-3.5-turbo-instruct" },
required: true,
description: 'The model which will generate the completion. <a href="https://beta.openai.com/docs/models/overview">Learn more</a>.',
modes: [
{
displayName: "From List",
name: "list",
type: "list",
typeOptions: {
searchListMethod: "openAiModelSearch"
}
},
{
displayName: "ID",
name: "id",
type: "string"
}
],
routing: {
send: {
type: "body",
property: "model",
value: "={{$parameter.model.value}}"
}
}
},
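// The model is a resourceLocator: "From List" is backed by the
// openAiModelSearch list-search method defined below, "ID" accepts a raw model
// id, and the routing block sends the resolved value as `model` in the request
// body.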
{
displayName: "When using non OpenAI models via Base URL override, not all models might be chat-compatible or support other features, like tools calling or JSON response format.",
name: "notice",
type: "notice",
default: "",
displayOptions: {
show: {
"/options.baseURL": [{ _cnd: { exists: true } }]
}
}
},
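// The compatibility warning above is only shown once options.baseURL exists
// (see the displayOptions condition on "/options.baseURL").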
{
displayName: "Options",
name: "options",
placeholder: "Add Option",
description: "Additional options to add",
type: "collection",
default: {},
options: [
{
displayName: "Base URL",
name: "baseURL",
default: "https://api.openai.com/v1",
description: "Override the default base URL for the API",
type: "string"
},
{
displayName: "Frequency Penalty",
name: "frequencyPenalty",
default: 0,
typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
description: "Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
type: "number"
},
{
displayName: "Maximum Number of Tokens",
name: "maxTokens",
default: -1,
description: "The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).",
type: "number",
typeOptions: {
maxValue: 32768
}
},
{
displayName: "Presence Penalty",
name: "presencePenalty",
default: 0,
typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
description: "Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
type: "number"
},
{
displayName: "Sampling Temperature",
name: "temperature",
default: 0.7,
typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
description: "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
type: "number"
},
{
displayName: "Timeout",
name: "timeout",
default: 6e4,
description: "Maximum amount of time a request is allowed to take in milliseconds",
type: "number"
},
{
displayName: "Max Retries",
name: "maxRetries",
default: 2,
description: "Maximum number of retries to attempt",
type: "number"
},
{
displayName: "Top P",
name: "topP",
default: 1,
typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
description: "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.",
type: "number"
}
]
}
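// The option names (frequencyPenalty, maxTokens, presencePenalty, temperature,
// topP, timeout, maxRetries) mirror the @langchain/openai constructor fields,
// which is why supplyData() below can spread them directly into `new OpenAI(...)`.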
]
};
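// List search backing the "From List" model selector: it calls GET
// {baseURL}/models with the openAiApi credential and, when the default OpenAI
// endpoint is used, keeps only models whose `owned_by` starts with "system";
// a custom base URL returns every model unfiltered.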
this.methods = {
listSearch: {
async openAiModelSearch() {
const results = [];
const options = this.getNodeParameter("options", {});
let uri = "https://api.openai.com/v1/models";
if (options.baseURL) {
uri = `${options.baseURL}/models`;
}
const { data } = await this.helpers.requestWithAuthentication.call(this, "openAiApi", {
method: "GET",
uri,
json: true
});
for (const model of data) {
if (!options.baseURL && !model.owned_by?.startsWith("system")) continue;
results.push({
name: model.id,
value: model.id
});
}
return { results };
}
}
};
}
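// supplyData() builds the LangChain completions client for the chosen model:
// the API key comes from the openAiApi credential, outbound requests use the
// proxy-aware fetch dispatcher and the default headers from AiConfig, and
// n8n's tracing callback plus failed-attempt handler are attached before the
// model is returned to the consuming root node.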
async supplyData(itemIndex) {
const credentials = await this.getCredentials("openAiApi");
const modelName = this.getNodeParameter("model", itemIndex, "", {
extractValue: true
});
const options = this.getNodeParameter("options", itemIndex, {});
const { openAiDefaultHeaders: defaultHeaders } = import_di.Container.get(import_config.AiConfig);
const configuration = {
fetchOptions: {
dispatcher: (0, import_httpProxyAgent.getProxyAgent)(options.baseURL ?? "https://api.openai.com/v1")
},
defaultHeaders
};
if (options.baseURL) {
configuration.baseURL = options.baseURL;
}
const model = new import_openai.OpenAI({
apiKey: credentials.apiKey,
model: modelName,
...options,
configuration,
timeout: options.timeout ?? 6e4,
maxRetries: options.maxRetries ?? 2,
callbacks: [new import_N8nLlmTracing.N8nLlmTracing(this)],
onFailedAttempt: (0, import_n8nLlmFailedAttemptHandler.makeN8nLlmFailedAttemptHandler)(this)
});
return {
response: model
};
}
}
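// Illustrative only (not part of this file): the object returned by supplyData()
// is what a downstream root node receives over the AiLanguageModel connection,
// roughly:
//
//   const { response: llm } = await new LmOpenAi().supplyData.call(ctx, 0);
//   const text = await llm.invoke("Say hello"); // llm is a @langchain/openai OpenAI instance
//
// where `ctx` stands in for the ISupplyDataFunctions context n8n supplies at
// runtime.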
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
LmOpenAi
});
//# sourceMappingURL=LmOpenAi.node.js.map