sfdx-hardis
Swiss-army-knife Toolbox for Salesforce. Allows you to define a complete CI/CD pipeline, orchestrate base commands, and assist users with interactive wizards.
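The file reproduced below is the toolbox's LangChain connector, which is configured entirely through LANGCHAIN_LLM_* environment variables read in its constructor. Here is a minimal configuration sketch, assuming getEnvVar resolves values from process.env; the provider identifier and model name are illustrative only, and the set of accepted provider values depends on LangChainProviderFactory, which is not shown here:

// Hypothetical configuration for the connector below (values are examples, not defaults)
process.env.LANGCHAIN_LLM_PROVIDER = "ollama";                   // required: provider type
process.env.LANGCHAIN_LLM_MODEL = "qwen2.5-coder:14b";           // required: model name
process.env.LANGCHAIN_LLM_MODEL_API_KEY = "<api-key-if-needed>"; // optional, provider-dependent
process.env.LANGCHAIN_LLM_BASE_URL = "http://localhost:11434";   // optional custom endpoint
process.env.LANGCHAIN_LLM_TEMPERATURE = "0.2";                   // optional tuning values
process.env.LANGCHAIN_LLM_TIMEOUT = "60000";
process.env.LANGCHAIN_LLM_MAX_TOKENS = "4000";
process.env.LANGCHAIN_LLM_MAX_RETRIES = "2";
process.env.AI_MAXIMUM_CALL_NUMBER = "100";                      // optional cap enforced by checkMaxAiCallsNumber()
process.env.DEBUG_PROMPTS = "true";                              // optional: log full prompts and responses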
langchainProvider.js (JavaScript):
import { AiProviderRoot } from "./aiProviderRoot.js";
import c from "chalk";
import { uxLog } from "../utils/index.js";
import { getEnvVar } from "../../config/index.js";
import { LangChainProviderFactory } from "./langChainProviders/langChainProviderFactory.js";
export class LangChainProvider extends AiProviderRoot {
  model;
  modelName;
  constructor() {
    super();
    const provider = getEnvVar("LANGCHAIN_LLM_PROVIDER");
    if (!provider) {
      throw new Error("LANGCHAIN_LLM_PROVIDER environment variable must be set to use LangChain integration");
    }
    const providerType = provider.toLowerCase();
    const modelName = getEnvVar("LANGCHAIN_LLM_MODEL");
    const apiKey = getEnvVar("LANGCHAIN_LLM_MODEL_API_KEY");
    if (!modelName) {
      throw new Error("LANGCHAIN_LLM_MODEL environment variable must be set to use LangChain integration");
    }
    this.modelName = modelName;
    // Common configuration for all providers
    const config = {
      temperature: Number(getEnvVar("LANGCHAIN_LLM_TEMPERATURE")) || undefined,
      timeout: Number(getEnvVar("LANGCHAIN_LLM_TIMEOUT")) || undefined,
      maxTokens: Number(getEnvVar("LANGCHAIN_LLM_MAX_TOKENS")) || undefined,
      maxRetries: Number(getEnvVar("LANGCHAIN_LLM_MAX_RETRIES")) || undefined,
      baseUrl: getEnvVar("LANGCHAIN_LLM_BASE_URL") || undefined,
      apiKey: apiKey || undefined
    };
    // Factory pattern so that adding support for new providers is easy in the future
    const llmProvider = LangChainProviderFactory.createProvider(providerType, modelName, config);
    this.model = llmProvider.getModel();
  }
  getLabel() {
    return "LangChain connector";
  }
  async promptAi(promptText, template = null) {
    // Re-use the same check for max AI calls number as in the original OpenAI provider implementation
    if (!this.checkMaxAiCallsNumber()) {
      const maxCalls = this.getAiMaxCallsNumber();
      uxLog(this, c.yellow(`[LangChain] Already performed maximum ${maxCalls} calls. Increase it by defining AI_MAXIMUM_CALL_NUMBER env variable`));
      return null;
    }
    if (process.env?.DEBUG_PROMPTS === "true") {
      uxLog(this, c.grey(`[LangChain] Requesting the following prompt to ${this.modelName}${template ? ' using template ' + template : ''}:\n${promptText}`));
    }
    else {
      uxLog(this, c.grey(`[LangChain] Requesting prompt to ${this.modelName}${template ? ' using template ' + template : ''} (define DEBUG_PROMPTS=true to see details)`));
    }
    this.incrementAiCallsNumber();
    try {
      const response = await this.model.invoke([
        {
          role: "user",
          content: promptText
        }
      ]);
      if (process.env?.DEBUG_PROMPTS === "true") {
        uxLog(this, c.grey("[LangChain] Received prompt response\n" + JSON.stringify(response, null, 2)));
      }
      else {
        uxLog(this, c.grey("[LangChain] Received prompt response"));
      }
      const aiResponse = {
        success: false,
        model: this.modelName,
      };
      if (response.content) {
        aiResponse.success = true;
        aiResponse.promptResponse = typeof response.content === 'string' ? response.content : JSON.stringify(response.content);
      }
      return aiResponse;
    }
    catch (error) {
      if (error instanceof Error) {
        uxLog(this, c.red(`[LangChain] Error while calling LLM API: ${error.message}`));
      }
      else {
        uxLog(this, c.red(`[LangChain] Unexpected error occurred`));
      }
      return null;
    }
  }
}
//# sourceMappingURL=langchainProvider.js.map
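A short usage sketch, assuming the configuration shown earlier and an ESM context with top-level await; the call counting (checkMaxAiCallsNumber, incrementAiCallsNumber) and uxLog output are inherited behaviour defined outside this file:

import { LangChainProvider } from "./langchainProvider.js";

const provider = new LangChainProvider();   // throws if LANGCHAIN_LLM_PROVIDER or LANGCHAIN_LLM_MODEL is unset
console.log(provider.getLabel());           // "LangChain connector"

const aiResponse = await provider.promptAi("Summarize what this Apex trigger does.");
if (aiResponse?.success) {
  console.log(aiResponse.promptResponse);   // model output, always returned as a string
} else {
  console.log("No usable response: call limit reached, API error, or empty content.");
}

Note that in this file the optional second argument of promptAi (template) only affects the log messages; any prompt-template resolution happens before the method is called.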