sfdx-hardis
Version:
Swiss-army-knife toolbox for Salesforce. Allows you to define a complete CI/CD pipeline, orchestrate base commands, and assist users with interactive wizards.
49 lines • 2.11 kB
JavaScript
import { OpenAI } from "openai";
import { AiProviderRoot } from "./aiProviderRoot.js";
import c from "chalk";
import { uxLog } from "../utils/index.js";
/**
 * AI provider backed by the OpenAI chat completions API.
 * The underlying client reads OPENAI_API_KEY from the environment;
 * the model is selectable via the OPENAI_MODEL env variable.
 */
export class OpenAiProvider extends AiProviderRoot {
    openai;
    constructor() {
        super();
        // Default OpenAI client: credentials are picked up from env vars
        this.openai = new OpenAI();
    }
    getLabel() {
        return "OpenAi connector";
    }
    /**
     * Sends a prompt to the configured OpenAI model and returns the parsed result.
     * @param {string} promptText - Prompt content (sent as the system message)
     * @param {string|null} template - Optional template name, used only for log messages
     * @returns {Promise<{success: boolean, model: string, promptResponse?: string}|null>}
     *          Response wrapper, or null when the max-calls quota is exhausted
     */
    async promptAi(promptText, template = null) {
        // Enforce the global quota on AI calls (AI_MAXIMUM_CALL_NUMBER)
        if (!this.checkMaxAiCallsNumber()) {
            const maxCalls = this.getAiMaxCallsNumber();
            uxLog("warning", this, c.yellow(`[OpenAi] Already performed maximum ${maxCalls} calls. Increase it by defining AI_MAXIMUM_CALL_NUMBER env variable`));
            return null;
        }
        const gptModel = process.env.OPENAI_MODEL || "gpt-4o-mini";
        const templateSuffix = template ? ' using template ' + template : '';
        // Full prompt is only logged when DEBUG_PROMPTS=true
        const requestMsg = process.env?.DEBUG_PROMPTS === "true"
            ? `[OpenAi] Requesting the following prompt to ${gptModel}${templateSuffix}:\n${promptText}`
            : `[OpenAi] Requesting prompt to ${gptModel}${templateSuffix} (define DEBUG_PROMPTS=true to see details)`;
        uxLog("log", this, c.grey(requestMsg));
        this.incrementAiCallsNumber();
        const completion = await this.openai.chat.completions.create({
            messages: [{ role: "system", content: promptText }],
            model: gptModel,
        });
        // Raw completion payload is only dumped when DEBUG_PROMPTS=true
        let responseMsg = "[OpenAi] Received prompt response from " + gptModel;
        if (process.env?.DEBUG_PROMPTS === "true") {
            responseMsg += "\n" + JSON.stringify(completion, null, 2);
        }
        uxLog("log", this, c.grey(responseMsg));
        const aiResponse = {
            success: false,
            model: completion.model,
        };
        if (completion?.choices?.length > 0) {
            aiResponse.success = true;
            // Normalize a null content to undefined for downstream consumers
            aiResponse.promptResponse = completion.choices[0].message.content ?? undefined;
        }
        return aiResponse;
    }
}
//# sourceMappingURL=openaiProvider.js.map