retort-js
Version:
Intuitive, production-ready prompt chaining in JavaScript
61 lines (60 loc) • 2.05 kB
JavaScript
;
// Interop helper emitted by the TypeScript compiler: normalizes a required
// module so that its main export is always reachable via `.default`.
// Reuses an existing helper on `this` when one is present (tslib-style guard).
var __importDefault = (this && this.__importDefault) || function (mod) {
    if (mod && mod.__esModule) {
        // Already a transpiled ES module — use it unchanged.
        return mod;
    }
    // Plain CommonJS export — wrap it so `.default` points at the module.
    return { "default": mod };
};
// Mark this module as transpiled-ESM so interop helpers in importers
// (e.g. __importDefault) pass it through unchanged.
Object.defineProperty(exports, "__esModule", { value: true });
// Pre-declare the export binding so it exists before the definition below runs.
exports.openAiChatCompletion = void 0;
// OpenAI SDK; the interop helper guarantees the constructor is at `.default`.
const openai_1 = __importDefault(require("openai"));
/**
 * Runs a chat completion against the OpenAI API, yielding result objects.
 *
 * In streaming mode (settings.stream === true) it yields once per received
 * chunk, with `content` holding the accumulated text so far and
 * `contentDelta` the newly received fragment. In non-streaming mode it
 * yields exactly once with the full response text in both fields.
 *
 * @param {object} settings - model, temperature, maxTokens, topP, stream.
 * @param {AsyncIterable<Promise<{content, role}>|{content, role}>} messagePromises -
 *   messages to send, awaited in order before the request is made.
 * @yields {{content, contentDelta, promptTokens, completionTokens, totalTokens}}
 *   Token counts come from the API's `usage` field and may be undefined
 *   (notably on stream chunks, where the SDK does not populate usage unless
 *   requested — TODO confirm against the SDK version in use).
 */
async function* openAiChatCompletion(settings, messagePromises) {
    const client = new openai_1.default({
        apiKey: process.env["OPENAI_API_KEY"],
    });
    // Await each pending message in order and keep only the fields the API needs.
    const messages = [];
    for await (const msg of messagePromises) {
        messages.push({ content: msg.content, role: msg.role });
    }
    const requestOptions = {
        model: settings.model.toString(),
        messages,
        temperature: settings.temperature,
        max_tokens: settings.maxTokens,
        top_p: settings.topP,
    };
    // Only stream if explicitly set to true
    if (settings.stream !== true) {
        // Non-streaming: a single request, a single yielded result.
        const response = await client.chat.completions.create({
            ...requestOptions,
            stream: false,
        });
        const text = response.choices[0]?.message?.content || "";
        yield {
            content: text,
            contentDelta: text,
            promptTokens: response.usage?.prompt_tokens,
            completionTokens: response.usage?.completion_tokens,
            totalTokens: response.usage?.total_tokens,
        };
        return;
    }
    // Streaming: yield an updated snapshot for every chunk received.
    const stream = await client.chat.completions.create({
        ...requestOptions,
        stream: true,
    });
    let accumulated = "";
    for await (const chunk of stream) {
        const delta = chunk.choices[0]?.delta?.content || "";
        accumulated += delta;
        yield {
            content: accumulated,
            contentDelta: delta,
            promptTokens: chunk.usage?.prompt_tokens,
            completionTokens: chunk.usage?.completion_tokens,
            totalTokens: chunk.usage?.total_tokens,
        };
    }
}
exports.openAiChatCompletion = openAiChatCompletion;