@competent-devs/test-forge
Package for generating UI unit tests from Storybook context
JavaScript
import { AzureChatOpenAI } from "@langchain/openai";
import { BufferMemory } from "langchain/memory";
import { ConversationChain } from "langchain/chains";
import { TokenUsageTracker } from "./token-usage-tracker.js";
import { getConfig, DEPLOYMENT, API_VERSION } from "../services/config.js";
// Cumulative token-usage (and cost) tracker for the configured deployment.
export const tokenTracker = new TokenUsageTracker(DEPLOYMENT);
// Shared conversation memory: repeated invokeLlmChain calls keep chat history.
export const memory = new BufferMemory();
export const invokeLlmChain = async (messages) => {
  const { DIAL_API_KEY, DIAL_PROXY_URL } = await getConfig();
  const API_BASE_PATH = `https://${DIAL_PROXY_URL}/openai/deployments/${DEPLOYMENT}/chat/completions?api-version=${API_VERSION}`;

  const llm = new AzureChatOpenAI({
    azureOpenAIApiKey: DIAL_API_KEY,
    azureOpenAIApiVersion: API_VERSION, // use the same API version as API_BASE_PATH
    azureOpenAIBasePath: API_BASE_PATH,
    temperature: 0.2,
    maxRetries: 1,
    callbacks: [
      {
        // Accumulate token usage after each completion and log the running totals.
        handleLLMEnd(output) {
          tokenTracker.addUsage(output.llmOutput?.tokenUsage);
          console.log(JSON.stringify(tokenTracker.getTokenUsageData(), null, 2));
        },
      },
    ],
  });
console.log("Chat History:", memory.chatHistory);
const chain = new ConversationChain({ llm, memory });
try {
const response = await chain.call({ input: messages });
console.log(JSON.stringify(tokenTracker.getTokenUsageData(), null, 2));
console.log("Total Price in $:", tokenTracker.getTotalCost());
return response;
}
catch (err) {
console.error("Error during LLM execution:", err);
}
};
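
A minimal usage sketch, assuming the module is consumed as ESM (top-level await); the import path, prompt wording, and story source below are illustrative assumptions, not part of the package — adjust the import to the published entry point:

import { invokeLlmChain, tokenTracker } from "./agent.js";

// Hypothetical Storybook story source to generate a test for.
const storySource = `
export const Primary = {
  args: { label: "Submit", disabled: false },
};
`;

// ConversationChain returns its result under the default "response" key.
const { response } = await invokeLlmChain(
  `Generate a React Testing Library unit test for this Storybook story:\n${storySource}`
);
console.log(response);
console.log("Spent so far ($):", tokenTracker.getTotalCost());

Because the exported memory is a shared BufferMemory, a follow-up call in the same process (e.g. "Now add a test for the disabled state") sees the earlier exchange as chat history.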
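
token-usage-tracker.js is not shown on this page. The sketch below matches only the interface the agent relies on (a constructor taking a deployment name, addUsage, getTokenUsageData, getTotalCost); the rate table is a placeholder assumption, not the package's actual pricing:

// token-usage-tracker.js — interface sketch; the real implementation is not shown here.
// Placeholder per-1K-token prices; the package's actual rate table is unknown.
const PRICES_PER_1K = {
  default: { prompt: 0.0025, completion: 0.01 },
};

export class TokenUsageTracker {
  constructor(deployment) {
    this.deployment = deployment;
    this.promptTokens = 0;
    this.completionTokens = 0;
  }

  // tokenUsage comes from LangChain's llmOutput and may be undefined.
  addUsage(tokenUsage) {
    if (!tokenUsage) return;
    this.promptTokens += tokenUsage.promptTokens ?? 0;
    this.completionTokens += tokenUsage.completionTokens ?? 0;
  }

  getTokenUsageData() {
    return {
      deployment: this.deployment,
      promptTokens: this.promptTokens,
      completionTokens: this.completionTokens,
      totalTokens: this.promptTokens + this.completionTokens,
    };
  }

  getTotalCost() {
    const rates = PRICES_PER_1K[this.deployment] ?? PRICES_PER_1K.default;
    return (
      (this.promptTokens / 1000) * rates.prompt +
      (this.completionTokens / 1000) * rates.completion
    );
  }
}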