// @summarisation/openai — unit tests for the openai client
// (compiled JavaScript emit; 60 lines, 59 loc, 2.96 kB)
;
// TypeScript interop helper: makes `.default` access uniform. An ES-module
// namespace (marked with `__esModule`) is passed through unchanged; a plain
// CommonJS export is wrapped so it is reachable as `.default`.
var __importDefault = (this && this.__importDefault) || function (mod) {
    if (mod && mod.__esModule) {
        return mod;
    }
    return { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const axios_1 = __importDefault(require("axios"));
const axios_mock_adapter_1 = __importDefault(require("axios-mock-adapter"));
const openai_1 = require("./openai");
describe('openAiClient', () => {
    let mock;
    let config;
    const baseURL = 'https://api.openai.com';
    const Authorization = 'Bearer test-token';
    beforeEach(() => {
        // Fresh adapter + config per test so request history never leaks across cases.
        mock = new axios_mock_adapter_1.default(axios_1.default);
        config = (0, openai_1.defaultOpenAiConfig)(baseURL, Authorization, 'davinci', axios_1.default, (a) => { });
    });
    afterEach(() => {
        // Detach the adapter from the shared default axios instance; without this,
        // adapters stack up each beforeEach and axios stays patched after the suite.
        mock.restore();
        // Undo any jest.spyOn patches (e.g. the console.log spy below).
        jest.restoreAllMocks();
    });
    it('should use default model if not provided', async () => {
        const client = (0, openai_1.openAiClient)({ ...config, model: undefined });
        const messages = [{ role: 'user', content: 'Hello!' }];
        const responseMessage = { role: 'assistant', content: 'Hi there!' };
        mock.onPost('/v1/chat/completions').reply(200, {
            choices: [{ message: responseMessage }]
        });
        const response = await client(messages);
        expect(response).toEqual([responseMessage]);
        // The config's model ('davinci') must be used when the caller passes none.
        expect(mock.history.post[0].data).toContain('"model":"davinci"');
    });
    it('should call the OpenAI API and return the response messages', async () => {
        const client = (0, openai_1.openAiClient)(config);
        const messages = [{ role: 'user', content: 'Hello!' }];
        const responseMessage = { role: 'assistant', content: 'Hi there!' };
        mock.onPost('/v1/chat/completions').reply(200, {
            choices: [{ message: responseMessage }]
        });
        const response = await client(messages);
        expect(response).toEqual([responseMessage]);
        expect(mock.history.post[0].data).toContain('"model":"davinci"');
        // The outgoing request body must carry the caller's messages verbatim.
        expect(JSON.parse(mock.history.post[0].data).messages).toEqual(messages);
    });
    it('should throw an error if the API call fails', async () => {
        const client = (0, openai_1.openAiClient)(config);
        const messages = [{ role: 'user', content: 'Hello!' }];
        mock.onPost('/v1/chat/completions').reply(500);
        await expect(client(messages)).rejects.toThrow();
    });
    it('should log debug messages if debug is enabled', async () => {
        const debugConfig = { ...config, debug: true };
        const client = (0, openai_1.openAiClient)(debugConfig);
        const messages = [{ role: 'user', content: 'Hello!' }];
        const responseMessage = { role: 'assistant', content: 'Hi there!' };
        // Spy instead of reassigning console.log so the global is restored
        // (see afterEach) and test output is not swallowed permanently.
        const logSpy = jest.spyOn(console, 'log').mockImplementation(() => { });
        mock.onPost('/v1/chat/completions').reply(200, {
            choices: [{ message: responseMessage }]
        });
        await client(messages);
        expect(logSpy).toHaveBeenCalledWith('openAiMessagesClient', { messages, model: "davinci" });
    });
});