// portkey-ai — Node client library for the Portkey API.
// Compiled Jest test suite (JavaScript emitted from TypeScript) covering
// Anyscale chat-completion models.
/**
 * TypeScript-emitted async helper: drives a generator that yields promises,
 * producing one promise that settles with the generator's return value.
 * @param {*} thisArg - `this` binding applied to the generator body.
 * @param {*} _arguments - arguments forwarded to the generator body.
 * @param {PromiseConstructor} P - promise constructor to use; defaults to Promise.
 * @param {GeneratorFunction} generator - body of the original async function.
 * @returns {Promise<*>} resolves with the return value, rejects on throw.
 */
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    // Wrap a yielded value in a P-promise unless it already is one.
    function adopt(v) {
        if (v instanceof P) {
            return v;
        }
        return new P(function (resolve) { resolve(v); });
    }
    return new (P || (P = Promise))(function (resolve, reject) {
        // Advance the generator one step; either finish the outer promise
        // or chain the next step onto the freshly yielded value.
        function step(state) {
            if (state.done) {
                resolve(state.value);
            } else {
                adopt(state.value).then(onValue, onError);
            }
        }
        function onValue(v) {
            try { step(generator.next(v)); } catch (e) { reject(e); }
        }
        function onError(err) {
            try { step(generator["throw"](err)); } catch (e) { reject(e); }
        }
        // Kick off execution; a synchronous throw rejects via the executor.
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
// Transpiled CommonJS prologue plus suite setup: mark the module as an ES
// interop module, load environment variables, and build the Portkey client
// bound to the Anyscale virtual key.
var _a, _b;
Object.defineProperty(exports, "__esModule", { value: true });
const dotenv_1 = require("dotenv");
const portkey_ai_1 = require("portkey-ai");
// override: true lets .env values replace already-set process env vars.
(0, dotenv_1.config)({ override: true });
// Empty-string fallbacks keep the constructor from receiving undefined.
const apiKey = (_a = process.env["PORTKEY_API_KEY"]) !== null && _a !== void 0 ? _a : "";
const virtualKey = (_b = process.env["ANYSCALE_VIRTUAL_KEY"]) !== null && _b !== void 0 ? _b : "";
const client = new portkey_ai_1.Portkey({ apiKey, virtualKey });
// Models exercised against the Anyscale provider through Portkey. The five
// original tests were byte-identical except for this string, so they are
// generated table-driven; test names are unchanged (`model: <id>`).
const ANYSCALE_CHAT_MODELS = [
    'meta-llama/Llama-2-7b-chat-hf',
    'meta-llama/Llama-2-13b-chat-hf',
    'meta-llama/Llama-2-70b-chat-hf',
    'codellama/CodeLlama-34b-Instruct-hf',
    'mistralai/Mistral-7B-Instruct-v0.1',
];
describe('Anyscale ChatCompletions APIs', () => {
    ANYSCALE_CHAT_MODELS.forEach((model) => {
        // Each test sends one trivial prompt and asserts a non-empty
        // choices array comes back for the given model.
        test(`model: ${model}`, () => __awaiter(void 0, void 0, void 0, function* () {
            const completion = yield client.chat.completions.create({ model, messages: [{ "role": "user", "content": "Say this is a test" }] });
            expect(completion).toBeDefined();
            expect(completion.choices).toBeDefined();
            expect(completion.choices.length).toBeGreaterThan(0);
        }));
    });
});
//# sourceMappingURL=anyscale.test.js.map