portkey-ai
Node client library for the Portkey API
JavaScript
"use strict";
const { config } = require("dotenv");
const { Portkey } = require("portkey-ai");
// Load PORTKEY_API_KEY and ANYSCALE_VIRTUAL_KEY from the local .env file.
config({ override: true });
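// The Portkey API key authenticates the client; the Anyscale virtual key tells
// the gateway which provider account to route chat completions through.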
const client = new Portkey({
    apiKey: process.env["PORTKEY_API_KEY"] ?? "",
    virtualKey: process.env["ANYSCALE_VIRTUAL_KEY"] ?? ""
});
describe('Anyscale ChatCompletions APIs', () => {
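    // Each test sends a one-line prompt and asserts that a non-empty choices array comes back.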
    test('model: meta-llama/Llama-2-7b-chat-hf', async () => {
        const completion = await client.chat.completions.create({
            model: 'meta-llama/Llama-2-7b-chat-hf',
            messages: [{ role: 'user', content: 'Say this is a test' }]
        });
        expect(completion).toBeDefined();
        expect(completion.choices).toBeDefined();
        expect(completion.choices.length).toBeGreaterThan(0);
    });
    test('model: meta-llama/Llama-2-13b-chat-hf', async () => {
        const completion = await client.chat.completions.create({
            model: 'meta-llama/Llama-2-13b-chat-hf',
            messages: [{ role: 'user', content: 'Say this is a test' }]
        });
        expect(completion).toBeDefined();
        expect(completion.choices).toBeDefined();
        expect(completion.choices.length).toBeGreaterThan(0);
    });
    test('model: meta-llama/Llama-2-70b-chat-hf', async () => {
        const completion = await client.chat.completions.create({
            model: 'meta-llama/Llama-2-70b-chat-hf',
            messages: [{ role: 'user', content: 'Say this is a test' }]
        });
        expect(completion).toBeDefined();
        expect(completion.choices).toBeDefined();
        expect(completion.choices.length).toBeGreaterThan(0);
    });
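    // Several of the tests below pass max_tokens to bound the completion length.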
    test('model: codellama/CodeLlama-34b-Instruct-hf', async () => {
        const completion = await client.chat.completions.create({
            model: 'codellama/CodeLlama-34b-Instruct-hf',
            messages: [{ role: 'user', content: 'Say this is a test' }],
            max_tokens: 30
        });
        expect(completion).toBeDefined();
        expect(completion.choices).toBeDefined();
        expect(completion.choices.length).toBeGreaterThan(0);
    });
    test('model: mistralai/Mistral-7B-Instruct-v0.1', async () => {
        const completion = await client.chat.completions.create({
            model: 'mistralai/Mistral-7B-Instruct-v0.1',
            messages: [{ role: 'user', content: 'Say this is a test' }]
        });
        expect(completion).toBeDefined();
        expect(completion.choices).toBeDefined();
        expect(completion.choices.length).toBeGreaterThan(0);
    });
    test('model: google/gemma-7b-it', async () => {
        const completion = await client.chat.completions.create({
            model: 'google/gemma-7b-it',
            messages: [{ role: 'user', content: 'Say this is a test' }],
            max_tokens: 25
        });
        expect(completion).toBeDefined();
        expect(completion.choices).toBeDefined();
        expect(completion.choices.length).toBeGreaterThan(0);
    });
    test('model: meta-llama/Meta-Llama-3-8B-Instruct', async () => {
        const completion = await client.chat.completions.create({
            model: 'meta-llama/Meta-Llama-3-8B-Instruct',
            messages: [{ role: 'user', content: 'Say this is a test' }],
            max_tokens: 25
        });
        expect(completion).toBeDefined();
        expect(completion.choices).toBeDefined();
        expect(completion.choices.length).toBeGreaterThan(0);
    });
    test('model: meta-llama/Meta-Llama-3-70B-Instruct', async () => {
        const completion = await client.chat.completions.create({
            model: 'meta-llama/Meta-Llama-3-70B-Instruct',
            messages: [{ role: 'user', content: 'Say this is a test' }],
            max_tokens: 25
        });
        expect(completion).toBeDefined();
        expect(completion.choices).toBeDefined();
        expect(completion.choices.length).toBeGreaterThan(0);
    });
    test('model: mistralai/Mixtral-8x7B-Instruct-v0.1', async () => {
        const completion = await client.chat.completions.create({
            model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
            messages: [{ role: 'user', content: 'Say this is a test' }],
            max_tokens: 25
        });
        expect(completion).toBeDefined();
        expect(completion.choices).toBeDefined();
        expect(completion.choices.length).toBeGreaterThan(0);
    });
    test('model: mistralai/Mixtral-8x22B-Instruct-v0.1', async () => {
        const completion = await client.chat.completions.create({
            model: 'mistralai/Mixtral-8x22B-Instruct-v0.1',
            messages: [{ role: 'user', content: 'Say this is a test' }],
            max_tokens: 25
        });
        expect(completion).toBeDefined();
        expect(completion.choices).toBeDefined();
        expect(completion.choices.length).toBeGreaterThan(0);
    });
    test('model: mlabonne/NeuralHermes-2.5-Mistral-7B', async () => {
        const completion = await client.chat.completions.create({
            model: 'mlabonne/NeuralHermes-2.5-Mistral-7B',
            messages: [{ role: 'user', content: 'Say this is a test' }],
            max_tokens: 25
        });
        expect(completion).toBeDefined();
        expect(completion.choices).toBeDefined();
        expect(completion.choices.length).toBeGreaterThan(0);
    });
});