// @nullplatform/llm-gateway
// OpenAIApiAdapter — OpenAI-compatible chat-completions adapter for the LLM Gateway.
// Compiled JavaScript output (see openai.js.map); decorator boilerplate below is
// emitted by the TypeScript compiler.
var __esDecorate = (this && this.__esDecorate) || function (ctor, descriptorIn, decorators, contextIn, initializers, extraInitializers) {
function accept(f) { if (f !== void 0 && typeof f !== "function") throw new TypeError("Function expected"); return f; }
var kind = contextIn.kind, key = kind === "getter" ? "get" : kind === "setter" ? "set" : "value";
var target = !descriptorIn && ctor ? contextIn["static"] ? ctor : ctor.prototype : null;
var descriptor = descriptorIn || (target ? Object.getOwnPropertyDescriptor(target, contextIn.name) : {});
var _, done = false;
for (var i = decorators.length - 1; i >= 0; i--) {
var context = {};
for (var p in contextIn) context[p] = p === "access" ? {} : contextIn[p];
for (var p in contextIn.access) context.access[p] = contextIn.access[p];
context.addInitializer = function (f) { if (done) throw new TypeError("Cannot add initializers after decoration has completed"); extraInitializers.push(accept(f || null)); };
var result = (0, decorators[i])(kind === "accessor" ? { get: descriptor.get, set: descriptor.set } : descriptor[key], context);
if (kind === "accessor") {
if (result === void 0) continue;
if (result === null || typeof result !== "object") throw new TypeError("Object expected");
if (_ = accept(result.get)) descriptor.get = _;
if (_ = accept(result.set)) descriptor.set = _;
if (_ = accept(result.init)) initializers.unshift(_);
}
else if (_ = accept(result)) {
if (kind === "field") initializers.unshift(_);
else descriptor[key] = _;
}
}
if (target) Object.defineProperty(target, contextIn.name, descriptor);
done = true;
};
var __runInitializers = (this && this.__runInitializers) || function (thisArg, initializers, value) {
var useValue = arguments.length > 2;
for (var i = 0; i < initializers.length; i++) {
value = useValue ? initializers[i].call(thisArg, value) : initializers[i].call(thisArg);
}
return useValue ? value : void 0;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIApiAdapter = void 0;
const llm_gateway_sdk_1 = require("@nullplatform/llm-gateway-sdk");
let OpenAIApiAdapter = (() => {
let _classDecorators = [(0, llm_gateway_sdk_1.ExtensionMetadata)({
name: 'openai',
description: 'OpenAI API adapter for LLM Gateway',
})];
let _classDescriptor;
let _classExtraInitializers = [];
let _classThis;
var OpenAIApiAdapter = class {
static { _classThis = this; }
static {
const _metadata = typeof Symbol === "function" && Symbol.metadata ? Object.create(null) : void 0;
__esDecorate(null, _classDescriptor = { value: _classThis }, _classDecorators, { kind: "class", name: _classThis.name, metadata: _metadata }, null, _classExtraInitializers);
OpenAIApiAdapter = _classThis = _classDescriptor.value;
if (_metadata) Object.defineProperty(_classThis, Symbol.metadata, { enumerable: true, configurable: true, writable: true, value: _metadata });
__runInitializers(_classThis, _classExtraInitializers);
}
name = 'openai';
basePaths = ['/v1/chat/completions', '/chat/completions'];
configure(config) {
//Do Nothing
return;
}
async transformInput(request) {
// Validate required fields
if (!request.model) {
throw new Error('Model is required');
}
if (!Array.isArray(request.messages) || request.messages.length === 0) {
throw new Error('Messages array is required and must be non-empty');
}
// Map messages to LLM format (no transformation needed unless normalization is required)
const messages = request.messages.map(msg => ({
role: msg.role,
content: msg.content,
name: msg.name,
tool_calls: msg.tool_calls,
tool_call_id: msg.tool_call_id
}));
// Map tools if present
const tools = request.tools?.map(tool => ({
type: tool.type,
function: {
name: tool.function.name,
description: tool.function.description,
parameters: tool.function.parameters
}
}));
// Construct metadata, preserving custom OpenAI-specific values
const metadata = {
user_id: request.user,
original_provider: 'openai',
custom: {
logit_bias: request.logit_bias,
logprobs: request.logprobs,
top_logprobs: request.top_logprobs,
n: request.n,
response_format: request.response_format,
seed: request.seed
}
};
// Build final LLMRequest
const llmRequest = {
messages,
model: request.model,
temperature: request.temperature,
max_tokens: request.max_tokens,
top_p: request.top_p,
frequency_penalty: request.frequency_penalty,
presence_penalty: request.presence_penalty,
stop: request.stop,
stream: request.stream,
tools,
tool_choice: request.tool_choice,
target_provider: "", // To be filled dynamically if not statically set
metadata
};
return llmRequest;
}
async validate(request) {
if (!request.model || typeof request.model !== 'string') {
return 'Model must be a non-empty string';
}
if (!request.messages || !Array.isArray(request.messages)) {
return 'Messages must be an array';
}
if (request.messages.length === 0) {
return 'Messages array cannot be empty';
}
// Validate each message
for (const [index, message] of request.messages.entries()) {
if (!message.role || !['system', 'user', 'assistant', 'tool'].includes(message.role)) {
return `Message ${index}: role must be one of: system, user, assistant, tool`;
}
if (!message.content || typeof message.content !== 'string') {
return `Message ${index}: content must be a non-empty string`;
}
// Additional validations for tool messages
if (message.role === 'tool' && !message.tool_call_id) {
return `Message ${index}: tool messages must have tool_call_id`;
}
}
// Validate temperature
if (request.temperature !== undefined) {
if (typeof request.temperature !== 'number' || request.temperature < 0 || request.temperature > 2) {
return 'Temperature must be a number between 0 and 2';
}
}
// Validate max_tokens
if (request.max_tokens !== undefined) {
if (typeof request.max_tokens !== 'number' || request.max_tokens < 1) {
return 'max_tokens must be a positive number';
}
}
return null; // No validation errors
}
async transformOutputChunk(processedInput, input, chunk, firstChunk, finalChunk, acummulated) {
let response = "";
if (chunk) {
response += `data: ${JSON.stringify({
id: chunk.id,
object: chunk.object,
created: chunk.created,
model: chunk.model,
choices: chunk?.content ? chunk.content.map((choice, index) => ({
index,
delta: {
role: firstChunk ? 'assistant' : null, //open ai streaming ever answer assistant
content: choice.delta?.tool_calls?.length > 0 ? null : choice.delta?.content || null,
tool_calls: choice.delta?.tool_calls?.map(tool => ({
id: tool.id,
type: tool.type,
index: choice.delta?.tool_calls?.length - 1,
function: {
name: tool?.function?.name,
arguments: tool?.function?.arguments
}
}))
},
logprobs: choice.logprobs,
finish_reason: choice.finish_reason ?? null
})) : undefined,
usage: chunk.usage
})}\n\n`;
}
if (finalChunk) {
response += `data: [DONE]\n\n`;
}
return Buffer.from(response);
}
async transformOutput(processedInput, input, response) {
const messages = response.content.map((choice, index) => {
let message;
if (choice.message.tool_calls?.length > 0) {
const tool_calls = choice.message.tool_calls?.map(tool => ({
id: tool.id,
type: 'function',
function: {
name: tool.function.name,
arguments: typeof tool.function.arguments === 'string'
? tool.function.arguments
: JSON.stringify(tool.function.arguments)
}
}));
message = {
role: 'assistant',
content: null,
tool_calls
};
}
else {
message = {
role: choice.message.role,
content: choice.message.content ?? null,
};
}
let messagesResp = {
message,
};
return {
index,
...messagesResp,
logprobs: choice.logprobs,
finish_reason: choice.finish_reason ?? null,
};
});
const transformed = {
id: response.id,
object: processedInput.stream ? 'chat.completion.chunk' : 'chat.completion',
created: response.created,
model: response.model,
choices: messages,
usage: response.usage,
system_fingerprint: response.system_fingerprint,
};
return transformed;
}
async getNativeAdapters() {
return [
{
path: "/models",
method: 'get',
doRequest: async (request, response) => {
response.json({
"object": "list",
"data": [
{
"id": "gpt-4-0613",
"object": "model",
"created": 1686588896,
"owned_by": "openai"
},
{
"id": "gpt-4",
"object": "model",
"created": 1687882411,
"owned_by": "openai"
},
{
"id": "gpt-3.5-turbo",
"object": "model",
"created": 1677610602,
"owned_by": "openai"
},
{
"id": "gpt-4o-audio-preview-2025-06-03",
"object": "model",
"created": 1748908498,
"owned_by": "system"
},
{
"id": "gpt-4.1-nano",
"object": "model",
"created": 1744321707,
"owned_by": "system"
},
{
"id": "gpt-image-1",
"object": "model",
"created": 1745517030,
"owned_by": "system"
},
{
"id": "codex-mini-latest",
"object": "model",
"created": 1746673257,
"owned_by": "system"
},
{
"id": "gpt-4o-realtime-preview-2025-06-03",
"object": "model",
"created": 1748907838,
"owned_by": "system"
},
{
"id": "davinci-002",
"object": "model",
"created": 1692634301,
"owned_by": "system"
},
{
"id": "babbage-002",
"object": "model",
"created": 1692634615,
"owned_by": "system"
},
{
"id": "gpt-3.5-turbo-instruct",
"object": "model",
"created": 1692901427,
"owned_by": "system"
}
]
});
}
}
];
}
};
return OpenAIApiAdapter = _classThis;
})();
exports.OpenAIApiAdapter = OpenAIApiAdapter;
//# sourceMappingURL=openai.js.map
;