@nullplatform/llm-gateway
LLM Gateway Core - Main proxy server
"use strict";
// packages/core/basic-apikey-auth/providers/openai.ts
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIProvider = void 0;
const axios_1 = __importDefault(require("axios"));
const llm_gateway_sdk_1 = require("@nullplatform/llm-gateway-sdk");
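/**
 * Provider adapter that forwards chat-completion requests to the OpenAI API
 * (or an OpenAI-compatible endpoint via `baseUrl`), handling API-key auth,
 * retry with exponential backoff, and SSE streaming.
 */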
class OpenAIProvider {
name = 'openai';
config;
client;
logger;
constructor(logger) {
this.logger = logger;
}
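/**
 * Merges the user config with defaults (OpenAI base URL, 3 retry attempts,
 * 1s base delay), builds an axios client that sends the API key as a Bearer
 * token, and installs the logging/error interceptors.
 */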
configure(config) {
this.config = {
baseUrl: config.baseUrl || 'https://api.openai.com/v1',
retryAttempts: config.retryAttempts || 3,
retryDelay: config.retryDelay || 1000, // default to 1 second
...config
};
const headers = {
'Content-Type': 'application/json',
'User-Agent': 'llm-gateway/1.0.0',
};
if (this.config.apiKey) {
headers['Authorization'] = `Bearer ${this.config.apiKey}`;
}
this.client = axios_1.default.create({
baseURL: this.config.baseUrl,
headers
});
this.setupInterceptors();
}
getHttpClient() {
return this.client;
}
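/**
 * Attaches request/response interceptors that log traffic (with the
 * Authorization header redacted) and normalize failures via transformError().
 */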
setupInterceptors() {
// Request interceptor for logging
this.client.interceptors.request.use((config) => {
this.logger.debug('OpenAI request', {
method: config.method,
url: config.url,
headers: { ...config.headers, Authorization: '[REDACTED]' }
});
return config;
}, (error) => {
this.logger.error('OpenAI request error', { error });
return Promise.reject(error);
});
// Response interceptor for logging and error handling
this.client.interceptors.response.use((response) => {
this.logger.debug('OpenAI response', {
status: response.status,
headers: response.headers,
data: response.data
});
return response;
}, (error) => {
this.logger.error('OpenAI response error', {
status: error.response?.status,
data: error.response?.data,
message: error.message
});
return Promise.reject(this.transformError(error));
});
}
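/**
 * Streams a chat completion over SSE. The request is sent with
 * `stream_options.include_usage` so the final chunk carries token usage;
 * incoming data is buffered and split on newlines so partial SSE lines are
 * carried over to the next data event. Failures are wrapped in LLMModelError.
 */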
async executeStreaming(request, chunkEmitter) {
const httpRequest = this.buildOpenAIRequest(request);
httpRequest.stream_options = { include_usage: true };
const endpoint = '/chat/completions';
let lastPluginExecution = null;
try {
const response = await this.client.post(endpoint, httpRequest, {
responseType: 'stream'
});
let buffer = '';
let lastChunk = null;
response.data.on('data', async (chunk) => {
buffer += chunk.toString('utf-8');
// Process complete lines
const lines = buffer.split('\n');
// Keep the last potentially incomplete line in buffer
buffer = lines.pop() || '';
for (const line of lines) {
if (line.trim()) {
lastPluginExecution = await this.processStreamLine(line.trim(), chunkEmitter, lastChunk);
}
}
});
response.data.on('end', async () => {
// Process any remaining data in buffer
if (buffer.trim()) {
lastPluginExecution = await this.processStreamLine(buffer.trim(), chunkEmitter, lastChunk);
}
});
response.data.on('error', (error) => {
this.logger.error('Stream error', { error });
});
// Wait for stream to complete
await new Promise((resolve, reject) => {
response.data.on('end', () => { resolve(lastPluginExecution); });
response.data.on('error', (error) => { reject(error); });
});
}
catch (error) {
this.logger.error('Streaming request failed', { error });
throw new llm_gateway_sdk_1.LLMModelError(error);
}
}
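/**
 * Handles a single SSE line: ignores non-`data:` lines, emits the final chunk
 * on `[DONE]`, and otherwise parses the JSON payload and forwards it to the
 * chunk emitter. Parse failures are logged and skipped so the stream stays alive.
 */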
async processStreamLine(line, chunkEmitter, lastChunk) {
if (!line.startsWith('data: ')) {
return;
}
const data = line.slice(6); // Remove 'data: ' prefix
if (data === '[DONE]') {
await this.emitFinalChunk(chunkEmitter, lastChunk);
return;
}
try {
const parsedChunk = JSON.parse(data);
lastChunk = parsedChunk;
return await this.emitStreamChunk(chunkEmitter, parsedChunk);
}
catch (error) {
this.logger.error('Failed to parse OpenAI stream chunk', {
data,
error: error instanceof Error ? error.message : error
});
// Continue processing instead of throwing - streaming should be resilient
}
}
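/**
 * Maps a raw OpenAI stream chunk into the gateway response shape (id, model,
 * content, usage) and forwards it via chunkEmitter.onData() with the second
 * argument false (presumably the final-chunk flag).
 */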
async emitStreamChunk(chunkEmitter, parsedChunk) {
const choice = parsedChunk.choices?.[0];
const response = {
id: parsedChunk.id,
object: parsedChunk.object,
created: parsedChunk.created,
model: parsedChunk.model,
content: choice ? [{
index: choice.index,
logprobs: choice.logprobs,
finish_reason: choice.finish_reason,
message: choice.message,
delta: choice.delta
}] : undefined,
usage: parsedChunk.usage
};
return await chunkEmitter.onData(response, false);
}
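/**
 * Builds a terminal response from the supplied lastChunk (or null when none
 * was captured) and signals end of stream by passing true as the emitter's
 * second argument.
 */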
async emitFinalChunk(chunkEmitter, lastChunk) {
let finalResponse = null;
if (lastChunk) {
finalResponse = {
id: lastChunk.id,
object: lastChunk.object,
created: lastChunk.created,
model: lastChunk.model,
content: [{
index: lastChunk.choices[0].index,
logprobs: lastChunk.choices[0].logprobs,
finish_reason: lastChunk.choices[0].finish_reason,
message: lastChunk.choices[0].message,
delta: lastChunk.choices[0].delta
}],
usage: lastChunk.usage
};
}
return await chunkEmitter.onData(finalResponse, true);
}
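/**
 * Translates a gateway request into the OpenAI /chat/completions payload.
 * Unless `bypassModel` is set, the configured model overrides the requested
 * one; `top_p` is only forwarded when it falls strictly between 0 and 1.
 */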
buildOpenAIRequest(request) {
return {
model: this.config.bypassModel ? request.model : this.config.model,
messages: request.messages.map(msg => ({
role: msg.role,
content: msg.content,
name: msg.name,
tool_calls: msg.tool_calls,
tool_call_id: msg.tool_call_id
})),
temperature: request.temperature,
max_tokens: request.max_tokens,
top_p: request.top_p > 0 && request.top_p < 1 ? request.top_p : undefined,
frequency_penalty: request.frequency_penalty,
presence_penalty: request.presence_penalty,
stop: request.stop,
stream: request.stream,
tools: request.tools,
tool_choice: request.tool_choice,
};
}
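/**
 * Non-streaming chat completion: posts the request with retries and maps the
 * first choice of the OpenAI response into the gateway response shape.
 * Failures are logged with truncated message content and rethrown as LLMModelError.
 */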
async execute(request) {
const endpoint = '/chat/completions';
const httpRequest = this.buildOpenAIRequest(request);
try {
const response = await this.retryRequest(async () => {
return await this.client.post(endpoint, httpRequest);
});
return {
id: response.data.id,
object: response.data.object,
created: response.data.created,
model: response.data.model,
usage: response.data.usage,
content: [
{
...response.data.choices[0],
message: response.data.choices[0]?.message
}
]
};
}
catch (error) {
this.logger.error('OpenAI request failed', { error, request: this.sanitizeRequest(request) });
throw new llm_gateway_sdk_1.LLMModelError(error);
}
}
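/**
 * Retries the given request up to `retryAttempts` times with exponential
 * backoff (retryDelay * 2^(attempt - 1), i.e. 1s, 2s, 4s with the defaults).
 * 4xx responses are not retried.
 */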
async retryRequest(requestFn) {
let lastError;
for (let attempt = 1; attempt <= this.config.retryAttempts; attempt++) {
try {
return await requestFn();
}
catch (error) {
lastError = error;
// Don't retry on client errors (4xx)
if (error.response?.status >= 400 && error.response?.status < 500) {
throw error;
}
if (attempt < this.config.retryAttempts) {
const delay = this.config.retryDelay * Math.pow(2, attempt - 1); // exponential backoff
this.logger.warn(`OpenAI request failed, retrying in ${delay}ms`, {
attempt,
error: error.message
});
await this.sleep(delay);
}
}
}
throw lastError;
}
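/**
 * Converts axios errors into Error objects carrying the OpenAI status, type,
 * code, and param when the API returned a structured error body, and maps
 * ECONNABORTED to a timeout error.
 */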
transformError(error) {
if (error.response) {
const status = error.response.status;
const data = error.response.data;
// OpenAI specific error handling
if (data?.error) {
const openaiError = data.error;
const message = `OpenAI API Error (${status}): ${openaiError.message}`;
const transformedError = new Error(message);
transformedError.status = status;
transformedError.type = openaiError.type;
transformedError.code = openaiError.code;
transformedError.param = openaiError.param;
return transformedError;
}
return new Error(`OpenAI API Error (${status}): ${error.message}`);
}
if (error.code === 'ECONNABORTED') {
return new Error('OpenAI API request timeout');
}
return error;
}
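/**
 * Returns a copy of the request with message content truncated to 100
 * characters so logs don't include full prompts.
 */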
sanitizeRequest(request) {
// Remove sensitive data for logging
const sanitized = { ...request };
if (sanitized.messages) {
sanitized.messages = sanitized.messages.map((msg) => ({
...msg,
content: msg.content?.length > 100 ? msg.content.substring(0, 100) + '...' : msg.content
}));
}
return sanitized;
}
sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
}
exports.OpenAIProvider = OpenAIProvider;
//# sourceMappingURL=openai.js.map
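// Usage sketch: a minimal way to wire this provider up directly. It assumes a
// console-compatible logger (debug/warn/error) and an OpenAI-style request
// shape; inside the gateway the request would normally arrive through the
// proxy pipeline rather than being built by hand.
//
//   const { OpenAIProvider } = require('./openai');
//   const provider = new OpenAIProvider(console);
//   provider.configure({ apiKey: process.env.OPENAI_API_KEY, model: 'gpt-4o-mini' });
//   provider.execute({ messages: [{ role: 'user', content: 'Hello' }] })
//     .then((res) => console.log(res.content[0].message.content));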