@nullplatform/llm-gateway
Version:
LLM Gateway Core - Main proxy server
33 lines • 1.22 kB
TypeScript
import { AxiosInstance } from 'axios';
import { IProvider, ILLMRequest, ILLMResponse, IChunkEmitter, IPluginPhaseExecution } from '@nullplatform/llm-gateway-sdk';
import { Logger } from '../utils/logger.js';
import { OpenAIRequest } from "../adapters/openai";
/**
 * Configuration object accepted by {@link OpenAIProvider.configure}.
 *
 * NOTE(review): this is a generated declaration file — semantics below that are
 * not visible here are marked as assumptions to confirm against the implementation.
 */
export interface OpenAIProviderConfig {
/** Presumably forwards the caller-supplied model instead of forcing {@link model} when true — TODO confirm in implementation. */
bypassModel: boolean;
/** Optional override of the provider's API base URL (e.g. for proxies or OpenAI-compatible endpoints) — assumption; verify default. */
baseUrl?: string;
/** Default model identifier — presumably applied when an incoming request omits one; verify against implementation. */
model?: string;
/** API key used to authenticate with the provider. Required. */
apiKey: string;
/** Maximum number of retry attempts for failed requests — default not visible in this declaration. */
retryAttempts?: number;
/** Delay between retry attempts — units (likely milliseconds) not visible here; confirm in implementation. */
retryDelay?: number;
}
/**
 * Provider adapter that routes gateway LLM requests to the OpenAI API.
 *
 * Declarations only — the runtime behavior lives in the emitted `openai.js`;
 * comments here describe the contract visible from this `.d.ts` and hedge
 * anything the declaration cannot establish.
 */
export declare class OpenAIProvider implements IProvider {
/** Provider identifier ("openai"), presumably used by the gateway to select this provider. */
readonly name = "openai";
// Stored configuration from configure(); type erased to `any` by the emitter.
private config;
// HTTP client instance; exposed via getHttpClient() as an AxiosInstance.
private client;
/** Logger injected via the constructor; `protected` so subclasses may reuse it. */
protected logger: Logger;
constructor(logger: Logger);
/** Applies the given configuration (credentials, model, retry policy) — async, so it may perform setup work; confirm side effects in implementation. */
configure(config: OpenAIProviderConfig): Promise<void>;
/** Returns the underlying Axios HTTP client (e.g. for interception or testing). */
getHttpClient(): AxiosInstance;
// Presumably installs Axios request/response interceptors on `client` — confirm.
private setupInterceptors;
/** Executes a streaming request, emitting chunks through `chunkEmitter`; may resolve with plugin-phase execution details or void. */
executeStreaming(request: ILLMRequest, chunkEmitter: IChunkEmitter): Promise<IPluginPhaseExecution | void>;
// Helpers for the streaming path: parse one SSE/stream line, emit an intermediate chunk, emit the terminal chunk.
private processStreamLine;
private emitStreamChunk;
private emitFinalChunk;
/** Translates a gateway-level ILLMRequest into the OpenAI wire-format request. */
buildOpenAIRequest(request: ILLMRequest): OpenAIRequest;
/** Executes a non-streaming request and returns the normalized gateway response. */
execute(request: ILLMRequest): Promise<ILLMResponse>;
// Retry/backoff wrapper — presumably driven by retryAttempts/retryDelay from config; confirm.
private retryRequest;
// Maps provider/HTTP errors into the gateway's error shape — semantics not visible here.
private transformError;
// Presumably strips secrets (e.g. apiKey) from a request before logging — TODO confirm.
private sanitizeRequest;
// Promise-based delay helper, likely used by retryRequest.
private sleep;
}
//# sourceMappingURL=openai.d.ts.map