UNPKG

@knn_labs/conduit-core-client

Version:

Official Node.js client library for the Conduit Core API — an OpenAI-compatible, multi-provider LLM gateway

483 lines (469 loc) 16.4 kB
import { AxiosInstance, AxiosRequestConfig } from 'axios';

/** Options for constructing a Conduit client. Only `apiKey` is required. */
interface ClientConfig {
  /** API key used to authenticate against the Conduit Core API. */
  apiKey: string;
  /** Base URL of the Conduit deployment; a default presumably applies when omitted — confirm in implementation. */
  baseURL?: string;
  /** Request timeout (assumed milliseconds, axios convention — confirm in implementation). */
  timeout?: number;
  /** Maximum number of automatic retries for failed requests. */
  maxRetries?: number;
  /** Extra headers attached to every request. */
  headers?: Record<string, string>;
  /** Enables debug behavior (presumably verbose logging) in the client. */
  debug?: boolean;
}

/** Per-call options that augment or override client-wide configuration. */
interface RequestOptions {
  /** Abort signal for cancelling the in-flight request. */
  signal?: AbortSignal;
  /** Extra headers for this request only. */
  headers?: Record<string, string>;
  /** Timeout override for this request. */
  timeout?: number;
  /** Correlation identifier, presumably propagated for request tracing — verify against server. */
  correlationId?: string;
}

/** Retry policy used by BaseClient; field names suggest exponential backoff (delay grows by `factor` up to `maxDelay`). */
interface RetryConfig {
  maxRetries: number;
  initialDelay: number;
  maxDelay: number;
  factor: number;
}

/**
 * Shared HTTP plumbing for all service clients: holds the axios instance,
 * the fully-resolved configuration, and the retry policy, and exposes a
 * typed `request` helper to subclasses. Interceptor setup, the retry loop,
 * and error mapping are private and not visible from this declaration.
 */
declare abstract class BaseClient {
  protected readonly client: AxiosInstance;
  protected readonly config: Required<ClientConfig>;
  protected readonly retryConfig: RetryConfig;
  constructor(config: ClientConfig);
  private setupInterceptors;
  /** Performs an HTTP request and resolves with the response body typed as `T`. */
  protected request<T>(config: AxiosRequestConfig, options?: RequestOptions): Promise<T>;
  private executeWithRetry;
  private shouldRetry;
  private calculateDelay;
  private sleep;
  private handleError;
  private isErrorResponse;
}

/** Token accounting, mirroring the OpenAI `usage` object. */
interface Usage {
  prompt_tokens: number;
  completion_tokens: number;
  total_tokens: number;
}

/** Requested output format for chat completions. */
interface ResponseFormat {
  type: 'text' | 'json_object';
}

/** A function call emitted by the model; `arguments` is a string (JSON-encoded per OpenAI convention). */
interface FunctionCall {
  name: string;
  arguments: string;
}

/** A tool invocation emitted by the model; only `function` tools are declared here. */
interface ToolCall {
  id: string;
  type: 'function';
  function: FunctionCall;
}

/** Declaration of a function the model may call; `parameters` is a free-form object (JSON-Schema style per OpenAI convention). */
interface FunctionDefinition {
  name: string;
  description?: string;
  parameters?: Record<string, unknown>;
}

/** A tool made available to the model. */
interface Tool {
  type: 'function';
  function: FunctionDefinition;
}

/** Why generation stopped; `null` typically while a stream is still in progress. */
type FinishReason = 'stop' | 'length' | 'tool_calls' | 'content_filter' | null;

/** Conduit-specific timing metadata attached to responses. */
interface PerformanceMetrics {
  provider_name: string;
  provider_response_time_ms: number;
  total_response_time_ms: number;
  tokens_per_second?: number;
}

/** OpenAI-style error envelope returned by the API. */
interface ErrorResponse {
  error: {
    message: string;
    type: string;
    param?: string | null;
    code?: string | null;
  };
}

/** One message in a chat conversation. `content` may be null (e.g. assistant messages that carry only tool calls). */
interface ChatCompletionMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string | null;
  name?: string;
  tool_calls?: ToolCall[];
  /** For role 'tool': the id of the ToolCall being answered. */
  tool_call_id?: string;
}

/** Request body for the chat completions endpoint (OpenAI-compatible). */
interface ChatCompletionRequest {
  model: string;
  messages: ChatCompletionMessage[];
  frequency_penalty?: number;
  logit_bias?: Record<string, number>;
  logprobs?: boolean;
  top_logprobs?: number;
  max_tokens?: number;
  /** Number of alternative completions to generate. */
  n?: number;
  presence_penalty?: number;
  response_format?: ResponseFormat;
  seed?: number;
  stop?: string | string[];
  /** When true, ChatService.create returns an async generator of chunks instead of one response. */
  stream?: boolean;
  temperature?: number;
  top_p?: number;
  tools?: Tool[];
  tool_choice?: 'none' | 'auto' | {
    type: 'function';
    function: {
      name: string;
    };
  };
  /** End-user identifier for abuse monitoring. */
  user?: string;
}

/** A single generated alternative within a ChatCompletionResponse. */
interface ChatCompletionChoice {
  index: number;
  message: ChatCompletionMessage;
  logprobs?: unknown;
  finish_reason: FinishReason;
}

/** Non-streaming chat completion result. */
interface ChatCompletionResponse {
  id: string;
  object: 'chat.completion';
  created: number;
  model: string;
  system_fingerprint?: string;
  choices: ChatCompletionChoice[];
  usage: Usage;
  performance?: PerformanceMetrics;
}

/** A choice inside a streamed chunk; `delta` carries only the fields that changed. */
interface ChatCompletionChunkChoice {
  index: number;
  delta: Partial<ChatCompletionMessage>;
  logprobs?: unknown;
  finish_reason: FinishReason;
}

/** One event of a streaming chat completion. */
interface ChatCompletionChunk {
  id: string;
  object: 'chat.completion.chunk';
  created: number;
  model: string;
  system_fingerprint?: string;
  choices: ChatCompletionChunkChoice[];
  usage?: Usage;
  performance?: PerformanceMetrics;
}

/**
 * Chat completions service. The `create` overloads select the return type
 * from the request's `stream` flag: a full ChatCompletionResponse when
 * `stream` is false or omitted, an async generator of chunks when true.
 */
declare class ChatService {
  private readonly client;
  constructor(client: BaseClient);
  create(request: ChatCompletionRequest & {
    stream?: false;
  }, options?: RequestOptions): Promise<ChatCompletionResponse>;
  create(request: ChatCompletionRequest & {
    stream: true;
  }, options?: RequestOptions): Promise<AsyncGenerator<ChatCompletionChunk, void, unknown>>;
  private createCompletion;
  private createStream;
}

/** A model exposed by the gateway (OpenAI `/models` shape). */
interface Model {
  id: string;
  object: 'model';
  created: number;
  owned_by: string;
}

/** Wire format of the model-list endpoint. */
interface ModelsResponse {
  object: 'list';
  data: Model[];
}

/**
 * Model discovery service with a client-side TTL cache (see `cacheTTL`,
 * `clearCache`, and the `useCache` option on `list`).
 */
declare class ModelsService {
  private readonly client;
  private cachedModels?;
  private cacheExpiry?;
  private readonly cacheTTL;
  constructor(client: BaseClient);
  /** Lists available models; pass `useCache: false` to bypass the cache. */
  list(options?: RequestOptions & {
    useCache?: boolean;
  }): Promise<Model[]>;
  /** Resolves a single model by id, or null when it does not exist. */
  get(modelId: string, options?: RequestOptions): Promise<Model | null>;
  /** True when the model id is known to the gateway. */
  exists(modelId: string, options?: RequestOptions): Promise<boolean>;
  /** Drops any cached model list so the next `list` call refetches. */
  clearCache(): void;
  private isCacheValid;
}

/**
 * Image generation models and interfaces for the OpenAI-compatible API.
 */
interface ImageGenerationRequest {
  /**
   * A text description of the desired image(s). The maximum length is
   * 1000 characters for dall-e-2 and 4000 characters for dall-e-3.
   */
  prompt: string;
  /** The model to use for image generation. */
  model?: string;
  /** The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported. */
  n?: number;
  /**
   * The quality of the image that will be generated. `hd` creates images with
   * finer details and greater consistency across the image. Only supported for dall-e-3.
   */
  quality?: 'standard' | 'hd';
  /** The format in which the generated images are returned. Must be one of url or b64_json. */
  response_format?: 'url' | 'b64_json';
  /**
   * The size of the generated images. Must be one of 256x256, 512x512, or
   * 1024x1024 for dall-e-2; one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models.
   */
  size?: '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792';
  /**
   * The style of the generated images: vivid or natural. Vivid leans towards
   * hyper-real and dramatic images; natural produces more natural,
   * less hyper-real looking images. Only supported for dall-e-3.
   */
  style?: 'vivid' | 'natural';
  /** A unique identifier representing your end-user, which can help monitor and detect abuse. */
  user?: string;
}

/** One generated image; which of `url`/`b64_json` is populated depends on `response_format`. */
interface ImageData {
  /** The base64-encoded JSON of the generated image, if response_format is b64_json. */
  b64_json?: string;
  /** The URL of the generated image, if response_format is url (default). */
  url?: string;
  /** The prompt that was used to generate the image, if there was any revision to the prompt. */
  revised_prompt?: string;
}

/** Response envelope shared by generation, edit, and variation calls. */
interface ImageGenerationResponse {
  /** The Unix timestamp (in seconds) when the image was created. */
  created: number;
  /** The list of generated images. */
  data: ImageData[];
}

interface ImageEditRequest {
  /**
   * The image to edit. Must be a valid PNG file, less than 4MB, and square.
   * If mask is not provided, image must have transparency, which will be used as the mask.
   */
  image: File | Blob;
  /** A text description of the desired image(s). The maximum length is 1000 characters. */
  prompt: string;
  /**
   * An additional image whose fully transparent areas (e.g. where alpha is zero)
   * indicate where image should be edited. Must be a valid PNG file, less than
   * 4MB, and have the same dimensions as image.
   */
  mask?: File | Blob;
  /** The model to use for image editing. Only dall-e-2 is supported at this time. */
  model?: string;
  /** The number of images to generate. Must be between 1 and 10. */
  n?: number;
  /** The format in which the generated images are returned. Must be one of url or b64_json. */
  response_format?: 'url' | 'b64_json';
  /** The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. */
  size?: '256x256' | '512x512' | '1024x1024';
  /** A unique identifier representing your end-user, which can help monitor and detect abuse. */
  user?: string;
}

interface ImageVariationRequest {
  /** The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. */
  image: File | Blob;
  /** The model to use for image variation. Only dall-e-2 is supported at this time. */
  model?: string;
  /** The number of images to generate. Must be between 1 and 10. */
  n?: number;
  /** The format in which the generated images are returned. Must be one of url or b64_json. */
  response_format?: 'url' | 'b64_json';
  /** The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. */
  size?: '256x256' | '512x512' | '1024x1024';
  /** A unique identifier representing your end-user, which can help monitor and detect abuse. */
  user?: string;
}

/** Edit and variation calls reuse the generation response shape. */
type ImageEditResponse = ImageGenerationResponse;
type ImageVariationResponse = ImageGenerationResponse;

/**
 * Supported image generation models.
 */
declare const IMAGE_MODELS: {
  readonly DALL_E_2: "dall-e-2";
  readonly DALL_E_3: "dall-e-3";
  readonly MINIMAX_IMAGE: "minimax-image";
};
/** Union of the model-id strings in IMAGE_MODELS. */
type ImageModel = typeof IMAGE_MODELS[keyof typeof IMAGE_MODELS];

/**
 * Model-specific capabilities and constraints, keyed by model id.
 */
declare const IMAGE_MODEL_CAPABILITIES: {
  readonly "dall-e-2": {
    readonly maxPromptLength: 1000;
    readonly supportedSizes: readonly ["256x256", "512x512", "1024x1024"];
    readonly supportedQualities: readonly ["standard"];
    readonly supportedStyles: readonly [];
    readonly maxImages: 10;
    readonly supportsEdit: true;
    readonly supportsVariation: true;
  };
  readonly "dall-e-3": {
    readonly maxPromptLength: 4000;
    readonly supportedSizes: readonly ["1024x1024", "1792x1024", "1024x1792"];
    readonly supportedQualities: readonly ["standard", "hd"];
    readonly supportedStyles: readonly ["vivid", "natural"];
    readonly maxImages: 1;
    readonly supportsEdit: false;
    readonly supportsVariation: false;
  };
  readonly "minimax-image": {
    readonly maxPromptLength: 2000;
    readonly supportedSizes: readonly ["1024x1024", "1792x1024", "1024x1792"];
    readonly supportedQualities: readonly ["standard", "hd"];
    readonly supportedStyles: readonly ["vivid", "natural"];
    readonly maxImages: 4;
    readonly supportsEdit: false;
    readonly supportsVariation: false;
  };
};

/**
 * Default values for image generation requests.
 */
declare const IMAGE_DEFAULTS: {
  readonly model: "dall-e-3";
  readonly n: 1;
  readonly quality: "standard";
  readonly response_format: "url";
  readonly size: "1024x1024";
  readonly style: "vivid";
};

/** Image generation, editing, and variation endpoints. */
declare class ImagesService {
  private readonly client;
  constructor(client: BaseClient);
  /**
   * Creates an image given a text prompt.
   * @param request The image generation request
   * @param options Optional request options
   * @returns Promise resolving to image generation response
   */
  generate(request: ImageGenerationRequest, options?: RequestOptions): Promise<ImageGenerationResponse>;
  /**
   * Creates an edited or extended image given an original image and a prompt.
   * @param request The image edit request
   * @param options Optional request options
   * @returns Promise resolving to image edit response
   */
  edit(request: ImageEditRequest, options?: RequestOptions): Promise<ImageEditResponse>;
  /**
   * Creates a variation of a given image.
   * @param request The image variation request
   * @param options Optional request options
   * @returns Promise resolving to image variation response
   */
  createVariation(request: ImageVariationRequest, options?: RequestOptions): Promise<ImageVariationResponse>;
}

/** Top-level client: groups the chat, images, and models services over one BaseClient. */
declare class ConduitCoreClient extends BaseClient {
  readonly chat: {
    completions: ChatService;
  };
  readonly images: ImagesService;
  readonly models: ModelsService;
  constructor(config: ClientConfig);
  /** Convenience factory for the common apiKey (+ optional baseURL) case. */
  static fromApiKey(apiKey: string, baseURL?: string): ConduitCoreClient;
}

/** Base error for all client failures; carries the OpenAI-style error fields. */
declare class ConduitError extends Error {
  readonly statusCode?: number;
  readonly code?: string;
  readonly type?: string;
  readonly param?: string;
  constructor(message: string, statusCode?: number, code?: string, type?: string, param?: string);
  /** Builds a ConduitError from an API error envelope. */
  static fromErrorResponse(response: ErrorResponse, statusCode?: number): ConduitError;
}

/** Raised on authentication failures (presumably HTTP 401 — confirm in implementation). */
declare class AuthenticationError extends ConduitError {
  constructor(message?: string);
}

/** Raised when rate-limited; `retryAfter` is the suggested wait (units follow the server header — confirm). */
declare class RateLimitError extends ConduitError {
  readonly retryAfter?: number;
  constructor(message?: string, retryAfter?: number);
}

/** Raised for invalid request parameters; `param` names the offending field. */
declare class ValidationError extends ConduitError {
  constructor(message: string, param?: string);
}

/** Raised for transport-level failures. */
declare class NetworkError extends ConduitError {
  constructor(message?: string);
}

/** Raised for failures while consuming a streaming response. */
declare class StreamError extends ConduitError {
  constructor(message?: string);
}

/**
 * Model capability types for Core client.
 */
declare enum CoreModelCapability {
  CHAT = "chat",
  VISION = "vision",
  IMAGE_GENERATION = "image-generation",
  IMAGE_EDIT = "image-edit",
  IMAGE_VARIATION = "image-variation"
}
/**
 * Check if a model supports a specific capability.
 * @param modelId The model identifier
 * @param capability The capability to check for
 * @returns True if the model supports the capability
 */
declare function modelSupportsCapability(modelId: string, capability: CoreModelCapability): boolean;
/**
 * Get all capabilities supported by a model.
 * @param modelId The model identifier
 * @returns Array of supported capabilities
 */
declare function getModelCapabilities(modelId: string): CoreModelCapability[];
/**
 * Validate that a request is compatible with the specified model.
 * @param modelId The model identifier
 * @param requestType The type of request being made
 * @returns Validation result with any errors, plus optional remediation suggestions
 */
declare function validateModelCompatibility(modelId: string, requestType: 'chat' | 'image-generation' | 'image-edit' | 'image-variation'): {
  isValid: boolean;
  errors: string[];
  suggestions?: string[];
};
/**
 * Get optimal model recommendations for a specific capability.
 * @param capability The desired capability
 * @param preferences Optional preferences for model selection
 * @returns Array of recommended model IDs, ordered by preference
 */
declare function getRecommendedModels(capability: CoreModelCapability, preferences?: {
  prioritizeQuality?: boolean;
  prioritizeSpeed?: boolean;
  prioritizeCost?: boolean;
}): string[];
/**
 * Get a user-friendly display name for a capability.
 * @param capability The capability to get the display name for
 * @returns Human-readable display name
 */
declare function getCapabilityDisplayName(capability: CoreModelCapability): string;
/**
 * Check if two models are functionally equivalent for a given capability.
 * @param modelA First model to compare
 * @param modelB Second model to compare
 * @param capability The capability to compare for
 * @returns True if models are equivalent for the capability
 */
declare function areModelsEquivalent(modelA: string, modelB: string, capability: CoreModelCapability): boolean;

export { AuthenticationError, type ChatCompletionChoice, type ChatCompletionChunk, type ChatCompletionChunkChoice, type ChatCompletionMessage, type ChatCompletionRequest, type ChatCompletionResponse, type ClientConfig, ConduitCoreClient, ConduitError, CoreModelCapability, type ErrorResponse, type FinishReason, type FunctionCall, type FunctionDefinition, IMAGE_DEFAULTS, IMAGE_MODELS, IMAGE_MODEL_CAPABILITIES, type ImageData, type ImageEditRequest, type ImageEditResponse, type ImageGenerationRequest, type ImageGenerationResponse, type ImageModel, type ImageVariationRequest, type ImageVariationResponse, type Model, type ModelsResponse, NetworkError, type PerformanceMetrics, RateLimitError, type RequestOptions, type ResponseFormat, StreamError, type Tool, type ToolCall, type Usage, ValidationError, areModelsEquivalent, getCapabilityDisplayName, getModelCapabilities, getRecommendedModels, modelSupportsCapability, validateModelCompatibility };