/**
 * ai.libx.js
 * Unified API bridge for various AI models (LLMs, image/video generation, TTS, STT) — stateless, edge-compatible.
 * (233 lines, 201 loc, 6.37 kB — text/typescript)
 */
import { ChatOptions, ChatResponse, StreamChunk, ProviderConfig } from './types';
import { IProviderAdapter } from './types/provider';
import { validateChatOptions } from './utils/validation';
import { ModelNotFoundError, InvalidRequestError } from './utils/errors';
import { getProviderFromModel, isModelSupported, getModelInfo, resolveModel } from './models';
import { RequestLogger, getRequestLogger } from './utils/request-logger';
// Lazy import adapters
import { OpenAIAdapter } from './adapters/openai';
import { AnthropicAdapter } from './adapters/anthropic';
import { GoogleAdapter } from './adapters/google';
import { GroqAdapter } from './adapters/groq';
import { MistralAdapter } from './adapters/mistral';
import { CohereAdapter } from './adapters/cohere';
import { XAIAdapter } from './adapters/xai';
import { DeepSeekAdapter } from './adapters/deepseek';
import { AI21Adapter } from './adapters/ai21';
import { OpenRouterAdapter } from './adapters/openrouter';
import { CloudflareAdapter } from './adapters/cloudflare';
/**
 * Configuration accepted by the {@link AIClient} constructor.
 * All fields are optional; per-request options may override them.
 */
export interface AIClientConfig {
  /**
   * API keys per provider.
   * Example: { openai: 'sk-...', anthropic: 'sk-ant-...', google: '...' }
   */
  apiKeys?: Record<string, string>;

  /** Per-provider base URL overrides (optional, for custom endpoints). */
  baseUrls?: Record<string, string>;

  /** Cloudflare account ID (required for Cloudflare Workers AI). */
  cloudflareAccountId?: string;

  /** Enable request logging for metrics tracking. */
  enableLogging?: boolean;
}
/**
* Main AI client class providing unified access to multiple AI providers
*/
export class AIClient {
private config: AIClientConfig;
private adapters: Map<string, IProviderAdapter> = new Map();
private logger: RequestLogger;
constructor(config: AIClientConfig = {}) {
this.config = config;
this.logger = getRequestLogger(config.enableLogging || false);
}
/**
* Execute a chat completion request
* @param options Chat options including model, messages, and parameters
* @returns ChatResponse for non-streaming, AsyncIterable<StreamChunk> for streaming
*/
async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
// Validate options
validateChatOptions(options);
// Resolve model name (supports partial/alias matching)
const resolvedModel = resolveModel(options.model);
options.model = resolvedModel;
// Extract provider from model string
const provider = getProviderFromModel(resolvedModel);
if (!provider) {
throw new InvalidRequestError(
`Invalid model format: "${resolvedModel}". Expected format: "provider/model-name"`
);
}
// Check if model is supported
if (!isModelSupported(resolvedModel)) {
throw new ModelNotFoundError(resolvedModel);
}
// Check model capabilities and issues
const modelInfo = getModelInfo(resolvedModel);
if (modelInfo) {
// Check if model is enabled
if (modelInfo.enabled === false) {
throw new InvalidRequestError(
`Model "${resolvedModel}" is disabled and not available for use`
);
}
// Check if model supports chat
if (modelInfo.noChat) {
throw new InvalidRequestError(
`Model "${resolvedModel}" does not support chat completions (e.g., realtime/audio models)`
);
}
// Warn about known issues (but don't block)
if (modelInfo.knownIssues) {
console.warn(`⚠️ Warning: Model "${resolvedModel}" has known issues: ${modelInfo.knownIssues}`);
}
}
// Get or create adapter
const adapter = this.getAdapter(provider);
// Merge API keys from config if not provided in options
const chatOptions: ChatOptions = {
...options,
apiKey: options.apiKey || this.config.apiKeys?.[provider],
};
// Start request tracking
const tracker = this.logger.startRequest(provider, resolvedModel);
try {
// Execute chat
const result = await adapter.chat(chatOptions);
// Log successful request
if ('content' in result) {
const tokens = result.usage?.totalTokens;
this.logger.logRequest(tracker, true, tokens);
} else {
// Streaming - log without token count
this.logger.logRequest(tracker, true);
}
return result;
} catch (error) {
// Log failed request
this.logger.logRequest(
tracker,
false,
undefined,
error instanceof Error ? error.message : 'Unknown error'
);
throw error;
}
}
/**
* Get adapter for a provider (creates if not exists)
*/
private getAdapter(provider: string): IProviderAdapter {
// Return cached adapter if exists
if (this.adapters.has(provider)) {
return this.adapters.get(provider)!;
}
// Create adapter config
const adapterConfig: ProviderConfig = {
apiKey: this.config.apiKeys?.[provider],
baseUrl: this.config.baseUrls?.[provider],
};
// Add cloudflare-specific config
if (provider === 'cloudflare') {
adapterConfig.cloudflareAccountId = this.config.cloudflareAccountId;
}
// Create new adapter
let adapter: IProviderAdapter;
switch (provider) {
case 'openai':
adapter = new OpenAIAdapter(adapterConfig);
break;
case 'anthropic':
adapter = new AnthropicAdapter(adapterConfig);
break;
case 'google':
adapter = new GoogleAdapter(adapterConfig);
break;
case 'groq':
adapter = new GroqAdapter(adapterConfig);
break;
case 'mistral':
adapter = new MistralAdapter(adapterConfig);
break;
case 'cohere':
adapter = new CohereAdapter(adapterConfig);
break;
case 'xai':
adapter = new XAIAdapter(adapterConfig);
break;
case 'deepseek':
adapter = new DeepSeekAdapter(adapterConfig);
break;
case 'ai21':
adapter = new AI21Adapter(adapterConfig);
break;
case 'openrouter':
adapter = new OpenRouterAdapter(adapterConfig);
break;
case 'cloudflare':
adapter = new CloudflareAdapter(adapterConfig);
break;
default:
throw new InvalidRequestError(`Unsupported provider: ${provider}`);
}
// Cache adapter
this.adapters.set(provider, adapter);
return adapter;
}
/**
* Clear cached adapters (useful for testing or resetting state)
*/
clearAdapters(): void {
this.adapters.clear();
}
/**
* Get request logger for metrics
*/
getLogger(): RequestLogger {
return this.logger;
}
/**
* Get request statistics
*/
getStats() {
return this.logger.getStats();
}
}