@relayplane/sdk
Version:
RelayPlane SDK with zero-config AI access, intelligent model selection, built-in examples, and contextual error handling. The easiest way to add AI to your app with automatic optimization and fallback.
471 lines • 15.3 kB
TypeScript
/**
* RelayPlane SDK Type Definitions
*
* This file defines all the TypeScript interfaces and types used by the RelayPlane SDK.
* Based on the PRD v2 API specification.
*/
/** Provider families the SDK can route to; 'custom' presumably covers user-supplied endpoints — confirm against implementation. */
export type ModelProvider = 'claude' | 'openai' | 'google' | 'custom';
/**
 * Model identifiers the SDK knows about, plus any custom model string.
 *
 * FIX: the original union ended in `| string`, which makes TypeScript collapse
 * the entire union to plain `string` — every literal member is absorbed and
 * editor autocomplete for the known model names is lost. Ending in
 * `(string & {})` instead (the standard "LiteralUnion" trick) keeps arbitrary
 * strings assignable — so this is fully backward compatible — while preserving
 * the literal members for IntelliSense.
 */
export type SupportedModel =
    | 'claude-opus-4-20250514' | 'claude-opus-4-0'
    | 'claude-sonnet-4-20250514' | 'claude-sonnet-4-0'
    | 'claude-3-7-sonnet-20250219' | 'claude-3-7-sonnet-latest'
    | 'claude-3-5-sonnet-20241022' | 'claude-3-5-sonnet-latest' | 'claude-3-5-sonnet-20240620'
    | 'claude-3-5-haiku-20241022' | 'claude-3-5-haiku-latest'
    | 'claude-3-opus-20240229' | 'claude-3-opus-latest'
    | 'claude-3-sonnet-20240229' | 'claude-3-haiku-20240307'
    | 'claude-opus' | 'claude-sonnet' | 'claude-haiku'
    | 'gpt-4.1' | 'gpt-4.1-mini' | 'gpt-4.1-nano'
    | 'o3' | 'o3-pro' | 'o3-mini' | 'o4-mini'
    | 'gpt-4o' | 'gpt-4o-mini' | 'gpt-4o-latest'
    | 'o1-preview' | 'o1-mini' | 'o1'
    | 'gpt-4' | 'gpt-4-turbo' | 'gpt-4-turbo-preview'
    | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-16k'
    | 'gemini-2.5-pro' | 'gemini-2.5-flash' | 'gemini-2.5-flash-lite'
    | 'gemini-2.0-flash' | 'gemini-2.0-flash-lite' | 'gemini-2.0-pro'
    | 'gemini-1.5-pro' | 'gemini-1.5-flash' | 'gemini-1.5-flash-8b'
    | 'gemini-pro' | 'gemini-pro-vision' | 'gemini'
    | (string & {});
/** A single relay call: the target model plus the provider-specific request body. */
export interface RelayRequest {
    /** Target model or endpoint to relay to */
    to: SupportedModel;
    /** The actual payload to send to the model (provider-specific shape; see OpenAIPayload / ClaudePayload / GooglePayload) */
    payload: Record<string, any>;
    /** Optional metadata for tracking and debugging */
    metadata?: Record<string, any>;
}
/**
 * Result of a relayed request. `T` is the downstream response body type
 * (defaults to `any` since the shape depends on the target provider).
 */
export interface RelayResponse<T = any> {
    /** Unique identifier for this relay request */
    relay_id: string;
    /** HTTP status code from the downstream service */
    status_code: number;
    /** Total latency in milliseconds for the request */
    latency_ms: number;
    /** The actual response body from the downstream service */
    body: T;
    /** Whether a fallback model was used */
    fallback_used: boolean;
}
/** Client-level configuration for the RelayPlane SDK. All fields are optional. */
export interface RelayConfig {
    /** API key for hosted mode (optional for local mode) */
    apiKey?: string;
    /** Base URL for the RelayPlane API */
    baseUrl?: string;
    /** Timeout for requests in milliseconds */
    timeout?: number;
    /** Whether to enable debug logging */
    debug?: boolean;
}
/** Per-request optimization settings: model-selection strategy, cost ceiling, fallbacks, caching, retries. */
export interface OptimizeConfig {
    /** Strategy for model selection */
    strategy?: 'fallback' | 'latency' | 'cost' | 'balanced';
    /** Maximum cost ceiling in dollars */
    maxCost?: number;
    /** Fallback chain in order of preference */
    fallbackChain?: SupportedModel[];
    /** Whether to enable caching */
    enableCache?: boolean;
    /** Cache TTL in seconds */
    cacheTtl?: number;
    /** Maximum number of retries */
    maxRetries?: number;
}
/**
 * Base error type for all relay failures.
 * Carries the downstream HTTP status and a relay identifier for log correlation.
 */
export declare class RelayError extends Error {
    /** HTTP status from the downstream service, when one was received */
    statusCode?: number | undefined;
    /** Relay identifier — presumably matches RelayResponse.relay_id; confirm against implementation */
    relayId?: string | undefined;
    /** Underlying cause (typed `any` in the published declarations) */
    cause?: any | undefined;
    constructor(message: string, statusCode?: number | undefined, relayId?: string | undefined, cause?: any | undefined);
}
/** Raised when a relayed request exceeds the configured timeout. */
export declare class RelayTimeoutError extends RelayError {
    constructor(message: string, relayId?: string);
}
/** Raised on authentication failures (e.g. missing or invalid API key). */
export declare class RelayAuthError extends RelayError {
    constructor(message: string);
}
/**
 * Raised when the relay or an upstream provider rate-limits the request.
 *
 * Declaration cleanup: `retryAfter` is now declared before the constructor,
 * matching the field-first layout used by the sibling RelayError class.
 * This is a purely type-level reordering; the declared shape is unchanged.
 */
export declare class RelayRateLimitError extends RelayError {
    /** Suggested wait before retrying — presumably seconds, per the HTTP Retry-After convention; confirm against implementation */
    retryAfter?: number;
    constructor(message: string, retryAfter?: number);
}
/** One turn of a chat conversation, in the role/content convention shared by all supported providers. */
export interface ChatMessage {
    /** Author of the message */
    role: 'user' | 'assistant' | 'system';
    /** Plain-text message content */
    content: string;
}
/** Request payload for OpenAI-style chat-completion endpoints — presumably mirrors the Chat Completions API; confirm field coverage. */
export interface OpenAIPayload {
    /** Model identifier to invoke */
    model: string;
    /** Conversation history, in order */
    messages: ChatMessage[];
    /** Sampling temperature */
    temperature?: number;
    /** Maximum tokens to generate */
    max_tokens?: number;
    /** Whether to stream the response */
    stream?: boolean;
}
/** Request payload for Anthropic Claude endpoints; note the standalone `system` prompt field, kept outside `messages`. */
export interface ClaudePayload {
    /** Model identifier to invoke */
    model: string;
    /** Conversation history, in order */
    messages: ChatMessage[];
    /** Maximum tokens to generate */
    max_tokens?: number;
    /** Sampling temperature */
    temperature?: number;
    /** System prompt, passed as a top-level field */
    system?: string;
}
/** Request payload for Google Gemini endpoints — presumably mirrors the generateContent API's contents/parts structure; confirm. */
export interface GooglePayload {
    /** Model identifier to invoke */
    model: string;
    /** Conversation turns; each turn holds one or more text parts */
    contents: Array<{
        role: string;
        parts: Array<{
            text: string;
        }>;
    }>;
    /** Generation parameters (Google's naming, not OpenAI's) */
    generationConfig?: {
        temperature?: number;
        maxOutputTokens?: number;
    };
}
/** One stage of a multi-step chain: which model to call, with what payload, and how to feed it the previous step's output. */
export interface ChainStep {
    /** Step identifier (e.g., 'summarize', 'translate', 'analyze') */
    step: string;
    /** Model to use for this step */
    to: SupportedModel;
    /** Base payload (messages will be modified based on previous step output) */
    payload: Record<string, any>;
    /** Optional metadata for this step */
    metadata?: Record<string, any>;
    /** Function to transform previous step output into this step's input */
    transform?: (previousOutput: any) => Record<string, any>;
}
/** A sequential multi-model pipeline: steps run in order, each consuming the previous step's output. */
export interface ChainRequest {
    /** Array of steps to execute in sequence */
    steps: ChainStep[];
    /** Initial input to start the chain */
    input: string | ChatMessage[];
    /** Optional optimization settings for the entire chain */
    optimize?: OptimizeConfig;
    /** Optional metadata for the entire chain */
    metadata?: Record<string, any>;
}
/** Outcome of a single chain step, mirroring the corresponding ChainStep by its `step` identifier. */
export interface ChainStepResult {
    /** Step identifier */
    step: string;
    /** Model used for this step */
    model: SupportedModel;
    /** Response from this step */
    response: any;
    /** Latency for this step in milliseconds */
    latency_ms: number;
    /** Whether this step used a fallback model */
    fallback_used: boolean;
    /** Unique ID for this step execution */
    step_id: string;
}
/** Aggregate result of a chain execution: per-step results in order, plus chain-wide totals. */
export interface ChainResponse {
    /** Unique identifier for this chain execution */
    chain_id: string;
    /** Results from each step in execution order */
    steps: ChainStepResult[];
    /** Final output from the last step */
    final_output: any;
    /** Total chain latency in milliseconds */
    total_latency_ms: number;
    /** Total cost for the entire chain */
    total_cost?: number;
    /** Whether any steps used fallback models */
    fallbacks_used: boolean;
}
/** A set of independent relay requests processed together, with concurrency and failure-policy knobs. */
export interface BatchRequest {
    /** Array of individual relay requests to process */
    requests: RelayRequest[];
    /** Whether to process requests in parallel (default: true) */
    parallel?: boolean;
    /** Whether to stop on first error (default: false) */
    fail_fast?: boolean;
    /** Maximum number of concurrent requests (default: 10) */
    max_concurrency?: number;
    /** Optional metadata for the batch */
    metadata?: Record<string, any>;
}
/**
 * Outcome of one request in a batch. Exactly one of `response` (on success)
 * or `error` (on failure) is expected to be populated, discriminated by `success`.
 */
export interface BatchRequestResult {
    /** Index of this request in the original batch */
    index: number;
    /** Success status of this request */
    success: boolean;
    /** Response data if successful */
    response?: RelayResponse;
    /** Error information if failed */
    error?: {
        message: string;
        code?: string;
        status?: number;
    };
    /** Latency for this specific request */
    latency_ms: number;
}
/** Aggregate result of a batch: per-request results in original order, plus success/failure counts. */
export interface BatchResponse {
    /** Unique identifier for this batch execution */
    batch_id: string;
    /** Results for each request in original order */
    results: BatchRequestResult[];
    /** Total number of requests processed */
    total_requests: number;
    /** Number of successful requests */
    successful_requests: number;
    /** Number of failed requests */
    failed_requests: number;
    /** Total batch processing time */
    total_latency_ms: number;
    /** Whether batch completed fully or was terminated early */
    completed: boolean;
}
/** Client-wide defaults layered on top of per-request OptimizeConfig. */
export interface GlobalOptimizeConfig extends OptimizeConfig {
    /** Whether to apply optimization globally by default */
    enabled?: boolean;
    /** Models to never use in fallback chains */
    excludeModels?: SupportedModel[];
    /** Cost limit per request in dollars */
    costLimitPerRequest?: number;
    /** Latency limit per request in milliseconds */
    latencyLimitMs?: number;
}
/** RelayConfig extended with global optimization defaults, retry policy, and validation options. */
export interface EnhancedRelayConfig extends RelayConfig {
    /** Global optimization settings applied to all requests */
    defaultOptimization?: GlobalOptimizeConfig;
    /** Default retry configuration */
    defaultRetries?: {
        /** Maximum retry attempts per request */
        maxRetries?: number;
        /** Base backoff delay between retries, in milliseconds */
        backoffMs?: number;
        /** Error codes/names considered retryable — presumably matched against error identifiers; confirm */
        retryableErrors?: string[];
    };
    /** User agent string for requests */
    userAgent?: string;
    /** Whether to validate model names against supported list */
    validateModels?: boolean;
}
/**
* Enhanced error classes with contextual recovery suggestions
*/
/** Base error with recovery suggestions */
/** Base error with recovery suggestions */
export declare class RelayErrorWithRecovery extends Error {
    /** Machine-readable error code */
    readonly code: string;
    /** Actionable recovery suggestions for the developer */
    readonly suggestions: string[];
    /** Links to relevant documentation */
    readonly docLinks: string[];
    /** One-line fix the caller can apply immediately, when available */
    readonly quickFix?: string | undefined;
    constructor(message: string, code: string, suggestions: string[], docLinks?: string[], quickFix?: string | undefined);
    /** Format error with recovery suggestions for console output */
    getHelpfulMessage(): string;
}
/** API Key related errors with specific guidance */
export declare class RelayApiKeyError extends RelayErrorWithRecovery {
    /** `provider` presumably tailors the guidance to a specific vendor — confirm against implementation */
    constructor(provider?: string);
}
/** Rate limiting errors with fallback suggestions */
export declare class RelayRateLimitErrorEnhanced extends RelayErrorWithRecovery {
    /** `resetTime` is the moment the provider's limit window resets, when known */
    constructor(provider: string, resetTime?: Date);
}
/** Model availability errors with alternatives */
export declare class RelayModelError extends RelayErrorWithRecovery {
    /** `availableModels` lists alternatives to suggest in the error output */
    constructor(model: string, availableModels?: string[]);
}
/** Network/timeout errors with retry suggestions */
export declare class RelayNetworkError extends RelayErrorWithRecovery {
    /** `operation` names what was being attempted; `originalError` is the underlying failure */
    constructor(operation: string, originalError?: Error);
}
/** Configuration errors with setup guidance */
export declare class RelayConfigError extends RelayErrorWithRecovery {
    /** `issue` describes the misconfiguration; `fix` is an optional suggested remedy */
    constructor(issue: string, fix?: string);
}
/**
 * Enhanced configuration result from configure() method
 */
export interface ConfigurationResult {
    /** Whether configuration succeeded */
    success: boolean;
    /** Whether the SDK is talking to the hosted service or running locally */
    mode: 'hosted' | 'local';
    /** Providers detected from the environment, with per-provider health */
    detectedProviders: Array<{
        provider: ModelProvider;
        models: SupportedModel[];
        /* NOTE(review): inline union duplicates the ProviderStatus alias declared later in this file — consider reusing it */
        status: 'available' | 'invalid_key' | 'quota_exceeded' | 'unavailable';
    }>;
    /** Human-readable setup recommendations */
    recommendations: string[];
    /** Feature flags enabled for this configuration */
    features?: {
        autoOptimization: boolean;
        smartRetry: boolean;
        costTracking: boolean;
        batchProcessing: boolean;
        streamingSupport: boolean;
        multiAgentChaining: boolean;
    };
    /** Billing plan details (hosted mode) */
    plan?: {
        name: string;
        tier: PlanTier;
        modelAccess: SupportedModel[];
        rateLimits: {
            requestsPerMinute: number;
            requestsPerMonth: number;
        };
    };
}
/**
 * Comprehensive status information from status() method
 */
export interface StatusInformation {
    /** SDK build and configuration state */
    sdk: {
        version: string;
        mode: 'hosted' | 'local';
        configured: boolean;
    };
    /** Per-provider availability and model access */
    providers: Array<{
        provider: ModelProvider;
        /* NOTE(review): inline union duplicates the ProviderStatus alias declared later in this file */
        status: 'available' | 'invalid_key' | 'quota_exceeded' | 'unavailable';
        models: SupportedModel[];
        quotaRemaining?: number;
    }>;
    /** Billing plan details (hosted mode) */
    plan?: {
        name: string;
        /* NOTE(review): inline union duplicates the PlanTier alias declared later in this file */
        tier: 'free' | 'startup' | 'growth' | 'enterprise';
        features: string[];
        limits: {
            requestsPerMonth: number;
            modelsAccess: string[];
            supportLevel: string;
            concurrentRequests: number;
            rateLimitRpm: number;
        };
        billing: {
            amount: number;
            currency: string;
            interval: string;
            nextBillingDate?: string;
        };
    };
    /* NOTE(review): this quota shape duplicates QuotaInformation minus its billing/warnings fields — consider Omit<QuotaInformation, 'billing' | 'warnings'> */
    quota?: {
        /** Usage consumed in the current period */
        current: {
            requests: number;
            costUsd: number;
        };
        /** Hard limits for the current period */
        limits: {
            requests: number;
            costUsd: number;
        };
        /** Headroom left in the current period */
        remaining: {
            requests: number;
            costUsd: number;
        };
        /** When the current quota period resets */
        resetDate: string;
        period: 'monthly' | 'weekly' | 'daily';
        /** Rolling usage counters */
        usage: {
            thisMonth: number;
            today: number;
            lastHour: number;
        };
    };
    /** Backend service health (hosted mode) */
    services?: {
        /* NOTE(review): these inline unions duplicate the ServiceHealthStatus alias declared later in this file */
        database: 'healthy' | 'degraded' | 'down';
        redis: 'healthy' | 'degraded' | 'down';
        stripe: 'healthy' | 'degraded' | 'down';
        providers: {
            anthropic: 'healthy' | 'degraded' | 'down';
            openai: 'healthy' | 'degraded' | 'down';
            google: 'healthy' | 'degraded' | 'down';
        };
    };
    /** Human-readable recommendations based on current status */
    recommendations: string[];
}
/**
 * Provider validation status.
 * Same literal set that ConfigurationResult/StatusInformation spell out inline — prefer this alias in new code.
 */
export type ProviderStatus = 'available' | 'invalid_key' | 'quota_exceeded' | 'unavailable';
/**
 * Plan tier types, from free up to enterprise.
 */
export type PlanTier = 'free' | 'startup' | 'growth' | 'enterprise';
/**
 * Service health status, as reported by StatusInformation.services.
 */
export type ServiceHealthStatus = 'healthy' | 'degraded' | 'down';
/**
 * Enhanced quota response with business plan features
 */
export interface QuotaInformation {
    /** Usage consumed in the current period */
    current: {
        requests: number;
        costUsd: number;
    };
    /** Hard limits for the current period */
    limits: {
        requests: number;
        costUsd: number;
    };
    /** Headroom left in the current period */
    remaining: {
        requests: number;
        costUsd: number;
    };
    /** When the current quota period resets */
    resetDate: string;
    period: 'monthly' | 'weekly' | 'daily';
    /** Rolling usage counters */
    usage: {
        thisMonth: number;
        today: number;
        lastHour: number;
    };
    /** Billing context for the quota */
    billing: {
        plan: string;
        tier: PlanTier;
        /** Set when usage patterns suggest moving to a higher tier */
        upgradeRecommended?: boolean;
        nextBillingDate?: string;
    };
    /** Early-warning flags derived from current usage */
    warnings?: {
        nearingLimit: boolean;
        rateLimitRisk: boolean;
        costOverrun: boolean;
    };
}
/**
 * Feature capabilities information
 */
export interface FeatureCapabilities {
    /** SDK build and configuration state */
    sdk: {
        version: string;
        mode: 'hosted' | 'local';
        configured: boolean;
        features: string[];
    };
    /** Current plan, its feature list, and its limits */
    plan: {
        name: string;
        tier: PlanTier;
        features: string[];
        modelAccess: SupportedModel[];
        limits: {
            requestsPerMonth: number;
            requestsPerMinute: number;
            concurrentRequests: number;
            supportLevel: string;
        };
    };
    /** Which SDK capabilities are enabled for this plan/config */
    capabilities: {
        autoOptimization: boolean;
        smartRetry: boolean;
        costTracking: boolean;
        batchProcessing: boolean;
        streamingSupport: boolean;
        multiAgentChaining: boolean;
        bringYourOwnKeysSupport: boolean;
        customIntegrations: boolean;
    };
    /** Per-provider status, model access, and features */
    providers: Array<{
        provider: ModelProvider;
        status: ProviderStatus;
        models: SupportedModel[];
        quotaRemaining?: number;
        features: string[];
    }>;
    /** Recommendations, bucketed by area */
    recommendations: {
        planOptimization: string[];
        providerOptimization: string[];
        usageOptimization: string[];
        securityRecommendations: string[];
    };
}
/**
 * Bring Your Own Keys configuration
 *
 * NOTE(review): `apiKey` fields hold raw secrets — callers should source them
 * from environment variables or a secret store, never hard-code them.
 */
export interface BringYourOwnKeysConfiguration {
    /** Whether BYOK routing is active */
    enabled: boolean;
    /** Per-provider credentials; omit a provider to route it through RelayPlane */
    providers: {
        anthropic?: {
            apiKey: string;
            /** Override for non-default (e.g. proxied) API endpoints */
            endpoint?: string;
            /** Whether this key has passed validation */
            validated: boolean;
        };
        openai?: {
            apiKey: string;
            /** OpenAI organization ID, when the key belongs to one */
            organization?: string;
            /** Override for non-default (e.g. proxied) API endpoints */
            endpoint?: string;
            /** Whether this key has passed validation */
            validated: boolean;
        };
        google?: {
            apiKey: string;
            /** Google Cloud project ID, when required */
            projectId?: string;
            /** Whether this key has passed validation */
            validated: boolean;
        };
    };
    /** Whether to fall back to RelayPlane-managed keys when a BYOK call fails */
    fallbackToRelayPlane: boolean;
    /** Result of the most recent key-validation pass */
    validation: {
        lastChecked: string;
        errors: string[];
        warnings: string[];
    };
}
export * from './index';
//# sourceMappingURL=index.d.ts.map