@lunos/client
Version: (version number missing from this capture)
Official TypeScript client library for Lunos AI API - A comprehensive AI proxy service supporting chat completions, image generation, audio synthesis, embeddings, and more.
1,473 lines (1,463 loc) • 81.1 kB
TypeScript
/**
 * Client-level configuration shared by every Lunos service instance.
 */
interface LunosConfig {
  /** Base URL for the Lunos API */
  baseUrl: string;
  /** API key used to authenticate every request */
  apiKey: string;
  /** Request timeout in milliseconds */
  timeout: number;
  /** Number of retry attempts for failed requests */
  retries: number;
  /** Delay between retries in milliseconds */
  retryDelay: number;
  /** Fallback model to use when the primary model fails after all retries */
  fallback_model?: string;
  /** Application identifier for analytics and usage tracking */
  appId?: string;
  /** Custom headers merged into all requests */
  headers?: Record<string, string>;
  /** Whether to enable debug logging */
  debug?: boolean;
  /** Custom fetch implementation (overrides the environment's fetch) */
  fetch?: typeof fetch;
}
/**
 * Per-request options that override the client-level configuration
 * for a single API call.
 */
interface RequestOptions {
  /** Request timeout override in milliseconds */
  timeout?: number;
  /** Custom headers for this request only */
  headers?: Record<string, string>;
  /** Whether to retry this request on failure */
  retry?: boolean;
  /** AbortSignal for request cancellation */
  signal?: AbortSignal;
  /** Fallback model for this specific request */
  fallback_model?: string;
  /** Application identifier for this specific request */
  appId?: string;
}
/**
 * Shared base class for all Lunos API services. Owns the client
 * configuration and the HTTP plumbing: request execution with retries,
 * streaming, raw binary downloads, fallback-model handling, and
 * debug logging.
 */
declare abstract class BaseService {
  /** Client configuration used for every request made by this service */
  protected config: LunosConfig;
  /** Fetch implementation in use (config.fetch when provided) */
  protected fetchImpl: typeof fetch;
  constructor(config: LunosConfig);
  /**
   * Makes a request to the API with retry logic and error handling.
   *
   * @param endpoint - API endpoint path
   * @param options - Fetch init options (method, body, headers, ...)
   * @param requestOptions - Per-request overrides (timeout, retry, signal, ...)
   * @returns Promise resolving to the response body typed as T
   */
  protected makeRequest<T>(endpoint: string, options?: RequestInit, requestOptions?: RequestOptions): Promise<T>;
  /**
   * Makes a streaming request to the API and returns the raw byte stream.
   */
  protected makeStreamRequest(endpoint: string, options?: RequestInit, requestOptions?: RequestOptions): Promise<ReadableStream<Uint8Array>>;
  /**
   * Makes a request to the API and returns the raw response body as Buffer
   * together with its content type (for audio and other binary payloads).
   */
  protected makeRawRequest(endpoint: string, options?: RequestInit, requestOptions?: RequestOptions): Promise<{
    buffer: Buffer;
    contentType: string;
  }>;
  /**
   * Makes a request with retry logic.
   */
  private makeRequestWithRetry;
  /**
   * Handles error responses from the API.
   */
  private handleErrorResponse;
  /**
   * Calculates retry delay with exponential backoff.
   */
  private calculateRetryDelay;
  /**
   * Sleep utility used between retry attempts.
   */
  private sleep;
  /**
   * Determines if an error should trigger fallback model usage.
   */
  private shouldTryFallback;
  /**
   * Attempts the request with a fallback model.
   */
  private tryWithFallbackModel;
  /**
   * Attempts the streaming request with a fallback model.
   */
  private tryStreamWithFallbackModel;
  /**
   * Validates the service configuration.
   */
  protected validateConfig(): void;
  /**
   * Logs debug information if debug mode is enabled.
   */
  protected log(message: string, data?: any): void;
}
/**
 * Fields common to all API request payloads.
 */
interface BaseRequest {
  /** Model identifier (e.g. "openai/gpt-4.1-mini") */
  model?: string;
  /** End-user identifier for tracking */
  user?: string;
  /** Application identifier for analytics and usage tracking */
  appId?: string;
}
/**
 * Fields common to all API response payloads.
 */
interface BaseResponse {
  /** Response object type discriminator */
  object: string;
  /** Creation timestamp */
  created: number;
  /** Model used for the request */
  model: string;
}
/**
 * Token accounting for a request/response pair.
 */
interface Usage {
  /** Tokens consumed by the prompt */
  prompt_tokens: number;
  /** Tokens generated in the completion, when applicable */
  completion_tokens?: number;
  /** Sum of all tokens for the request */
  total_tokens: number;
}
/**
 * One server-sent chunk of a streaming completion response.
 */
interface StreamChunk {
  /** Object type discriminator */
  object: string;
  /** Creation timestamp of the chunk */
  created: number;
  /** Model that produced the chunk */
  model: string;
  /** Incremental choices carried by this chunk */
  choices: {
    /** Index of the choice within the response */
    index: number;
    /** Partial message content delivered in this chunk */
    delta: {
      /** Role of the message sender, when present */
      role?: string;
      /** Text fragment appended by this chunk */
      content?: string;
      /** Partial function-call payload, if applicable */
      function_call?: any;
      /** Partial tool-call payloads, if applicable */
      tool_calls?: any[];
    };
    /** Reason generation stopped, when present */
    finish_reason?: string;
  }[];
  /** Token usage information, when present */
  usage?: Usage;
}
/**
 * Terminal marker emitted at the end of a server-sent event stream.
 */
interface StreamEnd {
  /** End-of-stream sentinel value */
  data: "[DONE]";
}
/** A parsed streaming event: either a content chunk or the end marker. */
type StreamResponse = StreamChunk | StreamEnd;
/**
 * Settings controlling the retry behavior of API requests.
 */
interface RetryConfig {
  /** Maximum number of retries */
  maxRetries: number;
  /** Base delay between retries */
  baseDelay: number;
  /** Maximum delay between retries (cap for backoff growth) */
  maxDelay: number;
  /** Whether to use exponential backoff when computing delays */
  exponentialBackoff: boolean;
  /** HTTP status codes that should trigger a retry */
  retryStatusCodes: number[];
}
/** Allowed roles for a chat message sender. */
type ChatRole = "system" | "user" | "assistant" | "function" | "tool";
/**
 * A single message in a chat conversation, including the optional
 * function-call and tool-call payloads a model may emit.
 */
interface ChatMessage {
  /** Who sent this message */
  role: ChatRole;
  /** Text content of the message */
  content: string;
  /** Name of the function or tool that produced this message (optional) */
  name?: string;
  /** Legacy single function call emitted by the model (optional) */
  function_call?: {
    /** Function to invoke */
    name: string;
    /** JSON-encoded arguments for the call */
    arguments: string;
  };
  /** Tool calls emitted by the model (optional) */
  tool_calls?: {
    /** Unique identifier of this tool call */
    id: string;
    /** Tool kind; only "function" is supported */
    type: "function";
    /** Details of the function to invoke */
    function: {
      /** Function to invoke */
      name: string;
      /** JSON-encoded arguments for the call */
      arguments: string;
    };
  }[];
}
/**
 * Request payload for a chat completion call.
 */
interface ChatCompletionRequest extends BaseRequest {
  /** Array of messages in the conversation */
  messages: ChatMessage[];
  /** Maximum number of tokens to generate */
  max_tokens?: number;
  /** Temperature for controlling randomness (0-2) */
  temperature?: number;
  /** Top-p sampling parameter (0-1) */
  top_p?: number;
  /** Number of completions to generate */
  n?: number;
  /** Whether to stream the response */
  stream?: boolean;
  /** Stop sequences that end generation */
  stop?: string | string[];
  /** Presence penalty (-2 to 2) */
  presence_penalty?: number;
  /** Frequency penalty (-2 to 2) */
  frequency_penalty?: number;
  /** Logit bias for specific tokens */
  logit_bias?: Record<string, number>;
  /** Whether to return log probabilities */
  logprobs?: boolean;
  /** Number of top log probabilities to return */
  top_logprobs?: number;
  /** Response format */
  response_format?: {
    /** Type of response format */
    type: "text" | "json_object";
  };
  /** Seed for reproducible results */
  seed?: number;
  /** Fallback model to use when the primary model fails after retries */
  fallback_model?: string;
  /** Tools available to the model */
  tools?: Array<{
    /** Tool type; only "function" is supported */
    type: "function";
    /** Function definition */
    function: {
      /** Function name */
      name: string;
      /** Function description shown to the model */
      description?: string;
      /** Function parameters as a JSON-Schema-style object */
      parameters: {
        /** Parameter type */
        type: "object";
        /** Parameter properties */
        properties: Record<string, any>;
        /** Required parameter names */
        required?: string[];
      };
    };
  }>;
  /** Tool choice: disable tools, let the model decide, or force one function */
  tool_choice?: "none" | "auto" | {
    /** Tool choice type */
    type: "function";
    /** Function to force */
    function: {
      /** Function name */
      name: string;
    };
  };
}
/**
 * A single generated completion choice within a response.
 */
interface ChatCompletionChoice {
  /** Choice index within the response */
  index: number;
  /** Generated message */
  message: ChatMessage;
  /** Reason generation finished */
  finish_reason: string;
  /** Log probabilities, present only when requested via logprobs */
  logprobs?: {
    /** Log probability of each generated token */
    token_logprobs: number[];
    /** Top alternative log probabilities per token position */
    top_logprobs: Array<Record<string, number>>;
    /** Text offsets — NOTE(review): previously commented as "Token text",
     * which did not match the field name; presumably character offsets of
     * each token within the text — confirm against the API. */
    text_offset: number[];
  };
}
/**
 * Response payload of a non-streaming chat completion call.
 */
interface ChatCompletionResponse extends BaseResponse {
  /** Response object type discriminator */
  object: "chat.completion";
  /** Generated choices */
  choices: ChatCompletionChoice[];
  /** Token usage statistics */
  usage: Usage;
  /** System fingerprint, when provided by the backend */
  system_fingerprint?: string;
}
/**
 * A single chunk of a streaming chat completion.
 *
 * NOTE(review): this interface duplicates the created/model fields of
 * BaseResponse instead of extending it the way ChatCompletionResponse
 * does — consider `extends BaseResponse` for consistency.
 */
interface ChatCompletionChunk {
  /** Chunk object type discriminator */
  object: "chat.completion.chunk";
  /** Creation timestamp */
  created: number;
  /** Model used */
  model: string;
  /** Incremental choices carried by this chunk */
  choices: Array<{
    /** Choice index */
    index: number;
    /** Partial message content delivered in this chunk */
    delta: Partial<ChatMessage>;
    /** Finish reason, present on the final chunk of a choice */
    finish_reason?: string;
  }>;
  /** System fingerprint, when provided by the backend */
  system_fingerprint?: string;
}
/**
 * Service for handling chat completion operations with the Lunos AI API.
 * Provides both synchronous and streaming chat completion capabilities,
 * along with various convenience methods for common chat scenarios.
 */
declare class ChatService extends BaseService {
  /**
   * Creates a chat completion using the Lunos AI API.
   *
   * This method handles the core chat completion functionality, validating
   * the request parameters and making the API call to generate responses
   * based on conversation history. Supports fallback models for reliability.
   *
   * @param request - Complete chat completion request object containing
   *                  messages, model, parameters, and optional fallback model
   * @returns Promise resolving to ChatCompletionResponse with generated response
   * @throws Error if request validation fails or API call fails
   *
   * @example
   * ```typescript
   * const response = await client.chat.createCompletion({
   *   model: "openai/gpt-4.1-mini",
   *   messages: [
   *     { role: "user", content: "Hello! Can you tell me a short joke?" }
   *   ],
   *   max_tokens: 100,
   *   // Use a model different from the primary, otherwise the fallback is a no-op.
   *   fallback_model: "openai/gpt-4o-mini",
   *   appId: "my-app"
   * });
   * ```
   */
  createCompletion(request: ChatCompletionRequest): Promise<ChatCompletionResponse>;
  /**
   * Creates a streaming chat completion that returns a raw stream.
   *
   * This method creates a streaming chat completion and returns the raw
   * ReadableStream for advanced stream processing. The stream contains
   * Server-Sent Events (SSE) chunks that need to be parsed.
   *
   * @param request - Complete chat completion request object
   * @returns Promise resolving to ReadableStream<Uint8Array> for raw stream processing
   * @throws Error if request validation fails or API call fails
   *
   * @example
   * ```typescript
   * const stream = await client.chat.createCompletionStream({
   *   model: "openai/gpt-4.1-mini",
   *   messages: [
   *     { role: "user", content: "Write a haiku about programming." }
   *   ]
   * });
   * ```
   */
  createCompletionStream(request: ChatCompletionRequest): Promise<ReadableStream<Uint8Array>>;
  /**
   * Creates a streaming chat completion with optional callback processing.
   *
   * This method creates a streaming chat completion and optionally processes
   * the stream with a callback function. Similar to OpenAI's streaming API,
   * it provides real-time access to generated content chunks.
   *
   * @param request - Complete chat completion request object
   * @param onChunk - Optional callback invoked with each decoded content chunk
   * @returns Promise resolving to ReadableStream<Uint8Array> for further processing
   * @throws Error if request validation fails or API call fails
   *
   * @example
   * ```typescript
   * let streamedResponse = "";
   * const stream = await client.chat.createCompletionWithStream(
   *   {
   *     model: "openai/gpt-4.1-mini",
   *     messages: [
   *       { role: "user", content: "Write a haiku about programming." }
   *     ]
   *   },
   *   (chunk) => {
   *     streamedResponse += chunk;
   *     process.stdout.write(chunk);
   *   }
   * );
   * ```
   */
  createCompletionWithStream(request: ChatCompletionRequest, onChunk?: (chunk: string) => void): Promise<ReadableStream<Uint8Array>>;
  /**
   * Creates a streaming chat completion and returns the full response as a string.
   *
   * This method is provided for backward compatibility and convenience.
   * It processes the entire stream and returns the complete response as a string,
   * while optionally calling a callback for each chunk during processing.
   *
   * @param request - Complete chat completion request object
   * @param onChunk - Optional callback invoked with each decoded content chunk
   * @returns Promise resolving to the complete response as a string
   * @throws Error if request validation fails or API call fails
   *
   * @example
   * ```typescript
   * const response = await client.chat.createCompletionWithStreamToString(
   *   {
   *     model: "openai/gpt-4.1-mini",
   *     messages: [
   *       { role: "user", content: "Explain quantum computing." }
   *     ]
   *   },
   *   (chunk) => console.log("Chunk:", chunk)
   * );
   * ```
   */
  createCompletionWithStreamToString(request: ChatCompletionRequest, onChunk?: (chunk: string) => void): Promise<string>;
  /**
   * Gets a specific generation by ID from the API.
   *
   * This method retrieves information about a specific chat completion
   * generation using its unique identifier.
   *
   * @param id - Unique identifier of the generation to retrieve
   * @returns Promise resolving to generation information (untyped: the
   *          response shape is defined by the backend, hence Promise<any>)
   * @throws Error if ID is not provided or API call fails
   *
   * @example
   * ```typescript
   * const generation = await client.chat.getGeneration("gen_123456789");
   * ```
   */
  getGeneration(id: string): Promise<any>;
  /**
   * Convenience method for simple chat completions with structured parameters.
   *
   * This method provides a simplified interface for chat completions using
   * a structured object that separates messages from other options.
   *
   * @param options - Object containing messages and optional completion parameters
   * @returns Promise resolving to ChatCompletionResponse with generated response
   *
   * @example
   * ```typescript
   * const response = await client.chat.chat({
   *   messages: [
   *     { role: "user", content: "What is machine learning?" }
   *   ],
   *   model: "openai/gpt-4.1-mini",
   *   max_tokens: 200,
   *   temperature: 0.7,
   *   appId: "my-app"
   * });
   * ```
   */
  chat(options: {
    messages: ChatMessage[];
    model?: string;
    max_tokens?: number;
    temperature?: number;
    top_p?: number;
    frequency_penalty?: number;
    presence_penalty?: number;
    stop?: string | string[];
    n?: number;
    stream?: boolean;
    fallback_model?: string;
    user?: string;
    appId?: string;
  }): Promise<ChatCompletionResponse>;
  /**
   * Convenience method for streaming chat completions with structured parameters.
   *
   * This method provides a simplified interface for streaming chat completions
   * using a structured object that separates messages from other options.
   *
   * NOTE(review): unlike chat(), this options object has no appId field —
   * confirm whether that omission is intentional.
   *
   * @param options - Object containing messages, callback, and optional parameters
   * @returns Promise resolving to ReadableStream<Uint8Array> for stream processing
   *
   * @example
   * ```typescript
   * const stream = await client.chat.chatStream({
   *   messages: [
   *     { role: "user", content: "Write a story about a robot." }
   *   ],
   *   model: "openai/gpt-4.1-mini",
   *   onChunk: (chunk) => console.log(chunk),
   *   max_tokens: 500
   * });
   * ```
   */
  chatStream(options: {
    messages: ChatMessage[];
    model?: string;
    onChunk?: (chunk: string) => void;
    max_tokens?: number;
    temperature?: number;
    top_p?: number;
    frequency_penalty?: number;
    presence_penalty?: number;
    stop?: string | string[];
    n?: number;
    fallback_model?: string;
    user?: string;
  }): Promise<ReadableStream<Uint8Array>>;
  /**
   * Creates a simple chat completion with a single user message.
   *
   * This convenience method simplifies chat completions when you only
   * need to send a single user message without complex conversation history.
   *
   * @param options - Object containing user message and optional parameters
   * @returns Promise resolving to ChatCompletionResponse with generated response
   *
   * @example
   * ```typescript
   * const response = await client.chat.chatWithUser({
   *   userMessage: "Explain the concept of recursion",
   *   model: "openai/gpt-4.1-mini",
   *   max_tokens: 300
   * });
   * ```
   */
  chatWithUser(options: {
    userMessage: string;
    model?: string;
    max_tokens?: number;
    temperature?: number;
    top_p?: number;
    frequency_penalty?: number;
    presence_penalty?: number;
    stop?: string | string[];
    n?: number;
    fallback_model?: string;
    user?: string;
  }): Promise<ChatCompletionResponse>;
  /**
   * Creates a chat completion with system and user messages.
   *
   * This convenience method is useful for setting up conversations with
   * a system prompt that defines the AI's behavior or role.
   *
   * @param options - Object containing system message, user message, and optional parameters
   * @returns Promise resolving to ChatCompletionResponse with generated response
   *
   * @example
   * ```typescript
   * const response = await client.chat.chatWithSystem({
   *   systemMessage: "You are a helpful coding assistant.",
   *   userMessage: "Write a function to calculate fibonacci numbers",
   *   model: "openai/gpt-4.1-mini",
   *   max_tokens: 400,
   *   appId: "my-app"
   * });
   * ```
   */
  chatWithSystem(options: {
    systemMessage: string;
    userMessage: string;
    model?: string;
    max_tokens?: number;
    temperature?: number;
    top_p?: number;
    frequency_penalty?: number;
    presence_penalty?: number;
    stop?: string | string[];
    n?: number;
    fallback_model?: string;
    user?: string;
    appId?: string;
  }): Promise<ChatCompletionResponse>;
  /**
   * Creates a conversation with multiple messages.
   *
   * This method is an alias for the chat method, providing semantic clarity
   * when working with multi-turn conversations.
   *
   * @param options - Object containing messages and optional parameters
   * @returns Promise resolving to ChatCompletionResponse with generated response
   *
   * @example
   * ```typescript
   * const response = await client.chat.createConversation({
   *   messages: [
   *     { role: "system", content: "You are a helpful assistant." },
   *     { role: "user", content: "Hello!" },
   *     { role: "assistant", content: "Hi there! How can I help you today?" },
   *     { role: "user", content: "What's the weather like?" }
   *   ],
   *   model: "openai/gpt-4.1-mini"
   * });
   * ```
   */
  createConversation(options: {
    messages: ChatMessage[];
    model?: string;
    max_tokens?: number;
    temperature?: number;
    top_p?: number;
    frequency_penalty?: number;
    presence_penalty?: number;
    stop?: string | string[];
    n?: number;
    fallback_model?: string;
    user?: string;
  }): Promise<ChatCompletionResponse>;
  /**
   * Gets API usage information for the current account.
   *
   * This method retrieves usage statistics and billing information
   * for the authenticated API key.
   *
   * @returns Promise resolving to usage information (untyped; shape defined
   *          by the backend)
   * @throws Error if API call fails or endpoint is not available
   *
   * @example
   * ```typescript
   * const usage = await client.chat.getUsage();
   * console.log("Total tokens used:", usage.total_tokens);
   * ```
   */
  getUsage(): Promise<any>;
  /**
   * Gets account information for the authenticated API key.
   *
   * This method retrieves account details, limits, and settings
   * for the current API key.
   *
   * @returns Promise resolving to account information (untyped; shape defined
   *          by the backend)
   * @throws Error if API call fails or endpoint is not available
   *
   * @example
   * ```typescript
   * const account = await client.chat.getAccount();
   * console.log("Account ID:", account.id);
   * ```
   */
  getAccount(): Promise<any>;
  /**
   * Validates chat messages for correctness and completeness.
   *
   * This static method performs validation on chat message arrays
   * to ensure they meet the API requirements before making requests.
   *
   * @param messages - Array of chat messages to validate
   * @throws Error if messages are invalid or incomplete
   *
   * @example
   * ```typescript
   * ChatService.validateMessages([
   *   { role: "user", content: "Hello" },
   *   { role: "assistant", content: "Hi there!" }
   * ]);
   * ```
   */
  static validateMessages(messages: ChatMessage[]): void;
}
/**
 * Request payload for text-to-image generation.
 */
interface ImageGenerationRequest extends BaseRequest {
  /** Text prompt describing the image to generate */
  prompt: string;
  /** Number of images to generate */
  n?: number;
  /** Size of the generated image */
  size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792";
  /** Width of the image (for custom sizes) */
  width?: number;
  /** Height of the image (for custom sizes) */
  height?: number;
  /** Quality of the generated image */
  quality?: "standard" | "hd";
  /** Response format: hosted URL or inline base64 JSON */
  response_format?: "url" | "b64_json";
  /** Style of the generated image */
  style?: "vivid" | "natural";
  /** Seed for reproducible results */
  seed?: number;
  /** User identifier — NOTE(review): also declared on BaseRequest; this
   * redeclaration is redundant but harmless */
  user?: string;
}
/**
 * A single generated image in a response. Whether url or b64_json is
 * populated presumably follows the request's response_format — confirm.
 */
interface ImageGenerationData {
  /** URL of the generated image */
  url?: string;
  /** Base64 encoded image data */
  b64_json?: string;
  /** Revised prompt, if the backend rewrote the original prompt */
  revised_prompt?: string;
}
/**
 * Response payload for image generation, editing, and variation calls.
 */
interface ImageGenerationResponse extends BaseResponse {
  /** Response object type discriminator */
  object: "list";
  /** Generated images */
  data: ImageGenerationData[];
}
/**
 * Request payload for editing an existing image (inpainting/outpainting).
 */
interface ImageEditRequest extends BaseRequest {
  /** Image to edit (base64 or URL) */
  image: string;
  /** Mask image restricting the editable region (base64 or URL) */
  mask?: string;
  /** Text prompt describing the desired edit */
  prompt: string;
  /** Number of images to generate */
  n?: number;
  /** Size of the generated image */
  size?: "256x256" | "512x512" | "1024x1024";
  /** Response format: hosted URL or inline base64 JSON */
  response_format?: "url" | "b64_json";
  /** User identifier */
  user?: string;
}
/**
 * Request payload for generating variations of an existing image.
 */
interface ImageVariationRequest extends BaseRequest {
  /** Base image for variation (base64 or URL) */
  image: string;
  /** Number of variations to generate */
  n?: number;
  /** Size of the generated image */
  size?: "256x256" | "512x512" | "1024x1024";
  /** Response format: hosted URL or inline base64 JSON */
  response_format?: "url" | "b64_json";
  /** User identifier */
  user?: string;
}
/**
 * Service for handling image generation, editing, and variation operations.
 * Provides both low-level API methods and high-level convenience methods
 * for common image generation tasks.
 */
declare class ImageService extends BaseService {
  /**
   * Generates an image based on a text prompt using the Lunos AI API.
   *
   * This method handles the core image generation functionality, validating
   * the request parameters and making the API call to generate images.
   *
   * @param request - Complete image generation request object containing
   *                  prompt, model, size, quality, and other parameters
   * @returns Promise resolving to ImageGenerationResponse with generated image data
   * @throws Error if request validation fails or API call fails
   *
   * @example
   * ```typescript
   * const response = await client.image.generateImage({
   *   prompt: "A beautiful sunset over mountains",
   *   model: "openai/dall-e-3",
   *   size: "1024x1024",
   *   quality: "hd",
   *   appId: "my-app"
   * });
   * ```
   */
  generateImage(request: ImageGenerationRequest): Promise<ImageGenerationResponse>;
  /**
   * Edits an existing image based on a text prompt and optional mask.
   *
   * This method allows for inpainting and outpainting operations by providing
   * an existing image and a text prompt describing the desired changes.
   *
   * @param request - Image edit request containing the base image, prompt,
   *                  optional mask, and generation parameters
   * @returns Promise resolving to ImageGenerationResponse with edited image data
   * @throws Error if image is not provided or API call fails
   *
   * @example
   * ```typescript
   * const response = await client.image.editImage({
   *   image: "base64_encoded_image_data",
   *   prompt: "Add a red car to the scene",
   *   model: "openai/dall-e-2",
   *   size: "1024x1024"
   * });
   * ```
   */
  editImage(request: ImageEditRequest): Promise<ImageGenerationResponse>;
  /**
   * Creates variations of an existing image.
   *
   * This method generates multiple variations of a provided base image,
   * maintaining the overall composition while introducing subtle changes.
   *
   * @param request - Image variation request containing the base image
   *                  and generation parameters
   * @returns Promise resolving to ImageGenerationResponse with variation image data
   * @throws Error if image is not provided or API call fails
   *
   * @example
   * ```typescript
   * const response = await client.image.createImageVariation({
   *   image: "base64_encoded_image_data",
   *   model: "openai/dall-e-2",
   *   n: 3,
   *   size: "1024x1024"
   * });
   * ```
   */
  createImageVariation(request: ImageVariationRequest): Promise<ImageGenerationResponse>;
  /**
   * Convenience method for simple image generation with structured parameters.
   *
   * This method provides a simplified interface for image generation using
   * a structured object that separates the prompt from other options.
   *
   * @param options - Object containing prompt and optional generation parameters
   * @returns Promise resolving to ImageGenerationResponse with generated image data
   *
   * @example
   * ```typescript
   * const response = await client.image.generate({
   *   prompt: "A futuristic city skyline",
   *   model: "openai/dall-e-3",
   *   size: "1024x1024",
   *   quality: "hd",
   *   appId: "my-app"
   * });
   * ```
   */
  generate(options: {
    prompt: string;
    model?: string;
    size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792";
    quality?: "standard" | "hd";
    response_format?: "url" | "b64_json";
    style?: "vivid" | "natural";
    n?: number;
    seed?: number;
    user?: string;
    appId?: string;
  }): Promise<ImageGenerationResponse>;
  /**
   * Convenience method for image generation with specific dimensions.
   *
   * This method allows for custom image dimensions while maintaining
   * the structured parameter approach.
   *
   * @param options - Object containing prompt, dimensions, and other parameters
   * @returns Promise resolving to ImageGenerationResponse with generated image data
   *
   * @example
   * ```typescript
   * const response = await client.image.generateWithSize({
   *   prompt: "A panoramic landscape",
   *   width: 1792,
   *   height: 1024,
   *   model: "openai/dall-e-3",
   *   quality: "hd",
   *   appId: "my-app"
   * });
   * ```
   */
  generateWithSize(options: {
    prompt: string;
    width: number;
    height: number;
    model?: string;
    quality?: "standard" | "hd";
    response_format?: "url" | "b64_json";
    style?: "vivid" | "natural";
    n?: number;
    seed?: number;
    user?: string;
    appId?: string;
  }): Promise<ImageGenerationResponse>;
  /**
   * Convenience method for high-quality image generation.
   *
   * This method automatically sets the quality to "hd" for high-definition
   * image generation while using the structured parameter approach.
   *
   * NOTE(review): unlike generate(), this options object has no appId field —
   * confirm whether that omission is intentional.
   *
   * @param options - Object containing prompt and other parameters
   * @returns Promise resolving to ImageGenerationResponse with HD image data
   *
   * @example
   * ```typescript
   * const response = await client.image.generateHD({
   *   prompt: "A detailed portrait of a cat",
   *   model: "openai/dall-e-3",
   *   size: "1024x1024"
   * });
   * ```
   */
  generateHD(options: {
    prompt: string;
    model?: string;
    size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792";
    response_format?: "url" | "b64_json";
    style?: "vivid" | "natural";
    n?: number;
    seed?: number;
    user?: string;
  }): Promise<ImageGenerationResponse>;
  /**
   * Convenience method for image generation with base64 response format.
   *
   * This method automatically sets the response format to base64 JSON
   * for direct image data access.
   *
   * @param options - Object containing prompt and other parameters
   * @returns Promise resolving to ImageGenerationResponse with base64 image data
   *
   * @example
   * ```typescript
   * const response = await client.image.generateBase64({
   *   prompt: "A digital art piece",
   *   model: "openai/dall-e-3",
   *   size: "1024x1024",
   *   appId: "my-app"
   * });
   * ```
   */
  generateBase64(options: {
    prompt: string;
    model?: string;
    size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792";
    quality?: "standard" | "hd";
    style?: "vivid" | "natural";
    n?: number;
    seed?: number;
    user?: string;
    appId?: string;
  }): Promise<ImageGenerationResponse>;
  /**
   * Convenience method for image generation with URL response format.
   *
   * This method automatically sets the response format to URL
   * for direct image URL access.
   *
   * NOTE(review): unlike generateBase64(), this options object has no appId
   * field — confirm whether that omission is intentional.
   *
   * @param options - Object containing prompt and other parameters
   * @returns Promise resolving to ImageGenerationResponse with image URLs
   *
   * @example
   * ```typescript
   * const response = await client.image.generateURL({
   *   prompt: "A modern office space",
   *   model: "openai/dall-e-3",
   *   size: "1024x1024"
   * });
   * ```
   */
  generateURL(options: {
    prompt: string;
    model?: string;
    size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792";
    quality?: "standard" | "hd";
    style?: "vivid" | "natural";
    n?: number;
    seed?: number;
    user?: string;
  }): Promise<ImageGenerationResponse>;
  /**
   * Generates multiple images from a single prompt.
   *
   * This method allows for batch image generation with a specified count,
   * using the structured parameter approach.
   *
   * @param options - Object containing prompt, count, and other parameters
   * @returns Promise resolving to ImageGenerationResponse with multiple images
   * @throws Error if count is not between 1 and 10
   *
   * @example
   * ```typescript
   * const response = await client.image.generateMultiple({
   *   prompt: "A fantasy castle",
   *   count: 4,
   *   model: "openai/dall-e-3",
   *   size: "1024x1024"
   * });
   * ```
   */
  generateMultiple(options: {
    prompt: string;
    count: number;
    model?: string;
    size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792";
    quality?: "standard" | "hd";
    response_format?: "url" | "b64_json";
    style?: "vivid" | "natural";
    seed?: number;
    user?: string;
  }): Promise<ImageGenerationResponse>;
  /**
   * Validates image generation parameters for correctness.
   *
   * This static method performs validation on image generation parameters
   * to ensure they meet the API requirements before making requests.
   *
   * @param prompt - Text prompt for image generation
   * @param width - Optional width of the image
   * @param height - Optional height of the image
   * @throws Error if parameters are invalid
   *
   * @example
   * ```typescript
   * ImageService.validateImageGenerationParams(
   *   "A beautiful landscape",
   *   1024,
   *   1024
   * );
   * ```
   */
  static validateImageGenerationParams(prompt: string, width?: number, height?: number): void;
}
/**
 * Request payload for text-to-speech synthesis.
 */
interface AudioGenerationRequest extends BaseRequest {
  /** Text input for speech synthesis */
  input: string;
  /** Voice to use for synthesis */
  voice?: string;
  /** Audio container/codec of the response */
  response_format?: "mp3" | "opus" | "aac" | "flac";
  /** Speed of speech (0.25 to 4.0) */
  speed?: number;
  /** User identifier */
  user?: string;
}
/**
 * Result of a text-to-speech call: the synthesized audio bytes plus metadata.
 */
interface AudioGenerationResponse {
  /** Audio data as a Buffer */
  audioBuffer: Buffer;
  /** Content type of the audio (e.g. a MIME type matching response_format) */
  contentType: string;
  /** Suggested filename for saving the audio */
  filename: string;
  /** Duration in seconds (if available) */
  duration?: number;
}
/**
 * Request payload for audio transcription (speech-to-text).
 *
 * NOTE(review): unlike the other request types, this does not extend
 * BaseRequest — it declares its own model field and has no appId.
 * Confirm whether that is intentional.
 */
interface AudioTranscriptionRequest {
  /** Audio file to transcribe (raw bytes, or a string — presumably a path
   * or base64 payload; confirm the expected string form) */
  file: Buffer | string;
  /** Model to use for transcription */
  model?: string;
  /** Language of the audio */
  language?: string;
  /** Response format */
  response_format?: "json" | "text" | "srt" | "verbose_json" | "vtt";
  /** Temperature for sampling */
  temperature?: number;
  /** Timestamp granularities to include in the result */
  timestamp_granularities?: ("word" | "segment")[];
  /** Prompt to guide the transcription */
  prompt?: string;
  /** User identifier */
  user?: string;
}
/**
 * Result of a speech-to-text (transcription) call. Optional fields are
 * only populated when the backend reports them (e.g. for verbose formats).
 */
interface AudioTranscriptionResponse {
/** Transcribed text */
text: string;
/** Language detected in the audio (if reported) */
language?: string;
/** Duration of the source audio in seconds (if reported) */
duration?: number;
/** Segments with timestamps — per the field comment, present for verbose
    responses (presumably response_format "verbose_json"; confirm) */
segments?: Array<{
/** Segment ID */
id: number;
/** Start time in seconds */
start: number;
/** End time in seconds */
end: number;
/** Transcribed text for this segment */
text: string;
/** Average log-probability of the segment tokens (confidence proxy) */
avg_logprob?: number;
/** Compression ratio of the segment text */
compression_ratio?: number;
/** Probability that the segment contains no speech */
no_speech_prob?: number;
/** Per-word timestamps — presumably requires the "word" timestamp
    granularity in the request; confirm */
words?: Array<{
/** Word text */
word: string;
/** Start time in seconds */
start: number;
/** End time in seconds */
end: number;
}>;
}>;
}
/**
* Service for handling audio generation and transcription operations.
* Provides both low-level API methods and high-level convenience methods
* for common audio processing tasks including text-to-speech and speech-to-text.
*/
declare class AudioService extends BaseService {
/**
 * Generates audio from text input using the Lunos AI API.
 *
 * This method handles the core audio generation functionality, validating
 * the request parameters and making the API call to generate audio from text.
 * The response includes the audio buffer, content type, and suggested filename.
 *
 * @param request - Complete audio generation request object containing
 *   input text, voice, format, speed, and other parameters
 * @returns Promise resolving to AudioGenerationResponse with audio buffer and metadata
 * @throws Error if request validation fails or API call fails
 *
 * @example
 * ```typescript
 * const response = await client.audio.generateAudio({
 *   input: "Hello, this is a test of text to speech.",
 *   voice: "alloy",
 *   model: "openai/tts",
 *   response_format: "mp3",
 *   speed: 1.0,
 *   appId: "my-app"
 * });
 * ```
 */
generateAudio(request: AudioGenerationRequest): Promise<AudioGenerationResponse>;
/**
 * Generates audio and saves it directly to a file.
 *
 * This method combines audio generation with file saving, providing
 * a convenient way to generate and store audio files in one operation.
 *
 * NOTE(review): presumably writes to disk via Node's fs module, making this
 * Node-only — confirm; in browser environments use generateAudio and handle
 * the returned Buffer manually.
 *
 * @param request - Audio generation request object
 * @param filepath - Path where the audio file should be saved
 * @returns Promise resolving to the filepath where the audio was saved
 * @throws Error if audio generation fails or file saving fails
 *
 * @example
 * ```typescript
 * const filepath = await client.audio.generateAudioToFile({
 *   input: "Welcome to our application!",
 *   voice: "nova",
 *   model: "openai/tts"
 * }, "./output/audio.mp3");
 * ```
 */
generateAudioToFile(request: AudioGenerationRequest, filepath: string): Promise<string>;
/**
 * Convenience method for text-to-speech with structured parameters.
 *
 * This method provides a simplified interface for text-to-speech using
 * a structured object that separates the text input from other options.
 * (Same capabilities as generateAudio, with `text` instead of `input`.)
 *
 * @param options - Object containing text input and optional generation parameters
 * @returns Promise resolving to AudioGenerationResponse with generated audio
 *
 * @example
 * ```typescript
 * const response = await client.audio.textToSpeech({
 *   text: "Hello from Lunos AI! This is a test of text to speech.",
 *   voice: "alloy",
 *   model: "openai/tts",
 *   response_format: "mp3",
 *   speed: 1.0,
 *   appId: "my-app"
 * });
 * ```
 */
textToSpeech(options: {
text: string;
voice?: string;
model?: string;
response_format?: "mp3" | "opus" | "aac" | "flac";
speed?: number;
user?: string;
appId?: string;
}): Promise<AudioGenerationResponse>;
/**
 * Convenience method for text-to-speech with specific format.
 *
 * This method automatically sets the response format while maintaining
 * the structured parameter approach. Note `format` is required here,
 * whereas textToSpeech takes an optional `response_format`.
 *
 * @param options - Object containing text input, format, and other parameters
 * @returns Promise resolving to AudioGenerationResponse with audio in specified format
 *
 * @example
 * ```typescript
 * const response = await client.audio.textToSpeechWithFormat({
 *   text: "This is a high-quality audio sample.",
 *   format: "flac",
 *   voice: "echo",
 *   model: "openai/tts",
 *   appId: "my-app"
 * });
 * ```
 */
textToSpeechWithFormat(options: {
text: string;
format: "mp3" | "opus" | "aac" | "flac";
voice?: string;
model?: string;
speed?: number;
user?: string;
appId?: string;
}): Promise<AudioGenerationResponse>;
/**
 * Convenience method for text-to-speech with speed control.
 *
 * This method allows for speed adjustment while maintaining
 * the structured parameter approach. Note `speed` is required here,
 * whereas textToSpeech takes an optional speed.
 *
 * @param options - Object containing text input, speed, and other parameters
 * @returns Promise resolving to AudioGenerationResponse with speed-adjusted audio
 * @throws Error if speed is not between 0.25 and 4.0
 *
 * @example
 * ```typescript
 * const response = await client.audio.textToSpeechWithSpeed({
 *   text: "This is a slow speech sample.",
 *   speed: 0.5,
 *   voice: "fable",
 *   model: "openai/tts",
 *   appId: "my-app"
 * });
 * ```
 */
textToSpeechWithSpeed(options: {
text: string;
speed: number;
voice?: string;
model?: string;
response_format?: "mp3" | "opus" | "aac" | "flac";
user?: string;
appId?: string;
}): Promise<AudioGenerationResponse>;
/**
 * Gets the file extension for a content type.
 *
 * This private method maps MIME content types to their corresponding
 * file extensions for proper file naming.
 *
 * @param contentType - MIME content type string
 * @returns File extension string
 *
 * @example
 * ```typescript
 * const extension = this.getFileExtension("audio/mpeg"); // Returns "mp3"
 * ```
 */
private getFileExtension;
/**
 * Validates audio generation parameters for correctness.
 *
 * This static method performs validation on audio generation parameters
 * to ensure they meet the API requirements before making requests.
 * It throws rather than returning a boolean (return type is void), so it
 * can be used as a guard clause.
 *
 * @param text - Text input for speech synthesis
 * @param voice - Optional voice identifier
 * @param speed - Optional speed multiplier (documented valid range 0.25–4.0)
 * @throws Error if parameters are invalid
 *
 * @example
 * ```typescript
 * AudioService.validateAudioGenerationParams(
 *   "Hello world",
 *   "alloy",
 *   1.0
 * );
 * ```
 */
static validateAudioGenerationParams(text: string, voice?: string, speed?: number): void;
/**
 * Helper to save any audio buffer to a file.
 *
 * NOTE(review): file-system access implies Node-only — confirm.
 *
 * @param audioBuffer - Raw audio bytes to persist
 * @param filepath - Destination path for the file
 */
static saveAudioToFile(audioBuffer: Buffer, filepath: string): Promise<void>;
/**
 * Helper to convert PCM buffer to WAV file using wav-encoder (mono, 24kHz, 16-bit signed).
 * Relies on the wav-encoder package being available at runtime.
 * @param pcmBuffer - PCM audio buffer
 * @param wavFilePath - Output WAV file path
 * @param sampleRate - Sample rate (default 24000)
 */
static convertPCMToWav(pcmBuffer: Buffer, wavFilePath: string, sampleRate?: number): Promise<void>;
}
/**
 * Request payload for creating vector embeddings.
 *
 * Extends BaseRequest; the embedding examples pass `model` and `appId`
 * without those fields being declared here, so they presumably come from
 * the base type — confirm against BaseRequest.
 */
interface EmbeddingRequest extends BaseRequest {
/** Input text or array of texts to embed */
input: string | string[];
/** Encoding format for the returned embedding vectors */
encoding_format?: "float" | "base64";
/** Requested dimensionality of the embedding vectors — support is
    presumably model-dependent; verify */
dimensions?: number;
/** End-user identifier for analytics/abuse monitoring */
user?: string;
}
/**
 * A single embedding vector within an EmbeddingResponse.
 *
 * NOTE(review): with encoding_format "base64", providers typically return
 * the vector as a base64 string, which this `number[]` type would not
 * reflect — confirm the actual runtime shape for base64 responses.
 */
interface EmbeddingData {
/** Object type discriminator (always "embedding") */
object: "embedding";
/** Embedding vector */
embedding: number[];
/** Index of the embedding — presumably the position of the corresponding
    input text; verify */
index: number;
}
/**
 * Response for an embedding request: the generated vectors plus token
 * usage statistics. Extends BaseResponse for common response fields.
 */
interface EmbeddingResponse extends BaseResponse {
/** Response object type discriminator */
object: "list";
/** Generated embeddings — presumably one entry per input text; verify */
data: EmbeddingData[];
/** Token usage statistics for the request */
usage: Usage;
}
/**
* Service for handling text embedding operations with the Lunos AI API.
* Provides both low-level API methods and high-level convenience methods
* for creating vector embeddings from text, with support for various
* encoding formats and dimensions.
*/
declare class EmbeddingService extends BaseService {
/**
 * Creates embeddings for input text using the Lunos AI API.
 *
 * This method handles the core embedding functionality, validating
 * the request parameters and making the API call to generate vector
 * representations of text. Supports both single texts and arrays of texts.
 *
 * @param request - Complete embedding request object containing
 *   input text(s), model, encoding format, and dimensions
 * @returns Promise resolving to EmbeddingResponse with embedding vectors
 * @throws Error if request validation fails or API call fails
 *
 * @see embed for a flattened-options convenience wrapper
 * @see embedText / embedMultiple when only the raw vectors are needed
 *
 * @example
 * ```typescript
 * const response = await client.embedding.createEmbedding({
 *   input: "This is a sample text for embedding.",
 *   model: "openai/text-embedding-3-small",
 *   encoding_format: "float",
 *   dimensions: 1536,
 *   appId: "my-app"
 * });
 * ```
 */
createEmbedding(request: EmbeddingRequest): Promise<EmbeddingResponse>;
/**
 * Convenience method for embedding text with structured parameters.
 *
 * This method provides a simplified interface for text embedding using
 * a structured object that separates the input from other options.
 * Functionally equivalent to createEmbedding with an inline options object.
 *
 * @param options - Object containing input text(s) and optional embedding parameters
 * @returns Promise resolving to EmbeddingResponse with embedding vectors
 *
 * @see createEmbedding for the underlying request-object API
 *
 * @example
 * ```typescript
 * const response = await client.embedding.embed({
 *   input: "This is a sample text for embedding.",
 *   model: "openai/text-embedding-3-small",
 *   encoding_format: "float",
 *   dimensions: 1536,
 *   appId: "my-app"
 * });
 * ```
 */
embed(options: {
input: string | string[];
model?: string;
encoding_format?: "float" | "base64";
dimensions?: number;
user?: string;
appId?: string;
}): Promise<EmbeddingResponse>;
/**
 * Embeds a single text and returns the embedding vector as an array.
 *
 * This convenience method simplifies the process when you only need
 * the raw embedding vector for a single text input. The rest of the
 * response (e.g. usage statistics) is not returned — use embed() or
 * createEmbedding() when you need the full EmbeddingResponse.
 *
 * @param text - Single text string to embed
 * @param model - Optional model identifier for embedding generation
 * @param appId - Optional application identifier for analytics
 * @returns Promise resolving to number array representing the embedding vector
 * @throws Error if embedding generation fails or response is invalid
 *
 * @example
 * ```typescript
 * const embedding = await client.embedding.embedText(
 *   "This is a sample text for embedding.",
 *   "openai/text-embedding-3-small",
 *   "my-app"
 * );
 * console.log("Embedding dimensions:", embedding.length);
 * ```
 */
embedText(text: string, model?: string, appId?: string): Promise<number[]>;
/**
 * Embeds multiple texts and returns an array of embedding vectors.
 *
 * This convenience method processes multiple texts and returns their
 * embedding vectors as a 2D array, useful for batch processing.
 * The result is presumably ordered to match the input array (via
 * EmbeddingData.index) — verify against the implementation. Usage
 * statistics are not returned; use embed() when you need them.
 *
 * @param texts - Array of text strings to embed
 * @param model - Optional model identifier for embedding generation
 * @param appId - Optional application identifier for analytics
 * @returns Promise resolving to 2D number array with embedding vectors
 * @throws Error if embedding generation fails or response is invalid
 *
 * @example
 * ```typescript
 * const embeddings = await client.embedding.embedMultiple([
 *   "First text for embedding",
 *   "Second text for embedding",
 *   "Third text for embedding"
 * ], "openai/text-embedding-3-small", "my-app");
 * console.log("Number of embeddings:", embeddings.length);
 * ```
 */
embedMultiple(texts: string[], model?: string, appId?: string): Promise<number[][]>;
/**
 * Embeds text with base64 encoding format.
 *
 * This method automatically sets the encoding format to base64,
 * which can be useful for certain applications that require
 * base64-encoded embedding vectors.
 *
 * NOTE(review): unlike embed(), this options object does not accept appId —
 * confirm whether per-request app tracking is supported here. Also note the
 * returned EmbeddingData declares `embedding: number[]`, which may not match
 * the runtime shape of base64-encoded vectors — verify.
 *
 * @param options - Object containing input text(s) and other parameters
 * @returns Promise resolving to EmbeddingResponse with base64-encoded embeddings
 *
 * @example
 * ```typescript
 * const response = await client.embedding.embedBase64({
 *   input: "Text for base64 embedding",
 *   model: "openai/text-embedding-3-small",
 *   dimensions: 1536
 * });
 * ```
 */
embedBase64(options: {
input: string | string[];
model?: string;
dimensions?: number;
user?: string;
}): Promise<EmbeddingResponse>;
/**
 * Embeds text with float encoding format.
 *
 * This method automatically sets the encoding format to float,
 * which is the standard format for most embedding applications.
 *
 * NOTE(review): unlike embed(), this options object does not accept appId —
 * confirm whether per-request app tracking is supported here.
 *
 * @param options - Object containing input text(s) and other parameters
 * @returns Promise resolving to EmbeddingResponse with float-encoded embeddings
 *
 * @example
 * ```typescript
 * const response = await client.embedding.embedFloat({
 *   input: "Text for float embedding",
 *   model: "openai/text-embedding-3-small",
 *   dimensions: 1536
 * });
 * ```
 */
embedFloat(options: {
input: string | string[];
model?: string;
dimensions?: number;
user?: string;
}): Promise<EmbeddingResponse>;
/**
* Embeds text with custom dimensions specification.
*
* This method allows for explicit dimension specification while maintaining
* the structured parameter approach.
*
* @param options - Object containing input text(s), dimensions, and other parameters
* @returns Promise resolving to EmbeddingResponse with custom-dimension embeddings
* @throws Error if dimensions is less than 1
*
* @example
* ```typescript
* const response = await client.embedding.embedWithDimensions({
* input: "Text for custom dimension embedding",
* dimensions: 1024,
* model: "openai/text-embedding-3-small"
* });
* ```
*/
embedWithDimensions(options: {
input: string | string[];
dimensions: number;
model?: string;
encoding_format?: "float" | "base64";
user?: st