@lunos/client
Version:
Official TypeScript client library for Lunos AI API - A comprehensive AI proxy service supporting chat completions, image generation, audio synthesis, embeddings, and more.
1,607 lines (1,595 loc) • 98.7 kB
JavaScript
'use strict';
var fs = require('fs');
var path = require('path');
var WavEncoder = require('wav-encoder');
// Normalizes a CommonJS export for default-import consumption: genuine ES
// modules pass through untouched, anything else is wrapped as `.default`.
function _interopDefault (e) {
  if (e && e.__esModule) {
    return e;
  }
  return { default: e };
}
// Builds a frozen ESM-style namespace object around a CommonJS export.
// ES modules are returned as-is; otherwise every own key (except `default`)
// is mirrored with a live getter so later mutations of the source object
// remain visible, and the whole source is attached as `default`.
function _interopNamespace(e) {
  if (e && e.__esModule) return e;
  const n = Object.create(null);
  if (e) {
    for (const k of Object.keys(e)) {
      if (k === 'default') continue;
      const d = Object.getOwnPropertyDescriptor(e, k);
      // Reuse an existing accessor descriptor; wrap data properties in a
      // forwarding getter so reads stay live.
      const descriptor = d.get ? d : {
        enumerable: true,
        get: function () { return e[k]; }
      };
      Object.defineProperty(n, k, descriptor);
    }
  }
  n.default = e;
  return Object.freeze(n);
}
// ESM-shaped views of the CommonJS requires above, so the transpiled module
// code below can use namespace-style (`fs__namespace.x`) and default-style
// (`WavEncoder__default.default`) access uniformly.
var fs__namespace = /*#__PURE__*/_interopNamespace(fs);
var path__namespace = /*#__PURE__*/_interopNamespace(path);
var WavEncoder__default = /*#__PURE__*/_interopDefault(WavEncoder);
// Lunos AI Client Library - https://lunos.tech
// src/client/config/DefaultConfig.ts
// Baseline client configuration. User-supplied values are layered on top of
// these defaults by mergeConfig().
var DEFAULT_CONFIG = {
  baseUrl: "https://api.lunos.tech",
  apiKey: "",
  timeout: 60000,     // request timeout, milliseconds
  retries: 3,         // retry attempts for failed requests
  retryDelay: 1000,   // base delay between retries, milliseconds
  fallback_model: undefined,
  appId: "Unknown",   // sent as the X-App-ID header
  debug: false
};
/**
 * Merges user-supplied configuration over DEFAULT_CONFIG.
 *
 * Keys the caller explicitly sets to `undefined` are ignored so they cannot
 * clobber a sane default — the previous spread-only merge let
 * `{ timeout: undefined }` through, which later failed validateTimeout().
 *
 * @param userConfig - Partial configuration from the caller (may be omitted).
 * @returns Complete configuration object.
 */
function mergeConfig(userConfig) {
  const merged = { ...DEFAULT_CONFIG };
  for (const [key, value] of Object.entries(userConfig || {})) {
    if (value !== undefined) {
      merged[key] = value;
    }
  }
  return merged;
}
// src/client/errors/LunosError.ts
/**
 * Base error type for everything raised by the Lunos client.
 * Carries an HTTP status (0 when no response was received), an optional
 * machine-readable code, and optional structured details.
 */
var LunosError = class extends Error {
  status;
  code;
  details;
  constructor(message, status = 0, code, details) {
    super(message);
    this.name = "LunosError";
    Object.assign(this, { status, code, details });
  }
};
// Generic API error used for HTTP statuses that have no dedicated subclass
// (see handleErrorResponse's default branch in BaseService).
var APIError = class extends LunosError {
constructor(message, status, code, details) {
super(message, status, code, details);
this.name = "APIError";
}
};
// Client-side request-validation failure; always reported with status 400
// and code "VALIDATION_ERROR". Thrown by ValidationUtils before any request
// is sent.
var ValidationError = class extends LunosError {
constructor(message, details) {
super(message, 400, "VALIDATION_ERROR", details);
this.name = "ValidationError";
}
};
// Invalid or missing API key; maps to HTTP 401 / "AUTHENTICATION_ERROR".
var AuthenticationError = class extends LunosError {
constructor(message = "Authentication failed") {
super(message, 401, "AUTHENTICATION_ERROR");
this.name = "AuthenticationError";
}
};
// HTTP 429. `retryAfter` (as parsed from the server's Retry-After header;
// number or undefined) is stored under `details.retryAfter`.
var RateLimitError = class extends LunosError {
constructor(message = "Rate limit exceeded", retryAfter) {
super(message, 429, "RATE_LIMIT_ERROR", { retryAfter });
this.name = "RateLimitError";
}
};
// Transport-level failure where no HTTP response was received; status is 0.
var NetworkError = class extends LunosError {
constructor(message = "Network error occurred") {
super(message, 0, "NETWORK_ERROR");
this.name = "NetworkError";
}
};
// src/utils/validation.ts
/**
 * Static request validators shared by all services. Every method throws a
 * ValidationError on the first violated constraint and returns nothing on
 * success. Check order (and every message) matches the public contract.
 */
var ValidationUtils = class ValidationUtils {
  // Roles accepted in chat messages.
  static #CHAT_ROLES = ["system", "user", "assistant", "function", "tool"];
  // Voices accepted by OpenAI-style TTS models.
  static #OPENAI_VOICES = [
    "alloy",
    "echo",
    "fable",
    "onyx",
    "nova",
    "shimmer"
  ];
  // Voices accepted by Google TTS models (model id starts with "google").
  static #GOOGLE_VOICES = [
    "Zephyr",
    "Puck",
    "Charon",
    "Kore",
    "Fenrir",
    "Leda",
    "Orus",
    "Aoede",
    "Callirrhoe",
    "Autonoe",
    "Enceladus",
    "Iapetus",
    "Umbriel",
    "Algieba",
    "Despina",
    "Erinome",
    "Algenib",
    "Rasalgethi",
    "Laomedeia",
    "Achernar",
    "Alnilam",
    "Schedar",
    "Gacrux",
    "Pulcherrima",
    "Achird",
    "Zubenelgenubi",
    "Vindemiatrix",
    "Sadachbia",
    "Sadaltager",
    "Sulafat"
  ];
  // Throws when `value` is defined and falls outside [min, max].
  static #requireRange(value, min, max, message) {
    if (value !== undefined && (value < min || value > max)) {
      throw new ValidationError(message);
    }
  }
  // Throws when a truthy `value` is not one of `allowed`.
  static #requireOneOf(value, allowed, message) {
    if (value && !allowed.includes(value)) {
      throw new ValidationError(message);
    }
  }
  /**
   * Validates a chat completion request: non-empty messages with known
   * roles, plus range checks on the sampling parameters.
   */
  static validateChatCompletionRequest(request) {
    const messages = request.messages;
    if (!Array.isArray(messages) || messages.length === 0) {
      throw new ValidationError(
        "Messages array is required and cannot be empty"
      );
    }
    for (const message of messages) {
      if (!message.role || !message.content) {
        throw new ValidationError(
          "Each message must have a role and content"
        );
      }
      if (!ValidationUtils.#CHAT_ROLES.includes(message.role)) {
        throw new ValidationError(`Invalid role: ${message.role}`);
      }
    }
    ValidationUtils.#requireRange(
      request.temperature,
      0,
      2,
      "Temperature must be between 0 and 2"
    );
    ValidationUtils.#requireRange(
      request.top_p,
      0,
      1,
      "Top_p must be between 0 and 1"
    );
    if (request.max_tokens !== undefined && request.max_tokens < 1) {
      throw new ValidationError("Max_tokens must be at least 1");
    }
    ValidationUtils.#requireRange(
      request.presence_penalty,
      -2,
      2,
      "Presence penalty must be between -2 and 2"
    );
    ValidationUtils.#requireRange(
      request.frequency_penalty,
      -2,
      2,
      "Frequency penalty must be between -2 and 2"
    );
  }
  /**
   * Validates an image generation request: prompt, count, size/dimensions,
   * and the enumerated quality / response_format / style options.
   */
  static validateImageGenerationRequest(request) {
    if (!request.prompt || typeof request.prompt !== "string" || request.prompt.trim().length === 0) {
      throw new ValidationError("Prompt is required and cannot be empty");
    }
    ValidationUtils.#requireRange(
      request.n,
      1,
      10,
      "Number of images must be between 1 and 10"
    );
    ValidationUtils.#requireOneOf(
      request.size,
      ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"],
      "Invalid size. Must be one of: 256x256, 512x512, 1024x1024, 1792x1024, 1024x1792"
    );
    ValidationUtils.#requireRange(
      request.width,
      256,
      1792,
      "Width must be between 256 and 1792"
    );
    ValidationUtils.#requireRange(
      request.height,
      256,
      1792,
      "Height must be between 256 and 1792"
    );
    ValidationUtils.#requireOneOf(
      request.quality,
      ["standard", "hd"],
      'Quality must be either "standard" or "hd"'
    );
    ValidationUtils.#requireOneOf(
      request.response_format,
      ["url", "b64_json"],
      'Response format must be either "url" or "b64_json"'
    );
    ValidationUtils.#requireOneOf(
      request.style,
      ["vivid", "natural"],
      'Style must be either "vivid" or "natural"'
    );
  }
  /**
   * Validates an audio (TTS) request: input text length, a voice that is
   * valid for the selected provider, response format, and speed.
   */
  static validateAudioGenerationRequest(request) {
    if (!request.input || typeof request.input !== "string" || request.input.trim().length === 0) {
      throw new ValidationError(
        "Input text is required and cannot be empty"
      );
    }
    if (request.input.length > 4096) {
      throw new ValidationError("Input text cannot exceed 4096 characters");
    }
    if (request.voice) {
      // The provider is inferred from the model id prefix; anything that
      // is not Google-prefixed is validated against the OpenAI voice set.
      const isGoogle = (request.model || "").startsWith("google");
      const voices = isGoogle ? ValidationUtils.#GOOGLE_VOICES : ValidationUtils.#OPENAI_VOICES;
      const provider = isGoogle ? "Google" : "OpenAI";
      if (!voices.includes(request.voice)) {
        throw new ValidationError(
          `Invalid voice for ${provider} TTS. Must be one of: ${voices.join(
            ", "
          )}`
        );
      }
    }
    ValidationUtils.#requireOneOf(
      request.response_format,
      ["mp3", "opus", "aac", "flac", "pcm", "wav", "linear16"],
      "Response format must be one of: mp3, opus, aac, flac, pcm, wav, linear16"
    );
    ValidationUtils.#requireRange(
      request.speed,
      0.25,
      4,
      "Speed must be between 0.25 and 4.0"
    );
  }
  /**
   * Validates an embedding request: input may be one non-empty string or a
   * non-empty array of non-empty strings; also checks encoding/dimensions.
   */
  static validateEmbeddingRequest(request) {
    const input = request.input;
    if (!input) {
      throw new ValidationError("Input is required");
    }
    if (typeof input === "string") {
      if (input.trim().length === 0) {
        throw new ValidationError("Input text cannot be empty");
      }
    } else if (Array.isArray(input)) {
      if (input.length === 0) {
        throw new ValidationError("Input array cannot be empty");
      }
      const allValid = input.every(
        (text) => typeof text === "string" && text.trim().length > 0
      );
      if (!allValid) {
        throw new ValidationError(
          "All input texts must be non-empty strings"
        );
      }
    } else {
      throw new ValidationError(
        "Input must be a string or array of strings"
      );
    }
    ValidationUtils.#requireOneOf(
      request.encoding_format,
      ["float", "base64"],
      'Encoding format must be either "float" or "base64"'
    );
    if (request.dimensions !== undefined && request.dimensions < 1) {
      throw new ValidationError("Dimensions must be at least 1");
    }
  }
  /**
   * Validates API key presence and a minimal length sanity check.
   */
  static validateApiKey(apiKey) {
    if (!apiKey || typeof apiKey !== "string" || apiKey.trim().length === 0) {
      throw new ValidationError("API key is required");
    }
    if (apiKey.length < 10) {
      throw new ValidationError("API key appears to be invalid (too short)");
    }
  }
  /**
   * Validates that the base URL is a parseable absolute URL.
   */
  static validateBaseUrl(baseUrl) {
    if (!baseUrl || typeof baseUrl !== "string") {
      throw new ValidationError("Base URL is required");
    }
    try {
      new URL(baseUrl);
    } catch {
      throw new ValidationError("Invalid base URL format");
    }
  }
  /**
   * Validates the timeout (milliseconds, 1s–300s).
   */
  static validateTimeout(timeout) {
    if (typeof timeout !== "number" || timeout < 1000 || timeout > 300000) {
      throw new ValidationError(
        "Timeout must be a number between 1000 and 300000 milliseconds"
      );
    }
  }
  /**
   * Validates retry count (0–10) and retry delay (100–10000 ms).
   */
  static validateRetryConfig(retries, retryDelay) {
    if (typeof retries !== "number" || retries < 0 || retries > 10) {
      throw new ValidationError("Retries must be a number between 0 and 10");
    }
    if (typeof retryDelay !== "number" || retryDelay < 100 || retryDelay > 10000) {
      throw new ValidationError(
        "Retry delay must be a number between 100 and 10000 milliseconds"
      );
    }
  }
  /**
   * Validates a video generation request: prompt, model, and the currently
   * supported aspect ratio ("16:9") and response format ("mp4").
   */
  static validateVideoGenerationRequest(request) {
    if (!request.prompt || typeof request.prompt !== "string" || request.prompt.trim().length === 0) {
      throw new ValidationError("Prompt is required and cannot be empty");
    }
    if (!request.model || typeof request.model !== "string" || request.model.trim().length === 0) {
      throw new ValidationError("Model is required and cannot be empty");
    }
    const aspectRatio = request.parameters?.aspectRatio;
    if (aspectRatio && aspectRatio !== "16:9") {
      throw new ValidationError("Aspect ratio must be '16:9' or undefined");
    }
    if (request.response_format && request.response_format !== "mp4") {
      throw new ValidationError(
        "Response format must be 'mp4' or undefined"
      );
    }
  }
  /**
   * Validates the fallback model: undefined, or a non-empty string.
   */
  static validateFallbackModel(fallbackModel) {
    if (fallbackModel === undefined) {
      return;
    }
    if (typeof fallbackModel !== "string" || fallbackModel.trim().length === 0) {
      throw new ValidationError(
        "Fallback model must be a non-empty string"
      );
    }
  }
};
// src/services/base/BaseService.ts
/**
 * Shared HTTP plumbing for all Lunos API services.
 *
 * Builds authenticated requests, applies per-request timeouts via
 * AbortSignal, retries transient failures with exponential backoff, and can
 * re-issue a failed request with a configured fallback model.
 *
 * Fixes vs. the previous revision: `parseInt` now gets an explicit radix,
 * the `case 429` lexical declaration is braced, and shouldTryFallback no
 * longer crashes on thrown values that lack a `.message`.
 */
var BaseService = class {
  config;
  fetchImpl;
  /**
   * @param config - Resolved client configuration (see DEFAULT_CONFIG).
   *                 `config.fetch` may inject a custom fetch implementation;
   *                 otherwise the global fetch is used.
   */
  constructor(config) {
    this.config = config;
    this.fetchImpl = config.fetch || fetch;
  }
  /**
   * Makes a JSON request to the API with retry logic and error handling.
   *
   * @param endpoint - Path appended to `config.baseUrl` (e.g. "/v1/usage").
   * @param options - fetch() init options (method, body, ...).
   * @param requestOptions - Per-request overrides: timeout, headers, signal,
   *                         appId, fallback_model.
   * @returns Parsed JSON response body.
   * @throws LunosError subclasses on HTTP errors; NetworkError otherwise.
   */
  async makeRequest(endpoint, options = {}, requestOptions = {}) {
    const url = `${this.config.baseUrl}${endpoint}`;
    const timeout = requestOptions.timeout || this.config.timeout;
    const headers = {
      "Content-Type": "application/json",
      Authorization: `Bearer ${this.config.apiKey}`,
      ...this.config.headers,
      ...requestOptions.headers
    };
    const appId = requestOptions.appId || this.config.appId;
    if (appId) {
      headers["X-App-ID"] = appId;
    }
    const requestOptions_ = {
      ...options,
      headers,
      // Caller-supplied signal wins; otherwise abort after `timeout` ms.
      signal: requestOptions.signal || AbortSignal.timeout(timeout)
    };
    const retryConfig = {
      maxRetries: this.config.retries,
      baseDelay: this.config.retryDelay,
      maxDelay: 1e4,
      exponentialBackoff: true,
      retryStatusCodes: [408, 429, 500, 502, 503, 504]
    };
    return this.makeRequestWithRetry(
      url,
      requestOptions_,
      retryConfig,
      requestOptions.fallback_model || this.config.fallback_model
    );
  }
  /**
   * Makes a streaming request to the API.
   *
   * No retry loop here: a partially-consumed stream cannot be retried.
   * On failure, optionally retries once with the fallback model.
   *
   * @returns The raw response body as a ReadableStream<Uint8Array>.
   */
  async makeStreamRequest(endpoint, options = {}, requestOptions = {}) {
    const url = `${this.config.baseUrl}${endpoint}`;
    const timeout = requestOptions.timeout || this.config.timeout;
    const headers = {
      "Content-Type": "application/json",
      Authorization: `Bearer ${this.config.apiKey}`,
      ...this.config.headers,
      ...requestOptions.headers
    };
    const appId = requestOptions.appId || this.config.appId;
    if (appId) {
      headers["X-App-ID"] = appId;
    }
    const requestOptions_ = {
      ...options,
      headers,
      signal: requestOptions.signal || AbortSignal.timeout(timeout)
    };
    try {
      const response = await this.fetchImpl(url, requestOptions_);
      if (!response.ok) {
        await this.handleErrorResponse(response);
      }
      if (!response.body) {
        throw new LunosError("No response body for streaming request", 0);
      }
      return response.body;
    } catch (error) {
      const fallbackModel = requestOptions.fallback_model || this.config.fallback_model;
      if (fallbackModel && this.shouldTryFallback(error)) {
        return this.tryStreamWithFallbackModel(
          url,
          requestOptions_,
          fallbackModel
        );
      }
      if (error instanceof LunosError) {
        throw error;
      }
      throw new NetworkError(
        `Network error: ${error instanceof Error ? error.message : String(error)}`
      );
    }
  }
  /**
   * Makes a request and returns the raw response body as a Buffer
   * (for audio and other binary payloads).
   *
   * @returns `{ buffer, contentType }` where contentType defaults to
   *          "application/octet-stream" when the server omits the header.
   */
  async makeRawRequest(endpoint, options = {}, requestOptions = {}) {
    const url = `${this.config.baseUrl}${endpoint}`;
    const timeout = requestOptions.timeout || this.config.timeout;
    const headers = {
      "Content-Type": "application/json",
      Authorization: `Bearer ${this.config.apiKey}`,
      ...this.config.headers,
      ...requestOptions.headers
    };
    const appId = requestOptions.appId || this.config.appId;
    if (appId) {
      headers["X-App-ID"] = appId;
    }
    const requestOptions_ = {
      ...options,
      headers,
      signal: requestOptions.signal || AbortSignal.timeout(timeout)
    };
    const response = await this.fetchImpl(url, requestOptions_);
    if (!response.ok) {
      await this.handleErrorResponse(response);
    }
    const contentType = response.headers.get("content-type") || "application/octet-stream";
    const arrayBuffer = await response.arrayBuffer();
    return { buffer: Buffer.from(arrayBuffer), contentType };
  }
  /**
   * Runs the fetch with up to `maxRetries` additional attempts.
   *
   * Auth/validation errors abort immediately; after the final failed
   * attempt, the fallback model is tried once (if configured and the error
   * looks model-related), otherwise the last error is rethrown.
   */
  async makeRequestWithRetry(url, options, retryConfig, fallbackModel) {
    let lastError;
    for (let attempt = 0; attempt <= retryConfig.maxRetries; attempt++) {
      try {
        const response = await this.fetchImpl(url, options);
        if (!response.ok) {
          await this.handleErrorResponse(response);
        }
        return await response.json();
      } catch (error) {
        lastError = error instanceof Error ? error : new Error(String(error));
        // These cannot succeed on retry — surface them immediately.
        if (error instanceof AuthenticationError || error instanceof ValidationError) {
          throw error;
        }
        if (attempt === retryConfig.maxRetries) {
          if (fallbackModel && this.shouldTryFallback(error)) {
            return this.tryWithFallbackModel(
              url,
              options,
              fallbackModel
            );
          }
          throw lastError;
        }
        const delay = this.calculateRetryDelay(attempt, retryConfig);
        if (this.config.debug) {
          console.warn(
            `Request failed, retrying in ${delay}ms (attempt ${attempt + 1}/${retryConfig.maxRetries + 1})`
          );
        }
        await this.sleep(delay);
      }
    }
    throw lastError;
  }
  /**
   * Converts a non-OK HTTP response into the appropriate LunosError
   * subclass. Always throws; never returns.
   */
  async handleErrorResponse(response) {
    let errorMessage;
    let errorDetails;
    try {
      const errorData = await response.json();
      errorMessage = errorData.error?.message || `HTTP ${response.status}`;
      errorDetails = errorData.error;
    } catch {
      // Body was not JSON; fall back to the status line.
      errorMessage = `HTTP ${response.status}: ${response.statusText}`;
    }
    switch (response.status) {
      case 401:
        throw new AuthenticationError(errorMessage);
      case 429: {
        // Braced block scopes the const to this case; radix 10 is explicit
        // (previous code called parseInt without one).
        const retryAfter = response.headers.get("retry-after");
        throw new RateLimitError(
          errorMessage,
          retryAfter ? Number.parseInt(retryAfter, 10) : void 0
        );
      }
      case 400:
        throw new LunosError(
          errorMessage,
          response.status,
          "BAD_REQUEST",
          errorDetails
        );
      case 403:
        throw new LunosError(
          errorMessage,
          response.status,
          "FORBIDDEN",
          errorDetails
        );
      case 404:
        throw new LunosError(
          errorMessage,
          response.status,
          "NOT_FOUND",
          errorDetails
        );
      case 500:
      case 502:
      case 503:
      case 504:
        throw new LunosError(
          errorMessage,
          response.status,
          "SERVER_ERROR",
          errorDetails
        );
      default:
        throw new APIError(errorMessage, response.status);
    }
  }
  /**
   * Calculates the delay before the next retry: fixed baseDelay, or
   * baseDelay * 2^attempt capped at maxDelay when backoff is enabled.
   */
  calculateRetryDelay(attempt, retryConfig) {
    if (!retryConfig.exponentialBackoff) {
      return retryConfig.baseDelay;
    }
    const delay = retryConfig.baseDelay * Math.pow(2, attempt);
    return Math.min(delay, retryConfig.maxDelay);
  }
  /**
   * Promise-based sleep for `ms` milliseconds.
   */
  sleep(ms) {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }
  /**
   * Heuristic: does this error message look model-related (and therefore
   * worth retrying with the fallback model)?
   */
  shouldTryFallback(error) {
    const modelErrorKeywords = [
      "model",
      "model not found",
      "model unavailable",
      "model error",
      "invalid model",
      "model not available",
      "model temporarily unavailable"
    ];
    // Defensive: a custom fetch may throw non-Error values without
    // `.message`; the previous code crashed on those.
    const errorMessage = String(error?.message ?? error).toLowerCase();
    return modelErrorKeywords.some(
      (keyword) => errorMessage.includes(keyword)
    );
  }
  /**
   * Re-issues the request once with `fallbackModel` swapped into the JSON
   * body's `model` field.
   */
  async tryWithFallbackModel(url, options, fallbackModel) {
    if (this.config.debug) {
      console.warn(`Trying with fallback model: ${fallbackModel}`);
    }
    try {
      const body = JSON.parse(options.body);
      const originalModel = body.model;
      body.model = fallbackModel;
      const fallbackOptions = {
        ...options,
        body: JSON.stringify(body)
      };
      const response = await this.fetchImpl(url, fallbackOptions);
      if (!response.ok) {
        await this.handleErrorResponse(response);
      }
      const result = await response.json();
      if (this.config.debug) {
        console.warn(
          `Successfully used fallback model: ${fallbackModel} (original: ${originalModel})`
        );
      }
      return result;
    } catch (error) {
      if (this.config.debug) {
        console.error(
          `Fallback model ${fallbackModel} also failed:`,
          error
        );
      }
      throw error;
    }
  }
  /**
   * Streaming variant of tryWithFallbackModel; returns the response body
   * stream on success.
   */
  async tryStreamWithFallbackModel(url, options, fallbackModel) {
    if (this.config.debug) {
      console.warn(
        `Trying with fallback model for streaming: ${fallbackModel}`
      );
    }
    try {
      const body = JSON.parse(options.body);
      const originalModel = body.model;
      body.model = fallbackModel;
      const fallbackOptions = {
        ...options,
        body: JSON.stringify(body)
      };
      const response = await this.fetchImpl(url, fallbackOptions);
      if (!response.ok) {
        await this.handleErrorResponse(response);
      }
      if (!response.body) {
        throw new LunosError("No response body for streaming request", 0);
      }
      if (this.config.debug) {
        console.warn(
          `Successfully used fallback model for streaming: ${fallbackModel} (original: ${originalModel})`
        );
      }
      return response.body;
    } catch (error) {
      if (this.config.debug) {
        console.error(
          `Fallback model ${fallbackModel} also failed for streaming:`,
          error
        );
      }
      throw error;
    }
  }
  /**
   * Validates the service configuration (API key, URL, timeout, retries).
   */
  validateConfig() {
    ValidationUtils.validateApiKey(this.config.apiKey);
    ValidationUtils.validateBaseUrl(this.config.baseUrl);
    ValidationUtils.validateTimeout(this.config.timeout);
    ValidationUtils.validateRetryConfig(
      this.config.retries,
      this.config.retryDelay
    );
  }
  /**
   * Logs debug information when debug mode is enabled.
   */
  log(message, data) {
    if (this.config.debug) {
      console.log(`[Lunos Debug] ${message}`, data || "");
    }
  }
};
// src/utils/streaming.ts
// Parser for Server-Sent-Events (SSE) chat-completion streams. Keeps a
// carry-over buffer so SSE lines split across network chunks are reassembled.
var StreamProcessor = class {
decoder;
buffer;
constructor() {
this.decoder = new TextDecoder();
// Holds the trailing partial line of the previous chunk until the rest
// arrives.
this.buffer = "";
}
/**
* Processes a streaming response from the API.
*
* Reads the stream to completion, parses each `data: {...}` SSE line as
* JSON, and invokes callbacks:
* - onChunk(parsed) for every parsed chunk,
* - onComplete() once the stream finishes cleanly,
* - onError(err) if reading throws.
* When `accumulate` is true, delta content from choices[0] is concatenated
* into the returned `fullResponse`.
*
* Returns { fullResponse?, completed, error? }; it never rejects — read
* errors are captured in the result (and forwarded to onError).
*/
async processStream(stream, options = {}) {
var _a, _b, _c;
const { onChunk, onComplete, onError, accumulate = false } = options;
const reader = stream.getReader();
let fullResponse = "";
let completed = false;
let error;
try {
while (true) {
const { done, value } = await reader.read();
if (done) {
completed = true;
break;
}
this.buffer += this.decoder.decode(value, { stream: true });
const lines = this.buffer.split("\n");
// The last element may be an incomplete line; keep it for next read.
this.buffer = lines.pop() || "";
for (const line of lines) {
if (line.trim() === "") continue;
if (line.startsWith("data: ")) {
const data = line.slice(6);
if (data === "[DONE]") {
completed = true;
// NOTE: this `break` only exits the line loop; the outer while
// keeps reading until the stream itself ends.
break;
}
try {
const parsed = JSON.parse(data);
if (onChunk) {
onChunk(parsed);
}
// (Transpiled optional chaining: parsed.choices?.[0]?.delta?.content)
if (accumulate && ((_c = (_b = (_a = parsed.choices) == null ? void 0 : _a[0]) == null ? void 0 : _b.delta) == null ? void 0 : _c.content)) {
fullResponse += parsed.choices[0].delta.content;
}
} catch (e) {
// Malformed chunks are skipped, not fatal.
console.warn("Failed to parse stream chunk:", e);
}
}
}
}
} catch (e) {
error = e instanceof Error ? e : new Error(String(e));
if (onError) {
onError(error);
}
} finally {
// Always release so the underlying stream can be reused or cancelled.
reader.releaseLock();
}
if (onComplete && completed) {
onComplete();
}
return {
fullResponse: accumulate ? fullResponse : void 0,
completed,
error
};
}
/**
* Creates a ReadableStream of plain content strings from a raw SSE byte
* stream. Each enqueued value is the delta content of choices[0]; the
* optional options.onChunk still receives the full parsed chunk.
*/
createReadableStream(stream, options = {}) {
const { onChunk } = options;
return new ReadableStream({
start: async (controller) => {
try {
await this.processStream(stream, {
...options,
onChunk: (chunk) => {
var _a, _b, _c;
if (onChunk) onChunk(chunk);
if ((_c = (_b = (_a = chunk.choices) == null ? void 0 : _a[0]) == null ? void 0 : _b.delta) == null ? void 0 : _c.content) {
controller.enqueue(chunk.choices[0].delta.content);
}
},
onComplete: () => controller.close(),
onError: (error) => controller.error(error)
});
} catch (error) {
controller.error(error);
}
}
});
}
/**
* Processes a stream and returns the full response as a single string.
* The optional onChunk callback receives each delta content string as it
* arrives. Rethrows any error captured during stream processing.
*/
async processStreamToString(stream, onChunk) {
const result = await this.processStream(stream, {
onChunk: (chunk) => {
var _a, _b, _c;
if (onChunk && ((_c = (_b = (_a = chunk.choices) == null ? void 0 : _a[0]) == null ? void 0 : _b.delta) == null ? void 0 : _c.content)) {
onChunk(chunk.choices[0].delta.content);
}
},
accumulate: true
});
if (result.error) {
throw result.error;
}
return result.fullResponse || "";
}
/**
* Validates that a fetch Response is OK and has a body suitable for
* streaming; throws LunosError otherwise.
*/
static validateStreamResponse(response) {
if (!response.ok) {
throw new LunosError(
`Stream request failed: ${response.status} ${response.statusText}`,
response.status
);
}
if (!response.body) {
throw new LunosError("No response body for streaming request");
}
}
};
// src/services/ChatService.ts
/**
 * Chat-completion service for the Lunos AI API.
 *
 * Wraps /v1/chat/completions with validation, streaming helpers, and
 * convenience methods for common conversation shapes.
 *
 * Fixes vs. the previous revision: createCompletionWithStream now tees the
 * stream (the old code consumed and locked the very stream it returned),
 * and appId is forwarded consistently on the streaming paths.
 */
var ChatService = class extends BaseService {
  /**
   * Creates a chat completion using the Lunos AI API.
   *
   * Validates the request parameters, then POSTs to /v1/chat/completions.
   * Supports a fallback model for reliability.
   *
   * @param request - Chat completion request: messages, model, sampling
   *                  parameters, optional fallback_model and appId.
   * @returns Promise resolving to a ChatCompletionResponse.
   * @throws ValidationError if the request is malformed; API errors otherwise.
   *
   * @example
   * ```typescript
   * const response = await client.chat.createCompletion({
   *   model: "openai/gpt-4.1-mini",
   *   messages: [
   *     { role: "user", content: "Hello! Can you tell me a short joke?" }
   *   ],
   *   max_tokens: 100,
   *   fallback_model: "openai/gpt-4.1-mini",
   *   appId: "my-app"
   * });
   * ```
   */
  async createCompletion(request) {
    ValidationUtils.validateChatCompletionRequest(request);
    this.log("Creating chat completion", {
      model: request.model,
      messages: request.messages.length,
      fallback_model: request.fallback_model,
      appId: request.appId
    });
    return this.makeRequest(
      "/v1/chat/completions",
      {
        method: "POST",
        body: JSON.stringify(request)
      },
      {
        fallback_model: request.fallback_model,
        appId: request.appId
      }
    );
  }
  /**
   * Creates a streaming chat completion that returns a raw stream.
   *
   * The stream contains Server-Sent Events (SSE) chunks that need to be
   * parsed (see StreamProcessor).
   *
   * @param request - Complete chat completion request object.
   * @returns Promise resolving to ReadableStream<Uint8Array>.
   * @throws ValidationError if the request is malformed; API errors otherwise.
   *
   * @example
   * ```typescript
   * const stream = await client.chat.createCompletionStream({
   *   model: "openai/gpt-4.1-mini",
   *   messages: [
   *     { role: "user", content: "Write a haiku about programming." }
   *   ]
   * });
   * ```
   */
  async createCompletionStream(request) {
    ValidationUtils.validateChatCompletionRequest(request);
    this.log("Creating streaming chat completion", {
      model: request.model,
      messages: request.messages.length,
      fallback_model: request.fallback_model
    });
    const streamRequest = { ...request, stream: true };
    return this.makeStreamRequest(
      "/v1/chat/completions",
      {
        method: "POST",
        body: JSON.stringify(streamRequest)
      },
      {
        fallback_model: request.fallback_model,
        // Forward appId so streaming requests carry the same X-App-ID
        // header as non-streaming ones (previously dropped here).
        appId: request.appId
      }
    );
  }
  /**
   * Creates a streaming chat completion with optional callback processing.
   *
   * When onChunk is supplied, the underlying stream is tee()'d: one branch
   * feeds the callback with each content delta, and the other — still fully
   * readable — is returned to the caller. (The previous implementation read
   * the returned stream itself, leaving it locked and already consumed.)
   *
   * @param request - Complete chat completion request object.
   * @param onChunk - Optional callback invoked with each content string.
   * @returns Promise resolving to a readable ReadableStream<Uint8Array>.
   * @throws ValidationError if the request is malformed; API errors otherwise.
   *
   * @example
   * ```typescript
   * let streamedResponse = "";
   * const stream = await client.chat.createCompletionWithStream(
   *   {
   *     model: "openai/gpt-4.1-mini",
   *     messages: [
   *       { role: "user", content: "Write a haiku about programming." }
   *     ]
   *   },
   *   (chunk) => {
   *     streamedResponse += chunk;
   *     process.stdout.write(chunk);
   *   }
   * );
   * ```
   */
  async createCompletionWithStream(request, onChunk) {
    const stream = await this.createCompletionStream(request);
    if (!onChunk) {
      return stream;
    }
    const [callbackBranch, callerBranch] = stream.tee();
    const processor = new StreamProcessor();
    // processStream never rejects (errors are captured internally and
    // surfaced via callbacks); mark the promise as intentionally un-awaited.
    void processor.processStream(callbackBranch, {
      onChunk: (chunk) => {
        const content = chunk.choices?.[0]?.delta?.content;
        if (content) {
          onChunk(content);
        }
      }
    });
    return callerBranch;
  }
  /**
   * Creates a streaming chat completion and returns the full response as a
   * string, optionally calling onChunk for each content delta along the way.
   * Provided for backward compatibility and convenience.
   *
   * @param request - Complete chat completion request object.
   * @param onChunk - Optional callback invoked with each content string.
   * @returns Promise resolving to the complete response text.
   * @throws ValidationError if the request is malformed; API/stream errors otherwise.
   *
   * @example
   * ```typescript
   * const response = await client.chat.createCompletionWithStreamToString(
   *   {
   *     model: "openai/gpt-4.1-mini",
   *     messages: [{ role: "user", content: "Explain quantum computing." }]
   *   },
   *   (chunk) => console.log("Chunk:", chunk)
   * );
   * ```
   */
  async createCompletionWithStreamToString(request, onChunk) {
    const stream = await this.createCompletionStream(request);
    const processor = new StreamProcessor();
    return processor.processStreamToString(stream, onChunk);
  }
  /**
   * Gets a specific generation by ID from the API.
   *
   * @param id - Unique identifier of the generation to retrieve.
   * @returns Promise resolving to the generation information.
   * @throws Error if ID is missing/not a string; API errors otherwise.
   *
   * @example
   * ```typescript
   * const generation = await client.chat.getGeneration("gen_123456789");
   * ```
   */
  async getGeneration(id) {
    if (!id || typeof id !== "string") {
      throw new Error("Generation ID is required");
    }
    this.log("Getting generation", { id });
    return this.makeRequest(`/v1/chat/generation/${id}`);
  }
  /**
   * Convenience method for simple chat completions with structured
   * parameters; forwards everything to createCompletion.
   *
   * @param options - Messages plus optional completion parameters.
   * @returns Promise resolving to a ChatCompletionResponse.
   *
   * @example
   * ```typescript
   * const response = await client.chat.chat({
   *   messages: [{ role: "user", content: "What is machine learning?" }],
   *   model: "openai/gpt-4.1-mini",
   *   max_tokens: 200,
   *   temperature: 0.7,
   *   appId: "my-app"
   * });
   * ```
   */
  async chat(options) {
    return this.createCompletion({
      messages: options.messages,
      model: options.model,
      max_tokens: options.max_tokens,
      temperature: options.temperature,
      top_p: options.top_p,
      frequency_penalty: options.frequency_penalty,
      presence_penalty: options.presence_penalty,
      stop: options.stop,
      n: options.n,
      stream: options.stream,
      fallback_model: options.fallback_model,
      user: options.user,
      appId: options.appId
    });
  }
  /**
   * Convenience method for streaming chat completions with structured
   * parameters.
   *
   * @param options - Messages, optional onChunk callback, and parameters.
   * @returns Promise resolving to ReadableStream<Uint8Array>.
   *
   * @example
   * ```typescript
   * const stream = await client.chat.chatStream({
   *   messages: [{ role: "user", content: "Write a story about a robot." }],
   *   model: "openai/gpt-4.1-mini",
   *   onChunk: (chunk) => console.log(chunk),
   *   max_tokens: 500
   * });
   * ```
   */
  async chatStream(options) {
    return this.createCompletionWithStream(
      {
        messages: options.messages,
        model: options.model,
        max_tokens: options.max_tokens,
        temperature: options.temperature,
        top_p: options.top_p,
        frequency_penalty: options.frequency_penalty,
        presence_penalty: options.presence_penalty,
        stop: options.stop,
        n: options.n,
        fallback_model: options.fallback_model,
        user: options.user,
        // Forward appId like the non-streaming chat() does (was dropped).
        appId: options.appId
      },
      options.onChunk
    );
  }
  /**
   * Creates a simple chat completion with a single user message.
   *
   * @param options - userMessage plus optional completion parameters.
   * @returns Promise resolving to a ChatCompletionResponse.
   *
   * @example
   * ```typescript
   * const response = await client.chat.chatWithUser({
   *   userMessage: "Explain the concept of recursion",
   *   model: "openai/gpt-4.1-mini",
   *   max_tokens: 300
   * });
   * ```
   */
  async chatWithUser(options) {
    return this.chat({
      messages: [{ role: "user", content: options.userMessage }],
      model: options.model,
      max_tokens: options.max_tokens,
      temperature: options.temperature,
      top_p: options.top_p,
      frequency_penalty: options.frequency_penalty,
      presence_penalty: options.presence_penalty,
      stop: options.stop,
      n: options.n,
      fallback_model: options.fallback_model,
      user: options.user,
      // Forward appId like chatWithSystem() does (was dropped).
      appId: options.appId
    });
  }
  /**
   * Creates a chat completion with system and user messages — useful for
   * setting up a system prompt that defines the AI's behavior or role.
   *
   * @param options - systemMessage, userMessage, and optional parameters.
   * @returns Promise resolving to a ChatCompletionResponse.
   *
   * @example
   * ```typescript
   * const response = await client.chat.chatWithSystem({
   *   systemMessage: "You are a helpful coding assistant.",
   *   userMessage: "Write a function to calculate fibonacci numbers",
   *   model: "openai/gpt-4.1-mini",
   *   max_tokens: 400,
   *   appId: "my-app"
   * });
   * ```
   */
  async chatWithSystem(options) {
    return this.chat({
      messages: [
        { role: "system", content: options.systemMessage },
        { role: "user", content: options.userMessage }
      ],
      model: options.model,
      max_tokens: options.max_tokens,
      temperature: options.temperature,
      top_p: options.top_p,
      frequency_penalty: options.frequency_penalty,
      presence_penalty: options.presence_penalty,
      stop: options.stop,
      n: options.n,
      fallback_model: options.fallback_model,
      user: options.user,
      appId: options.appId
    });
  }
  /**
   * Creates a conversation with multiple messages. Alias for chat(),
   * providing semantic clarity for multi-turn conversations.
   *
   * @param options - Messages and optional parameters.
   * @returns Promise resolving to a ChatCompletionResponse.
   *
   * @example
   * ```typescript
   * const response = await client.chat.createConversation({
   *   messages: [
   *     { role: "system", content: "You are a helpful assistant." },
   *     { role: "user", content: "Hello!" },
   *     { role: "assistant", content: "Hi there! How can I help you today?" },
   *     { role: "user", content: "What's the weather like?" }
   *   ],
   *   model: "openai/gpt-4.1-mini"
   * });
   * ```
   */
  async createConversation(options) {
    return this.chat(options);
  }
  /**
   * Gets API usage information for the current account.
   *
   * @returns Promise resolving to the usage information object.
   * @throws API errors if the call fails or the endpoint is unavailable.
   *
   * @example
   * ```typescript
   * const usage = await client.chat.getUsage();
   * console.log("Total tokens used:", usage.total_tokens);
   * ```
   */
  async getUsage() {
    return this.makeRequest("/v1/usage");
  }
  /**
   * Gets account information for the authenticated API key.
   *
   * @returns Promise resolving to the account information object.
   * @throws API errors if the call fails or the endpoint is unavailable.
   *
   * @example
   * ```typescript
   * const account = await client.chat.getAccount();
   * console.log("Account ID:", account.id);
   * ```
   */
  async getAccount() {
    return this.makeRequest("/v1/account");
  }
  /**
   * Validates chat messages for correctness and completeness before making
   * requests. Throws a plain Error (not ValidationError) for backward
   * compatibility with existing callers.
   *
   * @param messages - Array of chat messages to validate.
   * @throws Error if messages are missing, empty, or have unknown roles.
   *
   * @example
   * ```typescript
   * ChatService.validateMessages([
   *   { role: "user", content: "Hello" },
   *   { role: "assistant", content: "Hi there!" }
   * ]);
   * ```
   */
  static validateMessages(messages) {
    if (!Array.isArray(messages) || messages.length === 0) {
      throw new Error("Messages array is required and cannot be empty");
    }
    for (const message of messages) {
      if (!message.role || !message.content) {
        throw new Error("Each message must have a role and content");
      }
      if (!["system", "user", "assistant", "function", "tool"].includes(
        message.role
      )) {
        throw new Error(`Invalid role: ${message.role}`);
      }
    }
  }
};
// src/services/ImageService.ts
var ImageService = class extends BaseService {
/**
* Generates an image based on a text prompt using the Lunos AI API.
*
* This method handles the core image generation functionality, validating
* the request parameters and making the API call to generate images.
*
* @param request - Complete image generation request object containing
* prompt, model, size, quality, and other parameters
* @returns Promise resolving to ImageGenerationResponse with generated image data
* @throws Error if request validation fails or API call fails
*
* @example
* ```typescript
* const response = await client.image.generateImage({
* prompt: "A beautiful sunset over mountains",
* model: "openai/dall-e-3",
* size: "1024x1024",
* quality: "hd",
* appId: "my-app"
* });
* ```
*/
async generateImage(request) {
ValidationUtils.validateImageGenerationRequest(request);
this.log("Generating image", {
prompt: request.prompt,
model: request.model,
appId: request.appId
});
return this.makeRequest(
"/v1/image/generations",
{
method: "POST",
body: JSON.stringify(request)
},
{
appId: request.appId
}
);
}
/**
* Edits an existing image based on a text prompt and optional mask.
*
* This method allows for inpainting and outpainting operations by providing
* an existing image and a text prompt describing the desired changes.
*
* @param request - Image edit request containing the base image, prompt,
* optional mask, and generation parameters
* @returns Promise resolving to ImageGenerationResponse with edited image data
* @throws Error if image is not provided or API call fails
*
* @example
* ```typescript
* const response = await client.image.editImage({
* image: "base64_encoded_image_data",
* prompt: "Add a red car to the scene",
* model: "openai/dall-e-2",
* size: "1024x1024"
* });
* ```
*/
async editImage(request) {
if (!request.image) {
throw new Error("Image is required for editing");
}
this.log("Editing image", {
prompt: request.prompt,
model: request.model
});
return this.makeRequest("/v1/image/edits", {
method: "POST",
body: JSON.stringify(request)
});
}
/**
* Creates variations of an existing image.
*
* This method generates multiple variations of a provided base image,
* maintaining the overall composition while introducing subtle changes.
*
* @param request - Image variation request containing the base image
* and generation parameters
* @returns Promise resolving to ImageGenerationResponse with variation image data
* @throws Error if image is not provided or API call fails
*
* @example
* ```typescript
* const response = await client.image.createImageVariation({
* image: "base64_encoded_image_data",
* model: "openai/dall-e-2",
* n: 3,
* size: "1024x1024"
* });
* ```
*/
async createImageVariation(request) {
if (!request.image) {
throw new Error("Image is required for variation");
}
this.log("Creating image variation", { model: request.model });
return this.makeRequest("/v1/image/variations", {
method: "POST",
body: JSON.stringify(request)
});
}
/**
* Convenience method for simple image generation with structured parameters.
*
* This method provides a simplified interface for image generation using
* a structured object that separates the prompt from other options.
*
* @param options - Object containing prompt and optional generation parameters
* @returns Promise resolving to ImageGenerationResponse with generated image data
*
* @example
* ```typescript
* const response = await client.image.generate({
* prompt: "A futuristic city skyline",
* model: "openai/dall-e-3",
* size: "1024x1024",
* quality: "hd",
* appId: "my-app"
* });
* ```
*/
async generate(options) {
return this.generateImage({
prompt: options.prompt,
model: options.model,
size: options.size,
quality: options.quality,
response_format: options.response_format,
style: options.style,
n: options.n,
seed: options.seed,
user: options.user,
appId: options.appId
});
}
/**
* Convenience method for image generation with specific dimensions.
*
* This method allows for custom image dimensions while maintaining
* the structured parameter approach.
*
* @param options - Object containing prompt, dimensions, and other parameters
* @returns Promise resolving to ImageGenerationResponse with generated image data
*
* @example
* ```typescript
* const response = await client.image.generateWithSize({
* prompt: "A panoramic landscape",
* width: 1792,
* height: 1024,
* model: "openai/dall-e-3",
* quality: "hd",
* appId: "my-app"
* });
* ```
*/
async generateWithSize(options) {
return this.generateImage({
prompt: options.prompt,
width: options.width,
height: options.height,
model: options.model,
quality: options.quality,
response_format: options.response_format,
style: options.style,
n: options.n,
seed: options.seed,
user: options.user,
appId: options.appId
});
}
/**
* Convenience method for high-quality image generation.
*
* This method automatically sets the quality to "hd" for high-definition
* image generation while using the structured parameter approach.
*
* @param options - Object containing prompt and other parameters
* @returns Promise resolving to ImageGenerationResponse with HD image data
*
* @example
* ```typescript
* const response = await client.image.generateHD({
* prompt: "A detailed portrait of a cat",
* model: "openai/dall-e-3",
* size: "1024x1024"
* });
* ```
*/
async generateHD(options) {
return this.generateImage({
prompt: options.prompt,
quality: "hd",
model: options.model,
size: options.size,
response_format: options.response_format,
style: options.style,
n: options.n,
seed: options.seed,
user: options.user
});
}
/**
* Convenience method for image generation with base64 response format.
*
* This method automatically sets the response format to base64 JSON
* for direct image data access.
*
* @param options - Object containing prompt and other parameters
* @returns Promise resolving to ImageGenerationResponse with base64 image data
*
* @example
* ```typescript
* const response = await client.image.generateBase64({
* prompt: "A digital art piece",
* model: "openai/dall-e-3",
* size: "1024x1024",
* appId: "my-app"
* });
* ```
*/
async generateBase64(options) {
return this.generateImage({
prompt: options.prompt,
response_format: "b64_json",
model: options.model,
size: options.size,
quality: options.quality,
style: options.style,
n: options.n,
seed: options.seed,
user: options.user,
appId: options.appId
});
}
/**
* Convenience method for image generation with URL response format.
*
* This method automatically sets the response format to URL
* for direct image URL access.
*
* @param options - Object containing prompt and other parameters
* @returns Promise resolving to ImageGenerationResponse with image URLs
*
* @example
* ```typescript
* const response = await client.image.generateURL({
* prompt: "A modern office space",
* model: "openai/dall-e-3",
* size: "1024x1024"
* });
* ```
*/
async generateURL(options) {
return this.generateImage({
prompt: options.prompt,
response_format: "url",
model: options.model,
size: options.size,
quality: options.quality,
style: options.style,
n: options.n,
seed: options.seed,
user: options.user
});
}
/**
* Generates multiple images from a single prompt.
*
* This method allows for batch image generation with a specified count,
* using the structured parameter approach.
*
* @param options - Object containing prompt, count, and other parameters
* @returns Promise resolving to ImageGenerationResponse with multiple images
* @throws Error if count is not between 1 and 10
*
* @example
* ```typescript
* const response = await client.image.generateMultiple({
* prompt: "A fantasy castle",
* count: 4,
* model: "openai/dall-e-3",
* size: "1024x1024"
* });
* ```
*/
async generateMultiple(options) {
if (options.count < 1 || options.count > 10) {
throw new Error("Count must be between 1 and 10");
}
return this.generateImage({
prompt: options.prompt,
n: options.count,
model: options.model,
size: options.size,
quality: options.quality,
response_format: options.response_format,
style: options.style,
seed: options.seed,
user: options.user
});
}
/**
* Validates image generation parameters for correctness.
*
* This static method performs validation on image generation parameters
* to ensure they meet the API requirements before making requests.
*
* @param prompt - Text prompt for image generation
* @param width - Optional width of the image
* @param height - Optional height of the image
* @throws Error if parameters are invalid
*
* @example
* ```typescript
* ImageService.validateImageGenerationParams(
* "A beautiful landscape",
* 1024,
* 1024
* );
* ```
*/
static validateImageGenerationParams(prompt, width, height) {
if (!prompt || typeof prompt !== "string" || prompt.trim().length === 0) {
throw new Error("Prompt is required and cannot be empt