@neureus/sdk
Version:
Neureus Platform SDK - AI-native, edge-first application platform
168 lines (164 loc) • 4.56 kB
TypeScript
import { ChatMessage, ChatCompletionRequest, ChatCompletionResponse, ChatCompletionStreamChunk, ModelConfig } from '@neureus/ai-gateway';
export { AIGatewayError, AIProvider, AuthenticationError, CacheConfig, ChatCompletionRequest, ChatCompletionResponse, ChatCompletionStreamChunk, ChatMessage, ModelConfig, ProviderCapabilities, ProviderConfig, ProviderError, RateLimitError, RoutingStrategy } from '@neureus/ai-gateway';
/**
 * Neureus AI Gateway SDK Client
 *
 * Provides a simple interface to interact with the Neureus AI Gateway,
 * supporting multiple LLM providers with automatic fallback, caching,
 * and cost optimization.
 *
 * Note: `chat.create` and `chat.stream` take the message array as the first
 * argument and an optional options object (model, temperature, ...) second —
 * see the declared signatures on `AIClient`.
 *
 * @example
 * ```typescript
 * import { AIClient } from '@neureus/sdk/ai';
 *
 * const ai = new AIClient({
 *   apiKey: 'nru_...',
 *   baseUrl: 'https://api.neureus.ai'
 * });
 *
 * // Non-streaming chat completion
 * const response = await ai.chat.create(
 *   [{ role: 'user', content: 'What is Neureus?' }],
 *   { model: 'gpt-4' }
 * );
 *
 * // Streaming chat completion
 * const stream = await ai.chat.stream(
 *   [{ role: 'user', content: 'Tell me a story' }],
 *   { model: 'gpt-4' }
 * );
 *
 * for await (const chunk of stream) {
 *   process.stdout.write(chunk.choices[0]?.delta?.content || '');
 * }
 * ```
 */
/**
 * Configuration options for {@link AIClient}.
 *
 * Only `apiKey` is required; all other fields fall back to the documented
 * defaults inside the client.
 */
interface AIClientConfig {
/**
 * Neureus API key (required).
 * Get your API key from https://app.neureus.ai/settings/api-keys
 */
apiKey: string;
/**
 * Base URL for the Neureus API. Override for self-hosted or regional
 * deployments.
 * @default 'https://api.neureus.ai'
 */
baseUrl?: string;
/**
 * Request timeout in milliseconds, applied per HTTP request.
 * @default 60000 (60 seconds)
 */
timeout?: number;
/**
 * Number of retry attempts for failed requests.
 * @default 3
 */
retries?: number;
/**
 * User ID attached to requests for usage tracking (optional).
 * @default ''
 */
userId?: string;
/**
 * Team ID attached to requests for usage tracking (optional).
 * @default ''
 */
teamId?: string;
}
/**
 * Per-request options for chat completion.
 *
 * Inherits every field of {@link ChatCompletionRequest} except `messages`
 * (supplied as the first argument to `chat.create`/`chat.stream`) and
 * `model`, which is re-declared here as optional with a default.
 */
interface ChatCompletionOptions extends Omit<ChatCompletionRequest, 'messages' | 'model'> {
/**
 * Model to use for completion.
 * @default 'gpt-3.5-turbo'
 */
model?: string;
}
/**
 * Main AI client class.
 *
 * Thin HTTP wrapper over the Neureus AI Gateway. `chat` exposes completion
 * endpoints (buffered and streaming) and `models` lists available models.
 * Construct with an {@link AIClientConfig} carrying your API key.
 */
declare class AIClient {
// underlying HTTP transport (type not visible in this declaration)
private http;
// resolved client configuration (defaults applied)
private config;
constructor(config: AIClientConfig);
/**
 * Chat completion API.
 */
chat: {
/**
 * Create a non-streaming chat completion.
 *
 * Note: the message array is the FIRST argument; the model and any other
 * request parameters go in the optional second argument.
 *
 * @param messages Conversation history to complete.
 * @param options Optional request parameters (model, temperature, ...).
 * @returns The full completion response once generation finishes.
 *
 * @example
 * ```typescript
 * const response = await ai.chat.create(
 *   [
 *     { role: 'system', content: 'You are a helpful assistant.' },
 *     { role: 'user', content: 'What is the capital of France?' }
 *   ],
 *   { model: 'gpt-4', temperature: 0.7 }
 * );
 *
 * console.log(response.choices[0].message.content);
 * ```
 */
create: (messages: ChatMessage[], options?: ChatCompletionOptions) => Promise<ChatCompletionResponse>;
/**
 * Create a streaming chat completion.
 *
 * Same argument order as `create`: messages first, options second.
 *
 * @param messages Conversation history to complete.
 * @param options Optional request parameters (model, temperature, ...).
 * @returns An async iterable yielding incremental completion chunks.
 *
 * @example
 * ```typescript
 * const stream = await ai.chat.stream(
 *   [{ role: 'user', content: 'Tell me a story' }],
 *   { model: 'gpt-4' }
 * );
 *
 * for await (const chunk of stream) {
 *   const content = chunk.choices[0]?.delta?.content;
 *   if (content) {
 *     process.stdout.write(content);
 *   }
 * }
 * ```
 */
stream: (messages: ChatMessage[], options?: ChatCompletionOptions) => Promise<AsyncIterable<ChatCompletionStreamChunk>>;
};
/**
 * List available models.
 *
 * @example
 * ```typescript
 * const models = await ai.models.list();
 * console.log(models); // [{ name: 'gpt-4', provider: 'openai', ... }]
 * ```
 */
models: {
list: () => Promise<ModelConfig[]>;
};
/**
 * Parse a Server-Sent Events response stream into an async iterable of
 * chunks. Internal helper backing `chat.stream`.
 */
private parseSSEStream;
}
/**
 * Create an {@link AIClient} instance. Convenience factory equivalent to
 * `new AIClient(config)`.
 *
 * @param config Client configuration; `apiKey` is required.
 * @returns A configured AIClient.
 *
 * @example
 * ```typescript
 * import { createAIClient } from '@neureus/sdk/ai';
 *
 * const ai = createAIClient({
 *   apiKey: process.env.NEUREUS_API_KEY
 * });
 * ```
 */
declare function createAIClient(config: AIClientConfig): AIClient;
export { AIClient, type AIClientConfig, type ChatCompletionOptions, createAIClient };