@neureus/sdk

Neureus Platform SDK - AI-native, edge-first application platform

import ky from 'ky';

// src/ai.ts
var AIClient = class {
  http;
  config;
  constructor(config) {
    this.config = {
      apiKey: config.apiKey,
      baseUrl: config.baseUrl || "https://api.neureus.ai",
      timeout: config.timeout || 6e4, // 60 s default
      retries: config.retries || 3,
      userId: config.userId || "",
      teamId: config.teamId || ""
    };
    this.http = ky.create({
      prefixUrl: this.config.baseUrl,
      timeout: this.config.timeout,
      retry: {
        limit: this.config.retries,
        methods: ["get", "post"],
        statusCodes: [408, 413, 429, 500, 502, 503, 504]
      },
      hooks: {
        beforeRequest: [
          (request) => {
            request.headers.set("Authorization", `Bearer ${this.config.apiKey}`);
            request.headers.set("Content-Type", "application/json");
            request.headers.set("User-Agent", "Neureus-SDK/0.2.0");
          }
        ]
      }
    });
  }
  /**
   * Chat completion API
   */
  chat = {
    /**
     * Create a non-streaming chat completion
     *
     * @example
     * ```typescript
     * const response = await ai.chat.create(
     *   [
     *     { role: 'system', content: 'You are a helpful assistant.' },
     *     { role: 'user', content: 'What is the capital of France?' }
     *   ],
     *   { model: 'gpt-4', temperature: 0.7 }
     * );
     *
     * console.log(response.choices[0].message.content);
     * ```
     */
    create: async (messages, options) => {
      const request = {
        model: options?.model || "gpt-3.5-turbo",
        messages,
        temperature: options?.temperature ?? 0.7,
        maxTokens: options?.maxTokens,
        topP: options?.topP,
        frequencyPenalty: options?.frequencyPenalty,
        presencePenalty: options?.presencePenalty,
        stop: options?.stop,
        stream: false,
        cache: options?.cache ?? true,
        fallback: options?.fallback,
        metadata: options?.metadata,
        userId: options?.userId || this.config.userId || void 0,
        teamId: options?.teamId || this.config.teamId || void 0
      };
      return this.http.post("ai/chat/completions", { json: request }).json();
    },
    /**
     * Create a streaming chat completion
     *
     * @example
     * ```typescript
     * const stream = await ai.chat.stream(
     *   [{ role: 'user', content: 'Tell me a story' }],
     *   { model: 'gpt-4' }
     * );
     *
     * for await (const chunk of stream) {
     *   const content = chunk.choices[0]?.delta?.content;
     *   if (content) {
     *     process.stdout.write(content);
     *   }
     * }
     * ```
     */
    stream: async (messages, options) => {
      const request = {
        model: options?.model || "gpt-3.5-turbo",
        messages,
        temperature: options?.temperature ?? 0.7,
        maxTokens: options?.maxTokens,
        topP: options?.topP,
        frequencyPenalty: options?.frequencyPenalty,
        presencePenalty: options?.presencePenalty,
        stop: options?.stop,
        stream: true,
        cache: options?.cache ?? true,
        fallback: options?.fallback,
        metadata: options?.metadata,
        userId: options?.userId || this.config.userId || void 0,
        teamId: options?.teamId || this.config.teamId || void 0
      };
      const response = await this.http.post("ai/chat/completions", { json: request });
      return this.parseSSEStream(response.body);
    }
  };
  /**
   * List available models
   *
   * @example
   * ```typescript
   * const models = await ai.models.list();
   * console.log(models); // [{ name: 'gpt-4', provider: 'openai', ... }]
   * ```
   */
  models = {
    list: async () => {
      return this.http.get("ai/models").json();
    }
  };
  /**
   * Parse Server-Sent Events stream into async iterable
   */
  async *parseSSEStream(body) {
    const reader = body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() || ""; // keep any partial line for the next chunk
        for (const line of lines) {
          const trimmed = line.trim();
          if (trimmed === "") continue;
          if (trimmed.startsWith(":")) continue; // SSE comment line
          if (trimmed === "data: [DONE]") return;
          if (trimmed.startsWith("data: ")) {
            const data = trimmed.slice(6);
            try {
              const chunk = JSON.parse(data);
              yield chunk;
            } catch (error) {
              console.error("Failed to parse SSE data:", data, error);
            }
          }
        }
      }
    } finally {
      reader.releaseLock();
    }
  }
};
function createAIClient(config) {
  return new AIClient(config);
}
export { AIClient, createAIClient };
//# sourceMappingURL=ai.js.map
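`createAIClient` itself carries no `@example`, so the sketch below wires the pieces end to end using only the calls defined in this file. It is a minimal sketch: the `NEUREUS_API_KEY` environment variable, model names, and prompts are placeholders, and the response and chunk shapes are assumed to follow the OpenAI-style `choices`/`delta` layout the JSDoc examples imply.

```typescript
import { createAIClient } from '@neureus/sdk';

// Placeholder credentials; baseUrl, timeout, and retries fall back to the defaults above.
const ai = createAIClient({ apiKey: process.env.NEUREUS_API_KEY ?? '' });

// Non-streaming: create(messages, options) resolves to the parsed JSON body.
const response = await ai.chat.create(
  [{ role: 'user', content: 'Summarize SSE in one sentence.' }],
  { model: 'gpt-3.5-turbo', temperature: 0.2 }
);
console.log(response.choices[0].message.content);

// Streaming: stream(...) resolves to the async generator from parseSSEStream.
const stream = await ai.chat.stream(
  [{ role: 'user', content: 'Tell me a story' }],
  { model: 'gpt-3.5-turbo' }
);
for await (const chunk of stream) {
  const content = chunk.choices[0]?.delta?.content;
  if (content) process.stdout.write(content);
}
```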
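Because the client is built on ky, failures surface as ky's exceptions: `HTTPError` once the retry budget for the listed status codes is exhausted, `TimeoutError` when the configured timeout (60 s by default) elapses. A hedged sketch of handling both follows; the server's error-body format is not documented in this file, so it is read as plain text.

```typescript
import { HTTPError, TimeoutError } from 'ky';

try {
  const models = await ai.models.list();
  console.log(models);
} catch (error) {
  if (error instanceof HTTPError) {
    // ky attaches the failed Response; the body shape is unknown, so dump it as text.
    console.error('Neureus API error', error.response.status, await error.response.text());
  } else if (error instanceof TimeoutError) {
    console.error('Request timed out');
  } else {
    throw error; // network failures, aborts, etc.
  }
}
```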