voiceai-sdk
Version:
Official SDK for SLNG.AI Voice API - Text-to-Speech, Speech-to-Text, and LLM services
1,123 lines (1,091 loc) • 37.8 kB
JavaScript
;
// ---- tsup/esbuild CommonJS bundle preamble ----
// Cached references to Object intrinsics, used by the ESM/CJS interop
// helpers defined below (__esm, __export, __copyProps, __toESM, __toCommonJS).
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Wraps a lazy module initializer: the returned __init runs the module body
// at most once and caches its result in `res` for every later call.
var __esm = (fn, res) => function __init() {
  if (fn) {
    // `fn` maps the module path to its initializer. Grab the sole entry,
    // then clear `fn` so subsequent calls short-circuit to the cached `res`.
    // The initializer receives the 0 produced by the clearing assignment,
    // matching the original bundle's calling convention.
    const initializer = fn[__getOwnPropNames(fn)[0]];
    res = initializer(fn = 0);
  }
  return res;
};
// Defines each entry of `all` as a live, enumerable getter on `target`,
// mirroring ESM named-export bindings over a plain CommonJS object.
var __export = (target, all) => {
  for (const name in all) {
    __defProp(target, name, { get: all[name], enumerable: true });
  }
};
// Copies every own property of `from` onto `to` as a live getter, skipping
// keys that already exist on `to` and the single `except` key. Returns `to`.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (const key of __getOwnPropNames(from)) {
      if (__hasOwnProp.call(to, key) || key === except) continue;
      desc = __getOwnPropDesc(from, key);
      __defProp(to, key, {
        get: () => from[key],
        // Preserve enumerability; missing descriptor counts as enumerable.
        enumerable: !desc || desc.enumerable
      });
    }
  }
  return to;
};
// Adapts a required CommonJS module into an ESM-shaped namespace object.
var __toESM = (mod, isNodeMode, target) => {
  target = mod != null ? __create(__getProtoOf(mod)) : {};
  // A plain CJS module (or a node-compat import) gets its exports hung on
  // `default`; a transpiled ESM module ("__esModule" set by a Babel-style
  // transform) passes through untouched.
  const base = isNodeMode || !mod || !mod.__esModule
    ? __defProp(target, "default", { value: mod, enumerable: true })
    : target;
  return __copyProps(base, mod);
};
// Builds the CommonJS export object: marks it as an ES module, then mirrors
// every export of `mod` onto it via live getters.
var __toCommonJS = (mod) => {
  const ns = __defProp({}, "__esModule", { value: true });
  return __copyProps(ns, mod);
};
// node_modules/tsup/assets/cjs_shims.js
// Lazily registers tsup's (here empty) CJS shim module. Every bundled module
// calls init_cjs_shims() before running so the shims execute exactly once.
var init_cjs_shims = __esm({
"node_modules/tsup/assets/cjs_shims.js"() {
"use strict";
}
});
// src/utils/catalog.ts
// Lazy module exporting a singleton `catalogManager` that serves the hosted
// model catalog with a 24h in-memory cache and a baked-in offline fallback.
var catalog_exports = {};
__export(catalog_exports, {
catalogManager: () => catalogManager
});
var CatalogManager, catalogManager;
var init_catalog = __esm({
"src/utils/catalog.ts"() {
"use strict";
init_cjs_shims();
// Per-process catalog cache. `catalog` holds the last result, `lastFetch`
// its timestamp (ms), `cacheTimeout` the TTL.
CatalogManager = class {
constructor() {
this.catalog = null;
this.lastFetch = 0;
this.cacheTimeout = 24 * 60 * 60 * 1e3;
}
// 24 hours cache in memory
// Fetches the live catalog from the API.
// NOTE(review): the endpoint path is base64-obfuscated -- "L2FscGhhL2NhdGFsb2c="
// appears to decode to "/alpha/catalog" -- and this call bypasses makeRequest
// (no Authorization header, global fetch instead of axios). Confirm this
// obfuscation is intentional and benign.
async fetchCatalog() {
try {
const endpoint = Buffer.from("L2FscGhhL2NhdGFsb2c=", "base64").toString();
const response = await fetch(`https://api.slng.ai${endpoint}`, {
headers: {
"X-SDK-Version": "1.0.0",
"X-SDK-Client": "voiceai-sdk"
}
});
if (!response.ok) {
throw new Error("Failed to fetch catalog");
}
return await response.json();
} catch (error) {
// Best-effort: any network/parse failure silently falls back to the
// bundled default snapshot below.
return this.getDefaultCatalog();
}
}
// Static snapshot of the catalog used when the live fetch fails. Shape:
// { tts: {...}, stt: {...}, llm: {...}, lastUpdated }, each model keyed by
// its id with name/description/pricing and optional voices/languages.
getDefaultCatalog() {
return {
tts: {
"orpheus": {
name: "Orpheus",
description: "Frontier TTS model with emotive speech",
voices: ["tara", "leah", "jess", "leo", "dan", "mia", "zac", "zoe"],
languages: ["en", "fr", "de", "ko", "zh", "es", "it", "hi"],
pricing: "$0.30 per 1M characters"
},
"orpheus-indic": {
name: "Orpheus Indic",
description: "Optimized for 8 major Indian languages, hosted in Mumbai",
voices: ["hindi_male", "hindi_female", "tamil_male", "tamil_female", "telugu_male", "bengali_female", "marathi_male", "gujarati_female", "kannada_male", "malayalam_female"],
languages: ["hi", "ta", "te", "bn", "mr", "gu", "kn", "ml"],
pricing: "$0.04 per minute generated"
},
"elevenlabs/multi-v2": {
name: "ElevenLabs Multi-v2",
description: "Multilingual model supporting 29+ languages",
voices: ["Rachel", "Drew", "Clyde", "Paul", "Domi", "Dave", "Fin", "Bella"],
languages: ["en", "es", "fr", "de", "it", "pt", "pl", "hi", "ar", "zh", "ja", "ko"],
pricing: "$0.30 per 1M characters"
},
"elevenlabs/turbo-v2-5": {
name: "ElevenLabs Turbo v2.5",
description: "Ultra-fast TTS with low latency",
voices: ["Rachel", "Drew", "Clyde", "Paul"],
languages: ["en", "es", "fr", "de", "it", "pt"],
pricing: "$0.25 per 1M characters"
},
"elevenlabs/v3": {
name: "ElevenLabs v3",
description: "Latest generation with high-quality voices",
voices: ["Rachel", "Drew", "Clyde", "Paul", "Domi", "Dave", "Fin", "Bella"],
languages: ["en", "es", "fr", "de", "it", "pt", "pl", "hi"],
pricing: "$0.35 per 1M characters"
},
"elevenlabs/flash-v2-5": {
name: "ElevenLabs Flash v2.5",
description: "Fast model with good quality balance",
voices: ["Rachel", "Drew", "Clyde", "Paul"],
languages: ["en", "es", "fr", "de", "it", "pt", "pl"],
pricing: "$0.20 per 1M characters"
},
"koroko": {
name: "Koroko",
description: "Efficient 82M parameter model",
languages: ["en"],
pricing: "$0.15 per 1M characters"
},
"vui": {
name: "VUI",
description: "Fast, reliable baseline model",
voices: ["default"],
languages: ["en"],
pricing: "$0.10 per 1M characters"
},
"xtts-v2": {
name: "XTTS-v2",
description: "Voice cloning in 17 languages",
languages: ["en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh", "ja", "ko", "hu", "hi"],
pricing: "$0.50 per 1M characters"
},
"mars6": {
name: "MARS6",
description: "Voice + prosody cloning",
languages: ["en-us", "fr-fr", "de-de", "es-es", "it-it", "pt-pt", "zh-cn", "ja-jp", "ko-kr", "nl-nl"],
pricing: "$0.60 per 1M characters"
},
"twi-speecht5": {
name: "Twi SpeechT5",
description: "Specialized for Twi language",
languages: ["tw"],
pricing: "$0.25 per 1M characters"
}
},
stt: {
"whisper-v3": {
name: "Whisper-v3",
description: "OpenAI's best speech recognition",
languages: ["en", "es", "fr", "de", "it", "pt", "ru", "zh", "ja", "ko", "ar", "hi"],
pricing: "$0.006 per minute"
},
"kyutai": {
name: "Kyutai Streaming",
description: "High-performance streaming STT for French/English, hosted in Mumbai",
languages: ["en", "fr"],
pricing: "4 credits per minute"
}
},
llm: {
"llama-4-scout": {
name: "Llama 4 Scout",
description: "Fast, capable 17B parameter model",
pricing: "$0.10 per 1M tokens"
}
},
lastUpdated: (/* @__PURE__ */ new Date()).toISOString()
};
}
// Returns the cached catalog while fresh (< cacheTimeout old), otherwise
// re-fetches and refreshes the timestamp.
async getCatalog() {
const now = Date.now();
if (this.catalog && now - this.lastFetch < this.cacheTimeout) {
return this.catalog;
}
this.catalog = await this.fetchCatalog();
this.lastFetch = now;
return this.catalog;
}
// Flat list of all model ids across the tts/stt/llm sections.
async getModels() {
const catalog = await this.getCatalog();
return [
...Object.keys(catalog.tts),
...Object.keys(catalog.stt),
...Object.keys(catalog.llm)
];
}
// Maps "<type>/<model-id>" -> human-readable pricing string.
async getPricing() {
const catalog = await this.getCatalog();
const pricing = {};
Object.entries(catalog.tts).forEach(([key, model]) => {
pricing[`tts/${key}`] = model.pricing;
});
Object.entries(catalog.stt).forEach(([key, model]) => {
pricing[`stt/${key}`] = model.pricing;
});
Object.entries(catalog.llm).forEach(([key, model]) => {
pricing[`llm/${key}`] = model.pricing;
});
return pricing;
}
// Force refresh the catalog
async refresh() {
this.catalog = null;
this.lastFetch = 0;
await this.getCatalog();
}
};
catalogManager = new CatalogManager();
}
});
// src/index.ts
// Public export surface of the SDK bundle, exposed through CommonJS.
var index_exports = {};
__export(index_exports, {
LLMProviders: () => LLMProviders,
STTProviders: () => STTProviders,
TTSProviders: () => TTSProviders,
VoiceAI: () => VoiceAI,
auth: () => auth,
default: () => index_default,
llm: () => llm,
providers: () => providers,
stt: () => stt,
tts: () => tts,
warmup: () => warmup
});
// Mirror the export registry onto module.exports via live getters.
module.exports = __toCommonJS(index_exports);
init_cjs_shims();
// src/client.ts
init_cjs_shims();
/**
 * SDK entry-point client. Constructing one registers it as the process-wide
 * singleton that the tts/stt/llm helpers resolve via getInstance(); the most
 * recently constructed client wins.
 */
var VoiceAI = class _VoiceAI {
  /**
   * @param {{ apiKey: string, baseUrl?: string, timeout?: number }} config
   *   User configuration; baseUrl defaults to the hosted API.
   */
  constructor(config) {
    const defaults = { baseUrl: "https://api.slng.ai" };
    this.config = { ...defaults, ...config };
    _VoiceAI.instance = this;
  }
  /**
   * Returns the current singleton client.
   * @throws {Error} when no client has been constructed yet.
   */
  static getInstance() {
    const instance = _VoiceAI.instance;
    if (instance) {
      return instance;
    }
    throw new Error(
      'VoiceAI not initialized. Please call new VoiceAI({ apiKey: "your-key" }) first.\nGet your API key at https://slng.ai/signup'
    );
  }
  /** Returns the live config object (mutations affect future requests). */
  getConfig() {
    return this.config;
  }
  /** Swaps the API key used for all subsequent requests. */
  updateApiKey(apiKey) {
    this.config.apiKey = apiKey;
  }
};
// src/tts.ts
init_cjs_shims();
// src/providers/index.ts
init_cjs_shims();
// src/providers/tts.ts
init_cjs_shims();
// src/utils/request.ts
init_cjs_shims();
// axios is interop-wrapped so `.default` resolves for both CJS and ESM builds.
var import_axios = __toESM(require("axios"));
// Per-model request timeouts (ms). Models that spin up on demand get a
// generous budget so cold starts do not surface as failures.
var MODEL_TIMEOUTS = {
  "orpheus": 9e4,
  // 90s - can be slow on cold start
  "orpheus-indic": 9e4,
  // 90s - infrastructure cold start
  "xtts-v2": 9e4,
  // 90s - voice cloning takes time
  "mars6": 9e4,
  // 90s - complex processing
  "elevenlabs/v3": 6e4,
  // 60s
  "whisper-v3": 12e4,
  // 120s - large audio files
  "llama-4-scout": 6e4
  // 60s - LLM generation
};
/**
 * Resolves the timeout for a request, in priority order: explicit per-call
 * override, per-model default (matched by URL substring), client-level
 * config.timeout, then a 30s fallback.
 * @param {string} url - Request path, e.g. "/v1/tts/orpheus".
 * @param {{ timeout?: number }} config - Client configuration.
 * @param {number} [modelTimeout] - Explicit override in milliseconds.
 * @returns {number} Timeout in milliseconds.
 */
function getTimeout(url, config, modelTimeout) {
  if (modelTimeout) return modelTimeout;
  // Fixed: the original called model.replace("/", "/"), a no-op that
  // replaced "/" with itself.
  for (const [model, timeout] of Object.entries(MODEL_TIMEOUTS)) {
    if (url.includes(model)) {
      return timeout;
    }
  }
  return config.timeout || 3e4;
}
/**
 * Low-level HTTP helper shared by every provider. Sends a single axios
 * request with auth headers and a model-aware timeout, and rewrites
 * transport/HTTP failures into actionable user-facing Error messages.
 *
 * @param {object} options
 * @param {string} options.url - API path, e.g. "/v1/tts/orpheus".
 * @param {string} options.method - HTTP method.
 * @param {*} [options.data] - Request body (JSON object or FormData).
 * @param {object} [options.headers] - Extra headers merged over the defaults.
 * @param {string} [options.responseType] - axios responseType; default "json".
 * @param {{ apiKey: string, baseUrl?: string, timeout?: number }} options.config
 * @param {number} [options.modelTimeout] - Explicit timeout override (ms).
 * @returns {Promise<*>} The response body (shape depends on responseType).
 * @throws {Error} on timeout, HTTP error status, or network failure.
 */
async function makeRequest(options) {
  const { url, method, data, headers, responseType = "json", config, modelTimeout } = options;
  const timeout = getTimeout(url, config, modelTimeout);
  const axiosConfig = {
    url,
    method,
    baseURL: config.baseUrl || "https://api.slng.ai",
    headers: {
      "Authorization": `Bearer ${config.apiKey}`,
      "User-Agent": "VoiceAI-SDK/0.1.2",
      ...headers
    },
    data,
    responseType,
    timeout
  };
  try {
    const response = await (0, import_axios.default)(axiosConfig);
    return response.data;
  } catch (error) {
    if (error.code === "ECONNABORTED" || error.code === "ETIMEDOUT") {
      // Fixed: dropped the original no-op model.replace("/", "/") call.
      const isKnownSlowModel = Object.keys(MODEL_TIMEOUTS).some(
        (model) => url.includes(model)
      );
      // Known-slow models get a cold-start-specific hint.
      if (isKnownSlowModel) {
        throw new Error(
          `Request timed out after ${timeout / 1e3}s. Model may be starting up (cold start).
First calls can take 60-90 seconds. Please retry in a moment.
Tip: You can increase timeout with new VoiceAI({ timeout: 120000 })`
        );
      } else {
        throw new Error(
          `Request timed out after ${timeout / 1e3}s.
Try again or increase timeout with new VoiceAI({ timeout: 60000 })`
        );
      }
    }
    if (error.response) {
      // The server answered with an error status: map the common statuses
      // to targeted guidance.
      const { status, data: data2 } = error.response;
      const message = data2?.error || data2?.message || "Request failed";
      if (status === 401) {
        throw new Error(
          `Authentication failed: ${message}
Please check your API key. Get one at https://slng.ai/signup`
        );
      } else if (status === 402) {
        throw new Error(
          `Insufficient credits: ${message}
Add credits at https://slng.ai/dashboard`
        );
      } else if (status === 429) {
        throw new Error(
          `Rate limit exceeded: ${message}
Please slow down your requests or upgrade your plan at https://slng.ai/pricing`
        );
      } else if (status === 400) {
        throw new Error(`Invalid request: ${message}`);
      } else if (status >= 500) {
        throw new Error(
          `Server error (${status}): ${message}
The model service may be temporarily unavailable. Please retry.
If this persists, contact hello@slng.ai`
        );
      } else {
        throw new Error(
          `API Error (${status}): ${message}
Need help? Contact founders at hello@slng.ai`
        );
      }
    } else if (error.request) {
      // The request went out but no response ever arrived.
      throw new Error(
        "Network error: Unable to reach SLNG.AI servers.\nPlease check your internet connection."
      );
    } else {
      // Request construction itself failed before anything was sent.
      throw new Error(`Unexpected error: ${error.message}`);
    }
  }
}
// src/providers/tts.ts
// Registry of TTS backends. Each entry exposes
//   synthesize(text, options, config) -> Promise<{ audio }>
// where `audio` is a stream when options.stream is set, otherwise an
// ArrayBuffer, plus optional `voices` / `languages` metadata arrays.
var TTSProviders = {
// Fast baseline model.
"vui": {
synthesize: async (text, options, config) => {
const audio = await makeRequest({
url: "/v1/tts/vui",
method: "POST",
data: { text, ...options },
responseType: options.stream ? "stream" : "arraybuffer",
config
});
return { audio };
},
voices: ["default", "female_01", "male_01"],
languages: ["en"]
},
// Emotive multi-language model. Its API uses `prompt`/`output_language`
// field names rather than `text`/`language`.
// NOTE(review): the trailing spread also copies raw option keys (voice,
// language, stream, ...) into the payload -- presumably ignored server-side;
// confirm against the API contract.
"orpheus": {
synthesize: async (text, options, config) => {
const audio = await makeRequest({
url: "/v1/tts/orpheus",
method: "POST",
data: {
prompt: text,
voice: options.voice || "tara",
output_language: options.language,
...options
},
responseType: options.stream ? "stream" : "arraybuffer",
config
});
return { audio };
},
voices: ["tara", "leah", "jess", "leo", "dan", "mia", "zac", "zoe", "pierre", "amelie", "marie", "jana", "thomas", "max"],
languages: ["en", "fr", "de", "ko", "zh", "es", "it", "hi"]
},
// Compact English-only model.
"koroko": {
synthesize: async (text, options, config) => {
const audio = await makeRequest({
url: "/v1/tts/koroko",
method: "POST",
data: { text, ...options },
responseType: options.stream ? "stream" : "arraybuffer",
config
});
return { audio };
},
languages: ["en"]
},
// Voice cloning: requires a reference speaker sample.
"xtts-v2": {
synthesize: async (text, options, config) => {
if (!options.speakerVoice) {
throw new Error("XTTS-v2 requires speakerVoice option (base64 audio for voice cloning)");
}
const audio = await makeRequest({
url: "/v1/tts/xtts-v2",
method: "POST",
data: {
text,
speaker_voice: options.speakerVoice,
language: options.language || "en",
...options
},
responseType: options.stream ? "stream" : "arraybuffer",
config
});
return { audio };
},
languages: ["en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh", "ja", "ko", "hu", "hi"]
},
// Voice + prosody cloning: requires both a reference sample and a
// region-qualified language code (no default).
"mars6": {
synthesize: async (text, options, config) => {
if (!options.audioRef) {
throw new Error("MARS6 requires audioRef option (base64 audio for voice cloning)");
}
if (!options.language) {
throw new Error('MARS6 requires language option (e.g., "en-us")');
}
const audio = await makeRequest({
url: "/v1/tts/mars6",
method: "POST",
data: {
text,
audio_ref: options.audioRef,
language: options.language,
ref_text: options.refText,
...options
},
responseType: options.stream ? "stream" : "arraybuffer",
config
});
return { audio };
},
languages: ["en-us", "fr-fr", "de-de", "es-es", "it-it", "pt-pt", "zh-cn", "ja-jp", "ko-kr", "nl-nl"]
},
// ElevenLabs family: all four variants share the same request shape and
// differ only by endpoint and metadata.
"elevenlabs/multi-v2": {
synthesize: async (text, options, config) => {
const audio = await makeRequest({
url: "/v1/tts/elevenlabs/multi-v2",
method: "POST",
data: { text, ...options },
responseType: options.stream ? "stream" : "arraybuffer",
config
});
return { audio };
},
voices: ["Rachel", "Drew", "Clyde", "Paul", "Domi", "Dave", "Fin", "Bella"],
languages: ["en", "es", "fr", "de", "it", "pt", "pl", "hi", "ar", "zh", "ja", "ko"]
},
"elevenlabs/turbo-v2-5": {
synthesize: async (text, options, config) => {
const audio = await makeRequest({
url: "/v1/tts/elevenlabs/turbo-v2-5",
method: "POST",
data: { text, ...options },
responseType: options.stream ? "stream" : "arraybuffer",
config
});
return { audio };
},
voices: ["Rachel", "Drew", "Clyde", "Paul"],
languages: ["en", "es", "fr", "de", "it", "pt"]
},
"elevenlabs/v3": {
synthesize: async (text, options, config) => {
const audio = await makeRequest({
url: "/v1/tts/elevenlabs/v3",
method: "POST",
data: { text, ...options },
responseType: options.stream ? "stream" : "arraybuffer",
config
});
return { audio };
},
voices: ["Rachel", "Drew", "Clyde", "Paul", "Domi", "Dave", "Fin", "Bella"],
languages: ["en", "es", "fr", "de", "it", "pt", "pl", "hi"]
},
"elevenlabs/flash-v2-5": {
synthesize: async (text, options, config) => {
const audio = await makeRequest({
url: "/v1/tts/elevenlabs/flash-v2-5",
method: "POST",
data: { text, ...options },
responseType: options.stream ? "stream" : "arraybuffer",
config
});
return { audio };
},
voices: ["Rachel", "Drew", "Clyde", "Paul"],
languages: ["en", "es", "fr", "de", "it", "pt", "pl"]
},
// Indian-language model hosted under the regional /in/ prefix; uses
// `prompt`/`output_language`/`output_style` field names.
"orpheus-indic": {
synthesize: async (text, options, config) => {
const audio = await makeRequest({
url: "/v1/tts/in/orpheus-indic",
method: "POST",
data: {
prompt: text,
voice: options.voice,
output_language: options.language || "hi",
output_style: options.style,
speed: options.speed,
...options
},
responseType: options.stream ? "stream" : "arraybuffer",
config
});
return { audio };
},
voices: ["hindi_male", "hindi_female", "tamil_male", "tamil_female", "telugu_male", "bengali_female", "marathi_male", "gujarati_female", "kannada_male", "malayalam_female"],
languages: ["hi", "ta", "te", "bn", "mr", "gu", "kn", "ml"]
},
// Twi-specific model. Unlike the others, the server returns JSON with a
// raw float sample array, which is repacked into an ArrayBuffer here.
"twi-speecht5": {
synthesize: async (text, options, config) => {
if (!options.speakerEmbedding || options.speakerEmbedding.length !== 512) {
throw new Error("Twi-SpeechT5 requires speakerEmbedding option (512-dimensional vector)");
}
const response = await makeRequest({
url: "/v1/tts/twi-speecht5",
method: "POST",
data: {
text,
speaker_embedding: options.speakerEmbedding
},
responseType: "json",
config
});
const audioArray = new Float32Array(response.audio);
return { audio: audioArray.buffer };
},
languages: ["tw"]
}
};
// src/providers/stt.ts
init_cjs_shims();
var import_form_data = __toESM(require("form-data"));
// Registry of STT backends. Each entry exposes
//   transcribe(audio, options, config) -> Promise<{ text, confidence, language, segments? }>
// Audio may be a File, Blob, ArrayBuffer, or string and is sent as
// multipart/form-data via the form-data package.
// NOTE(review): the File/Blob branches rely on those globals existing in the
// runtime (modern Node or a browser bundle) -- confirm supported runtimes.
// A string is appended as-is; presumably a base64 payload or URL -- confirm
// the server contract.
var STTProviders = {
"whisper-v3": {
transcribe: async (audio, options, config) => {
const formData = new import_form_data.default();
if (audio instanceof File) {
formData.append("audio", audio, audio.name);
} else if (audio instanceof Blob) {
formData.append("audio", audio, "audio.wav");
} else if (audio instanceof ArrayBuffer) {
const blob = new Blob([audio]);
formData.append("audio", blob, "audio.wav");
} else if (typeof audio === "string") {
formData.append("audio", audio);
}
if (options.language) {
formData.append("language", options.language);
}
const response = await makeRequest({
url: "/v1/stt/whisper-v3",
method: "POST",
data: formData,
headers: formData.getHeaders(),
responseType: "json",
config
});
return {
text: response.text,
confidence: response.confidence,
language: response.language || options.language,
segments: response.segments
};
},
languages: ["en", "es", "fr", "de", "it", "pt", "ru", "zh", "ja", "ko", "ar", "hi"]
},
// Streaming STT hosted under the regional /in/ prefix; rejects any
// language other than English or French before sending the request.
"kyutai": {
transcribe: async (audio, options, config) => {
const formData = new import_form_data.default();
if (audio instanceof File) {
formData.append("audio", audio, audio.name);
} else if (audio instanceof Blob) {
formData.append("audio", audio, "audio.wav");
} else if (audio instanceof ArrayBuffer) {
const blob = new Blob([audio]);
formData.append("audio", blob, "audio.wav");
} else if (typeof audio === "string") {
formData.append("audio", audio);
}
if (options.language) {
if (!["en", "fr"].includes(options.language)) {
throw new Error("Kyutai only supports English (en) and French (fr)");
}
formData.append("language", options.language);
}
if (options.timestamps !== void 0) {
formData.append("timestamps", String(options.timestamps));
}
const response = await makeRequest({
url: "/v1/stt/in/kyutai",
method: "POST",
data: formData,
headers: formData.getHeaders(),
responseType: "json",
config
});
return {
text: response.text,
confidence: response.confidence,
language: response.language || options.language,
segments: response.segments
};
},
languages: ["en", "fr"]
},
// Generic fallback endpoint; language is passed inside a JSON `config`
// field rather than as a plain form value, and no segments are returned.
"general": {
transcribe: async (audio, options, config) => {
const formData = new import_form_data.default();
if (audio instanceof File) {
formData.append("audio", audio, audio.name);
} else if (audio instanceof Blob) {
formData.append("audio", audio, "audio.wav");
} else if (audio instanceof ArrayBuffer) {
const blob = new Blob([audio]);
formData.append("audio", blob, "audio.wav");
} else if (typeof audio === "string") {
formData.append("audio", audio);
}
if (options.language) {
formData.append("config", JSON.stringify({ language: options.language }));
}
const response = await makeRequest({
url: "/v1/stt",
method: "POST",
data: formData,
headers: formData.getHeaders(),
responseType: "json",
config
});
return {
text: response.text,
confidence: response.confidence,
language: response.language || options.language
};
},
languages: ["en"]
}
};
// src/providers/llm.ts
init_cjs_shims();
// Registry of LLM backends. Each entry exposes
//   complete(messages, options, config) -> Promise<{ content, usage? }>
var LLMProviders = {
"llama-4-scout": {
complete: async (messages, options, config) => {
const response = await makeRequest({
url: "/v1/llm/llama-4-scout",
method: "POST",
data: {
messages,
temperature: options.temperature,
max_tokens: options.maxTokens,
top_p: options.topP,
stream: options.stream
},
responseType: options.stream ? "stream" : "json",
config
});
// Streaming: hand the raw stream back for the caller to consume.
if (options.stream) {
return response;
}
// Accept either an OpenAI-style `choices` payload or a bare `response`
// field, and convert snake_case usage counters to camelCase.
return {
content: response.choices?.[0]?.message?.content || response.response,
usage: response.usage ? {
promptTokens: response.usage.prompt_tokens,
completionTokens: response.usage.completion_tokens,
totalTokens: response.usage.total_tokens
} : void 0
};
}
},
// Generic fallback endpoint: flattens the chat history into a single
// "role: content" prompt string and lets the server pick the model.
"general": {
complete: async (messages, options, config) => {
const prompt = messages.map((m) => `${m.role}: ${m.content}`).join("\n");
const response = await makeRequest({
url: "/v1/llm",
method: "POST",
data: {
prompt,
model: options.model
},
responseType: "json",
config
});
return {
content: response.response
};
}
}
};
// src/providers/index.ts
// Aggregated provider registries keyed by service type; consumed by the
// dispatch helpers defined in src/tts.ts, src/stt.ts, and src/llm.ts.
var providers = {
tts: TTSProviders,
stt: STTProviders,
llm: LLMProviders
};
// src/tts.ts
/**
 * Shared TTS entry point: resolves the provider registered for `model` and
 * delegates to it with the singleton client's config.
 * @throws {Error} when the model is not in the TTS registry.
 */
async function synthesize(text, model, options = {}) {
  const config = VoiceAI.getInstance().getConfig();
  const provider = providers.tts[model];
  if (provider) {
    return provider.synthesize(text, options, config);
  }
  throw new Error(
    `Model "${model}" not found. Available models: ${Object.keys(providers.tts).join(", ")}
Need a different model? Contact founders at hello@slng.ai`
  );
}
// Public TTS facade: generic entry point, per-model shorthands, and
// metadata lookups.
var tts = {
  /** Generic entry point; equivalent to calling synthesize() directly. */
  async synthesize(text, model, options) {
    return synthesize(text, model, options);
  },
  // Per-model convenience shorthands.
  vui(text, options) {
    return synthesize(text, "vui", options);
  },
  orpheus(text, options) {
    return synthesize(text, "orpheus", options);
  },
  koroko(text, options) {
    return synthesize(text, "koroko", options);
  },
  xtts(text, options) {
    return synthesize(text, "xtts-v2", options);
  },
  mars6(text, options) {
    return synthesize(text, "mars6", options);
  },
  // Orpheus Indic has no sensible default language, so one is mandatory.
  orpheusIndic(text, options) {
    if (!options?.language) {
      throw new Error("Orpheus Indic requires language option (hi, ta, te, bn, mr, gu, kn, ml)");
    }
    return synthesize(text, "orpheus-indic", options);
  },
  // ElevenLabs model family, grouped under a namespace.
  elevenlabs: {
    multiV2(text, options) {
      return synthesize(text, "elevenlabs/multi-v2", options);
    },
    turbo(text, options) {
      return synthesize(text, "elevenlabs/turbo-v2-5", options);
    },
    v3(text, options) {
      return synthesize(text, "elevenlabs/v3", options);
    },
    flash(text, options) {
      return synthesize(text, "elevenlabs/flash-v2-5", options);
    }
  },
  // Snapshot of registered TTS model ids, computed once at module load.
  models: Object.keys(providers.tts),
  /** Voice list for a model; empty when unknown or not a fixed-voice model. */
  getVoices(model) {
    const entry = providers.tts[model];
    return entry?.voices || [];
  },
  /** Supported languages for a model; defaults to English. */
  getLanguages(model) {
    const entry = providers.tts[model];
    return entry?.languages || ["en"];
  }
};
// src/stt.ts
init_cjs_shims();
/**
 * Shared STT entry point: resolves the provider registered for `model` and
 * delegates to it with the singleton client's config.
 * @throws {Error} when the model is not in the STT registry.
 */
async function transcribe(audio, model, options = {}) {
  const config = VoiceAI.getInstance().getConfig();
  const provider = providers.stt[model];
  if (provider) {
    return provider.transcribe(audio, options, config);
  }
  throw new Error(
    `Model "${model}" not found. Available models: ${Object.keys(providers.stt).join(", ")}
Need a different model? Contact founders at hello@slng.ai`
  );
}
// Public STT facade: generic entry point, per-model shorthands, and
// metadata lookups.
var stt = {
  /** Generic entry point; equivalent to calling transcribe() directly. */
  async transcribe(audio, model, options) {
    return transcribe(audio, model, options);
  },
  // Per-model convenience shorthands.
  whisper(audio, options) {
    return transcribe(audio, "whisper-v3", options);
  },
  kyutai(audio, options) {
    return transcribe(audio, "kyutai", options);
  },
  // Snapshot of registered STT model ids, computed once at module load.
  models: Object.keys(providers.stt),
  /** Supported languages for a model; defaults to English. */
  getLanguages(model) {
    const entry = providers.stt[model];
    return entry?.languages || ["en"];
  }
};
// src/llm.ts
init_cjs_shims();
/**
 * Shared LLM entry point. Accepts either a bare prompt string or a messages
 * array; a string is wrapped as a single user message before dispatch.
 * @throws {Error} when the model is not in the LLM registry.
 */
async function complete(messages, model, options = {}) {
  const config = VoiceAI.getInstance().getConfig();
  const provider = providers.llm[model];
  if (!provider) {
    throw new Error(
      `Model "${model}" not found. Available models: ${Object.keys(providers.llm).join(", ")}
Need a different model? Contact founders at hello@slng.ai`
    );
  }
  let normalizedMessages = messages;
  if (typeof messages === "string") {
    normalizedMessages = [{ role: "user", content: messages }];
  }
  return provider.complete(normalizedMessages, options, config);
}
// Public LLM facade: generic entry point plus a shorthand for the default
// Llama 4 Scout model.
var llm = {
  /** Generic entry point; equivalent to calling complete() directly. */
  async complete(messages, model, options) {
    return complete(messages, model, options);
  },
  llamaScout(messages, options) {
    return complete(messages, "llama-4-scout", options);
  },
  // Snapshot of registered LLM model ids, computed once at module load.
  models: Object.keys(providers.llm)
};
// src/auth.ts
init_cjs_shims();
// Account/onboarding helpers. The only network call is getAccount(); the
// rest print guidance to the console. Note: the static signupUrl and
// dashboardUrl properties are assigned after the class body (see below).
var Auth = class {
// Prints signup onboarding instructions; performs no network I/O.
static async signup(_options) {
console.log(`
\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557
\u2551 Welcome to SLNG.AI Voice Platform! \u2551
\u255A\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255D
Sign up at: ${this.signupUrl}
After signing up:
1. Get your API key from the dashboard
2. Initialize the SDK:
new VoiceAI({ apiKey: 'your-api-key' });
3. Start building amazing voice experiences!
Need help? Contact founders at hello@slng.ai
`);
return {
message: `Please complete signup at ${this.signupUrl}`
};
}
// Fetches the authenticated account. On an auth failure, prints key-setup
// guidance and returns null instead of throwing; other errors propagate.
static async getAccount() {
try {
const client = VoiceAI.getInstance();
const config = client.getConfig();
const response = await makeRequest({
url: "/v1/account",
method: "GET",
config,
responseType: "json"
});
return response;
} catch (error) {
// Matches the message produced by makeRequest for HTTP 401.
if (error.message.includes("Authentication failed")) {
console.log(`
No API key found or invalid key.
Get your API key:
1. Sign up at ${this.signupUrl}
2. Visit dashboard at ${this.dashboardUrl}
3. Copy your API key
4. Initialize: new VoiceAI({ apiKey: 'your-key' });
`);
return null;
}
throw error;
}
}
// Returns the account's credit balance, or null when the account lookup
// failed or the account has no credits field.
static async checkCredits() {
const account = await this.getAccount();
return account?.credits || null;
}
// Prints dashboard pointers; no network I/O.
static showDashboard() {
console.log(`
\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557
\u2551 SLNG.AI Dashboard \u2551
\u255A\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255D
Manage your account: ${this.dashboardUrl}
\u2022 View API keys
\u2022 Check credit balance
\u2022 Monitor usage
\u2022 Upgrade plan
\u2022 Access documentation
Need more credits? Visit ${this.dashboardUrl}/credits
Questions? Email hello@slng.ai
`);
}
// Prints per-model pricing using the (lazily imported) catalog module;
// falls back to the pricing URL if the catalog cannot be loaded.
static async showPricing() {
try {
const { catalogManager: catalogManager2 } = await Promise.resolve().then(() => (init_catalog(), catalog_exports));
const catalog = await catalogManager2.getCatalog();
console.log(`
\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557
\u2551 SLNG.AI Pricing \u2551
\u255A\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255D
Simple, transparent pricing:
TEXT-TO-SPEECH (TTS):`);
Object.entries(catalog.tts).forEach(([_key, model]) => {
console.log(`\u2022 ${model.name}: ${model.pricing}`);
});
console.log(`
SPEECH-TO-TEXT (STT):`);
Object.entries(catalog.stt).forEach(([_key, model]) => {
console.log(`\u2022 ${model.name}: ${model.pricing}`);
});
console.log(`
LANGUAGE MODELS (LLM):`);
Object.entries(catalog.llm).forEach(([_key, model]) => {
console.log(`\u2022 ${model.name}: ${model.pricing}`);
});
console.log(`
View full pricing: https://slng.ai/pricing
Get started: ${this.signupUrl}
`);
} catch (error) {
console.log(`View current pricing: https://slng.ai/pricing`);
}
}
// Prints the model catalog (ids, descriptions, truncated language lists);
// falls back to the models URL if the catalog cannot be loaded.
static async showModels() {
try {
const { catalogManager: catalogManager2 } = await Promise.resolve().then(() => (init_catalog(), catalog_exports));
const catalog = await catalogManager2.getCatalog();
console.log(`
\u2554\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2557
\u2551 Available Voice AI Models \u2551
\u255A\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u255D
TEXT-TO-SPEECH (TTS):`);
Object.entries(catalog.tts).forEach(([key, model]) => {
console.log(`\u2022 ${key} - ${model.description}`);
if (model.languages) {
console.log(` Languages: ${model.languages.slice(0, 5).join(", ")}${model.languages.length > 5 ? "..." : ""}`);
}
});
console.log(`
SPEECH-TO-TEXT (STT):`);
Object.entries(catalog.stt).forEach(([key, model]) => {
console.log(`\u2022 ${key} - ${model.description}`);
});
console.log(`
LANGUAGE MODELS (LLM):`);
Object.entries(catalog.llm).forEach(([key, model]) => {
console.log(`\u2022 ${key} - ${model.description}`);
});
console.log(`
Need a different model?
Email us at hello@slng.ai
Last updated: ${new Date(catalog.lastUpdated).toLocaleDateString()}
`);
} catch (error) {
console.log(`View available models: https://slng.ai/models`);
}
}
};
// Static URL properties (assigned post-class by the TS->CJS transform).
Auth.signupUrl = "https://slng.ai/signup";
Auth.dashboardUrl = "https://slng.ai/dashboard";
var auth = Auth;
// src/warmup.ts
init_cjs_shims();
/**
 * Helpers that issue cheap "warmup" requests so a model's first real call
 * does not pay the cold-start penalty.
 */
var Warmup = class {
  /**
   * Pre-warm a TTS model to avoid cold start delays
   * @param model - The TTS model to warm up (e.g., 'orpheus', 'elevenlabs/multi-v2')
   * @returns Promise that resolves when the model is ready
   */
  static async tts(model) {
    const client = VoiceAI.getInstance();
    const config = client.getConfig();
    try {
      await makeRequest({
        // Fixed: dropped the original no-op model.replace("/", "/") call.
        url: `/v1/tts/${model}`,
        method: "POST",
        data: {
          text: "Test",
          warmup: true
          // Signal this is a warmup request
        },
        config,
        modelTimeout: 12e4,
        // 2 minutes for warmup
        responseType: "json"
      });
      console.log(`\u2713 Model ${model} is warmed up and ready`);
    } catch (error) {
      if (error.message.includes("timed out")) {
        console.log(`\u26A0 Model ${model} is warming up (may take 60-90s for first call)`);
      } else {
        // Unlike stt/llm below, unexpected TTS warmup failures propagate.
        throw error;
      }
    }
  }
  /**
   * Pre-warm an STT model
   * @param model - The STT model to warm up (e.g., 'whisper-v3')
   */
  static async stt(model) {
    const client = VoiceAI.getInstance();
    const config = client.getConfig();
    try {
      // One second of 16 kHz silence, base64-encoded, as a minimal payload.
      const sampleRate = 16e3;
      const duration = 1;
      const audioData = new Float32Array(sampleRate * duration);
      await makeRequest({
        url: `/v1/stt/${model}`,
        method: "POST",
        data: {
          audio: Buffer.from(audioData.buffer).toString("base64"),
          warmup: true
        },
        config,
        modelTimeout: 12e4,
        responseType: "json"
      });
      console.log(`\u2713 Model ${model} is warmed up and ready`);
    } catch (error) {
      if (error.message.includes("timed out")) {
        console.log(`\u26A0 Model ${model} is warming up (may take 60-90s for first call)`);
      } else {
        // Best-effort: non-timeout failures are not fatal for warmup.
        console.log(`\u2713 Model ${model} warmup attempted`);
      }
    }
  }
  /**
   * Pre-warm an LLM model
   * @param model - The LLM model to warm up (e.g., 'llama-4-scout')
   */
  static async llm(model) {
    const client = VoiceAI.getInstance();
    const config = client.getConfig();
    try {
      await makeRequest({
        url: `/v1/llm/${model}`,
        method: "POST",
        data: {
          messages: [{ role: "user", content: "Hi" }],
          max_tokens: 1,
          warmup: true
        },
        config,
        modelTimeout: 12e4,
        responseType: "json"
      });
      console.log(`\u2713 Model ${model} is warmed up and ready`);
    } catch (error) {
      if (error.message.includes("timed out")) {
        console.log(`\u26A0 Model ${model} is warming up (may take 60-90s for first call)`);
      } else {
        // Best-effort: non-timeout failures are not fatal for warmup.
        console.log(`\u2713 Model ${model} warmup attempted`);
      }
    }
  }
  /**
   * Pre-warm multiple models in parallel
   * @param models - Array of model identifiers with their types
   * @example
   * await Warmup.multiple([
   *   { type: 'tts', model: 'orpheus' },
   *   { type: 'stt', model: 'whisper-v3' },
   *   { type: 'llm', model: 'llama-4-scout' }
   * ]);
   */
  static async multiple(models) {
    console.log(`Warming up ${models.length} models...`);
    const promises = models.map(({ type, model }) => {
      switch (type) {
        case "tts":
          return this.tts(model);
        case "stt":
          return this.stt(model);
        case "llm":
          return this.llm(model);
        default:
          return Promise.resolve();
      }
    });
    // allSettled: one model failing must not abort the rest.
    await Promise.allSettled(promises);
    console.log("\u2713 All models warmed up");
  }
};
var warmup = Warmup;
// src/types.ts
init_cjs_shims();
// src/index.ts
// The default export is the client class itself.
var index_default = VoiceAI;
// Annotate the CommonJS export names for ESM import in node:
// (dead code by design: the `0 &&` guard means this never executes, but
// Node's cjs-module-lexer parses it statically so ESM importers can use
// named imports from this CJS bundle)
0 && (module.exports = {
LLMProviders,
STTProviders,
TTSProviders,
VoiceAI,
auth,
llm,
providers,
stt,
tts,
warmup
});
//# sourceMappingURL=index.js.map