@neureus/sdk
Version: 0.2.0
Neureus Platform SDK - AI-native, edge-first application platform
813 lines (807 loc) • 25 kB
JavaScript
import ky from 'ky';
// esbuild-style module-interop helpers (restyled for readability).
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Wraps a lazy module body so it runs at most once. `fn` is a one-key object
// ({ "src/file.ts"() { ... } }); after the first call the factory reference is
// dropped (fn = 0) and the cached result is returned on every later call.
var __esm = (fn, res) => function __init() {
  if (fn) {
    const factory = fn[__getOwnPropNames(fn)[0]];
    fn = 0;
    res = factory(fn);
  }
  return res;
};
// Defines every entry of `all` on `target` as an enumerable getter, giving
// live (lazy) bindings to the module's exports.
var __export = (target, all) => {
  for (var name in all) {
    __defProp(target, name, { get: all[name], enumerable: true });
  }
};
// Copies own properties of `from` onto `to` as getters, skipping `except` and
// any key already present on `to`; the source's enumerable flag is preserved.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (const key of __getOwnPropNames(from)) {
      if (__hasOwnProp.call(to, key) || key === except) continue;
      desc = __getOwnPropDesc(from, key);
      __defProp(to, key, { get: () => from[key], enumerable: !desc || desc.enumerable });
    }
  }
  return to;
};
// Produces a CommonJS-shaped namespace (non-enumerable __esModule marker)
// mirroring the given ES module exports object.
var __toCommonJS = (mod) => {
  const cjs = __defProp({}, "__esModule", { value: true });
  return __copyProps(cjs, mod);
};
// src/ai.ts
// Export namespace for the ai module; getters are installed eagerly but
// resolve to values that only exist after init_ai() has run.
var ai_exports = {};
__export(ai_exports, {
  AIClient: () => AIClient,
  createAIClient: () => createAIClient
});
// Convenience factory; requires init_ai() to have run so AIClient is defined.
function createAIClient(config) {
  return new AIClient(config);
}
// AIClient is assigned when init_ai() runs; __esm guarantees the module body
// below executes at most once (esbuild lazy-module shim).
var AIClient;
var init_ai = __esm({
  "src/ai.ts"() {
    AIClient = class {
      // Pre-configured ky HTTP instance (auth, retry, timeout baked in).
      http;
      // Resolved configuration with defaults applied.
      config;
      constructor(config) {
        // NOTE(review): `||` defaults mean falsy inputs (timeout/retries of 0,
        // empty-string ids) fall back to the defaults shown here.
        this.config = {
          apiKey: config.apiKey,
          baseUrl: config.baseUrl || "https://api.neureus.ai",
          timeout: config.timeout || 6e4,
          retries: config.retries || 3,
          userId: config.userId || "",
          teamId: config.teamId || ""
        };
        this.http = ky.create({
          prefixUrl: this.config.baseUrl,
          timeout: this.config.timeout,
          retry: {
            limit: this.config.retries,
            // POSTs (chat completions) are retried too, on the codes below.
            methods: ["get", "post"],
            statusCodes: [408, 413, 429, 500, 502, 503, 504]
          },
          hooks: {
            beforeRequest: [
              // Attach auth and identification headers to every request.
              (request) => {
                request.headers.set("Authorization", `Bearer ${this.config.apiKey}`);
                request.headers.set("Content-Type", "application/json");
                request.headers.set("User-Agent", "Neureus-SDK/0.2.0");
              }
            ]
          }
        });
      }
      /**
       * Chat completion API
       */
      chat = {
        /**
         * Create a non-streaming chat completion
         *
         * @example
         * ```typescript
         * const response = await ai.chat.create({
         *   model: 'gpt-4',
         *   messages: [
         *     { role: 'system', content: 'You are a helpful assistant.' },
         *     { role: 'user', content: 'What is the capital of France?' }
         *   ],
         *   temperature: 0.7
         * });
         *
         * console.log(response.choices[0].message.content);
         * ```
         */
        create: async (messages, options) => {
          const request = {
            model: options?.model || "gpt-3.5-turbo",
            messages,
            // `??` (not `||`) so an explicit temperature of 0 is honored.
            temperature: options?.temperature ?? 0.7,
            maxTokens: options?.maxTokens,
            topP: options?.topP,
            frequencyPenalty: options?.frequencyPenalty,
            presencePenalty: options?.presencePenalty,
            stop: options?.stop,
            stream: false,
            cache: options?.cache ?? true,
            fallback: options?.fallback,
            metadata: options?.metadata,
            // Per-call ids win over client-level ids; "" collapses to undefined.
            userId: options?.userId || this.config.userId || void 0,
            teamId: options?.teamId || this.config.teamId || void 0
          };
          return this.http.post("ai/chat/completions", {
            json: request
          }).json();
        },
        /**
         * Create a streaming chat completion
         *
         * @example
         * ```typescript
         * const stream = await ai.chat.stream({
         *   model: 'gpt-4',
         *   messages: [{ role: 'user', content: 'Tell me a story' }]
         * });
         *
         * for await (const chunk of stream) {
         *   const content = chunk.choices[0]?.delta?.content;
         *   if (content) {
         *     process.stdout.write(content);
         *   }
         * }
         * ```
         */
        stream: async (messages, options) => {
          // Identical request shape to create(), except stream: true.
          const request = {
            model: options?.model || "gpt-3.5-turbo",
            messages,
            temperature: options?.temperature ?? 0.7,
            maxTokens: options?.maxTokens,
            topP: options?.topP,
            frequencyPenalty: options?.frequencyPenalty,
            presencePenalty: options?.presencePenalty,
            stop: options?.stop,
            stream: true,
            cache: options?.cache ?? true,
            fallback: options?.fallback,
            metadata: options?.metadata,
            userId: options?.userId || this.config.userId || void 0,
            teamId: options?.teamId || this.config.teamId || void 0
          };
          const response = await this.http.post("ai/chat/completions", {
            json: request
          });
          // Server replies with Server-Sent Events; expose them as an async
          // iterable of parsed JSON chunks.
          return this.parseSSEStream(response.body);
        }
      };
      /**
       * List available models
       *
       * @example
       * ```typescript
       * const models = await ai.models.list();
       * console.log(models); // [{ name: 'gpt-4', provider: 'openai', ... }]
       * ```
       */
      models = {
        list: async () => {
          return this.http.get("ai/models").json();
        }
      };
      /**
       * Parse Server-Sent Events stream into async iterable
       */
      async *parseSSEStream(body) {
        const reader = body.getReader();
        const decoder = new TextDecoder();
        // Carries any partial line left over between network chunks.
        let buffer = "";
        try {
          while (true) {
            const { done, value } = await reader.read();
            if (done) break;
            buffer += decoder.decode(value, { stream: true });
            const lines = buffer.split("\n");
            // The last element may be an incomplete line; keep it for the
            // next chunk instead of processing it now.
            buffer = lines.pop() || "";
            for (const line of lines) {
              const trimmed = line.trim();
              if (trimmed === "") continue;
              // SSE comment lines begin with ":".
              if (trimmed.startsWith(":")) continue;
              // OpenAI-style end-of-stream sentinel.
              if (trimmed === "data: [DONE]") return;
              if (trimmed.startsWith("data: ")) {
                const data = trimmed.slice(6);
                try {
                  const chunk = JSON.parse(data);
                  yield chunk;
                } catch (error) {
                  // Best-effort: malformed events are logged and skipped.
                  console.error("Failed to parse SSE data:", data, error);
                }
              }
            }
          }
        } finally {
          reader.releaseLock();
        }
      }
    };
  }
});
// src/vector.ts
// Export namespace for the vector module; getters are installed eagerly but
// resolve to values that only exist after init_vector() has run.
var vector_exports = {};
__export(vector_exports, {
  VectorClient: () => VectorClient,
  createVectorClient: () => createVectorClient
});
// Convenience factory; requires init_vector() to have run so VectorClient is defined.
function createVectorClient(config) {
  return new VectorClient(config);
}
// VectorClient is assigned when init_vector() runs; __esm guarantees the
// module body below executes at most once (esbuild lazy-module shim).
var VectorClient;
var init_vector = __esm({
  "src/vector.ts"() {
    VectorClient = class {
      // Pre-configured ky HTTP instance (auth, retry, timeout baked in).
      http;
      // Resolved configuration with defaults applied.
      config;
      constructor(config) {
        this.config = {
          apiKey: config.apiKey,
          baseUrl: config.baseUrl || "https://api.neureus.ai",
          timeout: config.timeout || 3e4,
          retries: config.retries || 3,
          indexName: config.indexName || "default",
          // Only add the `namespace` key when one was explicitly provided.
          ...config.namespace !== void 0 && { namespace: config.namespace }
        };
        this.http = ky.create({
          prefixUrl: this.config.baseUrl,
          timeout: this.config.timeout,
          retry: {
            limit: this.config.retries,
            // Unlike AIClient, DELETEs are retried here as well.
            methods: ["get", "post", "delete"],
            statusCodes: [408, 413, 429, 500, 502, 503, 504]
          },
          hooks: {
            beforeRequest: [
              // Attach auth and identification headers to every request.
              (request) => {
                request.headers.set("Authorization", `Bearer ${this.config.apiKey}`);
                request.headers.set("Content-Type", "application/json");
                request.headers.set("User-Agent", "Neureus-SDK/0.2.0");
              }
            ]
          }
        });
      }
      /**
       * Index management API
       */
      indices = {
        /**
         * Create a new vector index
         *
         * @example
         * ```typescript
         * await vectors.indices.create({
         *   name: 'documents',
         *   dimension: 1536, // OpenAI ada-002 dimensions
         *   metric: 'cosine',
         *   indexType: 'hnsw'
         * });
         * ```
         */
        create: async (config) => {
          return this.http.post("vector/indices", {
            json: config
          }).json();
        },
        /**
         * List all indices
         *
         * @example
         * ```typescript
         * const indices = await vectors.indices.list();
         * console.log(indices); // [{ name: 'documents', dimension: 1536, ... }]
         * ```
         */
        list: async () => {
          return this.http.get("vector/indices").json();
        },
        /**
         * Get index statistics
         *
         * @example
         * ```typescript
         * const stats = await vectors.indices.stats('documents');
         * console.log(stats.vectorCount, stats.memoryUsage);
         * ```
         */
        stats: async (indexName) => {
          return this.http.get(`vector/indices/${indexName}/stats`).json();
        },
        /**
         * Drop (delete) an index
         *
         * @example
         * ```typescript
         * await vectors.indices.drop('old-index');
         * ```
         */
        drop: async (indexName) => {
          return this.http.delete(`vector/indices/${indexName}`).json();
        }
      };
      /**
       * Upsert (insert or update) a single vector
       *
       * @example
       * ```typescript
       * const id = await vectors.upsert({
       *   vectors: [{
       *     id: 'doc1',
       *     vector: [0.1, 0.2, ...], // embedding vector
       *     metadata: { title: 'Document 1', page: 1 }
       *   }]
       * });
       * ```
       */
      async upsert(options) {
        const { vectors, namespace, indexName } = options;
        // More than one vector: route to the batch endpoint, whose response
        // already has the batch result shape.
        if (vectors.length > 1) {
          return this.http.post("vector/vectors/batch", {
            json: {
              batch: {
                vectors,
                namespace: namespace || this.config.namespace
              },
              indexName: indexName || this.config.indexName
            }
          }).json();
        }
        // Single vector: use the scalar endpoint, then normalize its response
        // to the batch result shape so callers see one consistent return type.
        const result = await this.http.post("vector/vectors", {
          json: {
            vector: vectors[0],
            namespace: namespace || this.config.namespace,
            indexName: indexName || this.config.indexName
          }
        }).json();
        return {
          success: result.success,
          ids: [result.id],
          count: 1,
          message: result.message
        };
      }
      /**
       * Get a vector by ID
       *
       * @example
       * ```typescript
       * const vector = await vectors.get('doc1');
       * console.log(vector.metadata, vector.vector);
       * ```
       */
      async get(id, namespace) {
        const params = new URLSearchParams();
        // Only send a namespace when one is available (argument or default).
        if (namespace || this.config.namespace) {
          params.set("namespace", namespace || this.config.namespace);
        }
        return this.http.get(`vector/vectors/${id}`, { searchParams: params }).json();
      }
      /**
       * Delete a vector by ID
       *
       * @example
       * ```typescript
       * await vectors.delete('doc1');
       * ```
       */
      async delete(id, namespace, indexName) {
        const params = new URLSearchParams();
        if (namespace || this.config.namespace) {
          params.set("namespace", namespace || this.config.namespace);
        }
        params.set("indexName", indexName || this.config.indexName);
        return this.http.delete(`vector/vectors/${id}`, { searchParams: params }).json();
      }
      /**
       * Search for similar vectors
       *
       * @example
       * ```typescript
       * const results = await vectors.search({
       *   vector: [0.1, 0.2, ...], // query embedding
       *   topK: 5,
       *   minSimilarity: 0.7,
       *   filter: { page: { $gt: 10 } }
       * });
       *
       * for (const result of results.matches) {
       *   console.log(result.id, result.score, result.metadata);
       * }
       * ```
       */
      async search(options) {
        // indexName travels at the top level of the request; everything else
        // is forwarded inside `query`.
        const { indexName, ...searchQuery } = options;
        return this.http.post("vector/search", {
          json: {
            query: {
              ...searchQuery,
              namespace: searchQuery.namespace || this.config.namespace
            },
            indexName: indexName || this.config.indexName
          }
        }).json();
      }
      /**
       * Hybrid search (vector + keyword)
       *
       * Combines vector similarity search with keyword/text search
       * for better retrieval accuracy.
       *
       * @example
       * ```typescript
       * const results = await vectors.hybridSearch({
       *   vector: [0.1, 0.2, ...],
       *   query: 'machine learning',
       *   topK: 10,
       *   alpha: 0.7 // 70% vector, 30% keyword
       * });
       * ```
       */
      async hybridSearch(options) {
        const { indexName, ...searchQuery } = options;
        return this.http.post("vector/search/hybrid", {
          json: {
            query: {
              ...searchQuery,
              namespace: searchQuery.namespace || this.config.namespace
            },
            indexName: indexName || this.config.indexName
          }
        }).json();
      }
      /**
       * Clear all vectors in a namespace
       *
       * @example
       * ```typescript
       * await vectors.clear('temporary-docs');
       * ```
       */
      async clear(namespace, indexName) {
        const params = new URLSearchParams();
        params.set("indexName", indexName || this.config.indexName);
        return this.http.delete(`vector/namespaces/${namespace}`, { searchParams: params }).json();
      }
    };
  }
});
// src/rag.ts
// Export namespace for the rag module; getters are installed eagerly but
// resolve to values that only exist after init_rag() has run.
var rag_exports = {};
__export(rag_exports, {
  RAGClient: () => RAGClient,
  createRAGClient: () => createRAGClient
});
// Convenience factory; requires init_rag() to have run so RAGClient is defined.
function createRAGClient(config) {
  return new RAGClient(config);
}
// RAGClient is assigned when init_rag() runs; __esm guarantees the module
// body below executes at most once (esbuild lazy-module shim).
var RAGClient;
var init_rag = __esm({
  "src/rag.ts"() {
    RAGClient = class {
      // Pre-configured ky HTTP instance (auth, retry, timeout baked in).
      http;
      // Resolved configuration with defaults applied.
      config;
      constructor(config) {
        this.config = {
          apiKey: config.apiKey,
          baseUrl: config.baseUrl || "https://api.neureus.ai",
          timeout: config.timeout || 6e4,
          retries: config.retries || 3,
          userId: config.userId || "",
          teamId: config.teamId || ""
        };
        this.http = ky.create({
          prefixUrl: this.config.baseUrl,
          timeout: this.config.timeout,
          retry: {
            limit: this.config.retries,
            // Note: PATCH/DELETE (pipelines.update/delete, clear) are not
            // retried under this config — only GET and POST are.
            methods: ["get", "post"],
            statusCodes: [408, 413, 429, 500, 502, 503, 504]
          },
          hooks: {
            beforeRequest: [
              // Attach auth and identification headers to every request.
              (request) => {
                request.headers.set("Authorization", `Bearer ${this.config.apiKey}`);
                request.headers.set("Content-Type", "application/json");
                request.headers.set("User-Agent", "Neureus-SDK/0.2.0");
              }
            ]
          }
        });
      }
      /**
       * Pipeline management API
       */
      pipelines = {
        /**
         * Create a new RAG pipeline
         *
         * @example
         * ```typescript
         * await rag.pipelines.create({
         *   name: 'customer-support',
         *   description: 'Customer support knowledge base',
         *   embedding: {
         *     model: 'text-embedding-ada-002',
         *     provider: 'openai',
         *     dimensions: 1536
         *   },
         *   chunking: {
         *     strategy: 'recursive',
         *     size: 512,
         *     overlap: 128
         *   },
         *   generation: {
         *     model: 'gpt-4',
         *     provider: 'openai',
         *     temperature: 0.1
         *   }
         * });
         * ```
         */
        create: async (config) => {
          return this.http.post("rag/pipelines", {
            json: config
          }).json();
        },
        /**
         * List all RAG pipelines
         *
         * @example
         * ```typescript
         * const pipelines = await rag.pipelines.list();
         * console.log(pipelines); // [{ name: 'docs', status: 'ready', ... }]
         * ```
         */
        list: async () => {
          return this.http.get("rag/pipelines").json();
        },
        /**
         * Get pipeline information
         *
         * @example
         * ```typescript
         * const info = await rag.pipelines.get('product-docs');
         * console.log(info.stats.documentsCount, info.stats.totalQueries);
         * ```
         */
        get: async (pipelineName) => {
          return this.http.get(`rag/pipelines/${pipelineName}`).json();
        },
        /**
         * Update pipeline configuration
         *
         * @example
         * ```typescript
         * await rag.pipelines.update('product-docs', {
         *   generation: {
         *     temperature: 0.5
         *   }
         * });
         * ```
         */
        update: async (pipelineName, updates) => {
          return this.http.patch(`rag/pipelines/${pipelineName}`, {
            json: updates
          }).json();
        },
        /**
         * Delete a pipeline
         *
         * @example
         * ```typescript
         * await rag.pipelines.delete('old-docs');
         * ```
         */
        delete: async (pipelineName) => {
          return this.http.delete(`rag/pipelines/${pipelineName}`).json();
        }
      };
      /**
       * Ingest documents into a RAG pipeline
       *
       * @example
       * ```typescript
       * // Ingest from file
       * await rag.ingest('product-docs', {
       *   source: './docs',
       *   type: 'file',
       *   format: 'markdown',
       *   recursive: true
       * });
       *
       * // Ingest from URL
       * await rag.ingest('product-docs', {
       *   source: 'https://example.com/docs',
       *   type: 'url',
       *   format: 'html'
       * });
       *
       * // Ingest text directly
       * await rag.ingest('product-docs', {
       *   source: 'This is my document content...',
       *   type: 'text',
       *   metadata: { title: 'Introduction' }
       * });
       * ```
       */
      async ingest(pipelineName, options) {
        // waitForCompletion is a client-side flag; it becomes the `wait=true`
        // query parameter rather than part of the request body.
        const { waitForCompletion, ...ingestionRequest } = options;
        if (waitForCompletion) {
          return this.http.post(`rag/pipelines/${pipelineName}/ingest`, {
            json: ingestionRequest,
            searchParams: { wait: "true" }
          }).json();
        }
        return this.http.post(`rag/pipelines/${pipelineName}/ingest`, {
          json: ingestionRequest
        }).json();
      }
      /**
       * Query a RAG pipeline (non-streaming)
       *
       * @example
       * ```typescript
       * const response = await rag.query('product-docs', {
       *   query: 'How do I authenticate users?',
       *   topK: 5,
       *   minSimilarity: 0.7,
       *   includeSource: true
       * });
       *
       * console.log('Answer:', response.answer);
       * console.log('Sources:', response.sources);
       * console.log('Performance:', response.performance);
       * ```
       */
      async query(pipelineName, options) {
        // Any caller-supplied `streaming` flag is deliberately discarded and
        // forced to false for this non-streaming entry point.
        const { streaming, ...queryRequest } = options;
        return this.http.post(`rag/pipelines/${pipelineName}/query`, {
          json: {
            ...queryRequest,
            streaming: false,
            // "" collapses to undefined so empty ids are omitted from JSON.
            userId: this.config.userId || void 0,
            teamId: this.config.teamId || void 0
          }
        }).json();
      }
      /**
       * Query a RAG pipeline with streaming response
       *
       * @example
       * ```typescript
       * const stream = await rag.queryStream('product-docs', {
       *   query: 'Explain the authentication flow',
       *   topK: 5
       * });
       *
       * for await (const chunk of stream) {
       *   if (chunk.type === 'answer') {
       *     process.stdout.write(chunk.data.content);
       *   } else if (chunk.type === 'complete') {
       *     console.log('\nSources:', chunk.data.sources);
       *   }
       * }
       * ```
       */
      async queryStream(pipelineName, options) {
        // Same endpoint as query(), but streaming is forced to true and the
        // SSE body is surfaced as an async iterable of parsed chunks.
        const { streaming: _, ...queryRequest } = options;
        const response = await this.http.post(`rag/pipelines/${pipelineName}/query`, {
          json: {
            ...queryRequest,
            streaming: true,
            userId: this.config.userId || void 0,
            teamId: this.config.teamId || void 0
          }
        });
        return this.parseSSEStream(response.body);
      }
      /**
       * Get pipeline statistics
       *
       * @example
       * ```typescript
       * const stats = await rag.stats('product-docs');
       * console.log(`Documents: ${stats.documentsCount}`);
       * console.log(`Queries: ${stats.totalQueries}`);
       * console.log(`Avg response time: ${stats.avgResponseTime}ms`);
       * ```
       */
      async stats(pipelineName) {
        // Thin wrapper: fetch full pipeline info and return just the stats.
        const info = await this.pipelines.get(pipelineName);
        return info.stats;
      }
      /**
       * Clear all documents from a pipeline
       *
       * @example
       * ```typescript
       * await rag.clear('product-docs');
       * ```
       */
      async clear(pipelineName) {
        return this.http.delete(`rag/pipelines/${pipelineName}/documents`).json();
      }
      /**
       * Parse Server-Sent Events stream into async iterable
       */
      async *parseSSEStream(body) {
        const reader = body.getReader();
        const decoder = new TextDecoder();
        // Carries any partial line left over between network chunks.
        let buffer = "";
        try {
          while (true) {
            const { done, value } = await reader.read();
            if (done) break;
            buffer += decoder.decode(value, { stream: true });
            const lines = buffer.split("\n");
            // The last element may be an incomplete line; keep it for the
            // next chunk instead of processing it now.
            buffer = lines.pop() || "";
            for (const line of lines) {
              const trimmed = line.trim();
              if (trimmed === "") continue;
              // SSE comment lines begin with ":".
              if (trimmed.startsWith(":")) continue;
              // End-of-stream sentinel.
              if (trimmed === "data: [DONE]") return;
              if (trimmed.startsWith("data: ")) {
                const data = trimmed.slice(6);
                try {
                  const chunk = JSON.parse(data);
                  yield chunk;
                } catch (error) {
                  // Best-effort: malformed events are logged and skipped.
                  console.error("Failed to parse SSE data:", data, error);
                }
              }
            }
          }
        } finally {
          reader.releaseLock();
        }
      }
    };
  }
});
// src/index.ts
// Eagerly run every lazy module shim so AIClient/VectorClient/RAGClient are
// defined before any of the exported names are used.
init_ai();
init_vector();
init_rag();
/**
 * Unified entry point for the Neureus SDK: resolves defaults once and wires
 * up the AI, Vector, and RAG sub-clients from a single configuration.
 */
var NeureusClient = class {
  /**
   * AI Gateway client for LLM interactions
   */
  ai;
  /**
   * Vector Database client for vector operations
   */
  vector;
  /**
   * RAG Pipeline client for document Q&A
   */
  rag;
  /**
   * Configuration used to create this client
   */
  config;
  constructor(config) {
    const resolved = {
      apiKey: config.apiKey,
      baseUrl: config.baseUrl || "https://api.neureus.ai",
      timeout: config.timeout || 6e4,
      retries: config.retries || 3,
      userId: config.userId || "",
      teamId: config.teamId || "",
      defaultVectorIndex: config.defaultVectorIndex || "default",
      defaultVectorNamespace: config.defaultVectorNamespace || ""
    };
    this.config = resolved;
    // Run each lazy module shim, then pull its class out of the
    // CommonJS-shaped namespace it exports.
    init_ai();
    const { AIClient: AIClient2 } = __toCommonJS(ai_exports);
    init_vector();
    const { VectorClient: VectorClient2 } = __toCommonJS(vector_exports);
    init_rag();
    const { RAGClient: RAGClient2 } = __toCommonJS(rag_exports);
    // Connection settings shared by every sub-client.
    const shared = {
      apiKey: resolved.apiKey,
      baseUrl: resolved.baseUrl,
      timeout: resolved.timeout,
      retries: resolved.retries
    };
    this.ai = new AIClient2({
      ...shared,
      userId: resolved.userId,
      teamId: resolved.teamId
    });
    this.vector = new VectorClient2({
      ...shared,
      indexName: resolved.defaultVectorIndex,
      namespace: resolved.defaultVectorNamespace
    });
    this.rag = new RAGClient2({
      ...shared,
      userId: resolved.userId,
      teamId: resolved.teamId
    });
  }
};
// Factory mirroring `new NeureusClient(config)` for callers preferring functions.
function createNeureusClient(config) {
  return new NeureusClient(config);
}
// Default export is the class itself.
var src_default = NeureusClient;
// SDK version string; matches the User-Agent header set by each sub-client.
var VERSION = "0.2.0";
export { AIClient, NeureusClient, RAGClient, VERSION, VectorClient, createAIClient, createNeureusClient, createRAGClient, createVectorClient, src_default as default };
//# sourceMappingURL=index.js.map