@juspay/neurolink
Universal AI Development Platform with working MCP integration, multi-provider support, and a professional CLI. Built-in tools are operational and 58+ external MCP servers are discoverable, with connections to the filesystem, GitHub, database operations, and more.
Type declarations for the LiteLLMProvider class (TypeScript):
import type { ZodType, ZodTypeDef } from "zod";
import { type Schema, type LanguageModelV1 } from "ai";
import type { AIProviderName } from "../core/types.js";
import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
import { BaseProvider } from "../core/baseProvider.js";
/**
 * LiteLLM Provider - BaseProvider Implementation
 * Provides access to 100+ models via LiteLLM proxy server
 */
export declare class LiteLLMProvider extends BaseProvider {
    private model;
    private static modelsCache;
    private static modelsCacheTime;
    private static readonly MODELS_CACHE_DURATION;
    constructor(modelName?: string, sdk?: unknown);
    protected getProviderName(): AIProviderName;
    protected getDefaultModel(): string;
    /**
     * Returns the Vercel AI SDK model instance for LiteLLM
     */
    protected getAISDKModel(): LanguageModelV1;
    protected handleProviderError(error: unknown): Error;
    /**
     * LiteLLM supports tools for compatible models
     */
    supportsTools(): boolean;
    /**
     * Provider-specific streaming implementation
     * Note: This is only used when tools are disabled
     */
    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
    /**
     * Get available models from LiteLLM proxy server
     * Dynamically fetches from /v1/models endpoint with caching and fallback
     */
    getAvailableModels(): Promise<string[]>;
    /**
     * Fetch available models from LiteLLM proxy /v1/models endpoint
     * @private
     */
    private fetchModelsFromAPI;
}
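
A minimal usage sketch based only on the public surface declared above. The import path and the model name are assumptions, not confirmed by this declaration file; consult the package documentation for the actual entry point.

// Hypothetical usage -- import path and model name are assumptions,
// not confirmed by this declaration file.
import { LiteLLMProvider } from "@juspay/neurolink";

async function listLiteLLMModels(): Promise<void> {
  // modelName is optional; getDefaultModel() supplies a fallback.
  const provider = new LiteLLMProvider("gpt-4o");

  // supportsTools() reports whether tool calling is available.
  if (provider.supportsTools()) {
    console.log("Tool calling is available for compatible models.");
  }

  // getAvailableModels() queries the proxy's /v1/models endpoint,
  // with caching and a fallback list per the doc comments above.
  const models = await provider.getAvailableModels();
  console.log(`LiteLLM proxy exposes ${models.length} models:`, models);
}

listLiteLLMModels().catch(console.error);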
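
The doc comments describe getAvailableModels() as fetching /v1/models with caching and a fallback. The actual implementation (fetchModelsFromAPI) is private and not shown here; the following is a sketch of that pattern under stated assumptions: an OpenAI-compatible response shape ({ data: [{ id: string }] }), a caller-supplied base URL, an assumed cache TTL, and a hypothetical fallback list.

// Sketch of the described caching pattern -- TTL, response shape, and
// fallback list are assumptions, not the package's actual code.
const MODELS_CACHE_DURATION = 5 * 60 * 1000; // assumed 5-minute TTL
let modelsCache: string[] | null = null;
let modelsCacheTime = 0;

const FALLBACK_MODELS = ["gpt-3.5-turbo"]; // hypothetical fallback

async function fetchAvailableModels(baseUrl: string): Promise<string[]> {
  const now = Date.now();
  // Serve from cache while it is still fresh.
  if (modelsCache && now - modelsCacheTime < MODELS_CACHE_DURATION) {
    return modelsCache;
  }
  try {
    // LiteLLM's proxy exposes an OpenAI-compatible /v1/models endpoint.
    const res = await fetch(`${baseUrl}/v1/models`);
    if (!res.ok) throw new Error(`HTTP ${res.status}`);
    const body = (await res.json()) as { data: Array<{ id: string }> };
    modelsCache = body.data.map((m) => m.id);
    modelsCacheTime = now;
    return modelsCache;
  } catch {
    // On failure, fall back to the last cache or a static list.
    return modelsCache ?? FALLBACK_MODELS;
  }
}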