UNPKG

@juspay/neurolink

Version:

Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications.

164 lines (163 loc) 7.48 kB
import type { ValidationSchema } from "../types/typeAliases.js";
import type { Tool, LanguageModelV1 } from "ai";
import type { AIProvider, TextGenerationOptions, TextGenerationResult, EnhancedGenerateResult, AnalyticsData, AIProviderName, EvaluationData } from "../core/types.js";
import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
import type { UnknownRecord } from "../types/common.js";
import type { NeuroLink } from "../neurolink.js";

/**
 * Abstract base class for all AI providers.
 *
 * Tools are integrated as first-class citizens — they are always available
 * by default unless a subclass explicitly opts out via {@link supportsTools}.
 */
export declare abstract class BaseProvider implements AIProvider {
    /** Model identifier this provider instance was constructed with. */
    protected readonly modelName: string;
    /** Canonical provider name (e.g. which backend this instance targets). */
    protected readonly providerName: AIProviderName;
    /** Default timeout applied when options carry no explicit timeout. */
    protected readonly defaultTimeout: number;
    // NOTE(review): `{}` is the "any non-nullish value" type, not an empty
    // object — a more precise type (e.g. Record<string, Tool>) is likely
    // intended; confirm against the implementation this .d.ts was emitted from.
    protected readonly directTools: {};
    /** Tools discovered from MCP servers, keyed by tool name (when available). */
    protected mcpTools?: Record<string, Tool>;
    /** User-registered custom tools, keyed by tool name. */
    protected customTools?: Map<string, unknown>;
    /** Callback that performs the actual tool invocation, when wired up. */
    protected toolExecutor?: (toolName: string, params: unknown) => Promise<unknown>;
    /** Session identifier forwarded to MCP tools (see setSessionContext). */
    protected sessionId?: string;
    /** User identifier forwarded to MCP tools (see setSessionContext). */
    protected userId?: string;
    /** Back-reference to the owning NeuroLink instance, if any. */
    protected neurolink?: NeuroLink;
    constructor(modelName?: string, providerName?: AIProviderName, neurolink?: NeuroLink);
    /**
     * Check if this provider supports tool/function calling.
     * Override in subclasses to disable tools for specific providers or models.
     * @returns true by default; providers can override to return false.
     */
    supportsTools(): boolean;
    /**
     * Primary streaming method — implements the AIProvider interface.
     * When tools are involved, falls back to generate() with synthetic streaming.
     */
    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ValidationSchema): Promise<StreamResult>;
    /**
     * Text generation method — implements the AIProvider interface.
     * Tools are always available unless explicitly disabled.
     */
    generate(optionsOrPrompt: TextGenerationOptions | string, _analysisSchema?: ValidationSchema): Promise<EnhancedGenerateResult | null>;
    /** Alias for the generate method — implements the AIProvider interface. */
    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ValidationSchema): Promise<EnhancedGenerateResult | null>;
    /**
     * BACKWARD COMPATIBILITY: legacy generateText method.
     * Converts EnhancedGenerateResult to the TextGenerationResult format so
     * existing scripts using createAIProvider().generateText() keep working.
     */
    generateText(options: TextGenerationOptions): Promise<TextGenerationResult>;
    /** Provider-specific streaming implementation (only used when tools are disabled). */
    protected abstract executeStream(options: StreamOptions, analysisSchema?: ValidationSchema): Promise<StreamResult>;
    /** Get the provider name. */
    protected abstract getProviderName(): AIProviderName;
    /** Get the default model for this provider. */
    protected abstract getDefaultModel(): string;
    /**
     * REQUIRED: every provider MUST implement this method.
     * @returns the Vercel AI SDK model instance for this provider.
     */
    protected abstract getAISDKModel(): LanguageModelV1 | Promise<LanguageModelV1>;
    /**
     * Get the AI SDK model with middleware applied — wraps the base model
     * with any configured middleware.
     */
    protected getAISDKModelWithMiddleware(options?: TextGenerationOptions | StreamOptions): Promise<LanguageModelV1>;
    /** Extract middleware options from generation options. */
    private extractMiddlewareOptions;
    /** Determine whether middleware should be skipped for this request. */
    private shouldSkipMiddleware;
    /**
     * Get all available tools. Direct tools are ALWAYS available; MCP tools
     * are added when available (without blocking).
     */
    protected getAllTools(): Promise<Record<string, Tool>>;
    /**
     * Convert an MCP JSON Schema to a Zod schema for AI SDK tools.
     * Handles common MCP schema patterns safely.
     */
    private convertMCPSchemaToZod;
    /** Set the session context (session/user ids) made available to MCP tools. */
    setSessionContext(sessionId?: string, userId?: string): void;
    /** Provider-specific error handling. */
    protected abstract handleProviderError(error: unknown): Error;
    /**
     * Execute an operation with timeout and proper cleanup.
     * Consolidates identical timeout handling from 8/10 providers.
     */
    protected executeWithTimeout<T>(operation: () => Promise<T>, options: {
        timeout?: number | string;
        operationType?: string;
    }): Promise<T>;
    /** Validate stream options — consolidates validation from 7/10 providers. */
    protected validateStreamOptions(options: StreamOptions): void;
    /** Create the text-stream transformation — consolidates identical logic from 7/10 providers. */
    protected createTextStream(result: {
        textStream: AsyncIterable<string>;
    }): AsyncGenerator<{
        content: string;
    }>;
    /** Create a standardized stream result — consolidates the result structure. */
    protected createStreamResult(stream: AsyncGenerator<{
        content: string;
    }>, additionalProps?: Partial<StreamResult>): StreamResult;
    /** Create stream analytics — consolidates analytics from 4/10 providers. */
    protected createStreamAnalytics(result: UnknownRecord, startTime: number, options: StreamOptions): Promise<UnknownRecord | undefined>;
    /** Handle common error patterns — consolidates error handling shared by multiple providers. */
    protected handleCommonErrors(error: unknown): Error | null;
    /**
     * Set up a tool executor for a provider to enable actual tool execution.
     * Consolidates identical setupToolExecutor logic from neurolink.ts (used in 4 places).
     * @param sdk - The NeuroLinkSDK instance for tool execution.
     * @param functionTag - Function name used for logging.
     */
    setupToolExecutor(sdk: {
        customTools: Map<string, unknown>;
        executeTool: (toolName: string, params: unknown) => Promise<unknown>;
    }, functionTag: string): void;
    /** Normalize a string prompt or options object into full text-generation options. */
    protected normalizeTextOptions(optionsOrPrompt: TextGenerationOptions | string): TextGenerationOptions;
    /** Normalize a string prompt or options object into full stream options. */
    protected normalizeStreamOptions(optionsOrPrompt: StreamOptions | string): StreamOptions;
    /** Enrich a generation result (e.g. with analytics/evaluation data) before returning it. */
    protected enhanceResult(result: EnhancedGenerateResult, options: TextGenerationOptions, startTime: number): Promise<EnhancedGenerateResult>;
    /** Build analytics data for a completed generation. */
    protected createAnalytics(result: EnhancedGenerateResult, responseTime: number, options: TextGenerationOptions): Promise<AnalyticsData>;
    /** Build evaluation data for a completed generation. */
    protected createEvaluation(result: EnhancedGenerateResult, options: TextGenerationOptions): Promise<EvaluationData>;
    /** Validate text-generation options before execution. */
    protected validateOptions(options: TextGenerationOptions): void;
    /** Return the provider/model identifiers for this instance. */
    protected getProviderInfo(): {
        provider: string;
        model: string;
    };
    /** Get the effective timeout value in milliseconds. */
    getTimeout(options: TextGenerationOptions | StreamOptions): number;
    /**
     * Utility method to chunk large prompts into smaller pieces.
     * @param prompt - The prompt to chunk.
     * @param maxChunkSize - Maximum size per chunk (default: 900,000 characters).
     * @param overlap - Overlap between chunks to maintain context (default: 100 characters).
     * @returns Array of prompt chunks.
     */
    static chunkPrompt(prompt: string, maxChunkSize?: number, overlap?: number): string[];
}