@juspay/neurolink
Version:
Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications.
188 lines (187 loc) • 6.89 kB
TypeScript
import type { ZodType, ZodTypeDef } from "zod";
import { type Schema, type LanguageModelV1, type LanguageModel } from "ai";
import type { AIProviderName } from "../core/types.js";
import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
import { BaseProvider } from "../core/baseProvider.js";
/**
* Google Vertex AI Provider v2 - BaseProvider Implementation
*
* Features:
* - Extends BaseProvider for shared functionality
* - Preserves existing Google Cloud authentication
* - Maintains Anthropic model support via dynamic imports
* - Fresh model creation for each request
* - Enhanced error handling with setup guidance
* - Tool registration and context management
*/
export declare class GoogleVertexProvider extends BaseProvider {
/** Google Cloud project ID used when creating Vertex AI model instances. */
private projectId;
/** Google Cloud region for Vertex AI requests (e.g. 'us-central1') — presumably; confirm against implementation. */
private location;
/** Tools registered via registerTool() — presumably keyed by tool name; verify in implementation. */
private registeredTools;
/** Context object supplied to tool handlers (managed via setToolContext/getToolContext). */
private toolContext;
/** Class-wide cache of model configurations; size-bounded by MAX_CACHE_SIZE with LRU eviction. */
private static modelConfigCache;
/** Timestamp bookkeeping for modelConfigCache; feeds the cacheAge figures in getCacheStats(). */
private static modelConfigCacheTime;
/** How long cache entries stay valid — units not visible here; presumably milliseconds. TODO confirm. */
private static readonly CACHE_DURATION;
/** Upper bound on cached entries before evictLRUCacheEntries() trims the caches. */
private static readonly MAX_CACHE_SIZE;
/** Class-wide cache of per-model maxTokens decisions (see shouldSetMaxTokensCached). */
private static maxTokensCache;
/** Timestamp bookkeeping for maxTokensCache; feeds the cacheAge figures in getCacheStats(). */
private static maxTokensCacheTime;
/**
 * @param modelName Optional model identifier; when omitted the provider presumably
 *   falls back to getDefaultModel() — confirm against implementation.
 * @param _providerName Reserved provider-name override; the underscore prefix
 *   suggests it is intentionally unused.
 * @param sdk Optional pre-configured SDK instance, typed as unknown and
 *   validated downstream.
 */
constructor(modelName?: string, _providerName?: string, sdk?: unknown);
/** Identifies this provider to the BaseProvider machinery. */
protected getProviderName(): AIProviderName;
/** Name of the Vertex model used when none is supplied to the constructor. */
protected getDefaultModel(): string;
/**
 * Returns the Vercel AI SDK model instance for Google Vertex
 * Creates fresh model instances for each request
 */
protected getAISDKModel(): Promise<LanguageModel>;
/**
 * Initialize model creation logging and tracking
 */
private initializeModelCreationLogging;
/**
 * Check if model is Anthropic-based and attempt creation
 */
private attemptAnthropicModelCreation;
/**
 * Create Google Vertex model with comprehensive logging and error handling
 */
private createGoogleVertexModel;
/**
 * Create Vertex AI instance and model with comprehensive logging
 */
private createVertexInstance;
/**
 * Gets the appropriate model instance (Google or Anthropic)
 * Uses dual provider architecture for proper model routing
 * Creates fresh instances for each request to ensure proper authentication
 */
private getModel;
/**
 * Log stream execution start with comprehensive analysis
 */
private logStreamExecutionStart;
/**
 * Log timeout setup process
 */
private logTimeoutSetup;
/**
 * Log successful timeout setup
 */
private logTimeoutSetupSuccess;
/**
 * Log and perform stream options validation
 */
private logAndValidateStreamOptions;
/**
 * Log start of message building process
 */
private logMessageBuildStart;
/**
 * Log successful message building
 */
private logMessageBuildSuccess;
/**
 * Execute a streaming request against the selected Vertex model.
 * @param options Stream configuration (prompt, model settings, etc.).
 * @param analysisSchema Optional Zod or AI-SDK schema for structured analysis of the stream.
 * @returns The in-flight stream result.
 */
protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
/**
 * Normalize an unknown provider failure into an Error — presumably enriched
 * with setup guidance (see class header); confirm in implementation.
 */
protected handleProviderError(error: unknown): Error;
/**
 * Memory-safe cache management for model configurations
 * Implements LRU eviction to prevent memory leaks in long-running processes
 */
private static evictLRUCacheEntries;
/**
 * Access and refresh cache entry (moves to end for LRU)
 */
private static accessCacheEntry;
/**
 * Memory-safe cached check for whether maxTokens should be set for the given model
 * Optimized for streaming performance with LRU eviction to prevent memory leaks
 */
private shouldSetMaxTokensCached;
/**
 * Memory-safe check if model has maxTokens issues using configuration-based approach
 * This replaces hardcoded model-specific logic with configurable behavior
 * Includes LRU caching to avoid repeated configuration lookups during streaming
 */
private modelHasMaxTokensIssues;
/**
 * Check if Anthropic models are available
 * @returns Promise<boolean> indicating if Anthropic support is available
 */
hasAnthropicSupport(): Promise<boolean>;
/**
 * Create an Anthropic model instance using vertexAnthropic provider
 * Uses fresh vertex settings for each request with comprehensive validation
 * @param modelName Anthropic model name (e.g., 'claude-3-sonnet@20240229')
 * @returns LanguageModelV1 instance or null if not available
 */
createAnthropicModel(modelName: string): Promise<LanguageModelV1 | null>;
/**
 * Validate Vertex AI authentication configuration
 */
private validateVertexAuthentication;
/**
 * Validate Vertex AI project configuration
 */
private validateVertexProjectConfiguration;
/**
 * Check if the specified region supports Anthropic models
 */
private checkVertexRegionalSupport;
/**
 * Validate Anthropic model name format and availability
 */
private validateAnthropicModelName;
/**
 * Analyze Anthropic model creation errors for detailed troubleshooting
 */
private analyzeAnthropicCreationError;
/**
 * Get detailed troubleshooting steps based on error analysis
 */
private getAnthropicTroubleshootingSteps;
/**
 * Register a tool with the AI provider
 * @param name The name of the tool
 * @param schema The Zod schema defining the tool's parameters
 * @param description A description of what the tool does
 * @param handler The function to execute when the tool is called
 */
registerTool(name: string, schema: ZodType<unknown>, description: string, handler: (params: Record<string, unknown>) => Promise<unknown>): void;
/**
 * Set the context for tool execution
 * @param context The context to use for tool execution
 */
setToolContext(context: Record<string, unknown>): void;
/**
 * Get the current tool execution context
 * @returns The current tool execution context
 */
getToolContext(): Record<string, unknown>;
/**
 * Set the tool executor function for custom tool execution
 * This method is called by BaseProvider.setupToolExecutor()
 * @param executor Function to execute tools by name
 */
setToolExecutor(executor: (toolName: string, params: unknown) => Promise<unknown>): void;
/**
 * Clear all static caches - useful for testing and memory cleanup
 * Public method to allow external cache management
 */
static clearCaches(): void;
/**
 * Get cache statistics for monitoring and debugging
 * @returns Current sizes of both static caches, the configured maximum,
 *   and the age of each cache (units presumably milliseconds — TODO confirm).
 */
static getCacheStats(): {
modelConfigCacheSize: number;
maxTokensCacheSize: number;
maxCacheSize: number;
cacheAge: {
modelConfig: number;
maxTokens: number;
};
};
/**
 * Get model suggestions when a model is not found
 */
private getModelSuggestions;
}
/** Default export: the provider class itself. */
export default GoogleVertexProvider;
/** Named alias export — presumably kept for backward compatibility with consumers importing { GoogleVertexAI }. */
export { GoogleVertexProvider as GoogleVertexAI };