UNPKG

@juspay/neurolink

Version:

Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications.

181 lines (180 loc) 5.59 kB
/**
 * SageMaker Language Model Implementation
 *
 * This module implements the LanguageModelV1 interface for Amazon SageMaker
 * integration with the Vercel AI SDK.
 */
import type { LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1StreamPart } from "ai";
import type { SageMakerConfig, SageMakerModelConfig } from "./types.js";
import type { ConnectivityResult } from "../../types/typeAliases.js";
/**
 * SageMaker Language Model implementing the LanguageModelV1 interface.
 *
 * Ambient declaration (`.d.ts`): describes the compiled implementation's
 * public surface — generation, streaming, batch inference, connectivity
 * testing, and capability introspection for a SageMaker-hosted model.
 */
export declare class SageMakerLanguageModel implements LanguageModelV1 {
    /** AI SDK specification version this model implements. */
    readonly specificationVersion = "v1";
    /** Provider identifier reported to the AI SDK. */
    readonly provider = "sagemaker";
    /** Model identifier passed to the constructor. */
    readonly modelId: string;
    /** Declared streaming support flag (doStream is implemented). */
    readonly supportsStreaming = true;
    /** Object-generation mode used when none is specified by the caller. */
    readonly defaultObjectGenerationMode: "json";
    // Internal SageMaker runtime client — type is private to the implementation.
    private client;
    // Provider-level configuration (region, credentials, endpoint, …) — see SageMakerConfig.
    private config;
    // Per-model configuration (model type, endpoint name, …) — see SageMakerModelConfig.
    private modelConfig;
    /**
     * @param modelId     Identifier for the model instance.
     * @param config      Provider-level SageMaker configuration.
     * @param modelConfig Model-specific configuration.
     */
    constructor(modelId: string, config: SageMakerConfig, modelConfig: SageMakerModelConfig);
    /**
     * Generate text synchronously using SageMaker endpoint.
     *
     * Return shape follows the LanguageModelV1 `doGenerate` contract:
     * generated text/reasoning/files, token usage, a normalized finish
     * reason, optional warnings, and the raw request/response envelopes.
     */
    doGenerate(options: LanguageModelV1CallOptions): Promise<{
        text?: string;
        reasoning?: string | Array<{
            type: "text";
            text: string;
            signature?: string;
        } | {
            type: "redacted";
            data: string;
        }>;
        files?: Array<{
            data: string | Uint8Array;
            mimeType: string;
        }>;
        logprobs?: Array<{
            token: string;
            logprob: number;
            topLogprobs: Array<{
                token: string;
                logprob: number;
            }>;
        }>;
        usage: {
            promptTokens: number;
            completionTokens: number;
            totalTokens?: number;
        };
        finishReason: "stop" | "length" | "content-filter" | "tool-calls" | "error" | "unknown";
        warnings?: Array<{
            type: "other";
            message: string;
        }>;
        rawCall: {
            rawPrompt: unknown;
            rawSettings: Record<string, unknown>;
        };
        rawResponse?: {
            headers?: Record<string, string>;
        };
        request?: {
            body?: string;
        };
    }>;
    /**
     * Generate text with streaming using SageMaker endpoint.
     *
     * Resolves to a ReadableStream of LanguageModelV1StreamPart chunks plus
     * the raw call/response metadata required by the AI SDK.
     */
    doStream(options: LanguageModelV1CallOptions): Promise<{
        stream: ReadableStream<LanguageModelV1StreamPart>;
        rawCall: {
            rawPrompt: unknown;
            rawSettings: Record<string, unknown>;
        };
        rawResponse?: {
            headers?: Record<string, string>;
        };
        request?: {
            body?: string;
        };
        warnings?: Array<{
            type: "other";
            message: string;
        }>;
    }>;
    /** Convert AI SDK options to SageMaker request format. */
    private convertToSageMakerRequest;
    /** Convert Vercel AI SDK tools to SageMaker format. */
    private convertToolsToSageMakerFormat;
    /** Convert Vercel AI SDK tool choice to SageMaker format. */
    private convertToolChoiceToSageMakerFormat;
    /** Convert Vercel AI SDK response format to SageMaker format (Phase 4). */
    private convertResponseFormatToSageMakerFormat;
    /** Extract text content from AI SDK prompt format. */
    private extractPromptText;
    /** Extract generated text from SageMaker response. */
    private extractTextFromResponse;
    /** Extract tool calls from SageMaker response (Phase 4). */
    private extractToolCallsFromResponse;
    /** Map SageMaker finish reason to standardized format. */
    private mapSageMakerFinishReason;
    /**
     * Get model configuration summary for debugging.
     *
     * `modelType` may be undefined when the model type was not configured.
     */
    getModelInfo(): {
        modelId: string;
        provider: string;
        specificationVersion: string;
        endpointName: string;
        modelType: "mistral" | "huggingface" | "custom" | "claude" | "llama" | "jumpstart" | undefined;
        region: string;
    };
    /** Test basic connectivity to the SageMaker endpoint. */
    testConnectivity(): Promise<ConnectivityResult>;
    /**
     * Batch inference support (Phase 4).
     * Process multiple prompts in a single request for efficiency.
     *
     * @param prompts Prompts to process; one result is returned per prompt.
     * @param options Optional per-batch generation parameters.
     */
    doBatchGenerate(prompts: string[], options?: {
        maxTokens?: number;
        temperature?: number;
        topP?: number;
    }): Promise<Array<{
        text: string;
        usage: {
            promptTokens: number;
            completionTokens: number;
            totalTokens: number;
        };
        finishReason: "stop" | "length" | "content-filter" | "tool-calls" | "error" | "unknown";
    }>>;
    /** Process prompts in parallel with advanced concurrency control and error handling. */
    private processPromptsInParallel;
    /**
     * Enhanced model information with batch capabilities.
     *
     * Superset of getModelInfo() with a `capabilities` feature-flag summary.
     */
    getModelCapabilities(): {
        capabilities: {
            streaming: boolean;
            toolCalling: boolean;
            structuredOutput: boolean;
            batchInference: boolean;
            supportedResponseFormats: string[];
            supportedToolTypes: string[];
            maxBatchSize: number;
            adaptiveConcurrency: boolean;
            errorRecovery: boolean;
        };
        modelId: string;
        provider: string;
        specificationVersion: string;
        endpointName: string;
        modelType: "mistral" | "huggingface" | "custom" | "claude" | "llama" | "jumpstart" | undefined;
        region: string;
    };
}
export default SageMakerLanguageModel;