@juspay/neurolink
Universal AI Development Platform with working MCP integration, multi-provider support, and a professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications.
import { createAnthropic } from "@ai-sdk/anthropic";
import { streamText } from "ai";
import { BaseProvider } from "../core/baseProvider.js";
import { logger } from "../utils/logger.js";
import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } from "../types/errors.js";
import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
import { buildMessagesArray } from "../utils/messageBuilder.js";
import { createProxyFetch } from "../proxy/proxyFetch.js";
// Configuration helpers - now using consolidated utility
const getAnthropicApiKey = () => {
return validateApiKey(createAnthropicConfig());
};
const getDefaultAnthropicModel = () => {
return getProviderModel("ANTHROPIC_MODEL", "claude-3-5-sonnet-20241022");
};
/**
* Anthropic Provider v2 - BaseProvider Implementation
* Fixed syntax and enhanced with proper error handling
*/
export class AnthropicProvider extends BaseProvider {
model;
constructor(modelName, sdk) {
super(modelName, "anthropic", sdk);
// Initialize Anthropic model with API key validation and proxy support
const apiKey = getAnthropicApiKey();
// Create Anthropic instance with proxy fetch
const anthropic = createAnthropic({
apiKey: apiKey,
fetch: createProxyFetch(),
});
// Initialize Anthropic model with proxy-aware instance
this.model = anthropic(this.modelName || getDefaultAnthropicModel());
logger.debug("Anthropic Provider v2 initialized", {
modelName: this.modelName,
provider: this.providerName,
});
}
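    /**
     * Returns the canonical provider identifier used in logs and error reporting
     */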
getProviderName() {
return "anthropic";
}
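    /**
     * Returns the default model id (ANTHROPIC_MODEL config, falling back to
     * claude-3-5-sonnet-20241022)
     */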
getDefaultModel() {
return getDefaultAnthropicModel();
}
/**
* Returns the Vercel AI SDK model instance for Anthropic
*/
getAISDKModel() {
return this.model;
}
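    /**
     * Maps raw SDK/transport errors onto the typed error hierarchy
     * (AuthenticationError, RateLimitError, NetworkError, ProviderError)
     * by pattern-matching the error message. Always throws.
     */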
handleProviderError(error) {
if (error instanceof TimeoutError) {
throw new NetworkError(`Request timed out after ${error.timeout}ms`, this.providerName);
}
const errorRecord = error;
const message = typeof errorRecord?.message === "string"
? errorRecord.message
: "Unknown error";
if (message.includes("API_KEY_INVALID") ||
message.includes("Invalid API key")) {
throw new AuthenticationError("Invalid Anthropic API key. Please check your ANTHROPIC_API_KEY environment variable.", this.providerName);
}
if (message.includes("rate limit") ||
message.includes("too_many_requests") ||
message.includes("429")) {
throw new RateLimitError("Anthropic rate limit exceeded. Please try again later.", this.providerName);
}
if (message.includes("ECONNRESET") ||
message.includes("ENOTFOUND") ||
message.includes("ECONNREFUSED") ||
message.includes("network") ||
message.includes("connection")) {
throw new NetworkError(`Connection error: ${message}`, this.providerName);
}
if (message.includes("500") ||
message.includes("502") ||
message.includes("503") ||
message.includes("504") ||
message.includes("server error")) {
throw new ProviderError(`Server error: ${message}`, this.providerName);
}
throw new ProviderError(`Anthropic error: ${message}`, this.providerName);
}
// executeGenerate removed - BaseProvider handles all generation with tools
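    /**
     * Streams a completion through the Vercel AI SDK with tool support and
     * timeout handling. Note that usage and finishReason are awaited before
     * returning, so the returned promise settles only after the underlying
     * stream completes; consumers then read chunks from `stream`.
     */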
async executeStream(options, analysisSchema) {
this.validateStreamOptions(options);
const timeout = this.getTimeout(options);
const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
try {
// ✅ Get tools for streaming (same as generate method)
const shouldUseTools = !options.disableTools && this.supportsTools();
const tools = shouldUseTools ? await this.getAllTools() : {};
// Build message array from options
const messages = buildMessagesArray(options);
const result = await streamText({
model: this.model,
messages: messages,
temperature: options.temperature,
maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
tools,
maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
toolChoice: shouldUseTools ? "auto" : "none",
abortSignal: timeoutController?.controller.signal,
});
timeoutController?.cleanup();
const transformedStream = this.createTextStream(result);
            // ✅ Note: Vercel AI SDK's streamText() does not expose aggregated
            // tool calls/results the way generateText() does, so these arrays
            // stay empty; tools still execute mid-stream via maxSteps/toolChoice.
            const toolCalls = [];
            const toolResults = [];
const usage = await result.usage;
const finishReason = await result.finishReason;
return {
stream: transformedStream,
provider: this.providerName,
model: this.modelName,
                toolCalls, // Empty for now; see streamText() note above
                toolResults, // Empty for now; see streamText() note above
usage: usage
? {
inputTokens: usage.promptTokens || 0,
outputTokens: usage.completionTokens || 0,
totalTokens: usage.totalTokens || 0,
}
: undefined,
finishReason: finishReason || undefined,
};
}
catch (error) {
timeoutController?.cleanup();
throw this.handleProviderError(error);
}
}
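    /**
     * Reports availability by checking that a valid Anthropic API key is configured
     */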
async isAvailable() {
try {
getAnthropicApiKey();
return true;
}
catch {
return false;
}
}
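    /**
     * Returns the underlying AI SDK model instance (same as getAISDKModel)
     */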
getModel() {
return this.model;
}
}
export default AnthropicProvider;
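// Usage sketch (illustrative only, not part of this module): assumes
// ANTHROPIC_API_KEY is set; the options shape (`input.text`, etc.) is an
// assumption inferred from buildMessagesArray usage, and the chunk type
// yielded by `stream` depends on BaseProvider.createTextStream().
//
//   const provider = new AnthropicProvider("claude-3-5-sonnet-20241022");
//   if (await provider.isAvailable()) {
//     const { stream, usage } = await provider.executeStream({
//       input: { text: "Write a haiku about streaming APIs." },
//       temperature: 0.7,
//     });
//     for await (const chunk of stream) {
//       process.stdout.write(String(chunk));
//     }
//     console.log("\ntokens used:", usage?.totalTokens);
//   }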