ai.libx.js
Unified API bridge for various AI models (LLMs, image/video generation, TTS, STT) - stateless, edge-compatible
import { BaseAdapter } from './base/BaseAdapter';
import { ChatOptions, ChatResponse, StreamChunk, Message } from '../types';
import { streamLines } from '../utils/stream';
import { handleProviderError } from '../utils/errors';
import { contentToString } from '../utils/content-helpers';

interface AnthropicMessage {
  role: 'user' | 'assistant';
  content: string;
}

interface AnthropicRequest {
  model: string;
  messages: AnthropicMessage[];
  max_tokens: number;
  temperature?: number;
  top_p?: number;
  top_k?: number;
  stop_sequences?: string[];
  stream?: boolean;
  system?: string;
}

/**
 * Anthropic Claude API adapter
 */
export class AnthropicAdapter extends BaseAdapter {
  get name(): string {
    return 'anthropic';
  }
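
  /**
   * Sends a chat request to the Anthropic Messages API. Resolves to a full
   * ChatResponse, or to an AsyncIterable of StreamChunks when
   * options.stream is true.
   */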
  async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
    try {
      const apiKey = this.getApiKey(options);
      const baseUrl = this.getBaseUrl('https://api.anthropic.com/v1');
      // Strip provider prefix from model if present
      const model = options.model.replace(/^anthropic\//, '');
      // Anthropic takes the system prompt as a top-level field, not as a message
      const systemMessage = options.messages.find((m) => m.role === 'system');
      const nonSystemMessages = options.messages.filter((m) => m.role !== 'system');
      const request: AnthropicRequest = {
        model,
        messages: this.transformMessages(nonSystemMessages),
        max_tokens: options.maxTokens || 4096,
        stream: options.stream || false,
      };
      // Add system prompt
      if (systemMessage) {
        request.system = contentToString(systemMessage.content);
      }
      // Add optional sampling parameters
      if (options.temperature !== undefined) request.temperature = options.temperature;
      if (options.topP !== undefined) request.top_p = options.topP;
      if (options.topK !== undefined) request.top_k = options.topK;
      if (options.stop) {
        // The Messages API expects an array; wrap a single stop string
        request.stop_sequences = Array.isArray(options.stop) ? options.stop : [options.stop];
      }
      // Merge provider-specific options
      if (options.providerOptions) {
        Object.assign(request, options.providerOptions);
      }
      const response = await this.fetchWithErrorHandling(
        `${baseUrl}/messages`,
        {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            'x-api-key': apiKey,
            'anthropic-version': '2023-06-01',
          },
          body: JSON.stringify(request),
        },
        this.name
      );
      if (options.stream) {
        return this.handleStreamResponse(response, model);
      }
      return this.handleNonStreamResponse(await response.json(), model);
    } catch (error) {
      // handleProviderError normalizes and rethrows the provider error
      handleProviderError(error, this.name);
    }
  }
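
  /**
   * Maps internal messages to Anthropic's user/assistant format,
   * flattening structured content parts to plain strings.
   */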
  private transformMessages(messages: Message[]): AnthropicMessage[] {
    return messages.map((msg) => ({
      role: msg.role === 'user' ? 'user' : 'assistant',
      content: contentToString(msg.content),
    }));
  }
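
  /**
   * Normalizes a non-streaming Messages API response into the shared
   * ChatResponse shape, including token usage when present.
   */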
  private handleNonStreamResponse(data: any, model: string): ChatResponse {
    const content = data.content?.[0]?.text || '';
    return {
      content,
      finishReason: data.stop_reason,
      usage: data.usage
        ? {
            promptTokens: data.usage.input_tokens,
            completionTokens: data.usage.output_tokens,
            totalTokens: data.usage.input_tokens + data.usage.output_tokens,
          }
        : undefined,
      model,
      raw: data,
    };
  }
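
  /**
   * Parses the server-sent event stream, yielding text deltas from
   * content_block_delta events and the finish reason from message_delta.
   */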
  private async *handleStreamResponse(response: Response, model: string): AsyncIterable<StreamChunk> {
    if (!response.body) {
      throw new Error('No response body for streaming');
    }
    for await (const line of streamLines(response.body)) {
      // Only SSE data lines carry payloads; event lines are skipped
      if (!line.startsWith('data: ')) continue;
      const data = line.slice(6).trim();
      // Defensive: Anthropic signals completion via a message_stop event,
      // but tolerate an OpenAI-style [DONE] sentinel as well
      if (data === '[DONE]') break;
      try {
        const chunk = JSON.parse(data);
        if (chunk.type === 'content_block_delta') {
          // Incremental text for the current content block
          const content = chunk.delta?.text || '';
          if (content) {
            yield {
              content,
              index: chunk.index,
            };
          }
        } else if (chunk.type === 'message_delta') {
          // Final message metadata, including the stop reason
          if (chunk.delta?.stop_reason) {
            yield {
              content: '',
              finishReason: chunk.delta.stop_reason,
            };
          }
        }
      } catch {
        // Skip lines that are not valid JSON
      }
    }
  }
}
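
A minimal usage sketch. The import path, how BaseAdapter resolves the API key, and the exact ChatOptions field names are assumptions inferred from how this adapter reads them (model, messages, maxTokens, stream), not a confirmed public API; the model name is illustrative:

import { AnthropicAdapter } from './adapters/AnthropicAdapter'; // hypothetical path
import { ChatResponse, StreamChunk } from './types';

// Credentials are resolved via getApiKey; the mechanism is not shown in this file
const adapter = new AnthropicAdapter();

// Non-streaming: chat() resolves to a ChatResponse
const result = (await adapter.chat({
  model: 'anthropic/claude-3-5-sonnet-latest', // provider prefix is stripped by the adapter
  messages: [
    { role: 'system', content: 'You are a concise assistant.' },
    { role: 'user', content: 'Hello!' },
  ],
  maxTokens: 256,
})) as ChatResponse;
console.log(result.content, result.usage);

// Streaming: chat() resolves to an AsyncIterable<StreamChunk>
const stream = (await adapter.chat({
  model: 'anthropic/claude-3-5-sonnet-latest',
  messages: [{ role: 'user', content: 'Stream a haiku.' }],
  stream: true,
})) as AsyncIterable<StreamChunk>;
for await (const chunk of stream) {
  process.stdout.write(chunk.content);
}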