ai.libx.js
Unified API bridge for various AI models (LLMs, image/video generation, TTS, STT) - stateless, edge-compatible
import { BaseAdapter } from './base/BaseAdapter';
import { ChatOptions, ChatResponse, StreamChunk, Message } from '../types';
import { handleProviderError } from '../utils/errors';
import { contentToString } from '../utils/content-helpers';
interface AI21Message {
  role: string;
  content: string;
}

interface AI21Request {
  model: string;
  messages: AI21Message[];
  temperature?: number;
  max_tokens?: number;
  top_p?: number;
  stop?: string[];
}
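// For reference, a request of the shape above serializes to a JSON body
// like the following (illustrative values only; consult the AI21 docs at
// https://docs.ai21.com for the authoritative schema):
//
//   {
//     "model": "jamba-1.5-mini",
//     "messages": [{ "role": "user", "content": "Hello" }],
//     "temperature": 0.7,
//     "max_tokens": 256
//   }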
/**
 * AI21 Labs API adapter. Only non-streaming chat completions are implemented.
 */
export class AI21Adapter extends BaseAdapter {
  get name(): string {
    return 'ai21';
  }

  async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
    try {
      const apiKey = this.getApiKey(options);
      const baseUrl = this.getBaseUrl('https://api.ai21.com/studio/v1');

      // Strip the provider prefix from the model name if present
      const model = options.model.replace(/^ai21\//, '');

      const request: AI21Request = {
        model,
        messages: this.transformMessages(options.messages),
      };

      // Add optional parameters only when the caller supplied them
      if (options.temperature !== undefined) request.temperature = options.temperature;
      if (options.maxTokens !== undefined) request.max_tokens = options.maxTokens;
      if (options.topP !== undefined) request.top_p = options.topP;
      if (options.stop && Array.isArray(options.stop)) {
        request.stop = options.stop;
      }

      // Merge provider-specific options (on key collision, these override the fields set above)
      if (options.providerOptions) {
        Object.assign(request, options.providerOptions);
      }

      const response = await this.fetchWithErrorHandling(
        `${baseUrl}/chat/completions`,
        {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${apiKey}`,
          },
          body: JSON.stringify(request),
        },
        this.name
      );

      const data = await response.json();
      return this.handleNonStreamResponse(data, model);
    } catch (error) {
      // handleProviderError is expected to normalize and rethrow the error
      handleProviderError(error, this.name);
    }
  }
  private transformMessages(messages: Message[]): AI21Message[] {
    // AI21 expects plain-string content, so flatten any structured content parts
    return messages.map((msg) => ({
      role: msg.role,
      content: contentToString(msg.content),
    }));
  }
  private handleNonStreamResponse(data: any, model: string): ChatResponse {
    const choice = data.choices?.[0];
    if (!choice) {
      throw new Error('No choices in response');
    }

    // Map the OpenAI-style completion payload to the unified ChatResponse shape
    return {
      content: choice.message?.content || '',
      finishReason: choice.finish_reason,
      usage: data.usage ? {
        promptTokens: data.usage.prompt_tokens || 0,
        completionTokens: data.usage.completion_tokens || 0,
        totalTokens: data.usage.total_tokens || 0,
      } : undefined,
      model,
      raw: data,
    };
  }
}
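// Usage sketch (illustrative only; assumes the API key is supplied via
// ChatOptions, as implied by getApiKey(options) above, and that
// 'jamba-1.5-mini' is an available model):
//
//   const adapter = new AI21Adapter();
//   const result = await adapter.chat({
//     model: 'ai21/jamba-1.5-mini',
//     messages: [{ role: 'user', content: 'Hello' }],
//     temperature: 0.7,
//   });
//   // This adapter never returns a stream, but the union type requires narrowing
//   if (!(Symbol.asyncIterator in result)) {
//     console.log(result.content, result.usage);
//   }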