ai.libx.js
Unified API bridge for various AI models (LLMs, image/video generation, TTS, STT) - stateless, edge-compatible
import { BaseAdapter } from './base/BaseAdapter';
import { ChatOptions, ChatResponse, StreamChunk } from '../types';
import { parseSSEStream } from '../utils/stream';
import { handleProviderError } from '../utils/errors';
import { contentToString } from '../utils/content-helpers';
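// BaseAdapter's helpers are not shown in this file; from their call sites below
// they are assumed to behave as follows: getApiKey(options) resolves the API key
// from the call options or adapter config, getBaseUrl(default) returns a
// configured endpoint override or the given default, and fetchWithErrorHandling()
// wraps fetch() and throws a normalized error on non-2xx responses.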
/**
* Groq API adapter (uses OpenAI-compatible API)
*/
export class GroqAdapter extends BaseAdapter {
get name(): string {
return 'groq';
}
async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
try {
const apiKey = this.getApiKey(options);
const baseUrl = this.getBaseUrl('https://api.groq.com/openai/v1');
// Strip provider prefix from model if present
const model = options.model.replace(/^groq\//, '');
const request: any = {
model,
messages: options.messages.map((msg) => ({
role: msg.role,
content: contentToString(msg.content),
})),
stream: options.stream || false,
};
// Add optional parameters
if (options.temperature !== undefined) request.temperature = options.temperature;
if (options.maxTokens !== undefined) request.max_tokens = options.maxTokens;
if (options.topP !== undefined) request.top_p = options.topP;
if (options.frequencyPenalty !== undefined) request.frequency_penalty = options.frequencyPenalty;
if (options.presencePenalty !== undefined) request.presence_penalty = options.presencePenalty;
if (options.stop !== undefined) request.stop = options.stop;
// Merge provider-specific options last so they can override the standard parameters set above
if (options.providerOptions) {
Object.assign(request, options.providerOptions);
}
const response = await this.fetchWithErrorHandling(
`${baseUrl}/chat/completions`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${apiKey}`,
},
body: JSON.stringify(request),
},
this.name
);
if (options.stream) {
return this.handleStreamResponse(response, model);
}
return this.handleNonStreamResponse(await response.json(), model);
} catch (error) {
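// handleProviderError is assumed to normalize and re-throw (i.e. return type
// `never`); otherwise this catch would let chat() resolve to undefined.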
handleProviderError(error, this.name);
}
}
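/**
 * Parses a non-streaming completion. Groq mirrors the OpenAI chat completions
 * response format; the fields read below are, roughly (a sketch, not the full
 * schema):
 *
 *   { choices: [{ message: { content }, finish_reason }],
 *     usage: { prompt_tokens, completion_tokens, total_tokens } }
 */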
private handleNonStreamResponse(data: any, model: string): ChatResponse {
const choice = data.choices?.[0];
if (!choice) {
throw new Error('No choices in response');
}
return {
content: choice.message?.content || '',
finishReason: choice.finish_reason,
usage: data.usage ? {
promptTokens: data.usage.prompt_tokens,
completionTokens: data.usage.completion_tokens,
totalTokens: data.usage.total_tokens,
} : undefined,
model,
raw: data,
};
}
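/**
 * Lazily consumes the SSE stream as an async generator. Each parsed event is
 * expected to follow the OpenAI-compatible delta format, roughly:
 *
 *   { choices: [{ delta: { content? }, finish_reason, index }] }
 *
 * parseSSEStream (not shown here) is assumed to yield each event's decoded
 * JSON payload and to handle the terminal `data: [DONE]` sentinel itself.
 */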
private async *handleStreamResponse(response: Response, model: string): AsyncIterable<StreamChunk> {
if (!response.body) {
throw new Error('No response body for streaming');
}
for await (const chunk of parseSSEStream(response.body)) {
const choice = chunk.choices?.[0];
if (!choice) continue;
const content = choice.delta?.content || '';
const finishReason = choice.finish_reason;
if (content || finishReason) {
yield {
content,
finishReason,
index: choice.index,
};
}
}
}
}
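/*
 * Example usage (a minimal sketch: the zero-argument constructor and the
 * `apiKey` field on ChatOptions are assumptions, since BaseAdapter and the
 * types module are not shown in this file):
 *
 * const adapter = new GroqAdapter();
 *
 * // Non-streaming call resolves to a ChatResponse
 * const res = await adapter.chat({
 *   model: 'groq/llama-3.1-8b-instant', // the "groq/" prefix is stripped by the adapter
 *   messages: [{ role: 'user', content: 'Hello!' }],
 *   apiKey: process.env.GROQ_API_KEY,
 * });
 * console.log((res as ChatResponse).content);
 *
 * // With stream: true the call resolves to an AsyncIterable<StreamChunk>;
 * // narrow the union before iterating
 * const stream = await adapter.chat({
 *   model: 'groq/llama-3.1-8b-instant',
 *   messages: [{ role: 'user', content: 'Hello!' }],
 *   apiKey: process.env.GROQ_API_KEY,
 *   stream: true,
 * });
 * for await (const chunk of stream as AsyncIterable<StreamChunk>) {
 *   process.stdout.write(chunk.content);
 * }
 */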