promptforge
Adaptive Prompt Intelligence & Orchestration SDK - Manage, optimize, and serve prompts for LLMs with versioning, feedback loops, and multi-provider support
JavaScript
import { LLMProvider } from '../types.js';
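/**
* Adapter for the OpenAI Chat Completions API. The `openai` package is
* imported lazily, and the key is taken from `config.apiKey` or the
* OPENAI_API_KEY environment variable.
*
* @example
* // Sketch only; the model name is illustrative.
* const result = await new OpenAIAdapter().execute('Say hello', {
*   model: 'gpt-4o-mini',
*   temperature: 0.2,
* });
* console.log(result.content, result.outputTokens);
*/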
export class OpenAIAdapter {
async execute(prompt, config) {
const { default: OpenAI } = await import('openai');
const client = new OpenAI({
apiKey: config.apiKey || process.env.OPENAI_API_KEY,
baseURL: config.baseURL,
});
const response = await client.chat.completions.create({
model: config.model,
messages: [{ role: 'user', content: prompt }],
temperature: config.temperature,
max_tokens: config.maxTokens,
top_p: config.topP,
frequency_penalty: config.frequencyPenalty,
presence_penalty: config.presencePenalty,
});
const choice = response.choices[0];
if (!choice || !choice.message.content) {
throw new Error('No response from OpenAI');
}
return {
content: choice.message.content,
inputTokens: response.usage?.prompt_tokens || 0,
outputTokens: response.usage?.completion_tokens || 0,
model: response.model,
provider: LLMProvider.OPENAI,
finishReason: choice.finish_reason,
metadata: { id: response.id },
};
}
async isAvailable() {
return !!(process.env.OPENAI_API_KEY);
}
}
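/**
* Adapter for the Anthropic Messages API. Uses `config.apiKey` or the
* ANTHROPIC_API_KEY environment variable, and defaults `max_tokens` to 4096
* when `config.maxTokens` is not set.
*/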
export class AnthropicAdapter {
async execute(prompt, config) {
const { default: Anthropic } = await import('@anthropic-ai/sdk');
const client = new Anthropic({
apiKey: config.apiKey || process.env.ANTHROPIC_API_KEY,
});
const response = await client.messages.create({
model: config.model,
max_tokens: config.maxTokens || 4096,
temperature: config.temperature,
top_p: config.topP,
messages: [{ role: 'user', content: prompt }],
});
const content = response.content[0];
// Guard against empty responses as well as non-text content blocks.
if (!content || content.type !== 'text') {
throw new Error('Unexpected response type from Anthropic');
}
return {
content: content.text,
inputTokens: response.usage.input_tokens,
outputTokens: response.usage.output_tokens,
model: response.model,
provider: LLMProvider.ANTHROPIC,
finishReason: response.stop_reason || undefined,
metadata: { id: response.id },
};
}
async isAvailable() {
return !!(process.env.ANTHROPIC_API_KEY);
}
}
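/**
* Adapter for Google Generative AI (Gemini) via `@google/generative-ai`.
* Uses `config.apiKey` or the GOOGLE_API_KEY environment variable; token
* counts are read from the response's usageMetadata when present.
*/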
export class GoogleAdapter {
async execute(prompt, config) {
const { GoogleGenerativeAI } = await import('@google/generative-ai');
const genAI = new GoogleGenerativeAI(config.apiKey || process.env.GOOGLE_API_KEY || '');
const model = genAI.getGenerativeModel({ model: config.model });
const result = await model.generateContent({
contents: [{ role: 'user', parts: [{ text: prompt }] }],
generationConfig: {
temperature: config.temperature,
maxOutputTokens: config.maxTokens,
topP: config.topP,
},
});
const response = result.response;
const text = response.text();
const usageMetadata = response.usageMetadata;
const inputTokens = usageMetadata?.promptTokenCount || 0;
const outputTokens = usageMetadata?.candidatesTokenCount || 0;
return {
content: text,
inputTokens,
outputTokens,
model: config.model,
provider: LLMProvider.GOOGLE,
metadata: { candidates: response.candidates?.length },
};
}
async isAvailable() {
return !!(process.env.GOOGLE_API_KEY);
}
}
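/**
* Adapter for the Mistral chat API via `@mistralai/mistralai`. Uses
* `config.apiKey` or the MISTRAL_API_KEY environment variable.
*/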
export class MistralAdapter {
async execute(prompt, config) {
const MistralClient = (await import('@mistralai/mistralai')).default;
const client = new MistralClient(config.apiKey || process.env.MISTRAL_API_KEY || '');
const response = await client.chat({
model: config.model,
messages: [{ role: 'user', content: prompt }],
temperature: config.temperature,
maxTokens: config.maxTokens,
topP: config.topP,
});
const choice = response.choices[0];
if (!choice || !choice.message.content) {
throw new Error('No response from Mistral');
}
return {
content: choice.message.content,
inputTokens: response.usage?.prompt_tokens || 0,
outputTokens: response.usage?.completion_tokens || 0,
model: response.model || config.model,
provider: LLMProvider.MISTRAL,
finishReason: choice.finish_reason,
metadata: { id: response.id },
};
}
async isAvailable() {
return !!(process.env.MISTRAL_API_KEY);
}
}
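/**
* Adapter for a local Ollama server, called over HTTP with axios. The base
* URL comes from `config.baseURL`, OLLAMA_BASE_URL, or http://localhost:11434;
* availability is checked by pinging the /api/tags endpoint.
*/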
export class OllamaAdapter {
async execute(prompt, config) {
const axios = (await import('axios')).default;
const baseURL = config.baseURL || process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
const response = await axios.post(`${baseURL}/api/generate`, {
model: config.model,
prompt,
stream: false,
options: {
temperature: config.temperature,
top_p: config.topP,
num_predict: config.maxTokens,
},
});
return {
content: response.data.response,
inputTokens: response.data.prompt_eval_count || 0,
outputTokens: response.data.eval_count || 0,
model: config.model,
provider: LLMProvider.OLLAMA,
metadata: {
total_duration: response.data.total_duration,
load_duration: response.data.load_duration,
},
};
}
async isAvailable() {
try {
const axios = (await import('axios')).default;
const baseURL = process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
await axios.get(`${baseURL}/api/tags`);
return true;
}
catch {
return false;
}
}
}
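/**
* Routes prompts to a registered adapter, trying the configured provider
* first and then each fallback provider in order. Fallback providers receive
* the same config (including `model`); only `provider` is overridden.
*
* @example
* // Sketch only; which providers are actually available depends on the API
* // keys set in the environment, and the model name is illustrative.
* const router = new LLMRouter();
* const result = await router.execute(
*   'Summarize this changelog',
*   { provider: LLMProvider.OPENAI, model: 'gpt-4o-mini', maxTokens: 256 },
*   [LLMProvider.ANTHROPIC]
* );
* console.log(result.provider, result.content);
*/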
export class LLMRouter {
adapters;
constructor() {
this.adapters = new Map([
[LLMProvider.OPENAI, new OpenAIAdapter()],
[LLMProvider.ANTHROPIC, new AnthropicAdapter()],
[LLMProvider.GOOGLE, new GoogleAdapter()],
[LLMProvider.MISTRAL, new MistralAdapter()],
[LLMProvider.OLLAMA, new OllamaAdapter()],
]);
}
/**
* Execute a prompt with the specified provider, falling back to the
* remaining providers in order if it is unavailable or throws
*/
async execute(prompt, config, fallbackProviders = []) {
const providersToTry = [config.provider, ...fallbackProviders];
let lastError;
for (const provider of providersToTry) {
const adapter = this.adapters.get(provider);
if (!adapter) {
continue;
}
try {
const isAvailable = await adapter.isAvailable();
if (!isAvailable) {
continue;
}
const providerConfig = { ...config, provider };
return await adapter.execute(prompt, providerConfig);
}
catch (error) {
// Normalize non-Error throwables so the warning below always has a message.
lastError = error instanceof Error ? error : new Error(String(error));
console.warn(`Provider ${provider} failed: ${lastError.message}`);
continue;
}
}
throw new Error(`All providers failed. Last error: ${lastError?.message || 'Unknown error'}`);
}
/**
* Get available providers
*/
async getAvailableProviders() {
const available = [];
for (const [provider, adapter] of this.adapters.entries()) {
if (await adapter.isAvailable()) {
available.push(provider);
}
}
return available;
}
/**
* Check if a specific provider is available
*/
async isProviderAvailable(provider) {
const adapter = this.adapters.get(provider);
if (!adapter) {
return false;
}
return adapter.isAvailable();
}
}
//# sourceMappingURL=llm-adapters.js.map