tlnt
Version:
TLNT - HMS-Powered Multi-Agent Platform with Government Agency Analysis, Deep Research, and Enterprise-Ready Deployment. Self-optimizing multi-domain AI agent with continuous learning and enterprise-grade performance monitoring.
JavaScript
import OpenAI from 'openai';
import Anthropic from '@anthropic-ai/sdk';
// Thin wrapper around the official OpenAI SDK, normalized to the shared provider interface.
export class OpenAIProvider {
    name = 'openai';
    client;
    constructor(apiKey) {
        this.client = new OpenAI({
            apiKey: apiKey || process.env.OPENAI_API_KEY,
        });
    }
    async isAvailable() {
        try {
            // Listing models is a cheap probe that validates both connectivity and the API key.
            await this.client.models.list();
            return true;
        }
        catch {
            return false;
        }
    }
    async chat(messages, opts = {}) {
        const response = await this.client.chat.completions.create({
            model: opts.model || 'gpt-4o-mini',
            messages: messages.map(msg => ({
                role: msg.role,
                content: msg.content
            })),
            // Use ?? rather than || so an explicit temperature of 0 is respected.
            temperature: opts.temperature ?? 0.7,
            max_tokens: opts.maxTokens ?? 1000,
            stream: false
        });
        const choice = response.choices[0];
        if (!choice.message.content) {
            throw new Error('No content in OpenAI response');
        }
        return {
            content: choice.message.content,
            metadata: {
                model: response.model,
                tokensUsed: response.usage?.total_tokens ?? 0,
                finishReason: choice.finish_reason || 'unknown'
            }
        };
    }
}
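// Example (sketch): calling OpenAIProvider directly. Assumes OPENAI_API_KEY is
// set in the environment; the prompt and option values below are illustrative.
//
//   const openai = new OpenAIProvider();
//   if (await openai.isAvailable()) {
//       const reply = await openai.chat(
//           [{ role: 'user', content: 'Summarize this module in one sentence.' }],
//           { temperature: 0.2, maxTokens: 200 }
//       );
//       console.log(reply.content, reply.metadata.tokensUsed);
//   }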
// Wrapper around the Anthropic SDK; unlike OpenAI, Anthropic takes the system
// prompt as a separate field rather than as a message in the list.
export class AnthropicProvider {
    name = 'anthropic';
    client;
    constructor(apiKey) {
        this.client = new Anthropic({
            apiKey: apiKey || process.env.ANTHROPIC_API_KEY,
        });
    }
    async isAvailable() {
        try {
            // Just check whether we have an API key - an actual API test would be too expensive
            return !!this.client.apiKey;
        }
        catch {
            return false;
        }
    }
    async chat(messages, opts = {}) {
        // Convert messages to Anthropic format: system messages cannot appear
        // in the messages array, so they are filtered out here.
        const anthropicMessages = messages
            .filter(msg => msg.role !== 'system')
            .map(msg => ({
                role: msg.role,
                content: msg.content
            }));
        // Extract the system message if present (only the first one is used)
        const systemMessage = messages.find(msg => msg.role === 'system')?.content;
        const response = await this.client.messages.create({
            model: opts.model || 'claude-3-sonnet-20240229',
            max_tokens: opts.maxTokens ?? 1000,
            temperature: opts.temperature ?? 0.7,
            system: systemMessage,
            messages: anthropicMessages
        });
        // Anthropic returns an array of content blocks; concatenate the text blocks.
        const content = response.content
            .filter((block) => block.type === 'text')
            .map((block) => block.text)
            .join('\n');
        return {
            content,
            metadata: {
                model: response.model,
                tokensUsed: response.usage.input_tokens + response.usage.output_tokens,
                finishReason: response.stop_reason || 'unknown'
            }
        };
    }
}
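// Example (sketch): AnthropicProvider accepts the same message shape as the other
// providers; the system message is split out internally as shown above. Assumes
// ANTHROPIC_API_KEY is set; the prompt text is illustrative only.
//
//   const anthropic = new AnthropicProvider();
//   const reply = await anthropic.chat([
//       { role: 'system', content: 'You are a terse assistant.' },
//       { role: 'user', content: 'Explain fallback chains in one line.' }
//   ]);
//   console.log(reply.metadata.finishReason);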
// Provider for any OpenAI-compatible local server; the default URL matches
// LM Studio's built-in server.
export class LocalProvider {
    name = 'local';
    baseUrl;
    constructor(baseUrl = 'http://localhost:1234/v1') {
        this.baseUrl = baseUrl;
    }
    async isAvailable() {
        try {
            const response = await fetch(`${this.baseUrl}/models`);
            return response.ok;
        }
        catch {
            return false;
        }
    }
    async chat(messages, opts = {}) {
        const response = await fetch(`${this.baseUrl}/chat/completions`, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify({
                model: opts.model || 'local-model',
                messages: messages.map(msg => ({
                    role: msg.role,
                    content: msg.content
                })),
                temperature: opts.temperature ?? 0.7,
                max_tokens: opts.maxTokens ?? 1000,
                stream: false
            })
        });
        if (!response.ok) {
            throw new Error(`Local LLM request failed: ${response.statusText}`);
        }
        const data = await response.json();
        const choice = data.choices[0];
        // Guard against empty responses, mirroring the OpenAI provider.
        if (!choice?.message?.content) {
            throw new Error('No content in local LLM response');
        }
        return {
            content: choice.message.content,
            metadata: {
                model: data.model,
                tokensUsed: data.usage?.total_tokens ?? 0,
                finishReason: choice.finish_reason || 'unknown'
            }
        };
    }
}
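// Example (sketch): LocalProvider works with any server exposing an
// OpenAI-compatible /v1 API. The Ollama URL below is an assumption about your
// local setup; omit the argument to use the LM Studio default.
//
//   const local = new LocalProvider('http://localhost:11434/v1');
//   if (await local.isAvailable()) {
//       const reply = await local.chat([{ role: 'user', content: 'ping' }]);
//       console.log(reply.content);
//   }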
// Routes chat requests across the configured providers, falling back in order
// until one succeeds.
export class LLMRouter {
    providers = [];
    fallbackChain = [];
    constructor() {
        this.setupProviders();
    }
    setupProviders() {
        // Initialize providers based on available API keys
        if (process.env.OPENAI_API_KEY) {
            this.providers.push(new OpenAIProvider());
            this.fallbackChain.push('openai');
        }
        if (process.env.ANTHROPIC_API_KEY) {
            this.providers.push(new AnthropicProvider());
            this.fallbackChain.push('anthropic');
        }
        // Always add the local provider as the final fallback
        this.providers.push(new LocalProvider());
        this.fallbackChain.push('local');
    }
    async chat(messages, opts = {}) {
        let providersToTry;
        if (opts.provider) {
            // Use the specific provider if one was requested
            const provider = this.providers.find(p => p.name === opts.provider);
            if (!provider) {
                throw new Error(`Provider '${opts.provider}' not available`);
            }
            providersToTry = [provider];
        }
        else {
            // Otherwise walk the fallback chain in order
            providersToTry = this.fallbackChain
                .map(name => this.providers.find(p => p.name === name))
                .filter(Boolean);
        }
        let lastError = null;
        for (const provider of providersToTry) {
            try {
                if (await provider.isAvailable()) {
                    return await provider.chat(messages, opts);
                }
            }
            catch (error) {
                lastError = error instanceof Error ? error : new Error(String(error));
                console.warn(`Provider ${provider.name} failed:`, error);
                continue;
            }
        }
        throw new Error(`All LLM providers failed. Last error: ${lastError?.message ?? 'no provider was available'}`);
    }
    async getAvailableProviders() {
        const available = [];
        for (const provider of this.providers) {
            try {
                if (await provider.isAvailable()) {
                    available.push(provider.name);
                }
            }
            catch {
                // Provider not available
            }
        }
        return available;
    }
    // Register an additional provider; add its name to the fallback chain for
    // it to participate in automatic fallback.
    addProvider(provider) {
        this.providers.push(provider);
    }
    // Replace the fallback order, e.g. ['local', 'openai'].
    setFallbackChain(chain) {
        this.fallbackChain = chain;
    }
}
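// Example (sketch): the router walks its fallback chain (openai -> anthropic ->
// local) until one provider responds. The explicit provider/model options below
// are illustrative and assume ANTHROPIC_API_KEY is set.
//
//   const router = new LLMRouter();
//   console.log(await router.getAvailableProviders());
//   const reply = await router.chat(
//       [{ role: 'user', content: 'Hello' }],
//       { provider: 'anthropic', model: 'claude-3-sonnet-20240229' }
//   );
//   router.setFallbackChain(['local', 'openai']); // prefer the local model first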
//# sourceMappingURL=llmProvider.js.map