// rag-cli-tester — provider implementations
// A lightweight CLI tool for testing RAG (Retrieval-Augmented Generation)
// systems with different embedding combinations.
;
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.ProviderManager = exports.OpenAICompatibleLLMProvider = exports.GeminiEmbeddingProvider = exports.OpenAIEmbeddingProvider = exports.LocalEmbeddingProvider = void 0;
const chalk_1 = __importDefault(require("chalk"));
// Embedding provider that runs a local transformer model via @xenova/transformers.
// Requires no API key; the model is resolved from config.localModel or a default.
class LocalEmbeddingProvider {
    constructor(config) {
        // Feature-extraction pipeline; created lazily in initialize().
        this.pipeline = null;
        this.config = config;
    }
    // Loads @xenova/transformers and builds the feature-extraction pipeline.
    // The eval('import(...)') indirection prevents the CJS transpiler from
    // rewriting the dynamic ESM import into require() (the package is ESM-only).
    async initialize() {
        try {
            const transformers = await eval('import("@xenova/transformers")');
            this.pipeline = await transformers.pipeline('feature-extraction', this.config.localModel || 'Xenova/all-MiniLM-L6-v2');
        }
        catch (error) {
            console.error('Failed to initialize local embedding model:', error);
            throw error;
        }
    }
    // Returns the embedding for `text` as a plain number[].
    // NOTE(review): the pipeline is invoked without { pooling: 'mean', normalize: true },
    // so `result.data` is presumably the raw (per-token) tensor buffer flattened —
    // confirm the resulting dimensionality matches what downstream comparison expects.
    async generateEmbedding(text) {
        if (!this.pipeline) {
            throw new Error('Pipeline not initialized');
        }
        const result = await this.pipeline(text);
        // result.data may be a TypedArray; normalize it to a regular array.
        return Array.isArray(result.data) ? result.data : Array.from(result.data);
    }
}
exports.LocalEmbeddingProvider = LocalEmbeddingProvider;
/**
 * Embedding provider backed by the OpenAI embeddings REST API.
 */
class OpenAIEmbeddingProvider {
    constructor(config) {
        this.config = config;
    }
    /** Fails fast when no OpenAI API key was configured. */
    async initialize() {
        if (!this.config.openaiApiKey) {
            throw new Error('OpenAI API key is required');
        }
    }
    /**
     * Requests an embedding for `text` and returns the vector (number[]).
     * Logs and rethrows on HTTP, network, or parse failure.
     */
    async generateEmbedding(text) {
        const payload = {
            model: this.config.openaiModel || 'text-embedding-3-small',
            input: text,
        };
        try {
            const response = await fetch('https://api.openai.com/v1/embeddings', {
                method: 'POST',
                headers: {
                    'Authorization': `Bearer ${this.config.openaiApiKey}`,
                    'Content-Type': 'application/json',
                },
                body: JSON.stringify(payload),
            });
            const data = await response.json();
            if (!response.ok) {
                throw new Error(`OpenAI API error: ${data.error?.message || response.statusText}`);
            }
            return data.data[0].embedding;
        }
        catch (error) {
            console.error('Failed to generate OpenAI embedding:', error);
            throw error;
        }
    }
}
exports.OpenAIEmbeddingProvider = OpenAIEmbeddingProvider;
/**
 * Embedding provider backed by Google's Generative Language (Gemini) API.
 */
class GeminiEmbeddingProvider {
    constructor(config) {
        this.config = config;
    }
    /** Fails fast when no Gemini API key was configured. */
    async initialize() {
        if (!this.config.geminiApiKey) {
            throw new Error('Gemini API key is required');
        }
    }
    /**
     * Calls the :embedContent endpoint for `text` and returns the embedding values.
     * Logs and rethrows on HTTP, network, or parse failure.
     */
    async generateEmbedding(text) {
        const model = this.config.geminiModel || 'embedding-001';
        const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:embedContent?key=${this.config.geminiApiKey}`;
        try {
            const response = await fetch(url, {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: JSON.stringify({ content: { parts: [{ text }] } }),
            });
            const data = await response.json();
            if (!response.ok) {
                throw new Error(`Gemini API error: ${data.error?.message || response.statusText}`);
            }
            return data.embedding.values;
        }
        catch (error) {
            console.error('Failed to generate Gemini embedding:', error);
            throw error;
        }
    }
}
/**
 * Chat-completion LLM provider speaking several provider dialects:
 * 'openai', 'gemini', 'anthropic', and any OpenAI-compatible 'custom' endpoint.
 */
class OpenAICompatibleLLMProvider {
    constructor(config) {
        this.config = config;
    }
    /** Fails fast when no API key was configured. */
    async initialize() {
        if (!this.config.apiKey) {
            throw new Error('API key is required');
        }
    }
    /**
     * Sends `prompt` (system role where supported) and `context` to the
     * configured provider and returns the generated text.
     * Detects HTML error pages (a misconfigured endpoint) before JSON parsing.
     * Logs and rethrows on HTTP, network, or parse failure.
     */
    async generateText(prompt, context) {
        try {
            // Determine the API endpoint and format based on provider
            const { endpoint, headers, body, responseExtractor } = this.getProviderConfig(prompt, context);
            console.log(chalk_1.default.gray(` 🔗 API Endpoint: ${endpoint}`));
            console.log(chalk_1.default.gray(` 📤 Request Body: ${JSON.stringify(body, null, 2)}`));
            const response = await fetch(endpoint, {
                method: 'POST',
                headers,
                body: JSON.stringify(body),
            });
            // Check if response is HTML (error page) instead of JSON
            const contentType = response.headers.get('content-type');
            if (contentType && contentType.includes('text/html')) {
                const htmlResponse = await response.text();
                console.error(chalk_1.default.red(`❌ API returned HTML instead of JSON:`));
                console.error(chalk_1.default.red(` Status: ${response.status} ${response.statusText}`));
                console.error(chalk_1.default.red(` Endpoint: ${endpoint}`));
                console.error(chalk_1.default.red(` Response preview: ${htmlResponse.substring(0, 200)}...`));
                throw new Error(`API endpoint returned HTML instead of JSON. Check your endpoint URL: ${endpoint}`);
            }
            const data = await response.json();
            if (!response.ok) {
                throw new Error(`${this.config.provider} API error: ${data.error?.message || response.statusText}`);
            }
            return responseExtractor(data);
        }
        catch (error) {
            console.error(`Failed to generate ${this.config.provider} text:`, error);
            throw error;
        }
    }
    /**
     * Builds { endpoint, headers, body, responseExtractor } for the active provider.
     * OpenAI-style providers send `prompt` as the system message and `context` as
     * the user message; Gemini and Anthropic receive both joined into one prompt.
     * @throws Error when this.config.provider is not recognized.
     */
    getProviderConfig(prompt, context) {
        const basePrompt = `${prompt}\n\nContext: ${context}`;
        switch (this.config.provider) {
            case 'openai':
                return {
                    endpoint: 'https://api.openai.com/v1/chat/completions',
                    headers: {
                        'Authorization': `Bearer ${this.config.apiKey}`,
                        'Content-Type': 'application/json',
                    },
                    body: {
                        model: this.config.model || 'gpt-4o',
                        messages: [
                            { role: 'system', content: prompt },
                            { role: 'user', content: context }
                        ],
                        temperature: 0.7,
                        max_tokens: 1000,
                    },
                    responseExtractor: (data) => data.choices[0].message.content.trim()
                };
            case 'gemini':
                return {
                    endpoint: `https://generativelanguage.googleapis.com/v1beta/models/${this.config.model || 'gemini-1.5-flash'}:generateContent?key=${this.config.apiKey}`,
                    headers: {
                        'Content-Type': 'application/json',
                    },
                    body: {
                        contents: [{
                            parts: [{ text: basePrompt }]
                        }]
                    },
                    responseExtractor: (data) => data.candidates[0].content.parts[0].text.trim()
                };
            case 'anthropic':
                return {
                    endpoint: 'https://api.anthropic.com/v1/messages',
                    headers: {
                        'x-api-key': this.config.apiKey,
                        'Content-Type': 'application/json',
                        'anthropic-version': '2023-06-01',
                    },
                    body: {
                        model: this.config.model || 'claude-3-sonnet-20240229',
                        max_tokens: 1000,
                        messages: [
                            { role: 'user', content: basePrompt }
                        ],
                    },
                    responseExtractor: (data) => data.content[0].text.trim()
                };
            case 'custom': {
                // Any OpenAI-compatible API. Braced case so the `let` binding does
                // not leak lexically across the switch.
                let customEndpoint = this.config.endpoint || 'https://api.openai.com/v1/chat/completions';
                // Normalize the endpoint so it always targets /chat/completions.
                // (Replaces the original four-way chain, whose '/v1/' branch was
                // unreachable: any string ending '/v1/' was caught by the '/' branch.)
                if (!customEndpoint.endsWith('/chat/completions')) {
                    const sep = customEndpoint.endsWith('/') ? '' : '/';
                    customEndpoint = `${customEndpoint}${sep}chat/completions`;
                }
                return {
                    endpoint: customEndpoint,
                    headers: {
                        'Authorization': `Bearer ${this.config.apiKey}`,
                        'Content-Type': 'application/json',
                    },
                    body: {
                        model: this.config.model || 'gpt-4o',
                        messages: [
                            { role: 'system', content: prompt },
                            { role: 'user', content: context }
                        ],
                        temperature: 0.7,
                        max_tokens: 1000,
                        enable_thinking: false,
                        stream: false,
                    },
                    responseExtractor: (data) => data.choices[0].message.content.trim()
                };
            }
            default:
                throw new Error(`Unsupported provider: ${this.config.provider}`);
        }
    }
}
exports.OpenAICompatibleLLMProvider = OpenAICompatibleLLMProvider;
/**
 * Factory helpers for constructing embedding/LLM providers and for
 * discovering which providers have credentials in the environment.
 */
class ProviderManager {
    /** Returns the embedding provider matching config.model ('local' is the fallback). */
    static createEmbeddingProvider(config) {
        if (config.model === 'openai') {
            return new OpenAIEmbeddingProvider(config);
        }
        if (config.model === 'gemini') {
            return new GeminiEmbeddingProvider(config);
        }
        // 'local' and any unrecognized value fall back to the local model.
        return new LocalEmbeddingProvider(config);
    }
    /** Every LLM dialect is handled by the single multi-provider class. */
    static createLLMProvider(config) {
        return new OpenAICompatibleLLMProvider(config);
    }
    /**
     * Scans environment variables for API keys and reports which providers
     * can be used. The local embedding model is always available.
     */
    static detectAvailableProviders() {
        const env = process.env;
        const embedding = ['local'];
        const llm = [];
        if (env.OPENAI_API_KEY) {
            embedding.push('openai');
            llm.push('openai');
        }
        if (env.GEMINI_API_KEY || env.GOOGLE_AI_API_KEY) {
            embedding.push('gemini');
            llm.push('gemini');
        }
        if (env.ANTHROPIC_API_KEY) {
            llm.push('anthropic');
        }
        if (env.CUSTOM_API_KEY) {
            llm.push('custom');
        }
        return { embedding, llm };
    }
}
exports.ProviderManager = ProviderManager;
//# sourceMappingURL=providers.js.map