lamplighter-mcp
Version:
An intelligent context engine for AI-assisted software development
137 lines (116 loc) • 4.28 kB
text/typescript
import 'openai/shims/node'; // Add node fetch shim
import OpenAI from 'openai';
import dotenv from 'dotenv';
// Load environment variables
dotenv.config();
// Define AI configuration types
/** Optional sampling configuration shared by all AI provider implementations. */
export interface AIConfig {
  /** Sampling temperature; implementations default to 0.7 when omitted. */
  temperature?: number;
  /** Upper bound on generated tokens; provider default applies when omitted (Gemini path uses 1024). */
  maxTokens?: number;
  /** Nucleus-sampling parameter; implementations default to 1 when omitted. */
  topP?: number;
}
// Define the base interface for all AI service implementations
/** Contract implemented by every AI provider backend. */
export interface IAIService {
  /**
   * Generates a text completion for the given prompt.
   * @param prompt - The user prompt sent to the model.
   * @param config - Optional sampling overrides; provider defaults apply when omitted.
   * @returns The generated text, or an empty string when the model returns no content.
   * @throws Error wrapping the provider failure when the request fails.
   */
  generateText(prompt: string, config?: AIConfig): Promise<string>;
}
// Factory class to get the appropriate AI service implementation
/**
 * Facade that lazily selects and caches a provider implementation based on
 * the AI_PROVIDER environment variable ('google' or 'openai'; defaults to
 * 'openai' for unknown or missing values).
 */
export class AIService {
  private static instance: IAIService;

  /** Returns the cached provider, constructing it on first use. */
  static getInstance(): IAIService {
    if (this.instance) {
      return this.instance;
    }
    // Provider choice is read once; later env changes do not rebuild the singleton.
    const provider = process.env.AI_PROVIDER?.toLowerCase() || 'openai';
    switch (provider) {
      case 'google':
        this.instance = new GoogleGeminiService();
        break;
      case 'openai':
      default:
        this.instance = new OpenAIService();
        break;
    }
    console.log(`[AIService] Initialized with provider: ${provider}`);
    return this.instance;
  }

  /** Generates text by delegating to the configured provider. */
  static async generateText(prompt: string, config?: AIConfig): Promise<string> {
    return this.getInstance().generateText(prompt, config);
  }
}
// OpenAI implementation
/** IAIService implementation backed by the OpenAI chat-completions API. */
class OpenAIService implements IAIService {
  private client: OpenAI;
  private modelName: string;

  constructor() {
    const apiKey = process.env.OPENAI_API_KEY;
    if (!apiKey) {
      throw new Error('OpenAI API key is required. Set OPENAI_API_KEY in your environment variables.');
    }
    this.client = new OpenAI({ apiKey });
    // Model is shared with the Gemini path via AI_MODEL; falls back to gpt-4.
    this.modelName = process.env.AI_MODEL || 'gpt-4';
    console.log(`[OpenAIService] Initialized with model: ${this.modelName}`);
  }

  /**
   * Sends the prompt as a single user message and returns the first choice's
   * content, or '' when the response carries none.
   */
  async generateText(prompt: string, config?: AIConfig): Promise<string> {
    const request = {
      model: this.modelName,
      messages: [{ role: 'user' as const, content: prompt }],
      temperature: config?.temperature ?? 0.7,
      max_tokens: config?.maxTokens,
      top_p: config?.topP ?? 1,
    };
    try {
      const response = await this.client.chat.completions.create(request);
      const text = response.choices[0]?.message?.content;
      return text || '';
    } catch (error) {
      console.error('[OpenAIService] Error generating text:', error);
      const detail = error instanceof Error ? error.message : String(error);
      throw new Error(`Failed to generate text with OpenAI: ${detail}`);
    }
  }
}
// Google Gemini implementation
/** IAIService implementation backed by the Google Gemini generateContent REST API. */
class GoogleGeminiService implements IAIService {
  private modelName: string;
  private apiKey: string;

  constructor() {
    this.apiKey = process.env.GOOGLE_API_KEY || '';
    if (!this.apiKey) {
      throw new Error('Google API key is required. Set GOOGLE_API_KEY in your environment variables.');
    }
    // Model is shared with the OpenAI path via AI_MODEL; falls back to gemini-pro.
    this.modelName = process.env.AI_MODEL || 'gemini-pro';
    console.log(`[GoogleGeminiService] Initialized with model: ${this.modelName}`);
  }

  /**
   * Sends the prompt to the Gemini generateContent endpoint and returns the
   * first candidate's first text part, or '' when the API returns no
   * candidates (e.g. when the prompt is blocked by safety filters).
   * @throws Error wrapping the HTTP status/body or network failure.
   */
  async generateText(prompt: string, config?: AIConfig): Promise<string> {
    try {
      // Calling the REST endpoint directly since Google's SDK is changing frequently.
      const url = `https://generativelanguage.googleapis.com/v1/models/${this.modelName}:generateContent`;
      const response = await fetch(url, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          // Send the key as a header instead of a ?key= query parameter so it
          // does not leak into URLs captured by logs, proxies, or error traces.
          'x-goog-api-key': this.apiKey,
        },
        body: JSON.stringify({
          contents: [
            {
              parts: [{ text: prompt }],
            },
          ],
          generationConfig: {
            temperature: config?.temperature ?? 0.7,
            topP: config?.topP ?? 1,
            maxOutputTokens: config?.maxTokens ?? 1024,
          },
        }),
      });
      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`Google API error: ${response.status} ${errorText}`);
      }
      const data = await response.json();
      // Guard every level of the path: previously `data.candidates[0]` and
      // `parts[0]` were unguarded and threw a TypeError whenever the API
      // returned no candidates or an empty parts array.
      return data?.candidates?.[0]?.content?.parts?.[0]?.text ?? '';
    } catch (error) {
      console.error('[GoogleGeminiService] Error generating text:', error);
      throw new Error(`Failed to generate text with Google Gemini: ${error instanceof Error ? error.message : String(error)}`);
    }
  }
}