/**
 * skailan-ai — Servicio de IA y procesamiento de lenguaje natural para Skailan.
 * (AI and natural-language-processing service for Skailan.)
 */
import { GoogleGenerativeAI } from '@google/generative-ai';
import OpenAI from 'openai';
/**
 * Thin wrapper over multiple LLM providers (OpenAI, Google Gemini) that
 * exposes a single `generateText` entry point.
 *
 * The provider is chosen once at construction time from `llmConfig.provider`;
 * only the matching client is instantiated.
 */
export class AIService {
  llmConfig;
  openaiClient = null;
  geminiClient = null;

  /**
   * @param {{ provider: 'openai' | 'gemini', apiKey: string, model: string }} llmConfig
   *   Provider selection plus the credentials/model used for every request.
   */
  constructor(llmConfig) {
    this.llmConfig = llmConfig;
    if (llmConfig.provider === 'openai') {
      this.openaiClient = new OpenAI({ apiKey: llmConfig.apiKey });
    }
    else if (llmConfig.provider === 'gemini') {
      this.geminiClient = new GoogleGenerativeAI(llmConfig.apiKey);
    }
    // Any other provider leaves both clients null; generateText will throw.
  }

  /**
   * Map the provider-agnostic sampling parameters onto OpenAI's
   * snake_case request fields, pinning the configured model.
   * @param {{ temperature?: number, top_p?: number, max_tokens?: number }} [parameters]
   * @returns {object} Partial OpenAI chat-completion request body.
   */
  getOpenAIParameters(parameters) {
    return {
      model: this.llmConfig.model,
      temperature: parameters?.temperature,
      top_p: parameters?.top_p,
      max_tokens: parameters?.max_tokens,
      // Add other OpenAI specific parameters as needed
    };
  }

  /**
   * Map the provider-agnostic sampling parameters onto Gemini's
   * camelCase `generationConfig` fields.
   * @param {{ temperature?: number, top_p?: number, max_tokens?: number }} [parameters]
   * @returns {object} Gemini generationConfig object.
   */
  getGeminiParameters(parameters) {
    return {
      temperature: parameters?.temperature,
      topP: parameters?.top_p,
      maxOutputTokens: parameters?.max_tokens,
      // Add other Gemini specific parameters as needed
    };
  }

  /**
   * Generate a text completion for `prompt` with the configured provider.
   *
   * @param {string} prompt - User prompt sent as a single user message.
   * @param {{ temperature?: number, top_p?: number, max_tokens?: number }} [parameters]
   *   Optional sampling parameters, translated per provider.
   * @returns {Promise<string>} The generated text ('' if the provider returned none).
   * @throws {Error} If the configured provider is unsupported or its client
   *   was not initialized.
   */
  async generateText(prompt, parameters = {}) {
    if (this.llmConfig.provider === 'openai' && this.openaiClient) {
      const completion = await this.openaiClient.chat.completions.create({
        // Reuse the mapper instead of duplicating the field translation inline.
        ...this.getOpenAIParameters(parameters),
        messages: [{ role: 'user', content: prompt }],
      });
      return completion.choices[0]?.message?.content ?? '';
    }
    else if (this.llmConfig.provider === 'gemini' && this.geminiClient) {
      // BUGFIX: previously the sampling parameters were silently dropped for
      // Gemini; pass them through generationConfig so temperature/top_p/
      // max_tokens take effect on this provider too.
      const model = this.geminiClient.getGenerativeModel({
        model: this.llmConfig.model,
        generationConfig: this.getGeminiParameters(parameters),
      });
      const result = await model.generateContent(prompt);
      const response = await result.response;
      return response.text();
    }
    else {
      throw new Error('Unsupported LLM provider or client not initialized.');
    }
  }
}
//# sourceMappingURL=AIService.js.map