survey-mcp-server
A survey management server that handles survey creation, response collection, analysis, and reporting, with database access for data management
120 lines • 4.28 kB
JavaScript
import OpenAI from 'openai';
import { config } from './config.js';
import { logger } from './logger.js';

export class LLMClient {
    constructor(apiKey) {
        const key = apiKey || config.openai.apiKey;
        if (!key) {
            throw new Error('OpenAI API key is required');
        }
        this.openai = new OpenAI({
            apiKey: key
        });
        logger.info('LLM client initialized successfully');
    }
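    /**
     * Send a single prompt (plus an optional system prompt) and return the reply.
     * When jsonMode is true, the response is parsed and returned as an object;
     * otherwise the raw completion text is returned.
     */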
    async ask(query, systemPrompt, modelName = 'gpt-4o', jsonMode = false, temperature = 0.1, maxTokens) {
        try {
            const messages = [];
            if (systemPrompt) {
                messages.push({
                    role: 'system',
                    content: systemPrompt
                });
            }
            messages.push({
                role: 'user',
                content: query
            });
            const requestParams = {
                model: modelName,
                messages,
                temperature,
                max_tokens: maxTokens
            };
            if (jsonMode) {
                requestParams.response_format = { type: 'json_object' };
            }
            logger.debug(`Making LLM request with model: ${modelName}`);
            const response = await this.openai.chat.completions.create(requestParams);
            const content = response.choices[0]?.message?.content;
            if (!content) {
                throw new Error('No content in LLM response');
            }
            // Parse JSON when jsonMode is enabled
            if (jsonMode) {
                try {
                    return JSON.parse(content);
                }
                catch (parseError) {
                    logger.error('Failed to parse JSON response:', parseError);
                    throw new Error('Invalid JSON response from LLM');
                }
            }
            return content;
        }
        catch (error) {
            logger.error('LLM request failed:', error);
            throw error;
        }
    }
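    /**
     * Run a chat completion over a full, caller-supplied message array
     * (useful for multi-turn or multimodal requests).
     */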
    async chatCompletion(messages, modelName = 'gpt-4o', temperature = 0.1, maxTokens) {
        try {
            const requestParams = {
                model: modelName,
                messages,
                temperature,
                max_tokens: maxTokens
            };
            logger.debug(`Making chat completion request with model: ${modelName}`);
            const response = await this.openai.chat.completions.create(requestParams);
            const content = response.choices[0]?.message?.content;
            if (!content) {
                throw new Error('No content in chat completion response');
            }
            return content;
        }
        catch (error) {
            logger.error('Chat completion request failed:', error);
            throw error;
        }
    }
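    /**
     * Return the embedding vector (an array of floats) for the given text.
     */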
    async generateEmbedding(text, model = 'text-embedding-ada-002') {
        try {
            logger.debug(`Generating embedding for text of length: ${text.length}`);
            const response = await this.openai.embeddings.create({
                model,
                input: text
            });
            return response.data[0].embedding;
        }
        catch (error) {
            logger.error('Embedding generation failed:', error);
            throw error;
        }
    }
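    /**
     * Extract the digits from a base64-encoded captcha image by sending it
     * to a vision-capable model as an image_url content part.
     */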
    async solveCaptcha(captchaImage, prompt = "This is a captcha image containing only numbers. Please extract and return only the numbers you see in the image, nothing else.") {
        try {
            const messages = [
                {
                    role: 'user',
                    content: [
                        { type: 'text', text: prompt },
                        {
                            type: 'image_url',
                            image_url: {
                                url: `data:image/jpeg;base64,${captchaImage}`
                            }
                        }
                    ]
                }
            ];
            const result = await this.chatCompletion(messages, 'gpt-4o-mini');
            return result.trim();
        }
        catch (error) {
            logger.error('Captcha solving failed:', error);
            throw error;
        }
    }
}
//# sourceMappingURL=llm.js.map
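
A minimal usage sketch (not part of the file above). It assumes an API key is available via an OPENAI_API_KEY environment variable or config.openai.apiKey; the prompts and the structured-output shape shown are illustrative only.

import { LLMClient } from './llm.js';

const llm = new LLMClient(process.env.OPENAI_API_KEY);

// Plain text completion with a system prompt.
const summary = await llm.ask(
    'Summarize the survey results in two sentences.',
    'You are a survey analysis assistant.'
);

// Structured output: with jsonMode=true the reply is parsed and returned as an object.
const stats = await llm.ask(
    'Return JSON of the form {"responses": <count>, "completionRate": <0-1>}.',
    undefined,
    'gpt-4o',
    true
);

// Embedding for similarity search over free-text answers.
const vector = await llm.generateEmbedding('The onboarding flow was confusing.');
console.log(summary, stats, vector.length);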