@myea/aem-mcp-handler
Version:
Advanced AEM MCP request handler with intelligent search, multi-locale support, and comprehensive content management capabilities
346 lines (345 loc) • 11.6 kB
JavaScript
import express from 'express';
import axios from 'axios';
import dotenv from 'dotenv';
import { createRequire } from 'module';
// This file is an ES module but loads the optional provider SDKs with require(),
// which only exists here when created explicitly via createRequire.
const require = createRequire(import.meta.url);
dotenv.config();
const router = express.Router();
// MCP server configuration
const MCP_SERVER_URL = process.env.AEM_MCP_URL || 'http://localhost:3000/mcp';
const MCP_USERNAME = process.env.MCP_USERNAME;
const MCP_PASSWORD = process.env.MCP_PASSWORD;
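// Monotonically increasing id for JSON-RPC requests to the MCP server.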
let requestId = 1;
// Abstract LLM Provider Interface
class LLMProvider {
config;
constructor(config) {
this.config = config;
}
// Public getter for config info
getProviderInfo() {
return {
provider: this.config.provider,
model: this.config.model,
configured: !!this.config.apiKey
};
}
}
// OpenAI Provider Implementation
class OpenAIProvider extends LLMProvider {
openai;
constructor(config) {
super(config);
try {
const OpenAI = require('openai');
this.openai = new OpenAI({
apiKey: config.apiKey || process.env.OPENAI_API_KEY,
baseURL: config.baseUrl
});
}
catch (error) {
throw new Error('OpenAI package not installed. Run: npm install openai');
}
}
async chat(messages, options) {
const formattedMessages = this.formatMessages(messages);
const requestParams = {
model: this.config.model,
messages: formattedMessages,
temperature: options?.temperature ?? this.config.temperature ?? 0.7,
};
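        // Only send max_tokens when explicitly configured; otherwise the API default applies.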
if (options?.maxTokens || this.config.maxTokens) {
requestParams.max_tokens = options?.maxTokens ?? this.config.maxTokens;
}
const completion = await this.openai.chat.completions.create(requestParams);
return this.parseResponse(completion.choices[0].message);
}
formatMessages(messages) {
return messages.map(msg => ({
role: msg.role,
content: msg.content
}));
}
parseResponse(response) {
return {
content: response.content
};
}
}
// Anthropic Provider Implementation
class AnthropicProvider extends LLMProvider {
anthropic;
constructor(config) {
super(config);
try {
const Anthropic = require('@anthropic-ai/sdk');
this.anthropic = new Anthropic({
apiKey: config.apiKey || process.env.ANTHROPIC_API_KEY,
baseURL: config.baseUrl
});
}
catch (error) {
throw new Error('Anthropic package not installed. Run: npm install @anthropic-ai/sdk');
}
}
async chat(messages, options) {
const formattedMessages = this.formatMessages(messages);
const systemMessage = formattedMessages.find(m => m.role === 'system');
const userMessages = formattedMessages.filter(m => m.role !== 'system');
const requestParams = {
model: this.config.model,
messages: userMessages,
max_tokens: options?.maxTokens ?? this.config.maxTokens ?? 4096,
temperature: options?.temperature ?? this.config.temperature ?? 0.7,
};
if (systemMessage) {
requestParams.system = systemMessage.content;
}
const completion = await this.anthropic.messages.create(requestParams);
return this.parseResponse(completion);
}
formatMessages(messages) {
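        // Anthropic's Messages API only accepts 'user' and 'assistant' roles, so tool/function
        // results are folded into 'user'; the system prompt is extracted separately in chat().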
return messages.map(msg => ({
role: msg.role === 'function' || msg.role === 'tool' ? 'user' : msg.role,
content: msg.content
}));
}
parseResponse(response) {
const content = response.content?.[0]?.text || '';
return { content };
}
}
// Ollama Provider Implementation
class OllamaProvider extends LLMProvider {
baseUrl;
constructor(config) {
super(config);
this.baseUrl = config.baseUrl || 'http://localhost:11434';
}
async chat(messages, options) {
const formattedMessages = this.formatMessages(messages);
const requestParams = {
model: this.config.model,
messages: formattedMessages,
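            // Ollama's num_predict caps the generated tokens; -1 means no explicit limit.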
options: {
temperature: options?.temperature ?? this.config.temperature ?? 0.7,
num_predict: options?.maxTokens ?? this.config.maxTokens ?? -1
},
stream: false
};
const response = await axios.post(`${this.baseUrl}/api/chat`, requestParams, {
headers: {
'Content-Type': 'application/json',
...this.config.customHeaders
}
});
return this.parseResponse(response.data);
}
formatMessages(messages) {
return messages.map(msg => ({
role: msg.role,
content: msg.content
}));
}
parseResponse(response) {
return {
content: response.message?.content || ''
};
}
}
// Generic HTTP Provider for custom APIs
class CustomHTTPProvider extends LLMProvider {
constructor(config) {
super(config);
if (!config.baseUrl) {
throw new Error('baseUrl is required for custom HTTP provider');
}
}
async chat(messages, options) {
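        // Assumes an OpenAI-compatible /chat/completions endpoint with Bearer authentication at the configured baseUrl.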
const requestParams = {
model: this.config.model,
messages: this.formatMessages(messages),
temperature: options?.temperature ?? this.config.temperature ?? 0.7,
max_tokens: options?.maxTokens ?? this.config.maxTokens
};
const response = await axios.post(`${this.config.baseUrl}/chat/completions`, requestParams, {
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.config.apiKey}`,
...this.config.customHeaders
}
});
return this.parseResponse(response.data.choices?.[0]?.message);
}
formatMessages(messages) {
return messages.map(msg => ({
role: msg.role,
content: msg.content
}));
}
parseResponse(response) {
return {
content: response?.content || ''
};
}
}
// LLM Provider Factory
class LLMProviderFactory {
static create(config) {
switch (config.provider) {
case 'openai':
return new OpenAIProvider(config);
case 'anthropic':
return new AnthropicProvider(config);
case 'ollama':
return new OllamaProvider(config);
case 'custom':
return new CustomHTTPProvider(config);
default:
throw new Error(`Unsupported LLM provider: ${config.provider}`);
}
}
}
// Configuration from environment variables
function getLLMConfig() {
const provider = (process.env.LLM_PROVIDER || 'openai');
const config = {
provider,
model: process.env.LLM_MODEL || getDefaultModel(provider),
apiKey: process.env.LLM_API_KEY,
baseUrl: process.env.LLM_BASE_URL,
temperature: process.env.LLM_TEMPERATURE ? parseFloat(process.env.LLM_TEMPERATURE) : 0.7,
maxTokens: process.env.LLM_MAX_TOKENS ? parseInt(process.env.LLM_MAX_TOKENS) : undefined
};
// Parse custom headers if provided
if (process.env.LLM_CUSTOM_HEADERS) {
try {
config.customHeaders = JSON.parse(process.env.LLM_CUSTOM_HEADERS);
}
catch (error) {
console.warn('Failed to parse LLM_CUSTOM_HEADERS:', error);
}
}
return config;
}
function getDefaultModel(provider) {
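    // Note: defaults exist for 'azure' and 'gemini', but LLMProviderFactory does not construct providers for them.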
const defaults = {
openai: 'gpt-4-turbo-preview',
anthropic: 'claude-3-sonnet-20240229',
ollama: 'llama2',
azure: 'gpt-4',
gemini: 'gemini-pro',
custom: 'gpt-3.5-turbo'
};
return defaults[provider] || 'gpt-3.5-turbo';
}
// Initialize LLM provider
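// The provider is created once at module load, so an unsupported LLM_PROVIDER or a missing SDK package throws immediately on import.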
const llmProvider = LLMProviderFactory.create(getLLMConfig());
/**
* Helper to call the AEM MCP server via JSON-RPC
*/
async function callMCP(method, params) {
const rpcReq = {
jsonrpc: "2.0",
id: requestId++,
method,
params,
};
const headers = {
"Content-Type": "application/json",
Accept: "application/json",
};
if (MCP_USERNAME && MCP_PASSWORD) {
const b64 = Buffer.from(`${MCP_USERNAME}:${MCP_PASSWORD}`).toString("base64");
headers["Authorization"] = `Basic ${b64}`;
}
const response = await axios.post(MCP_SERVER_URL, rpcReq, { headers });
return response.data;
}
/**
 * Universal chat message handler - sends the conversation to the configured LLM provider.
 * MCP tool invocation is not wired into this path; use the /mcp/call proxy route for direct MCP access.
 */
export async function handleChatMessage(message, history = []) {
try {
const providerInfo = llmProvider.getProviderInfo();
console.error(`[LLM Integration] Processing message with ${providerInfo.provider}: ${message.substring(0, 100)}...`);
// Simple system message for general assistance
const systemMessage = {
role: "system",
content: "You are a helpful AI assistant. Respond naturally to user questions and requests."
};
const userMessage = {
role: "user",
content: message
};
// Convert history to universal format
const historyMessages = history.map(msg => ({
role: msg.role,
content: msg.content
}));
// Call LLM provider for simple chat response
const response = await llmProvider.chat([systemMessage, ...historyMessages, userMessage], { temperature: 0.7 });
console.error(`[LLM Integration] LLM response received, content length: ${response.content?.length || 0}`);
return response.content || "I understood your request but couldn't generate a response.";
}
catch (error) {
console.error('[LLM Integration] Error:', error);
return `Sorry, I encountered an error: ${error.message}`;
}
}
// Express routes
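// POST /chat: plain LLM chat; MCP is not invoked on this path, so the response always reports mcpInvoked: false.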
router.post('/chat', async (req, res) => {
const { message, conversationHistory = [] } = req.body;
if (!message) {
res.status(400).json({ error: 'Message is required' });
return;
}
const reply = await handleChatMessage(message, conversationHistory);
res.json({
response: reply,
mcpInvoked: false,
functionName: null,
functionArgs: null,
timestamp: new Date().toISOString()
});
});
// Health check
router.get('/health', async (_req, res) => {
let mcpConnected = false;
try {
const test = await callMCP('listMethods');
mcpConnected = !test.error;
}
    catch { } // any failure here simply reports the MCP server as disconnected
const providerInfo = llmProvider.getProviderInfo();
res.json({
server: 'healthy',
llm: providerInfo,
mcp: mcpConnected ? 'connected' : 'disconnected',
timestamp: new Date().toISOString()
});
});
// List available MCP methods
router.get('/mcp/methods', async (_req, res) => {
try {
const list = await callMCP('listMethods');
res.json(list.result);
}
catch (e) {
res.status(500).json({ error: e.message });
}
});
// Direct MCP proxy for testing
router.post('/mcp/call', async (req, res) => {
const { method, params } = req.body;
if (!method) {
res.status(400).json({ error: 'method is required' });
return;
}
try {
const result = await callMCP(method, params);
res.json(result);
}
catch (e) {
res.status(500).json({ error: e.message });
}
});
export default router;
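
A minimal usage sketch, assuming the package's main entry exports this router as its default and that it is mounted in a standard Express host application. The environment variable names below come from the handler itself; the '/api' prefix, port, and example prompt are illustrative assumptions. Note that the router reads req.body but does not register a JSON body parser, so the host app has to apply express.json():

// server.js: hypothetical host application (file name, mount path, and port are assumptions)
import express from 'express';
import aemMcpHandler from '@myea/aem-mcp-handler';

// Expected environment (read by getLLMConfig and callMCP above):
//   AEM_MCP_URL, MCP_USERNAME, MCP_PASSWORD   - AEM MCP JSON-RPC endpoint and Basic auth
//   LLM_PROVIDER, LLM_MODEL, LLM_API_KEY, ... - LLM provider selection and credentials

const app = express();
app.use(express.json());          // /chat and /mcp/call read a parsed JSON body
app.use('/api', aemMcpHandler);   // exposes /api/chat, /api/health, /api/mcp/methods, /api/mcp/call
app.listen(8080, () => console.log('AEM MCP chat handler listening on :8080'));

A client can then exercise the chat route with a plain JSON POST (Node 18+ provides a global fetch):

// client.js: illustrative request against the mounted router
const res = await fetch('http://localhost:8080/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message: 'What can you help me with?', conversationHistory: [] }),
});
const { response, timestamp } = await res.json();
console.log(timestamp, response);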