backend-mcp
Version:
Generador automΓ‘tico de backends con Node.js, Express, Prisma y mΓ³dulos configurables. Servidor MCP compatible con npx para agentes IA. Soporta PostgreSQL, MySQL, MongoDB y SQLite.
1,767 lines (1,535 loc) β’ 52.7 kB
JavaScript
// modules/monitoring/init.js
const fs = require('fs');
const path = require('path');
class MonitoringModule {
constructor(config = {}) {
this.config = {
metrics: config.metrics !== false,
logging: config.logging !== false,
tracing: config.tracing || false,
healthChecks: config.healthChecks !== false,
alerting: config.alerting || false,
dashboards: config.dashboards !== false,
prometheus: config.prometheus !== false,
grafana: config.grafana || false,
jaeger: config.jaeger || false,
elasticsearch: config.elasticsearch || false,
...config
};
this.projectRoot = config.projectRoot || process.cwd();
}
async init() {
console.log('π Initializing Monitoring Module...');
try {
await this.setupDirectories();
await this.generateConfigurations();
await this.generateMiddleware();
await this.generateServices();
if (this.config.dashboards) {
await this.generateDashboards();
}
if (this.config.prometheus || this.config.grafana) {
await this.generateDockerConfigs();
}
await this.generateScripts();
await this.updatePackageJson();
console.log('β
Monitoring Module initialized successfully!');
console.log('π Generated files:', this.getGeneratedFiles());
return {
success: true,
message: 'Monitoring module initialized successfully',
files: this.getGeneratedFiles()
};
} catch (error) {
console.error('β Error initializing monitoring module:', error);
throw error;
}
}
async setupDirectories() {
const dirs = [
'config/monitoring',
'middleware/monitoring',
'services/monitoring',
'dashboards',
'docker/monitoring',
'scripts/monitoring',
'logs'
];
for (const dir of dirs) {
const dirPath = path.join(this.projectRoot, dir);
if (!fs.existsSync(dirPath)) {
fs.mkdirSync(dirPath, { recursive: true });
console.log(`π Created directory: ${dir}`);
}
}
}
async generateConfigurations() {
// Main monitoring configuration
const monitoringConfig = `// config/monitoring/index.js
module.exports = {
metrics: {
enabled: ${this.config.metrics},
port: process.env.PROMETHEUS_PORT || 9090,
path: '/metrics',
collectDefaultMetrics: true,
prefix: process.env.METRICS_PREFIX || 'app_',
labels: {
service: process.env.SERVICE_NAME || 'backend-app',
version: process.env.APP_VERSION || '1.0.0',
environment: process.env.NODE_ENV || 'development'
}
},
logging: {
enabled: ${this.config.logging},
level: process.env.LOG_LEVEL || 'info',
format: 'json',
transports: {
console: {
enabled: true,
colorize: process.env.NODE_ENV !== 'production'
},
file: {
enabled: true,
filename: 'logs/app.log',
maxsize: 100 * 1024 * 1024, // 100MB
maxFiles: 10,
tailable: true
},
elasticsearch: {
enabled: ${this.config.elasticsearch},
host: process.env.ELASTICSEARCH_URL || 'http://localhost:9200',
index: 'app-logs'
}
}
},
tracing: {
enabled: ${this.config.tracing},
serviceName: process.env.SERVICE_NAME || 'backend-app',
jaeger: {
endpoint: process.env.JAEGER_ENDPOINT || 'http://localhost:14268/api/traces',
samplingRate: parseFloat(process.env.TRACING_SAMPLING_RATE) || 0.1
}
},
healthChecks: {
enabled: ${this.config.healthChecks},
path: '/health',
interval: 30000, // 30 seconds
timeout: 5000, // 5 seconds
checks: {
database: true,
cache: true,
externalServices: true,
diskSpace: true,
memory: true
}
},
alerting: {
enabled: ${this.config.alerting},
channels: {
slack: {
enabled: !!process.env.SLACK_WEBHOOK_URL,
webhookUrl: process.env.SLACK_WEBHOOK_URL,
channel: process.env.SLACK_CHANNEL || '#alerts'
},
email: {
enabled: !!process.env.ALERT_EMAIL_TO,
to: process.env.ALERT_EMAIL_TO,
from: process.env.ALERT_EMAIL_FROM
},
pagerduty: {
enabled: !!process.env.PAGERDUTY_API_KEY,
apiKey: process.env.PAGERDUTY_API_KEY,
serviceKey: process.env.PAGERDUTY_SERVICE_KEY
}
},
rules: {
errorRate: {
threshold: 0.05, // 5%
duration: 300000 // 5 minutes
},
responseTime: {
threshold: 1000, // 1 second
duration: 120000 // 2 minutes
},
memoryUsage: {
threshold: 0.9, // 90%
duration: 300000 // 5 minutes
}
}
}
};
`;
// Prometheus configuration
const prometheusConfig = `# config/monitoring/prometheus.yml
global:
scrape_interval: 15s
evaluation_interval: 15s
rule_files:
- "alert_rules.yml"
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
scrape_configs:
- job_name: 'app'
static_configs:
- targets: ['app:3000']
metrics_path: '/metrics'
scrape_interval: 15s
- job_name: 'node-exporter'
static_configs:
- targets: ['node-exporter:9100']
- job_name: 'postgres-exporter'
static_configs:
- targets: ['postgres-exporter:9187']
- job_name: 'redis-exporter'
static_configs:
- targets: ['redis-exporter:9121']
`;
// Alert rules
const alertRules = `# config/monitoring/alert_rules.yml
groups:
- name: application
rules:
- alert: HighErrorRate
expr: rate(http_requests_total{status=~"5.."}[5m]) / rate(http_requests_total[5m]) > 0.05
for: 5m
labels:
severity: warning
annotations:
summary: "High error rate detected"
description: "Error rate is {{ $value | humanizePercentage }} for the last 5 minutes"
- alert: HighResponseTime
expr: histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m])) > 1
for: 2m
labels:
severity: warning
annotations:
summary: "High response time detected"
description: "95th percentile response time is {{ $value }}s"
- alert: ServiceDown
expr: up == 0
for: 1m
labels:
severity: critical
annotations:
summary: "Service is down"
description: "{{ $labels.instance }} has been down for more than 1 minute"
- alert: HighMemoryUsage
expr: (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes > 0.9
for: 5m
labels:
severity: warning
annotations:
summary: "High memory usage"
description: "Memory usage is {{ $value | humanizePercentage }}"
- alert: DiskSpaceLow
expr: (node_filesystem_size_bytes - node_filesystem_free_bytes) / node_filesystem_size_bytes > 0.85
for: 10m
labels:
severity: warning
annotations:
summary: "Disk space low"
description: "Disk usage is {{ $value | humanizePercentage }} on {{ $labels.mountpoint }}"
`;
// Grafana datasource configuration
const grafanaDatasources = `# config/monitoring/grafana/datasources.yml
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://prometheus:9090
isDefault: true
editable: true
- name: Jaeger
type: jaeger
access: proxy
url: http://jaeger:16686
editable: true
- name: Elasticsearch
type: elasticsearch
access: proxy
url: http://elasticsearch:9200
database: "app-logs"
interval: Daily
timeField: "@timestamp"
editable: true
`;
// Write configuration files
fs.writeFileSync(path.join(this.projectRoot, 'config/monitoring/index.js'), monitoringConfig);
fs.writeFileSync(path.join(this.projectRoot, 'config/monitoring/prometheus.yml'), prometheusConfig);
fs.writeFileSync(path.join(this.projectRoot, 'config/monitoring/alert_rules.yml'), alertRules);
// Create Grafana config directory
const grafanaDir = path.join(this.projectRoot, 'config/monitoring/grafana');
if (!fs.existsSync(grafanaDir)) {
fs.mkdirSync(grafanaDir, { recursive: true });
}
fs.writeFileSync(path.join(grafanaDir, 'datasources.yml'), grafanaDatasources);
console.log('β
Generated monitoring configurations');
}
async generateMiddleware() {
// Metrics middleware
const metricsMiddleware = `// middleware/monitoring/metrics.js
const promClient = require('prom-client');
const config = require('../../config/monitoring');
// Create a Registry
const register = new promClient.Registry();
// Add default metrics
if (config.metrics.collectDefaultMetrics) {
promClient.collectDefaultMetrics({
register,
prefix: config.metrics.prefix,
labels: config.metrics.labels
});
}
// Custom metrics
const httpRequestsTotal = new promClient.Counter({
name: config.metrics.prefix + 'http_requests_total',
help: 'Total number of HTTP requests',
labelNames: ['method', 'route', 'status'],
registers: [register]
});
const httpRequestDuration = new promClient.Histogram({
name: config.metrics.prefix + 'http_request_duration_seconds',
help: 'Duration of HTTP requests in seconds',
labelNames: ['method', 'route', 'status'],
buckets: [0.1, 0.3, 0.5, 0.7, 1, 3, 5, 7, 10],
registers: [register]
});
const activeConnections = new promClient.Gauge({
name: config.metrics.prefix + 'active_connections',
help: 'Number of active connections',
registers: [register]
});
const databaseQueries = new promClient.Counter({
name: config.metrics.prefix + 'database_queries_total',
help: 'Total number of database queries',
labelNames: ['operation', 'table', 'status'],
registers: [register]
});
const databaseQueryDuration = new promClient.Histogram({
name: config.metrics.prefix + 'database_query_duration_seconds',
help: 'Duration of database queries in seconds',
labelNames: ['operation', 'table'],
buckets: [0.01, 0.05, 0.1, 0.3, 0.5, 1, 3, 5],
registers: [register]
});
// Middleware function
const metricsMiddleware = (req, res, next) => {
if (!config.metrics.enabled) {
return next();
}
const start = Date.now();
// Increment active connections
activeConnections.inc();
// Override res.end to capture metrics
const originalEnd = res.end;
res.end = function(...args) {
const duration = (Date.now() - start) / 1000;
const route = req.route ? req.route.path : req.path;
const labels = {
method: req.method,
route: route,
status: res.statusCode
};
// Record metrics
httpRequestsTotal.inc(labels);
httpRequestDuration.observe(labels, duration);
// Decrement active connections
activeConnections.dec();
originalEnd.apply(this, args);
};
next();
};
// Metrics endpoint
const metricsEndpoint = async (req, res) => {
try {
res.set('Content-Type', register.contentType);
res.end(await register.metrics());
} catch (error) {
res.status(500).end(error.message);
}
};
module.exports = {
metricsMiddleware,
metricsEndpoint,
register,
metrics: {
httpRequestsTotal,
httpRequestDuration,
activeConnections,
databaseQueries,
databaseQueryDuration
}
};
`;
// Logging middleware
const loggingMiddleware = `// middleware/monitoring/logging.js
const winston = require('winston');
const { ElasticsearchTransport } = require('winston-elasticsearch');
const config = require('../../config/monitoring');
// Create logger
const transports = [];
// Console transport
if (config.logging.transports.console.enabled) {
transports.push(new winston.transports.Console({
format: winston.format.combine(
winston.format.timestamp(),
winston.format.errors({ stack: true }),
config.logging.transports.console.colorize
? winston.format.colorize()
: winston.format.uncolorize(),
winston.format.printf(({ timestamp, level, message, ...meta }) => {
return \`\${timestamp} [\${level}]: \${message} \${Object.keys(meta).length ? JSON.stringify(meta) : ''}\`;
})
)
}));
}
// File transport
if (config.logging.transports.file.enabled) {
transports.push(new winston.transports.File({
filename: config.logging.transports.file.filename,
maxsize: config.logging.transports.file.maxsize,
maxFiles: config.logging.transports.file.maxFiles,
tailable: config.logging.transports.file.tailable,
format: winston.format.combine(
winston.format.timestamp(),
winston.format.errors({ stack: true }),
winston.format.json()
)
}));
}
// Elasticsearch transport
if (config.logging.transports.elasticsearch.enabled) {
transports.push(new ElasticsearchTransport({
level: config.logging.level,
clientOpts: {
node: config.logging.transports.elasticsearch.host
},
index: config.logging.transports.elasticsearch.index,
transformer: (logData) => {
return {
'@timestamp': new Date().toISOString(),
level: logData.level,
message: logData.message,
service: config.metrics.labels.service,
version: config.metrics.labels.version,
environment: config.metrics.labels.environment,
...logData.meta
};
}
}));
}
const logger = winston.createLogger({
level: config.logging.level,
format: winston.format.combine(
winston.format.timestamp(),
winston.format.errors({ stack: true }),
winston.format.json()
),
transports,
exitOnError: false
});
// Request logging middleware
const requestLoggingMiddleware = (req, res, next) => {
if (!config.logging.enabled) {
return next();
}
const start = Date.now();
const requestId = req.headers['x-request-id'] || require('crypto').randomUUID();
// Add request ID to request object
req.requestId = requestId;
// Log request
logger.info('HTTP Request', {
requestId,
method: req.method,
url: req.url,
userAgent: req.get('User-Agent'),
ip: req.ip,
userId: req.user?.id
});
// Override res.end to log response
const originalEnd = res.end;
res.end = function(...args) {
const duration = Date.now() - start;
logger.info('HTTP Response', {
requestId,
method: req.method,
url: req.url,
statusCode: res.statusCode,
duration,
userId: req.user?.id
});
originalEnd.apply(this, args);
};
next();
};
// Error logging middleware
const errorLoggingMiddleware = (error, req, res, next) => {
logger.error('HTTP Error', {
requestId: req.requestId,
method: req.method,
url: req.url,
error: error.message,
stack: error.stack,
userId: req.user?.id
});
next(error);
};
module.exports = {
logger,
requestLoggingMiddleware,
errorLoggingMiddleware
};
`;
// Health check middleware
const healthMiddleware = `// middleware/monitoring/health.js
const config = require('../../config/monitoring');
class HealthChecker {
constructor() {
this.checks = new Map();
this.results = new Map();
this.setupDefaultChecks();
if (config.healthChecks.enabled) {
this.startPeriodicChecks();
}
}
setupDefaultChecks() {
// Database health check
if (config.healthChecks.checks.database) {
this.addCheck('database', async () => {
try {
// Add your database connection check here
// Example: await prisma.$queryRaw\`SELECT 1\`;
return { status: 'healthy', message: 'Database connection successful' };
} catch (error) {
return { status: 'unhealthy', message: error.message };
}
});
}
// Cache health check
if (config.healthChecks.checks.cache) {
this.addCheck('cache', async () => {
try {
// Add your cache connection check here
// Example: await redis.ping();
return { status: 'healthy', message: 'Cache connection successful' };
} catch (error) {
return { status: 'unhealthy', message: error.message };
}
});
}
// Memory check
if (config.healthChecks.checks.memory) {
this.addCheck('memory', async () => {
const memUsage = process.memoryUsage();
const totalMem = require('os').totalmem();
const usedMem = memUsage.heapUsed;
const memPercentage = (usedMem / totalMem) * 100;
if (memPercentage > 90) {
return { status: 'unhealthy', message: \`High memory usage: \${memPercentage.toFixed(2)}%\` };
}
return { status: 'healthy', message: \`Memory usage: \${memPercentage.toFixed(2)}%\` };
});
}
// Disk space check
if (config.healthChecks.checks.diskSpace) {
this.addCheck('diskSpace', async () => {
try {
const fs = require('fs');
const stats = fs.statSync('.');
// Simplified disk check - in production, use a proper disk space library
return { status: 'healthy', message: 'Disk space sufficient' };
} catch (error) {
return { status: 'unhealthy', message: error.message };
}
});
}
}
addCheck(name, checkFunction) {
this.checks.set(name, checkFunction);
}
async runCheck(name) {
const checkFunction = this.checks.get(name);
if (!checkFunction) {
return { status: 'unknown', message: 'Check not found' };
}
try {
const result = await Promise.race([
checkFunction(),
new Promise((_, reject) =>
setTimeout(() => reject(new Error('Health check timeout')), config.healthChecks.timeout)
)
]);
this.results.set(name, { ...result, timestamp: new Date().toISOString() });
return result;
} catch (error) {
const result = { status: 'unhealthy', message: error.message, timestamp: new Date().toISOString() };
this.results.set(name, result);
return result;
}
}
async runAllChecks() {
const results = {};
const promises = Array.from(this.checks.keys()).map(async (name) => {
results[name] = await this.runCheck(name);
});
await Promise.all(promises);
return results;
}
async getHealth() {
const checks = await this.runAllChecks();
const overallStatus = Object.values(checks).every(check => check.status === 'healthy')
? 'healthy'
: 'unhealthy';
return {
status: overallStatus,
timestamp: new Date().toISOString(),
service: config.metrics.labels.service,
version: config.metrics.labels.version,
checks
};
}
startPeriodicChecks() {
setInterval(async () => {
await this.runAllChecks();
}, config.healthChecks.interval);
}
}
const healthChecker = new HealthChecker();
// Health endpoint middleware
const healthEndpoint = async (req, res) => {
try {
const health = await healthChecker.getHealth();
const statusCode = health.status === 'healthy' ? 200 : 503;
res.status(statusCode).json(health);
} catch (error) {
res.status(500).json({
status: 'error',
message: error.message,
timestamp: new Date().toISOString()
});
}
};
// Readiness endpoint
const readinessEndpoint = async (req, res) => {
try {
const health = await healthChecker.getHealth();
const isReady = health.status === 'healthy';
res.status(isReady ? 200 : 503).json({
ready: isReady,
timestamp: new Date().toISOString()
});
} catch (error) {
res.status(503).json({
ready: false,
error: error.message,
timestamp: new Date().toISOString()
});
}
};
// Liveness endpoint
const livenessEndpoint = (req, res) => {
res.status(200).json({
alive: true,
timestamp: new Date().toISOString()
});
};
module.exports = {
healthChecker,
healthEndpoint,
readinessEndpoint,
livenessEndpoint
};
`;
// Write middleware files
fs.writeFileSync(path.join(this.projectRoot, 'middleware/monitoring/metrics.js'), metricsMiddleware);
fs.writeFileSync(path.join(this.projectRoot, 'middleware/monitoring/logging.js'), loggingMiddleware);
fs.writeFileSync(path.join(this.projectRoot, 'middleware/monitoring/health.js'), healthMiddleware);
console.log('β
Generated monitoring middleware');
}
async generateServices() {
// Metrics service
const metricsService = `// services/monitoring/metrics-service.js
const promClient = require('prom-client');
const { metrics, register } = require('../../middleware/monitoring/metrics');
const config = require('../../config/monitoring');
class MetricsService {
constructor() {
this.customMetrics = new Map();
}
// Create custom counter
createCounter(name, help, labelNames = []) {
const counter = new promClient.Counter({
name: config.metrics.prefix + name,
help,
labelNames,
registers: [register]
});
this.customMetrics.set(name, counter);
return counter;
}
// Create custom histogram
createHistogram(name, help, labelNames = [], buckets = undefined) {
const histogram = new promClient.Histogram({
name: config.metrics.prefix + name,
help,
labelNames,
buckets,
registers: [register]
});
this.customMetrics.set(name, histogram);
return histogram;
}
// Create custom gauge
createGauge(name, help, labelNames = []) {
const gauge = new promClient.Gauge({
name: config.metrics.prefix + name,
help,
labelNames,
registers: [register]
});
this.customMetrics.set(name, gauge);
return gauge;
}
// Increment counter
incrementCounter(name, labels = {}, value = 1) {
const metric = this.customMetrics.get(name) || metrics[name];
if (metric && typeof metric.inc === 'function') {
metric.inc(labels, value);
}
}
// Record histogram value
recordHistogram(name, value, labels = {}) {
const metric = this.customMetrics.get(name) || metrics[name];
if (metric && typeof metric.observe === 'function') {
metric.observe(labels, value);
}
}
// Set gauge value
setGauge(name, value, labels = {}) {
const metric = this.customMetrics.get(name) || metrics[name];
if (metric && typeof metric.set === 'function') {
metric.set(labels, value);
}
}
// Record database query metrics
recordDatabaseQuery(operation, table, duration, success = true) {
const labels = {
operation,
table,
status: success ? 'success' : 'error'
};
metrics.databaseQueries.inc(labels);
if (success) {
metrics.databaseQueryDuration.observe({ operation, table }, duration / 1000);
}
}
// Record business metrics
recordBusinessMetric(event, value = 1, labels = {}) {
const metricName = \`business_\${event}\`;
let metric = this.customMetrics.get(metricName);
if (!metric) {
metric = this.createCounter(metricName, \`Business metric for \${event}\`, Object.keys(labels));
}
metric.inc(labels, value);
}
// Get all metrics
async getAllMetrics() {
return await register.metrics();
}
// Clear all metrics
clearMetrics() {
register.clear();
this.customMetrics.clear();
}
}
module.exports = new MetricsService();
`;
// Alert service
const alertService = `// services/monitoring/alert-service.js
const axios = require('axios');
const { logger } = require('../../middleware/monitoring/logging');
const config = require('../../config/monitoring');
class AlertService {
constructor() {
this.alertHistory = new Map();
this.alertCooldowns = new Map();
}
async sendAlert(alert) {
const { name, severity, message, value, labels = {} } = alert;
// Check cooldown
if (this.isInCooldown(name)) {
logger.debug(\`Alert \${name} is in cooldown, skipping\`);
return;
}
const alertData = {
name,
severity,
message,
value,
labels,
timestamp: new Date().toISOString(),
service: config.metrics.labels.service,
environment: config.metrics.labels.environment
};
try {
// Send to configured channels
const promises = [];
if (config.alerting.channels.slack.enabled) {
promises.push(this.sendSlackAlert(alertData));
}
if (config.alerting.channels.email.enabled) {
promises.push(this.sendEmailAlert(alertData));
}
if (config.alerting.channels.pagerduty.enabled && severity === 'critical') {
promises.push(this.sendPagerDutyAlert(alertData));
}
await Promise.allSettled(promises);
// Record alert
this.recordAlert(name, alertData);
logger.info('Alert sent', { alert: alertData });
} catch (error) {
logger.error('Failed to send alert', { error: error.message, alert: alertData });
}
}
async sendSlackAlert(alert) {
const color = this.getSeverityColor(alert.severity);
const payload = {
channel: config.alerting.channels.slack.channel,
username: 'Monitoring Bot',
icon_emoji: ':warning:',
attachments: [{
color,
title: \`π¨ \${alert.name}\`,
text: alert.message,
fields: [
{ title: 'Severity', value: alert.severity, short: true },
{ title: 'Service', value: alert.service, short: true },
{ title: 'Environment', value: alert.environment, short: true },
{ title: 'Time', value: alert.timestamp, short: true }
],
footer: 'Monitoring System',
ts: Math.floor(Date.now() / 1000)
}]
};
await axios.post(config.alerting.channels.slack.webhookUrl, payload);
}
async sendEmailAlert(alert) {
// Implement email sending logic here
// This would typically use your email service
logger.info('Email alert would be sent', { alert });
}
async sendPagerDutyAlert(alert) {
const payload = {
routing_key: config.alerting.channels.pagerduty.serviceKey,
event_action: 'trigger',
dedup_key: \`\${alert.name}-\${alert.service}\`,
payload: {
summary: alert.message,
severity: alert.severity,
source: alert.service,
component: alert.name,
group: alert.environment,
class: 'monitoring',
custom_details: alert.labels
}
};
await axios.post('https://events.pagerduty.com/v2/enqueue', payload, {
headers: {
'Content-Type': 'application/json',
'Authorization': \`Token token=\${config.alerting.channels.pagerduty.apiKey}\`
}
});
}
getSeverityColor(severity) {
const colors = {
critical: '#FF0000',
warning: '#FFA500',
info: '#0000FF'
};
return colors[severity] || '#808080';
}
isInCooldown(alertName) {
const cooldownEnd = this.alertCooldowns.get(alertName);
return cooldownEnd && Date.now() < cooldownEnd;
}
recordAlert(name, alert) {
// Record alert history
if (!this.alertHistory.has(name)) {
this.alertHistory.set(name, []);
}
const history = this.alertHistory.get(name);
history.push(alert);
// Keep only last 100 alerts per type
if (history.length > 100) {
history.shift();
}
// Set cooldown (5 minutes)
this.alertCooldowns.set(name, Date.now() + 5 * 60 * 1000);
}
// Check alert conditions
checkAlertConditions(metrics) {
// This would typically be called by a monitoring job
// Implementation depends on your specific metrics and thresholds
logger.debug('Checking alert conditions', { metricsCount: Object.keys(metrics).length });
}
getAlertHistory(alertName) {
return this.alertHistory.get(alertName) || [];
}
getAllAlertHistory() {
const history = {};
for (const [name, alerts] of this.alertHistory) {
history[name] = alerts;
}
return history;
}
}
module.exports = new AlertService();
`;
// Write service files
fs.writeFileSync(path.join(this.projectRoot, 'services/monitoring/metrics-service.js'), metricsService);
fs.writeFileSync(path.join(this.projectRoot, 'services/monitoring/alert-service.js'), alertService);
console.log('β
Generated monitoring services');
}
async generateDashboards() {
// Application dashboard
const applicationDashboard = `{
"dashboard": {
"id": null,
"title": "Application Monitoring",
"tags": ["monitoring", "application"],
"timezone": "browser",
"panels": [
{
"id": 1,
"title": "Request Rate",
"type": "graph",
"targets": [
{
"expr": "rate(app_http_requests_total[5m])",
"legendFormat": "{{method}} {{route}}"
}
],
"gridPos": {"h": 8, "w": 12, "x": 0, "y": 0}
},
{
"id": 2,
"title": "Response Time (95th percentile)",
"type": "graph",
"targets": [
{
"expr": "histogram_quantile(0.95, rate(app_http_request_duration_seconds_bucket[5m]))",
"legendFormat": "95th percentile"
}
],
"gridPos": {"h": 8, "w": 12, "x": 12, "y": 0}
},
{
"id": 3,
"title": "Error Rate",
"type": "graph",
"targets": [
{
"expr": "rate(app_http_requests_total{status=~\"5..\"}[5m]) / rate(app_http_requests_total[5m])",
"legendFormat": "Error Rate"
}
],
"gridPos": {"h": 8, "w": 12, "x": 0, "y": 8}
},
{
"id": 4,
"title": "Active Connections",
"type": "graph",
"targets": [
{
"expr": "app_active_connections",
"legendFormat": "Active Connections"
}
],
"gridPos": {"h": 8, "w": 12, "x": 12, "y": 8}
}
],
"time": {
"from": "now-1h",
"to": "now"
},
"refresh": "5s"
}
}
`;
// Infrastructure dashboard
const infrastructureDashboard = `{
"dashboard": {
"id": null,
"title": "Infrastructure Monitoring",
"tags": ["monitoring", "infrastructure"],
"timezone": "browser",
"panels": [
{
"id": 1,
"title": "CPU Usage",
"type": "graph",
"targets": [
{
"expr": "100 - (avg by (instance) (rate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100)",
"legendFormat": "{{instance}}"
}
],
"gridPos": {"h": 8, "w": 12, "x": 0, "y": 0}
},
{
"id": 2,
"title": "Memory Usage",
"type": "graph",
"targets": [
{
"expr": "(1 - (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes)) * 100",
"legendFormat": "Memory Usage %"
}
],
"gridPos": {"h": 8, "w": 12, "x": 12, "y": 0}
},
{
"id": 3,
"title": "Disk Usage",
"type": "graph",
"targets": [
{
"expr": "(1 - (node_filesystem_free_bytes / node_filesystem_size_bytes)) * 100",
"legendFormat": "{{mountpoint}}"
}
],
"gridPos": {"h": 8, "w": 12, "x": 0, "y": 8}
},
{
"id": 4,
"title": "Network I/O",
"type": "graph",
"targets": [
{
"expr": "rate(node_network_receive_bytes_total[5m])",
"legendFormat": "Receive {{device}}"
},
{
"expr": "rate(node_network_transmit_bytes_total[5m])",
"legendFormat": "Transmit {{device}}"
}
],
"gridPos": {"h": 8, "w": 12, "x": 12, "y": 8}
}
],
"time": {
"from": "now-1h",
"to": "now"
},
"refresh": "5s"
}
}
`;
// Write dashboard files
fs.writeFileSync(path.join(this.projectRoot, 'dashboards/application.json'), applicationDashboard);
fs.writeFileSync(path.join(this.projectRoot, 'dashboards/infrastructure.json'), infrastructureDashboard);
console.log('β
Generated Grafana dashboards');
}
async generateDockerConfigs() {
// Docker Compose for monitoring stack
const monitoringDockerCompose = `# docker/monitoring/docker-compose.monitoring.yml
version: '3.8'
services:
prometheus:
image: prom/prometheus:latest
container_name: prometheus
ports:
- "9090:9090"
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- ./alert_rules.yml:/etc/prometheus/alert_rules.yml
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--storage.tsdb.retention.time=200h'
- '--web.enable-lifecycle'
- '--web.enable-admin-api'
networks:
- monitoring
grafana:
image: grafana/grafana:latest
container_name: grafana
ports:
- "3001:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_USERS_ALLOW_SIGN_UP=false
volumes:
- grafana_data:/var/lib/grafana
- ./grafana/datasources.yml:/etc/grafana/provisioning/datasources/datasources.yml
- ./grafana/dashboards.yml:/etc/grafana/provisioning/dashboards/dashboards.yml
- ../../dashboards:/var/lib/grafana/dashboards
networks:
- monitoring
depends_on:
- prometheus
node-exporter:
image: prom/node-exporter:latest
container_name: node-exporter
ports:
- "9100:9100"
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
command:
- '--path.procfs=/host/proc'
- '--path.rootfs=/rootfs'
- '--path.sysfs=/host/sys'
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
networks:
- monitoring
alertmanager:
image: prom/alertmanager:latest
container_name: alertmanager
ports:
- "9093:9093"
volumes:
- ./alertmanager.yml:/etc/alertmanager/alertmanager.yml
- alertmanager_data:/alertmanager
networks:
- monitoring
jaeger:
image: jaegertracing/all-in-one:latest
container_name: jaeger
ports:
- "16686:16686"
- "14268:14268"
environment:
- COLLECTOR_OTLP_ENABLED=true
networks:
- monitoring
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
container_name: elasticsearch
environment:
- discovery.type=single-node
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- xpack.security.enabled=false
ports:
- "9200:9200"
volumes:
- elasticsearch_data:/usr/share/elasticsearch/data
networks:
- monitoring
kibana:
image: docker.elastic.co/kibana/kibana:8.8.0
container_name: kibana
ports:
- "5601:5601"
environment:
- ELASTICSEARCH_HOSTS=http://elasticsearch:9200
networks:
- monitoring
depends_on:
- elasticsearch
volumes:
prometheus_data:
grafana_data:
alertmanager_data:
elasticsearch_data:
networks:
monitoring:
driver: bridge
`;
// Alertmanager configuration
const alertmanagerConfig = `# docker/monitoring/alertmanager.yml
global:
smtp_smarthost: 'localhost:587'
smtp_from: 'alerts@yourcompany.com'
route:
group_by: ['alertname']
group_wait: 10s
group_interval: 10s
repeat_interval: 1h
receiver: 'web.hook'
receivers:
- name: 'web.hook'
slack_configs:
- api_url: '\${SLACK_WEBHOOK_URL}'
channel: '#alerts'
title: 'Alert: {{ range .Alerts }}{{ .Annotations.summary }}{{ end }}'
text: '{{ range .Alerts }}{{ .Annotations.description }}{{ end }}'
email_configs:
- to: '\${ALERT_EMAIL_TO}'
subject: 'Alert: {{ range .Alerts }}{{ .Annotations.summary }}{{ end }}'
body: |
{{ range .Alerts }}
Alert: {{ .Annotations.summary }}
Description: {{ .Annotations.description }}
Labels: {{ range .Labels.SortedPairs }}{{ .Name }}={{ .Value }} {{ end }}
{{ end }}
inhibit_rules:
- source_match:
severity: 'critical'
target_match:
severity: 'warning'
equal: ['alertname', 'dev', 'instance']
`;
// Grafana dashboard provisioning
const grafanaDashboards = `# docker/monitoring/grafana/dashboards.yml
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
updateIntervalSeconds: 10
allowUiUpdates: true
options:
path: /var/lib/grafana/dashboards
`;
// Create monitoring docker directory
const dockerMonitoringDir = path.join(this.projectRoot, 'docker/monitoring');
if (!fs.existsSync(dockerMonitoringDir)) {
fs.mkdirSync(dockerMonitoringDir, { recursive: true });
}
// Create grafana config directory
const grafanaConfigDir = path.join(dockerMonitoringDir, 'grafana');
if (!fs.existsSync(grafanaConfigDir)) {
fs.mkdirSync(grafanaConfigDir, { recursive: true });
}
// Write Docker configuration files
fs.writeFileSync(path.join(dockerMonitoringDir, 'docker-compose.monitoring.yml'), monitoringDockerCompose);
fs.writeFileSync(path.join(dockerMonitoringDir, 'alertmanager.yml'), alertmanagerConfig);
fs.writeFileSync(path.join(grafanaConfigDir, 'dashboards.yml'), grafanaDashboards);
// Copy prometheus config to docker directory
const prometheusConfigSource = path.join(this.projectRoot, 'config/monitoring/prometheus.yml');
const prometheusConfigDest = path.join(dockerMonitoringDir, 'prometheus.yml');
if (fs.existsSync(prometheusConfigSource)) {
fs.copyFileSync(prometheusConfigSource, prometheusConfigDest);
}
// Copy alert rules to docker directory
const alertRulesSource = path.join(this.projectRoot, 'config/monitoring/alert_rules.yml');
const alertRulesDest = path.join(dockerMonitoringDir, 'alert_rules.yml');
if (fs.existsSync(alertRulesSource)) {
fs.copyFileSync(alertRulesSource, alertRulesDest);
}
console.log('β
Generated Docker monitoring configurations');
}
async generateScripts() {
// Setup monitoring script
const setupScript = `#!/bin/bash
# scripts/monitoring/setup-monitoring.sh
set -e
echo "π§ Setting up monitoring stack..."
# Create necessary directories
mkdir -p logs
mkdir -p docker/monitoring/grafana
# Copy configuration files
echo "π Copying configuration files..."
cp config/monitoring/prometheus.yml docker/monitoring/
cp config/monitoring/alert_rules.yml docker/monitoring/
cp config/monitoring/grafana/datasources.yml docker/monitoring/grafana/
# Set up environment variables
if [ ! -f .env.monitoring ]; then
echo "π Creating monitoring environment file..."
cat > .env.monitoring << EOF
# Monitoring Configuration
PROMETHEUS_PORT=9090
GRAFANA_PORT=3001
ALERTMANAGER_PORT=9093
JAEGER_PORT=16686
ELASTICSEARCH_PORT=9200
KIBANA_PORT=5601
# Alert Configuration
SLACK_WEBHOOK_URL=
ALERT_EMAIL_TO=
ALERT_EMAIL_FROM=
PAGERDUTY_API_KEY=
PAGERDUTY_SERVICE_KEY=
# External Services
NEW_RELIC_LICENSE_KEY=
DATADOG_API_KEY=
SENTRY_DSN=
EOF
echo "β οΈ Please configure .env.monitoring with your alert settings"
fi
# Install monitoring dependencies
echo "π¦ Installing monitoring dependencies..."
npm install --save \
prom-client \
winston \
winston-elasticsearch \
@opentelemetry/api \
@opentelemetry/sdk-node \
@opentelemetry/auto-instrumentations-node
echo "β
Monitoring setup completed!"
echo "π Run 'npm run monitoring:start' to start the monitoring stack"
`;
// Start monitoring script
const startScript = `#!/bin/bash
# scripts/monitoring/start-monitoring.sh
set -e
echo "π Starting monitoring stack..."
# Load environment variables
if [ -f .env.monitoring ]; then
export $(cat .env.monitoring | xargs)
fi
# Start monitoring services
echo "π³ Starting Docker monitoring stack..."
docker-compose -f docker/monitoring/docker-compose.monitoring.yml up -d
# Wait for services to be ready
echo "β³ Waiting for services to be ready..."
sleep 30
# Check service health
echo "π Checking service health..."
# Check Prometheus
if curl -f http://localhost:9090/-/healthy > /dev/null 2>&1; then
echo "β
Prometheus is healthy"
else
echo "β Prometheus is not responding"
fi
# Check Grafana
if curl -f http://localhost:3001/api/health > /dev/null 2>&1; then
echo "β
Grafana is healthy"
else
echo "β Grafana is not responding"
fi
# Check Elasticsearch
if curl -f http://localhost:9200/_cluster/health > /dev/null 2>&1; then
echo "β
Elasticsearch is healthy"
else
echo "β Elasticsearch is not responding"
fi
echo "π Monitoring stack is running!"
echo "π Grafana: http://localhost:3001 (admin/admin)"
echo "π Prometheus: http://localhost:9090"
echo "π Jaeger: http://localhost:16686"
echo "π Kibana: http://localhost:5601"
`;
// Backup metrics script
const backupScript = `#!/bin/bash
# scripts/monitoring/backup-metrics.sh
set -e
BACKUP_DIR="backups/metrics/$(date +%Y%m%d_%H%M%S)"
echo "πΎ Creating metrics backup..."
# Create backup directory
mkdir -p "$BACKUP_DIR"
# Backup Prometheus data
echo "π Backing up Prometheus data..."
docker exec prometheus tar czf - /prometheus | cat > "$BACKUP_DIR/prometheus_data.tar.gz"
# Backup Grafana dashboards
echo "π Backing up Grafana dashboards..."
docker exec grafana tar czf - /var/lib/grafana/dashboards | cat > "$BACKUP_DIR/grafana_dashboards.tar.gz"
# Backup configuration files
echo "βοΈ Backing up configuration files..."
tar czf "$BACKUP_DIR/monitoring_config.tar.gz" config/monitoring/ docker/monitoring/
# Create backup manifest
cat > "$BACKUP_DIR/manifest.json" << EOF
{
"timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
"version": "1.0.0",
"files": [
"prometheus_data.tar.gz",
"grafana_dashboards.tar.gz",
"monitoring_config.tar.gz"
]
}
EOF
echo "β
Backup created: $BACKUP_DIR"
echo "π Backup size: $(du -sh "$BACKUP_DIR" | cut -f1)"
`;
// Alert test script
const alertTestScript = `#!/bin/bash
# scripts/monitoring/alert-test.sh
set -e
echo "π§ͺ Testing alert system..."
# Load environment variables
if [ -f .env.monitoring ]; then
export $(cat .env.monitoring | xargs)
fi
# Test Slack webhook
if [ ! -z "$SLACK_WEBHOOK_URL" ]; then
echo "π± Testing Slack webhook..."
curl -X POST "$SLACK_WEBHOOK_URL" \
-H 'Content-type: application/json' \
--data '{
"text": "π§ͺ Test alert from monitoring system",
"attachments": [{
"color": "warning",
"title": "Test Alert",
"text": "This is a test alert to verify the monitoring system is working correctly.",
"fields": [
{"title": "Severity", "value": "test", "short": true},
{"title": "Service", "value": "monitoring", "short": true}
]
}]
}'
echo "β
Slack test completed"
else
echo "β οΈ SLACK_WEBHOOK_URL not configured, skipping Slack test"
fi
# Test email alerts (if configured)
if [ ! -z "$ALERT_EMAIL_TO" ]; then
echo "π§ Email alerts configured for: $ALERT_EMAIL_TO"
else
echo "β οΈ Email alerts not configured"
fi
# Test PagerDuty (if configured)
if [ ! -z "$PAGERDUTY_API_KEY" ]; then
echo "π PagerDuty integration configured"
else
echo "β οΈ PagerDuty integration not configured"
fi
echo "β
Alert system test completed"
`;
// Write script files
fs.writeFileSync(path.join(this.projectRoot, 'scripts/monitoring/setup-monitoring.sh'), setupScript);
fs.writeFileSync(path.join(this.projectRoot, 'scripts/monitoring/start-monitoring.sh'), startScript);
fs.writeFileSync(path.join(this.projectRoot, 'scripts/monitoring/backup-metrics.sh'), backupScript);
fs.writeFileSync(path.join(this.projectRoot, 'scripts/monitoring/alert-test.sh'), alertTestScript);
// Make scripts executable (Unix systems)
if (process.platform !== 'win32') {
try {
const scripts = [
'scripts/monitoring/setup-monitoring.sh',
'scripts/monitoring/start-monitoring.sh',
'scripts/monitoring/backup-metrics.sh',
'scripts/monitoring/alert-test.sh'
];
scripts.forEach(script => {
fs.chmodSync(path.join(this.projectRoot, script), '755');
});
} catch (error) {
console.warn('β οΈ Could not make scripts executable:', error.message);
}
}
console.log('β
Generated monitoring scripts');
}
async updatePackageJson() {
const packageJsonPath = path.join(this.projectRoot, 'package.json');
let packageJson = {};
if (fs.existsSync(packageJsonPath)) {
packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
}
// Add monitoring scripts
packageJson.scripts = {
...packageJson.scripts,
'monitoring:setup': 'bash scripts/monitoring/setup-monitoring.sh',
'monitoring:start': 'bash scripts/monitoring/start-monitoring.sh',
'monitoring:stop': 'docker-compose -f docker/monitoring/docker-compose.monitoring.yml down',
'monitoring:restart': 'npm run monitoring:stop && npm run monitoring:start',
'monitoring:logs': 'docker-compose -f docker/monitoring/docker-compose.monitoring.yml logs -f',
'monitoring:backup': 'bash scripts/monitoring/backup-metrics.sh',
'monitoring:test-alerts': 'bash scripts/monitoring/alert-test.sh',
'monitoring:clean': 'docker-compose -f docker/monitoring/docker-compose.monitoring.yml down -v'
};
// Add monitoring dependencies
packageJson.dependencies = {
...packageJson.dependencies,
'prom-client': '^14.2.0',
'winston': '^3.10.0',
'winston-elasticsearch': '^0.17.4'
};
// Add optional tracing dependencies
if (this.config.tracing) {
packageJson.dependencies = {
...packageJson.dependencies,
'@opentelemetry/api': '^1.6.0',
'@opentelemetry/sdk-node': '^0.43.0',
'@opentelemetry/auto-instrumentations-node': '^0.39.4',
'@opentelemetry/exporter-jaeger': '^1.17.0'
};
}
// Add development dependencies
packageJson.devDependencies = {
...packageJson.devDependencies,
'@types/prom-client': '^14.2.0'
};
fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
console.log('β
Updated package.json with monitoring dependencies and scripts');
}
getGeneratedFiles() {
const files = [
'config/monitoring/index.js',
'config/monitoring/prometheus.yml',
'config/monitoring/alert_rules.yml',
'config/monitoring/grafana/datasources.yml',
'middleware/monitoring/metrics.js',
'middleware/monitoring/logging.js',
'middleware/monitoring/health.js',
'services/monitoring/metrics-service.js',
'services/monitoring/alert-service.js',
'scripts/monitoring/setup-monitoring.sh',
'scripts/monitoring/start-monitoring.sh',
'scripts/monitoring/backup-metrics.sh',
'scripts/monitoring/alert-test.sh'
];
if (this.config.dashboards) {
files.push(
'dashboards/application.json',
'dashboards/infrastructure.json'
);
}
if (this.config.prometheus || this.config.grafana) {
files.push(
'docker/monitoring/docker-compose.monitoring.yml',
'docker/monitoring/alertmanager.yml',
'docker/monitoring/grafana/dashboards.yml',
'docker/monitoring/prometheus.yml',
'docker/monitoring/alert_rules.yml'
);
}
return files;
}
// Integration methods
setupDatabaseMonitoring(dbClient) {
if (!this.config.metrics) return;
const metricsService = require('../../services/monitoring/metrics-service');
// Wrap database queries with metrics
const originalQuery = dbClient.query;
dbClient.query = async function(sql, params) {
const start = Date.now();
const operation = sql.split(' ')[0].toUpperCase();
const table = this.extractTableName(sql);
try {
const result = await originalQuery.call(this, sql, params);
const duration = Date.now() - start;
metricsService.recordDatabaseQuery(operation, table, duration, true);
return result;
} catch (error) {
const duration = Date.now() - start;
metricsService.recordDatabaseQuery(operation, table, duration, false);
throw error;
}
};
}
setupCacheMonitoring(cacheClient) {
if (!this.config.metrics) return;
const metricsService = require('../../services/monitoring/metrics-service');
// Create cache metrics
const cacheHits = metricsService.createCounter('cache_hits_total', 'Total cache hits', ['operation']);
const cacheMisses = metricsService.createCounter('cache_misses_total', 'Total cache misses', ['operation']);
const cacheOperationDuration = metricsService.createHistogram('cache_operation_duration_seconds', 'Cache operation duration', ['operation']);
// Wrap cache operations
const operations = ['get', 'set', 'del', 'exists'];
operations.forEach(op => {
if (typeof cacheClient[op] === 'function') {
const originalOp = cacheClient[op];
cacheClient[op] = async function(...args) {
const start = Date.now();
try {
const result = await originalOp.apply(this, args);
const duration = (Date.now() - start) / 1000;
cacheOperationDuration.observe({ operation: op }, duration);
if (op === 'get') {
if (result !== null && result !== undefined) {
cacheHits.inc({ operation: op });
} else {
cacheMisses.inc({ operation: op });
}
}