UNPKG

backend-mcp

Version: 1.0.0

Generador automático de backends con Node.js, Express, Prisma y módulos configurables. Servidor MCP compatible con npx para agentes IA. Soporta PostgreSQL, MySQL, MongoDB y SQLite.

507 lines (433 loc) 10.8 kB
# modules/monitoring/manifest.yaml
# Manifest for the monitoring module: declares activation conditions, entry
# points, generated files, metric/log/trace/alert configuration, usage
# examples, and AI generation instructions consumed by the backend generator.
# NOTE(review): reconstructed from a whitespace-mangled extraction — structure
# inferred from key/list/comment markers; line breaks inside literal blocks
# are best-effort. Verify against the published package before relying on it.

module:
  name: monitoring
  version: 1.0.0
  description: "Comprehensive monitoring and observability module with metrics, logging, tracing, and alerting"
  category: infrastructure
  author: "Backend MCP Generator"
  license: "MIT"

# Module activation conditions
activation:
  auto: true
  conditions:
    - production_ready
    - performance_monitoring
    - observability_required

# Entry points for different monitoring aspects
entry_points:
  metrics: "init.js"
  logging: "init.js"
  tracing: "init.js"
  alerting: "init.js"
  health_checks: "init.js"
  dashboards: "init.js"

# Dependencies (optional)
dependencies:
  optional:
    - database
    - cache
    - email
    - websockets
    - docker
    - ci

# Environment variables
environment:
  required:
    - NODE_ENV
  optional:
    - PROMETHEUS_PORT
    - GRAFANA_URL
    - GRAFANA_API_KEY
    - JAEGER_ENDPOINT
    - ELASTICSEARCH_URL
    - SLACK_WEBHOOK_URL
    - PAGERDUTY_API_KEY
    - NEW_RELIC_LICENSE_KEY
    - DATADOG_API_KEY
    - SENTRY_DSN
    - LOG_LEVEL
    - METRICS_ENABLED
    - TRACING_ENABLED
    - ALERTING_ENABLED

# Monitoring features
features:
  metrics:
    - application_metrics
    - system_metrics
    - business_metrics
    - custom_metrics
    - prometheus_integration
    - statsd_support
    - histogram_tracking
    - counter_tracking
    - gauge_tracking
    - summary_tracking
  logging:
    - structured_logging
    - log_aggregation
    - log_rotation
    - log_filtering
    - log_correlation
    - elasticsearch_integration
    - winston_logger
    - request_logging
    - error_logging
    - audit_logging
  tracing:
    - distributed_tracing
    - request_tracing
    - database_tracing
    - external_api_tracing
    - jaeger_integration
    - zipkin_support
    - opentelemetry
    - span_correlation
    - trace_sampling
  health_checks:
    - application_health
    - database_health
    - cache_health
    - external_service_health
    - custom_health_checks
    - readiness_probes
    - liveness_probes
    - startup_probes
  alerting:
    - threshold_alerts
    - anomaly_detection
    - error_rate_alerts
    - performance_alerts
    - availability_alerts
    - slack_notifications
    - email_notifications
    - pagerduty_integration
    - webhook_alerts
  dashboards:
    - grafana_dashboards
    - application_dashboard
    - infrastructure_dashboard
    - business_dashboard
    - error_dashboard
    - performance_dashboard

# Monitoring tools and integrations
tools:
  metrics:
    - prometheus
    - statsd
    - influxdb
    - datadog
    - new_relic
  logging:
    - winston
    - elasticsearch
    - logstash
    - kibana
    - fluentd
  tracing:
    - jaeger
    - zipkin
    - opentelemetry
    - aws_xray
  visualization:
    - grafana
    - kibana
    - datadog_dashboards
    - new_relic_insights
  alerting:
    - alertmanager
    - pagerduty
    - slack
    - email
    - webhooks

# Generated files
files:
  config:
    - "config/monitoring.js"
    - "config/prometheus.yml"
    - "config/grafana/datasources.yml"
    - "config/grafana/dashboards.yml"
    - "config/alerting.js"
  middleware:
    - "middleware/metrics.js"
    - "middleware/logging.js"
    - "middleware/tracing.js"
    - "middleware/health.js"
  services:
    - "services/metrics-service.js"
    - "services/logging-service.js"
    - "services/tracing-service.js"
    - "services/health-service.js"
    - "services/alert-service.js"
  dashboards:
    - "dashboards/application.json"
    - "dashboards/infrastructure.json"
    - "dashboards/business.json"
    - "dashboards/errors.json"
  docker:
    - "docker/prometheus.yml"
    - "docker/grafana.yml"
    - "docker/jaeger.yml"
    - "docker/elasticsearch.yml"
  scripts:
    - "scripts/setup-monitoring.sh"
    - "scripts/start-monitoring.sh"
    - "scripts/backup-metrics.sh"
    - "scripts/alert-test.sh"

# Integrations
integrations:
  database:
    - connection_monitoring
    - query_performance
    - connection_pool_metrics
    - slow_query_logging
  cache:
    - hit_rate_monitoring
    - memory_usage
    - connection_monitoring
    - performance_metrics
  api:
    - request_metrics
    - response_time_tracking
    - error_rate_monitoring
    - throughput_metrics
  external_services:
    - availability_monitoring
    - response_time_tracking
    - error_tracking
    - dependency_mapping

# Metrics configuration
metrics:
  collection_interval: 15s
  retention_period: 30d
  application:
    - request_count
    - request_duration
    - error_rate
    - active_connections
    - memory_usage
    - cpu_usage
  business:
    - user_registrations
    - orders_created
    - revenue_metrics
    - conversion_rates
  system:
    - disk_usage
    - network_io
    - file_descriptors
    - garbage_collection

# Logging configuration
logging:
  level: info
  format: json
  rotation:
    max_size: 100MB
    max_files: 10
    max_age: 30d
  outputs:
    - console
    - file
    - elasticsearch
  fields:
    - timestamp
    - level
    - message
    - service
    - version
    - trace_id
    - span_id
    - user_id
    - request_id

# Tracing configuration
tracing:
  sampling_rate: 0.1
  max_spans: 1000
  instrumentation:
    - http_requests
    - database_queries
    - cache_operations
    - external_apis
    - message_queues

# Health check configuration
health_checks:
  interval: 30s
  timeout: 5s
  checks:
    - database_connection
    - cache_connection
    - external_services
    - disk_space
    - memory_usage

# Alerting rules
alerting:
  rules:
    - name: high_error_rate
      condition: error_rate > 5%
      duration: 5m
      severity: warning
    - name: high_response_time
      condition: response_time_p95 > 1s
      duration: 2m
      severity: warning
    - name: service_down
      condition: up == 0
      duration: 1m
      severity: critical
    - name: high_memory_usage
      condition: memory_usage > 90%
      duration: 5m
      severity: warning
    - name: disk_space_low
      condition: disk_usage > 85%
      duration: 10m
      severity: warning

# Usage examples
examples:
  basic_setup: |
    const MonitoringModule = require('./modules/monitoring/init');
    const monitoring = new MonitoringModule({
      metrics: true,
      logging: true,
      tracing: false,
      alerting: true
    });
    await monitoring.init();
  custom_metrics: |
    const { metricsService } = require('./services/metrics-service');
    // Counter
    metricsService.incrementCounter('user_registrations', { source: 'web' });
    // Histogram
    metricsService.recordHistogram('request_duration', 150, { method: 'GET', route: '/api/users' });
    // Gauge
    metricsService.setGauge('active_connections', 42);
  custom_logging: |
    const logger = require('./services/logging-service');
    logger.info('User registered', {
      userId: 123,
      email: 'user@example.com',
      source: 'web'
    });
    logger.error('Database connection failed', {
      error: error.message,
      stack: error.stack,
      database: 'postgres'
    });
  health_checks: |
    const { healthService } = require('./services/health-service');
    // Add custom health check
    healthService.addCheck('external_api', async () => {
      const response = await fetch('https://api.example.com/health');
      return response.ok;
    });
    // Get health status
    const health = await healthService.getHealth();

# AI Instructions
ai_instructions: |
  When implementing the monitoring module:

  1. **Metrics Collection**:
     - Implement Prometheus metrics with proper labels
     - Create custom metrics for business logic
     - Set up automatic instrumentation for common operations
     - Configure metric retention and aggregation

  2. **Logging Strategy**:
     - Use structured logging with consistent fields
     - Implement log correlation with trace IDs
     - Set up log rotation and retention policies
     - Configure different log levels for different environments

  3. **Distributed Tracing**:
     - Implement OpenTelemetry instrumentation
     - Set up trace sampling for performance
     - Configure span correlation across services
     - Implement trace context propagation

  4. **Health Monitoring**:
     - Create comprehensive health checks
     - Implement readiness and liveness probes
     - Monitor external dependencies
     - Set up graceful degradation

  5. **Alerting Setup**:
     - Define meaningful alert thresholds
     - Implement alert routing and escalation
     - Set up notification channels
     - Create runbooks for common alerts

  6. **Dashboard Creation**:
     - Design informative Grafana dashboards
     - Create different views for different audiences
     - Implement drill-down capabilities
     - Set up automated dashboard provisioning

  7. **Performance Considerations**:
     - Minimize monitoring overhead
     - Use sampling for high-volume metrics
     - Implement efficient log buffering
     - Optimize dashboard queries

# Automation scripts
scripts:
  setup: "scripts/setup-monitoring.sh"
  start: "scripts/start-monitoring.sh"
  backup: "scripts/backup-metrics.sh"
  test_alerts: "scripts/alert-test.sh"
  dashboard_import: "scripts/import-dashboards.sh"

# Testing
testing:
  unit_tests:
    - metrics collection
    - logging functionality
    - health checks
    - alert conditions
  integration_tests:
    - prometheus integration
    - grafana connectivity
    - alert delivery
    - dashboard rendering
  load_tests:
    - metrics performance
    - logging throughput
    - monitoring overhead

# Performance metrics
performance:
  metrics_overhead: "< 2% CPU"
  logging_overhead: "< 1% CPU"
  memory_usage: "< 100MB"
  disk_usage: "< 1GB/day"
  network_overhead: "< 1MB/min"

# Monitoring and observability
observability:
  self_monitoring: true
  metrics_about_metrics: true
  logging_about_logging: true
  health_of_monitoring: true

# Security considerations
security:
  metric_access_control: true
  log_sanitization: true
  secure_dashboards: true
  encrypted_communications: true
  audit_monitoring_access: true

# Scalability
scalability:
  horizontal_scaling: true
  metric_federation: true
  log_sharding: true
  distributed_tracing: true
  multi_tenant_support: true

# Compatibility
compatibility:
  monitoring_tools:
    - prometheus
    - grafana
    - jaeger
    - elasticsearch
    - datadog
    - new_relic
  platforms:
    - docker
    - kubernetes
    - aws
    - gcp
    - azure
  node_versions:
    - ">=16.0.0"
  databases:
    - postgresql
    - mysql
    - mongodb
    - redis