recoder-code

🚀 AI-powered development platform - Chat with 32+ models, build projects, automate workflows. Free models included!

services:
  # Main CLI Application
  recoder-cli:
    build:
      context: .
      dockerfile: Dockerfile.production
    environment:
      - NODE_ENV=production
      - REDIS_URL=redis://redis:6379
      - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/recoder_db
      - OPENROUTER_API_KEY=${OPENROUTER_API_KEY}
      - LOG_LEVEL=info
      - ENABLE_MONITORING=true
      - ENABLE_METRICS=true
      - ML_TRAINING_SERVICE_URL=http://ml-training:8000
      - PLUGIN_REGISTRY_URL=http://plugin-registry:3001
      - COLLABORATION_SERVICE_URL=http://collaboration:3002
    ports:
      - "8080:8080"
    volumes:
      - ./config:/app/config:ro
      - ./logs:/app/logs
      - ./data:/app/data
    depends_on:
      - redis
      - postgres
      - ml-training
      - plugin-registry
      - collaboration
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "node", "scripts/health-check.js"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # ML Training Service
  ml-training:
    build:
      context: ./ml-training-service
      dockerfile: Dockerfile
    environment:
      - DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/recoder_db
      - REDIS_URL=redis://redis:6379
      - HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}
      - MLFLOW_TRACKING_URI=http://mlflow:5000
      - LOG_LEVEL=info
    ports:
      - "8000:8000"
    volumes:
      - ml_models:/app/models
      - ml_datasets:/app/datasets
      - ml_checkpoints:/app/checkpoints
      - ./logs:/app/logs
    depends_on:
      - redis
      - postgres
      - mlflow
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 4G
          cpus: '2.0'
        reservations:
          memory: 2G
          cpus: '1.0'

  # Plugin Registry Service
  plugin-registry:
    build:
      context: ./plugin-registry-service
      dockerfile: Dockerfile
    environment:
      - NODE_ENV=production
      - DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/recoder_db
      - REDIS_URL=redis://redis:6379
      - LOG_LEVEL=info
      - STORAGE_TYPE=local
      - STORAGE_PATH=/app/storage
    ports:
      - "3001:3001"
    volumes:
      - plugin_storage:/app/storage
      - ./logs:/app/logs
    depends_on:
      - redis
      - postgres
    restart: unless-stopped

  # Collaboration Service
  collaboration:
    build:
      context: ./collaboration-service
      dockerfile: Dockerfile
    environment:
      - NODE_ENV=production
      - REDIS_URL=redis://redis:6379
      - LOG_LEVEL=info
    ports:
      - "3002:3002"
    volumes:
      - collaboration_sessions:/app/sessions
      - ./logs:/app/logs
    depends_on:
      - redis
    restart: unless-stopped

  # Database
  postgres:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=recoder_db
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./sql/init-production.sql:/docker-entrypoint-initdb.d/init.sql:ro
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: '0.5'

  # Redis Cache
  redis:
    image: redis:7-alpine
    command: redis-server --appendonly yes --requirepass ${REDIS_PASSWORD}
    environment:
      - REDIS_PASSWORD=${REDIS_PASSWORD}
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.25'

  # MLflow Tracking Server
  mlflow:
    image: python:3.11-slim
    command: >
      bash -c "pip install mlflow psycopg2-binary &&
               mlflow server
               --host 0.0.0.0
               --port 5000
               --backend-store-uri postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/recoder_db
               --default-artifact-root /mlflow/artifacts"
    environment:
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
    ports:
      - "5000:5000"
    volumes:
      - mlflow_artifacts:/mlflow/artifacts
    depends_on:
      - postgres
    restart: unless-stopped

  # Monitoring - Prometheus
  prometheus:
    image: prom/prometheus:latest
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=30d'
      - '--web.enable-lifecycle'
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - ./monitoring/rules:/etc/prometheus/rules:ro
      - prometheus_data:/prometheus
    restart: unless-stopped

  # Monitoring - Grafana
  grafana:
    image: grafana/grafana:latest
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
      - GF_USERS_ALLOW_SIGN_UP=false
      - GF_INSTALL_PLUGINS=grafana-piechart-panel,grafana-worldmap-panel
    ports:
      - "3000:3000"
    volumes:
      - grafana_data:/var/lib/grafana
      - ./monitoring/grafana/dashboards:/var/lib/grafana/dashboards:ro
      - ./monitoring/grafana/provisioning:/etc/grafana/provisioning:ro
    depends_on:
      - prometheus
    restart: unless-stopped

  # Reverse Proxy
  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro
      - ./logs/nginx:/var/log/nginx
    depends_on:
      - recoder-cli
      - ml-training
      - plugin-registry
      - collaboration
    restart: unless-stopped

volumes:
  postgres_data:
    driver: local
  redis_data:
    driver: local
  ml_models:
    driver: local
  ml_datasets:
    driver: local
  ml_checkpoints:
    driver: local
  plugin_storage:
    driver: local
  collaboration_sessions:
    driver: local
  mlflow_artifacts:
    driver: local
  prometheus_data:
    driver: local
  grafana_data:
    driver: local

networks:
  default:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16
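
The compose file pulls five secrets in through Compose variable substitution: POSTGRES_PASSWORD, REDIS_PASSWORD, OPENROUTER_API_KEY, HUGGINGFACE_TOKEN, and GRAFANA_ADMIN_PASSWORD. A minimal .env sketch is below; the variable names come from the file above, while the values are placeholders you would replace with your own secrets. Docker Compose reads a .env file from the project directory automatically for this kind of interpolation.

# .env (placeholder values, replace before deploying)
POSTGRES_PASSWORD=replace-with-a-strong-password
REDIS_PASSWORD=replace-with-a-strong-password
OPENROUTER_API_KEY=replace-with-your-openrouter-key
HUGGINGFACE_TOKEN=replace-with-your-huggingface-token
GRAFANA_ADMIN_PASSWORD=replace-with-a-strong-password

With those set, docker compose up -d starts the stack (add -f with the compose file's path if it is not named docker-compose.yml), and the nginx service fronts the CLI, ML training, plugin registry, and collaboration services on ports 80/443.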