shipdeck
Version: 1.0.0
Ship MVPs in 48 hours. Fix bugs in 30 seconds. The command deck for developers who ship.
1,114 lines (999 loc) • 29.6 kB
JavaScript
/**
* DevOps Agent - Deployment and Infrastructure Automation Expert
* Specializes in CI/CD, containerization, cloud deployments, and infrastructure as code
*/
const { BaseAgent } = require('./base-agent');
class DevOpsAgent extends BaseAgent {
constructor(options = {}) {
super({
name: 'DevOps Agent',
description: 'Deployment and infrastructure automation expert specializing in CI/CD, Docker, cloud deployments, and production-ready configurations',
version: '1.0.0',
...options
});
// DevOps-specific configuration
this.devopsConfig = {
dockerRegistry: 'docker.io',
defaultNodeVersion: '20-alpine',
defaultPythonVersion: '3.11-slim',
ciProvider: 'github-actions', // github-actions, gitlab-ci, circleci
cloudProvider: 'aws', // aws, gcp, azure, vercel
monitoringEnabled: true,
securityScanning: true,
...options.devopsConfig
};
// Template configurations
this.templates = {
docker: this.getDockerTemplates(),
githubActions: this.getGitHubActionsTemplates(),
vercel: this.getVercelTemplates(),
nginx: this.getNginxTemplates(),
pm2: this.getPM2Templates(),
terraform: this.getTerraformTemplates(),
monitoring: this.getMonitoringTemplates()
};
}
/**
* Get DevOps agent capabilities
* @returns {Array<string>} Array of capabilities
*/
getCapabilities() {
return ['docker', 'ci-cd', 'deployment', 'monitoring', 'scaling', 'security', 'infrastructure', 'automation'];
}
/**
 * Build the system prompt that steers the model for DevOps tasks.
 * NOTE: the returned text is sent verbatim, so its exact wording is
 * runtime behavior — edit with care.
 * @returns {string} System prompt
 */
getSystemPrompt() {
return `You are the DevOps Agent, an expert in deployment automation and infrastructure management.
Core Expertise:
- Docker containerization and multi-stage builds
- CI/CD pipeline design and optimization
- Cloud deployment strategies (AWS, GCP, Azure, Vercel)
- Infrastructure as Code (Terraform, CloudFormation)
- Security best practices and secrets management
- Monitoring, logging, and observability
- Auto-scaling and performance optimization
- Zero-downtime deployment strategies
Security Principles:
- Never expose secrets in configurations
- Use environment variables for sensitive data
- Implement least privilege access
- Enable security scanning in pipelines
- Encrypt data in transit and at rest
- Regular security audits and updates
Performance Standards:
- Fast build times with optimized caching
- Minimal container image sizes
- Efficient resource utilization
- Automated scaling based on metrics
- Health checks and graceful shutdowns
Always provide production-ready configurations with comprehensive error handling, monitoring, and security measures.`;
}
/**
* Execute DevOps task
* @param {Object} task - Task configuration
* @param {Object} context - Execution context
* @returns {Promise<Object>} Execution result
*/
async execute(task, context = {}) {
this.validateTask(task);
const { type, technology, environment = 'production', options = {} } = task;
try {
let result;
switch (type) {
case 'docker':
result = await this.generateDockerConfig(technology, environment, options);
break;
case 'ci-cd':
result = await this.generateCICDPipeline(technology, environment, options);
break;
case 'deployment':
result = await this.generateDeploymentConfig(technology, environment, options);
break;
case 'monitoring':
result = await this.generateMonitoringConfig(technology, environment, options);
break;
case 'infrastructure':
result = await this.generateInfrastructureCode(technology, environment, options);
break;
case 'security':
result = await this.generateSecurityConfig(technology, environment, options);
break;
case 'scaling':
result = await this.generateScalingConfig(technology, environment, options);
break;
default:
throw new Error(`Unsupported task type: ${type}`);
}
return {
type,
technology,
environment,
configurations: result,
metadata: {
generated_at: new Date().toISOString(),
agent: this.name,
version: this.version
}
};
} catch (error) {
this.log('error', `DevOps task execution failed: ${error.message}`, { task, context });
throw error;
}
}
/**
* Generate Docker configurations
* @param {string} technology - Technology stack
* @param {string} environment - Target environment
* @param {Object} options - Configuration options
* @returns {Promise<Object>} Docker configurations
*/
async generateDockerConfig(technology, environment, options = {}) {
const template = this.templates.docker[technology] || this.templates.docker.node;
const config = {
dockerfile: this.customizeDockerfile(template.dockerfile, environment, options),
dockerignore: template.dockerignore,
compose: environment !== 'production' ? this.generateDockerCompose(technology, environment, options) : null,
buildScript: this.generateDockerBuildScript(technology, environment, options)
};
return config;
}
/**
* Generate CI/CD pipeline configurations
* @param {string} technology - Technology stack
* @param {string} environment - Target environment
* @param {Object} options - Configuration options
* @returns {Promise<Object>} CI/CD configurations
*/
async generateCICDPipeline(technology, environment, options = {}) {
const ciProvider = options.provider || this.devopsConfig.ciProvider;
let config = {};
switch (ciProvider) {
case 'github-actions':
config = {
workflows: this.generateGitHubActionsWorkflows(technology, environment, options),
secrets: this.generateRequiredSecrets(technology, environment),
environments: this.generateEnvironmentConfig(environment)
};
break;
case 'gitlab-ci':
config = {
gitlab_ci: this.generateGitLabCI(technology, environment, options),
variables: this.generateRequiredVariables(technology, environment)
};
break;
case 'circleci':
config = {
circle_config: this.generateCircleCI(technology, environment, options),
contexts: this.generateCircleCIContexts(technology, environment)
};
break;
default:
throw new Error(`Unsupported CI provider: ${ciProvider}`);
}
return config;
}
/**
* Generate deployment configurations
* @param {string} technology - Technology stack
* @param {string} environment - Target environment
* @param {Object} options - Configuration options
* @returns {Promise<Object>} Deployment configurations
*/
async generateDeploymentConfig(technology, environment, options = {}) {
const platform = options.platform || this.devopsConfig.cloudProvider;
let config = {};
switch (platform) {
case 'vercel':
config = {
'vercel.json': this.templates.vercel[technology] || this.templates.vercel.nextjs,
'build-script': this.generateVercelBuildScript(technology, options),
'env-setup': this.generateEnvironmentSetup(platform, environment)
};
break;
case 'aws':
config = {
'ecs-task-definition': this.generateECSTaskDefinition(technology, environment, options),
'ecs-service': this.generateECSService(technology, environment, options),
'alb-config': this.generateALBConfig(technology, environment, options),
'cloudformation': this.generateCloudFormationTemplate(technology, environment, options)
};
break;
case 'gcp':
config = {
'app.yaml': this.generateAppEngineConfig(technology, environment, options),
'cloudbuild.yaml': this.generateCloudBuildConfig(technology, environment, options),
'gcp-resources': this.generateGCPResources(technology, environment, options)
};
break;
case 'kubernetes':
config = {
'deployment.yaml': this.generateKubernetesDeployment(technology, environment, options),
'service.yaml': this.generateKubernetesService(technology, environment, options),
'ingress.yaml': this.generateKubernetesIngress(technology, environment, options),
'configmap.yaml': this.generateKubernetesConfigMap(technology, environment, options)
};
break;
default:
config = {
'pm2.config.js': this.templates.pm2[technology] || this.templates.pm2.node,
'nginx.conf': this.templates.nginx.default,
'systemd-service': this.generateSystemdService(technology, environment, options)
};
}
return config;
}
/**
* Generate monitoring configurations
* @param {string} technology - Technology stack
* @param {string} environment - Target environment
* @param {Object} options - Configuration options
* @returns {Promise<Object>} Monitoring configurations
*/
async generateMonitoringConfig(technology, environment, options = {}) {
return {
healthChecks: this.generateHealthChecks(technology, environment, options),
logging: this.generateLoggingConfig(technology, environment, options),
metrics: this.generateMetricsConfig(technology, environment, options),
alerts: this.generateAlertsConfig(technology, environment, options),
dashboards: this.generateDashboardConfig(technology, environment, options)
};
}
/**
* Docker templates
*/
getDockerTemplates() {
return {
node: {
dockerfile: `# Multi-stage Node.js Docker build
FROM node:${this.devopsConfig.defaultNodeVersion} AS dependencies
WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production && npm cache clean --force
FROM node:${this.devopsConfig.defaultNodeVersion} AS build
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build
FROM node:${this.devopsConfig.defaultNodeVersion} AS runtime
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs
WORKDIR /app
COPY --from=dependencies /app/node_modules ./node_modules
COPY --from=build --chown=nextjs:nodejs /app/dist ./dist
COPY --from=build --chown=nextjs:nodejs /app/package*.json ./
USER nextjs
EXPOSE 3000
ENV NODE_ENV=production
ENV PORT=3000
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
CMD curl -f http://localhost:3000/health || exit 1
CMD ["npm", "start"]`,
dockerignore: `node_modules
npm-debug.log*
.npm
.next
.env*
.git
.gitignore
README.md
Dockerfile
.dockerignore
coverage
.nyc_output
test
tests
**/*.test.js
**/*.spec.js`
},
python: {
dockerfile: `# Multi-stage Python Docker build
FROM python:${this.devopsConfig.defaultPythonVersion} AS dependencies
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
FROM python:${this.devopsConfig.defaultPythonVersion} AS runtime
RUN addgroup --system --gid 1001 appgroup
RUN adduser --system --uid 1001 appuser --gid 1001
WORKDIR /app
COPY --from=dependencies /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
COPY --from=dependencies /usr/local/bin /usr/local/bin
COPY --chown=appuser:appgroup . .
USER appuser
EXPOSE 8000
ENV PYTHONPATH=/app
ENV PYTHONUNBUFFERED=1
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
CMD curl -f http://localhost:8000/health || exit 1
CMD ["python", "main.py"]`,
dockerignore: `__pycache__
*.pyc
*.pyo
*.pyd
.Python
build
develop-eggs
dist
downloads
eggs
.eggs
lib
lib64
parts
sdist
var
wheels
*.egg-info
.installed.cfg
*.egg
.env*
.git
.gitignore
README.md
Dockerfile
.dockerignore
test
tests
**/*test*.py
.pytest_cache`
}
};
}
/**
 * GitHub Actions templates.
 * Complete CI/CD workflow YAML per stack ('node', 'python'). The `\${{ ... }}`
 * sequences are escaped so they reach the YAML as GitHub expressions rather
 * than being interpolated here.
 * NOTE(review): snyk/actions is pinned to @master — consider pinning a release.
 */
getGitHubActionsTemplates() {
return {
node: `name: CI/CD Pipeline
on:
push:
branches: [ main, develop ]
pull_request:
branches: [ main ]
env:
NODE_VERSION: '20'
REGISTRY: docker.io
IMAGE_NAME: \${{ github.repository }}
jobs:
test:
name: Test and Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: \${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci
- name: Run linter
run: npm run lint
- name: Run type check
run: npm run type-check
- name: Run tests
run: npm test -- --coverage
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
file: ./coverage/lcov.info
security:
name: Security Scan
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Run security audit
run: npm audit --audit-level=high
- name: Run Snyk security scan
uses: snyk/actions/node@master
env:
SNYK_TOKEN: \${{ secrets.SNYK_TOKEN }}
build:
name: Build and Push Docker Image
needs: [test, security]
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Registry
uses: docker/login-action@v3
with:
registry: \${{ env.REGISTRY }}
username: \${{ secrets.DOCKER_USERNAME }}
password: \${{ secrets.DOCKER_PASSWORD }}
- name: Extract metadata
id: meta
uses: docker/metadata-action@v5
with:
images: \${{ env.REGISTRY }}/\${{ env.IMAGE_NAME }}
tags: |
type=ref,event=branch
type=sha,prefix={{branch}}-
type=raw,value=latest,enable={{is_default_branch}}
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: \${{ steps.meta.outputs.tags }}
labels: \${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
deploy:
name: Deploy to Production
needs: [build]
runs-on: ubuntu-latest
environment: production
if: github.ref == 'refs/heads/main'
steps:
- name: Deploy to production
run: |
echo "Deploying to production environment"
# Add your deployment commands here`,
// Python workflow mirrors the node one with Python tooling (pip, flake8,
// black, isort, mypy, pytest, safety, bandit).
python: `name: Python CI/CD Pipeline
on:
push:
branches: [ main, develop ]
pull_request:
branches: [ main ]
env:
PYTHON_VERSION: '3.11'
REGISTRY: docker.io
IMAGE_NAME: \${{ github.repository }}
jobs:
test:
name: Test and Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: \${{ env.PYTHON_VERSION }}
cache: 'pip'
- name: Install dependencies
run: |
pip install -r requirements.txt
pip install -r requirements-dev.txt
- name: Run linter
run: |
flake8 .
black --check .
isort --check-only .
- name: Run type checker
run: mypy .
- name: Run tests
run: pytest --cov=. --cov-report=xml
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
file: ./coverage.xml
security:
name: Security Scan
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Run security scan
run: |
pip install safety bandit
safety check
bandit -r .
build:
name: Build and Push Docker Image
needs: [test, security]
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Registry
uses: docker/login-action@v3
with:
registry: \${{ env.REGISTRY }}
username: \${{ secrets.DOCKER_USERNAME }}
password: \${{ secrets.DOCKER_PASSWORD }}
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: \${{ env.REGISTRY }}/\${{ env.IMAGE_NAME }}:latest
cache-from: type=gha
cache-to: type=gha,mode=max`
};
}
/**
* Vercel templates
*/
getVercelTemplates() {
return {
nextjs: {
"version": 2,
"builds": [
{
"src": "package.json",
"use": "@vercel/next"
}
],
"env": {
"NODE_ENV": "production"
},
"build": {
"env": {
"NODE_ENV": "production"
}
},
"functions": {
"pages/api/**/*.js": {
"maxDuration": 30
}
},
"headers": [
{
"source": "/(.*)",
"headers": [
{
"key": "X-Content-Type-Options",
"value": "nosniff"
},
{
"key": "X-Frame-Options",
"value": "DENY"
},
{
"key": "X-XSS-Protection",
"value": "1; mode=block"
}
]
}
],
"redirects": [
{
"source": "/health",
"destination": "/api/health",
"permanent": false
}
]
},
node: {
"version": 2,
"builds": [
{
"src": "index.js",
"use": "@vercel/node"
}
],
"routes": [
{
"src": "/(.*)",
"dest": "/index.js"
}
],
"env": {
"NODE_ENV": "production"
},
"functions": {
"index.js": {
"maxDuration": 30
}
}
}
};
}
/**
 * Nginx templates.
 * Reverse-proxy server block (proxies to localhost:3000) with security
 * headers, gzip, a lightweight /health endpoint and long-lived static-asset
 * caching. `\$` escapes keep nginx variables literal in the emitted config.
 */
getNginxTemplates() {
return {
default: `server {
listen 80;
server_name localhost;
# Security headers
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
# Gzip compression
gzip on;
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
location / {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_cache_bypass \$http_upgrade;
# Timeouts
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
}
location /health {
access_log off;
return 200 "healthy\\n";
add_header Content-Type text/plain;
}
# Static files caching
location ~* \\.(js|css|png|jpg|jpeg|gif|ico|svg)$ {
expires 1y;
add_header Cache-Control "public, immutable";
}
}`
};
}
/**
* PM2 templates
*/
getPM2Templates() {
return {
node: {
apps: [{
name: 'app',
script: './dist/index.js',
instances: 'max',
exec_mode: 'cluster',
env: {
NODE_ENV: 'production',
PORT: 3000
},
env_production: {
NODE_ENV: 'production',
PORT: 3000
},
error_file: './logs/err.log',
out_file: './logs/out.log',
log_file: './logs/combined.log',
time: true,
max_memory_restart: '500M',
node_args: '--max_old_space_size=512',
kill_timeout: 5000,
wait_ready: true,
listen_timeout: 10000,
health_check_grace_period: 10000
}]
},
python: {
apps: [{
name: 'python-app',
script: 'main.py',
interpreter: 'python3',
instances: 1,
exec_mode: 'fork',
env: {
PYTHONPATH: '.',
ENVIRONMENT: 'production'
},
error_file: './logs/err.log',
out_file: './logs/out.log',
log_file: './logs/combined.log',
time: true,
max_memory_restart: '500M',
kill_timeout: 5000
}]
}
};
}
/**
 * Terraform templates.
 * Minimal AWS ECS infrastructure: VPC, internet gateway, and an ECS cluster
 * with Container Insights enabled. `\${var...}` escapes keep Terraform
 * interpolation literal in the emitted HCL.
 */
getTerraformTemplates() {
return {
aws_ecs: `# AWS ECS Infrastructure
provider "aws" {
region = var.aws_region
}
variable "aws_region" {
description = "AWS region"
type = string
default = "us-west-2"
}
variable "app_name" {
description = "Application name"
type = string
}
variable "environment" {
description = "Environment (dev, staging, prod)"
type = string
}
# VPC
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = "\${var.app_name}-\${var.environment}"
}
}
# Internet Gateway
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Name = "\${var.app_name}-\${var.environment}"
}
}
# ECS Cluster
resource "aws_ecs_cluster" "main" {
name = "\${var.app_name}-\${var.environment}"
setting {
name = "containerInsights"
value = "enabled"
}
}`
};
}
/**
* Monitoring templates
*/
getMonitoringTemplates() {
return {
healthCheck: {
endpoint: '/health',
timeout: 5000,
interval: 30000,
retries: 3
},
logging: {
level: 'info',
format: 'json',
transports: ['console', 'file'],
rotation: {
maxSize: '10m',
maxFiles: 5
}
}
};
}
/**
* Customize Dockerfile based on environment and options
*/
customizeDockerfile(template, environment, options) {
let dockerfile = template;
// Add development tools for non-production
if (environment !== 'production') {
dockerfile = dockerfile.replace(
'RUN npm ci --only=production',
'RUN npm ci'
);
}
// Add custom port if specified
if (options.port) {
dockerfile = dockerfile.replace(/EXPOSE \d+/, `EXPOSE ${options.port}`);
dockerfile = dockerfile.replace(/ENV PORT=\d+/, `ENV PORT=${options.port}`);
}
// Add custom health check
if (options.healthCheck) {
const healthCheckCmd = `HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
CMD ${options.healthCheck} || exit 1`;
dockerfile = dockerfile.replace(/HEALTHCHECK[^\n]+\n[^\n]+/, healthCheckCmd);
}
return dockerfile;
}
/**
* Generate Docker Compose for development
*/
generateDockerCompose(technology, environment, options) {
const services = {
app: {
build: '.',
ports: [`${options.port || 3000}:${options.port || 3000}`],
environment: {
NODE_ENV: environment,
PORT: options.port || 3000
},
volumes: environment === 'development' ? ['.:/app', '/app/node_modules'] : undefined,
depends_on: []
}
};
// Add database if specified
if (options.database) {
services.app.depends_on.push(options.database);
if (options.database === 'postgres') {
services.postgres = {
image: 'postgres:15-alpine',
environment: {
POSTGRES_DB: 'appdb',
POSTGRES_USER: 'user',
POSTGRES_PASSWORD: 'password'
},
volumes: ['postgres_data:/var/lib/postgresql/data'],
ports: ['5432:5432']
};
} else if (options.database === 'redis') {
services.redis = {
image: 'redis:7-alpine',
ports: ['6379:6379'],
volumes: ['redis_data:/data']
};
}
}
const compose = {
version: '3.8',
services,
volumes: {}
};
// Add volumes for databases
if (options.database === 'postgres') {
compose.volumes.postgres_data = {};
}
if (options.database === 'redis') {
compose.volumes.redis_data = {};
}
return compose;
}
/**
* Generate required secrets list
*/
generateRequiredSecrets(technology, environment) {
const baseSecrets = [
'DOCKER_USERNAME',
'DOCKER_PASSWORD'
];
if (technology === 'node') {
baseSecrets.push('NPM_TOKEN');
}
if (environment === 'production') {
baseSecrets.push(
'SENTRY_AUTH_TOKEN',
'DATABASE_URL',
'JWT_SECRET'
);
}
return baseSecrets;
}
/**
* Generate health checks configuration
*/
generateHealthChecks(technology, environment, options) {
return {
http: {
path: '/health',
port: options.port || 3000,
timeout: 5,
interval: 30,
retries: 3,
startPeriod: 60
},
readiness: {
path: '/ready',
port: options.port || 3000,
timeout: 5,
interval: 10,
retries: 3
},
liveness: {
path: '/health',
port: options.port || 3000,
timeout: 5,
interval: 30,
retries: 3,
failureThreshold: 3
}
};
}
/**
* Generate logging configuration
*/
generateLoggingConfig(technology, environment, options) {
return {
level: environment === 'production' ? 'info' : 'debug',
format: 'json',
transports: {
console: {
enabled: true,
colorize: environment !== 'production'
},
file: {
enabled: environment === 'production',
filename: 'app.log',
maxsize: 10485760, // 10MB
maxFiles: 5
},
cloudWatch: {
enabled: environment === 'production' && options.cloudProvider === 'aws',
logGroup: `/aws/ecs/${options.serviceName || 'app'}`,
logStream: '{instanceId}'
}
},
fields: {
timestamp: true,
level: true,
message: true,
service: options.serviceName || 'app',
version: options.version || '1.0.0',
environment
}
};
}
/**
* Generate metrics configuration
*/
generateMetricsConfig(technology, environment, options) {
return {
enabled: true,
port: 9090,
path: '/metrics',
collectDefaultMetrics: true,
customMetrics: {
httpRequestDuration: {
type: 'histogram',
name: 'http_request_duration_seconds',
help: 'HTTP request duration in seconds',
labelNames: ['method', 'route', 'status']
},
httpRequestTotal: {
type: 'counter',
name: 'http_requests_total',
help: 'Total number of HTTP requests',
labelNames: ['method', 'route', 'status']
},
activeConnections: {
type: 'gauge',
name: 'active_connections',
help: 'Number of active connections'
}
}
};
}
/**
* Generate GitHub Actions workflows
*/
generateGitHubActionsWorkflows(technology, environment, options) {
const template = this.templates.githubActions[technology];
// Customize workflow based on options
let workflow = template;
if (options.deployment?.platform === 'vercel') {
workflow += `
deploy-vercel:
name: Deploy to Vercel
needs: [build]
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v4
- uses: amondnet/vercel-action@v25
with:
vercel-token: \${{ secrets.VERCEL_TOKEN }}
vercel-org-id: \${{ secrets.ORG_ID }}
vercel-project-id: \${{ secrets.PROJECT_ID }}
vercel-args: '--prod'`;
}
return {
'.github/workflows/ci-cd.yml': workflow
};
}
/**
 * Generate a bash script that builds (and optionally pushes) the Docker image.
 * The script reads IMAGE_NAME, TAG, REGISTRY and PUSH from its environment;
 * the `\${...}` sequences below are escaped so they resolve in bash, not here.
 * @param {string} technology - Technology stack (currently unused)
 * @param {string} environment - Target environment (currently unused)
 * @param {Object} options - Configuration options (currently unused)
 * @returns {string} Executable bash build script
 */
generateDockerBuildScript(technology, environment, options) {
return `#!/bin/bash
set -e
# Build arguments
IMAGE_NAME="\${IMAGE_NAME:-app}"
TAG="\${TAG:-latest}"
REGISTRY="\${REGISTRY:-docker.io}"
echo "Building Docker image: \${REGISTRY}/\${IMAGE_NAME}:\${TAG}"
# Build with cache
docker build \\
--cache-from \${REGISTRY}/\${IMAGE_NAME}:latest \\
--tag \${REGISTRY}/\${IMAGE_NAME}:\${TAG} \\
--tag \${REGISTRY}/\${IMAGE_NAME}:latest \\
.
echo "Build complete: \${REGISTRY}/\${IMAGE_NAME}:\${TAG}"
# Push to registry if PUSH=true
if [ "\${PUSH:-false}" = "true" ]; then
echo "Pushing to registry..."
docker push \${REGISTRY}/\${IMAGE_NAME}:\${TAG}
docker push \${REGISTRY}/\${IMAGE_NAME}:latest
echo "Push complete"
fi`;
}
}
// Export the agent class for registration by the agent runtime (CommonJS).
module.exports = DevOpsAgent;