aws-container-image-scanner
Version:
AWS Container Image Scanner - Enterprise tool for scanning EKS clusters, analyzing Bitnami container dependencies, and generating migration guidance for AWS ECR alternatives with security best practices.
362 lines (323 loc) ⢠12.9 kB
JavaScript
Object.defineProperty(exports, "__esModule", { value: true });
exports.MigrationPlanner = void 0;
const tslib_1 = require("tslib");
const fs = tslib_1.__importStar(require("fs/promises"));
const chalk_1 = tslib_1.__importDefault(require("chalk"));
/**
 * MigrationPlanner
 *
 * Builds migration plans, bash script templates, Helm values, Kubernetes
 * manifest annotations, and markdown guides for moving Bitnami container
 * images to AWS-managed services (RDS, Aurora, OpenSearch, MSK, ...) or to
 * ECR Public mirror images.
 *
 * Relies on the module-level `fs` (fs/promises) and `chalk_1` imports.
 */
class MigrationPlanner {
    /**
     * Build a categorized migration plan from cluster scan results.
     *
     * @param {object} scanResults - Parsed scan output. Expects an optional
     *   `clusters` array whose entries may carry an `images` array of either
     *   plain image-name strings or objects `{ name, awsAlternative? }`.
     * @returns {{databases: object, analytics: object, messaging: object, totalMigrations: number}}
     *   Service-name → alternative maps plus a total count.
     */
    generateMigrationPlan(scanResults) {
        const migrationPlan = {
            databases: {},
            analytics: {},
            messaging: {},
            totalMigrations: 0
        };
        if (scanResults.clusters) {
            scanResults.clusters.forEach((cluster) => {
                if (cluster.images) {
                    cluster.images.forEach((image) => {
                        const imageName = typeof image === 'string' ? image : image.name;
                        if (imageName && imageName.includes('bitnami/')) {
                            const service = this.extractServiceName(imageName);
                            // Prefer an alternative already attached by the scanner;
                            // fall back to the built-in lookup table.
                            const alternative = (typeof image === 'object' && image.awsAlternative) ||
                                this.getAlternative(service);
                            if (alternative) {
                                this.categorizeService(service, alternative, migrationPlan);
                                migrationPlan.totalMigrations++;
                            }
                        }
                    });
                }
            });
        }
        return migrationPlan;
    }
    /**
     * Produce a bash migration script template for a service.
     *
     * @param {string} service - Normalized service name (e.g. 'mysql').
     * @param {{workload?: string}} [config] - Optional overrides; `workload`
     *   names the source Kubernetes pod for the data-export step.
     * @returns {string} A shell script template (must be reviewed before use).
     */
    generateMigrationScript(service, config = {}) {
        const templates = {
            mysql: `#!/bin/bash
# MySQL to Amazon RDS Migration Script
# Generated by Container Image Scanner
echo "Starting MySQL to Amazon RDS Migration..."
# Create RDS instance
aws rds create-db-instance \\
--db-instance-identifier mysql-migration \\
--db-instance-class db.t3.micro \\
--engine mysql \\
--master-username admin \\
--master-user-password CHANGE_ME \\
--allocated-storage 20
# Export data from existing MySQL
kubectl exec -it ${config.workload || 'mysql-pod'} -- mysqldump -u root -p --all-databases > mysql-backup.sql
echo "Migration script generated. Please review and customize before execution."
`,
            postgresql: `#!/bin/bash
# PostgreSQL to Amazon Aurora Migration Script
# Generated by Container Image Scanner
echo "Starting PostgreSQL to Amazon Aurora Migration..."
# Create Aurora cluster
aws rds create-db-cluster \\
--db-cluster-identifier postgres-migration \\
--engine aurora-postgresql \\
--master-username postgres \\
--master-user-password CHANGE_ME
# Export data from existing PostgreSQL
kubectl exec -it ${config.workload || 'postgres-pod'} -- pg_dump -U postgres --all > postgres-backup.sql
echo "Migration script generated. Please review and customize before execution."
`,
            // NOTE: `aws opensearch create-domain` takes --engine-version /
            // --cluster-config; the old --elasticsearch-* flags belong to the
            // deprecated `aws es` command and would fail here.
            elasticsearch: `#!/bin/bash
# Elasticsearch to Amazon OpenSearch Migration Script
# Generated by Container Image Scanner
echo "Starting Elasticsearch to Amazon OpenSearch Migration..."
# Create OpenSearch domain
aws opensearch create-domain \\
--domain-name elasticsearch-migration \\
--engine-version OpenSearch_2.11 \\
--cluster-config InstanceType=t3.small.search,InstanceCount=1
# Install elasticdump for data migration
npm install -g elasticdump
echo "Migration script generated. Please review and customize before execution."
`
        };
        return templates[service] || `#!/bin/bash
# Generic migration template for ${service}
# Generated by Container Image Scanner
echo "Starting migration for ${service}..."
echo "Please customize this script for your specific requirements."
# Add your migration steps here
`;
    }
    /**
     * Annotate a Kubernetes manifest: after every `image:` line referencing a
     * Bitnami image, insert comment lines pointing at the migration target.
     *
     * @param {string} manifest - Raw YAML manifest text.
     * @param {{targetService: string, sourceImage: string}} config - Annotation content.
     * @returns {string} The manifest with migration-note comments inserted.
     */
    updateKubernetesManifest(manifest, config) {
        const lines = manifest.split('\n');
        const updatedLines = [];
        for (const line of lines) {
            updatedLines.push(line);
            if (line.includes('image:') && line.includes('bitnami/')) {
                updatedLines.push(`  # MIGRATION NOTE: Replace with ${config.targetService}`);
                updatedLines.push(`  # Original image: ${config.sourceImage}`);
            }
        }
        return updatedLines.join('\n');
    }
    /**
     * Return Helm values YAML for a service's AWS-managed replacement.
     *
     * @param {string} service - Normalized service name.
     * @param {object} _config - Unused; reserved for future customization.
     * @returns {string} YAML values text.
     */
    generateHelmValues(service, _config) {
        const templates = {
            mysql: `# Amazon RDS for MySQL Configuration
# Generated by Container Image Scanner
rds:
engine: mysql
engineVersion: "8.0"
instanceClass: db.t3.micro
allocatedStorage: 20
multiAZ: true
backupRetentionPeriod: 7
# Security
vpcSecurityGroupIds: []
subnetGroupName: ""
# Monitoring
monitoringInterval: 60
performanceInsightsEnabled: true
`,
            elasticsearch: `# Amazon OpenSearch Service Configuration
# Generated by Container Image Scanner
opensearch:
version: "2.3"
instanceType: t3.small.search
instanceCount: 1
dedicatedMasterEnabled: false
# Storage
ebsEnabled: true
volumeType: gp3
volumeSize: 20
# Security
encryptionAtRestEnabled: true
nodeToNodeEncryptionEnabled: true
`
        };
        return templates[service] || `# Configuration for ${service}
# Generated by Container Image Scanner
${service}:
# Add your configuration here
enabled: true
`;
    }
    /**
     * Rough migration complexity rating for a service.
     * @param {string} service - Normalized service name.
     * @returns {'LOW'|'MEDIUM'|'HIGH'} Defaults to 'MEDIUM' for unknown services.
     */
    assessMigrationComplexity(service) {
        const complexityMap = {
            nginx: 'LOW',
            apache: 'LOW',
            mysql: 'MEDIUM',
            postgresql: 'MEDIUM',
            mongodb: 'MEDIUM',
            redis: 'MEDIUM',
            elasticsearch: 'HIGH',
            kafka: 'HIGH',
            rabbitmq: 'MEDIUM'
        };
        return complexityMap[service] || 'MEDIUM';
    }
    /**
     * Effort estimate (hours, complexity, specialist team) for a migration.
     * @param {string} service - Normalized service name.
     * @returns {{hours: number, complexity: string, specialist: string}}
     */
    getEffortEstimate(service) {
        const estimates = {
            mysql: { hours: 16, complexity: 'MEDIUM', specialist: 'Database' },
            postgresql: { hours: 20, complexity: 'MEDIUM', specialist: 'Database' },
            elasticsearch: { hours: 32, complexity: 'HIGH', specialist: 'Analytics' },
            kafka: { hours: 40, complexity: 'HIGH', specialist: 'Streaming' },
            nginx: { hours: 8, complexity: 'LOW', specialist: 'Container' }
        };
        return estimates[service] || { hours: 16, complexity: 'MEDIUM', specialist: 'General' };
    }
    /**
     * Specialist-team contact routing for a service migration.
     * @param {string} service - Normalized service name.
     * @returns {{team: string, contact: string, escalation: string}}
     */
    getAwsSpecialist(service) {
        const specialists = {
            mysql: {
                team: 'Database',
                contact: 'database-sa@amazon.com',
                escalation: 'Database SA Manager'
            },
            postgresql: {
                team: 'Database',
                contact: 'database-sa@amazon.com',
                escalation: 'Database SA Manager'
            },
            elasticsearch: {
                team: 'Analytics',
                contact: 'analytics-sa@amazon.com',
                escalation: 'Analytics SA Manager'
            },
            kafka: {
                team: 'Streaming',
                contact: 'streaming-sa@amazon.com',
                escalation: 'Streaming SA Manager'
            },
            nginx: {
                team: 'Container',
                contact: 'container-sa@amazon.com',
                escalation: 'Container SA Manager'
            }
        };
        return specialists[service] || {
            team: 'General',
            contact: 'general-sa@amazon.com',
            escalation: 'SA Manager'
        };
    }
    /**
     * Write the plan JSON, a README, and the scripts/manifests directory
     * skeleton under `outputDir`.
     *
     * @param {object} migrationPlan - Plan produced by generateMigrationPlan().
     * @param {string} outputDir - Destination directory (created if absent).
     */
    async writeMigrationFiles(migrationPlan, outputDir) {
        // Use the module-level fs/promises import; the original re-required
        // 'fs' here and shadowed the import with require('fs').promises.
        await fs.mkdir(outputDir, { recursive: true });
        await fs.mkdir(`${outputDir}/scripts`, { recursive: true });
        await fs.mkdir(`${outputDir}/manifests`, { recursive: true });
        await fs.writeFile(`${outputDir}/migration-plan.json`, JSON.stringify(migrationPlan, null, 2));
        const readme = `# Migration Guide
This directory contains migration plans and scripts generated by the Container Image Scanner.
## Files
- \`migration-plan.json\` - Complete migration plan
- \`scripts/\` - Migration scripts for each service
- \`manifests/\` - Updated Kubernetes manifests
## Services to Migrate
${Object.entries(migrationPlan.databases).map(([service, target]) => `- ${service} → ${target}`).join('\n')}
## Next Steps
1. Review the migration plan
2. Customize scripts for your environment
3. Test migrations in non-production
4. Execute production migrations
`;
        await fs.writeFile(`${outputDir}/README.md`, readme);
    }
    /**
     * Extract the bare service name from a container image reference:
     * last path segment, tag stripped (e.g. 'docker.io/bitnami/redis:7' → 'redis').
     *
     * @param {string} imageName - Full image reference.
     * @returns {string} Service name, or 'unknown' when unparsable.
     */
    extractServiceName(imageName) {
        const parts = imageName.split('/');
        const nameWithTag = parts[parts.length - 1] || 'unknown';
        return nameWithTag.split(':')[0] || 'unknown';
    }
    /**
     * File a service/alternative pair into the matching plan bucket.
     * Services outside the known categories are silently ignored.
     *
     * @param {string} service - Normalized service name.
     * @param {string} alternative - Replacement image or AWS service.
     * @param {object} plan - Plan object mutated in place.
     */
    categorizeService(service, alternative, plan) {
        const dbServices = ['mysql', 'postgresql', 'mongodb', 'redis', 'mariadb'];
        const analyticsServices = ['elasticsearch', 'influxdb', 'grafana'];
        const messagingServices = ['kafka', 'rabbitmq', 'nats'];
        if (dbServices.includes(service)) {
            plan.databases[service] = alternative;
        }
        else if (analyticsServices.includes(service)) {
            plan.analytics[service] = alternative;
        }
        else if (messagingServices.includes(service)) {
            plan.messaging[service] = alternative;
        }
    }
    /**
     * CLI entry point: read scan-results JSON, generate a migration guide,
     * and report progress on the console.
     *
     * @param {{input: string, output?: string}} options - Input JSON path and
     *   optional output directory (defaults to './migration-plan').
     * @throws Re-throws any read/parse/write failure after logging it.
     */
    async generateMigrationPlanAsync(options) {
        // NOTE(review): the original console strings contained mojibake
        // (`š`, `ā`); restored to plausible emoji — confirm against upstream.
        console.log(chalk_1.default.bgBlue.white.bold(' 📋 MIGRATION PLANNING '));
        console.log(chalk_1.default.blue('Generating migration guidance\n'));
        try {
            const content = await fs.readFile(options.input, 'utf-8');
            const scanResults = JSON.parse(content);
            const outputDir = options.output || './migration-plan';
            await fs.mkdir(outputDir, { recursive: true });
            await this.generateMigrationGuide(scanResults, outputDir);
            console.log(chalk_1.default.green(`\n✅ Migration plan generated in ${outputDir}/`));
            console.log(chalk_1.default.cyan('Generated files:'));
            console.log(chalk_1.default.cyan(`• migration-guide.md - Migration recommendations`));
        }
        catch (error) {
            console.error(chalk_1.default.red(`❌ Migration planning failed: ${error.message}`));
            throw error;
        }
    }
    /**
     * Write `migration-guide.md` summarizing Bitnami images found in a flat
     * scan-result format (`scanResults.images`, each with an `image` string).
     *
     * @param {{images?: Array<{image: string}>}} scanResults - Flat scan output.
     * @param {string} outputDir - Existing directory to write the guide into.
     */
    async generateMigrationGuide(scanResults, outputDir) {
        // Guard: tolerate scan results without an images array instead of throwing.
        const allImages = Array.isArray(scanResults.images) ? scanResults.images : [];
        const bitnamiImages = allImages.filter(img => img.image.includes('bitnami'));
        let guide = `# Container Migration Guide
Generated on: ${new Date().toISOString()}
Total Bitnami images found: ${bitnamiImages.length}
## Migration Recommendations
`;
        if (bitnamiImages.length === 0) {
            guide += `No Bitnami images found. No migration required.`;
        }
        else {
            guide += `Found ${bitnamiImages.length} Bitnami images that may need migration:
`;
            const imageGroups = new Map();
            bitnamiImages.forEach(img => {
                // Use extractServiceName: the old split('/')[1] picked the wrong
                // segment ('bitnami') for registry-prefixed refs like
                // 'docker.io/bitnami/redis:7'.
                const imageName = this.extractServiceName(img.image);
                if (!imageGroups.has(imageName)) {
                    imageGroups.set(imageName, []);
                }
                imageGroups.get(imageName).push(img);
            });
            for (const [imageName, images] of imageGroups) {
                guide += `### ${imageName}
**Found in**: ${images.length} location(s)
**Alternative**: ${this.getAlternative(imageName)}
**Migration Steps**:
1. Review current configuration
2. Test alternative image in non-production
3. Update container image references
4. Deploy and validate
`;
            }
            guide += `## Next Steps
1. Review each image migration recommendation
2. Test alternatives in development environment
3. Plan migration timeline
4. Update deployment configurations
5. Monitor applications after migration
## AWS Service Alternatives
Consider these AWS managed services as alternatives:
- **Databases**: Amazon RDS, Aurora, DynamoDB, DocumentDB
- **Caching**: Amazon ElastiCache, MemoryDB
- **Search**: Amazon OpenSearch Service
- **Messaging**: Amazon MSK, Amazon MQ, SQS, SNS
- **Monitoring**: Amazon CloudWatch, Managed Grafana, Managed Prometheus
`;
        }
        await fs.writeFile(`${outputDir}/migration-guide.md`, guide);
    }
    /**
     * Map a service name to its ECR Public mirror image, or a pointer to the
     * ECR Public Gallery when no mapping is known.
     *
     * @param {string} imageName - Normalized service name.
     * @returns {string} Replacement image URI or guidance text (always truthy).
     */
    getAlternative(imageName) {
        const alternatives = {
            'nginx': 'public.ecr.aws/nginx/nginx',
            'mysql': 'public.ecr.aws/docker/library/mysql',
            'postgresql': 'public.ecr.aws/docker/library/postgres',
            'redis': 'public.ecr.aws/docker/library/redis',
            'mongodb': 'public.ecr.aws/docker/library/mongo',
            'node': 'public.ecr.aws/docker/library/node',
            'python': 'public.ecr.aws/docker/library/python',
            'apache': 'public.ecr.aws/docker/library/httpd',
            'grafana': 'public.ecr.aws/grafana/grafana',
            'prometheus': 'public.ecr.aws/prom/prometheus'
        };
        return alternatives[imageName] || `Check AWS ECR Public Gallery for ${imageName} alternatives`;
    }
}
// Public export: the planner class (stray empty statement removed).
exports.MigrationPlanner = MigrationPlanner;