// dryrun-ci
// Version: (unspecified)
// DryRun CI - Local GitLab CI/CD pipeline testing tool with Docker execution,
// performance monitoring, and security sandboxing
// 383 lines (382 loc) • 15.3 kB • JavaScript
;
// TypeScript interop helper: re-export `source[sourceKey]` on `target` as
// `targetKey`, preserving live-binding semantics via a getter where possible.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function (target, source, sourceKey, targetKey) {
    if (targetKey === undefined) targetKey = sourceKey;
    var descriptor = Object.getOwnPropertyDescriptor(source, sourceKey);
    // Use a forwarding getter when the property is writable/configurable (or
    // missing) so later mutations of the source module remain visible.
    var needsGetter = !descriptor || ("get" in descriptor ? !source.__esModule : descriptor.writable || descriptor.configurable);
    if (needsGetter) {
        descriptor = { enumerable: true, get: function () { return source[sourceKey]; } };
    }
    Object.defineProperty(target, targetKey, descriptor);
}) : (function (target, source, sourceKey, targetKey) {
    if (targetKey === undefined) targetKey = sourceKey;
    // Legacy engines without Object.create: plain (non-live) copy.
    target[targetKey] = source[sourceKey];
}));
// TypeScript interop helper: attach `value` as the `default` export of the
// synthetic namespace object `target`.
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function (target, value) {
    // Non-writable, enumerable data property — matches ES namespace semantics.
    Object.defineProperty(target, "default", { enumerable: true, value: value });
}) : function (target, value) {
    target["default"] = value;
});
// TypeScript interop helper: wrap a CommonJS module as an ES-module-like
// namespace (own keys re-exported, original module as `default`).
var __importStar = (this && this.__importStar) || function (mod) {
    // Real ES modules already have the right shape — pass through untouched.
    if (mod && mod.__esModule) return mod;
    var result = {};
    if (mod != null) {
        for (var key in mod) {
            if (key !== "default" && Object.prototype.hasOwnProperty.call(mod, key)) {
                __createBinding(result, mod, key);
            }
        }
    }
    __setModuleDefault(result, mod);
    return result;
};
// TypeScript interop helper: normalize a module so that `.default` always
// exists — ES modules pass through, anything else is wrapped.
var __importDefault = (this && this.__importDefault) || function (mod) {
    if (mod && mod.__esModule) {
        return mod;
    }
    return { "default": mod };
};
// Mark this compiled CommonJS output as ES-module interop for downstream consumers.
Object.defineProperty(exports, "__esModule", { value: true });
exports.DockerExecutionEngine = void 0;
// External dependency: Docker Engine API client.
const dockerode_1 = __importDefault(require("dockerode"));
const events_1 = require("events");
const fs = __importStar(require("fs"));
const path = __importStar(require("path"));
// Project-local modules (compiled from TypeScript imports).
const execution_1 = require("../types/execution");
const artifactManager_1 = require("./artifactManager");
/**
 * DockerExecutionEngine runs GitLab CI jobs locally inside Docker containers.
 *
 * Responsibilities:
 *  - container lifecycle: create, start, exec scripts, stop, remove
 *  - resource limits (memory, CPU quota, ulimits) and security options
 *  - artifact collection via ArtifactManager
 *  - progress reporting via EventEmitter events ('docker-ready', 'job-started',
 *    'script-output', 'job-completed', 'job-failed', 'image-pulling', ...)
 */
class DockerExecutionEngine extends events_1.EventEmitter {
    /**
     * @param {object|null} projectContext - Optional project hints (e.g.
     *   `packageJson`, `dockerfile`) used to pick a default image. May be null.
     */
    constructor(projectContext = null) {
        super();
        this.artifactManager = null;
        this.containers = new Map();
        this.docker = new dockerode_1.default();
        this.projectContext = projectContext;
        // Fire-and-forget on purpose: verifyDockerConnection never rejects, it
        // reports through 'docker-ready'/'docker-error' events instead.
        void this.verifyDockerConnection();
    }
    /** Lazily create the artifact manager for one execution. */
    initializeArtifactManager(config) {
        this.artifactManager = new artifactManager_1.ArtifactManager(config);
    }
    /** Ping the Docker daemon and report availability via events. Never throws. */
    async verifyDockerConnection() {
        try {
            await this.docker.ping();
            this.emit('docker-ready');
        }
        catch (error) {
            this.emit('docker-error', new Error('Docker not available. Please ensure Docker is running.'));
        }
    }
    /**
     * Execute one CI job inside a fresh container.
     *
     * @param {object} job - Parsed GitLab job (name, stage, image, script, ...).
     * @param {object} config - Execution config (resources, security, networking,
     *   artifacts, cache).
     * @param {string} workingDirectory - Host directory mounted at /workspace.
     * @returns {Promise<object>} metrics - timing, exit code, container id.
     * @throws the first script/creation error; metrics are still finalized.
     */
    async executeJob(job, config, workingDirectory = process.cwd()) {
        const metrics = {
            startTime: new Date(),
            memoryUsage: { peak: 0, average: 0, samples: [] },
            cpuUsage: { peak: 0, average: 0, samples: [] },
            diskUsage: { read: 0, write: 0, totalSpace: 0 },
            networkUsage: { bytesIn: 0, bytesOut: 0, connections: 0 }
        };
        let container = null;
        try {
            container = await this.createJobContainer(job, config, workingDirectory);
            const containerInfo = await this.getContainerInfo(container);
            this.containers.set(containerInfo.id, containerInfo);
            metrics.containerId = containerInfo.id;
            this.emit('job-started', { job: job.name, containerId: containerInfo.id });
            // Artifact manager only lives for the duration of one execution.
            if (config.artifacts.enableCollection) {
                this.initializeArtifactManager(config.artifacts);
            }
            await this.executeJobScripts(container, job, config);
            if (config.artifacts.enableCollection && job.artifacts?.paths && this.artifactManager) {
                await this.artifactManager.collectArtifactsFromContainer(containerInfo.id, job, `execution-${Date.now()}`);
            }
            metrics.exitCode = 0;
            this.emit('job-completed', { job: job.name, metrics });
        }
        catch (error) {
            metrics.exitCode = 1;
            this.emit('job-failed', { job: job.name, error, metrics });
            throw error;
        }
        finally {
            // Finalize timing and release resources whether the job passed or not.
            metrics.endTime = new Date();
            metrics.duration = metrics.endTime.getTime() - metrics.startTime.getTime();
            if (container) {
                await this.cleanupContainer(container);
            }
            if (this.artifactManager) {
                this.artifactManager.cleanup();
                this.artifactManager = null;
            }
        }
        return metrics;
    }
    /**
     * Create and start the container a job will run in, applying resource,
     * networking, and security settings from `config`.
     */
    async createJobContainer(job, config, workingDirectory) {
        const image = job.image || this.getDefaultImage(job);
        await this.pullImageIfNeeded(image);
        const environment = this.prepareEnvironment(job, config);
        const volumes = this.prepareVolumes(workingDirectory, config);
        const containerConfig = {
            Image: image,
            Env: environment,
            WorkingDir: '/workspace',
            HostConfig: {
                Binds: volumes,
                Memory: this.parseMemoryLimit(config.resources.memory),
                CpuQuota: this.parseCpuLimit(config.resources.cpu),
                NetworkMode: config.networking.allowInternet ? 'bridge' : 'none',
                ReadonlyRootfs: config.security === execution_1.SecurityLevel.STRICT,
                SecurityOpt: this.getSecurityOptions(config.security),
                Ulimits: this.getResourceLimits(config.resources),
                // Optional chaining: customDNS may be absent from the config.
                Dns: config.networking.customDNS?.length ? config.networking.customDNS : undefined
            },
            AttachStdout: true,
            AttachStderr: true,
            Tty: false
        };
        // Merge level-specific restrictions on top of the base HostConfig.
        // NOTE: this intentionally overwrites SecurityOpt/ReadonlyRootfs/
        // NetworkMode already set above with equivalent values.
        if (config.security !== execution_1.SecurityLevel.NONE) {
            containerConfig.HostConfig = {
                ...containerConfig.HostConfig,
                ...this.getSecurityRestrictions(config.security)
            };
        }
        const container = await this.docker.createContainer(containerConfig);
        await container.start();
        return container;
    }
    /** Extra HostConfig fields for each security level. */
    getSecurityRestrictions(security) {
        switch (security) {
            case execution_1.SecurityLevel.STRICT:
                // FIXME: 'seccomp:unconfined' DISABLES the seccomp filter and so
                // weakens, not strengthens, isolation — review whether a custom
                // seccomp profile was intended here.
                return {
                    SecurityOpt: ['no-new-privileges:true', 'seccomp:unconfined'],
                    ReadonlyRootfs: true,
                    NetworkMode: 'none'
                };
            case execution_1.SecurityLevel.BASIC:
                return {
                    SecurityOpt: ['no-new-privileges:true'],
                    ReadonlyRootfs: false
                };
            default:
                return {};
        }
    }
    /**
     * Run before_script + script + after_script sequentially via `docker exec`,
     * streaming output as 'script-output' events. Throws on the first non-zero
     * exit code or timeout.
     */
    async executeJobScripts(container, job, config) {
        const scripts = [
            ...(job.before_script || []),
            ...(job.script || []),
            ...(job.after_script || [])
        ];
        for (const script of scripts) {
            this.emit('script-started', { script, job: job.name });
            try {
                const exec = await container.exec({
                    Cmd: ['sh', '-c', script],
                    AttachStdout: true,
                    AttachStderr: true
                });
                const stream = await exec.start({});
                const output = [];
                stream.on('data', (chunk) => {
                    const data = chunk.toString();
                    output.push(data);
                    this.emit('script-output', { script, output: data, job: job.name });
                });
                // Wait for completion; the timeout timer is cleared on settle so it
                // neither fires late nor keeps the event loop alive.
                await new Promise((resolve, reject) => {
                    const timer = setTimeout(() => {
                        reject(new Error(`Script timeout after ${config.resources.executionTimeout}s`));
                    }, config.resources.executionTimeout * 1000);
                    stream.on('end', () => {
                        clearTimeout(timer);
                        resolve();
                    });
                    stream.on('error', (err) => {
                        clearTimeout(timer);
                        reject(err);
                    });
                });
                const inspectResult = await exec.inspect();
                if (inspectResult.ExitCode !== 0) {
                    throw new Error(`Script failed with exit code ${inspectResult.ExitCode}`);
                }
                this.emit('script-completed', { script, output, job: job.name });
            }
            catch (error) {
                this.emit('script-failed', { script, error, job: job.name });
                throw error;
            }
        }
    }
    /**
     * Copy artifact paths out of a container as tar files under `outputPath`.
     * Best-effort: failures are reported via 'artifact-failed' events, not thrown.
     */
    async collectArtifacts(container, artifactPaths, outputPath) {
        for (const artifactPath of artifactPaths) {
            try {
                const stream = await container.getArchive({ path: artifactPath });
                const outputFile = path.join(outputPath, `artifacts-${Date.now()}.tar`);
                await fs.promises.mkdir(path.dirname(outputFile), { recursive: true });
                const writeStream = fs.createWriteStream(outputFile);
                stream.pipe(writeStream);
                await new Promise((resolve, reject) => {
                    // Watch BOTH streams: pipe() does not forward read-side errors.
                    stream.on('error', reject);
                    writeStream.on('finish', resolve);
                    writeStream.on('error', reject);
                });
                this.emit('artifact-collected', { path: artifactPath, output: outputFile });
            }
            catch (error) {
                this.emit('artifact-failed', { path: artifactPath, error });
            }
        }
    }
    /** Pull `image` only if it is not already present locally. */
    async pullImageIfNeeded(image) {
        try {
            await this.docker.getImage(image).inspect();
        }
        catch (error) {
            // inspect() failing means the image is missing — pull it.
            this.emit('image-pulling', { image });
            await new Promise((resolve, reject) => {
                this.docker.pull(image, (err, stream) => {
                    if (err)
                        return reject(err);
                    this.docker.modem.followProgress(stream, (err) => {
                        if (err)
                            return reject(err);
                        resolve();
                    }, (event) => {
                        this.emit('image-pull-progress', { image, event });
                    });
                });
            });
            this.emit('image-pulled', { image });
        }
    }
    /**
     * Build the `KEY=value` environment list: standard GitLab CI variables
     * (with local placeholders) plus job-level variables.
     */
    prepareEnvironment(job, _config) {
        void _config; // Intentionally unused
        const env = [
            'CI=true',
            'GITLAB_CI=true',
            'CI_JOB_NAME=' + job.name,
            'CI_JOB_STAGE=' + job.stage,
            'CI_COMMIT_SHA=local-testing',
            'CI_COMMIT_REF_NAME=local-branch',
            'CI_PROJECT_DIR=/workspace'
        ];
        if (job.variables) {
            Object.entries(job.variables).forEach(([key, value]) => {
                env.push(`${key}=${value}`);
            });
        }
        return env;
    }
    /** Bind-mount list: workspace always, cache volume when enabled. */
    prepareVolumes(workingDirectory, config) {
        const volumes = [
            `${workingDirectory}:/workspace:rw`
        ];
        if (config.cache.enableCache) {
            volumes.push(`${config.cache.cachePath}:/cache:rw`);
        }
        return volumes;
    }
    /** Pick a default image from project context when the job specifies none. */
    getDefaultImage(_job) {
        void _job; // Intentionally unused
        if (this.projectContext?.packageJson) {
            return 'node:18-alpine';
        }
        if (this.projectContext?.dockerfile) {
            return 'alpine:latest';
        }
        return 'ubuntu:20.04';
    }
    /**
     * Parse strings like "512m", "1G", "2048kb" into bytes.
     * Unrecognized input falls back to 512 MB.
     */
    parseMemoryLimit(memory) {
        const match = memory.match(/^(\d+)([kmgtKMGT]?)([bB]?)$/);
        if (!match)
            return 512 * 1024 * 1024; // 512MB default
        const value = parseInt(match[1], 10);
        const unit = match[2].toLowerCase();
        switch (unit) {
            case 'k': return value * 1024;
            case 'm': return value * 1024 * 1024;
            case 'g': return value * 1024 * 1024 * 1024;
            case 't': return value * 1024 * 1024 * 1024 * 1024;
            default: return value;
        }
    }
    /** Convert a CPU count ("1.5") to Docker's CpuQuota (100000 = one CPU). */
    parseCpuLimit(cpu) {
        return Math.floor(parseFloat(cpu) * 100000); // Docker CPU quota format
    }
    /** SecurityOpt entries for the given security level. */
    getSecurityOptions(security) {
        switch (security) {
            case execution_1.SecurityLevel.STRICT:
                // FIXME: 'seccomp:unconfined' disables seccomp filtering — see
                // getSecurityRestrictions; likely not what STRICT intends.
                return ['no-new-privileges:true', 'seccomp:unconfined'];
            case execution_1.SecurityLevel.BASIC:
                return ['no-new-privileges:true'];
            default:
                return [];
        }
    }
    /** Fixed ulimits applied to every job container (limits arg unused today). */
    getResourceLimits(_limits) {
        void _limits; // Intentionally unused
        return [
            { Name: 'nofile', Soft: 1024, Hard: 1024 },
            { Name: 'nproc', Soft: 1024, Hard: 1024 }
        ];
    }
    /** Summarize a container's inspect() output into a plain record. */
    async getContainerInfo(container) {
        const inspect = await container.inspect();
        return {
            id: inspect.Id,
            image: inspect.Config.Image,
            status: inspect.State.Status,
            ports: Object.keys(inspect.Config.ExposedPorts || {}).map(p => parseInt(p.split('/')[0], 10)),
            volumes: (inspect.Mounts || []).map((mount) => ({
                hostPath: mount.Source,
                containerPath: mount.Destination,
                readOnly: mount.RW === false
            })),
            environment: inspect.Config.Env?.reduce((acc, env) => {
                // Split on the FIRST '=' only so values containing '=' survive
                // (e.g. FOO=a=b must map to { FOO: 'a=b' }).
                const sep = env.indexOf('=');
                const key = sep === -1 ? env : env.slice(0, sep);
                acc[key] = sep === -1 ? undefined : env.slice(sep + 1);
                return acc;
            }, {}) || {}
        };
    }
    /** Stop and remove one container; failures are reported, not thrown. */
    async cleanupContainer(container) {
        try {
            await container.stop();
            await container.remove();
            this.emit('container-cleaned', { containerId: container.id });
        }
        catch (error) {
            this.emit('cleanup-failed', { containerId: container.id, error });
        }
    }
    /** Placeholder report until a real SecuritySandbox exists. */
    async getSecurityReport(containerId) {
        // Return a basic security report since SecuritySandbox is not implemented
        return {
            containerId,
            securityLevel: execution_1.SecurityLevel.BASIC,
            vulnerabilities: [],
            networkActivity: [],
            fileSystemAccess: [],
            privilegeEscalation: false
        };
    }
    /** All containers created by this engine (tracked in-memory). */
    async listContainers() {
        return Array.from(this.containers.values());
    }
    /** Force-kill and remove every tracked container, then clear the registry. */
    async killAllContainers() {
        const promises = Array.from(this.containers.keys()).map(async (containerId) => {
            try {
                const container = this.docker.getContainer(containerId);
                await container.kill();
                await container.remove();
            }
            catch (error) {
                // Best-effort: container might already be stopped/removed.
            }
        });
        await Promise.all(promises);
        this.containers.clear();
        this.emit('all-containers-killed');
    }
}
exports.DockerExecutionEngine = DockerExecutionEngine;