@atomist/sdm-core
Version:
Atomist Software Delivery Machine - Implementation
323 lines • 14.2 kB
JavaScript
/*
* Copyright © 2019 Atomist, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// TypeScript async/await down-level helper emitted by tsc: drives the
// generator that tsc produces from an `async` function, threading each
// `yield`ed (awaited) value through the Promise machinery. Do not edit by hand.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    // Wrap a plain value in the Promise implementation P so it can be awaited uniformly.
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        // Resume the generator with the settled value of the previous await.
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        // Propagate a rejected await back into the generator as a thrown error.
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        // Either resolve with the function's final return value, or chain the next await.
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
const automation_client_1 = require("@atomist/automation-client");
const sdm_1 = require("@atomist/sdm");
const fs = require("fs-extra");
const stringify = require("json-stringify-safe");
const _ = require("lodash");
const os = require("os");
const path = require("path");
const container_1 = require("./container");
const provider_1 = require("./provider");
const util_1 = require("./util");
exports.dockerContainerScheduler = (goal, registration) => {
goal.addFulfillment(Object.assign({ goalExecutor: executeDockerJob(goal, registration) }, registration));
};
/**
* Execute container goal using Docker CLI. Wait on completion of
* first container, then kill all the rest.
*/
function executeDockerJob(goal, registration) {
    // tslint:disable-next-line:cyclomatic-complexity
    return (gi) => __awaiter(this, void 0, void 0, function* () {
        const { goalEvent, progressLog, configuration } = gi;
        // Names for containers/network/scratch dirs follow the pattern
        // "sdm-...-<goalSetId prefix>-<goal name>".
        const goalName = goalEvent.uniqueName.split("#")[0].toLowerCase();
        const namePrefix = "sdm-";
        const nameSuffix = `-${goalEvent.goalSetId.slice(0, 7)}-${goalName}`;
        // Per-repo/goal-set scratch area under the Docker-mountable tmp dir.
        const tmpDir = path.join(dockerTmpDir(), goalEvent.repo.owner, goalEvent.repo.name, goalEvent.goalSetId);
        const containerDir = path.join(tmpDir, `${namePrefix}tmp-${automation_client_1.guid()}${nameSuffix}`);
        // Clone the project into containerDir so it can be bind-mounted into the containers.
        return configuration.sdm.projectLoader.doWithProject(Object.assign(Object.assign({}, gi), { readOnly: false, cloneDir: containerDir, cloneOptions: sdm_1.minimalClone(goalEvent.push, { detachHead: true }) }),
        // tslint:disable-next-line:cyclomatic-complexity
        (project) => __awaiter(this, void 0, void 0, function* () {
            var _a;
            // Allow the registration callback (if any) to amend the container spec.
            const spec = Object.assign(Object.assign({}, registration), (!!registration.callback ? yield registration.callback(_.cloneDeep(registration), project, goal, goalEvent, gi) : {}));
            if (!spec.containers || spec.containers.length < 1) {
                throw new Error("No containers defined in GoalContainerSpec");
            }
            // Host directories bind-mounted as the containers' input/output areas.
            const inputDir = path.join(tmpDir, `${namePrefix}tmp-${automation_client_1.guid()}${nameSuffix}`);
            const outputDir = path.join(tmpDir, `${namePrefix}tmp-${automation_client_1.guid()}${nameSuffix}`);
            try {
                yield util_1.prepareInputAndOutput(inputDir, outputDir, gi);
            }
            catch (e) {
                const message = `Failed to prepare input and output for goal ${goalName}: ${e.message}`;
                progressLog.write(message);
                return { code: 1, message };
            }
            const spawnOpts = {
                log: progressLog,
                cwd: containerDir,
            };
            // Dedicated Docker network so the containers can reach each other
            // by name via --network-alias.
            const network = `${namePrefix}network-${automation_client_1.guid()}${nameSuffix}`;
            let networkCreateRes;
            try {
                networkCreateRes = yield sdm_1.spawnLog("docker", ["network", "create", network], spawnOpts);
            }
            catch (e) {
                // Synthesize a spawn-result-shaped failure so the handling below is uniform.
                networkCreateRes = {
                    cmdString: `'docker' 'network' 'create' '${network}'`,
                    code: 128,
                    error: e,
                    output: [undefined, "", e.message],
                    pid: -1,
                    signal: undefined,
                    status: 128,
                    stdout: "",
                    stderr: e.message,
                };
            }
            if (networkCreateRes.code) {
                let message = `Failed to create Docker network '${network}'` +
                    ((networkCreateRes.error) ? `: ${networkCreateRes.error.message}` : "");
                progressLog.write(message);
                try {
                    yield dockerCleanup({ spawnOpts });
                }
                catch (e) {
                    networkCreateRes.code++;
                    message += `; ${e.message}`;
                }
                return { code: networkCreateRes.code, message };
            }
            // Standard Atomist environment variables passed to every container.
            const atomistEnvs = (yield util_1.containerEnvVars(gi.goalEvent, gi)).map(env => `--env=${env.name}=${env.value}`);
            const spawnedContainers = [];
            const failures = [];
            // Start every container in the spec; the first one is the "main" container.
            for (const container of spec.containers) {
                let secrets = {
                    env: [],
                    files: [],
                };
                try {
                    // Resolve secrets for this container; file-based secrets are written
                    // under <inputDir>/.secrets so they can be bind-mounted in.
                    secrets = yield provider_1.prepareSecrets(container, gi);
                    if (!!((_a = secrets) === null || _a === void 0 ? void 0 : _a.files)) {
                        const secretPath = path.join(inputDir, ".secrets");
                        yield fs.ensureDir(secretPath);
                        for (const file of secrets.files) {
                            const secretFile = path.join(secretPath, automation_client_1.guid());
                            file.hostPath = secretFile;
                            yield fs.writeFile(secretFile, file.value);
                        }
                    }
                }
                catch (e) {
                    failures.push(e.message);
                }
                const containerName = `${namePrefix}${container.name}${nameSuffix}`;
                let containerArgs;
                try {
                    containerArgs = containerDockerOptions(container, registration);
                }
                catch (e) {
                    progressLog.write(e.message);
                    failures.push(e.message);
                    break;
                }
                const dockerArgs = [
                    "run",
                    "--tty",
                    "--rm",
                    `--name=${containerName}`,
                    `--volume=${containerDir}:${container_1.ContainerProjectHome}`,
                    `--volume=${inputDir}:${container_1.ContainerInput}`,
                    `--volume=${outputDir}:${container_1.ContainerOutput}`,
                    ...secrets.files.map(f => `--volume=${f.hostPath}:${f.mountPath}`),
                    `--network=${network}`,
                    `--network-alias=${container.name}`,
                    ...containerArgs,
                    ...(registration.dockerOptions || []),
                    ...(container.dockerOptions || []),
                    ...atomistEnvs,
                    ...secrets.env.map(e => `--env=${e.name}=${e.value}`),
                    container.image,
                    ...(container.args || []),
                ];
                // Only the first (main) container starts in the project directory.
                if (spawnedContainers.length < 1) {
                    dockerArgs.splice(5, 0, `--workdir=${container_1.ContainerProjectHome}`);
                }
                const promise = sdm_1.spawnLog("docker", dockerArgs, spawnOpts);
                spawnedContainers.push({ name: containerName, promise });
            }
            if (failures.length > 0) {
                // Something failed during startup: tear everything down and fail the goal.
                try {
                    yield dockerCleanup({
                        network,
                        spawnOpts,
                        containers: spawnedContainers,
                    });
                }
                catch (e) {
                    failures.push(e.message);
                }
                return {
                    code: failures.length,
                    message: `Failed to spawn Docker containers: ${failures.join("; ")}`,
                };
            }
            // Wait only on the main container; the sidecars are killed afterwards.
            const main = spawnedContainers[0];
            try {
                const result = yield main.promise;
                if (result.code) {
                    const msg = `Docker container '${main.name}' failed` + ((result.error) ? `: ${result.error.message}` : "");
                    progressLog.write(msg);
                    failures.push(msg);
                }
            }
            catch (e) {
                const message = `Failed to execute main Docker container '${main.name}': ${e.message}`;
                progressLog.write(message);
                failures.push(message);
            }
            // If the goal ran cleanly and the container wrote a result.json into
            // the output area, use its processed contents as the goal result.
            const outputFile = path.join(outputDir, "result.json");
            let outputResult;
            if ((yield fs.pathExists(outputFile)) && failures.length === 0) {
                try {
                    outputResult = yield util_1.processResult(yield fs.readJson(outputFile), gi);
                }
                catch (e) {
                    const message = `Failed to read output from Docker container '${main.name}': ${e.message}`;
                    progressLog.write(message);
                    failures.push(message);
                }
            }
            // Kill any remaining sidecar containers and remove the network.
            const sidecars = spawnedContainers.slice(1);
            try {
                yield dockerCleanup({
                    network,
                    spawnOpts,
                    containers: sidecars,
                });
            }
            catch (e) {
                failures.push(e.message);
            }
            if (failures.length === 0 && !!outputResult) {
                return outputResult;
            }
            else {
                return {
                    code: failures.length,
                    message: (failures.length > 0) ? failures.join("; ") : "Successfully completed container job",
                };
            }
        }));
    });
}
exports.executeDockerJob = executeDockerJob;
/**
* Generate container specific Docker command-line options.
*
* @param container Goal container spec
* @param registration Container goal registration object
* @return Docker command-line entrypoint, env, p, and volume options
*/
function containerDockerOptions(container, registration) {
    const entryPoint = [];
    if (container.command && container.command.length > 0) {
        // Docker CLI entrypoint must be a binary...
        entryPoint.push(`--entrypoint=${container.command[0]}`);
        // ...so prepend any other command elements to args array.
        // NOTE: this intentionally mutates container.args — the caller
        // (executeDockerJob) later spreads container.args into the final
        // `docker run` invocation, so the mutation is load-bearing.
        if (container.args) {
            container.args.splice(0, 0, ...container.command.slice(1));
        }
        else {
            container.args = container.command.slice(1);
        }
    }
    const envs = (container.env || []).map(env => `--env=${env.name}=${env.value}`);
    const ports = (container.ports || []).map(port => `-p=${port.containerPort}`);
    const volumes = [];
    for (const vm of (container.volumeMounts || [])) {
        const volume = (registration.volumes || []).find(v => v.name === vm.name);
        if (!volume) {
            // Fail fast: a volume mount with no backing volume registration would
            // otherwise produce a broken `docker run` command line.
            // (Fixed grammar: "which not provided" -> "which is not provided".)
            const msg = `Container '${container.name}' references volume '${vm.name}' which is not provided in goal registration ` +
                `volumes: ${stringify(registration.volumes)}`;
            automation_client_1.logger.error(msg);
            throw new Error(msg);
        }
        volumes.push(`--volume=${volume.hostPath.path}:${vm.mountPath}`);
    }
    // Docker command-line entrypoint, env, port, and volume options, in that order.
    return [
        ...entryPoint,
        ...envs,
        ...ports,
        ...volumes,
    ];
}
exports.containerDockerOptions = containerDockerOptions;
/**
* Use a temporary under the home directory so Docker can use it as a
* volume mount.
*/
/**
 * Return the scratch directory used for container volume mounts. A
 * directory under the user's home is used, rather than the system tmp
 * dir, so Docker can bind-mount it.
 */
function dockerTmpDir() {
    const atomistHome = path.join(os.homedir(), ".atomist");
    return path.join(atomistHome, "tmp");
}
exports.dockerTmpDir = dockerTmpDir;
/**
* Kill running Docker containers, then delete network, and
* remove directory container directory. If the copy fails, it throws
* an error. Other errors are logged and ignored.
*
* @param opts See [[CleanupOptions]]
*/
function dockerCleanup(opts) {
    return __awaiter(this, void 0, void 0, function* () {
        // Kill any containers first, so the network has no attached endpoints
        // when we try to remove it.
        if (opts.containers) {
            yield dockerKill(opts.containers, opts.spawnOpts);
        }
        if (!opts.network) {
            return;
        }
        const rmResult = yield sdm_1.spawnLog("docker", ["network", "rm", opts.network], opts.spawnOpts);
        if (rmResult.code) {
            // Network removal failure is logged but not thrown.
            const detail = (rmResult.error) ? `: ${rmResult.error.message}` : "";
            const msg = `Failed to delete Docker network '${opts.network}'` + detail;
            opts.spawnOpts.log.write(msg);
        }
    });
}
/**
* Kill Docker containers. Any errors are caught and logged, but not
* re-thrown.
*
* @param containers Containers to kill, they will be killed by name
* @param opts Options to use when calling spawnLog
*/
function dockerKill(containers, opts) {
    return __awaiter(this, void 0, void 0, function* () {
        try {
            // Kill all containers in parallel, each by its container name.
            const kills = containers.map(c => sdm_1.spawnLog("docker", ["kill", c.name], opts));
            yield Promise.all(kills);
        }
        catch (e) {
            // Log and swallow: cleanup failures must not fail the goal.
            const message = `Failed to kill Docker containers: ${e.message}`;
            opts.log.write(message);
        }
    });
}
//# sourceMappingURL=docker.js.map
;