/*
 * @atomist/sdm — Atomist Software Delivery Machine SDK
 * Compiled JavaScript output (docker container scheduler).
 */
;
/*
* Copyright © 2020 Atomist, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.dockerTmpDir = exports.containerDockerOptions = exports.executeDockerJob = exports.dockerContainerScheduler = void 0;
const string_1 = require("@atomist/automation-client/lib/internal/util/string");
const logger_1 = require("@atomist/automation-client/lib/util/logger");
const fs = require("fs-extra");
const stringify = require("json-stringify-safe");
const _ = require("lodash");
const os = require("os");
const path = require("path");
const minimalClone_1 = require("../../../api-helper/goal/minimalClone");
const child_process_1 = require("../../../api-helper/misc/child_process");
const container_1 = require("./container");
const provider_1 = require("./provider");
const util_1 = require("./util");
/**
 * Add a Docker-CLI fulfillment to the given container goal. The
 * registration's own properties are merged over the generated
 * `goalExecutor`, so a registration may still override it.
 */
const dockerContainerScheduler = (goal, registration) => {
    const fulfillment = Object.assign({ goalExecutor: executeDockerJob(goal, registration) }, registration);
    goal.addFulfillment(fulfillment);
};
exports.dockerContainerScheduler = dockerContainerScheduler;
/**
 * Execute a container goal using the Docker CLI. All containers in the
 * spec are started on a dedicated Docker network; the executor waits on
 * completion of the first container, then kills all the rest, removes
 * the network, and reports the aggregated result.
 *
 * @param goal Container goal being fulfilled
 * @param registration Container goal registration (containers, volumes, Docker options)
 * @return Goal executor that runs the registered containers via `docker run`
 */
function executeDockerJob(goal, registration) {
// tslint:disable-next-line:cyclomatic-complexity
return async (gi) => {
const { goalEvent, progressLog, configuration } = gi;
// Goal name without the "#..." uniqueness suffix, used in names and messages.
const goalName = goalEvent.uniqueName.split("#")[0].toLowerCase();
const namePrefix = "sdm-";
const nameSuffix = `-${goalEvent.goalSetId.slice(0, 7)}-${goalName}`;
// Per-goal-set scratch area under the SDM tmp dir; the project clone,
// input, and output directories all live here so Docker can bind-mount them.
const tmpDir = path.join(dockerTmpDir(), goalEvent.repo.owner, goalEvent.repo.name, goalEvent.goalSetId);
const containerDir = path.join(tmpDir, `${namePrefix}tmp-${string_1.guid()}${nameSuffix}`);
// Clone the project (shallow, detached HEAD) into containerDir; the rest of
// the work happens inside the doWithProject callback below.
return configuration.sdm.projectLoader.doWithProject(Object.assign(Object.assign({}, gi), { readOnly: false, cloneDir: containerDir, cloneOptions: minimalClone_1.minimalClone(goalEvent.push, { detachHead: true }) }),
// tslint:disable-next-line:cyclomatic-complexity
async (project) => {
// Let the optional registration callback amend the container spec for this
// specific project/goal invocation; its result is merged over the registration.
const spec = Object.assign(Object.assign({}, registration), (!!registration.callback ? await registration.callback(_.cloneDeep(registration), project, goal, goalEvent, gi) : {}));
if (!spec.containers || spec.containers.length < 1) {
throw new Error("No containers defined in GoalContainerSpec");
}
// Separate directories mounted into the containers for goal input and output.
const inputDir = path.join(tmpDir, `${namePrefix}tmp-${string_1.guid()}${nameSuffix}`);
const outputDir = path.join(tmpDir, `${namePrefix}tmp-${string_1.guid()}${nameSuffix}`);
try {
await util_1.prepareInputAndOutput(inputDir, outputDir, gi);
}
catch (e) {
const message = `Failed to prepare input and output for goal ${goalName}: ${e.message}`;
progressLog.write(message);
return { code: 1, message };
}
const spawnOpts = {
log: progressLog,
cwd: containerDir,
};
// Dedicated network so containers can reach each other by name (see
// --network-alias below) without touching the host's default bridge.
const network = `${namePrefix}network-${string_1.guid()}${nameSuffix}`;
let networkCreateRes;
try {
networkCreateRes = await child_process_1.spawnLog("docker", ["network", "create", network], spawnOpts);
}
catch (e) {
// Synthesize a spawn-result-shaped failure so the shared error path
// below handles a thrown spawn the same way as a non-zero exit.
networkCreateRes = {
cmdString: `'docker' 'network' 'create' '${network}'`,
code: 128,
error: e,
output: [undefined, "", e.message],
pid: -1,
signal: undefined,
status: 128,
stdout: "",
stderr: e.message,
};
}
if (networkCreateRes.code) {
let message = `Failed to create Docker network '${network}'` +
((networkCreateRes.error) ? `: ${networkCreateRes.error.message}` : "");
progressLog.write(message);
try {
// No containers or network exist yet; cleanup is best-effort.
await dockerCleanup({ spawnOpts });
}
catch (e) {
networkCreateRes.code++;
message += `; ${e.message}`;
}
return { code: networkCreateRes.code, message };
}
// Standard Atomist environment variables injected into every container.
const atomistEnvs = (await util_1.containerEnvVars(gi.goalEvent, gi)).map(env => `--env=${env.name}=${env.value}`);
const spawnedContainers = [];
const failures = [];
for (const container of spec.containers) {
let secrets = {
env: [],
files: [],
};
try {
// Resolve secrets for this container; file secrets are written under
// <inputDir>/.secrets and bind-mounted at their declared mountPath.
secrets = await provider_1.prepareSecrets(container, gi);
if (!!(secrets === null || secrets === void 0 ? void 0 : secrets.files)) {
const secretPath = path.join(inputDir, ".secrets");
await fs.ensureDir(secretPath);
for (const file of secrets.files) {
const secretFile = path.join(secretPath, string_1.guid());
file.hostPath = secretFile;
await fs.writeFile(secretFile, file.value);
}
}
}
catch (e) {
// Secret preparation failure is recorded but the container is still
// launched; the aggregated failures abort the goal further below.
failures.push(e.message);
}
const containerName = `${namePrefix}${container.name}${nameSuffix}`;
let containerArgs;
try {
containerArgs = containerDockerOptions(container, registration);
}
catch (e) {
// Invalid container spec (e.g. unknown volume): stop launching containers.
progressLog.write(e.message);
failures.push(e.message);
break;
}
const dockerArgs = [
"run",
"--tty",
"--rm",
`--name=${containerName}`,
`--volume=${containerDir}:${container_1.ContainerProjectHome}`,
`--volume=${inputDir}:${container_1.ContainerInput}`,
`--volume=${outputDir}:${container_1.ContainerOutput}`,
...secrets.files.map(f => `--volume=${f.hostPath}:${f.mountPath}`),
`--network=${network}`,
`--network-alias=${container.name}`,
...containerArgs,
...(registration.dockerOptions || []),
...(container.dockerOptions || []),
...atomistEnvs,
...secrets.env.map(e => `--env=${e.name}=${e.value}`),
container.image,
...(container.args || []),
];
if (spawnedContainers.length < 1) {
// Only the first (main) container starts in the project directory;
// inserted at index 5, i.e. right after the project volume mount.
dockerArgs.splice(5, 0, `--workdir=${container_1.ContainerProjectHome}`);
}
// Do not await here: all containers run concurrently; completion is
// awaited selectively below (main container first).
const promise = child_process_1.spawnLog("docker", dockerArgs, spawnOpts);
spawnedContainers.push({ name: containerName, promise });
}
if (failures.length > 0) {
// Something went wrong while launching: kill whatever started, drop the
// network, and fail the goal with all accumulated messages.
try {
await dockerCleanup({
network,
spawnOpts,
containers: spawnedContainers,
});
}
catch (e) {
failures.push(e.message);
}
return {
code: failures.length,
message: `Failed to spawn Docker containers: ${failures.join("; ")}`,
};
}
// The first container is the "main" one; the goal result is driven by it.
const main = spawnedContainers[0];
try {
const result = await main.promise;
if (result.code) {
const msg = `Docker container '${main.name}' failed` + ((result.error) ? `: ${result.error.message}` : "");
progressLog.write(msg);
failures.push(msg);
}
}
catch (e) {
const message = `Failed to execute main Docker container '${main.name}': ${e.message}`;
progressLog.write(message);
failures.push(message);
}
// The main container may have written a structured result; only read it
// when the run was otherwise successful.
const outputFile = path.join(outputDir, "result.json");
let outputResult;
if ((await fs.pathExists(outputFile)) && failures.length === 0) {
try {
outputResult = await util_1.processResult(await fs.readJson(outputFile), gi);
}
catch (e) {
const message = `Failed to read output from Docker container '${main.name}': ${e.message}`;
progressLog.write(message);
failures.push(message);
}
}
// Main container is done; kill any remaining sidecars and remove the network.
const sidecars = spawnedContainers.slice(1);
try {
await dockerCleanup({
network,
spawnOpts,
containers: sidecars,
});
}
catch (e) {
failures.push(e.message);
}
if (failures.length === 0 && !!outputResult) {
return outputResult;
}
else {
return {
code: failures.length,
message: (failures.length > 0) ? failures.join("; ") : "Successfully completed container job",
};
}
});
};
}
exports.executeDockerJob = executeDockerJob;
/**
 * Generate container-specific Docker command-line options.
 *
 * @param container Goal container spec
 * @param registration Container goal registration object
 * @return Docker command-line entrypoint, env, -p, and volume options
 * @throws Error if the container references a volume not provided in
 *         the goal registration
 */
function containerDockerOptions(container, registration) {
    const entryPoint = [];
    if (container.command && container.command.length > 0) {
        // Docker CLI --entrypoint only accepts a single binary...
        entryPoint.push(`--entrypoint=${container.command[0]}`);
        // ...so the remaining command elements are prepended to the args
        // array. NOTE: this intentionally mutates container.args — callers
        // (e.g. executeDockerJob) read the updated args when assembling the
        // final `docker run` invocation.
        if (container.args) {
            container.args.splice(0, 0, ...container.command.slice(1));
        }
        else {
            container.args = container.command.slice(1);
        }
    }
    const envs = (container.env || []).map(env => `--env=${env.name}=${env.value}`);
    const ports = (container.ports || []).map(port => `-p=${port.containerPort}`);
    const volumes = [];
    for (const vm of (container.volumeMounts || [])) {
        const volume = (registration.volumes || []).find(v => v.name === vm.name);
        if (!volume) {
            // Fixed grammar in the error message ("which not provided").
            const msg = `Container '${container.name}' references volume '${vm.name}' which is not provided in goal registration ` +
                `volumes: ${stringify(registration.volumes)}`;
            logger_1.logger.error(msg);
            throw new Error(msg);
        }
        volumes.push(`--volume=${volume.hostPath.path}:${vm.mountPath}`);
    }
    return [
        ...entryPoint,
        ...envs,
        ...ports,
        ...volumes,
    ];
}
exports.containerDockerOptions = containerDockerOptions;
/**
 * Return the SDM temporary directory. It lives under the user's home
 * directory so Docker is able to bind-mount paths inside it.
 */
function dockerTmpDir() {
    const atomistHome = path.join(os.homedir(), ".atomist");
    return path.join(atomistHome, "tmp");
}
exports.dockerTmpDir = dockerTmpDir;
/**
 * Clean up Docker resources created for a container goal: kill any
 * provided containers, then remove the goal-specific network. Kill
 * errors are logged and swallowed by dockerKill; a failure to remove
 * the network is written to the progress log but not re-thrown.
 *
 * @param opts See [[CleanupOptions]]
 */
async function dockerCleanup(opts) {
    if (opts.containers) {
        await dockerKill(opts.containers, opts.spawnOpts);
    }
    if (!opts.network) {
        return;
    }
    const rmResult = await child_process_1.spawnLog("docker", ["network", "rm", opts.network], opts.spawnOpts);
    if (rmResult.code) {
        const errSuffix = rmResult.error ? `: ${rmResult.error.message}` : "";
        opts.spawnOpts.log.write(`Failed to delete Docker network '${opts.network}'${errSuffix}`);
    }
}
/**
 * Kill Docker containers by name, in parallel. Any errors are caught
 * and written to the log, never re-thrown, so cleanup can proceed.
 *
 * @param containers Containers to kill, identified by their name property
 * @param opts Options to use when calling spawnLog
 */
async function dockerKill(containers, opts) {
    try {
        await Promise.all(containers.map(c => child_process_1.spawnLog("docker", ["kill", c.name], opts)));
    }
    catch (e) {
        opts.log.write(`Failed to kill Docker containers: ${e.message}`);
    }
}
//# sourceMappingURL=docker.js.map