/*
* Copyright © 2019 Atomist, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
Object.defineProperty(exports, "__esModule", { value: true });
const automation_client_1 = require("@atomist/automation-client");
const poll_1 = require("@atomist/automation-client/lib/internal/util/poll");
const sdm_1 = require("@atomist/sdm");
const k8s = require("@kubernetes/client-node");
const fs = require("fs-extra");
const stringify = require("json-stringify-safe");
const _ = require("lodash");
const os = require("os");
const path = require("path");
const stream_1 = require("stream");
const config_1 = require("../../pack/k8s/config");
const KubernetesFulfillmentGoalScheduler_1 = require("../../pack/k8s/KubernetesFulfillmentGoalScheduler");
const KubernetesGoalScheduler_1 = require("../../pack/k8s/KubernetesGoalScheduler");
const service_1 = require("../../pack/k8s/service");
const array_1 = require("../../util/misc/array");
const goalCaching_1 = require("../cache/goalCaching");
const container_1 = require("./container");
const provider_1 = require("./provider");
const util_1 = require("./util");
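/**
 * Schedule a container goal to run as a Kubernetes job: add a
 * fulfillment that executes the job and a fulfillment callback that
 * prepares the goal event for the [[KubernetesGoalScheduler]].
 */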
exports.k8sContainerScheduler = (goal, registration) => {
goal.addFulfillment(Object.assign({ goalExecutor: executeK8sJob() }, registration));
goal.addFulfillmentCallback({
goal,
callback: k8sFulfillmentCallback(goal, registration),
});
};
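/* Illustrative sketch (not part of this module): how a container goal and
 * registration might be handed to k8sContainerScheduler. The goal and
 * registration values below are hypothetical.
 *
 *   const goal = new container_1.Container({ displayName: "mvn-build" });
 *   const registration = {
 *       name: "mvn-build",
 *       containers: [{ name: "maven", image: "maven:3.6-jdk-11", args: ["mvn", "package"] }],
 *   };
 *   exports.k8sContainerScheduler(goal, registration);
 */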
/**
* Add Kubernetes job scheduling information to SDM goal event data
* for use by the [[KubernetesGoalScheduler]].
*/
function k8sFulfillmentCallback(goal, registration) {
// tslint:disable-next-line:cyclomatic-complexity
return (goalEvent, repoContext) => __awaiter(this, void 0, void 0, function* () {
var _a;
let spec = _.cloneDeep(registration);
if (registration.callback) {
spec = yield repoContext.configuration.sdm.projectLoader.doWithProject(Object.assign(Object.assign({}, repoContext), { readOnly: true, cloneOptions: sdm_1.minimalClone(goalEvent.push, { detachHead: true }) }), (p) => __awaiter(this, void 0, void 0, function* () {
return Object.assign(Object.assign({}, spec), (yield registration.callback(_.cloneDeep(registration), p, goal, goalEvent, repoContext)) || {});
}));
}
if (!spec.containers || spec.containers.length < 1) {
throw new Error("No containers defined in K8sGoalContainerSpec");
}
// Preserve the container registration in the goal data before it gets munged with internals
let data = parseGoalEventData(goalEvent);
let newData = {};
delete spec.callback;
_.set(newData, container_1.ContainerRegistrationGoalDataKey, spec);
goalEvent.data = JSON.stringify(_.merge(data, newData));
if (spec.containers[0].workingDir === "") {
delete spec.containers[0].workingDir;
}
else if (!spec.containers[0].workingDir) {
spec.containers[0].workingDir = container_1.ContainerProjectHome;
}
const goalSchedulers = array_1.toArray(repoContext.configuration.sdm.goalScheduler) || [];
const k8sScheduler = goalSchedulers.find(gs => gs instanceof KubernetesGoalScheduler_1.KubernetesGoalScheduler);
if (!k8sScheduler) {
throw new Error("Failed to find KubernetesGoalScheduler in goal schedulers");
}
if (!k8sScheduler.podSpec) {
throw new Error("KubernetesGoalScheduler has no podSpec defined");
}
const containerEnvs = yield util_1.containerEnvVars(goalEvent, repoContext);
const projectVolume = `project-${automation_client_1.guid().split("-")[0]}`;
const inputVolume = `input-${automation_client_1.guid().split("-")[0]}`;
const outputVolume = `output-${automation_client_1.guid().split("-")[0]}`;
const ioVolumes = [
{
name: projectVolume,
emptyDir: {},
},
{
name: inputVolume,
emptyDir: {},
},
{
name: outputVolume,
emptyDir: {},
},
];
const ioVolumeMounts = [
{
mountPath: container_1.ContainerProjectHome,
name: projectVolume,
},
{
mountPath: container_1.ContainerInput,
name: inputVolume,
},
{
mountPath: container_1.ContainerOutput,
name: outputVolume,
},
];
const copyContainer = _.cloneDeep(k8sScheduler.podSpec.spec.containers[0]);
delete copyContainer.lifecycle;
delete copyContainer.livenessProbe;
delete copyContainer.readinessProbe;
copyContainer.name = `container-goal-init-${automation_client_1.guid().split("-")[0]}`;
copyContainer.env = [
...(copyContainer.env || []),
...KubernetesGoalScheduler_1.k8sJobEnv(k8sScheduler.podSpec, goalEvent, repoContext.context),
...containerEnvs,
{
name: "ATOMIST_ISOLATED_GOAL_INIT",
value: "true",
},
{
name: "ATOMIST_CONFIG",
value: JSON.stringify({
cluster: {
enabled: false,
},
ws: {
enabled: false,
},
}),
},
];
spec.initContainers = spec.initContainers || [];
const parameters = JSON.parse(goalEvent.parameters || "{}");
const secrets = yield provider_1.prepareSecrets(_.merge({}, registration.containers[0], (parameters["@atomist/sdm/secrets"] || {})), repoContext);
delete spec.containers[0].secrets;
[...spec.containers, ...spec.initContainers].forEach(c => {
c.env = [
...(secrets.env || []),
...containerEnvs,
...(c.env || []),
];
});
if (!!((_a = secrets) === null || _a === void 0 ? void 0 : _a.files)) {
for (const file of secrets.files) {
const fileName = path.basename(file.mountPath);
const dirname = path.dirname(file.mountPath);
let secretName = `secret-${automation_client_1.guid().split("-")[0]}`;
const vm = (copyContainer.volumeMounts || [])
.find(m => m.mountPath === dirname);
if (!!vm) {
secretName = vm.name;
}
else {
copyContainer.volumeMounts = [
...(copyContainer.volumeMounts || []),
{
mountPath: dirname,
name: secretName,
},
];
spec.volumes = [
...(spec.volumes || []),
{
name: secretName,
emptyDir: {},
},
];
}
[...spec.containers, ...spec.initContainers].forEach((c) => {
c.volumeMounts = [
...(c.volumeMounts || []),
{
mountPath: file.mountPath,
name: secretName,
subPath: fileName,
},
];
});
}
}
spec.initContainers = [
copyContainer,
...spec.initContainers,
];
const serviceSpec = {
type: service_1.K8sServiceRegistrationType.K8sService,
spec: {
container: spec.containers,
initContainer: spec.initContainers,
volume: [
...ioVolumes,
...(spec.volumes || []),
],
volumeMount: ioVolumeMounts,
},
};
// Store k8s service registration in goal data
data = JSON.parse(goalEvent.data || "{}");
newData = {};
_.set(newData, `${sdm_1.ServiceRegistrationGoalDataKey}.${registration.name}`, serviceSpec);
goalEvent.data = JSON.stringify(_.merge(data, newData));
return goalEvent;
});
}
exports.k8sFulfillmentCallback = k8sFulfillmentCallback;
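/* For orientation, after this callback runs the goal event data contains
 * both the cleaned-up container registration and the Kubernetes service
 * registration, roughly as below (keys shown via the constants used above,
 * values hypothetical):
 *
 *   {
 *       [container_1.ContainerRegistrationGoalDataKey]: { name: "mvn-build", containers: [...] },
 *       [sdm_1.ServiceRegistrationGoalDataKey]: {
 *           "mvn-build": {
 *               type: service_1.K8sServiceRegistrationType.K8sService,
 *               spec: { container: [...], initContainer: [...], volume: [...], volumeMount: [...] },
 *           },
 *       },
 *   }
 */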
/**
* Get container registration from goal event data, use
 * [[k8sFulfillmentCallback]] to get a goal event schedulable by a
* [[KubernetesGoalScheduler]], then schedule the goal using that
* scheduler.
*/
exports.scheduleK8sJob = (gi) => __awaiter(void 0, void 0, void 0, function* () {
const { goalEvent } = gi;
const { uniqueName } = goalEvent;
const data = parseGoalEventData(goalEvent);
const containerReg = data["@atomist/sdm/container"];
if (!containerReg) {
throw new Error(`Goal ${uniqueName} event data has no container spec: ${goalEvent.data}`);
}
const goalSchedulers = array_1.toArray(gi.configuration.sdm.goalScheduler) || [];
const k8sScheduler = goalSchedulers.find(gs => gs instanceof KubernetesGoalScheduler_1.KubernetesGoalScheduler);
if (!k8sScheduler) {
throw new Error(`Failed to find KubernetesGoalScheduler in goal schedulers: ${stringify(goalSchedulers)}`);
}
// the k8sFulfillmentCallback may already have been called, so remove any service registration it added
delete data[sdm_1.ServiceRegistrationGoalDataKey];
goalEvent.data = JSON.stringify(data);
try {
const schedulableGoalEvent = yield k8sFulfillmentCallback(gi.goal, containerReg)(goalEvent, gi);
const scheduleResult = yield k8sScheduler.schedule(Object.assign(Object.assign({}, gi), { goalEvent: schedulableGoalEvent }));
if (scheduleResult.code) {
return Object.assign(Object.assign({}, scheduleResult), { message: `Failed to schedule container goal ${uniqueName}: ${scheduleResult.message}` });
}
schedulableGoalEvent.state = sdm_1.SdmGoalState.in_process;
return schedulableGoalEvent;
}
catch (e) {
const message = `Failed to schedule container goal ${uniqueName} as Kubernetes job: ${e.message}`;
gi.progressLog.write(message);
return { code: 1, message };
}
});
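/* A minimal sketch of wiring scheduleK8sJob up as a goal executor, using
 * the same builder API as K8sContainerFulfiller below (the goal and name
 * are hypothetical):
 *
 *   myGoal.with({
 *       name: "k8s-job-scheduler",
 *       goalExecutor: exports.scheduleK8sJob,
 *   });
 */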
/**
 * Execute the container goal inside the scheduled pod: wait for the
 * main container to exit, streaming its logs to the progress log.
 */
function executeK8sJob() {
// tslint:disable-next-line:cyclomatic-complexity
return (gi) => __awaiter(this, void 0, void 0, function* () {
const { goalEvent, progressLog, configuration, id, credentials } = gi;
const projectDir = process.env.ATOMIST_PROJECT_DIR || container_1.ContainerProjectHome;
const inputDir = process.env.ATOMIST_INPUT_DIR || container_1.ContainerInput;
const outputDir = process.env.ATOMIST_OUTPUT_DIR || container_1.ContainerOutput;
const data = parseGoalEventData(goalEvent);
if (!data[container_1.ContainerRegistrationGoalDataKey]) {
throw new Error(`Goal ${gi.goal.uniqueName} has no Kubernetes container registration: ${gi.goalEvent.data}`);
}
const registration = data[container_1.ContainerRegistrationGoalDataKey];
if (process.env.ATOMIST_ISOLATED_GOAL_INIT === "true") {
return configuration.sdm.projectLoader.doWithProject(Object.assign(Object.assign({}, gi), { readOnly: false, cloneDir: projectDir, cloneOptions: sdm_1.minimalClone(goalEvent.push, { detachHead: true }) }), (project) => __awaiter(this, void 0, void 0, function* () {
var _a;
try {
yield util_1.prepareInputAndOutput(inputDir, outputDir, gi);
}
catch (e) {
const message = `Failed to prepare input and output for goal ${goalEvent.name}: ${e.message}`;
progressLog.write(message);
return { code: 1, message };
}
const secrets = yield provider_1.prepareSecrets(_.merge({}, registration.containers[0], ((gi.parameters || {})["@atomist/sdm/secrets"] || {})), gi);
if (!!((_a = secrets) === null || _a === void 0 ? void 0 : _a.files)) {
for (const file of secrets.files) {
yield fs.writeFile(file.mountPath, file.value);
}
}
goalEvent.state = sdm_1.SdmGoalState.in_process;
return goalEvent;
}));
}
let containerName = _.get(registration, "containers[0].name");
if (!containerName) {
const msg = `Failed to get main container name from goal registration: ${stringify(registration)}`;
progressLog.write(msg);
let svcSpec;
try {
svcSpec = _.get(data, `${sdm_1.ServiceRegistrationGoalDataKey}.${registration.name}.spec`);
}
catch (e) {
const message = `Failed to parse Kubernetes spec from goal data '${goalEvent.data}': ${e.message}`;
progressLog.write(message);
return { code: 1, message };
}
containerName = _.get(svcSpec, "container[1].name");
if (!containerName) {
const message = `Failed to get main container name from either goal registration or data: '${goalEvent.data}'`;
progressLog.write(message);
return { code: 1, message };
}
}
const ns = yield KubernetesGoalScheduler_1.readNamespace();
const podName = os.hostname();
let kc;
try {
kc = config_1.loadKubeConfig();
}
catch (e) {
const message = `Failed to load Kubernetes configuration: ${e.message}`;
progressLog.write(message);
return { code: 1, message };
}
const container = {
config: kc,
name: containerName,
pod: podName,
ns,
log: progressLog,
};
try {
yield containerStarted(container);
}
catch (e) {
progressLog.write(e.message);
return { code: 1, message: e.message };
}
const log = followK8sLog(container);
const status = { code: 0, message: `Container '${containerName}' completed successfully` };
try {
const podStatus = yield containerWatch(container);
progressLog.write(`Container '${containerName}' exited: ${stringify(podStatus)}`);
}
catch (e) {
const message = `Container '${containerName}' failed: ${e.message}`;
progressLog.write(message);
status.code++;
status.message = message;
}
finally {
// Give the logs some time to be delivered
yield poll_1.sleep(1000);
log.abort();
}
const outputFile = path.join(outputDir, "result.json");
let outputResult;
if ((yield fs.pathExists(outputFile)) && status.code === 0) {
try {
outputResult = yield util_1.processResult(yield fs.readJson(outputFile), gi);
}
catch (e) {
const message = `Failed to read output from container: ${e.message}`;
progressLog.write(message);
status.code++;
status.message += ` but f${message.slice(1)}`;
}
}
const cacheEntriesToPut = [
...(registration.output || []),
...((gi.parameters || {})[goalCaching_1.CacheOutputGoalDataKey] || []),
];
if (cacheEntriesToPut.length > 0) {
try {
const project = automation_client_1.GitCommandGitProject.fromBaseDir(id, projectDir, credentials, () => __awaiter(this, void 0, void 0, function* () {
}));
const cp = goalCaching_1.cachePut({ entries: cacheEntriesToPut });
yield cp.listener(project, gi, sdm_1.GoalProjectListenerEvent.after);
}
catch (e) {
const message = `Failed to put cache output from container: ${e.message}`;
progressLog.write(message);
status.code++;
status.message += ` but f${message.slice(1)}`;
}
}
return outputResult || status;
});
}
exports.executeK8sJob = executeK8sJob;
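/* executeK8sJob behaves differently depending on environment variables set
 * on the scheduled pod by k8sFulfillmentCallback above:
 *
 *   ATOMIST_ISOLATED_GOAL_INIT=true  init container: clone the project,
 *                                    prepare the input/output directories,
 *                                    and write secret files
 *   (unset)                          main flow: resolve the main container
 *                                    name, follow its log, and wait for it
 *                                    to terminate
 */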
/**
* Read and parse container goal registration from goal event data.
*/
function parseGoalEventData(goalEvent) {
if (!goalEvent || !goalEvent.data) {
return {};
}
const goalName = goalEvent.uniqueName;
let data;
try {
data = JSON.parse(goalEvent.data);
}
catch (e) {
e.message = `Failed to parse goal event data for ${goalName} as JSON '${goalEvent.data}': ${e.message}`;
throw e;
}
return data;
}
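/* Example behavior of parseGoalEventData (hypothetical goal events):
 *
 *   parseGoalEventData({ uniqueName: "build#goals.ts:1" });                  // => {}
 *   parseGoalEventData({ uniqueName: "build#goals.ts:1", data: '{"a":1}' }); // => { a: 1 }
 *   parseGoalEventData({ uniqueName: "build#goals.ts:1", data: "{oops" });   // throws
 */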
/**
* If running as isolated goal, use [[executeK8sJob]] to execute the
* goal. Otherwise, schedule the goal execution as a Kubernetes job
 * using [[scheduleK8sJob]].
*/
const containerExecutor = gi => (process.env.ATOMIST_ISOLATED_GOAL) ? executeK8sJob()(gi) : exports.scheduleK8sJob(gi);
/**
* Restore cache input entries before fulfilling goal.
*/
const containerFulfillerCacheRestore = {
name: "ContainerFulfillerCacheRestore",
events: [sdm_1.GoalProjectListenerEvent.before],
listener: (project, gi, event) => __awaiter(void 0, void 0, void 0, function* () {
const data = parseGoalEventData(gi.goalEvent);
if (!data[container_1.ContainerRegistrationGoalDataKey]) {
throw new Error(`Goal ${gi.goal.uniqueName} has no Kubernetes container registration: ${gi.goalEvent.data}`);
}
const registration = data[container_1.ContainerRegistrationGoalDataKey];
if (registration.input && registration.input.length > 0) {
try {
const cp = goalCaching_1.cacheRestore({ entries: registration.input });
return cp.listener(project, gi, sdm_1.GoalProjectListenerEvent.before);
}
catch (e) {
const message = `Failed to restore cache input to container for goal ${gi.goal.uniqueName}: ${e.message}`;
gi.progressLog.write(message);
return { code: 1, message };
}
}
else {
return { code: 0, message: "No container input cache entries to restore" };
}
}),
};
/**
* Goal that fulfills requested container goals by scheduling them as
* Kubernetes jobs.
*/
exports.K8sContainerFulfiller = new sdm_1.GoalWithFulfillment({
displayName: "Kubernetes Container Goal Fulfiller",
uniqueName: KubernetesFulfillmentGoalScheduler_1.DefaultKubernetesFulfillmentOptions.name,
})
.with({
goalExecutor: containerExecutor,
name: `${KubernetesFulfillmentGoalScheduler_1.DefaultKubernetesFulfillmentOptions.name}-executor`,
})
.withProjectListener(containerFulfillerCacheRestore);
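/* The same builder pattern works for any fulfillment goal; a hypothetical
 * variant with its own name and the cache-restoring project listener:
 *
 *   const myFulfiller = new sdm_1.GoalWithFulfillment({
 *       displayName: "My Container Fulfiller",
 *       uniqueName: "my-container-fulfiller",
 *   })
 *       .with({ goalExecutor: containerExecutor, name: "my-container-fulfiller-executor" })
 *       .withProjectListener(containerFulfillerCacheRestore);
 */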
/**
* Wait for container in pod to start, return when it does.
*
* @param container Information about container to check
 * @param attempts Maximum number of status checks, waiting 500 ms between each
*/
function containerStarted(container, attempts = 240) {
return __awaiter(this, void 0, void 0, function* () {
let core;
try {
core = container.config.makeApiClient(k8s.CoreV1Api);
}
catch (e) {
e.message = `Failed to create Kubernetes core API client: ${e.message}`;
container.log.write(e.message);
throw e;
}
const sleepTime = 500; // ms
for (let i = 0; i < attempts; i++) {
yield poll_1.sleep(sleepTime);
const pod = (yield core.readNamespacedPod(container.pod, container.ns)).body;
const containerStatus = (_.get(pod, "status.containerStatuses") || []).find(c => c.name === container.name);
if (containerStatus && (!!_.get(containerStatus, "state.running.startedAt") || !!_.get(containerStatus, "state.terminated"))) {
const message = `Container '${container.name}' started`;
container.log.write(message);
return;
}
}
const errMsg = `Container '${container.name}' failed to start within ${attempts * sleepTime} ms`;
container.log.write(errMsg);
throw new Error(errMsg);
});
}
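/* Example (hypothetical): by default containerStarted polls for up to two
 * minutes (240 attempts x 500 ms); pass a smaller attempt count to fail
 * faster:
 *
 *   await containerStarted(container);      // up to 120 s
 *   await containerStarted(container, 20);  // up to 10 s
 */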
/**
* Watch pod until container `container.name` exits. Resolve promise
* with status if container `container.name` exits with status 0.
* Reject promise otherwise, including pod status in the `podStatus`
* property of the error.
*
* @param container Information about container to watch
* @return Status of pod after container terminates
*/
function containerWatch(container) {
return new Promise((resolve, reject) => {
let watch;
try {
watch = new k8s.Watch(container.config);
}
catch (e) {
e.message = `Failed to create Kubernetes watch client: ${e.message}`;
container.log.write(e.message);
reject(e);
return;
}
const watchPath = `/api/v1/watch/namespaces/${container.ns}/pods/${container.pod}`;
const watcher = watch.watch(watchPath, {}, (phase, obj) => {
const pod = obj;
if (pod && pod.status && pod.status.containerStatuses) {
const containerStatus = pod.status.containerStatuses.find(c => c.name === container.name);
if (containerStatus && containerStatus.state && containerStatus.state.terminated) {
const exitCode = _.get(containerStatus, "state.terminated.exitCode");
if (exitCode === 0) {
const msg = `Container '${container.name}' exited with status 0`;
container.log.write(msg);
resolve(pod.status);
}
else {
const msg = `Container '${container.name}' exited with status ${exitCode}`;
container.log.write(msg);
const err = new Error(msg);
err.podStatus = pod.status;
reject(err);
}
if (watcher) {
watcher.abort();
}
return;
}
}
container.log.write(`Container '${container.name}' still running`);
}, err => {
err.message = `Container watcher failed: ${err.message}`;
container.log.write(err.message);
reject(err);
});
});
}
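/* Example (hypothetical): combining containerWatch with the log follower,
 * mirroring the flow in executeK8sJob above:
 *
 *   const log = followK8sLog(container);
 *   try {
 *       const podStatus = await containerWatch(container);
 *       container.log.write(`Exited 0: ${stringify(podStatus)}`);
 *   } catch (e) {
 *       container.log.write(`Failed: ${e.message}`);
 *   } finally {
 *       log.abort();
 *   }
 */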
/**
* Set up log follower for container.
*/
function followK8sLog(container) {
const k8sLog = new k8s.Log(container.config);
const logStream = new stream_1.Writable({
write: (chunk, encoding, callback) => {
container.log.write(chunk.toString());
callback();
},
});
const doneCallback = e => {
if (e) {
if (e.message) {
container.log.write(e.message);
}
else {
container.log.write(stringify(e));
}
}
};
const logOptions = { follow: true };
return k8sLog.log(container.ns, container.pod, container.name, logStream, doneCallback, logOptions);
}
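/* The request object returned by k8sLog.log can be aborted to stop
 * following the log, as executeK8sJob does once the container exits:
 *
 *   const req = followK8sLog(container);
 *   // ...container exits...
 *   req.abort();
 */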
//# sourceMappingURL=k8s.js.map