@atomist/sdm
Version:
Atomist Software Delivery Machine SDK
669 lines • 27.4 kB
JavaScript
;
/*
* Copyright © 2020 Atomist, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.deletePods = exports.listPods = exports.deleteJob = exports.listJobs = exports.readNamespace = exports.sanitizeName = exports.isConfiguredInEnv = exports.createJobSpec = exports.k8sJobEnv = exports.k8sJobName = exports.killJobFilter = exports.zombiePodFilter = exports.KubernetesGoalScheduler = exports.k8sJobTtl = void 0;
// tslint:disable:max-file-line-count
const configuration_1 = require("@atomist/automation-client/lib/configuration");
const globals_1 = require("@atomist/automation-client/lib/globals");
const logger_1 = require("@atomist/automation-client/lib/util/logger");
const retry_1 = require("@atomist/automation-client/lib/util/retry");
const k8s = require("@kubernetes/client-node");
const cluster = require("cluster");
const fs = require("fs-extra");
const stringify = require("json-stringify-safe");
const _ = require("lodash");
const os = require("os");
const sdmGoal_1 = require("../../../api-helper/goal/sdmGoal");
const ServiceRegistration_1 = require("../../../api/registration/ServiceRegistration");
const util_1 = require("../../../core/goal/container/util");
const array_1 = require("../../../core/util/misc/array");
const config_1 = require("../kubernetes/config");
const error_1 = require("../support/error");
const namespace_1 = require("../support/namespace");
const service_1 = require("./service");
/**
 * Return the configured Kubernetes job time-to-live,
 * `sdm.k8s.job.ttl` or, if that is not available, twice the value
 * returned by [[sdmGoalTimeout]].
 */
function k8sJobTtl(cfg) {
    // Walk the optional configuration path without throwing on missing links
    let configuredTtl;
    if (cfg != null && cfg.sdm != null && cfg.sdm.k8s != null && cfg.sdm.k8s.job != null) {
        configuredTtl = cfg.sdm.k8s.job.ttl;
    }
    // Fall back to global configuration lookup when no explicit TTL is set
    return configuredTtl || configuration_1.configurationValue("sdm.k8s.job.ttl", 2 * sdmGoal_1.sdmGoalTimeout(cfg));
}
exports.k8sJobTtl = k8sJobTtl;
/**
 * GoalScheduler implementation that schedules SDM goals as Kubernetes
 * jobs.
 */
class KubernetesGoalScheduler {
    /**
     * @param options Scheduler options; `isolateAll` forces every goal
     * to be scheduled as its own Kubernetes job. An optional
     * `options.podSpec` serves as the base (or fallback) pod spec.
     */
    constructor(options = { isolateAll: false }) {
        this.options = options;
    }
    /**
     * Decide whether this scheduler should handle the goal invocation.
     * Never true inside an already-isolated goal process
     * (ATOMIST_ISOLATED_GOAL set) to avoid recursive scheduling.
     */
    async supports(gi) {
        return !process.env.ATOMIST_ISOLATED_GOAL &&
            (
            // Goal is marked as isolated and SDM is configured to use k8s jobs
            (gi.goal.definition.isolated && isConfiguredInEnv("kubernetes")) ||
                // Force all goals to run isolated via env var
                isConfiguredInEnv("kubernetes-all") ||
                // Force all goals to run isolated via explicit option
                (this.options.isolateAll && isConfiguredInEnv("kubernetes")) ||
                // Force all goals to run isolated via explicit configuration
                _.get(gi.configuration, "sdm.k8s.isolateAll", false) === true);
    }
    /**
     * Create (or re-create) a Kubernetes job that executes the goal.
     * Any pre-existing job of the same name is deleted first.
     * @param gi goal invocation to schedule
     * @return execute-goal result: code 0 on success, code 1 if the
     * pre-existing job could not be deleted or the new job could not
     * be created
     */
    async schedule(gi) {
        const { goalEvent } = gi;
        const podNs = await readNamespace();
        const kc = config_1.loadKubeConfig();
        const batch = kc.makeApiClient(k8s.BatchV1Api);
        // Base the job on a clone of this SDM's own pod spec so mutation is safe
        const defaultJobSpec = createJobSpec(_.cloneDeep(this.podSpec), podNs, gi);
        // Extension point: subclasses may adjust the spec before creation
        const jobSpec = await this.beforeCreation(gi, defaultJobSpec);
        const jobDesc = `k8s job '${jobSpec.metadata.namespace}:${jobSpec.metadata.name}' for goal '${goalEvent.uniqueName}'`;
        gi.progressLog.write(`/--`);
        gi.progressLog.write(`Scheduling k8s job '${jobSpec.metadata.namespace}:${jobSpec.metadata.name}' for goal '${goalEvent.name} (${goalEvent.uniqueName})'`);
        gi.progressLog.write("\\--");
        try {
            // Check if this job was previously launched
            await batch.readNamespacedJob(jobSpec.metadata.name, jobSpec.metadata.namespace);
            logger_1.logger.debug(`${jobDesc} already exists. Deleting...`);
            try {
                // Foreground propagation also removes the job's dependent pods
                await batch.deleteNamespacedJob(jobSpec.metadata.name, jobSpec.metadata.namespace, undefined, undefined, undefined, undefined, undefined, { propagationPolicy: "Foreground" });
                logger_1.logger.debug(`${jobDesc} deleted`);
            }
            catch (e) {
                logger_1.logger.error(`Failed to delete ${jobDesc}: ${stringify(e.body)}`);
                return {
                    code: 1,
                    message: `Failed to delete ${jobDesc}: ${error_1.k8sErrMsg(e)}`,
                };
            }
        }
        catch (e) {
            // This is ok to ignore as it just means the job doesn't exist
        }
        try {
            logger_1.logger.debug(`Job spec for ${jobDesc}: ${JSON.stringify(jobSpec)}`);
            // Previous deletion might not have completed; hence the retry here
            const jobResult = (await retry_1.doWithRetry(() => batch.createNamespacedJob(jobSpec.metadata.namespace, jobSpec), `Scheduling ${jobDesc}`)).body;
            // Extension point: subclasses may react to the created resource
            await this.afterCreation(gi, jobResult);
            logger_1.logger.info(`Scheduled ${jobDesc} with result: ${stringify(jobResult.status)}`);
            logger_1.logger.log("silly", stringify(jobResult));
        }
        catch (e) {
            logger_1.logger.error(`Failed to schedule ${jobDesc}: ${stringify(e.body)}`);
            return {
                code: 1,
                message: `Failed to schedule ${jobDesc}: ${error_1.k8sErrMsg(e)}`,
            };
        }
        await gi.progressLog.flush();
        return {
            code: 0,
            message: `Scheduled ${jobDesc}`,
        };
    }
    /**
     * Extension point for sub classes to modify the provided jobSpec
     * before the Job gets created in k8s. It should return the
     * modified jobSpec. The default implementation returns the spec
     * unchanged.
     * @param gi goal invocation
     * @param jobSpec Default job spec
     * @return desired job spec
     */
    async beforeCreation(gi, jobSpec) {
        return jobSpec;
    }
    /**
     * Extension point for sub classes to modify k8s resources after the job has been created.
     * The provided jobSpec contains the result of the job creation API call.
     * The default implementation does nothing.
     * @param gi goal invocation
     * @param jobSpec job resource returned by the create call
     */
    async afterCreation(gi, jobSpec) {
        return;
    }
    /**
     * If running in Kubernetes, read current pod spec. Populate
     * `this.podSpec` with a merge of `this.options.podSpec` and the
     * current pod spec. If neither is available, throw an error.
     * Also starts the periodic job clean-up timer on the cluster
     * master (or when clustering is disabled).
     */
    async initialize(configuration) {
        var _a, _b;
        // ATOMIST_POD_NAME is preferred; hostname equals the pod name in k8s
        const podName = process.env.ATOMIST_POD_NAME || os.hostname();
        const podNs = await readNamespace();
        let parentPodSpec;
        if (util_1.runningInK8s()) {
            try {
                const kc = config_1.loadKubeClusterConfig();
                const core = kc.makeApiClient(k8s.CoreV1Api);
                parentPodSpec = (await core.readNamespacedPod(podName, podNs)).body.spec;
            }
            catch (e) {
                logger_1.logger.error(`Failed to obtain parent pod spec from k8s: ${error_1.k8sErrMsg(e)}`);
                // Only fatal if no fallback pod spec was provided via options
                if (!this.options.podSpec) {
                    throw new Error(`Failed to obtain parent pod spec from k8s: ${error_1.k8sErrMsg(e)}`);
                }
            }
        }
        else if (!this.options.podSpec) {
            throw new Error(`Not running in Kubernetes and no pod spec provided`);
        }
        // Pod spec read from the cluster wins over the configured one
        this.podSpec = _.merge({}, this.options.podSpec, parentPodSpec);
        // Run clean-up only once per SDM: on the master or when clustering is off
        if (configuration.cluster.enabled === false || cluster.isMaster) {
            // Default clean-up interval: one minute
            const cleanupInterval = ((_b = (_a = configuration.sdm.k8s) === null || _a === void 0 ? void 0 : _a.job) === null || _b === void 0 ? void 0 : _b.cleanupInterval) || 1000 * 60 * 1;
            // unref() so the timer does not keep the process alive
            setInterval(async () => {
                try {
                    await this.cleanUp(configuration);
                    logger_1.logger.debug("Finished cleaning scheduled goal Kubernetes jobs");
                }
                catch (e) {
                    logger_1.logger.warn(`Failed cleaning scheduled goal Kubernetes jobs: ${e.message}`);
                }
            }, cleanupInterval).unref();
        }
    }
    /**
     * Extension point to allow for custom clean up logic.
     * The default deletes expired and zombie jobs via [[cleanupJobs]].
     */
    async cleanUp(configuration) {
        await cleanupJobs(configuration);
    }
}
exports.KubernetesGoalScheduler = KubernetesGoalScheduler;
/**
 * Delete Kubernetes jobs created by this SDM that have either
 *
 * - exceeded their time-to-live, as returned by [[k8sJobTtl]]
 * - have a pod whose first container has exited, indicating the goal
 *   has timed out or some other error has occurred
 */
async function cleanupJobs(configuration) {
    // Only consider jobs labeled as created by this SDM
    const selector = `atomist.com/creator=${sanitizeName(configuration.name)}`;
    const jobs = await listJobs(selector);
    if (jobs.length === 0) {
        logger_1.logger.debug(`No scheduled goal Kubernetes jobs found for label selector '${selector}'`);
        return;
    }
    const zombiePods = (await listPods(selector)).filter(zombiePodFilter);
    const ttl = k8sJobTtl(configuration);
    const killJobs = jobs.filter(killJobFilter(zombiePods, ttl));
    if (killJobs.length === 0) {
        logger_1.logger.debug(`No scheduled goal Kubernetes jobs were older than TTL '${ttl}' or zombies`);
    }
    else {
        const jobList = killJobs.map(j => `${j.metadata.namespace}/${j.metadata.name}`).join(",");
        logger_1.logger.debug("Deleting scheduled goal Kubernetes jobs: " + jobList);
    }
    for (const delJob of killJobs) {
        const jobId = { name: delJob.metadata.name, namespace: delJob.metadata.namespace };
        await deleteJob(jobId);
        await deletePods(jobId);
    }
}
/**
 * Return true for pods whose first container has terminated but at
 * least one other container has not.
 */
function zombiePodFilter(pod) {
    const statuses = (pod.status || {}).containerStatuses;
    if (!statuses || statuses.length < 1) {
        return false;
    }
    // The first container is the goal watcher; the rest do the work
    const [watcher, ...workers] = statuses;
    const watcherDone = !!(watcher.state && watcher.state.terminated);
    const anyWorkerAlive = workers.some(w => !(w.state && w.state.terminated));
    return watcherDone && anyWorkerAlive;
}
exports.zombiePodFilter = zombiePodFilter;
/**
 * Return true for jobs that have exceeded the TTL or whose child is
 * in the provided list of pods. Return false otherwise.
 * @param pods pods considered zombies; a job owning any of them is killed
 * @param ttl time-to-live in milliseconds
 */
function killJobFilter(pods, ttl) {
    return job => {
        const startTime = job.status ? job.status.startTime : undefined;
        // Jobs that have not started are never killed
        if (!startTime) {
            return false;
        }
        if (Date.now() - startTime.getTime() > ttl) {
            return true;
        }
        // Kill the job if any zombie pod names it as its owning Job
        const ownedByThisJob = pod => {
            const owners = (pod.metadata || {}).ownerReferences;
            return !!owners && owners.some(o => o.kind === "Job" && o.name === job.metadata.name);
        };
        return pods.some(ownedByThisJob);
    };
}
exports.killJobFilter = killJobFilter;
/** Unique, lowercased goal name (unique-name prefix before '#') for use in k8s job specs. */
function k8sJobGoalName(goalEvent) {
    const [prefix] = goalEvent.uniqueName.split("#");
    return prefix.toLowerCase();
}
/**
 * Unique name for job to use in k8s job spec: container name, goal
 * set id prefix, and goal name, capped at the 63-character k8s name
 * limit with trailing non-alphanumerics stripped.
 */
function k8sJobName(podSpec, goalEvent) {
    const parts = [podSpec.containers[0].name, "job", goalEvent.goalSetId.slice(0, 7), k8sJobGoalName(goalEvent)];
    return parts.join("-").slice(0, 63).replace(/[^a-z0-9]*$/, "");
}
exports.k8sJobName = k8sJobName;
/**
 * Kubernetes container spec environment variables that specify an SDM
 * running in single-goal mode.
 * @param podSpec pod spec used to derive the job name
 * @param goalEvent goal being scheduled
 * @param context handler context providing workspace and correlation ids
 * @return array of k8s env var objects ({ name, value })
 */
function k8sJobEnv(podSpec, goalEvent, context) {
    const goalName = k8sJobGoalName(goalEvent);
    const jobName = k8sJobName(podSpec, goalEvent);
    // Name/value pairs; string-keyed object preserves insertion order
    const vars = {
        ATOMIST_JOB_NAME: jobName,
        ATOMIST_REGISTRATION_NAME: `${globals_1.automationClientInstance().configuration.name}-job-${goalEvent.goalSetId.slice(0, 7)}-${goalName}`,
        ATOMIST_GOAL_TEAM: context.workspaceId,
        ATOMIST_GOAL_TEAM_NAME: context.context.workspaceName,
        ATOMIST_GOAL_ID: goalEvent.id,
        ATOMIST_GOAL_SET_ID: goalEvent.goalSetId,
        ATOMIST_GOAL_UNIQUE_NAME: goalEvent.uniqueName,
        ATOMIST_CORRELATION_ID: context.correlationId,
        ATOMIST_ISOLATED_GOAL: "true",
    };
    return Object.keys(vars).map(name => ({ name, value: vars[name] }));
}
exports.k8sJobEnv = k8sJobEnv;
/**
 * Create a k8s Job spec for the goal invocation by modifying the
 * provided pod spec: unique job name and namespace, no retries,
 * single-goal env vars, workspace-scoped cache path, and any
 * containers/volumes/secrets contributed by registered k8s services.
 * @param podSpec pod spec to base the job on (mutated and embedded)
 * @param podNs namespace to create the job in
 * @param gi goal invocation being scheduled
 * @return complete k8s Job spec
 */
function createJobSpec(podSpec, podNs, gi) {
    const { goalEvent, context } = gi;
    const jobSpec = createJobSpecWithAffinity(podSpec, gi);
    jobSpec.metadata.name = k8sJobName(podSpec, goalEvent);
    jobSpec.metadata.namespace = podNs;
    // A goal should run exactly once: no retries, no restarts
    jobSpec.spec.backoffLimit = 0;
    jobSpec.spec.template.spec.restartPolicy = "Never";
    jobSpec.spec.template.spec.containers[0].name = jobSpec.metadata.name;
    jobSpec.spec.template.spec.containers[0].env.push(...k8sJobEnv(podSpec, goalEvent, context));
    // Probes belong to the long-running SDM pod, not a one-shot job
    delete jobSpec.spec.template.spec.containers[0].livenessProbe;
    delete jobSpec.spec.template.spec.containers[0].readinessProbe;
    rewriteCachePath(jobSpec, context.workspaceId);
    // Add additional specs from registered services to the job spec
    if (_.get(gi.configuration, "sdm.k8s.service.enabled", true)) {
        if (!!goalEvent.data) {
            let data;
            try {
                data = sdmGoal_1.goalData(goalEvent);
            }
            catch (e) {
                logger_1.logger.warn(`Failed to parse goal data on '${goalEvent.uniqueName}'`);
                data = {};
            }
            if (!!data[ServiceRegistration_1.ServiceRegistrationGoalDataKey]) {
                _.forEach(data[ServiceRegistration_1.ServiceRegistrationGoalDataKey], (v, k) => {
                    logger_1.logger.debug(`Service with name '${k}' and type '${v.type}' found for goal '${goalEvent.uniqueName}'`);
                    if (v.type === service_1.K8sServiceRegistrationType.K8sService) {
                        const spec = v.spec;
                        if (!!spec.container) {
                            const c = array_1.toArray(spec.container);
                            jobSpec.spec.template.spec.containers.push(...c);
                        }
                        if (!!spec.initContainer) {
                            const ic = array_1.toArray(spec.initContainer);
                            jobSpec.spec.template.spec.initContainers = [
                                ...(jobSpec.spec.template.spec.initContainers || []),
                                ...ic,
                            ];
                        }
                        if (!!spec.volume) {
                            const vo = array_1.toArray(spec.volume);
                            jobSpec.spec.template.spec.volumes = [
                                ...(jobSpec.spec.template.spec.volumes || []),
                                ...vo,
                            ];
                        }
                        if (!!spec.volumeMount) {
                            const vm = array_1.toArray(spec.volumeMount);
                            // Guard initContainers with `|| []`: spreading undefined throws a
                            // TypeError when a service registers a volumeMount but no
                            // initContainers exist yet
                            [...jobSpec.spec.template.spec.containers, ...(jobSpec.spec.template.spec.initContainers || [])].forEach(c => {
                                c.volumeMounts = [
                                    ...(c.volumeMounts || []),
                                    ...vm,
                                ];
                            });
                        }
                        if (!!spec.imagePullSecret) {
                            const ips = array_1.toArray(spec.imagePullSecret);
                            jobSpec.spec.template.spec.imagePullSecrets = [
                                ...(jobSpec.spec.template.spec.imagePullSecrets || []),
                                ...ips,
                            ];
                        }
                    }
                });
            }
        }
    }
    return jobSpec;
}
exports.createJobSpec = createJobSpec;
/**
 * Create a k8s Job spec skeleton whose pod template prefers to be
 * scheduled onto the same node as other pods of the same goal set,
 * and which carries identifying labels and an `atomist.com/sdm`
 * annotation describing the SDM and goal.
 * @param podSpec pod spec to embed in the job template (mutated)
 * @param gi goal invocation providing goal set id, configuration, and context
 * @return job spec containing metadata and the pod template
 */
function createJobSpecWithAffinity(podSpec, gi) {
    const { goalEvent, configuration, context } = gi;
    // Assign the result back: _.defaultsDeep mutates and returns its first
    // argument, so when podSpec.affinity was undefined the defaults were
    // silently dropped instead of being attached to the pod spec
    podSpec.affinity = _.defaultsDeep(podSpec.affinity || {}, {
        podAffinity: {
            preferredDuringSchedulingIgnoredDuringExecution: [
                {
                    weight: 100,
                    podAffinityTerm: {
                        labelSelector: {
                            matchExpressions: [
                                {
                                    key: "atomist.com/goal-set-id",
                                    operator: "In",
                                    values: [
                                        goalEvent.goalSetId,
                                    ],
                                },
                            ],
                        },
                        topologyKey: "kubernetes.io/hostname",
                    },
                },
            ],
        },
    });
    // Clean up podSpec
    // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#pod-v1-core note on nodeName
    delete podSpec.nodeName;
    const labels = {
        "atomist.com/goal-set-id": goalEvent.goalSetId,
        "atomist.com/goal-id": goalEvent.id,
        "atomist.com/creator": sanitizeName(configuration.name),
        "atomist.com/workspace-id": context.workspaceId,
    };
    // Human/debug-oriented detail stored in the annotation below
    const detail = {
        sdm: {
            name: configuration.name,
            version: configuration.version,
        },
        goal: {
            goalId: goalEvent.id,
            goalSetId: goalEvent.goalSetId,
            uniqueName: goalEvent.uniqueName,
        },
    };
    const annotations = {
        "atomist.com/sdm": JSON.stringify(detail),
    };
    return {
        kind: "Job",
        apiVersion: "batch/v1",
        metadata: {
            labels,
            annotations,
        },
        spec: {
            template: {
                metadata: {
                    labels,
                },
                spec: podSpec,
            },
        },
    };
}
/**
 * Rewrite the hostPath of cache volumes to include the workspace id
 * to prevent cross workspace content ending up in the same directory.
 * A volume is considered a cache volume when some container mounts it
 * at the configured `sdm.cache.path` (default "/opt/data").
 * @param jobSpec job spec to modify in place
 * @param workspaceId workspace id to append to the cache hostPath
 */
function rewriteCachePath(jobSpec, workspaceId) {
    const cachePath = configuration_1.configurationValue("sdm.cache.path", "/opt/data");
    const containers = _.get(jobSpec, "spec.template.spec.containers", []);
    const cacheVolumeNames = [];
    containers.forEach(c => {
        // Containers may declare no volumeMounts at all; guard with `|| []`
        cacheVolumeNames.push(...(c.volumeMounts || []).filter(vm => vm.mountPath === cachePath).map(cm => cm.name));
    });
    _.uniq(cacheVolumeNames).forEach(vn => {
        const volume = _.get(jobSpec, "spec.template.spec.volumes", []).find(v => v.name === vn);
        if (!!volume && !!volume.hostPath && !!volume.hostPath.path) {
            const path = volume.hostPath.path;
            // Append the workspace id only when not already present. The
            // original `||` made this condition a tautology (a path cannot
            // end with both "<id>" and "<id>/"), so the id was appended
            // repeatedly on paths that already contained it.
            if (!path.endsWith(workspaceId) && !path.endsWith(`${workspaceId}/`)) {
                if (path.endsWith("/")) {
                    volume.hostPath.path = `${path}${workspaceId}`;
                }
                else {
                    volume.hostPath.path = `${path}/${workspaceId}`;
                }
            }
        }
    });
}
/**
 * Checks if one of the provided values is configured in ATOMIST_GOAL_SCHEDULER or -
 * for backwards compatibility reasons - ATOMIST_GOAL_LAUNCHER. The
 * env var may hold a plain string, a JSON string, or a JSON array.
 * @param values candidate scheduler names to look for
 */
function isConfiguredInEnv(...values) {
    const raw = process.env.ATOMIST_GOAL_SCHEDULER || process.env.ATOMIST_GOAL_LAUNCHER;
    if (!raw) {
        return false;
    }
    let parsed;
    try {
        parsed = JSON.parse(raw);
    }
    catch (e) {
        // Not JSON: treat the raw env var string as a single value
        return values.includes(raw);
    }
    if (Array.isArray(parsed)) {
        return parsed.some(v => values.includes(v));
    }
    return values.includes(parsed);
}
exports.isConfiguredInEnv = isConfiguredInEnv;
/**
 * Strip out characters not allowed in a k8s label value: drop "@"
 * and turn "/" into ".".
 * @param name name to sanitize, e.g. an npm-style package name
 */
function sanitizeName(name) {
    return name.split("@").join("").split("/").join(".");
}
exports.sanitizeName = sanitizeName;
/**
 * Read the Kubernetes namespace of this SDM. It returns the first
 * truthy value found among the following sources, in order:
 *
 * 1. ATOMIST_POD_NAMESPACE environment variable
 * 2. ATOMIST_DEPLOYMENT_NAMESPACE environment variable
 * 3. Contents of the service account namespace file [[K8sNamespaceFile]]
 * 4. "default"
 */
async function readNamespace() {
    const envNs = process.env.ATOMIST_POD_NAMESPACE || process.env.ATOMIST_DEPLOYMENT_NAMESPACE;
    if (envNs) {
        return envNs;
    }
    let fileNs;
    if (await fs.pathExists(namespace_1.K8sNamespaceFile)) {
        fileNs = (await fs.readFile(namespace_1.K8sNamespaceFile)).toString().trim();
    }
    // An empty or missing namespace file falls through to the default
    return fileNs || "default";
}
exports.readNamespace = readNamespace;
/**
 * List Kubernetes jobs matching the provided label selector. If
 * `sdm.k8s.job.singleNamespace` is not set or set to `true`, jobs
 * are listed only from the namespace provided by [[readNamespace]];
 * if it is set to `false`, jobs are listed across all namespaces.
 * Paged list responses are followed via the `_continue` token.
 *
 * @param labelSelector
 * @return array of Kubernetes jobs matching the label selector
 */
async function listJobs(labelSelector) {
    var _a, _b;
    const kc = config_1.loadKubeConfig();
    const batch = kc.makeApiClient(k8s.BatchV1Api);
    const jobs = [];
    // Continuation token for paged list responses; undefined on first page
    let continu;
    try {
        if (configuration_1.configurationValue("sdm.k8s.job.singleNamespace", true)) {
            const podNs = await readNamespace();
            do {
                const listJobResponse = await batch.listNamespacedJob(podNs, undefined, undefined, continu, undefined, labelSelector);
                jobs.push(...listJobResponse.body.items);
                continu = (_a = listJobResponse.body.metadata) === null || _a === void 0 ? void 0 : _a._continue;
            } while (continu);
        }
        else {
            do {
                const listJobResponse = await batch.listJobForAllNamespaces(undefined, continu, undefined, labelSelector);
                jobs.push(...listJobResponse.body.items);
                continu = (_b = listJobResponse.body.metadata) === null || _b === void 0 ? void 0 : _b._continue;
            } while (continu);
        }
    }
    catch (e) {
        // Re-throw with a descriptive message; callers log it
        e.message = `Failed to list scheduled goal Kubernetes jobs: ${error_1.k8sErrMsg(e)}`;
        throw e;
    }
    return jobs;
}
exports.listJobs = listJobs;
/**
 * Delete the provided job. Failures are ignored: a missing job is
 * silently skipped and a failed deletion is only logged.
 * @param job object with `name` and `namespace` of the job to delete
 */
async function deleteJob(job) {
    try {
        const kc = config_1.loadKubeConfig();
        const batch = kc.makeApiClient(k8s.BatchV1Api);
        // Read first: throws if the job no longer exists, skipping deletion
        await batch.readNamespacedJob(job.name, job.namespace);
        try {
            // Foreground propagation also removes the job's dependent pods
            await batch.deleteNamespacedJob(job.name, job.namespace, undefined, undefined, undefined, undefined, undefined, { propagationPolicy: "Foreground" });
        }
        catch (e) {
            logger_1.logger.warn(`Failed to delete k8s jobs '${job.namespace}:${job.name}': ${error_1.k8sErrMsg(e)}`);
        }
    }
    catch (e) {
        // This is ok to ignore because the job doesn't exist any more
    }
}
exports.deleteJob = deleteJob;
/**
 * List Kubernetes pods matching the provided label selector. Pods
 * are listed in the current namespace or cluster-wide depending on
 * the `sdm.k8s.job.singleNamespace` configuration (default: current
 * namespace only). Paged list responses are followed via the
 * `_continue` token.
 *
 * @param labelSelector
 * @return array of Kubernetes pods matching the label selector
 */
async function listPods(labelSelector) {
    var _a, _b;
    const kc = config_1.loadKubeConfig();
    const core = kc.makeApiClient(k8s.CoreV1Api);
    const pods = [];
    // Continuation token for paged list responses; undefined on first page
    let continu;
    try {
        if (configuration_1.configurationValue("sdm.k8s.job.singleNamespace", true)) {
            const podNs = await readNamespace();
            do {
                const listResponse = await core.listNamespacedPod(podNs, undefined, undefined, continu, undefined, labelSelector);
                pods.push(...listResponse.body.items);
                continu = (_a = listResponse.body.metadata) === null || _a === void 0 ? void 0 : _a._continue;
            } while (continu);
        }
        else {
            do {
                const listResponse = await core.listPodForAllNamespaces(undefined, continu, undefined, labelSelector);
                pods.push(...listResponse.body.items);
                continu = (_b = listResponse.body.metadata) === null || _b === void 0 ? void 0 : _b._continue;
            } while (continu);
        }
    }
    catch (e) {
        // Re-throw with a descriptive message; callers log it
        e.message = `Failed to list scheduled goal Kubernetes pods: ${error_1.k8sErrMsg(e)}`;
        throw e;
    }
    return pods;
}
exports.listPods = listPods;
/**
 * Delete the pods belonging to the provided job, found via the
 * `job-name` label the job controller sets on its pods. Failures are
 * ignored: list failures are logged as warnings, per-pod deletion
 * failures as debug messages.
 * @param job object with `name` and `namespace` of the owning job
 */
async function deletePods(job) {
    try {
        const kc = config_1.loadKubeConfig();
        const core = kc.makeApiClient(k8s.CoreV1Api);
        // The job controller labels its pods with job-name=<job name>
        const selector = `job-name=${job.name}`;
        const pods = await core.listNamespacedPod(job.namespace, undefined, undefined, undefined, undefined, selector);
        if (pods.body && pods.body.items) {
            for (const pod of pods.body.items) {
                try {
                    await core.deleteNamespacedPod(pod.metadata.name, pod.metadata.namespace, undefined, undefined, undefined, undefined, undefined, { propagationPolicy: "Foreground" });
                }
                catch (e) {
                    // Probably ok because pod might be gone already
                    logger_1.logger.debug(`Failed to delete k8s pod '${pod.metadata.namespace}:${pod.metadata.name}': ${error_1.k8sErrMsg(e)}`);
                }
            }
        }
    }
    catch (e) {
        logger_1.logger.warn(`Failed to list pods for k8s job '${job.namespace}:${job.name}': ${error_1.k8sErrMsg(e)}`);
    }
}
exports.deletePods = deletePods;
//# sourceMappingURL=KubernetesGoalScheduler.js.map