@cto.ai/ops
Version:
💻 CTO.ai - The CLI built for Teams 🚀
493 lines (490 loc) • 22.9 kB
JavaScript
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpService = void 0;
const tslib_1 = require("tslib");
const fs = tslib_1.__importStar(require("fs-extra"));
const os = tslib_1.__importStar(require("os"));
const path = tslib_1.__importStar(require("path"));
const util = tslib_1.__importStar(require("util"));
const debug_1 = tslib_1.__importDefault(require("debug"));
const CustomErrors_1 = require("./../errors/CustomErrors");
const env_1 = require("./../constants/env");
const CustomErrors_2 = require("../errors/CustomErrors");
const opConfig_1 = require("../constants/opConfig");
const utils_1 = require("./../utils");
const templateUtils_1 = require("./../utils/templateUtils");
const validate_1 = require("./../utils/validate");
const cliSdk = require("@cto.ai/cli-sdk");
const { sdk, ux } = cliSdk;
const Analytics_1 = require("./../services/Analytics");
const Container_1 = require("./../services/Container");
const ErrorTemplate_1 = require("./../errors/ErrorTemplate");
const Feathers_1 = require("./../services/Feathers");
const Image_1 = require("./../services/Image");
const opConfig_2 = require("../constants/opConfig");
const OpsYml_1 = require("../types/OpsYml");
const RegistryAuth_1 = require("./../services/RegistryAuth");
const utils_2 = require("./../utils");
const arrayUtils_1 = require("../utils/arrayUtils");
const exec = util.promisify(require('child_process').exec);
const debug = (0, debug_1.default)('ops:OpService');
/**
 * OpService orchestrates building, configuring, and running "ops"
 * (commands, pipelines, and services) inside Docker containers on behalf
 * of the CLI.
 *
 * Collaborating services are injected through the constructor so they can
 * be stubbed in tests; each defaults to its production implementation.
 */
class OpService {
    constructor(registryAuthService = new RegistryAuth_1.RegistryAuthService(), imageService = new Image_1.ImageService(), containerService = new Container_1.ContainerService(), analytics = new Analytics_1.AnalyticsService(env_1.OPS_SEGMENT_KEY), api = new Feathers_1.FeathersClient()) {
        this.registryAuthService = registryAuthService;
        this.imageService = imageService;
        this.containerService = containerService;
        this.analytics = analytics;
        this.api = api;
        /**
         * Builds the Docker image for a single op and reports the build to
         * analytics. Build failures propagate from ImageService.build.
         */
        this.opBuild = async (op, opPath, config, options) => {
            const { team: { name: teamName }, user, } = config;
            const opImageTag = (0, utils_1.getOpImageTag)(teamName, op.name, op.version, op.isPublic);
            await this.imageService.build((0, utils_1.getOpUrl)(env_1.OPS_REGISTRY_HOST, opImageTag), opPath, op, options);
            // TODO: Analytics services should *always* get non-workflow related data
            // from the config
            // TODO: What about sending an event for all the built ops?
            this.analytics.track('Ops CLI Build', {
                username: user.username,
                team: teamName,
                name: op.name,
                version: op.version,
                description: op.description,
                namespace: `@${teamName}/${op.name}`,
                namespace_version: `@${teamName}/${op.name}:${op.version}`,
                image: `${env_1.OPS_REGISTRY_HOST}/${op.name}:${op.version}`,
            }, config);
        };
        // XXX: Although it will result in a little duplication it would be better
        // to move the following logic to their respective cli commands.
        // "Inversion of control"
        /**
         * Validates and sequentially builds every runnable op (those with a
         * "run" key) from the manifest.
         * @throws InvalidInputCharacter / InvalidOpVersionFormat on bad names
         *         or versions — validation happens before any build starts.
         */
        this.opsBuildLoop = async (ops, opPath, config, options) => {
            const filteredOps = ops
                .filter(op => 'run' in op)
                .map(op => {
                    if (!(0, validate_1.isValidOpName)(op.name)) {
                        throw new CustomErrors_1.InvalidInputCharacter(`Workflow Name: ${op.name}`);
                    }
                    if (!(0, validate_1.isValidOpVersion)(op.version)) {
                        throw new CustomErrors_1.InvalidOpVersionFormat();
                    }
                    return op;
                });
            for (const op of filteredOps) {
                // XXX: Presentation layer shouldn't be in the business layer
                console.log(`🛠${ux.colors.white('Building:')} ${ux.colors.callOutCyan(op.name + ':' + op.version)}\n`);
                await this.opBuild(op, opPath, config, options);
            }
        };
        /**
         * Fetches a pipeline registry token and stores it as the team secret
         * CTOAI_USER_REGISTRY_CREDENTIALS.
         *
         * NOTE: the outer catch swallows every failure (including the typed
         * errors thrown below), so this is best-effort by design — callers
         * are never interrupted by registry-token problems.
         */
        this.setDefaultRegistryToken = async (config) => {
            try {
                const { data } = await this.api.find('private/registry/token/pipeline', {
                    headers: {
                        Authorization: config.tokens.accessToken,
                    },
                });
                try {
                    await this.api.create(`/private/teams/${config.team.name}/secrets`, {
                        secrets: {
                            CTOAI_USER_REGISTRY_CREDENTIALS: data,
                        },
                    }, {
                        headers: { Authorization: config.tokens.accessToken },
                    });
                }
                catch (err) {
                    debug('%O', err);
                    // Map API error codes onto typed CLI errors.
                    // NOTE(review): assumes err.error[0] exists; if it does not,
                    // the resulting TypeError is still swallowed by the outer
                    // catch — confirm the API error shape.
                    switch (err.error[0].code) {
                        case 400:
                            throw new CustomErrors_2.InvalidSecretVault(err);
                        case 401:
                            throw new CustomErrors_2.UserUnauthorized(err);
                        case 403:
                            if (err.error[0].message.includes('invalid secret token')) {
                                throw new CustomErrors_2.InvalidSecretToken(err);
                            }
                            else {
                                throw new CustomErrors_2.NoSecretsProviderFound(err);
                            }
                        default:
                            throw new CustomErrors_2.SetSecretsProvider(err);
                    }
                }
            }
            catch (e) {
                debug('%O', e);
            }
        };
        /**
         * Reads and parses the ops manifest from opPath.
         * @throws FileNotFoundError when the manifest is missing.
         * @throws NoLocalOpsOrPipelinesFound when the file is empty or defines
         *         no ops, pipelines, or services.
         */
        this.getOpsFromFileSystem = async (opPath) => {
            const manifest = await fs
                .readFile(path.join(opPath, opConfig_2.OP_FILE), 'utf8')
                .catch(err => {
                    debug('%O', err);
                    throw new CustomErrors_1.FileNotFoundError(err, opPath, opConfig_2.OP_FILE);
                });
            if (manifest.length === 0) {
                throw new CustomErrors_1.NoLocalOpsOrPipelinesFound();
            }
            const { ops, pipelines, services } = (0, utils_2.parseYaml)(manifest);
            if (ops.length === 0 && pipelines.length === 0 && services.length === 0) {
                throw new CustomErrors_1.NoLocalOpsOrPipelinesFound();
            }
            return { ops, pipelines, services };
        };
        /**
         * Normalizes a mixed list of ops: pipelines are expanded into job ops,
         * services are converted into command ops, commands pass through.
         */
        this.convertOpsToCommands = async (opsToBuild, config, opPath) => {
            const convertedOps = [];
            for (const workflow of opsToBuild) {
                switch (workflow.type) {
                    case opConfig_2.PIPELINE_TYPE: {
                        const pipelineOps = await this.convertPipelinesToOps([workflow], config, opPath);
                        convertedOps.push(...pipelineOps);
                        break;
                    }
                    case opConfig_2.SERVICE_TYPE: {
                        const serviceOps = (0, OpsYml_1.convertServicesToOps)([workflow]);
                        convertedOps.push(...serviceOps);
                        break;
                    }
                    default:
                        convertedOps.push(workflow);
                }
            }
            return (0, arrayUtils_1.flatten)(convertedOps);
        };
        /**
         * Materializes each non-linked pipeline job as a standalone "job" op
         * on disk (under <opPath>/.ops/jobs/<name>) from the bash pipeline
         * template, then loads it back from the generated manifest.
         */
        this.convertPipelinesToOps = async (pipelines, config, opPath) => {
            const ops = [];
            for (const pipeline of pipelines) {
                for (let jobIndex = 0; jobIndex < pipeline.jobs.length; jobIndex++) {
                    const job = pipeline.jobs[jobIndex];
                    const { name, description, linked } = job;
                    if (linked) {
                        continue; // linked jobs are not generated here
                    }
                    const templateDir = path.resolve(__dirname, '../templates/pipeline/_bashPipelineTemplate');
                    const destDir = opPath + '/.ops/jobs/' + name;
                    await (0, templateUtils_1.copyTemplate)(templateDir, destDir);
                    await (0, templateUtils_1.customizeManifest)('command', { name, description, version: pipeline.version }, destDir);
                    await (0, templateUtils_1.renderFile)(path.resolve(__dirname, '../templates/pipeline/_basePipeline/Dockerfile'), path.join(destDir, 'Dockerfile'), { registry: env_1.OPS_REGISTRY_HOST, image: env_1.OPS_PIPELINE_IMAGE });
                    const jobContents = await this.createJobContents(config, pipeline, job, jobIndex);
                    this.writeJobContents(jobContents, destDir + '/main.sh');
                    this.addJobPackages(job.packages, destDir + '/dependencies.sh');
                    const jobOp = await this.getOpsFromFileSystem(destDir);
                    // Tag the generated op as a pipeline job and carry over the
                    // pipeline's visibility.
                    jobOp.ops[0].type = opConfig_1.JOB_TYPE;
                    jobOp.ops[0].isPublic = pipeline.isPublic;
                    ops.push(jobOp.ops[0]);
                }
            }
            return ops;
        };
        /**
         * Renders the bash entrypoint script for a pipeline job.
         * NOTE: inside this template literal `\n` and `\t` are JavaScript
         * escapes, so the emitted script contains IFS=$'<newline><tab>' —
         * functionally equivalent to the conventional IFS string.
         * (config, pipeline and jobIndex are part of the call signature but
         * currently unused.)
         */
        this.createJobContents = async (config, pipeline, job, jobIndex) => {
            return `#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
${job.steps.join('\n')}
`;
        };
        /**
         * Appends an apt-get install line for the job's extra packages to the
         * generated dependencies.sh, when any packages are declared.
         */
        this.addJobPackages = (packages, targetFile) => {
            if (packages !== undefined && packages.length > 0) {
                const installStr = `\napt-get update && apt-get -y install ${packages.join(' ')} && rm -r /var/cache/apt/archives/`;
                fs.appendFileSync(targetFile, installStr);
            }
        };
        /** Overwrites targetFile with the rendered job script. */
        this.writeJobContents = (contents, targetFile) => {
            fs.removeSync(targetFile);
            fs.writeFileSync(targetFile, contents);
        };
        /**
         * Normalizes op fields before a run: prefixes the SDK2 daemon
         * entrypoint, splits an inline "name:version", and appends CLI params
         * to the run command. Mutates op in place and returns updated inputs.
         */
        this.updateOpFields = (inputs) => {
            const { op, parsedArgs: { opParams }, } = inputs;
            // Loose equality kept on purpose: op.sdk may come out of YAML as
            // either the number 2 or the string "2".
            if (op.sdk == opConfig_1.SDK2) {
                op.run = `${opConfig_1.SDK2_DAEMON_ENTRYPOINT} ${op.run}`;
            }
            if (op.name.includes(':')) {
                const [name, version] = op.name.split(':');
                op.name = name;
                op.version = version;
            }
            op.run = [op.run, ...opParams].join(' ');
            return Object.assign(Object.assign({}, inputs), { op });
        };
        /**
         * Resolves the op's image: reuses a local image when present, otherwise
         * pulls (published ops) or builds (local ops). `--build` forces a
         * rebuild even when a local image exists.
         */
        this.getImage = async (inputs) => {
            const { op, config, version, parsedArgs: { args: { nameOrPath }, flags: { build }, }, flags, } = inputs;
            try {
                op.image = this.setOpImageUrl(op, config);
                let localImage;
                try {
                    localImage = await this.imageService.checkLocalImage(op.image);
                }
                catch (e) {
                    // Best-effort probe: a missing local image is not an error here.
                }
                if (!localImage || build) {
                    // Job ops are generated under /tmp/<name>; everything else
                    // builds from the user-supplied path.
                    const opPath = op.type !== 'job'
                        ? path.resolve(process.cwd(), nameOrPath)
                        : `/tmp/${op.name}`;
                    op.isPublished
                        ? await this.pullImageFromRegistry(op, config, version)
                        : await this.imageService.build(`${op.image}`, opPath, op, flags);
                }
                return inputs;
            }
            catch (err) {
                // Typed CLI errors carry user-facing messaging; rethrow untouched.
                if (err instanceof ErrorTemplate_1.ErrorTemplate) {
                    throw err;
                }
                debug('%O', err);
                throw new Error('Unable to find image for this workflow');
            }
        };
        /**
         * Pulls a published op image using freshly-created registry pull
         * credentials for the op's team.
         */
        this.pullImageFromRegistry = async (op, config, version) => {
            const { authconfig } = await this.registryAuthService.create(config.tokens.accessToken, op.teamName, op.name, version, true, false);
            await this.imageService.pull(op, authconfig);
        };
        /**
         * Resolves the fully-qualified registry URL for the op's image.
         * Published ops are addressed by id, local ones by name; the op's own
         * team name wins over the configured team.
         */
        this.setOpImageUrl = (op, config) => {
            const opIdentifier = op.isPublished ? op.id : op.name;
            const teamName = op.teamName ? op.teamName : config.team.name;
            const opImageTag = (0, utils_1.getOpImageTag)(teamName, opIdentifier, op.version, op.isPublic);
            return (0, utils_1.getOpUrl)(env_1.OPS_REGISTRY_HOST, opImageTag);
        };
        /**
         * Assembles the container environment: CLI defaults first, then the
         * ops.yml env entries, with any matching host process env var taking
         * final precedence. Stores the result on op.env as ["KEY=value", ...].
         */
        this.setEnvs = (inputs) => {
            const { config, op, parsedArgs } = inputs;
            const defaultEnv = {
                OPS_HOME: path.resolve(sdk.homeDir() + '/.config/@cto.ai/ops'),
                CONFIG_DIR: `/${config.team.name}/${op.name}`,
                STATE_DIR: `/${config.team.name}/${op.name}/${op.runId}`,
                LOGGER_PLUGINS_STDOUT_ENABLED: 'true',
                SDK_RUN_ID: op.runId,
                OPS_ACCESS_TOKEN: config.tokens.accessToken,
                OPS_API_PATH: env_1.OPS_API_PATH,
                OPS_API_HOST: env_1.OPS_API_HOST,
                OPS_OP_ID: op.id,
                OPS_OP_NAME: op.name,
                OPS_TEAM_ID: config.team.id,
                OPS_TEAM_NAME: config.team.name,
                OPS_USER_NAME: config.user.username,
                OPS_USER_EMAIL: config.user.email,
                OPS_HOST_PLATFORM: os.platform(),
                PLATFORM_CONTAINER_REGISTRY: env_1.OPS_REGISTRY_HOST,
            };
            if (parsedArgs.flags.batch) {
                defaultEnv.SDK_BATCH_MODE = '1';
            }
            // String concatenation always yields a string here, so the old
            // `=== undefined` guard was dead code.
            op.opsHome = (process.env.HOME || process.env.USERPROFILE) + '/.config/@cto.ai/ops';
            op.stateDir = `/${config.team.name}/${op.runId}`;
            op.configDir = `/${config.team.name}/${op.name}`;
            const rawEnvs = op.env;
            const opsYamlEnv = op.env
                ? rawEnvs.reduce(this.convertEnvStringsToObject, {})
                : {};
            op.env = Object.entries(Object.assign(Object.assign({}, defaultEnv), opsYamlEnv))
                .map(this.overrideEnvWithProcessEnv(process.env))
                .map(([key, val]) => `${key}=${val}`);
            return Object.assign(Object.assign({}, inputs), { config, op });
        };
        /**
         * Ensures the op's host-side state directory exists under opsHome and
         * normalizes bind mounts (home-alias expansion).
         */
        this.hostSetup = (_a) => {
            var { op } = _a, rest = tslib_1.__rest(_a, ["op"]);
            const stateDirPath = path.resolve(op.opsHome + op.stateDir);
            // Check the same path we create: the old code tested the bare
            // op.stateDir (an absolute /team/runId path that never exists), so
            // ensureDirSync ran unconditionally. ensureDirSync is idempotent,
            // so existing directories are unaffected either way.
            if (!fs.existsSync(stateDirPath)) {
                try {
                    fs.ensureDirSync(stateDirPath);
                }
                catch (err) {
                    debug('%O', err);
                    throw new CustomErrors_1.CouldNotMakeDir(err, stateDirPath);
                }
            }
            return Object.assign(Object.assign({}, rest), { op: Object.assign(Object.assign({}, op), { bind: op.bind ? op.bind.map(this.replaceHomeAlias) : [] }) });
        };
        /**
         * Normalizes bind-mount pairs via replaceHomeAlias.
         * NOTE: hostSetup already performs the same mapping; replaceHomeAlias
         * is idempotent, so applying it twice is harmless.
         */
        this.setBinds = (_a) => {
            var { op } = _a, rest = tslib_1.__rest(_a, ["op"]);
            return Object.assign(Object.assign({}, rest), { op: Object.assign(Object.assign({}, op), { bind: op.bind ? op.bind.map(this.replaceHomeAlias) : [] }) });
        };
        /**
         * Builds the Docker container-create options from the op definition.
         * Mutates op.bind when mountCwd/mountHome are set.
         */
        this.getOptions = (_a) => {
            var { op, config } = _a, rest = tslib_1.__rest(_a, ["op", "config"]);
            const Image = op.image;
            const WorkingDir = '/ops';
            const Cmd = op.run ? op.run.split(' ') : [];
            if (op.mountCwd) {
                // Mount the caller's working directory at /cwd.
                const bindFrom = process.cwd();
                const bindTo = '/cwd';
                const cwDir = `${bindFrom}:${bindTo}`;
                op.bind.push(cwDir);
            }
            if (op.mountHome) {
                // Mount the host home directory read-write at /root.
                const homeDir = `${env_1.HOME}:/root:rw`;
                op.bind.push(homeDir);
            }
            const options = {
                AttachStderr: true,
                AttachStdin: true,
                AttachStdout: true,
                Cmd,
                Env: op.env,
                WorkingDir,
                HostConfig: {
                    Binds: op.bind,
                    NetworkMode: op.network,
                    SecurityOpt: ['seccomp=unconfined', 'apparmor=unconfined'],
                },
                Image,
                OpenStdin: true,
                StdinOnce: false,
                Tty: true,
                Volumes: {},
                VolumesFrom: [],
            };
            debug('Docker options', options);
            return Object.assign(Object.assign({}, rest), { op, options });
        };
        /**
         * Translates the op's `port` entries ("host:container") into Docker's
         * ContainerCreate shape, e.g.
         *   ports: ["3000:3000", "5000:9000"]
         * becomes
         *   PortBindings: { "3000/tcp": [{ HostPort: "3000" }],
         *                   "9000/tcp": [{ HostPort: "5000" }] }
         *   ExposedPorts: { "3000/tcp": {}, "9000/tcp": {} }
         * See https://docs.docker.com/engine/api/v1.39/#operation/ContainerCreate
         * @throws YamlPortError for non-string, malformed, or non-numeric ports.
         */
        this.addPortsToOptions = async (_a) => {
            var { op, options } = _a, rest = tslib_1.__rest(_a, ["op", "options"]);
            const ExposedPorts = {};
            const PortBindings = {};
            if (op.port) {
                const parsedPorts = op.port
                    .filter(p => !!p) // Remove null values
                    .map(port => {
                        if (typeof port !== 'string') {
                            throw new CustomErrors_1.YamlPortError(port);
                        }
                        const portSplits = port.split(':');
                        // Require exactly "host:container": a bare "3000" used to
                        // slip through and produce a bogus "undefined/tcp" entry.
                        if (portSplits.length !== 2) {
                            throw new CustomErrors_1.YamlPortError(port);
                        }
                        portSplits.forEach(p => {
                            const portNumber = parseInt(p, 10);
                            // NOTE: also rejects port 0, which is not routable anyway.
                            if (!portNumber) {
                                throw new CustomErrors_1.YamlPortError(port);
                            }
                        });
                        return { host: portSplits[0], machine: `${portSplits[1]}/tcp` };
                    });
                // (renamed from a shadowing `parsedPorts` callback parameter)
                parsedPorts.forEach(parsedPort => {
                    ExposedPorts[parsedPort.machine] = {};
                });
                parsedPorts.forEach(parsedPort => {
                    PortBindings[parsedPort.machine] = [
                        ...(PortBindings[parsedPort.machine] || []),
                        {
                            HostPort: parsedPort.host,
                        },
                    ];
                });
            }
            options = Object.assign(Object.assign({}, options), { ExposedPorts, HostConfig: Object.assign(Object.assign({}, options.HostConfig), { PortBindings }) });
            return Object.assign(Object.assign({}, rest), { op,
                options });
        };
        /**
         * Creates the Docker container for the op. In batch mode the image's
         * sdk-daemon must be >= 2.2.0; anything older (or any probe failure)
         * is rejected with a user-facing error.
         */
        this.createContainer = async (inputs) => {
            // TODO: This is a quick hack. We'll be able to do better when we
            // record daemon versions on publish.
            if (inputs.parsedArgs.flags.batch) {
                try {
                    const { stdout: versionOutput } = await exec(`docker run --rm ${inputs.op.image} /bin/sdk-daemon --version`);
                    const [major, minor] = versionOutput
                        .split('.')
                        .map((piece) => parseInt(piece, 10));
                    // Support starts at daemon version 2.2.0. The previous check
                    // (`major < 2 || minor < 2`) wrongly rejected e.g. 3.0.x.
                    if (!(major > 2 || (major === 2 && minor >= 2))) {
                        throw new Error(`unsupported daemon version: ${versionOutput}`);
                    }
                }
                catch (err) {
                    throw new Error('Workflow does not support batch mode. Rebuild it with the latest CLI to add support.');
                }
            }
            try {
                const { op, options } = inputs;
                const container = await this.containerService.create(op, options);
                return Object.assign(Object.assign({}, inputs), { container });
            }
            catch (err) {
                debug('%O', err);
                throw new Error('Error creating Docker container');
            }
        };
        /**
         * Attaches stdio to the created container, starts it, and removes the
         * container when the stream ends (exiting the process) unless running
         * as part of a pipeline.
         */
        this.attachToContainer = async (inputs) => {
            const { container } = inputs;
            if (!container) {
                throw new Error('No docker container for attachment');
            }
            try {
                const options = {
                    stream: true,
                    stdin: true,
                    stdout: true,
                    stderr: true,
                    hijack: true,
                };
                const stream = await container.attach(options);
                stream.on('end', () => {
                    if (!inputs.isPipeline) {
                        container.remove(() => process.exit(0));
                    }
                });
                this.containerService.handleStream(stream);
                await this.containerService.start(stream);
                return inputs;
            }
            catch (err) {
                debug('%O', err);
                // Preserve the underlying message instead of stringifying the
                // whole Error object ("Error: Error: ...").
                throw new Error(err.message || String(err));
            }
        };
        /**
         * Reducer: folds "KEY=value" strings into an accumulator object.
         * Splits on the FIRST "=" only, so values that themselves contain "="
         * (e.g. base64 padding) are preserved intact.
         */
        this.convertEnvStringsToObject = (acc, curr) => {
            const separatorIndex = curr.indexOf('=');
            if (separatorIndex === -1) {
                // No "=" at all: keep the old split() behavior ({KEY: undefined}).
                return Object.assign(Object.assign({}, acc), { [curr]: undefined });
            }
            const key = curr.slice(0, separatorIndex);
            const val = curr.slice(separatorIndex + 1);
            return Object.assign(Object.assign({}, acc), { [key]: val });
        };
        /**
         * Curried mapper over [key, value] env pairs: a value present in the
         * host process env wins over the op's default.
         * NOTE: uses `||`, so an empty-string process value does NOT override.
         */
        this.overrideEnvWithProcessEnv = (processEnv) => ([key, val,]) => [key, processEnv[key] || val];
        /**
         * Expands "~" and "$HOME" in the host half of a "host:container[:mode]"
         * bind pair. Rejoining the remainder with ":" preserves any mode suffix
         * (e.g. ":ro"), which the previous `join('')` silently destroyed.
         */
        this.replaceHomeAlias = (bindPair) => {
            const [hostPart, ...containerParts] = bindPair.split(':');
            const from = hostPart.replace('~', env_1.HOME).replace('$HOME', env_1.HOME);
            const to = containerParts.join(':');
            return `${from}:${to}`;
        };
    }
    /**
     * Runs an op end-to-end: normalizes its fields, resolves the image,
     * environment, binds, options, and ports, then creates and attaches to
     * the Docker container.
     * @returns {Promise<boolean>} true on success.
     * @throws rethrows any error raised by a pipeline stage (after debug-logging).
     */
    async run(op, isPipeline, parsedArgs, config, version, flags) {
        try {
            const opServicePipeline = (0, utils_1.asyncPipe)(this.updateOpFields, this.getImage, this.setEnvs, this.hostSetup, this.setBinds, this.getOptions, this.addPortsToOptions, this.createContainer, this.attachToContainer);
            await opServicePipeline({
                op,
                isPipeline,
                config,
                parsedArgs,
                version,
                flags,
            });
            return true;
        }
        catch (err) {
            debug('%O', err);
            throw err;
        }
    }
}
exports.OpService = OpService;