// @hotglue/cli — hotglue CLI tools
// 263 lines (225 loc) • 9.31 kB, JavaScript
;
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.handler = exports.desc = exports.command = exports.builder = void 0;
var _path = _interopRequireDefault(require("path"));
var _promises = require("fs/promises");
var _debug = _interopRequireDefault(require("../../helpers/debug"));
var _ora = _interopRequireDefault(require("ora"));
var _axios = _interopRequireDefault(require("axios/dist/node/axios.cjs"));
var _awsSdk = _interopRequireDefault(require("aws-sdk"));
var _cliTable = _interopRequireDefault(require("cli-table"));
var _utils = require("../../helpers/utils");
var _descriptions = _interopRequireDefault(require("../../helpers/descriptions"));
var _print = require("../../helpers/print");
var _api = require("../../helpers/api");
var _flow = require("../../helpers/flow");
// Babel interop helper: ES-module namespace objects pass through untouched;
// anything else (CommonJS exports, primitives, even null) is wrapped as
// `{ default: value }` so `.default` access is always safe.
function _interopRequireDefault(obj) {
  if (obj && obj.__esModule) {
    return obj;
  }
  return { default: obj };
}
// Namespaced debug logger for this command module.
const debug = (0, _debug.default)('commands:etl:deploy');
// yargs command name: `hotglue etl deploy`.
const command = 'deploy';
exports.command = command;
// One-line description shown in CLI help output.
const desc = 'Deploy ETL scripts';
exports.desc = desc;
/**
 * yargs builder: registers the options for `etl deploy`.
 * --flow and --sourceFolder are mandatory; --tenant defaults to 'default';
 * --tap and --all are optional (their mutual requirement is enforced in the
 * handler, not here).
 *
 * @param {object} yargs - the yargs instance for this command.
 * @returns {object} the configured yargs instance.
 */
const builder = async yargs => {
  debug('builder', command);
  const opts = _descriptions.default.options;
  return yargs
    .option('flow', opts['flow'].config)
    .demandOption('flow', opts['flow'].demandText)
    .option('tap', opts['tap'].config)
    .option('all', opts['all'].config)
    .option('tenant', {
      ...opts['tenant'].config,
      default: 'default'
    })
    .option('sourceFolder', opts['sourceFolder'].config)
    .demandOption('sourceFolder', opts['sourceFolder'].demandText);
};
exports.builder = builder;
/**
 * Handler for `etl deploy`. For every connector/tap matching the
 * tenant/flow/tap arguments, deletes the previously deployed objects under
 * the connector's `etl/` S3 prefix, then uploads every file found in
 * --sourceFolder (minus generated output/snapshot folders).
 *
 * @param {object} argv - parsed yargs arguments plus injected context:
 *   hg (CLI config providing clientApiBaseUri), json (emit JSON instead of
 *   spinner/table output), apikey, env (also used as the S3 bucket name —
 *   see the listObjectsV2/putObject calls below), flow, tap, all, tenant,
 *   sourceFolder.
 */
const handler = async argv => {
  debug('handler', command, argv);
  const {
    hg,
    json,
    apikey,
    env,
    flow,
    tap,
    all,
    tenant,
    sourceFolder
  } = argv;
  const {
    clientApiBaseUri
  } = hg;
  // A specific --tap or the catch-all --all flag is required.
  if (!all && !tap) {
    throw new Error(`${_descriptions.default.options['tap'].demandText} Or you can pass the --all flag to run this command for all taps/connectors.`);
  }
  // `message` holds the step currently in progress so the catch block at the
  // bottom can report which step failed.
  let message;
  let spinner = (0, _ora.default)();
  // Absolute path of the folder whose contents will be deployed.
  const folderPrefix = _path.default.resolve(process.cwd(), sourceFolder);
  try {
    message = (0, _print.themed)(`Deploying script for Tenant ${(0, _print.themed)(tenant, 'info')} Flow ${(0, _print.themed)(flow, 'info')}${tap ? ` and Tap ${(0, _print.themed)(tap, 'info')}` : ''} to ${(0, _print.themed)(env, 'info')}`);
    !json && spinner.info((0, _print.themed)(`Info: ${message}.`, 'secondary')); // 1. build list of deployable files
    // Recursively collect files, excluding generated sync/etl output and
    // snapshot folders.
    const deployableFiles = await (0, _utils.getFolderFiles)(folderPrefix, {
      recursive: true,
      filter: {
        pattern: '!((**/sync-output)|(**/etl-output)|(**/snapshots))'
      }
    });
    // Nothing to deploy: report and bail out early (deliberately not thrown).
    if (deployableFiles.length === 0) {
      json ? (0, _print.printJSON)({
        status: 'error',
        error: 'There are no files to deploy at the specified location!'
      }) : spinner.fail((0, _print.themed)(`Error: ${(0, _print.themed)('There are no files to deploy at the specified location!')}.`, 'secondary'));
      return;
    } // 2. get STS credentials

    message = (0, _print.themed)(`Verifying user and authorizing`);
    !json && spinner.start((0, _print.themed)(`In progress: ${message}...`, 'secondary'));
    const params = {
      debug,
      baseUri: clientApiBaseUri,
      task: 'etl-deploy',
      env,
      tenant,
      flow,
      apikey
    };
    // Only include `tap` when one was provided.
    if (tap) params.tap = tap;
    // Exchange the API key for temporary STS credentials scoped to this task.
    const {
      accessKeyId,
      secretAccessKey,
      sessionToken
    } = await (0, _api.genCredentialsOnClientApi)(params);
    !json && spinner.succeed((0, _print.themed)(`Finished: ${message}.`, 'secondary')); // create authenticated S3 instance

    const s3 = new _awsSdk.default.S3({
      accessKeyId,
      secretAccessKey,
      sessionToken
    });
    // v2 flows use a different S3 layout and different client-api endpoints;
    // queried per tenant/flow.
    const isV2 = await (0, _flow.isV2Flow)(s3, env, tenant, flow); // 3. verify location is valid (consider ListObjects policy + listObjectsV2 + scan folder for key = config.json)

    message = (0, _print.themed)(`Validating flow and tap location`);
    !json && spinner.start((0, _print.themed)(`In progress: ${message}...`, 'secondary'));
    const supportedFlows = await (0, _api.getSupportedFlows)({
      debug,
      baseUri: clientApiBaseUri,
      apikey,
      env
    });
    const supportedFlow = supportedFlows.find(({
      id
    }) => id === flow);
    // The connectors (v2) / taps (v1) this deployment will target.
    let connectors = [];
    try {
      // For the default tenant, or when the flow has a declared type, use the
      // connectors the flow *supports*; otherwise use what this tenant has
      // actually *linked*. (The null/void-0 chain is transpiled `supportedFlow?.type`.)
      if (tenant === 'default' || supportedFlow !== null && supportedFlow !== void 0 && supportedFlow.type) {
        const supportedSources = isV2 ? await (0, _api.getSupportedConnectors)({
          debug,
          baseUri: clientApiBaseUri,
          env,
          flow,
          apikey
        }) : await (0, _api.getSupportedSources)({
          debug,
          baseUri: clientApiBaseUri,
          env,
          flow,
          apikey
        });
        // v2 connectors are keyed by `id`, v1 taps by `tap`. With no --tap,
        // keep everything (--all path).
        connectors = (supportedSources ?? []).filter(ss => tap ? ss[isV2 ? 'id' : 'tap'] === tap : true);
        if (tap && connectors.length === 0) throw new Error('Tap is not supported');
      } else {
        const linkedSources = isV2 ? await (0, _api.getLinkedConnectors)({
          debug,
          baseUri: clientApiBaseUri,
          env,
          flow,
          tenant,
          apikey
        }) : await (0, _api.getLinkedSources)({
          debug,
          baseUri: clientApiBaseUri,
          env,
          flow,
          tenant,
          apikey
        });
        connectors = (linkedSources ?? []).filter(ls => tap ? ls[isV2 ? 'id' : 'tap'] === tap : true);
        if (tap && connectors.length === 0) throw new Error('Tap is not linked');
      }
    } catch (err) {
      // Collapse any lookup failure into a single user-facing message; the
      // underlying error is only visible via debug output.
      debug('err', err);
      throw new Error(`Target location doesn't exist. Check your tenant, flow and tap arguments.`);
    }
    !json && spinner.succeed((0, _print.themed)(`Finished: ${message}.`, 'secondary'));
    // Accumulates one [file, status] row per deleted/deployed object.
    const table = new _cliTable.default({
      head: ['File', 'Status']
    });
    for (const connector of connectors) {
      const connectorId = connector[isV2 ? 'id' : 'tap'];
      message = (0, _print.themed)(`Preparing ${connectorId} deployment target`);
      !json && spinner.start((0, _print.themed)(`In progress: ${message}...`, 'secondary'));
      // S3 prefix this connector's ETL scripts live under. getEntityLabel's
      // null arguments presumably select a v1-vs-v2 path segment — TODO
      // confirm against helpers/flow.
      const rootKey = `${tenant}/flows/${flow}/${await (0, _flow.getEntityLabel)(null, null, null, null, isV2)}/${connectorId}/etl/`;
      // NOTE(review): a single listObjectsV2 call returns at most 1000 keys;
      // no ContinuationToken loop here — verify prefixes never exceed that.
      const {
        Contents
      } = await s3.listObjectsV2({
        Bucket: env,
        Prefix: `${rootKey}`
      }).promise();
      !json && spinner.succeed((0, _print.themed)(`Finished: ${message}.`, 'secondary'));
      debug('contents', Contents); // 4. cleanup old files
      const filesToDelete = Contents.map(item => ({
        Key: item.Key
      }));
      if (filesToDelete.length > 0) {
        message = (0, _print.themed)(`Removing old ${connectorId} ETL files`);
        !json && spinner.start((0, _print.themed)(`In progress: ${message}...`, 'secondary'));
        const delParams = {
          Bucket: env,
          Delete: {
            Objects: filesToDelete,
            Quiet: true
          }
        };
        await s3.deleteObjects(delParams).promise();
        // Record each deletion in the results table, path shown relative to
        // the connector's etl/ root.
        filesToDelete.forEach(({
          Key
        }) => table.push([`${connectorId}/${Key.substring(rootKey.length)}`, (0, _print.themed)('Deleted', 'warn')]));
        !json && spinner.succeed((0, _print.themed)(`Finished: ${message}.`, 'secondary'));
      } // 5. upload new files
      // message = themed(`Deploying ETL scripts`);
      // !json && spinner.info(themed(`Info: ${message}.`, 'secondary'));
      // NOTE(review): `for await` over a plain array behaves like `for...of`
      // here; the files are uploaded sequentially.
      for await (const file of deployableFiles) {
        // S3 keys always use forward slashes, so normalize Windows separators.
        const relativePath = process.platform === 'win32' ? _path.default.relative(sourceFolder, file).replace(/\\/g, '/') : _path.default.relative(sourceFolder, file);
        const key = `${rootKey}${relativePath}`;
        message = (0, _print.themed)(`Deploying file: ${(0, _print.themed)(`${connectorId}/${relativePath}`, 'info')}`);
        !json && spinner.start((0, _print.themed)(`In progress: ${message}...`, 'secondary'));
        const readBuffer = await (0, _promises.readFile)(file);
        const params = {
          Bucket: env,
          Key: key,
          Body: readBuffer
        };
        const res = await s3.putObject(params).promise();
        !json && spinner.succeed((0, _print.themed)(`Finished: ${message}.`, 'secondary'));
        debug('s3-put-res', res);
        table.push([`${connectorId}/${relativePath}`, (0, _print.themed)('Deployed', 'info')]);
      }
    }
    if (json) {
      (0, _print.printJSON)({
        status: 'success',
        deployedFiles: deployableFiles
      });
    } else {
      // print results
      (0, _print.cl)(table.toString());
    }
  } catch (err) {
    if (json) {
      // NOTE(review): if `err` is an Error instance, JSON serialization will
      // typically render it as {} — consider err.message here; verify what
      // printJSON does with non-plain objects.
      (0, _print.printJSON)({
        status: 'error',
        error: err
      });
    } else {
      // `message` still names the step that was in progress when we threw.
      spinner.fail((0, _print.themed)(`Error: ${message}.`, 'secondary'));
      (0, _print.pr)((0, _print.themed)(`Message: ${(0, _print.themed)(err.message)}`, 'secondary'));
      debug(err);
      if (err && err.response && err.response.data) {
        debug('response', err.response.data);
      }
    }
  }
};
exports.handler = handler;