aws-spa
Version:
A no-brainer script to deploy a single page app on AWS
333 lines (270 loc) • 10 kB
JavaScript
;
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.upsertLifeCycleConfiguration = exports.tagBucket = exports.syncToS3 = exports.setBucketWebsite = exports.setBucketPolicyForOAC = exports.setBucketPolicy = exports.removeBucketWebsite = exports.identifyingTag = exports.doesS3BucketExists = exports.createBucket = exports.confirmBucketManagement = exports.blockBucketPublicAccess = exports.allowBucketPublicAccess = exports.LIFE_CYCLE_OLD_BRANCH_ID = void 0;
var _fs = require("fs");
var _inquirer = _interopRequireDefault(require("inquirer"));
var _mimeTypes = require("mime-types");
var _awsServices = require("./aws-services");
var _fsHelper = require("./fs-helper");
var _logger = require("./logger");
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
/**
 * Check whether the S3 bucket exists (via headBucket).
 * Resolves to true if found, false on a 404; any other error is rethrown.
 */
const doesS3BucketExists = async bucketName => {
  try {
    _logger.logger.info(`[S3] 🔍 Looking for bucket "${bucketName}"...`);
    await _awsServices.s3.headBucket({ Bucket: bucketName });
  } catch (error) {
    // headBucket answers 404 when the bucket does not exist; anything
    // else (403, network failure, ...) is unexpected and bubbles up.
    if (error.statusCode !== 404) {
      throw error;
    }
    _logger.logger.info(`[S3] 😬 Bucket "${bucketName}" not found...`);
    return false;
  }
  _logger.logger.info(`[S3] 🔍 Bucket "${bucketName}" found`);
  return true;
};
exports.doesS3BucketExists = doesS3BucketExists;
/**
 * Create the S3 bucket. A 409 Conflict (bucket already exists, but in a
 * region this tool cannot handle) is translated into a friendly error;
 * every other failure is rethrown untouched.
 */
const createBucket = async bucketName => {
  _logger.logger.info(`[S3] ✏️ Creating "${bucketName}" bucket...`);
  try {
    await _awsServices.s3.createBucket({ Bucket: bucketName });
  } catch (error) {
    if (error.statusCode !== 409) {
      throw error;
    }
    throw new Error('[S3] It seems that a bucket already exists but in an unsupported region... You should delete it first.');
  }
};
exports.createBucket = createBucket;
/**
 * Verify that the bucket carries the aws-spa identifying tag; if not,
 * interactively ask the user whether aws-spa may take it over.
 * Resolves to true when management is confirmed, throws when declined.
 */
const confirmBucketManagement = async bucketName => {
  _logger.logger.info(`[S3] 🔍 Checking that tag "${identifyingTag.Key}:${identifyingTag.Value}" exists on bucket "${bucketName}"...`);
  try {
    const tagging = await _awsServices.s3.getBucketTagging({ Bucket: bucketName });
    const isManaged = (tagging.TagSet ?? []).some(
      ({ Key, Value }) => Key === identifyingTag.Key && Value === identifyingTag.Value
    );
    if (isManaged) {
      _logger.logger.info(`[S3] 👍 Tag "${identifyingTag.Key}:${identifyingTag.Value}" found`);
      return true;
    }
  } catch (error) {
    // 404: the bucket has no tagging configuration — fall through to the prompt.
    if (error.statusCode !== 404) {
      throw error;
    }
  }
  const answers = await _inquirer.default.prompt([{
    type: 'confirm',
    name: 'continueUpdate',
    message: `[S3] Bucket "${bucketName}" is not yet managed by aws-spa. Would you like it to be modified (public access & website config) & managed by aws-spa?`,
    default: false
  }]);
  if (!answers.continueUpdate) {
    throw new Error('You can use another domain name or delete the S3 bucket...');
  }
  return true;
};
exports.confirmBucketManagement = confirmBucketManagement;
/**
 * Write the aws-spa identifying tag on the bucket so later runs can
 * recognize it as managed (see confirmBucketManagement).
 */
const tagBucket = async bucketName => {
  _logger.logger.info(`[S3] ✏️ Tagging "${bucketName}" bucket with "${identifyingTag.Key}:${identifyingTag.Value}"...`);
  const params = {
    Bucket: bucketName,
    Tagging: { TagSet: [identifyingTag] }
  };
  await _awsServices.s3.putBucketTagging(params);
};
exports.tagBucket = tagBucket;
/**
 * Disable static-website hosting on the bucket (best effort: failures
 * are logged, not thrown).
 *
 * Bug fix: the original returned the promise from inside `try` without
 * awaiting it, so a rejected deleteBucketWebsite request was never
 * caught by the `catch` (only synchronous throws were). Awaiting the
 * call makes the error handling actually cover the AWS request.
 */
const removeBucketWebsite = async bucketName => {
  _logger.logger.info(`[S3] 🔏 Ensure bucket "${bucketName}" is not a static website hosting`);
  try {
    return await _awsServices.s3.deleteBucketWebsite({
      Bucket: bucketName
    });
  } catch (error) {
    _logger.logger.error(`[S3] ❌ Error when removing static website hosting for bucket "${bucketName}"`, error);
  }
};
exports.removeBucketWebsite = removeBucketWebsite;
/**
 * Configure the bucket as a static website. index.html serves as both
 * the index and the error document so client-side routing falls back to
 * the SPA entry point on unknown paths.
 */
const setBucketWebsite = bucketName => {
  _logger.logger.info(`[S3] ✏️ Set bucket website with IndexDocument: "index.html" & ErrorDocument: "index.html" to "${bucketName}"...`);
  const websiteParams = {
    Bucket: bucketName,
    WebsiteConfiguration: {
      ErrorDocument: { Key: 'index.html' },
      IndexDocument: { Suffix: 'index.html' }
    }
  };
  return _awsServices.s3.putBucketWebsite(websiteParams);
};
exports.setBucketWebsite = setBucketWebsite;
/**
 * Attach a bucket policy granting anonymous s3:GetObject on every object
 * (public-read website hosting). Returns the putBucketPolicy promise.
 */
const setBucketPolicy = bucketName => {
  _logger.logger.info(`[S3] ✏️ Allow public read to "${bucketName}"...`);
  const publicReadStatement = {
    Sid: 'AllowPublicRead',
    Effect: 'Allow',
    Principal: { AWS: '*' },
    Action: 's3:GetObject',
    Resource: `arn:aws:s3:::${bucketName}/*`
  };
  return _awsServices.s3.putBucketPolicy({
    Bucket: bucketName,
    Policy: JSON.stringify({ Statement: [publicReadStatement] })
  });
};
exports.setBucketPolicy = setBucketPolicy;
// FIXME(review): this AWS account id was hard-coded into the SourceArn in
// the original code — the policy only matches distributions owned by that
// one account. It is kept as the default for backward compatibility, but
// callers can now pass their own account id.
const OAC_DEFAULT_ACCOUNT_ID = '651828462322';

/**
 * Attach a bucket policy allowing the CloudFront service principal to
 * read objects, restricted to the given distribution (Origin Access
 * Control pattern). Best effort: failures are logged, not thrown.
 *
 * Bug fix: the original returned the putBucketPolicy promise from inside
 * `try` without awaiting it, so a rejected request bypassed the `catch`.
 *
 * @param bucketName     target bucket
 * @param distributionId CloudFront distribution allowed to read
 * @param accountId      AWS account owning the distribution (defaults to
 *                       the historical hard-coded value)
 */
const setBucketPolicyForOAC = async (bucketName, distributionId, accountId = OAC_DEFAULT_ACCOUNT_ID) => {
  _logger.logger.info(`[S3] 🔏 Allow distribution ${distributionId} to read from "${bucketName}"...`);
  try {
    return await _awsServices.s3.putBucketPolicy({
      Bucket: bucketName,
      Policy: JSON.stringify({
        Statement: [{
          Sid: 'AllowCloudFrontServicePrincipal',
          Effect: 'Allow',
          Principal: {
            Service: 'cloudfront.amazonaws.com'
          },
          Action: 's3:GetObject',
          Resource: `arn:aws:s3:::${bucketName}/*`,
          Condition: {
            StringEquals: {
              'AWS:SourceArn': `arn:aws:cloudfront::${accountId}:distribution/${distributionId}`
            }
          }
        }]
      })
    });
  } catch (error) {
    _logger.logger.error(`[S3] ❌ Error when allowing distribution to read from "${bucketName}"`, error);
  }
};
exports.setBucketPolicyForOAC = setBucketPolicyForOAC;
/**
 * Enable all four S3 Public Access Block settings on the bucket (used
 * for the OAC/CloudFront-only setup). Best effort: failures are logged,
 * not thrown.
 *
 * Bug fix: the original returned the putPublicAccessBlock promise from
 * inside `try` without awaiting it, so a rejected request was never
 * caught by the `catch`. Awaiting makes the error handling effective.
 */
const blockBucketPublicAccess = async bucketName => {
  _logger.logger.info(`[S3] 🔏 Block public access for bucket "${bucketName}"...`);
  const params = {
    Bucket: bucketName,
    PublicAccessBlockConfiguration: {
      BlockPublicAcls: true,
      IgnorePublicAcls: true,
      BlockPublicPolicy: true,
      RestrictPublicBuckets: true
    }
  };
  try {
    return await _awsServices.s3.putPublicAccessBlock(params);
  } catch (error) {
    _logger.logger.error(`[S3] ❌ Error blocking public access for bucket "${bucketName}"`, error);
  }
};
exports.blockBucketPublicAccess = blockBucketPublicAccess;
/**
 * Remove the Public Access Block configuration from the bucket (used for
 * the public website-hosting setup). Best effort: failures are logged,
 * not thrown.
 *
 * Bug fix: the original returned the deletePublicAccessBlock promise
 * from inside `try` without awaiting it, so a rejected request bypassed
 * the `catch`. Awaiting makes the error handling effective.
 */
const allowBucketPublicAccess = async bucketName => {
  _logger.logger.info(`[S3] ✅ Allow public access for bucket "${bucketName}"...`);
  try {
    return await _awsServices.s3.deletePublicAccessBlock({
      Bucket: bucketName
    });
  } catch (error) {
    _logger.logger.error(`[S3] ❌ Error allowing public access for bucket "${bucketName}"`, error);
  }
};
exports.allowBucketPublicAccess = allowBucketPublicAccess;
// Tag marking a bucket as managed by aws-spa: written by tagBucket and
// checked by confirmBucketManagement before modifying an existing bucket.
const identifyingTag = {
  Key: 'managed-by-aws-spa',
  Value: 'v1'
};
exports.identifyingTag = identifyingTag;
/**
 * Upload every file under `folder` (recursively) to the bucket, in
 * parallel. Keys are the paths relative to `folder`, optionally nested
 * under `subfolder`. Cache headers come from getCacheControl; content
 * type is guessed from the file extension (octet-stream fallback).
 */
const syncToS3 = function (folder, bucketName, cacheBustedPrefix, subfolder) {
  _logger.logger.info(`[S3] ✏️ Uploading "${folder}" folder on "${bucketName}"...`);
  const keyPrefix = subfolder ? `${subfolder}/` : '';
  const uploads = (0, _fsHelper.readRecursively)(folder).map(filePath => {
    const key = filePath.replace(`${folder}/`, '');
    // Last dot-separated segment is the extension fed to the mime lookup.
    const extension = filePath.split('.').pop();
    return _awsServices.s3.putObject({
      Bucket: bucketName,
      Key: `${keyPrefix}${key}`,
      Body: (0, _fs.createReadStream)(filePath),
      CacheControl: getCacheControl(key, cacheBustedPrefix),
      ContentType: (0, _mimeTypes.lookup)(extension) || 'application/octet-stream'
    });
  });
  return Promise.all(uploads);
};
exports.syncToS3 = syncToS3;
/**
 * Pick the Cache-Control header for an uploaded file, or undefined when
 * no explicit caching policy applies.
 */
const getCacheControl = (filename, cacheBustedPrefix) => {
  // The SPA entry point must be revalidated with the origin on every
  // request: edge locations may keep a copy, but a fresh deployment has
  // to be picked up immediately (304s keep the revalidation cheap).
  if (filename === 'index.html') {
    return 'public, must-revalidate, proxy-revalidate, max-age=0';
  }
  // Cache-busted assets carry a content hash in their name, so a changed
  // index.html references new filenames — a one-year cache is safe.
  const isCacheBusted = Boolean(cacheBustedPrefix) && filename.startsWith(cacheBustedPrefix);
  return isCacheBusted ? 'max-age=31536000' : undefined;
};
// ID of the lifecycle rule that expires objects left behind by old
// branches (created/updated by upsertLifeCycleConfiguration).
const LIFE_CYCLE_OLD_BRANCH_ID = 'expire-old-branches';
exports.LIFE_CYCLE_OLD_BRANCH_ID = LIFE_CYCLE_OLD_BRANCH_ID;
/**
 * Ensure the bucket has a lifecycle rule (LIFE_CYCLE_OLD_BRANCH_ID)
 * expiring objects after `objectExpirationDays` days. Existing unrelated
 * rules are preserved; if an identical rule already exists, nothing is
 * written.
 */
const upsertLifeCycleConfiguration = async (bucketName, objectExpirationDays) => {
  let alreadyConfigured = false;
  let existingConfiguration = { Rules: [] };
  try {
    existingConfiguration = await _awsServices.s3.getBucketLifecycleConfiguration({
      Bucket: bucketName
    });
    alreadyConfigured = existingConfiguration.Rules?.some(rule => rule.ID === LIFE_CYCLE_OLD_BRANCH_ID && rule.Expiration?.Days === objectExpirationDays) ?? false;
  } catch (error) {
    // A bucket with no lifecycle configuration at all is expected here.
    if (error.Code !== 'NoSuchLifecycleConfiguration') {
      throw error;
    }
  }
  if (alreadyConfigured) {
    _logger.logger.info(`[S3] 👍 Lifecycle configuration "${LIFE_CYCLE_OLD_BRANCH_ID}" already exists, no update required for "${bucketName}" `);
    return;
  }
  // Backfill an empty-prefix Filter on fetched rules that lack one before
  // writing the configuration back.
  for (const rule of existingConfiguration.Rules ?? []) {
    if (!rule.Filter) {
      rule.Filter = { Prefix: '' };
    }
  }
  const otherRules = existingConfiguration.Rules?.filter(rule => rule.ID !== LIFE_CYCLE_OLD_BRANCH_ID) ?? [];
  await _awsServices.s3.putBucketLifecycleConfiguration({
    Bucket: bucketName,
    LifecycleConfiguration: {
      Rules: [...otherRules, {
        ID: LIFE_CYCLE_OLD_BRANCH_ID,
        Status: 'Enabled',
        Filter: { Prefix: '' },
        Expiration: { Days: objectExpirationDays }
      }]
    }
  });
  _logger.logger.info(`[S3] ✅ Lifecycle configuration "${LIFE_CYCLE_OLD_BRANCH_ID}" added for "${bucketName}" `);
};
exports.upsertLifeCycleConfiguration = upsertLifeCycleConfiguration;