eip-cloud-services
Houses a collection of helpers for connecting to cloud services.
/**
* Helpful Information
* ----------------------------------------------------------------
*
* Cache-Control Directives
*
* The Cache-Control header contains directives that control the caching behavior
* of a resource in both client and proxy caches. These directives provide fine-grained
* control over how and when caches should store and serve the resource. Understanding
* these directives is important for optimizing caching strategies and ensuring the
* proper handling of cached content.
*
* - max-age: Specifies the maximum age of the resource in seconds.
* - s-maxage: Specifies the maximum age of the resource in seconds for shared caches (e.g., CDNs such as CloudFront).
* - public: Indicates that the response may be stored by any cache, shared or private.
* - private: Indicates that the response is intended for a single user and should not be
* cached by shared caches.
* - no-cache: Requires the cache to revalidate the resource with the server before
* serving it. The resource may still be cached, but the cache must check with the server
* for any updates.
* - no-store: Instructs caches not to store the response under any circumstances.
* - must-revalidate: Requires the cache to revalidate the resource with the server before
* serving it to subsequent requests. If the resource is expired, the cache must send a
* conditional request to the server for revalidation.
* - proxy-revalidate: Similar to must-revalidate, but specifically applies to proxy caches.
* Instructs proxy caches to revalidate the resource with the server, even if it has
* previously been validated and marked as fresh.
* - no-transform: Instructs intermediaries not to modify the response, such as by
* transforming the content encoding or media type.
* - immutable: Indicates that the resource is considered immutable and should not change.
* Caches can store immutable resources indefinitely.
*
* It's important to carefully consider the appropriate combination of Cache-Control
* directives based on your caching requirements and the desired behavior of your
* client and proxy caches.
*
* Choosing Cache-Control directives carefully keeps cloud infrastructure costs low
* and delivers the best Core Web Vitals (CWV) performance.
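*
* For example, the default policy applied by this module's `set` helper keeps
* an object fresh for 25 seconds in browsers and 30 seconds in shared caches,
* then forces revalidation:
*
*   Cache-Control: max-age=25,s-maxage=30,must-revalidate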
*/
const { S3Client, HeadObjectCommand, GetObjectCommand, PutObjectCommand, DeleteObjectCommand, CopyObjectCommand, ListObjectsV2Command } = require ( '@aws-sdk/client-s3' );
const fs = require ( 'fs' );
const path = require ( 'path' );
let config = {};
const configDirPath = `${ process.cwd ()}/config`;
if ( fs.existsSync ( configDirPath ) && fs.statSync ( configDirPath ).isDirectory () ) {
config = require ( 'config' ); // require the config directory if it exists
}
const zlib = require ( 'zlib' );
const crypto = require ( 'crypto' );
const { cwd } = require ( 'process' );
const { log } = config?.s3?.logsFunction ? require ( `${ cwd ()}/${config?.s3?.logsFunction}` ) : console;
const S3 = new S3Client ( { region: 'eu-west-1' } );
const { pipeline, Writable } = require ( 'stream' );
const util = require ( 'util' );
const pipelineAsync = util.promisify ( pipeline );
/**
* Check if an object exists in S3.
*
* @param {string} key - The object key.
* @param {string} [bucket=config?.s3?.Bucket] - The bucket name. Defaults to the configured bucket.
* @returns {Promise<boolean>} A promise that resolves to true if the object exists, false otherwise.
* @description Checks if an object with the specified key exists in S3.
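*
* @example
* // A minimal sketch (run inside an async function); the require path and key
* // are illustrative assumptions.
* const s3 = require ( 'eip-cloud-services' );
* if ( await s3.exists ( 'articles/2024/index.json' ) ) {
*   // the object is present and safe to fetch
* }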
*/
exports.exists = async ( key, bucket = config?.s3?.Bucket ) => {
try {
const command = new HeadObjectCommand ( {
Bucket: bucket,
Key: key
} );
await S3.send ( command );
if ( config?.s3?.logs === 'verbose' )
log ( `S3 [EXISTS]: ${key} on ${bucket} - Exists` );
return true;
}
catch ( error ) {
if ( config?.s3?.logs === 'verbose' )
log ( `S3 [EXISTS]: ${key} on ${bucket} - Does not exist` );
return false;
}
};
/**
* Get an object from S3 and optionally download it to a file.
*
* @param {string} key - The object key.
* @param {string} [bucket=config?.s3?.Bucket] - The bucket name. Defaults to the configured bucket.
* @param {Object} [options] - Optional parameters, including targetFolder for file download.
* @param {string} [options.targetFolder] - If provided the requested object will be downloaded to this folder instead of being returned. The response will be the file path.
* @returns {Promise} A promise that resolves to the retrieved object or the path to the downloaded file.
* @description Retrieves an object from S3 based on the provided key and optionally downloads it to a specified folder.
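*
* @example
* // A minimal sketch (run inside an async function); the keys and target folder
* // are illustrative assumptions.
* const s3 = require ( 'eip-cloud-services' );
* const article = await s3.get ( 'articles/2024/index.json' ); // parsed JSON
* const filePath = await s3.get ( 'images/logo.png', undefined, { targetFolder: '/tmp' } ); // path to downloaded file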
*/
exports.get = async ( key, bucket = config?.s3?.Bucket, options = {} ) => {
try {
const command = new GetObjectCommand ( {
Bucket: bucket,
Key: key
} );
if ( config?.s3?.logs === 'verbose' )
log ( `S3 [GET]: Getting ${bucket}/${key}.` );
const response = await S3.send ( command );
// If options.targetFolder is specified, download the file
if ( options.targetFolder ) {
const filePath = path.join ( options.targetFolder, key.replace ( /\//g, '_' ) );
const fileStream = fs.createWriteStream ( filePath );
// pipeline propagates errors from both the S3 body stream and the file stream,
// whereas listening only on the file stream could miss source errors
await pipelineAsync ( response.Body, fileStream );
if ( config?.s3?.logs === 'outputs' || config?.s3?.logs === 'verbose' )
log ( `S3 [GET]: Downloaded ${key} to ${filePath}.` );
return filePath;
}
else {
let data = await streamToBuffer ( response.Body );
if ( response.ContentEncoding && response.ContentEncoding === 'gzip' ) {
if ( config?.s3?.logs === 'verbose' )
log ( `S3 [GET]: ${key} on ${bucket} was unzipped (was gzipped).` );
data = zlib.unzipSync ( data );
}
if ( response.ContentType !== 'application/json' && !response.Metadata?.[ 'tmg-json' ] ) {
if ( config?.s3?.logs === 'outputs' || config?.s3?.logs === 'verbose' )
log ( `S3 [GET]: Returned ${response.ContentType} from ${bucket}/${key}.` );
return data.toString ( 'utf8' );
}
if ( ( response.ContentType === 'application/json' || response.Metadata?.[ 'tmg-json' ] ) && response.Metadata?.[ 'tmg-crypt' ] && response.Metadata?.[ 'tmg-crypt-vec' ] ) {
// Named `decryptionKey` to avoid shadowing the outer `key` parameter used in the log below.
const decryptionKey = await crypto.subtle.importKey (
'raw',
Buffer.from ( response.Metadata[ 'tmg-crypt' ], 'base64' ),
{ name: 'AES-CBC', length: 256 },
false,
[ 'decrypt' ]
);
const iv = Buffer.from ( response.Metadata[ 'tmg-crypt-vec' ], 'base64' );
const decryptedArrayBuffer = await crypto.subtle.decrypt (
{ name: 'AES-CBC', iv },
decryptionKey,
Buffer.from ( data.toString (), 'base64' )
);
data = Buffer.from ( decryptedArrayBuffer ).toString ( 'utf8' );
if ( config?.s3?.logs === 'verbose' )
log ( `S3 [GET]: ${key} on ${bucket} - JSON content was decrypted.` );
}
if ( config?.s3?.logs === 'outputs' || config?.s3?.logs === 'verbose' )
log ( `S3 [GET]: ${bucket}/${key} - JSON content was returned.` );
return JSON.parse ( data.toString ( 'utf8' ) );
}
}
catch ( error ) {
// AWS SDK v3 surfaces the S3 error code on `error.name`; the legacy `error.Code` check is kept as a fallback.
if ( error.name === 'NoSuchKey' || error.Code === 'NoSuchKey' ) {
return null;
}
throw error;
}
};
/**
* Set an object in S3.
*
* @param {string} key - The object key.
* @param {Buffer|Uint8Array|Blob|string} body - The object body.
* @param {object} [options] - The optional parameters for setting the object.
* @param {string} [options.bucket=config?.s3?.Bucket] - The bucket name. Defaults to the configured bucket.
* @param {string} [options.contentType='application/json'] - The content type of the object. Defaults to 'application/json'.
* @param {string} [options.acl='public-read'] - The ACL (Access Control List) of the object. Defaults to 'public-read'.
* @param {string} [options.cacheControl='max-age=25,s-maxage=30,must-revalidate'] - Sets cache control for the object.
* @param {boolean} [options.encrypt=false] - When true (and the content type is JSON or plain text), the body is encrypted with a randomly generated AES-256-CBC key; the key and IV are stored base64-encoded in the object's metadata.
* @param {object} [options.metadata={}] - Sets metadata for the object.
* @returns {Promise} A promise that resolves when the object is successfully set in S3.
* @description Sets an object in S3 with the provided key, body, and optional parameters.
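*
* @example
* // A minimal sketch (run inside an async function); the key, body, and
* // cacheControl value are illustrative assumptions.
* const s3 = require ( 'eip-cloud-services' );
* await s3.set ( 'articles/2024/index.json', JSON.stringify ( { headline: 'Hello' } ), {
*   cacheControl: 'max-age=60,s-maxage=300,must-revalidate',
*   encrypt: true // AES key and IV end up base64-encoded in the object metadata
* } );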
*/
exports.set = async ( key, body, options = {} ) => {
const {
bucket = config?.s3?.Bucket,
contentType = 'application/json',
acl = 'public-read',
cacheControl = 'max-age=25,s-maxage=30,must-revalidate',
encrypt = false,
metadata = {}
} = options;
if ( encrypt && ( contentType === 'application/json' || contentType === 'text/plain' ) ) {
if ( config?.s3?.logs === 'verbose' )
log ( `S3 [SET]: ${bucket}/${key} - Encrypting.` );
const encoder = new TextEncoder ();
const data = encoder.encode ( body );
const encryptionKey = await crypto.subtle.generateKey (
{ name: 'AES-CBC', length: 256 },
true,
[ 'encrypt', 'decrypt' ]
);
const iv = crypto.randomBytes ( 16 );
const exportedKey = await crypto.subtle.exportKey ( 'raw', encryptionKey );
const exportedIV = iv.toString ( 'base64' );
metadata[ 'Tmg-Crypt' ] = Buffer.from ( exportedKey ).toString ( 'base64' );
metadata[ 'Tmg-Crypt-Vec' ] = exportedIV;
const encryptedData = await crypto.subtle.encrypt (
{ name: 'AES-CBC', iv },
encryptionKey,
data
);
body = Buffer.from ( encryptedData ).toString ( 'base64' );
}
try {
const command = new PutObjectCommand ( {
Bucket: bucket,
Key: key,
Body: body,
ContentType: contentType,
ACL: acl,
CacheControl: cacheControl,
Metadata: metadata,
ContentLength: Buffer.byteLength ( body )
} );
const data = await S3.send ( command );
if ( config?.s3?.logs === 'outputs' || config?.s3?.logs === 'verbose' )
log ( `S3 [SET]: ${bucket}/${key} - Stored.` );
return data;
}
catch ( error ) {
log ( error );
throw error;
}
};
/**
* Delete an object from S3.
*
* @param {string} key - The object key.
* @param {string} [bucket=config?.s3?.Bucket] - The bucket name. Defaults to the configured bucket.
* @returns {Promise} A promise that resolves when the object is successfully deleted from S3.
* @description Deletes an object from S3 based on the provided key.
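*
* @example
* // A minimal sketch (run inside an async function); the key is an
* // illustrative assumption.
* const s3 = require ( 'eip-cloud-services' );
* await s3.del ( 'articles/2024/index.json' );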
*/
exports.del = async ( key, bucket = config?.s3?.Bucket ) => {
const command = new DeleteObjectCommand ( {
Bucket: bucket,
Key: key
} );
const data = await S3.send ( command );
if ( config?.s3?.logs === 'outputs' || config?.s3?.logs === 'verbose' )
log ( `S3 [DELETE]: ${key} on ${bucket} - Deleted.` );
return data;
};
/**
* Copy an object within S3 to a different location. (This keeps the original, like a COPY / PASTE operation.)
*
* @param {string} sourceKey - The source object key.
* @param {string} destinationKey - The destination object key.
* @param {string} [sourceBucket=config?.s3?.Bucket] - The source bucket name. Defaults to the configured bucket.
* @param {string} [destinationBucket=config?.s3?.Bucket] - The destination bucket name. Defaults to the configured bucket.
* @returns {Promise} A promise that resolves when the object is successfully copied in S3.
* @description Copies an object from the source location to the destination location within S3.
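*
* @example
* // A minimal sketch (run inside an async function); the keys are illustrative
* // assumptions. The source object is retained.
* const s3 = require ( 'eip-cloud-services' );
* await s3.copy ( 'drafts/article.json', 'published/article.json' );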
*/
exports.copy = async ( sourceKey, destinationKey, sourceBucket = config?.s3?.Bucket, destinationBucket = config?.s3?.Bucket ) => {
// Copy the object to the destination location
const copyCommand = new CopyObjectCommand ( {
CopySource: `/${sourceBucket}/${sourceKey}`,
Bucket: destinationBucket,
Key: destinationKey,
ACL: 'public-read',
MetadataDirective: 'COPY',
} );
await S3.send ( copyCommand );
if ( config?.s3?.logs === 'outputs' || config?.s3?.logs === 'verbose' ){
if ( sourceBucket === destinationBucket ){
log ( `S3 [COPY]: ${sourceKey} copied to ${destinationKey} on ${sourceBucket}.` );
}
else {
log ( `S3 [COPY]: ${sourceKey} on ${sourceBucket} copied to ${destinationKey} on ${destinationBucket}.` );
}
}
};
/**
* Move an object within S3 to a different location. (This deletes the original like a CUT / PASTE operation)
*
* @param {string} sourceKey - The source object key.
* @param {string} destinationKey - The destination object key.
* @param {string} [sourceBucket=config?.s3?.Bucket] - The source bucket name. Defaults to the configured bucket.
* @param {string} [destinationBucket=config?.s3?.Bucket] - The destination bucket name. Defaults to the configured bucket.
* @returns {Promise} A promise that resolves when the object is successfully moved in S3.
* @description Moves an object from the source location to the destination location within S3.
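*
* @example
* // A minimal sketch (run inside an async function); the keys are illustrative
* // assumptions. The source object is deleted after the copy.
* const s3 = require ( 'eip-cloud-services' );
* await s3.move ( 'drafts/article.json', 'published/article.json' );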
*/
exports.move = async ( sourceKey, destinationKey, sourceBucket = config?.s3?.Bucket, destinationBucket = config?.s3?.Bucket ) => {
// Copy the object to the destination location
await this.copy ( sourceKey, destinationKey, sourceBucket, destinationBucket );
// Delete the object from the source location
await this.del ( sourceKey, sourceBucket );
if ( config?.s3?.logs === 'outputs' || config?.s3?.logs === 'verbose' ){
if ( sourceBucket === destinationBucket ){
log ( `S3 [MOVE]: ${sourceKey} moved to ${destinationKey} on ${sourceBucket}.` );
}
else {
log ( `S3 [MOVE]: ${sourceKey} on ${sourceBucket} moved to ${destinationKey} on ${destinationBucket}.` );
}
}
};
/**
* List objects in an S3 bucket filtered by a prefix, with support for pagination.
*
* @param {string} prefix - The prefix to filter objects by.
* @param {string} [bucket=config?.s3?.Bucket] - The bucket name. Defaults to the configured bucket.
* @param {string} [continuationToken] - The continuation token for pagination (optional).
* @returns {Promise} A promise that resolves with the list of objects and potentially a continuation token for further pagination.
* @description Retrieves a list of objects from S3 that match the given prefix, with support for pagination.
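*
* @example
* // A minimal pagination sketch (run inside an async function); the prefix is
* // an illustrative assumption.
* const s3 = require ( 'eip-cloud-services' );
* let token;
* do {
*   const page = await s3.listObjects ( 'articles/2024/', undefined, token );
*   for ( const obj of page.objects ) console.log ( obj.Key );
*   token = page.nextContinuationToken;
* } while ( token );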
*/
exports.listObjects = async ( prefix, bucket = config?.s3?.Bucket, continuationToken ) => {
const params = {
Bucket: bucket,
Prefix: prefix,
};
if ( continuationToken ) {
params.ContinuationToken = continuationToken;
}
const command = new ListObjectsV2Command ( params );
const data = await S3.send ( command );
if ( config?.s3?.logs === 'outputs' || config?.s3?.logs === 'verbose' ) {
log ( `S3 [LIST]: Retrieved list for prefix ${prefix} on ${bucket}${continuationToken ? ' with continuation token' : ''}.` );
}
return {
objects: data.Contents ?? [], // S3 omits Contents when no keys match the prefix
isTruncated: data.IsTruncated,
nextContinuationToken: data.NextContinuationToken,
};
};
// Expose the underlying S3Client instance for callers that need direct SDK access.
exports.getClient = S3;
/**
* Collect a readable stream into a single Buffer.
*
* @param {stream.Readable} stream - The readable stream to consume.
* @returns {Promise<Buffer>} A promise that resolves to the concatenated contents of the stream.
*/
const streamToBuffer = async ( stream ) => {
const chunks = [];
const collectorStream = new Writable ( {
write ( chunk, encoding, callback ) {
chunks.push ( chunk );
callback ();
}
} );
await pipelineAsync ( stream, collectorStream );
return Buffer.concat ( chunks );
};