amplify-s3-chunk-upload
A custom storage upload plugin for AWS Amplify. Instead of reading the whole file into memory, it reads and uploads the file chunk by chunk.
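A minimal usage sketch, assuming `Credentials` from @aws-amplify/core as the credentials provider; the bucket, key, region, and `file` values below are placeholders, not part of this package:

const { Credentials } = require('@aws-amplify/core');
const events = require('events');
const { StorageChunkManagedUpload } = require('amplify-s3-chunk-upload');

// Placeholder S3 parameters; `file` is the File/Blob/Buffer being uploaded.
const params = {
  Bucket: 'my-bucket',
  Key: 'uploads/large-file.bin',
  Body: file,
  ContentType: 'application/octet-stream',
};
const emitter = new events.EventEmitter();
const uploader = new StorageChunkManagedUpload(params, Credentials, { region: 'us-east-1' }, emitter);

// Progress updates ({ loaded, total, part, key }) are emitted on `emitter`
// under the SEND_PROGRESS_EVENT name exported by the bundled axios-http-handler.
uploader.upload()
  .then(result => console.log('upload complete', result))
  .catch(err => console.error('upload failed', err));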
JavaScript
"use strict";
/*
* Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.StorageChunkManagedUpload = void 0;
const core_1 = require("@aws-amplify/core");
const client_s3_1 = require("@aws-sdk/client-s3");
const axios_http_handler_1 = require("./axios-http-handler");
const events = require("events");
const fetch_http_handler_1 = require("@aws-sdk/fetch-http-handler");
const logger = new core_1.ConsoleLogger('AWSS3ProviderManagedUpload');
const localTestingStorageEndpoint = 'http://localhost:20005';
const SET_CONTENT_LENGTH_HEADER = 'contentLengthMiddleware';
class StorageChunkManagedUpload {
constructor(params, credentials, opts, emitter) {
this.credentials = credentials;
// Defaults
this.minPartSize = 5 * 1024 * 1024; // 5 MB, in bytes
this.queueSize = 4;
// Data for current upload
this.body = null;
this.params = null;
this.opts = null;
this.multiPartMap = [];
this.cancel = false;
// Progress reporting
this.bytesUploaded = 0;
this.totalBytesToUpload = 0;
this.emitter = null;
this.params = params;
this.opts = opts;
this.emitter = emitter;
}
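/**
 * Uploads the body. Bodies at or below `minPartSize` (5 MB) are sent with a
 * single PutObject call; larger bodies go through the S3 multipart upload
 * flow in batches of up to `queueSize` (4) concurrent parts.
 */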
upload() {
return __awaiter(this, void 0, void 0, function* () {
this.body = yield this.validateAndSanitizeBody(this.params.Body);
this.totalBytesToUpload = this.byteLength(this.body);
if (this.totalBytesToUpload <= this.minPartSize) {
// Multipart upload is not required; upload the sanitized body as is.
// The slice may come back as a promise, so resolve it first.
this.params.Body = yield Promise.resolve(this.body.slice(0, this.totalBytesToUpload));
const putObjectCommand = new client_s3_1.PutObjectCommand(this.params);
const s3 = yield this._createNewS3Client(this.opts, this.emitter);
return s3.send(putObjectCommand);
}
else {
// Step 1: Initiate the multi part upload
const uploadId = yield this.createMultiPartUpload();
// Step 2: Upload chunks in parallel as requested
const numberOfPartsToUpload = Math.ceil(this.totalBytesToUpload / this.minPartSize);
for (let start = 0; start < numberOfPartsToUpload; start += this.queueSize) {
/** Cancel the upload here if the cancel request arrived before
 * any part uploads started.
 */
yield this.checkIfUploadCancelled(uploadId);
// Upload as many as `queueSize` parts simultaneously
const parts = yield this.createParts(start);
yield this.uploadParts(uploadId, parts);
/** Check again after the batch completes, in case a cancel request
 * arrived while part uploads were still in flight. This ensures
 * that all uploaded parts are cleaned up.
 */
yield this.checkIfUploadCancelled(uploadId);
}
// Step 3: Finalize the upload such that S3 can recreate the file
return yield this.finishMultiPartUpload(uploadId);
}
});
}
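/**
 * Slices the body into at most `queueSize` parts, starting at the given part
 * index. Part numbers are 1-based, as S3 requires, and each part gets its own
 * emitter for progress reporting.
 */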
createParts(startPartNumber) {
return __awaiter(this, void 0, void 0, function* () {
const parts = [];
let partNumber = startPartNumber;
for (let bodyStart = startPartNumber * this.minPartSize; bodyStart < this.totalBytesToUpload && parts.length < this.queueSize;) {
const bodyEnd = Math.min(bodyStart + this.minPartSize, this.totalBytesToUpload);
parts.push({
bodyPart: yield Promise.resolve(this.body.slice(bodyStart, bodyEnd)),
partNumber: ++partNumber,
emitter: new events.EventEmitter(),
_lastUploadedBytes: 0,
});
bodyStart += this.minPartSize;
}
return parts;
});
}
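/**
 * Initiates the multipart upload and returns the UploadId that S3 assigns,
 * which every subsequent part request must reference.
 */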
createMultiPartUpload() {
return __awaiter(this, void 0, void 0, function* () {
const createMultiPartUploadCommand = new client_s3_1.CreateMultipartUploadCommand(this.params);
const s3 = yield this._createNewS3Client(this.opts);
// @aws-sdk/client-s3 seems to be ignoring the `ContentType` parameter, so we
// are explicitly adding it via middleware.
// https://github.com/aws/aws-sdk-js-v3/issues/2000
s3.middlewareStack.add(next => (args) => {
if (this.params.ContentType &&
args &&
args.request &&
args.request.headers) {
args.request.headers['Content-Type'] = this.params.ContentType;
}
return next(args);
}, {
step: 'build',
});
const response = yield s3.send(createMultiPartUploadCommand);
logger.debug(response.UploadId);
return response.UploadId;
});
}
/**
* @private Not to be extended outside of tests
* @VisibleForTesting
*/
uploadParts(uploadId, parts) {
return __awaiter(this, void 0, void 0, function* () {
const promises = [];
for (const part of parts) {
this.setupEventListener(part);
const uploadPartCommandInput = {
PartNumber: part.partNumber,
Body: part.bodyPart,
UploadId: uploadId,
Key: this.params.Key,
Bucket: this.params.Bucket,
};
const uploadPartCommand = new client_s3_1.UploadPartCommand(uploadPartCommandInput);
const s3 = yield this._createNewS3Client(this.opts, part.emitter);
promises.push(s3.send(uploadPartCommand));
}
try {
const allResults = yield Promise.all(promises);
// The order of resolved promises is the same as input promise order.
for (let i = 0; i < allResults.length; i++) {
this.multiPartMap.push({
PartNumber: parts[i].partNumber,
ETag: allResults[i].ETag,
});
}
}
catch (error) {
logger.error('error happened while uploading a part. Cancelling the multipart upload', error);
this.cancelUpload();
return;
}
});
}
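/**
 * Completes the multipart upload by sending the accumulated PartNumber/ETag
 * pairs so that S3 can stitch the parts back into a single object.
 */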
finishMultiPartUpload(uploadId) {
return __awaiter(this, void 0, void 0, function* () {
const input = {
Bucket: this.params.Bucket,
Key: this.params.Key,
UploadId: uploadId,
MultipartUpload: { Parts: this.multiPartMap },
};
const completeUploadCommand = new client_s3_1.CompleteMultipartUploadCommand(input);
const s3 = yield this._createNewS3Client(this.opts);
try {
const data = yield s3.send(completeUploadCommand);
return data.Key;
}
catch (error) {
logger.error('error happened while finishing the upload. Cancelling the multipart upload', error);
this.cancelUpload();
return;
}
});
}
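/**
 * If `cancelUpload()` has been called, aborts the multipart upload and throws
 * so the upload loop stops.
 */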
checkIfUploadCancelled(uploadId) {
return __awaiter(this, void 0, void 0, function* () {
if (this.cancel) {
let errorMessage = 'Upload was cancelled.';
try {
yield this.cleanup(uploadId);
}
catch (error) {
errorMessage += ' ' + error.message;
}
throw new Error(errorMessage);
}
});
}
cancelUpload() {
this.cancel = true;
}
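/**
 * Resets local state, aborts the multipart upload on S3, and verifies via
 * ListParts that no uploaded parts remain.
 */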
cleanup(uploadId) {
return __awaiter(this, void 0, void 0, function* () {
// Reset the uploader's state
this.body = null;
this.multiPartMap = [];
this.bytesUploaded = 0;
this.totalBytesToUpload = 0;
const input = {
Bucket: this.params.Bucket,
Key: this.params.Key,
UploadId: uploadId,
};
const s3 = yield this._createNewS3Client(this.opts);
yield s3.send(new client_s3_1.AbortMultipartUploadCommand(input));
// verify that all parts are removed.
const data = yield s3.send(new client_s3_1.ListPartsCommand(input));
if (data && data.Parts && data.Parts.length > 0) {
throw new Error('Multipart upload cleanup failed');
}
});
}
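/**
 * Progress events carry the cumulative bytes uploaded for a single part, so
 * track the per-part delta and feed it into the overall progress counter.
 */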
setupEventListener(part) {
part.emitter.on(axios_http_handler_1.SEND_PROGRESS_EVENT, (progress) => {
this.progressChanged(part.partNumber, progress.loaded - part._lastUploadedBytes);
part._lastUploadedBytes = progress.loaded;
});
}
progressChanged(partNumber, incrementalUpdate) {
this.bytesUploaded += incrementalUpdate;
this.emitter.emit(axios_http_handler_1.SEND_PROGRESS_EVENT, {
loaded: this.bytesUploaded,
total: this.totalBytesToUpload,
part: partNumber,
key: this.params.Key,
});
}
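/**
 * Best-effort byte length: covers ArrayBuffers and typed arrays (`byteLength`),
 * strings and Buffers (`length`), and Blobs/Files (`size`).
 */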
byteLength(input) {
if (input === null || input === undefined)
return 0;
if (typeof input.byteLength === 'number') {
return input.byteLength;
}
else if (typeof input.length === 'number') {
return input.length;
}
else if (typeof input.size === 'number') {
return input.size;
}
else if (typeof input.path === 'string') {
/* NodeJs Support
return require('fs').lstatSync(input.path).size;
*/
}
else {
throw new Error('Cannot determine length of ' + input);
}
}
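/**
 * Generic objects are stringified before upload. In React Native, Blobs are
 * collected into an ArrayBuffer to work around an axios Blob-detection issue;
 * everything else is passed through unchanged.
 */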
validateAndSanitizeBody(body) {
return __awaiter(this, void 0, void 0, function* () {
if (this.isGenericObject(body)) {
// Any javascript object
return JSON.stringify(body);
}
else if (this.isBlob(body)) {
// If it's a blob, we need to convert it to an array buffer as axios has issues
// with correctly identifying blobs in *react native* environment. For more
// details see https://github.com/aws-amplify/amplify-js/issues/5311
if (core_1.Platform.isReactNative) {
return yield fetch_http_handler_1.streamCollector(body);
}
return body;
}
else {
// Files, ArrayBuffers, etc.
return body;
}
/* TODO: streams and files for nodejs
if (
typeof body.path === 'string' &&
require('fs').lstatSync(body.path).size > 0
) {
return body;
} */
});
}
isBlob(body) {
return typeof Blob !== 'undefined' && body instanceof Blob;
}
isGenericObject(body) {
if (body !== null && typeof body === 'object') {
try {
return !(this.byteLength(body) >= 0);
}
catch (error) {
// If we cannot determine the length of the body, consider it
// as a generic object and upload a stringified version of it
return true;
}
}
return false;
}
/**
* @private
* creates an S3 client with new V3 aws sdk
*/
_createNewS3Client(config, emitter) {
return __awaiter(this, void 0, void 0, function* () {
const credentials = yield this._getCredentials();
const { region, dangerouslyConnectToHttpEndpointForTesting } = config;
let localTestingConfig = {};
if (dangerouslyConnectToHttpEndpointForTesting) {
localTestingConfig = {
endpoint: localTestingStorageEndpoint,
tls: false,
bucketEndpoint: false,
forcePathStyle: true,
};
}
const client = new client_s3_1.S3Client(Object.assign(Object.assign({ region,
credentials }, localTestingConfig), { requestHandler: new axios_http_handler_1.AxiosHttpHandler({}, emitter), customUserAgent: core_1.getAmplifyUserAgent() }));
client.middlewareStack.remove(SET_CONTENT_LENGTH_HEADER);
return client;
});
}
/**
* @private
*/
_getCredentials() {
return this.credentials
.get()
.then((credentials) => {
if (!credentials)
return false;
const cred = this.credentials.shear(credentials);
logger.debug('set credentials for storage', cred);
return cred;
})
.catch((error) => {
logger.warn('ensure credentials error', error);
return false;
});
}
}
exports.StorageChunkManagedUpload = StorageChunkManagedUpload;