UNPKG

@aws-amplify/storage

Version:

Storage category of aws-amplify

216 lines (213 loc) • 8.91 kB
import { Amplify } from '@aws-amplify/core'; import { StorageAction } from '@aws-amplify/core/internals/utils'; import { getDataChunker } from './getDataChunker.mjs'; import '@smithy/md5-js'; import '@aws-amplify/core/internals/aws-client-utils'; import '../../../utils/client/runtime/s3TransferHandler/fetch.mjs'; import 'fast-xml-parser'; import '../../../utils/client/runtime/s3TransferHandler/xhr.mjs'; import 'buffer'; import { StorageError } from '../../../../../errors/StorageError.mjs'; import { resolveS3ConfigAndInput } from '../../../utils/resolveS3ConfigAndInput.mjs'; import { CanceledError } from '../../../../../errors/CanceledError.mjs'; import '../../../../../errors/types/validation.mjs'; import { logger } from '../../../../../utils/logger.mjs'; import { DEFAULT_QUEUE_SIZE, DEFAULT_ACCESS_LEVEL } from '../../../utils/constants.mjs'; import { loadOrCreateMultipartUpload } from './initialUpload.mjs'; import { getConcurrentUploadsProgressTracker } from './progressTracker.mjs'; import { getUploadsCacheKey, removeCachedUpload } from './uploadCache.mjs'; import { uploadPartExecutor } from './uploadPartExecutor.mjs'; import '../../../utils/client/base.mjs'; import '../../../utils/client/getObject.mjs'; import '../../../utils/client/listObjectsV2.mjs'; import '../../../utils/client/putObject.mjs'; import '../../../utils/client/createMultipartUpload.mjs'; import '../../../utils/client/uploadPart.mjs'; import { completeMultipartUpload } from '../../../utils/client/completeMultipartUpload.mjs'; import '../../../utils/client/listParts.mjs'; import { abortMultipartUpload } from '../../../utils/client/abortMultipartUpload.mjs'; import '../../../utils/client/copyObject.mjs'; import { headObject } from '../../../utils/client/headObject.mjs'; import '../../../utils/client/deleteObject.mjs'; import { getStorageUserAgentValue } from '../../../utils/userAgent.mjs'; // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0
/**
 * Create closure hiding the multipart upload implementation details and expose the upload job and control functions(
 * onPause, onResume, onCancel).
 *
 * The returned `multipartUploadJob` resolves/rejects a single long-lived Promise; `onPause`,
 * `onResume` and `onCancel` manipulate the in-flight upload through shared closure state:
 *   - `inProgressUpload` caches the S3 uploadId + completed parts so `onResume` can continue
 *     where `onPause` left off (pause never clears it; `startUpload` only re-initializes when
 *     it is unset).
 *   - `resolveCallback`/`rejectCallback` are captured from the Promise executor inside
 *     `multipartUploadJob`, so the control functions assume the job has been started first.
 *
 * @param {{ options?: object, key: string, data: * }} input - upload input; `data` may be a
 *   `File`, in which case it participates in the resumable-upload cache key.
 * @param {number} [size] - total byte size when known; enables resumability caching and the
 *   post-upload size verification via `headObject`.
 * @returns {{ multipartUploadJob: Function, onPause: Function, onResume: Function, onCancel: Function }}
 *
 * @internal
 */
const getMultipartUploadHandlers = ({ options: uploadDataOptions, key, data }, size) => {
    // Promise settle hooks captured by multipartUploadJob's executor.
    let resolveCallback;
    let rejectCallback;
    // Cached { uploadId, completedParts } — persists across pause/resume cycles.
    let inProgressUpload;
    let s3Config;
    let abortController;
    let bucket;
    let keyPrefix;
    let uploadCacheKey;
    // Special flag that differentiates HTTP requests abort error caused by pause() from ones caused by cancel().
    // The former one should NOT cause the upload job to throw, but cancels any pending HTTP requests.
    // This should be replaced by a special abort reason. However,the support of this API is lagged behind.
    let isAbortSignalFromPause = false;
    const startUpload = async () => {
        // Re-resolve config on every (re)start so credentials/prefix are fresh after a pause.
        const resolvedS3Options = await resolveS3ConfigAndInput(Amplify, uploadDataOptions);
        s3Config = resolvedS3Options.s3Config;
        bucket = resolvedS3Options.bucket;
        keyPrefix = resolvedS3Options.keyPrefix;
        // Fresh controller per attempt: the previous one was consumed by pause()/cancel().
        abortController = new AbortController();
        isAbortSignalFromPause = false;
        const { contentDisposition, contentEncoding, contentType = 'application/octet-stream', metadata, accessLevel, onProgress, } = uploadDataOptions ?? {};
        if (!inProgressUpload) {
            // First start (not a resume): create a new multipart upload, or load a cached one
            // (with its already-completed parts) from a previous session.
            const { uploadId, cachedParts } = await loadOrCreateMultipartUpload({
                s3Config,
                accessLevel: resolveAccessLevel(accessLevel),
                bucket,
                keyPrefix,
                key,
                contentType,
                contentDisposition,
                contentEncoding,
                metadata,
                data,
                size,
                abortSignal: abortController.signal,
            });
            inProgressUpload = {
                uploadId,
                completedParts: cachedParts,
            };
        }
        const finalKey = keyPrefix + key;
        // Caching across sessions is only possible when the total size is known;
        // without `size` the upload is not resumable from cache.
        uploadCacheKey = size
            ? getUploadsCacheKey({
                file: data instanceof File ? data : undefined,
                accessLevel: resolveAccessLevel(uploadDataOptions?.accessLevel),
                contentType: uploadDataOptions?.contentType,
                bucket: bucket,
                size,
                key,
            })
            : undefined;
        const dataChunker = getDataChunker(data, size);
        // Part numbers already uploaded in a previous session — presumably used by the
        // executors to skip re-uploading those chunks (implemented in uploadPartExecutor).
        const completedPartNumberSet = new Set(inProgressUpload.completedParts.map(({ PartNumber }) => PartNumber));
        const onPartUploadCompletion = (partNumber, eTag) => {
            inProgressUpload?.completedParts.push({
                PartNumber: partNumber,
                ETag: eTag,
            });
        };
        const concurrentUploadsProgressTracker = getConcurrentUploadsProgressTracker({
            size,
            onProgress,
        });
        // Spawn a fixed-size pool of concurrent part uploaders that all pull chunks
        // from the single shared dataChunker generator.
        const concurrentUploadPartExecutors = [];
        for (let index = 0; index < DEFAULT_QUEUE_SIZE; index++) {
            concurrentUploadPartExecutors.push(uploadPartExecutor({
                dataChunkerGenerator: dataChunker,
                completedPartNumberSet,
                s3Config,
                abortSignal: abortController.signal,
                bucket,
                finalKey,
                uploadId: inProgressUpload.uploadId,
                onPartUploadCompletion,
                onProgress: concurrentUploadsProgressTracker.getOnProgressListener(),
                isObjectLockEnabled: resolvedS3Options.isObjectLockEnabled,
            }));
        }
        await Promise.all(concurrentUploadPartExecutors);
        // Parts are sorted by PartNumber because completion order is nondeterministic
        // under concurrency (and cached parts may interleave with fresh ones).
        const { ETag: eTag } = await completeMultipartUpload({
            ...s3Config,
            abortSignal: abortController.signal,
            userAgentValue: getStorageUserAgentValue(StorageAction.UploadData),
        }, {
            Bucket: bucket,
            Key: finalKey,
            UploadId: inProgressUpload.uploadId,
            MultipartUpload: {
                Parts: inProgressUpload.completedParts.sort((partA, partB) => partA.PartNumber - partB.PartNumber),
            },
        });
        if (size) {
            // Sanity check: verify the object S3 actually stored matches the expected byte size.
            const { ContentLength: uploadedObjectSize } = await headObject(s3Config, {
                Bucket: bucket,
                Key: finalKey,
            });
            if (uploadedObjectSize && uploadedObjectSize !== size) {
                throw new StorageError({
                    name: 'Error',
                    message: `Upload failed. Expected object size ${size}, but got ${uploadedObjectSize}.`,
                });
            }
        }
        if (uploadCacheKey) {
            // Upload finished — the cached resumability record is no longer needed.
            await removeCachedUpload(uploadCacheKey);
        }
        return {
            key,
            eTag,
            contentType,
            metadata,
        };
    };
    // Runs the upload and routes the outcome to the job Promise's settle hooks.
    // An abort that originated from pause() is swallowed (logged only) so the job
    // Promise stays pending and can be resumed later.
    const startUploadWithResumability = () => startUpload()
        .then(resolveCallback)
        .catch(error => {
            const abortSignal = abortController?.signal;
            if (abortSignal?.aborted && isAbortSignalFromPause) {
                logger.debug('upload paused.');
            }
            else {
                // Uncaught errors should be exposed to the users.
                rejectCallback(error);
            }
        });
    const multipartUploadJob = () => new Promise((resolve, reject) => {
        resolveCallback = resolve;
        rejectCallback = reject;
        startUploadWithResumability();
    });
    const onPause = () => {
        // Set the flag BEFORE aborting so the catch handler above classifies the
        // resulting abort error as a pause rather than a failure.
        isAbortSignalFromPause = true;
        abortController?.abort();
    };
    const onResume = () => {
        // Restart; startUpload reuses the cached inProgressUpload and uploads
        // only the parts not already in completedParts.
        startUploadWithResumability();
    };
    const onCancel = (message) => {
        // 1. abort in-flight API requests
        abortController?.abort(message);
        const cancelUpload = async () => {
            // 2. clear upload cache.
            if (uploadCacheKey) {
                await removeCachedUpload(uploadCacheKey);
            }
            // 3. clear multipart upload on server side.
            await abortMultipartUpload(s3Config, {
                Bucket: bucket,
                Key: keyPrefix + key,
                UploadId: inProgressUpload?.uploadId,
            });
        };
        // Best-effort cleanup: failures are logged, not surfaced — the job is
        // rejected with CanceledError below regardless.
        cancelUpload().catch(e => {
            logger.debug('error when cancelling upload task.', e);
        });
        rejectCallback(
        // Internal error that should not be exposed to the users. They should use isCancelError() to check if
        // the error is caused by cancel().
        new CanceledError(message ? { message } : undefined));
    };
    return {
        multipartUploadJob,
        onPause,
        onResume,
        onCancel,
    };
};
/**
 * Resolve the effective access level: per-call option first, then the
 * library-level `Storage.S3.defaultAccessLevel`, then the package default.
 *
 * @param {string|undefined} accessLevel - access level supplied with this upload, if any.
 * @returns {string} the access level to use.
 */
const resolveAccessLevel = (accessLevel) => accessLevel ??
    Amplify.libraryOptions.Storage?.S3?.defaultAccessLevel ??
    DEFAULT_ACCESS_LEVEL;

export { getMultipartUploadHandlers };
//# sourceMappingURL=uploadHandlers.mjs.map