UNPKG

@uppy/aws-s3

Version:

Upload to Amazon S3 with Uppy

554 lines (553 loc) 23.4 kB
import { RequestClient } from '@uppy/companion-client';
import { BasePlugin, EventManager } from '@uppy/core';
import {
  createAbortError,
  filterFilesToEmitUploadStarted,
  filterFilesToUpload,
  getAllowedMetaFields,
  RateLimitedQueue,
} from '@uppy/utils';
import packageJson from '../package.json' with { type: 'json' };
import createSignedURL from './createSignedURL.js';
import { HTTPCommunicationQueue } from './HTTPCommunicationQueue.js';
import MultipartUploader from './MultipartUploader.js';
import { throwIfAborted } from './utils.js';

/**
 * Throws when a Companion response carries an `error` payload; otherwise
 * returns the response unchanged so it can be chained in a `.then()`.
 *
 * @param {object} res - Parsed Companion response body.
 * @returns {object} The same response when it contains no error.
 * @throws {Error} An Error carrying the server-provided error properties.
 */
function assertServerError(res) {
  if (res?.error) {
    const error = new Error(res.message);
    Object.assign(error, res.error);
    throw error;
  }
  return res;
}

/**
 * Computes the expiry time for a request signed with temporary credentials. If
 * no expiration was provided, or an invalid value (e.g. in the past) is
 * provided, undefined is returned. This function assumes the client clock is in
 * sync with the remote server, which is a requirement for the signature to be
 * validated for AWS anyway.
 *
 * @param {object} credentials - STS credentials, possibly with an `Expiration` date.
 * @returns {number | undefined} Seconds until expiry, or undefined when unusable.
 */
function getExpiry(credentials) {
  const expirationDate = credentials.Expiration;
  if (expirationDate) {
    const timeUntilExpiry = Math.floor(
      (new Date(expirationDate) - Date.now()) / 1000,
    );
    // Require a small safety margin (>9s) so we never sign with a token that
    // is about to lapse mid-request.
    if (timeUntilExpiry > 9) {
      return timeUntilExpiry;
    }
  }
  return undefined;
}

/**
 * Filters a file's metadata down to the allowed fields, stringifying values.
 *
 * @param {object} options
 * @param {object} options.meta - The file's metadata object (may be nullish).
 * @param {string[] | null} options.allowedMetaFields - Keys to keep; when
 *   nullish, all keys of `meta` are kept.
 * @param {boolean} [options.querify=false] - When true, keys are rewritten as
 *   `metadata[key]` for use in a query string.
 * @returns {Record<string, string>} The filtered, stringified metadata.
 */
function getAllowedMetadata({ meta, allowedMetaFields, querify = false }) {
  // Guard first: a nullish `meta` must not reach `Object.keys`.
  if (!meta) return {};
  const metaFields = allowedMetaFields ?? Object.keys(meta);
  return Object.fromEntries(
    metaFields
      .filter((key) => meta[key] != null)
      .map((key) => {
        const realKey = querify ? `metadata[${key}]` : key;
        const value = String(meta[key]);
        return [realKey, value];
      }),
  );
}

const defaultOptions = {
  allowedMetaFields: true,
  limit: 6,
  getTemporarySecurityCredentials: false,
  // Files above 100 MiB are uploaded in multiple parts by default.
  shouldUseMultipart: (file) => (file.size || 0) > 100 * 1024 * 1024,
  retryDelays: [0, 1000, 3000, 5000],
};

/**
 * Uppy uploader plugin for Amazon S3, supporting both single-request PUT/POST
 * uploads and S3 multipart uploads, signed either by a Companion-compatible
 * server (`endpoint` option) or locally with temporary STS credentials
 * (`getTemporarySecurityCredentials` option).
 */
export default class AwsS3Multipart extends BasePlugin {
  static VERSION = packageJson.version;

  #companionCommunicationQueue;

  #client;

  requests;

  uploaderEvents;

  uploaders;

  constructor(uppy, opts) {
    super(uppy, {
      ...defaultOptions,
      uploadPartBytes: AwsS3Multipart.uploadPartBytes,
      createMultipartUpload: null,
      listParts: null,
      abortMultipartUpload: null,
      completeMultipartUpload: null,
      signPart: null,
      getUploadParameters: null,
      ...opts,
    });
    this.type = 'uploader';
    this.id = this.opts.id || 'AwsS3Multipart';
    this.#setClient(opts);

    // Options the user did not provide fall back to the Companion-backed
    // implementations, or to locally-signed URLs when temporary credentials
    // are in use.
    const dynamicDefaultOptions = {
      createMultipartUpload: this.createMultipartUpload,
      listParts: this.listParts,
      abortMultipartUpload: this.abortMultipartUpload,
      completeMultipartUpload: this.completeMultipartUpload,
      signPart: opts?.getTemporarySecurityCredentials
        ? this.createSignedURL
        : this.signPart,
      getUploadParameters: opts?.getTemporarySecurityCredentials
        ? this.createSignedURL
        : this.getUploadParameters,
    };
    for (const key of Object.keys(dynamicDefaultOptions)) {
      if (this.opts[key] == null) {
        this.opts[key] = dynamicDefaultOptions[key].bind(this);
      }
    }

    /**
     * Simultaneous upload limiting is shared across all uploads with this plugin.
     *
     * @type {RateLimitedQueue}
     */
    this.requests =
      this.opts.rateLimitedQueue ?? new RateLimitedQueue(this.opts.limit);
    this.#companionCommunicationQueue = new HTTPCommunicationQueue(
      this.requests,
      this.opts,
      this.#setS3MultipartState,
      this.#getFile,
    );

    this.uploaders = Object.create(null);
    this.uploaderEvents = Object.create(null);
  }

  [Symbol.for('uppy test: getClient')]() {
    return this.#client;
  }

  // (Re)creates or updates the Companion RequestClient from the
  // endpoint/headers/cookiesRule options, warning on removed legacy names.
  #setClient(opts) {
    if (
      opts == null ||
      !(
        'endpoint' in opts ||
        'companionUrl' in opts ||
        'headers' in opts ||
        'companionHeaders' in opts ||
        'cookiesRule' in opts ||
        'companionCookiesRule' in opts
      )
    )
      return;
    if ('companionUrl' in opts && !('endpoint' in opts)) {
      this.uppy.log(
        '`companionUrl` option has been removed in @uppy/aws-s3, use `endpoint` instead.',
        'warning',
      );
    }
    if ('companionHeaders' in opts && !('headers' in opts)) {
      this.uppy.log(
        '`companionHeaders` option has been removed in @uppy/aws-s3, use `headers` instead.',
        'warning',
      );
    }
    if ('companionCookiesRule' in opts && !('cookiesRule' in opts)) {
      this.uppy.log(
        '`companionCookiesRule` option has been removed in @uppy/aws-s3, use `cookiesRule` instead.',
        'warning',
      );
    }
    if ('endpoint' in opts) {
      this.#client = new RequestClient(this.uppy, {
        pluginId: this.id,
        provider: 'AWS',
        companionUrl: this.opts.endpoint,
        companionHeaders: this.opts.headers,
        companionCookiesRule: this.opts.cookiesRule,
      });
    } else {
      if ('headers' in opts) {
        this.#setCompanionHeaders();
      }
      if ('cookiesRule' in opts) {
        this.#client.opts.companionCookiesRule = opts.cookiesRule;
      }
    }
  }

  setOptions(newOptions) {
    this.#companionCommunicationQueue.setOptions(newOptions);
    super.setOptions(newOptions);
    this.#setClient(newOptions);
  }

  /**
   * Clean up all references for a file's upload: the MultipartUploader instance,
   * any events related to the file, and the Companion WebSocket connection.
   *
   * Set `opts.abort` to tell S3 that the multipart upload is cancelled and must be removed.
   * This should be done when the user cancels the upload, not when the upload is completed or errored.
   */
  resetUploaderReferences(fileID, opts) {
    if (this.uploaders[fileID]) {
      this.uploaders[fileID].abort({ really: opts?.abort || false });
      this.uploaders[fileID] = null;
    }
    if (this.uploaderEvents[fileID]) {
      this.uploaderEvents[fileID].remove();
      this.uploaderEvents[fileID] = null;
    }
  }

  // Throws a descriptive error when a Companion-backed default is used
  // without an `endpoint` having been configured.
  #assertHost(method) {
    if (!this.#client) {
      throw new Error(
        `Expected a \`endpoint\` option containing a URL, or if you are not using Companion, a custom \`${method}\` implementation.`,
      );
    }
  }

  createMultipartUpload(file, signal) {
    this.#assertHost('createMultipartUpload');
    throwIfAborted(signal);
    const allowedMetaFields = getAllowedMetaFields(
      this.opts.allowedMetaFields,
      file.meta,
    );
    const metadata = getAllowedMetadata({ meta: file.meta, allowedMetaFields });
    return this.#client
      .post(
        's3/multipart',
        {
          filename: file.name,
          type: file.type,
          metadata,
        },
        { signal },
      )
      .then(assertServerError);
  }

  listParts(file, { key, uploadId, signal }, oldSignal) {
    signal ??= oldSignal;
    this.#assertHost('listParts');
    throwIfAborted(signal);
    const filename = encodeURIComponent(key);
    return this.#client
      .get(`s3/multipart/${encodeURIComponent(uploadId)}?key=${filename}`, {
        signal,
      })
      .then(assertServerError);
  }

  completeMultipartUpload(file, { key, uploadId, parts, signal }, oldSignal) {
    signal ??= oldSignal;
    this.#assertHost('completeMultipartUpload');
    throwIfAborted(signal);
    const filename = encodeURIComponent(key);
    const uploadIdEnc = encodeURIComponent(uploadId);
    return this.#client
      .post(
        `s3/multipart/${uploadIdEnc}/complete?key=${filename}`,
        // Only forward the fields S3 needs for CompleteMultipartUpload.
        { parts: parts.map(({ ETag, PartNumber }) => ({ ETag, PartNumber })) },
        { signal },
      )
      .then(assertServerError);
  }

  #cachedTemporaryCredentials;

  async #getTemporarySecurityCredentials(options) {
    throwIfAborted(options?.signal);
    if (this.#cachedTemporaryCredentials == null) {
      const { getTemporarySecurityCredentials } = this.opts;
      // We do not await it just yet, so concurrent calls do not try to override it:
      if (getTemporarySecurityCredentials === true) {
        this.#assertHost('getTemporarySecurityCredentials');
        this.#cachedTemporaryCredentials = this.#client
          .get('s3/sts', options)
          .then(assertServerError);
      } else {
        this.#cachedTemporaryCredentials =
          getTemporarySecurityCredentials(options);
      }
      this.#cachedTemporaryCredentials = await this.#cachedTemporaryCredentials;
      setTimeout(() => {
        // At half the time left before expiration, we clear the cache. That's
        // an arbitrary tradeoff to limit the number of requests made to the
        // remote while limiting the risk of using an expired token in case the
        // clocks are not exactly synced.
        // The HTTP cache should be configured to ensure a client doesn't request
        // more tokens than it needs, but this timeout provides a second layer of
        // security in case the HTTP cache is disabled or misconfigured.
        this.#cachedTemporaryCredentials = null;
      }, (getExpiry(this.#cachedTemporaryCredentials.credentials) || 0) * 500);
    }
    return this.#cachedTemporaryCredentials;
  }

  async createSignedURL(file, options) {
    const data = await this.#getTemporarySecurityCredentials(options);
    const expires = getExpiry(data.credentials) || 604_800; // 604 800 is the max value accepted by AWS.
    const { uploadId, key, partNumber } = options;
    // Return an object in the correct shape.
    return {
      method: 'PUT',
      expires,
      fields: {},
      url: `${await createSignedURL({
        accountKey: data.credentials.AccessKeyId,
        accountSecret: data.credentials.SecretAccessKey,
        sessionToken: data.credentials.SessionToken,
        expires,
        bucketName: data.bucket,
        Region: data.region,
        Key: key ?? `${crypto.randomUUID()}-${file.name}`,
        uploadId,
        partNumber,
      })}`,
      // Provide content type header required by S3
      headers: {
        'Content-Type': file.type,
      },
    };
  }

  signPart(file, { uploadId, key, partNumber, signal }) {
    this.#assertHost('signPart');
    throwIfAborted(signal);
    if (uploadId == null || key == null || partNumber == null) {
      throw new Error(
        'Cannot sign without a key, an uploadId, and a partNumber',
      );
    }
    const filename = encodeURIComponent(key);
    return this.#client
      .get(
        `s3/multipart/${encodeURIComponent(uploadId)}/${partNumber}?key=${filename}`,
        { signal },
      )
      .then(assertServerError);
  }

  abortMultipartUpload(file, { key, uploadId, signal }) {
    this.#assertHost('abortMultipartUpload');
    const filename = encodeURIComponent(key);
    const uploadIdEnc = encodeURIComponent(uploadId);
    return this.#client
      .delete(`s3/multipart/${uploadIdEnc}?key=${filename}`, undefined, {
        signal,
      })
      .then(assertServerError);
  }

  getUploadParameters(file, options) {
    this.#assertHost('getUploadParameters');
    const { meta } = file;
    const { type, name: filename } = meta;
    const allowedMetaFields = getAllowedMetaFields(
      this.opts.allowedMetaFields,
      file.meta,
    );
    const metadata = getAllowedMetadata({
      meta,
      allowedMetaFields,
      querify: true,
    });
    const query = new URLSearchParams({ filename, type, ...metadata });
    return this.#client.get(`s3/params?${query}`, options);
  }

  /**
   * Uploads one chunk (or the whole file) to a pre-signed URL with XHR,
   * reporting progress and resolving with the response headers (including a
   * capitalised `ETag`). Rejects on abort, timeout ("Request has expired"),
   * network error, or non-2xx status.
   */
  static async uploadPartBytes({
    signature: { url, expires, headers, method = 'PUT' },
    body,
    size = body.size,
    onProgress,
    onComplete,
    signal,
  }) {
    throwIfAborted(signal);
    if (url == null) {
      throw new Error('Cannot upload to an undefined URL');
    }
    return new Promise((resolve, reject) => {
      const xhr = new XMLHttpRequest();
      xhr.open(method, url, true);
      if (headers) {
        Object.keys(headers).forEach((key) => {
          xhr.setRequestHeader(key, headers[key]);
        });
      }
      xhr.responseType = 'text';
      if (typeof expires === 'number') {
        xhr.timeout = expires * 1000;
      }
      function onabort() {
        xhr.abort();
      }
      function cleanup() {
        signal?.removeEventListener('abort', onabort);
      }
      signal?.addEventListener('abort', onabort);
      xhr.upload.addEventListener('progress', (ev) => {
        onProgress(ev);
      });
      xhr.addEventListener('abort', () => {
        cleanup();
        reject(createAbortError());
      });
      xhr.addEventListener('timeout', () => {
        cleanup();
        const error = new Error('Request has expired');
        error.source = { status: 403 };
        reject(error);
      });
      xhr.addEventListener('load', () => {
        cleanup();
        if (
          xhr.status === 403 &&
          xhr.responseText.includes('<Message>Request has expired</Message>')
        ) {
          const error = new Error('Request has expired');
          error.source = xhr;
          reject(error);
          return;
        }
        if (xhr.status < 200 || xhr.status >= 300) {
          const error = new Error('Non 2xx');
          error.source = xhr;
          reject(error);
          return;
        }
        onProgress?.({ loaded: size, lengthComputable: true });
        // https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest/getAllResponseHeaders#examples
        const arr = xhr
          .getAllResponseHeaders()
          .trim()
          .split(/[\r\n]+/);
        // @ts-expect-error null is allowed to avoid inherited properties
        const headersMap = { __proto__: null };
        for (const line of arr) {
          const parts = line.split(': ');
          const header = parts.shift();
          const value = parts.join(': ');
          headersMap[header] = value;
        }
        const { etag, location } = headersMap;
        // More info bucket settings when this is not present:
        // https://github.com/transloadit/uppy/issues/5388#issuecomment-2464885562
        if (method.toUpperCase() === 'POST' && location == null) {
          // Not being able to read the Location header is not a fatal error.
          console.error(
            '@uppy/aws-s3: Could not read the Location header. This likely means CORS is not configured correctly on the S3 Bucket. See https://uppy.io/docs/aws-s3/#setting-up-your-s3-bucket',
          );
        }
        if (etag == null) {
          console.error(
            '@uppy/aws-s3: Could not read the ETag header. This likely means CORS is not configured correctly on the S3 Bucket. See https://uppy.io/docs/aws-s3/#setting-up-your-s3-bucket',
          );
          return;
        }
        onComplete?.(etag);
        resolve({
          ...headersMap,
          ETag: etag, // keep capitalised ETag for backwards compatibility
        });
      });
      xhr.addEventListener('error', (ev) => {
        cleanup();
        const error = new Error('Unknown error');
        error.source = ev.target;
        reject(error);
      });
      xhr.send(body);
    });
  }

  // Persists the S3 key/uploadId on the file's state so the upload can be
  // resumed or aborted later.
  #setS3MultipartState = (file, { key, uploadId }) => {
    const cFile = this.uppy.getFile(file.id);
    if (cFile == null) {
      // file was removed from store
      return;
    }
    this.uppy.setFileState(file.id, {
      s3Multipart: {
        ...cFile.s3Multipart,
        key,
        uploadId,
      },
    });
  };

  // Returns the freshest copy of the file from the store, falling back to the
  // captured reference when it was removed.
  #getFile = (file) => {
    return this.uppy.getFile(file.id) || file;
  };

  // Drives a local (non-remote) file upload through MultipartUploader and
  // wires up Uppy's pause/resume/cancel/remove events.
  #uploadLocalFile(file) {
    return new Promise((resolve, reject) => {
      const onProgress = (bytesUploaded, bytesTotal) => {
        const latestFile = this.uppy.getFile(file.id);
        this.uppy.emit('upload-progress', latestFile, {
          uploadStarted: latestFile.progress.uploadStarted ?? 0,
          bytesUploaded,
          bytesTotal,
        });
      };
      const onError = (err) => {
        this.uppy.log(err);
        this.uppy.emit('upload-error', file, err);
        this.resetUploaderReferences(file.id);
        reject(err);
      };
      const onSuccess = (result) => {
        const uploadResp = {
          body: {
            ...result,
          },
          status: 200,
          uploadURL: result.location,
        };
        this.resetUploaderReferences(file.id);
        this.uppy.emit('upload-success', this.#getFile(file), uploadResp);
        if (result.location) {
          this.uppy.log(`Download ${file.name} from ${result.location}`);
        }
        resolve(undefined);
      };
      if (file.data == null) throw new Error('File data is empty');
      const upload = new MultipartUploader(file.data, {
        // .bind to pass the file object to each handler.
        companionComm: this.#companionCommunicationQueue,
        log: (...args) => this.uppy.log(...args),
        getChunkSize: this.opts.getChunkSize
          ? this.opts.getChunkSize.bind(this)
          : undefined,
        onProgress,
        onError,
        onSuccess,
        onPartComplete: (part) => {
          this.uppy.emit('s3-multipart:part-uploaded', this.#getFile(file), part);
        },
        file,
        shouldUseMultipart: this.opts.shouldUseMultipart,
        ...file.s3Multipart,
      });
      this.uploaders[file.id] = upload;
      const eventManager = new EventManager(this.uppy);
      this.uploaderEvents[file.id] = eventManager;
      eventManager.onFileRemove(file.id, (removed) => {
        upload.abort();
        this.resetUploaderReferences(file.id, { abort: true });
        resolve(`upload ${removed} was removed`);
      });
      eventManager.onCancelAll(file.id, () => {
        upload.abort();
        this.resetUploaderReferences(file.id, { abort: true });
        resolve(`upload ${file.id} was canceled`);
      });
      eventManager.onFilePause(file.id, (isPaused) => {
        if (isPaused) {
          upload.pause();
        } else {
          upload.start();
        }
      });
      eventManager.onPauseAll(file.id, () => {
        upload.pause();
      });
      eventManager.onResumeAll(file.id, () => {
        upload.start();
      });
      upload.start();
    });
  }

  // Builds the payload Companion needs to fetch and upload a remote file.
  #getCompanionClientArgs(file) {
    return {
      ...('remote' in file && file.remote?.body),
      protocol: 's3-multipart',
      size: file.data.size,
      metadata: file.meta,
    };
  }

  // Uploader entry point registered with Uppy: uploads every given file,
  // remote files via Companion and local files via MultipartUploader.
  #upload = async (fileIDs) => {
    if (fileIDs.length === 0) return undefined;
    const files = this.uppy.getFilesByIds(fileIDs);
    const filesFiltered = filterFilesToUpload(files);
    const filesToEmit = filterFilesToEmitUploadStarted(filesFiltered);
    this.uppy.emit('upload-start', filesToEmit);
    const promises = filesFiltered.map((file) => {
      if (file.isRemote) {
        const getQueue = () => this.requests;
        this.#setResumableUploadsCapability(false);
        const controller = new AbortController();
        const removedHandler = (removedFile) => {
          if (removedFile.id === file.id) controller.abort();
        };
        this.uppy.on('file-removed', removedHandler);
        const uploadPromise = this.uppy
          .getRequestClientForFile(file)
          .uploadRemoteFile(file, this.#getCompanionClientArgs(file), {
            signal: controller.signal,
            getQueue,
          });
        this.requests.wrapSyncFunction(
          () => {
            this.uppy.off('file-removed', removedHandler);
          },
          { priority: -1 },
        )();
        return uploadPromise;
      }
      return this.#uploadLocalFile(file);
    });
    const upload = await Promise.allSettled(promises);
    // After the upload is done, another upload may happen with only local files.
    // We reset the capability so that the next upload can use resumable uploads.
    this.#setResumableUploadsCapability(true);
    return upload;
  };

  #setCompanionHeaders = () => {
    this.#client?.setCompanionHeaders(this.opts.headers);
  };

  #setResumableUploadsCapability = (boolean) => {
    const { capabilities } = this.uppy.getState();
    this.uppy.setState({
      capabilities: {
        ...capabilities,
        resumableUploads: boolean,
      },
    });
  };

  #resetResumableCapability = () => {
    this.#setResumableUploadsCapability(true);
  };

  install() {
    this.#setResumableUploadsCapability(true);
    this.uppy.addPreProcessor(this.#setCompanionHeaders);
    this.uppy.addUploader(this.#upload);
    this.uppy.on('cancel-all', this.#resetResumableCapability);
  }

  uninstall() {
    this.uppy.removePreProcessor(this.#setCompanionHeaders);
    this.uppy.removeUploader(this.#upload);
    this.uppy.off('cancel-all', this.#resetResumableCapability);
  }
}