s3-tus-store
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
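// Babel helper (compiler-generated, not hand-written): emulates ES2015 array
// destructuring (e.g. `const [x] = arr`) over any iterable.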
var _slicedToArray = function () { function sliceIterator(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"]) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } return function (arr, i) { if (Array.isArray(arr)) { return arr; } else if (Symbol.iterator in Object(arr)) { return sliceIterator(arr, i); } else { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } }; }();
var _fixedSizeStreamSplitter = require('fixed-size-stream-splitter');
var _fixedSizeStreamSplitter2 = _interopRequireDefault(_fixedSizeStreamSplitter);
var _debug = require('debug');
var _debug2 = _interopRequireDefault(_debug);
var _commonStreams = require('common-streams');
var _stream = require('stream');
var _eos = require('./eos');
var _eos2 = _interopRequireDefault(_eos);
var _tmpFile = require('./tmp-file');
var _tmpFile2 = _interopRequireDefault(_tmpFile);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
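// Babel helper: steps a generator through Promise resolutions; this is how
// the original source's async/await compiles down to ES5.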
function _asyncToGenerator(fn) { return function () { var gen = fn.apply(this, arguments); return new Promise(function (resolve, reject) { function step(key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { return Promise.resolve(value).then(function (value) { step("next", value); }, function (err) { step("throw", err); }); } } return step("next"); }); }; }
const debug = (0, _debug2.default)('s3-tus-store:partbypart');
const uploadPart = (() => {
var _ref2 = _asyncToGenerator(function* (rs, guessedPartSize, partNumber, _ref) {
let client = _ref.client,
bucket = _ref.bucket,
uploadId = _ref.uploadId,
minPartSize = _ref.minPartSize,
key = _ref.key;
debug('uploadPart', {
guessedPartSize,
partNumber,
bucket,
key,
uploadId,
minPartSize
});
//
// Optimistically guess that Content-Length is guessedPartSize,
// but keep a temporary copy on disk in case the stream ends
// before we have read "guessedPartSize" bytes.
//
// If the actual part size is >= minPartSize, re-upload the part with
// Content-Length = actual part size.
//
// Otherwise, there is not much we can do, short of temporarily
// writing the data to an S3 key and waiting for the next call
// to .append() to read it, merge it with the new stream
// and write a new part... But that is a TODO!
//
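// In untranspiled form, the flow implemented below is roughly this
// async/await sketch (illustrative only, not part of this module):
//
//   const request = client.uploadPart({ ...baseParams, Body, ContentLength: guessedPartSize })
//   try {
//     return await planA()   // optimistic path: our size guess was right
//   } catch (err) {
//     if (err.code === 'RequestAbortedError') return planB()  // re-upload from tmp file
//     throw err
//   }
//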
const through = rs.pipe(new _stream.PassThrough());
const Body = new _stream.PassThrough();
const baseParams = {
Key: key,
Bucket: bucket,
UploadId: uploadId,
PartNumber: partNumber
};
const request = client.uploadPart(Object.assign({
Body
}, baseParams, {
ContentLength: guessedPartSize
}));
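// `client` is an aws-sdk (v2) S3 instance: S3#uploadPart returns an
// AWS.Request, whose .abort() and .promise() methods are used below to
// cancel or await the transfer.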
// If the body turns out smaller than ContentLength, the request
// stalls indefinitely, so we must detect that case and abort (below).
const tmpFile = yield (0, _tmpFile2.default)();
debug('tmpFile path', tmpFile.path);
// In parallel, we write the data to a temporary file so we can recover
// if the original request fails (i.e. we guessed the part size wrong).
// We always guess the last part's size correctly, so the temp file is
// never actually needed for the final part.
const fileWrittenPromise = (0, _eos2.default)(through.pipe(tmpFile.createWriteStream()));
const streamSizePromise = new Promise(function (resolve) {
through.pipe(new _commonStreams.SizeStream(function (byteCount) {
// Fires once the source stream has ended and every byte is counted;
// the S3 upload itself may still be in flight
resolve(byteCount);
}))
// Upload to S3 hasn't started yet
.pipe(Body);
});
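// The tee pattern used above, in miniature (a sketch, not part of this
// module): one source feeds several PassThrough consumers, each seeing
// the same bytes:
//
//   const src = rs.pipe(new PassThrough())
//   src.pipe(tmpFileWriteStream)            // durable copy for plan B
//   src.pipe(sizeCounter).pipe(s3Body)      // count bytes on the way to S3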
let actualSize;
// Once the full stream size is known, decide whether our guess was right
streamSizePromise.then(function (size) {
actualSize = size;
if (size < guessedPartSize) {
debug('actualSize', actualSize);
debug('guessedPartSize', guessedPartSize);
debug('Oops, our guessedPartSize was larger than actualSize');
// make sure request is aborted
request.abort();
}
});
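// In aws-sdk v2, request.abort() makes request.promise() reject with
// err.code === 'RequestAbortedError', which planA() below turns into
// a planB() retry.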
// The stream was shorter than we expected and we aborted the request.
// Now make sure the tmp file is fully written, then retry uploading
// its contents with the correct Content-Length.
const planB = (() => {
var _ref3 = _asyncToGenerator(function* () {
debug('plan B');
debug(`actualSize = ${ actualSize }`);
debug(`minPartSize = ${ minPartSize }`);
if (actualSize < minPartSize) {
// Nothing we can do... short of uploading the data to an S3 key and
// rewriting it later when we receive a new write? TODO
// PS: we always guess the size of the last part correctly,
// so this branch is never reached for the last part
throw new Error(`Upload parts must be at least ${ minPartSize } bytes`);
}
// make sure the temporary file was completely written to disk...
yield fileWrittenPromise;
const tmpFileRs = tmpFile.createReadStream();
// Capture errors so we don't get uncaught 'error' events
const tmpFileRsEos = (0, _eos2.default)(tmpFileRs, { writable: false });
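// Compiled form of: const [{ ETag }] = await Promise.all([upload, tmpFileRsEos])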
var _ref4 = yield Promise.all([client.uploadPart(Object.assign({}, baseParams, {
Body: tmpFileRs,
ContentLength: actualSize
})).promise(), tmpFileRsEos]),
_ref5 = _slicedToArray(_ref4, 1);
const ETag = _ref5[0].ETag;
// TODO: wrap this whole function in try/catch to make sure the tmp file
// is removed even when errors occur...
return {
ETag,
PartNumber: partNumber,
Size: actualSize
};
});
return function planB() {
return _ref3.apply(this, arguments);
};
})();
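// Plan B trades a second pass over the data (re-read from disk) for a
// correct Content-Length. S3 requires every part except the last to be
// at least minPartSize (5 MiB on AWS), hence the guard above.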
const planA = function () {
return request.promise().then(function (_ref6) {
let ETag = _ref6.ETag;
return {
ETag,
PartNumber: partNumber,
Size: guessedPartSize
};
}).catch(function (err) {
if (err.code === 'RequestAbortedError') {
debug('request aborted');
return planB();
}
throw err;
});
};
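// If plan A resolves, S3 accepted exactly guessedPartSize bytes, so the
// reported Size is the guess itself.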
return planA().catch(function (err) {
tmpFile.rm(); // don't need to block on this...
throw err;
}).then(function (result) {
tmpFile.rm(); // don't need to block on this...
return result;
});
});
return function uploadPart(_x, _x2, _x3, _x4) {
return _ref2.apply(this, arguments);
};
})();
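// Example invocation (illustrative only; values are hypothetical and the
// multipart-upload bookkeeping is handled by the surrounding store):
//
//   const part = await uploadPart(readable, 5 * 1024 * 1024, 1, {
//     client, bucket: 'my-bucket', key: 'uploads/abc', uploadId,
//     minPartSize: 5 * 1024 * 1024,
//   })
//   // => { ETag: '"..."', PartNumber: 1, Size: 5242880 }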
exports.default = function () {
let opts = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
return new Promise((resolve, reject) => {
const body = opts.body,
maxPartSize = opts.maxPartSize,
bytesLimit = opts.bytesLimit,
nextPartNumber = opts.nextPartNumber;
debug('writePartByPart', { bytesLimit, nextPartNumber });
let done = false;
let promise = Promise.resolve();
let splitIndex = 0;
const newParts = [];
const onSplit = rs => {
debug('onSplit');
if (done) return;
const partNumber = nextPartNumber + splitIndex;
const bytesWritten = splitIndex * maxPartSize;
debug('bytesWritten', bytesWritten);
const bytesRemaining = bytesLimit - bytesWritten;
// We always guess the size of the last part correctly
const guessedPartSize = Math.min(bytesRemaining, maxPartSize);
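// Worked example (illustrative): with bytesLimit = 12 MiB and
// maxPartSize = 5 MiB, the splitter yields parts of 5, 5 and 2 MiB.
// For the final part, bytesRemaining (2 MiB) < maxPartSize, so the
// guess is exact and plan B is never triggered for the last part.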
// wait for previous uploadPart operation to complete
promise = promise.then(() => {
debug('calling uploadPart', { bytesWritten });
return uploadPart(rs, guessedPartSize, partNumber, opts);
}).then(newPart => {
newParts.push(newPart);
debug('newParts', newParts);
}).catch(err => {
done = true;
reject(err);
});
splitIndex += 1;
};
body
.on('error', reject)
.pipe((0, _fixedSizeStreamSplitter2.default)(maxPartSize, onSplit))
.on('error', reject)
.on('finish', () => {
if (done) return;
// Make sure all upload part promises are completed...
promise.then(() => {
resolve(newParts);
}).catch(reject);
});
});
};
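// Example usage of the default export (a sketch; the function is unnamed
// here, `writePartByPart` is its debug tag, and opts must also carry the
// S3 parameters consumed by uploadPart above):
//
//   const newParts = await writePartByPart({
//     body,                              // any Readable stream
//     maxPartSize: 5 * 1024 * 1024,
//     bytesLimit: remainingUploadLength, // hypothetical value
//     nextPartNumber: 1,
//     client, bucket, key, uploadId, minPartSize,
//   })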