/*
 * minio — S3 Compatible Cloud Storage client (JavaScript)
 * Version: (not specified in the listing)
 * 308 lines (238 loc) • 11.8 kB
 * NOTE: package-listing metadata preserved as a comment so the file parses.
 */
/*
* Minio Javascript Library for Amazon S3 Compatible Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
Object.defineProperty(exports, '__esModule', {
value: true
});
var _createClass = (function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ('value' in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; })();
var _get = function get(_x, _x2, _x3) { var _again = true; _function: while (_again) { var object = _x, property = _x2, receiver = _x3; _again = false; if (object === null) object = Function.prototype; var desc = Object.getOwnPropertyDescriptor(object, property); if (desc === undefined) { var parent = Object.getPrototypeOf(object); if (parent === null) { return undefined; } else { _x = parent; _x2 = property; _x3 = receiver; _again = true; desc = parent = undefined; continue _function; } } else if ('value' in desc) { return desc.value; } else { var getter = desc.get; if (getter === undefined) { return undefined; } return getter.call(receiver); } } };
function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj['default'] = obj; return newObj; } }
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
function _inherits(subClass, superClass) { if (typeof superClass !== 'function' && superClass !== null) { throw new TypeError('Super expression must either be null or a function, not ' + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var _stream = require('stream');
var _crypto = require('crypto');
var _crypto2 = _interopRequireDefault(_crypto);
var _querystring = require('querystring');
var querystring = _interopRequireWildcard(_querystring);
// We extend Transform because Writable does not implement ._flush().
var ObjectUploader = (function (_Transform) {
_inherits(ObjectUploader, _Transform);
function ObjectUploader(client, bucketName, objectName, partSize, contentType, callback) {
_classCallCheck(this, ObjectUploader);
_get(Object.getPrototypeOf(ObjectUploader.prototype), 'constructor', this).call(this);
this.emptyStream = true;
this.client = client;
this.bucketName = bucketName;
this.objectName = objectName;
// The size of each multipart, chunked by BlockStream2.
this.partSize = partSize;
// This contentType for the object.
this.contentType = contentType;
// Call like: callback(error, etag).
this.callback = callback;
// We need to keep track of what number chunk/part we're on. This increments
// each time _write() is called. Starts with 1, not 0.
this.partNumber = 1;
// A list of the previously uploaded chunks, for resuming a file upload. This
// will be null if we aren't resuming an upload.
this.oldParts = null;
// Keep track of the etags for aggregating the chunks together later. Each
// etag represents a single chunk of the file.
this.etags = [];
// This is for the multipart upload request — if null, we're either not initiated
// yet or we're flushing in one packet.
this.id = null;
// Handle errors.
this.on('error', function (err) {
callback(err);
});
}
_createClass(ObjectUploader, [{
key: '_transform',
value: function _transform(chunk, encoding, callback) {
var _this = this;
this.emptyStream = false;
var method = 'PUT';
var headers = {
'Content-Length': chunk.length,
'Content-Type': this.contentType
};
var md5digest = '';
// Calculate and set Content-MD5 header if SHA256 is not set.
// This will happen only when there is a secure connection to the s3 server.
if (!this.client.enableSHA256) {
md5digest = _crypto2['default'].createHash('md5').update(chunk).digest();
headers['Content-MD5'] = md5digest.toString('base64');
}
// We can flush the object in one packet if it fits in one chunk. This is true
// if the chunk size is smaller than the part size, signifying the end of the
// stream.
if (this.partNumber == 1 && chunk.length < this.partSize) {
// PUT the chunk in a single request — use an empty query.
var _options = {
method: method, headers: headers,
query: '',
bucketName: this.bucketName,
objectName: this.objectName
};
this.client.makeRequest(_options, chunk, 200, '', true, function (err, response) {
if (err) return callback(err);
var etag = response.headers.etag;
if (etag) {
etag = etag.replace(/^"/, '').replace(/"$/, '');
}
// Ignore the 'data' event so that the stream closes. (nodejs stream requirement)
response.on('data', function () {});
// Give the etag back, we're done!
process.nextTick(function () {
_this.callback(null, etag);
});
// Because we're sure the stream has ended, allow it to flush and end.
callback();
});
return;
}
// If we aren't flushing in one packet, we need to initiate the multipart upload,
// if it hasn't already been done. The write will be buffered until the upload has been
// initiated.
if (this.id === null) {
this.once('ready', function () {
_this._transform(chunk, encoding, callback);
});
// Check for an incomplete previous upload.
this.client.findUploadId(this.bucketName, this.objectName, function (err, id) {
if (err) return _this.emit('error', err);
// If no upload ID exists, initiate a new one.
if (!id) {
_this.client.initiateNewMultipartUpload(_this.bucketName, _this.objectName, _this.contentType, function (err, id) {
if (err) return callback(err);
_this.id = id;
// We are now ready to accept new chunks — this will flush the buffered chunk.
_this.emit('ready');
});
return;
}
_this.id = id;
// Retrieve the pre-uploaded parts, if we need to resume the upload.
_this.client.listParts(_this.bucketName, _this.objectName, id, function (err, etags) {
if (err) return _this.emit('error', err);
// It is possible for no parts to be already uploaded.
if (!etags) etags = [];
// oldParts will become an object, allowing oldParts[partNumber].etag
_this.oldParts = etags.reduce(function (prev, item) {
if (!prev[item.part]) {
prev[item.part] = item;
}
return prev;
}, {});
_this.emit('ready');
});
});
return;
}
// Continue uploading various parts if we have initiated multipart upload.
var partNumber = this.partNumber++;
// Check to see if we've already uploaded this chunk. If the hash sums match,
// we can skip to the next chunk.
if (this.oldParts) {
var oldPart = this.oldParts[partNumber];
//Calulcate the md5 hash, if it has not already been calculated.
if (!md5digest) {
md5digest = _crypto2['default'].createHash('md5').update(chunk).digest();
}
if (oldPart && md5digest.toString('hex') === oldPart.etag) {
// The md5 matches, the chunk has already been uploaded.
this.etags.push({ part: partNumber, etag: oldPart.etag });
callback();
return;
}
}
// Write the chunk with an uploader.
var query = querystring.stringify({
partNumber: partNumber,
uploadId: this.id
});
var options = {
method: method, query: query, headers: headers,
bucketName: this.bucketName,
objectName: this.objectName
};
this.client.makeRequest(options, chunk, 200, '', true, function (err, response) {
if (err) return callback(err);
// In order to aggregate the parts together, we need to collect the etags.
var etag = response.headers.etag;
if (etag) etag = etag.replace(/^"/, '').replace(/"$/, '');
_this.etags.push({ part: partNumber, etag: etag });
// We're ready for the next chunk.
callback();
});
}
}, {
key: '_flush',
value: function _flush(callback) {
var _this2 = this;
if (this.emptyStream) {
var method = 'PUT';
var headers = {
'Content-Length': 0,
'Content-Type': this.contentType
};
var options = {
method: method, headers: headers,
query: '',
bucketName: this.bucketName,
objectName: this.objectName
};
this.client.makeRequest(options, '', 200, '', true, function (err, response) {
if (err) return callback(err);
var etag = response.headers.etag;
if (etag) {
etag = etag.replace(/^"/, '').replace(/"$/, '');
}
// Ignore the 'data' event so that the stream closes. (nodejs stream requirement)
response.on('data', function () {});
// Give the etag back, we're done!
process.nextTick(function () {
_this2.callback(null, etag);
});
// Because we're sure the stream has ended, allow it to flush and end.
callback();
});
return;
}
// If it has been uploaded in a single packet, we don't have to do anything.
if (this.id === null) {
return;
}
// This is called when all of the chunks uploaded successfully, thus
// completing the multipart upload.
this.client.completeMultipartUpload(this.bucketName, this.objectName, this.id, this.etags, function (err, etag) {
if (err) return callback(err);
// Call our callback on the next tick to allow the streams infrastructure
// to finish what its doing before we continue.
process.nextTick(function () {
_this2.callback(null, etag);
});
callback();
});
}
}]);
return ObjectUploader;
})(_stream.Transform);
exports['default'] = ObjectUploader;
module.exports = exports['default'];
//# sourceMappingURL=object-uploader.js.map