Object.defineProperty(exports, "__esModule", { value: true });
const tslib_1 = require("tslib");
const fs_1 = require("fs");
const multistream = require("multistream");
const path_1 = require("path");
const util_1 = require("util");
// uuid's callable default export was removed in uuid@7; use the v4 named export.
const { v4: uuid } = require("uuid");
const IBlobMetadataStore_1 = require("../../blob/persistence/IBlobMetadataStore");
const BufferStream_1 = tslib_1.__importDefault(require("../utils/BufferStream"));
const constants_1 = require("../utils/constants");
const utils_1 = require("../utils/utils");
const ZeroBytesStream_1 = tslib_1.__importDefault(require("../ZeroBytesStream"));
const OperationQueue_1 = tslib_1.__importDefault(require("./OperationQueue"));
const statAsync = (0, util_1.promisify)(fs_1.stat);
const mkdirAsync = (0, util_1.promisify)(fs_1.mkdir);
const unlinkAsync = (0, util_1.promisify)(fs_1.unlink);
const truncateAsync = (0, util_1.promisify)(fs_1.truncate);
// The max size of an extent.
const MAX_EXTENT_SIZE = constants_1.DEFAULT_MAX_EXTENT_SIZE;
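// Append status of an active write extent slot: Idle means the slot is free
// for the next append; Appending means a write is currently in flight.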
var AppendStatusCode;
(function (AppendStatusCode) {
AppendStatusCode[AppendStatusCode["Idle"] = 0] = "Idle";
AppendStatusCode[AppendStatusCode["Appending"] = 1] = "Appending";
})(AppendStatusCode || (AppendStatusCode = {}));
const openAsync = (0, util_1.promisify)(fs_1.open);
const closeAsync = (0, util_1.promisify)(fs_1.close);
/**
* Persistency layer data source implementation that interacts with the storage media.
* It provides the methods to read and write data on the underlying storage.
*
* @export
* @class FSExtentStore
* @implements {IExtentStore}
*/
class FSExtentStore {
constructor(metadata, persistencyConfiguration, logger) {
this.persistencyConfiguration = persistencyConfiguration;
this.logger = logger;
this.initialized = false;
this.closed = true;
this.activeWriteExtents = [];
this.persistencyPath = new Map();
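// Pre-create one append extent per configured concurrency slot for every
// persistency location so that appends can proceed in parallel.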
for (const storeDestination of persistencyConfiguration) {
this.persistencyPath.set(storeDestination.locationId, storeDestination.locationPath);
for (let i = 0; i < storeDestination.maxConcurrency; i++) {
const appendExtent = this.createAppendExtent(storeDestination.locationId);
this.activeWriteExtents.push(appendExtent);
}
}
this.activeWriteExtentsNumber = this.activeWriteExtents.length;
this.metadataStore = metadata;
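// Separate operation queues bound append concurrency to the number of active
// write extents, and read concurrency to the configured default.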
this.appendQueue = new OperationQueue_1.default(this.activeWriteExtentsNumber, logger);
this.readQueue = new OperationQueue_1.default(constants_1.DEFAULT_READ_CONCURRENCY, logger);
}
isInitialized() {
return this.initialized;
}
isClosed() {
return this.closed;
}
async init() {
for (const storeDestination of this.persistencyConfiguration) {
try {
await statAsync(storeDestination.locationPath);
}
catch {
await mkdirAsync(storeDestination.locationPath);
}
}
if (!this.metadataStore.isInitialized()) {
await this.metadataStore.init();
}
this.initialized = true;
this.closed = false;
}
async close() {
if (!this.metadataStore.isClosed()) {
await this.metadataStore.close();
}
this.closed = true;
}
async clean() {
if (this.isClosed()) {
for (const path of this.persistencyConfiguration) {
try {
await (0, utils_1.rimrafAsync)(path.locationPath);
}
catch {
// TODO: Find out why this sometimes throws a "no permission" error
/* NOOP */
}
}
return;
}
throw new Error(`Cannot clean FSExtentStore, it's not closed.`);
}
/**
* This method may create a new extent or append data to an existing extent.
* Returns the extent chunk information, including the extentId, offset, and count.
*
* @param {(NodeJS.ReadableStream | Buffer)} data
* @param {string} [contextId]
* @returns {Promise<IExtentChunk>}
* @memberof FSExtentStore
*/
async appendExtent(data, contextId) {
const op = () => new Promise((resolve, reject) => {
(async () => {
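// Pick the first idle write extent slot; fall back to slot 0 if all are busy.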
let appendExtentIdx = 0;
for (let i = 1; i < this.activeWriteExtentsNumber; i++) {
if (this.activeWriteExtents[i].appendStatus === AppendStatusCode.Idle) {
appendExtentIdx = i;
break;
}
}
this.activeWriteExtents[appendExtentIdx].appendStatus =
AppendStatusCode.Appending;
this.logger.info(`FSExtentStore:appendExtent() Selected idle extent for append operation. LocationId:${appendExtentIdx} extentId:${this.activeWriteExtents[appendExtentIdx].id} offset:${this.activeWriteExtents[appendExtentIdx].offset} MAX_EXTENT_SIZE:${MAX_EXTENT_SIZE}`, contextId);
if (this.activeWriteExtents[appendExtentIdx].offset >= MAX_EXTENT_SIZE) {
this.logger.info(`FSExtentStore:appendExtent() Offset of the selected extent has reached the maximum extent size of ${MAX_EXTENT_SIZE} bytes, try appending to a new extent.`, contextId);
const selectedFd = this.activeWriteExtents[appendExtentIdx].fd;
if (selectedFd) {
this.logger.info(`FSExtentStore:appendExtent() Close unused fd:${selectedFd}.`, contextId);
try {
await closeAsync(selectedFd);
}
catch (err) {
this.logger.error(`FSExtentStore:appendExtent() Close unused fd:${selectedFd} error:${JSON.stringify(err)}.`, contextId);
}
}
await this.getNewExtent(this.activeWriteExtents[appendExtentIdx]);
this.logger.info(`FSExtentStore:appendExtent() Allocated new extent LocationID:${appendExtentIdx} extentId:${this.activeWriteExtents[appendExtentIdx].id} offset:${this.activeWriteExtents[appendExtentIdx].offset} MAX_EXTENT_SIZE:${MAX_EXTENT_SIZE} `, contextId);
}
let rs;
if (data instanceof Buffer) {
rs = new BufferStream_1.default(data);
}
else {
rs = data;
}
const appendExtent = this.activeWriteExtents[appendExtentIdx];
const id = appendExtent.id;
const path = this.generateExtentPath(appendExtent.locationId, id);
let fd = appendExtent.fd;
this.logger.debug(`FSExtentStore:appendExtent() Get fd:${fd} for extent:${id} from cache.`, contextId);
if (fd === undefined) {
fd = await openAsync(path, "a");
appendExtent.fd = fd;
this.logger.debug(`FSExtentStore:appendExtent() Open file:${path} for extent:${id}, get new fd:${fd}`, contextId);
}
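// Reuse the cached file descriptor: fs.createWriteStream ignores the path when
// an fd is supplied, and autoClose: false keeps the fd open for later appends.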
const ws = (0, fs_1.createWriteStream)(path, {
fd,
autoClose: false
});
this.logger.debug(`FSExtentStore:appendExtent() Created write stream for fd:${fd}`, contextId);
let count = 0;
this.logger.debug(`FSExtentStore:appendExtent() Start writing to extent ${id}`, contextId);
try {
count = await this.streamPipe(rs, ws, fd, contextId);
const offset = appendExtent.offset;
appendExtent.offset += count;
const extent = {
id,
locationId: appendExtent.locationId,
path: id,
size: count + offset,
lastModifiedInMS: Date.now()
};
this.logger.debug(`FSExtentStore:appendExtent() Write finish, start updating extent metadata. extent:${JSON.stringify(extent)}`, contextId);
await this.metadataStore.updateExtent(extent);
this.logger.debug(`FSExtentStore:appendExtent() Update extent metadata done. Resolve()`, contextId);
appendExtent.appendStatus = AppendStatusCode.Idle;
return {
id,
offset,
count
};
}
catch (err) {
// Reset cursor position to the current offset. On Windows, truncating a file open in append mode doesn't
// work, so we need to close the file descriptor first.
try {
appendExtent.fd = undefined;
await closeAsync(fd);
await truncateAsync(path, appendExtent.offset);
// Indicate that the extent is ready for the next append operation.
appendExtent.appendStatus = AppendStatusCode.Idle;
}
catch (truncate_err) {
this.logger.error(`FSExtentStore:appendExtent() Truncate path:${path} len: ${appendExtent.offset} error:${JSON.stringify(truncate_err)}.`, contextId);
}
throw err;
}
})()
.then(resolve)
.catch(reject);
});
return this.appendQueue.operate(op, contextId);
}
/**
* Read data from persistency layer according to the given IExtentChunk.
*
* @param {IExtentChunk} [extentChunk]
* @param {string} [contextId]
* @returns {Promise<NodeJS.ReadableStream>}
* @memberof FSExtentStore
*/
async readExtent(extentChunk, contextId) {
if (extentChunk === undefined || extentChunk.count === 0) {
return new ZeroBytesStream_1.default(0);
}
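// The zero extent is virtual; serve the requested number of zero bytes
// directly from memory instead of touching the disk.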
if (extentChunk.id === IBlobMetadataStore_1.ZERO_EXTENT_ID) {
return new ZeroBytesStream_1.default(extentChunk.count);
}
const persistencyId = await this.metadataStore.getExtentLocationId(extentChunk.id);
const path = this.generateExtentPath(persistencyId, extentChunk.id);
const op = () => new Promise((resolve, reject) => {
this.logger.verbose(`FSExtentStore:readExtent() Creating read stream. LocationId:${persistencyId} extentId:${extentChunk.id} path:${path} offset:${extentChunk.offset} count:${extentChunk.count} end:${extentChunk.offset + extentChunk.count - 1}`, contextId);
const stream = (0, fs_1.createReadStream)(path, {
start: extentChunk.offset,
end: extentChunk.offset + extentChunk.count - 1
}).on("close", () => {
this.logger.verbose(`FSExtentStore:readExtent() Read stream closed. LocationId:${persistencyId} extentId:${extentChunk.id} path:${path} offset:${extentChunk.offset} count:${extentChunk.count} end:${extentChunk.offset + extentChunk.count - 1}`, contextId);
});
resolve(stream);
});
return this.readQueue.operate(op, contextId);
}
/**
* Merge several extent chunks into a single ReadableStream according to the given offset and count.
*
* @param {IExtentChunk[]} extentChunkArray
* @param {number} [offset=0]
* @param {number} [count=Infinity]
* @param {string} [contextId]
* @returns {Promise<NodeJS.ReadableStream>}
* @memberof FSExtentStore
*/
async readExtents(extentChunkArray, offset = 0, count = Infinity, contextId) {
this.logger.verbose(`FSExtentStore:readExtents() Start read from multi extents...`, contextId);
if (count === 0) {
return new ZeroBytesStream_1.default(0);
}
const start = offset; // Start inclusive position in the merged stream
const end = offset + count; // End exclusive position in the merged stream
const streams = [];
let accumulatedOffset = 0; // Current payload offset in the merged stream
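// Walk the chunks in order: skip chunks that end before the requested window,
// stop once past its end, and clip chunks that straddle either boundary.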
for (const chunk of extentChunkArray) {
const nextOffset = accumulatedOffset + chunk.count;
if (nextOffset <= start) {
accumulatedOffset = nextOffset;
continue;
}
else if (end <= accumulatedOffset) {
break;
}
else {
let chunkStart = chunk.offset;
let chunkEnd = chunk.offset + chunk.count;
if (start > accumulatedOffset) {
chunkStart = chunkStart + start - accumulatedOffset; // Inclusive
}
if (end <= nextOffset) {
chunkEnd = chunkEnd - (nextOffset - end); // Exclusive
}
streams.push(await this.readExtent({
id: chunk.id,
offset: chunkStart,
count: chunkEnd - chunkStart
}, contextId));
accumulatedOffset = nextOffset;
}
}
// TODO: What happens when count exceeds merged payload length?
// Throw an error or just return as much data as we can?
if (end !== Infinity && accumulatedOffset < end) {
throw new RangeError(
// tslint:disable-next-line:max-line-length
`Not enough payload data error. Total length of payloads is ${accumulatedOffset}, while required data offset is ${offset}, count is ${count}.`);
}
return multistream(streams);
}
/**
* Delete the given extents from the persistency layer.
*
* @param {Iterable<string>} extents
* @returns {Promise<number>} Number of extents deleted
* @memberof IExtentStore
*/
async deleteExtents(extents) {
let count = 0;
for (const id of extents) {
// Active write extents must not be deleted. No error is thrown, because the
// GC cannot tell that an extent is active and will still ask for it to be
// deleted; such extents are simply skipped.
if (this.isActiveExtent(id)) {
this.logger.debug(`FSExtentStore:deleteExtents() Skip deleting active extent:${id}`);
continue;
}
const locationId = await this.metadataStore.getExtentLocationId(id);
const path = this.generateExtentPath(locationId, id);
this.logger.debug(`FSExtentStore:deleteExtents() Delete extent:${id} location:${locationId} path:${path}`);
try {
await unlinkAsync(path);
await this.metadataStore.deleteExtent(id);
}
catch (err) {
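// If the file is already gone (ENOENT), still delete the metadata record.
// Other errors are swallowed, leaving the metadata in place so a later
// GC pass can retry the deletion.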
if (err.code === "ENOENT") {
await this.metadataStore.deleteExtent(id);
}
}
count++;
}
return count;
}
/**
* Return the metadata store used by this extent store.
*
* @returns {IExtentMetadataStore}
* @memberof IExtentStore
*/
getMetadataStore() {
return this.metadataStore;
}
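/**
* Pipe a readable stream into a write stream with manual backpressure
* handling, then flush the data to disk. Resolves with the number of
* bytes written.
*
* @private
* @param {NodeJS.ReadableStream} rs
* @param {Writable} ws
* @param {number} [fd]
* @param {string} [contextId]
* @returns {Promise<number>}
* @memberof FSExtentStore
*/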
async streamPipe(rs, ws, fd, contextId) {
return new Promise((resolve, reject) => {
this.logger.debug(`FSExtentStore:streamPipe() Start piping data to write stream`, contextId);
let count = 0;
let wsEnd = false;
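// Manual pipe with backpressure: pause the readable when the writable's
// buffer is full and resume it on "drain", counting bytes as they flow.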
rs.on("data", data => {
count += data.length;
if (!ws.write(data)) {
rs.pause();
}
})
.on("end", () => {
this.logger.debug(`FSExtentStore:streamPipe() Readable stream triggers end event, ${count} bytes piped`, contextId);
if (!wsEnd) {
this.logger.debug(`FSExtentStore:streamPipe() Invoke write stream end()`, contextId);
ws.end();
wsEnd = true;
}
})
.on("close", () => {
this.logger.debug(`FSExtentStore:streamPipe() Readable stream triggers close event, ${count} bytes piped`, contextId);
if (!wsEnd) {
this.logger.debug(`FSExtentStore:streamPipe() Invoke write stream end()`, contextId);
ws.end();
wsEnd = true;
}
})
.on("error", err => {
this.logger.debug(`FSExtentStore:streamPipe() Readable stream triggers error event, error:${JSON.stringify(err)}, after ${count} bytes piped. Reject streamPipe().`, contextId);
reject(err);
});
ws.on("drain", () => {
rs.resume();
})
.on("finish", () => {
if (typeof fd === "number") {
this.logger.debug(`FSExtentStore:streamPipe() Writable stream triggers finish event, after ${count} bytes piped. Flush data to fd:${fd}.`, contextId);
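// fdatasync flushes the written data to disk (without forcing a file
// metadata flush) so the extent is durable before the promise resolves.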
(0, fs_1.fdatasync)(fd, err => {
if (err) {
this.logger.debug(`FSExtentStore:streamPipe() Flush data to fd:${fd} failed with error:${JSON.stringify(err)}. Reject streamPipe().`, contextId);
reject(err);
}
else {
this.logger.debug(`FSExtentStore:streamPipe() Flush data to fd:${fd} successfully. Resolve streamPipe().`, contextId);
resolve(count);
}
});
}
else {
this.logger.debug(`FSExtentStore:streamPipe() Resolve streamPipe() without flushing data.`, contextId);
resolve(count);
}
})
.on("error", err => {
this.logger.debug(`FSExtentStore:streamPipe() Writable stream triggers error event, error:${JSON.stringify(err)}, after ${count} bytes piped. Reject streamPipe().`, contextId);
reject(err);
});
});
}
/**
* Check whether an extent is one of the active extents.
*
* @private
* @param {string} id
* @returns {boolean}
* @memberof FSExtentStore
*/
isActiveExtent(id) {
// TODO: Use map instead of array to quick check
for (const extent of this.activeWriteExtents) {
if (extent.id === id) {
return true;
}
}
return false;
}
/**
* Create a new append extent model for a new write location.
*
* @private
* @param {string} persistencyId
* @returns {IAppendExtent}
* @memberof FSExtentStore
*/
createAppendExtent(persistencyId) {
return {
id: uuid(),
offset: 0,
appendStatus: AppendStatusCode.Idle,
locationId: persistencyId
};
}
/**
* Select a new extent to append to for an existing write location.
*
* @private
* @param {IAppendExtent} appendExtent
* @memberof FSExtentStore
*/
getNewExtent(appendExtent) {
appendExtent.id = uuid();
appendExtent.offset = 0;
appendExtent.fd = undefined;
}
/**
* Generate the file path for an extent in the given persistency location.
*
* @private
* @param {string} persistencyId
* @param {string} extentId
* @returns {string}
* @memberof FSExtentStore
*/
generateExtentPath(persistencyId, extentId) {
const directoryPath = this.persistencyPath.get(persistencyId);
if (!directoryPath) {
// No directory is mapped to this locationId; fail fast with a descriptive
// error instead of letting path.join throw on an undefined argument.
throw new Error(`Cannot find persistency location for locationId: ${persistencyId}`);
}
return (0, path_1.join)(directoryPath, extentId);
}
}
exports.default = FSExtentStore;
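// Example usage (illustrative sketch only; `metadataStore` and `logger` stand
// for any IExtentMetadataStore and ILogger implementations, and the location
// values shown are placeholders):
//
//   const FSExtentStore = require("./FSExtentStore").default;
//   const store = new FSExtentStore(metadataStore, [
//     { locationId: "Default", locationPath: "__extents__", maxConcurrency: 10 }
//   ], logger);
//   await store.init();
//   const chunk = await store.appendExtent(Buffer.from("hello"));
//   const stream = await store.readExtent(chunk);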
//# sourceMappingURL=FSExtentStore.js.map