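// hfs — upload module (compiled CommonJS output of the TypeScript source).
// Receives uploads into temporary files, with support for resumable and split
// uploads, free-disk-space checks, and delayed deletion of unfinished uploads.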
"use strict"; var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); exports.dontOverwriteUploading = exports.minAvailableMb = exports.deleteUnfinishedUploadsAfter = void 0; exports.getUploadMeta = getUploadMeta; exports.uploadWriter = uploadWriter; const vfs_1 = require("./vfs"); const const_1 = require("./const"); const path_1 = require("path"); const fs_1 = __importDefault(require("fs")); const misc_1 = require("./misc"); const frontEndApis_1 = require("./frontEndApis"); const config_1 = require("./config"); const util_os_1 = require("./util-os"); const connections_1 = require("./connections"); const throttler_1 = require("./throttler"); const auth_1 = require("./auth"); const comments_1 = require("./comments"); const lodash_1 = __importDefault(require("lodash")); const events_1 = __importDefault(require("./events")); const promises_1 = require("fs/promises"); const expiringCache_1 = require("./expiringCache"); const first_1 = require("./first"); const stream_1 = require("stream"); const promises_2 = require("node:fs/promises"); const xxhashjs_1 = __importDefault(require("xxhashjs")); exports.deleteUnfinishedUploadsAfter = (0, config_1.defineConfig)('delete_unfinished_uploads_after', 86400); exports.minAvailableMb = (0, config_1.defineConfig)('min_available_mb', 100); exports.dontOverwriteUploading = (0, config_1.defineConfig)('dont_overwrite_uploading', true); const waitingToBeDeleted = {}; (0, first_1.onProcessExit)(() => { if (!Object.keys(waitingToBeDeleted).length) return; console.log("removing unfinished uploads"); for (const path in waitingToBeDeleted) try { fs_1.default.rmSync(path, { force: true }); } catch (_a) { } }); const ATTR_UPLOADER = 'uploader'; function getUploadMeta(path) { return (0, misc_1.loadFileAttr)(path, ATTR_UPLOADER); } function setUploadMeta(path, ctx) { return (0, misc_1.storeFileAttr)(path, ATTR_UPLOADER, { username: (0, auth_1.getCurrentUsername)(ctx) || undefined, ip: ctx.ip, }); } async function calcHash(fn, limit = Infinity) { const hash = xxhashjs_1.default.h32(); const stream = new stream_1.Transform({ transform(chunk, enc, done) { hash.update(chunk); done(); } }); fs_1.default.createReadStream(fn, { end: limit - 1 }).pipe(stream); console.debug('hashing', fn); await (0, stream_1.once)(stream, 'finish'); console.debug('hashed', fn); return hash.digest().toString(16); } const diskSpaceCache = (0, expiringCache_1.expiringCache)(3000); // invalidate shortly const uploadingFiles = new Map(); // stay sync because we use this function with formidable() function uploadWriter(base, baseUri, path, ctx) { let fullPath = ''; if ((0, misc_1.dirTraversal)(path)) return fail(const_1.HTTP_FOOL); if ((0, vfs_1.statusCodeForMissingPerm)(base, 'can_upload', ctx)) { if (!ctx.get('x-hfs-wait')) { // you can disable the following behavior // avoid waiting hours for just an error const t = setTimeout(() => (0, connections_1.disconnect)(ctx), 30000); ctx.res.on('finish', () => clearTimeout(t)); } return fail(); } // enforce minAvailableMb fullPath = (0, path_1.join)(base.source, path); const dir = (0, path_1.dirname)(fullPath); const min = exports.minAvailableMb.get() * (1 << 20); const contentLength = Number(ctx.headers["content-length"]); const isPartial = ctx.query.partial !== undefined; // while the presence of "partial" conveys the upload is split... 
// stay sync because we use this function with formidable()
function uploadWriter(base, baseUri, path, ctx) {
    let fullPath = '';
    if ((0, misc_1.dirTraversal)(path))
        return fail(const_1.HTTP_FOOL);
    if ((0, vfs_1.statusCodeForMissingPerm)(base, 'can_upload', ctx)) {
        if (!ctx.get('x-hfs-wait')) { // you can disable the following behavior
            // avoid waiting hours for just an error
            const t = setTimeout(() => (0, connections_1.disconnect)(ctx), 30000);
            ctx.res.on('finish', () => clearTimeout(t));
        }
        return fail();
    }
    // enforce minAvailableMb
    fullPath = (0, path_1.join)(base.source, path);
    const dir = (0, path_1.dirname)(fullPath);
    const min = exports.minAvailableMb.get() * (1 << 20);
    const contentLength = Number(ctx.headers["content-length"]);
    const isPartial = ctx.query.partial !== undefined; // while the presence of "partial" conveys the upload is split...
    const stillToWrite = Math.max(contentLength, Number(ctx.query.partial) || 0); // ...the number is used to tell how much space we need (fullSize - offset)
    if (isNaN(stillToWrite)) {
        if (min)
            return fail(const_1.HTTP_BAD_REQUEST, 'content-length mandatory');
    }
    else
        try { // refer to the source of the closest node that actually belongs to the vfs, so that cache is more effective
            let closestVfsNode = base; // if base=root, there's no parent and no original
            while ((closestVfsNode === null || closestVfsNode === void 0 ? void 0 : closestVfsNode.parent) && !closestVfsNode.original)
                closestVfsNode = closestVfsNode.parent; // if it's not original, it surely has a parent
            const statDir = closestVfsNode.source;
            const res = diskSpaceCache.try(statDir, () => (0, util_os_1.getDiskSpaceSync)(statDir));
            if (!res)
                throw 'miss';
            const { free } = res;
            if (typeof free !== 'number' || isNaN(free))
                throw '';
            if (stillToWrite > free - (min || 0))
                return fail(const_1.HTTP_INSUFFICIENT_STORAGE);
        }
        catch (e) { // warn, but let it through
            console.warn("can't check disk size:", e.message || String(e));
        }
    // optionally 'skip'
    if (ctx.query.existing === 'skip' && fs_1.default.existsSync(fullPath))
        return fail(const_1.HTTP_CONFLICT, 'exists');
    const already = uploadingFiles.get(fullPath); // this can be checked so early because this function is sync
    if (already) // if it's the same client, we tell it to retry later
        return fail(ctx.query.notifications && ctx.query.notifications === already.query.notifications ? const_1.HTTP_NOT_MODIFIED : const_1.HTTP_CONFLICT, 'already uploading');
    let overwriteRequestedButForbidden = false;
    try {
        const sendCurrentSize = lodash_1.default.debounce(() => (0, frontEndApis_1.notifyClient)(ctx, const_1.UPLOAD_RESUMABLE, { path, written: getCurrentSize() }), 1000, { maxWait: 1000 });
        // if upload creates a folder, then add meta to it too
        if (!dir.endsWith(':\\') && fs_1.default.mkdirSync(dir, { recursive: true }))
            setUploadMeta(dir, ctx);
        // use temporary name while uploading
        const keepName = (0, path_1.basename)(fullPath).slice(-200);
        const firstTempName = (0, path_1.join)(dir, 'hfs$upload-' + keepName);
        const altTempName = (0, path_1.join)(dir, 'hfs$upload2-' + keepName); // this file makes sense only while smaller than firstTempName
        const splitAndPreserving = ctx.query.preserveTempFile; // the frontend knows about an existing temp file that could be resumed, but instead of resuming it, it keeps split-uploading to the alternative temp file
        let tempName = splitAndPreserving ? altTempName : firstTempName;
        const stats = (0, misc_1.try_)(() => fs_1.default.statSync(tempName));
        const resumableSize = (stats === null || stats === void 0 ? void 0 : stats.size) || 0; // we use the size even when the user hasn't requested resume yet, to notify the frontend of the possibility
        const firstResumableStats = tempName === firstTempName ? stats : (0, misc_1.try_)(() => fs_1.default.statSync(firstTempName));
        let resumableTempName = resumableSize > 0 ? tempName : undefined;
        if (resumableTempName)
            tempName = altTempName;
        // checks for resume feature
        let resume = Number(ctx.query.resume);
        if (resume > resumableSize)
            return fail(const_1.HTTP_RANGE_NOT_SATISFIABLE);
        // warn frontend about resume possibility
        let resumeInfo = resumableTempName ? waitingToBeDeleted[resumableTempName] : undefined;
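        // trust resumeInfo only if the temp file's mtime still matches the one recorded when its deletion was scheduled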
        if ((resumeInfo === null || resumeInfo === void 0 ? void 0 : resumeInfo.mtimeMs) && (resumeInfo === null || resumeInfo === void 0 ? void 0 : resumeInfo.mtimeMs) !== (0, misc_1.try_)(() => fs_1.default.statSync(resumableTempName).mtimeMs)) // outdated?
            resumeInfo = undefined;
        if (!resume)
            (0, misc_1.with_)(resumableTempName, async (x) => {
                (0, frontEndApis_1.notifyClient)(ctx, const_1.UPLOAD_RESUMABLE, !x ? { path } : {
                    path,
                    size: resumableSize,
                    // a resumable file exists without a record? then we record it (delayedDelete), plus we provide a hash ASAP, since there's no previous giveBack to compare with
                    ...resumeInfo || lodash_1.default.omit(delayedDelete(path, exports.deleteUnfinishedUploadsAfter.get() || 0), 'giveBack'), // giveBack makes sense only if coming from resumeObject
                    timeout: undefined // this entry is here to remove the property copied in the previous line
                });
                if (x && !resumeInfo)
                    (0, frontEndApis_1.notifyClient)(ctx, const_1.UPLOAD_RESUMABLE_HASH, { path, hash: await (0, misc_1.parseFile)(x, calcHash) }); // negligible memory leak
            });
        // append if resuming
        const resuming = resume && resumableTempName;
        if (!resuming)
            resume = 0;
        const writeStream = (0, misc_1.createStreamLimiter)(contentLength !== null && contentLength !== void 0 ? contentLength : Infinity);
        if (resume && resumableTempName && !splitAndPreserving) { // we want to resume the firstTempName, actually
            fs_1.default.rm(altTempName, () => { });
            tempName = resumableTempName;
        }
        let isWritingSecondFile = tempName === altTempName;
        const fullSize = stillToWrite + resume;
        ctx.state.uploadDestinationPath = tempName;
        // allow plugins to mess with the write-stream, because the read-stream can be complicated in case of multipart
        const obj = { ctx, writeStream, uri: '' };
        const resEvent = events_1.default.emit('uploadStart', obj);
        if (resEvent === null || resEvent === void 0 ? void 0 : resEvent.isDefaultPrevented())
            return;
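        // open the target: reuse the resumable temp file in r+ mode, writing from the resume offset, or create a fresh temp file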
        const fileStream = resume && resumableTempName ?
            fs_1.default.createWriteStream(resumableTempName, { flags: 'r+', start: resume }) :
            fs_1.default.createWriteStream(tempName);
        writeStream.on('error', e => {
            releaseFile();
            console.debug(e);
        });
        writeStream.pipe(fileStream);
        Object.assign(obj, { fileStream });
        trackProgress();
        cancelDeletion(tempName);
        uploadingFiles.set(fullPath, ctx);
        console.debug('upload started');
        // the file stream doesn't have an event for data being written, so we use 'data' of its feeder, which happens before, so we postpone a bit, trying to have a fresher number
        writeStream.on('data', () => setTimeout(checkIfNewUploadBecameLargerThanResumable));
        const lockMiddleware = (0, misc_1.pendingPromise)(); // expose when all operations stopped
        writeStream.once('close', async () => {
            try {
                ctx.state.uploadSize = bytesGot(); // in case content-length is not specified
                await new Promise(res => fileStream.close(res)); // this only seems necessary on Windows
                if (ctx.isAborted()) { // in the very unlikely case the connection is interrupted between last-byte and here, we still consider it unfinished, as the client had no way to know, and will resume, but it would get an error if we finish the process
                    if (isWritingSecondFile) // we don't want to be left with 2 temp files
                        return (0, promises_1.rm)(altTempName).catch(console.warn);
                    const sec = exports.deleteUnfinishedUploadsAfter.get();
                    return lodash_1.default.isNumber(sec) && delayedDelete(tempName, sec);
                }
                if (isPartial) // we are supposed to leave the upload as unfinished, with the temp name
                    return ctx.status = const_1.HTTP_NO_CONTENT; // lockMiddleware contains an empty string, so we must take care of the status
                let dest = fullPath;
                if (exports.dontOverwriteUploading.get() && !await overwriteAnyway() && fs_1.default.existsSync(dest)) {
                    if (overwriteRequestedButForbidden) {
                        await (0, promises_1.rm)(tempName).catch(console.warn);
                        releaseFile();
                        return fail();
                    }
                    const ext = (0, path_1.extname)(dest);
                    const base = dest.slice(0, -ext.length || Infinity);
                    let i = 1;
                    do
                        dest = `${base} (${i++})${ext}`;
                    while (fs_1.default.existsSync(dest));
                }
                try {
                    await (0, promises_1.rename)(tempName, dest);
                    const t = Number(ctx.query.giveBack); // we know giveBack contains lastModified in ms
                    if (t) // so we use it to touch the file
                        await (0, promises_2.utimes)(dest, Date.now() / 1000, t / 1000);
                    cancelDeletion(tempName); // not necessary, as deletion's failure is silent, but still
                    if (isWritingSecondFile) { // we've been using altTempName, but now we're done, so we can delete firstTempName
                        cancelDeletion(firstTempName);
                        await (0, promises_1.rm)(firstTempName); // wait, so the client can count on the temp-file being gone
                    }
                    releaseFile();
                    ctx.state.uploadDestinationPath = dest;
                    void setUploadMeta(dest, ctx);
                    if (ctx.query.comment)
                        void (0, comments_1.setCommentFor)(dest, String(ctx.query.comment));
                    obj.uri = (0, misc_1.enforceFinal)('/', baseUri) + (0, misc_1.pathEncode)((0, path_1.basename)(dest));
                    events_1.default.emit('uploadFinished', obj);
                    console.debug("upload finished", dest);
                    if (resEvent)
                        for (const cb of resEvent)
                            if (lodash_1.default.isFunction(cb))
                                cb(obj);
                }
                catch (err) {
                    void setUploadMeta(tempName, ctx);
                    console.error("couldn't rename temp to", dest, String(err));
                }
            }
            finally {
                releaseFile();
                lockMiddleware.resolve(obj.uri);
            }
        });
        return Object.assign(obj.writeStream, { lockMiddleware });
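        // inner helpers: function declarations are hoisted, so they are reachable from the code above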
        function trackProgress() {
            let lastGot = 0;
            let lastGotTime = 0;
            Object.assign(ctx.state, { opTotal: fullSize, opOffset: resume / fullSize, opProgress: 0 });
            const conn = (0, connections_1.updateConnectionForCtx)(ctx);
            if (!conn)
                return;
            const h = setInterval(() => {
                const now = Date.now();
                const got = bytesGot();
                const inSpeed = (0, throttler_1.roundSpeed)((got - lastGot) / (now - lastGotTime));
                lastGot = got;
                lastGotTime = now;
                (0, connections_1.updateConnection)(conn, { inSpeed, got }, { opProgress: (resume + got) / fullSize });
            }, 1000);
            writeStream.once('close', () => clearInterval(h));
        }
        function getCurrentSize() {
            return bytesGot() + resume;
        }
        function bytesGot() {
            return fileStream.bytesWritten + fileStream.writableLength;
        }
        function checkIfNewUploadBecameLargerThanResumable() {
            sendCurrentSize(); // keep the client updated in case it needs to resume on disconnection
            if (isWritingSecondFile && getCurrentSize() > (firstResumableStats === null || firstResumableStats === void 0 ? void 0 : firstResumableStats.size))
                try { // better be sync here, as we don't want the upload to finish in the middle of the rename
                    fs_1.default.renameSync(tempName, firstTempName); // try to rename $upload2 to $upload, overwriting
                    tempName = firstTempName;
                    isWritingSecondFile = false;
                    resumableTempName = undefined;
                    (0, frontEndApis_1.notifyClient)(ctx, const_1.UPLOAD_RESUMABLE, { path }); // no longer resumable
                }
                catch (_a) { }
        }
    }
    catch (e) {
        releaseFile();
        throw e;
    }
    async function overwriteAnyway() {
        if (ctx.query.existing !== 'overwrite')
            return false;
        const n = await (0, vfs_1.getNodeByName)(path, base);
        if (n && !(0, vfs_1.statusCodeForMissingPerm)(n, 'can_delete', ctx))
            return true;
        overwriteRequestedButForbidden = true;
        return false;
    }
    function delayedDelete(path, secs) {
        var _a;
        clearTimeout((_a = waitingToBeDeleted[path]) === null || _a === void 0 ? void 0 : _a.timeout);
        return waitingToBeDeleted[path] = {
            giveBack: ctx.query.giveBack,
            mtimeMs: (0, misc_1.try_)(() => fs_1.default.statSync(path).mtimeMs),
            expires: Date.now() + secs * 1000,
            timeout: setTimeout(() => {
                delete waitingToBeDeleted[path];
                void (0, promises_1.rm)(path);
            }, secs * 1000)
        };
    }
    function cancelDeletion(path) {
        var _a;
        clearTimeout((_a = waitingToBeDeleted[path]) === null || _a === void 0 ? void 0 : _a.timeout);
        delete waitingToBeDeleted[path];
    }
    function releaseFile() {
        uploadingFiles.delete(fullPath);
    }
    function fail(status = ctx.status, msg) {
        console.debug('upload failed', status, msg || '');
        ctx.status = status;
        if (msg)
            ctx.body = msg;
        (0, frontEndApis_1.notifyClient)(ctx, const_1.UPLOAD_REQUEST_STATUS, { [path]: status }); // allow browsers to detect failure while still sending body
    }
}
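// --- Usage sketch (not part of the original file) ---
// A minimal illustration of how uploadWriter() could be driven by formidable inside a
// Koa middleware, given the comment above that the function stays sync for formidable's
// sake. `node` (the vfs folder node), `baseUri`, and the error handling here are
// assumptions for illustration; the actual hfs middleware is more elaborate.
/*
const formidable = require('formidable');
const { Writable } = require('stream');

async function uploadMiddleware(ctx, node, baseUri) {
    const pendingUris = [];
    const form = formidable({
        fileWriteStreamHandler: file => {
            // uploadWriter is sync and returns the writable stream formidable needs,
            // or undefined when the upload was rejected (fail() was called)
            const writer = uploadWriter(node, baseUri, file.originalFilename, ctx);
            if (writer)
                pendingUris.push(writer.lockMiddleware); // resolves with the final uri once all operations stopped
            return writer || new Writable({ write: (chunk, enc, cb) => cb() }); // swallow rejected uploads
        }
    });
    await new Promise((resolve, reject) =>
        form.parse(ctx.req, err => err ? reject(err) : resolve()));
    ctx.body = { uris: await Promise.all(pendingUris) }; // '' for partial/failed uploads
}
*/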