graphql-upload-ts
TypeScript-first middleware and Upload scalar for GraphQL multipart requests (file uploads) with support for Apollo Server, Express, Koa, and more.
import busboy from 'busboy';
import createError from 'http-errors';
import objectPath from 'object-path';
import { randomBytes } from 'node:crypto';
import { EventEmitter } from 'node:events';
import { close, unlink, closeSync, unlinkSync, open, write, read } from 'node:fs';
import { tmpdir } from 'node:os';
import { join } from 'node:path';
import { Writable, Readable } from 'node:stream';
class ReadAfterDestroyedError extends Error {
}
class ReadAfterReleasedError extends Error {
}
// Use a “proxy” event emitter configured to have an infinite maximum number of
// listeners to prevent Node.js max listeners exceeded warnings if many
// `fs-capacitor` `ReadStream` instances are created at the same time. See:
// https://github.com/mike-marcacci/fs-capacitor/issues/30
const processExitProxy = new EventEmitter();
processExitProxy.setMaxListeners(Number.POSITIVE_INFINITY);
process.once('exit', () => processExitProxy.emit('exit'));
class ReadStream extends Readable {
constructor(writeStream, options) {
super({
highWaterMark: options?.highWaterMark,
encoding: options?.encoding,
autoDestroy: true,
});
this._pos = 0;
this._writeStream = writeStream;
}
_read(n) {
if (this.destroyed)
return;
if (typeof this._writeStream._fd !== 'number') {
this._writeStream.once('ready', () => this._read(n));
return;
}
// Using `allocUnsafe` here is OK because we return a slice the length of
// `bytesRead`, and discard the rest. This prevents node from having to zero
// out the entire allocation first.
const buf = Buffer.allocUnsafe(n);
read(this._writeStream._fd, buf, 0, n, this._pos, (error, bytesRead) => {
if (error)
this.destroy(error);
// Push any read bytes into the local stream buffer.
if (bytesRead) {
this._pos += bytesRead;
this.push(buf.slice(0, bytesRead));
return;
}
// If there were no more bytes to read and the write stream is finished,
// then this stream has reached the end.
if (this._writeStream._writableState.finished) {
// Check if we have consumed the whole file up to where
// the write stream has written before ending the stream
if (this._pos < this._writeStream._pos)
this._read(n);
else
this.push(null);
return;
}
// Otherwise, wait for the write stream to add more data or finish.
const retry = () => {
this._writeStream.off('finish', retry);
this._writeStream.off('write', retry);
this._read(n);
};
this._writeStream.on('finish', retry);
this._writeStream.on('write', retry);
});
}
}
class WriteStream extends Writable {
constructor(options) {
super({
highWaterMark: options?.highWaterMark,
defaultEncoding: options?.defaultEncoding,
autoDestroy: false,
});
this._fd = null;
this._path = null;
this._pos = 0;
this._readStreams = new Set();
this._released = false;
this._cleanup = (callback) => {
const fd = this._fd;
const path = this._path;
if (typeof fd !== 'number' || typeof path !== 'string') {
callback(null);
return;
}
// Close the file descriptor.
close(fd, (closeError) => {
// An error here probably means the fd was already closed, but we can
// still try to unlink the file.
unlink(path, (unlinkError) => {
// If we are unable to unlink the file, the operating system will
// clean up on next restart, since we store these in `os.tmpdir()`.
this._fd = null;
// We avoid removing this until now in case an exit occurs while
// asynchronously cleaning up.
processExitProxy.off('exit', this._cleanupSync);
callback(unlinkError ?? closeError);
});
});
};
this._cleanupSync = () => {
processExitProxy.off('exit', this._cleanupSync);
if (typeof this._fd === 'number')
try {
closeSync(this._fd);
}
catch (_error) {
// An error here probably means the fd was already closed, but we can
// still try to unlink the file.
}
try {
if (this._path !== null) {
unlinkSync(this._path);
}
}
catch (_error) {
// If we are unable to unlink the file, the operating system will clean
// up on next restart, since we store these in `os.tmpdir()`.
}
};
// Generate a random filename.
randomBytes(16, (error, buffer) => {
if (error) {
this.destroy(error);
return;
}
this._path = join((options?.tmpdir ?? tmpdir)(), `capacitor-${buffer.toString('hex')}.tmp`);
// Create a file in the OS's temporary files directory.
open(this._path, 'wx+', 0o600, (error, fd) => {
if (error) {
this.destroy(error);
return;
}
// Cleanup when the process exits or is killed.
processExitProxy.once('exit', this._cleanupSync);
this._fd = fd;
this.emit('ready');
});
});
}
_final(callback) {
if (typeof this._fd !== 'number') {
this.once('ready', () => this._final(callback));
return;
}
callback();
}
_write(chunk, encoding, callback) {
if (typeof this._fd !== 'number') {
this.once('ready', () => this._write(chunk, encoding, callback));
return;
}
write(this._fd, chunk, 0, chunk.length, this._pos, (error) => {
if (error) {
callback(error);
return;
}
// It's safe to increment `this._pos` after flushing to the filesystem
// because node streams ensure that only one `_write()` is active at a
// time. If this assumption is broken, the behavior of this library is
// undefined, regardless of where this is incremented. Relocating this
// to increment synchronously would result in correct file contents, but
// the out-of-order writes would still open the potential for read streams
// to scan positions that have not yet been written.
this._pos += chunk.length;
this.emit('write');
callback();
});
}
release() {
this._released = true;
if (this._readStreams.size === 0)
this.destroy();
}
_destroy(error, callback) {
// Destroy all attached read streams.
for (const readStream of this._readStreams) {
readStream.destroy(error || undefined);
}
// This capacitor is fully initialized.
if (typeof this._fd === 'number' && typeof this._path === 'string') {
this._cleanup((cleanupError) => callback(cleanupError ?? error));
return;
}
// This capacitor has not yet finished initialization; if initialization
// does complete, immediately clean up after.
this.once('ready', () => {
this._cleanup((cleanupError) => {
if (cleanupError) {
this.emit('error', cleanupError);
}
});
});
callback(error);
}
createReadStream(options) {
if (this.destroyed)
throw new ReadAfterDestroyedError('A ReadStream cannot be created from a destroyed WriteStream.');
if (this._released)
throw new ReadAfterReleasedError('A ReadStream cannot be created from a released WriteStream.');
const readStream = new ReadStream(this, options);
this._readStreams.add(readStream);
readStream.once('close', () => {
this._readStreams.delete(readStream);
if (this._released && this._readStreams.size === 0) {
this.destroy();
}
});
return readStream;
}
}
/**
* Safely ignores a Node.js readable stream.
*/
function ignoreStream(stream) {
// Prevent an unhandled error from crashing the process.
stream.on('error', () => { });
// Waste the stream.
stream.resume();
}
class Upload {
constructor() {
this.promise = new Promise((resolve, reject) => {
this._resolve = resolve;
this._reject = reject;
});
// Prevent unhandled promise rejection errors
this.promise.catch(() => { });
}
resolve(file) {
this.file = file;
this._resolve(file);
}
reject(error) {
this._reject(error);
}
}
/**
* Maximum size for non-file form fields (in bytes).
* This applies to text fields like 'operations' and 'map' in GraphQL multipart requests.
* These fields contain JSON data (query, variables, and file mappings).
* Default: 1MB - sufficient for large GraphQL queries and variable sets.
*/
const DEFAULT_MAX_FIELD_SIZE = 1000000; // 1MB
/**
* Maximum size for uploaded files (in bytes).
* This applies to the actual binary file content being uploaded.
* Default: 5MB - a reasonable limit for most web applications.
* Can be overridden per request for larger files (videos, high-res images, etc.).
*/
const DEFAULT_MAX_FILE_SIZE = 5000000; // 5MB
/**
* Maximum number of files that can be uploaded in a single request.
* Default: Infinity - no limit on the number of files.
* Should be set to a reasonable number in production to prevent abuse.
*/
const DEFAULT_MAX_FILES = Number.POSITIVE_INFINITY;
const GRAPHQL_MULTIPART_REQUEST_SPEC_URL = 'https://github.com/jaydenseric/graphql-multipart-request-spec';
async function processRequest(request, response, options) {
const { maxFieldSize = DEFAULT_MAX_FIELD_SIZE, maxFileSize = DEFAULT_MAX_FILE_SIZE, maxFiles = DEFAULT_MAX_FILES, } = options ?? {};
return new Promise((resolve, reject) => {
let released = false;
let exitError;
let operations;
let operationsPath;
let map;
const parser = busboy({
headers: request.headers,
defParamCharset: 'utf8',
limits: {
fieldSize: maxFieldSize,
fields: 2, // Only operations and map.
fileSize: maxFileSize,
files: maxFiles,
},
});
function exit(error, isParserError = false) {
if (exitError)
return;
exitError = error;
if (map) {
for (const upload of map.values()) {
if (!upload.file)
upload.reject(exitError);
}
}
// If the error came from the parser, don’t cause it to be emitted again.
if (isParserError) {
parser.destroy();
}
else {
parser.destroy(exitError);
}
request.unpipe(parser);
// With a sufficiently large request body, subsequent events in the same
// event frame cause the stream to pause after the parser is destroyed. To
// ensure that the request resumes, the call to .resume() is scheduled for
// later in the event loop.
setImmediate(() => request.resume());
reject(exitError);
}
parser.on('field', (fieldName, value, { valueTruncated }) => {
if (valueTruncated)
return exit(createError(413, `The ‘${fieldName}’ multipart field value exceeds the ${maxFieldSize} byte size limit.`));
switch (fieldName) {
case 'operations':
try {
operations = JSON.parse(value);
}
catch (_error) {
return exit(createError(400, `Invalid JSON in the ‘operations’ multipart field (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`));
}
// `operations` should be an object or an array. Note that arrays
// and `null` have an `object` type.
if (typeof operations !== 'object' || !operations)
return exit(createError(400, `Invalid type for the ‘operations’ multipart field (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`));
operationsPath = objectPath(operations);
break;
case 'map': {
if (!operations)
return exit(createError(400, `Disordered multipart fields; ‘map’ should follow ‘operations’ (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`));
let parsedMap;
try {
parsedMap = JSON.parse(value);
}
catch (_error) {
return exit(createError(400, `Invalid JSON in the ‘map’ multipart field (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`));
}
// `map` should be an object.
if (typeof parsedMap !== 'object' || !parsedMap || Array.isArray(parsedMap))
return exit(createError(400, `Invalid type for the ‘map’ multipart field (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`));
const mapEntries = Object.entries(parsedMap);
// Check that the map does not exceed the max files limit, even though the
// number of files actually sent might not match the map provided by the client.
if (mapEntries.length > maxFiles)
return exit(createError(413, `${maxFiles} max file uploads exceeded.`));
map = new Map();
for (const [fieldName, paths] of mapEntries) {
if (!Array.isArray(paths))
return exit(createError(400, `Invalid type for the ‘map’ multipart field entry key ‘${fieldName}’ array (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`));
map.set(fieldName, new Upload());
for (const [index, path] of paths.entries()) {
if (typeof path !== 'string')
return exit(createError(400, `Invalid type for the ‘map’ multipart field entry key ‘${fieldName}’ array index ‘${index}’ value (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`));
try {
operationsPath?.set(path, map.get(fieldName));
}
catch (_error) {
return exit(createError(400, `Invalid object path for the ‘map’ multipart field entry key ‘${fieldName}’ array index ‘${index}’ value ‘${path}’ (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`));
}
}
}
resolve(operations);
}
}
});
parser.on('file', (fieldName, stream, { filename, encoding, mimeType: mimetype }) => {
if (!map) {
ignoreStream(stream);
return exit(createError(400, `Disordered multipart fields; files should follow ‘map’ (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`));
}
const upload = map.get(fieldName);
if (!upload) {
// The file is extraneous. As the rest can still be processed, just
// ignore it and don’t exit with an error.
ignoreStream(stream);
return;
}
let fileError;
const capacitor = new WriteStream();
capacitor.on('error', () => {
stream.unpipe();
stream.resume();
});
stream.on('limit', () => {
fileError = createError(413, `File truncated as it exceeds the ${maxFileSize} byte size limit.`);
stream.unpipe();
capacitor.destroy(fileError);
});
stream.on('error', (error) => {
fileError = error;
stream.unpipe();
capacitor.destroy(fileError);
});
const file = {
fieldName,
filename,
mimetype,
encoding,
createReadStream(options) {
const error = fileError || (released ? exitError : null);
if (error)
throw error;
return capacitor.createReadStream(options);
},
capacitor,
};
Object.defineProperty(file, 'capacitor', {
enumerable: false,
configurable: false,
writable: false,
});
stream.pipe(capacitor);
upload.resolve(file);
});
parser.once('filesLimit', () => exit(createError(413, `${maxFiles} max file uploads exceeded.`)));
parser.once('finish', () => {
request.unpipe(parser);
request.resume();
if (!operations)
return exit(createError(400, `Missing multipart field ‘operations’ (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`));
if (!map)
return exit(createError(400, `Missing multipart field ‘map’ (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`));
for (const upload of map.values())
if (!upload.file)
upload.reject(createError(400, 'File missing in the request.'));
});
// Use the `on` method instead of `once` as in edge cases the same parser
// could have multiple `error` events and all must be handled to prevent the
// Node.js process exiting with an error. One edge case is if there is a
// malformed part header as well as an unexpected end of the form.
parser.on('error', (error) => {
exit(error, true);
});
response.once('close', () => {
released = true;
if (map) {
for (const upload of map.values()) {
if (upload.file) {
// Release resources and clean up temporary files.
upload.file.capacitor.release();
}
}
}
});
request.once('close', () => {
if (!request.readableEnded)
exit(createError(499, 'Request disconnected during file upload stream parsing.'));
});
request.pipe(parser);
});
}
export { GRAPHQL_MULTIPART_REQUEST_SPEC_URL, processRequest };
//# sourceMappingURL=process-request.mjs.map
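
A minimal usage sketch (illustrative, not part of the module above): calling processRequest directly from an Express route that receives multipart requests. It assumes Express is installed, that the package's main entry re-exports processRequest (adjust the import path if not), and that GraphQL execution happens in a hypothetical executeGraphQL helper; the limit values are arbitrary examples.

import express from 'express';
import { processRequest } from 'graphql-upload-ts'; // assumption: re-exported from the main entry

const app = express();

app.post('/graphql', async (req, res, next) => {
  // Only intercept multipart requests; let other content types fall through.
  if (!req.is('multipart/form-data')) return next();
  try {
    // The option names mirror the defaults defined in the module above.
    const operations = await processRequest(req, res, {
      maxFieldSize: 1000000,  // 1 MB for the 'operations' and 'map' JSON fields
      maxFileSize: 10000000,  // 10 MB per uploaded file
      maxFiles: 5,            // reject requests mapping more than 5 files
    });
    // `operations` is the parsed 'operations' field with Upload instances
    // injected at the paths listed in the 'map' field.
    const result = await executeGraphQL(operations); // hypothetical helper
    res.json(result);
  } catch (error) {
    next(error);
  }
});

app.listen(4000);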
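
Continuing that sketch, one way to consume a resolved Upload instance taken directly from the returned operations object (with no GraphQL Upload scalar in between). The variables.file path and the destination path are assumptions for illustration; upload.promise and the resolved file object (filename, mimetype, encoding, createReadStream) come from the module above.

import { createWriteStream } from 'node:fs';
import { tmpdir } from 'node:os';
import { join } from 'node:path';
import { pipeline } from 'node:stream/promises';

async function saveUploadedFile(operations) {
  // Assumption: the client mapped a file onto `variables.file`.
  const upload = operations.variables.file;
  // `upload.promise` resolves to { fieldName, filename, mimetype, encoding, createReadStream }
  // once the matching multipart part starts streaming into the capacitor.
  const { filename, mimetype, encoding, createReadStream } = await upload.promise;
  // Do not trust the client-supplied filename for real storage paths.
  const destination = join(tmpdir(), `upload-${Date.now()}`);
  // Each createReadStream() call replays the file from the capacitor's temporary file.
  await pipeline(createReadStream(), createWriteStream(destination));
  return { filename, mimetype, encoding, destination };
}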
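
For reference, the WriteStream/ReadStream pair above implements a temporary-file "capacitor": a single writable stream buffers incoming data to a file in os.tmpdir(), and any number of read streams can replay it, even while the write is still in progress. The classes are internal to this module (only processRequest and the spec URL are exported), so this sketch takes the WriteStream class as a parameter instead of importing it.

import { Readable } from 'node:stream';
import { pipeline } from 'node:stream/promises';

// e.g. demoCapacitor(WriteStream) if run inside the module above.
async function demoCapacitor(WriteStream) {
  const capacitor = new WriteStream();
  // Buffer the source data into the capacitor's temporary file.
  await pipeline(Readable.from(['hello ', 'world']), capacitor);
  // Each read stream independently replays the buffered contents.
  const first = capacitor.createReadStream();
  const second = capacitor.createReadStream();
  console.log(String(await collect(first)));  // "hello world"
  console.log(String(await collect(second))); // "hello world"
  // After release(), the temporary file is removed once all read streams close.
  capacitor.release();
}

async function collect(stream) {
  const chunks = [];
  for await (const chunk of stream) chunks.push(chunk);
  return Buffer.concat(chunks);
}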