/**
 * graphql-upload-ts
 * TypeScript-first middleware and Upload scalar for GraphQL multipart
 * requests (file uploads) with support for Apollo Server, Express, Koa,
 * and more.
 * (Bundled CommonJS build: 942 lines / 37.8 kB of JavaScript.)
 */
;
var node_crypto = require('node:crypto');
var node_events = require('node:events');
var node_fs = require('node:fs');
var node_os = require('node:os');
var node_path = require('node:path');
var node_stream = require('node:stream');
var graphql = require('graphql');
var busboy = require('busboy');
var createError = require('http-errors');
var objectPath = require('object-path');
/**
 * Thrown when `createReadStream` is called on a `WriteStream` capacitor
 * that has already been destroyed.
 */
class ReadAfterDestroyedError extends Error {
    constructor(message) {
        super(message);
        // Identify this error class by name instead of the generic 'Error'
        // so logs and `error.name` checks are meaningful.
        this.name = 'ReadAfterDestroyedError';
    }
}
/**
 * Thrown when `createReadStream` is called on a `WriteStream` capacitor
 * after `release()` has been called.
 */
class ReadAfterReleasedError extends Error {
    constructor(message) {
        super(message);
        // Identify this error class by name instead of the generic 'Error'
        // so logs and `error.name` checks are meaningful.
        this.name = 'ReadAfterReleasedError';
    }
}
// Use a “proxy” event emitter configured to have an infinite maximum number of
// listeners to prevent Node.js max listeners exceeded warnings if many
// `fs-capacitor` `ReadStream` instances are created at the same time. See:
// https://github.com/mike-marcacci/fs-capacitor/issues/30
const processExitProxy = new node_events.EventEmitter();
processExitProxy.setMaxListeners(Number.POSITIVE_INFINITY);
// Fan the single real process 'exit' listener out to every WriteStream's
// `_cleanupSync` handler so temp files are removed even on abrupt shutdown.
process.once('exit', () => processExitProxy.emit('exit'));
/**
 * Readable stream that replays the bytes a `WriteStream` capacitor spools to
 * its temporary backing file. Reading may begin before the writer finishes;
 * the reader waits on the writer's 'write'/'finish' events and only ends once
 * it has consumed everything the writer has flushed.
 */
class ReadStream extends node_stream.Readable {
    constructor(writeStream, options) {
        super({
            highWaterMark: options?.highWaterMark,
            encoding: options?.encoding,
            autoDestroy: true,
        });
        // Byte offset of the next read from the backing file.
        this._pos = 0;
        this._writeStream = writeStream;
    }
    _read(n) {
        if (this.destroyed)
            return;
        // The backing temp file may not be open yet; retry once the writer
        // emits 'ready'.
        if (typeof this._writeStream._fd !== 'number') {
            this._writeStream.once('ready', () => this._read(n));
            return;
        }
        // Using `allocUnsafe` here is OK because we only expose the first
        // `bytesRead` bytes (which `fs.read` overwrites) and discard the rest.
        // This prevents node from having to zero out the entire allocation.
        const buf = Buffer.allocUnsafe(n);
        node_fs.read(this._writeStream._fd, buf, 0, n, this._pos, (error, bytesRead) => {
            if (error) {
                this.destroy(error);
                // Bug fix: stop here. The original fell through after
                // `destroy()` and could keep operating on a destroyed stream
                // (re-reading or pushing null).
                return;
            }
            // Push any read bytes into the local stream buffer.
            if (bytesRead) {
                this._pos += bytesRead;
                // `subarray` replaces the deprecated `Buffer.prototype.slice`
                // and has identical zero-copy view semantics.
                this.push(buf.subarray(0, bytesRead));
                return;
            }
            // If there were no more bytes to read and the write stream is
            // finished, then this stream has reached the end.
            if (this._writeStream._writableState
                .finished) {
                // Check if we have consumed the whole file up to where
                // the write stream has written before ending the stream.
                if (this._pos < this._writeStream._pos)
                    this._read(n);
                else
                    this.push(null);
                return;
            }
            // Otherwise, wait for the write stream to add more data or finish.
            const retry = () => {
                this._writeStream.off('finish', retry);
                this._writeStream.off('write', retry);
                this._read(n);
            };
            this._writeStream.on('finish', retry);
            this._writeStream.on('write', retry);
        });
    }
}
/**
 * fs-capacitor-style `Writable` that spools incoming data to a temporary file
 * (in `os.tmpdir()` by default) so any number of `ReadStream`s can replay it,
 * even while writing is still in progress.
 *
 * Lifecycle: the constructor asynchronously creates the temp file and emits
 * 'ready'; `_write`/`_final` requeue themselves until then. `release()` marks
 * the capacitor for destruction once all attached read streams close, and
 * `_destroy` closes and unlinks the temp file.
 */
class WriteStream extends node_stream.Writable {
    constructor(options) {
        super({
            highWaterMark: options?.highWaterMark,
            defaultEncoding: options?.defaultEncoding,
            // Destruction is managed manually (via `release()`) so attached
            // read streams can finish draining first.
            autoDestroy: false,
        });
        // Temp-file descriptor; `null` until the 'ready' event fires.
        this._fd = null;
        // Absolute temp-file path; `null` until generated below.
        this._path = null;
        // Number of bytes flushed to the temp file so far; read streams use
        // this to know how far they may safely read.
        this._pos = 0;
        // All ReadStreams currently attached via `createReadStream`.
        this._readStreams = new Set();
        this._released = false;
        // Asynchronously close the fd and unlink the temp file.
        this._cleanup = (callback) => {
            const fd = this._fd;
            const path = this._path;
            if (typeof fd !== 'number' || typeof path !== 'string') {
                callback(null);
                return;
            }
            // Close the file descriptor.
            node_fs.close(fd, (closeError) => {
                // An error here probably means the fd was already closed, but we can
                // still try to unlink the file.
                node_fs.unlink(path, (unlinkError) => {
                    // If we are unable to unlink the file, the operating system will
                    // clean up on next restart, since we store these in `os.tmpdir()`.
                    this._fd = null;
                    // We avoid removing this until now in case an exit occurs while
                    // asynchronously cleaning up.
                    processExitProxy.off('exit', this._cleanupSync);
                    callback(unlinkError ?? closeError);
                });
            });
        };
        // Synchronous variant used from the process 'exit' handler, where
        // async I/O can no longer run.
        this._cleanupSync = () => {
            processExitProxy.off('exit', this._cleanupSync);
            if (typeof this._fd === 'number')
                try {
                    node_fs.closeSync(this._fd);
                }
                catch (_error) {
                    // An error here probably means the fd was already closed, but we can
                    // still try to unlink the file.
                }
            try {
                if (this._path !== null) {
                    node_fs.unlinkSync(this._path);
                }
            }
            catch (_error) {
                // If we are unable to unlink the file, the operating system will clean
                // up on next restart, since we store these in `os.tmpdir()`.
            }
        };
        // Generate a random filename.
        node_crypto.randomBytes(16, (error, buffer) => {
            if (error) {
                this.destroy(error);
                return;
            }
            this._path = node_path.join((options?.tmpdir ?? node_os.tmpdir)(), `capacitor-${buffer.toString('hex')}.tmp`);
            // Create a file in the OS's temporary files directory.
            // 'wx+' fails if the path already exists; mode 0o600 restricts
            // access to the current user.
            node_fs.open(this._path, 'wx+', 0o600, (error, fd) => {
                if (error) {
                    this.destroy(error);
                    return;
                }
                // Cleanup when the process exits or is killed.
                processExitProxy.once('exit', this._cleanupSync);
                this._fd = fd;
                this.emit('ready');
            });
        });
    }
    // Defer stream finalization until the temp file is open.
    _final(callback) {
        if (typeof this._fd !== 'number') {
            this.once('ready', () => this._final(callback));
            return;
        }
        callback();
    }
    // Append `chunk` to the temp file at the current write offset; requeues
    // itself until the file is open.
    _write(chunk, encoding, callback) {
        if (typeof this._fd !== 'number') {
            this.once('ready', () => this._write(chunk, encoding, callback));
            return;
        }
        node_fs.write(this._fd, chunk, 0, chunk.length, this._pos, (error) => {
            if (error) {
                callback(error);
                return;
            }
            // It's safe to increment `this._pos` after flushing to the filesystem
            // because node streams ensure that only one `_write()` is active at a
            // time. If this assumption is broken, the behavior of this library is
            // undefined, regardless of where this is incremented. Relocating this
            // to increment synchronously would result in correct file contents, but
            // the out-of-order writes would still open the potential for read streams
            // to scan positions that have not yet been written.
            this._pos += chunk.length;
            // Wake any read streams waiting for more data.
            this.emit('write');
            callback();
        });
    }
    // Mark the capacitor released: destroy now if no readers remain, else
    // destroy when the last attached ReadStream closes (see createReadStream).
    release() {
        this._released = true;
        if (this._readStreams.size === 0)
            this.destroy();
    }
    _destroy(error, callback) {
        // Destroy all attached read streams.
        for (const readStream of this._readStreams) {
            readStream.destroy(error || undefined);
        }
        // This capacitor is fully initialized.
        if (typeof this._fd === 'number' && typeof this._path === 'string') {
            this._cleanup((cleanupError) => callback(cleanupError ?? error));
            return;
        }
        // This capacitor has not yet finished initialization; if initialization
        // does complete, immediately clean up after.
        this.once('ready', () => {
            this._cleanup((cleanupError) => {
                if (cleanupError) {
                    this.emit('error', cleanupError);
                }
            });
        });
        callback(error);
    }
    /**
     * Attach a new ReadStream that replays this capacitor's contents.
     * @throws ReadAfterDestroyedError if the capacitor was destroyed.
     * @throws ReadAfterReleasedError if `release()` was already called.
     */
    createReadStream(options) {
        if (this.destroyed)
            throw new ReadAfterDestroyedError('A ReadStream cannot be created from a destroyed WriteStream.');
        if (this._released)
            throw new ReadAfterReleasedError('A ReadStream cannot be created from a released WriteStream.');
        const readStream = new ReadStream(this, options);
        this._readStreams.add(readStream);
        readStream.once('close', () => {
            this._readStreams.delete(readStream);
            if (this._released && this._readStreams.size === 0) {
                this.destroy();
            }
        });
        return readStream;
    }
}
/**
 * Deferred container pairing a pending file upload with the promise a
 * resolver awaits. `processRequest` calls `resolve`/`reject` as the
 * multipart stream is parsed.
 */
class Upload {
    constructor() {
        let settle;
        let fail;
        this.promise = new Promise((res, rej) => {
            settle = res;
            fail = rej;
        });
        this._resolve = settle;
        this._reject = fail;
        // Attach a no-op handler so an unconsumed upload promise never
        // triggers an unhandled-rejection warning; consumers still observe
        // the rejection through `this.promise`.
        this.promise.catch(() => { });
    }
    /** Fulfill the upload with the parsed file details. */
    resolve(file) {
        this.file = file;
        this._resolve(file);
    }
    /** Reject the upload (e.g. request aborted or file missing). */
    reject(error) {
        this._reject(error);
    }
}
/**
* A GraphQL `Upload` scalar that can be used in a GraphQL schema.
* Its value in resolvers is a promise that resolves to file upload details
* for processing and storage.
*
* @example Import usage
* ```typescript
* import { GraphQLUpload } from 'graphql-upload-ts';
* ```
*
* @example Schema usage with GraphQL Tools
* ```typescript
* import { makeExecutableSchema } from '@graphql-tools/schema';
* import { GraphQLUpload } from 'graphql-upload-ts';
*
* const schema = makeExecutableSchema({
* typeDefs: `
* scalar Upload
*
* type Mutation {
* uploadFile(file: Upload!): Boolean
* }
* `,
* resolvers: {
* Upload: GraphQLUpload,
* Mutation: {
* uploadFile: async (_, { file }) => {
* const { filename, mimetype, createReadStream } = await file;
* const stream = createReadStream();
* // Process the file stream...
* return true;
* }
* }
* },
* });
* ```
*/
const uploadScalarConfig = {
    name: 'Upload',
    description: 'The `Upload` scalar type represents a file upload.',
    // Variable values must be `Upload` instances injected by `processRequest`;
    // resolvers receive the underlying promise of file details.
    parseValue(value) {
        if (!(value instanceof Upload)) {
            throw new graphql.GraphQLError('Upload value invalid. Expected Upload instance.', {
                extensions: { code: 'INVALID_UPLOAD_VALUE' },
            });
        }
        return value.promise;
    },
    // Uploads can never appear inline in a query document.
    parseLiteral(node) {
        throw new graphql.GraphQLError('Upload literal unsupported. Uploads can only be passed as variables.', {
            nodes: node,
            extensions: { code: 'UPLOAD_LITERAL_UNSUPPORTED' },
        });
    },
    // Uploads are input-only; they cannot be sent back to the client.
    serialize() {
        throw new graphql.GraphQLError('Upload serialization unsupported. Uploads cannot be serialized.', {
            extensions: { code: 'UPLOAD_SERIALIZATION_UNSUPPORTED' },
        });
    },
};
const GraphQLUpload = new graphql.GraphQLScalarType(uploadScalarConfig);
/**
 * Safely discards a Node.js readable stream: errors are swallowed (so a
 * faulty multipart part cannot crash the process) and the data is drained.
 */
function ignoreStream(stream) {
    const swallow = () => { };
    // Prevent an unhandled 'error' event from crashing the process.
    stream.on('error', swallow);
    // Switch to flowing mode so the stream's data is consumed and dropped.
    stream.resume();
}
/**
 * Maximum size for non-file form fields (in bytes).
 * This applies to text fields like 'operations' and 'map' in GraphQL multipart requests.
 * These fields contain JSON data (query, variables, and file mappings).
 * Default: 1MB - sufficient for large GraphQL queries and variable sets.
 */
const DEFAULT_MAX_FIELD_SIZE = 1000000; // 1MB
/**
 * Maximum size for uploaded files (in bytes).
 * This applies to the actual binary file content being uploaded.
 * Default: 5MB - a reasonable limit for most web applications.
 * Can be overridden per request for larger files (videos, high-res images, etc.).
 */
const DEFAULT_MAX_FILE_SIZE = 5000000; // 5MB
/**
 * Maximum number of files that can be uploaded in a single request.
 * Default: Infinity - no limit on the number of files.
 * Should be set to a reasonable number in production to prevent abuse.
 */
const DEFAULT_MAX_FILES = Number.POSITIVE_INFINITY;
/**
 * Normalizes upload limit options, applying defaults for missing values.
 * @param options Optional `{ maxFieldSize, maxFileSize, maxFiles }`.
 * @returns The fully-populated options object.
 * @throws Error when a limit is NaN, zero, negative, or (for `maxFiles`)
 *   not an integer.
 */
function validateOptions(options) {
    const validatedOptions = {
        maxFieldSize: options?.maxFieldSize ?? DEFAULT_MAX_FIELD_SIZE,
        maxFileSize: options?.maxFileSize ?? DEFAULT_MAX_FILE_SIZE,
        maxFiles: options?.maxFiles ?? DEFAULT_MAX_FILES,
    };
    // Bug fix: `NaN <= 0` is false, so without an explicit check a NaN limit
    // would silently pass validation and effectively disable that limit.
    if (Number.isNaN(validatedOptions.maxFieldSize) || validatedOptions.maxFieldSize <= 0) {
        throw new Error('maxFieldSize must be a positive number');
    }
    if (Number.isNaN(validatedOptions.maxFileSize) || validatedOptions.maxFileSize <= 0) {
        throw new Error('maxFileSize must be a positive number');
    }
    if (Number.isNaN(validatedOptions.maxFiles) || validatedOptions.maxFiles <= 0) {
        throw new Error('maxFiles must be a positive number');
    }
    if (!Number.isInteger(validatedOptions.maxFiles) &&
        validatedOptions.maxFiles !== Number.POSITIVE_INFINITY) {
        throw new Error('maxFiles must be an integer');
    }
    return validatedOptions;
}
/**
 * Checks a MIME type against an allow-list. Entries may be exact types
 * ('image/png') or wildcards ('image/*'). An empty/absent list allows all.
 * @returns `{ isValid, error? }` where `error` describes the rejection.
 */
function validateMimeType(mimetype, allowedTypes) {
    if (!allowedTypes || allowedTypes.length === 0) {
        return { isValid: true };
    }
    // MIME types are case-insensitive (RFC 2045 §5.1); normalize both sides
    // so e.g. 'Image/PNG' matches an allow-list entry of 'image/png'.
    const subject = mimetype.toLowerCase();
    const isValid = allowedTypes.some((type) => {
        const allowed = type.toLowerCase();
        if (allowed.endsWith('/*')) {
            const prefix = allowed.slice(0, -2);
            return subject.startsWith(`${prefix}/`);
        }
        return subject === allowed;
    });
    return {
        isValid,
        error: isValid ? undefined : `File type '${mimetype}' is not allowed`,
    };
}
/**
 * Checks a filename's extension against an allow-list (case-insensitive).
 * Allow-list entries may be given with or without a leading dot
 * ('png' or '.png'). An empty/absent list allows all.
 * @returns `{ isValid, error? }` where `error` describes the rejection.
 */
function validateFileExtension(filename, allowedExtensions) {
    if (!allowedExtensions || allowedExtensions.length === 0) {
        return { isValid: true };
    }
    // Bug fix: `filename.split('.').pop()` returns the whole name when there
    // is no dot at all, so an extension-less file like 'README' was treated
    // as having the extension 'readme'. Use the final dot explicitly.
    const dotIndex = filename.lastIndexOf('.');
    const extension = dotIndex === -1 ? '' : filename.slice(dotIndex + 1).toLowerCase();
    if (!extension) {
        return {
            isValid: false,
            error: 'File must have an extension',
        };
    }
    // Normalize allow-list entries: lowercase and strip a leading dot.
    const isValid = allowedExtensions.some((ext) => ext.toLowerCase().replace(/^\./, '') === extension);
    return {
        isValid,
        error: isValid ? undefined : `File extension '.${extension}' is not allowed`,
    };
}
/**
 * Sanitizes an uploaded filename: strips path-traversal sequences,
 * directory separators, and control characters; trims leading/trailing
 * dots and whitespace; falls back to a generated name when nothing
 * survives; and caps the result at 255 characters, preserving a short
 * extension where present.
 */
function sanitizeFilename(filename) {
    // biome-ignore lint/suspicious/noControlCharactersInRegex: Intentional to remove control characters
    let safe = filename
        .replace(/\.\./g, '') // path traversal attempts
        .replace(/[/\\]/g, '') // directory separators
        .replace(/[\x00-\x1f\x7f-\x9f]/g, '') // control / non-printable chars
        .replace(/^[\s.]+|[\s.]+$/g, ''); // leading/trailing dots and spaces
    // Generate a default name when sanitization leaves nothing behind.
    if (!safe) {
        safe = `upload_${Date.now()}`;
    }
    // Enforce a common filesystem limit on filename length.
    const maxLength = 255;
    if (safe.length > maxLength) {
        const ext = safe.split('.').pop();
        if (ext && ext.length < 20) {
            // Keep the short extension; truncate the stem to fit.
            const stem = safe.substring(0, safe.lastIndexOf('.'));
            safe = `${stem.substring(0, maxLength - ext.length - 1)}.${ext}`;
        }
        else {
            safe = safe.substring(0, maxLength);
        }
    }
    return safe;
}
// Spec link embedded in client-facing error messages. (The `$1` suffix comes
// from the bundler; a second copy exists for the UploadError classes below.)
const GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1 = 'https://github.com/jaydenseric/graphql-multipart-request-spec';
/**
 * Parses a GraphQL multipart request per the spec above: expects the
 * 'operations' field, then 'map', then the file parts. Resolves with the
 * parsed `operations` value, with each mapped variable replaced by a promise
 * of file upload details (`Upload.promise`). File contents are buffered in
 * `WriteStream` capacitors, which are released when the response closes.
 *
 * @param request Node.js IncomingMessage-like readable with `headers`.
 * @param response Node.js ServerResponse-like emitter; its 'close' event
 *   triggers capacitor cleanup.
 * @param options Optional `{ maxFieldSize, maxFileSize, maxFiles }` limits.
 * @returns Promise resolving to the parsed operations; rejects with
 *   `http-errors` instances on malformed or oversized requests.
 */
async function processRequest(request, response, options) {
    const { maxFieldSize = DEFAULT_MAX_FIELD_SIZE, maxFileSize = DEFAULT_MAX_FILE_SIZE, maxFiles = DEFAULT_MAX_FILES, } = options ?? {};
    return new Promise((resolve, reject) => {
        // Set once the response closes; late `createReadStream` calls then
        // throw the recorded exit error instead of reading freed capacitors.
        let released = false;
        let exitError;
        let operations;
        let operationsPath;
        let map;
        const parser = busboy({
            headers: request.headers,
            defParamCharset: 'utf8',
            limits: {
                fieldSize: maxFieldSize,
                fields: 2, // Only operations and map.
                fileSize: maxFileSize,
                files: maxFiles,
            },
        });
        // Abort parsing exactly once with `error`, rejecting every upload
        // that has not already resolved.
        function exit(error, isParserError = false) {
            if (exitError)
                return;
            exitError = error;
            if (map) {
                for (const upload of map.values()) {
                    if (!upload.file)
                        upload.reject(exitError);
                }
            }
            // If the error came from the parser, don’t cause it to be emitted again.
            if (isParserError) {
                parser.destroy();
            }
            else {
                parser.destroy(exitError);
            }
            request.unpipe(parser);
            // With a sufficiently large request body, subsequent events in the same
            // event frame cause the stream to pause after the parser is destroyed. To
            // ensure that the request resumes, the call to .resume() is scheduled for
            // later in the event loop.
            setImmediate(() => request.resume());
            reject(exitError);
        }
        parser.on('field', (fieldName, value, { valueTruncated }) => {
            if (valueTruncated)
                return exit(createError(413, `The ‘${fieldName}’ multipart field value exceeds the ${maxFieldSize} byte size limit.`));
            switch (fieldName) {
                case 'operations':
                    try {
                        operations = JSON.parse(value);
                    }
                    catch (_error) {
                        return exit(createError(400, `Invalid JSON in the ‘operations’ multipart field (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1}).`));
                    }
                    // `operations` should be an object or an array. Note that arrays
                    // and `null` have an `object` type.
                    if (typeof operations !== 'object' || !operations)
                        return exit(createError(400, `Invalid type for the ‘operations’ multipart field (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1}).`));
                    operationsPath = objectPath(operations);
                    break;
                case 'map': {
                    // The spec requires 'operations' before 'map'.
                    if (!operations)
                        return exit(createError(400, `Disordered multipart fields; ‘map’ should follow ‘operations’ (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1}).`));
                    let parsedMap;
                    try {
                        parsedMap = JSON.parse(value);
                    }
                    catch (_error) {
                        return exit(createError(400, `Invalid JSON in the ‘map’ multipart field (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1}).`));
                    }
                    // `map` should be an object.
                    if (typeof parsedMap !== 'object' || !parsedMap || Array.isArray(parsedMap))
                        return exit(createError(400, `Invalid type for the ‘map’ multipart field (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1}).`));
                    const mapEntries = Object.entries(parsedMap);
                    // Check max files is not exceeded, even though the number of files
                    // to parse might not match the map provided by the client.
                    if (mapEntries.length > maxFiles)
                        return exit(createError(413, `${maxFiles} max file uploads exceeded.`));
                    map = new Map();
                    for (const [fieldName, paths] of mapEntries) {
                        if (!Array.isArray(paths))
                            return exit(createError(400, `Invalid type for the ‘map’ multipart field entry key ‘${fieldName}’ array (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1}).`));
                        map.set(fieldName, new Upload());
                        for (const [index, path] of paths.entries()) {
                            if (typeof path !== 'string')
                                return exit(createError(400, `Invalid type for the ‘map’ multipart field entry key ‘${fieldName}’ array index ‘${index}’ value (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1}).`));
                            try {
                                // Per the spec the mapped variable must be
                                // `null` before substitution.
                                if (operationsPath?.get(path) !== null) {
                                    throw new Error('Invalid object path');
                                }
                                operationsPath?.set(path, map.get(fieldName));
                            }
                            catch (_error) {
                                return exit(createError(400, `Invalid object path for the ‘map’ multipart field entry key ‘${fieldName}’ array index ‘${index}’ value ‘${path}’ (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1}).`));
                            }
                        }
                    }
                    // Resolve early so the server can begin executing the
                    // operation while file parts are still streaming in.
                    resolve(operations);
                }
            }
        });
        parser.on('file', (fieldName, stream, { filename, encoding, mimeType: mimetype }) => {
            if (!map) {
                ignoreStream(stream);
                return exit(createError(400, `Disordered multipart fields; files should follow ‘map’ (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1}).`));
            }
            const upload = map.get(fieldName);
            if (!upload) {
                // The file is extraneous. As the rest can still be processed, just
                // ignore it and don’t exit with an error.
                ignoreStream(stream);
                return;
            }
            let fileError;
            // Buffer the part to a temp file so multiple resolvers can read it.
            const capacitor = new WriteStream();
            capacitor.on('error', () => {
                stream.unpipe();
                stream.resume();
            });
            // busboy emits 'limit' when the part exceeds `fileSize`.
            stream.on('limit', () => {
                fileError = createError(413, `File truncated as it exceeds the ${maxFileSize} byte size limit.`);
                stream.unpipe();
                capacitor.destroy(fileError);
            });
            stream.on('error', (error) => {
                fileError = error;
                stream.unpipe();
                capacitor.destroy(fileError);
            });
            const file = {
                fieldName,
                filename,
                mimetype,
                encoding,
                createReadStream(options) {
                    const error = fileError || (released ? exitError : null);
                    if (error)
                        throw error;
                    return capacitor.createReadStream(options);
                },
                capacitor,
            };
            // Hide the capacitor from enumeration/serialization of the file.
            Object.defineProperty(file, 'capacitor', {
                enumerable: false,
                configurable: false,
                writable: false,
            });
            stream.pipe(capacitor);
            upload.resolve(file);
        });
        parser.once('filesLimit', () => exit(createError(413, `${maxFiles} max file uploads exceeded.`)));
        parser.once('finish', () => {
            request.unpipe(parser);
            request.resume();
            if (!operations)
                return exit(createError(400, `Missing multipart field ‘operations’ (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1}).`));
            if (!map)
                return exit(createError(400, `Missing multipart field ‘map’ (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1}).`));
            for (const upload of map.values())
                if (!upload.file)
                    upload.reject(createError(400, 'File missing in the request.'));
        });
        // Use the `on` method instead of `once` as in edge cases the same parser
        // could have multiple `error` events and all must be handled to prevent the
        // Node.js process exiting with an error. One edge case is if there is a
        // malformed part header as well as an unexpected end of the form.
        parser.on('error', (error) => {
            exit(error, true);
        });
        response.once('close', () => {
            released = true;
            if (map) {
                for (const upload of map.values()) {
                    if (upload.file) {
                        // Release resources and clean up temporary files.
                        upload.file.capacitor.release();
                    }
                }
            }
        });
        request.once('close', () => {
            if (!request.readableEnded)
                exit(createError(499, 'Request disconnected during file upload stream parsing.'));
        });
        request.pipe(parser);
    });
}
/**
* Creates a Bunway middleware for handling GraphQL multipart requests.
*
* @example
* ```ts
* import bunway from 'bunway';
* import { graphqlUploadBunway } from 'graphql-upload-ts';
* import { createYoga } from 'graphql-yoga';
*
* const app = bunway();
* const yoga = createYoga({ schema, multipart: false });
*
* app.all(
* '/graphql',
* graphqlUploadBunway({ maxFileSize: 20_000_000, maxFiles: 10 }),
* yogaToBunwayHandler(yoga),
* );
* ```
*
* On success the middleware sets `req.body` to the parsed GraphQL operation
* with `FileUpload` scalars substituted into the `variables` tree, then calls
* `next()`. On a parse/validation error it calls `next(error)` with an
* `HttpError` the caller can translate to a status code.
*/
function graphqlUploadBunway(options = {}) {
    const { processRequest: processRequest$1 = processRequest, ...uploadOptions } = options;
    return function graphqlUploadBunwayMiddleware(req, res, next) {
        // Bunway exposes the underlying Fetch `Request` as `req.original`.
        const contentType = (req.original.headers.get('content-type') ?? '').toLowerCase();
        if (!contentType.startsWith('multipart/')) {
            next();
            return;
        }
        // Kick off the async flow in a detached promise so we can keep the
        // middleware's synchronous signature (matches every other graphql-upload-ts
        // middleware).
        void (async () => {
            let rawBody;
            try {
                rawBody = await req.rawBody();
            }
            catch (err) {
                next(err);
                return;
            }
            // Plain headers record for busboy (it iterates `req.headers` as an
            // object). Bunway/Fetch `Headers` expose `get` + `forEach`, not
            // object-property access, so we materialize a record up-front.
            const headers = {};
            req.original.headers.forEach((value, key) => {
                headers[key] = value;
            });
            // Readable that yields the captured bytes. busboy pipes it; nothing
            // else should attach data/end listeners first, or it will switch to
            // flowing mode and the parser will see an empty stream.
            const nodeReadable = node_stream.Readable.from([Buffer.from(rawBody)]);
            // Decorate the readable so it looks like an IncomingMessage to
            // `processRequest` (headers/url/method/body).
            const shimReq = nodeReadable;
            shimReq.headers = headers;
            shimReq.url = req.original.url;
            shimReq.method = req.original.method;
            shimReq.body = null;
            // Bunway's BunResponse has no EventEmitter surface. Install a minimal
            // `.once` / `.on` backed by an internal EventEmitter so graphql-upload
            // can register its capacitor cleanup listeners, and fire 'close' when
            // `res.end()` runs.
            const resEmitter = new node_events.EventEmitter();
            const hadOnce = typeof res.once === 'function';
            if (!hadOnce) {
                res.once = ((event, listener) => {
                    resEmitter.once(event, listener);
                    return res;
                });
                res.on = ((event, listener) => {
                    resEmitter.on(event, listener);
                    return res;
                });
                const originalEnd = res.end.bind(res);
                res.end = ((...args) => {
                    const ret = originalEnd(...args);
                    resEmitter.emit('close');
                    return ret;
                });
            }
            try {
                const body = await processRequest$1(shimReq, res, uploadOptions);
                req.body = body;
                shimReq.body = body;
                next();
            }
            catch (err) {
                const httpErr = err;
                // Only set the HTTP status for client-safe errors
                // (`http-errors` sets `expose` for 4xx statuses).
                if ('status' in httpErr && 'expose' in httpErr && httpErr.expose) {
                    res.status(httpErr.status);
                }
                next(err);
            }
        })();
    };
}
/**
* Creates Express middleware for handling GraphQL multipart requests (file uploads).
* This middleware processes multipart/form-data requests and converts them into a format
* that GraphQL servers can understand.
*
* @example Basic setup with Apollo Server
* ```typescript
* import express from 'express';
* import { graphqlUploadExpress } from 'graphql-upload-ts';
* import { ApolloServer } from '@apollo/server';
*
* const app = express();
*
* app.use(
* '/graphql',
* graphqlUploadExpress({
* maxFileSize: 10_000_000, // 10MB
* maxFiles: 10
* })
* );
*
* // Apollo Server setup continues...
* ```
*/
function graphqlUploadExpress(options = {}) {
    const { processRequest: processRequest$1 = processRequest, overrideSendResponse = processRequest$1 !== processRequest, ...uploadOptions } = options;
    return function graphqlUploadExpressMiddleware(request, response, next) {
        if (!request.is('multipart/form-data')) {
            return next();
        }
        // Optionally delay `response.send` until the request stream has fully
        // ended, so the client can finish uploading before a response lands.
        if (overrideSendResponse) {
            const originalSend = response.send.bind(response);
            let requestFinished = false;
            request.on('end', () => {
                requestFinished = true;
            });
            response.send = (...args) => {
                const flush = () => {
                    // Restore the original method, then forward the call.
                    response.send = originalSend;
                    originalSend(...args);
                };
                if (requestFinished) {
                    flush();
                }
                else {
                    request.on('end', flush);
                }
                return response;
            };
        }
        processRequest$1(request, response, uploadOptions)
            .then((body) => {
            // Substitute the parsed operations (with Upload promises) for
            // the request body, as downstream GraphQL handlers expect.
            request.body = body;
            next();
        })
            .catch((error) => {
            // Expose client-safe HTTP statuses (http-errors convention).
            if ('status' in error && 'expose' in error && error.expose) {
                response.status(error.status);
            }
            next(error);
        });
    };
}
/**
* Creates Koa middleware for handling GraphQL multipart requests (file uploads).
* This middleware processes multipart/form-data requests and converts them into a format
* that GraphQL servers can understand.
*
* @example Basic setup with Apollo Server Koa
* ```typescript
* import Koa from 'koa';
* import { graphqlUploadKoa } from 'graphql-upload-ts';
* import { ApolloServer } from '@apollo/server';
* import { koaMiddleware } from '@as-integrations/koa';
*
* const app = new Koa();
*
* app.use(
* graphqlUploadKoa({
* maxFileSize: 10_000_000, // 10MB
* maxFiles: 10
* })
* );
*
* // Apollo Server setup continues...
* ```
*/
function graphqlUploadKoa(options = {}) {
    const { processRequest: processRequest$1 = processRequest, ...uploadOptions } = options;
    return async function graphqlUploadKoaMiddleware(ctx, next) {
        if (!ctx.request.is('multipart/form-data')) {
            return next();
        }
        // Resolves once the raw Node request stream has been fully consumed.
        const requestEnded = new Promise((resolve) => {
            ctx.req.on('end', resolve);
        });
        try {
            // Replace the body with the parsed operations (Upload promises in
            // the variables tree) for downstream GraphQL handlers.
            ctx.body = await processRequest$1(ctx.req, ctx.res, uploadOptions);
            await next();
        }
        finally {
            // Hold the middleware chain open until the upload stream ends so
            // Koa does not tear the connection down mid-upload.
            await requestEnded;
        }
    };
}
exports.UploadErrorCode = void 0;
// String enum (compiled from TypeScript) where each member's value equals
// its name — populate it from a single list of codes.
(function (UploadErrorCode) {
    const codes = [
        'FILE_TOO_LARGE',
        'TOO_MANY_FILES',
        'FIELD_TOO_LARGE',
        'INVALID_MULTIPART',
        'MISSING_OPERATIONS',
        'MISSING_MAP',
        'INVALID_JSON',
        'INVALID_MAP',
        'FILE_MISSING',
        'STREAM_ERROR',
        'REQUEST_DISCONNECTED',
        'INVALID_UPLOAD_VALUE',
        'UPLOAD_LITERAL_UNSUPPORTED',
        'UPLOAD_SERIALIZATION_UNSUPPORTED',
    ];
    for (const code of codes) {
        UploadErrorCode[code] = code;
    }
})(exports.UploadErrorCode || (exports.UploadErrorCode = {}));
/**
 * Base class for upload-related errors. Carries a machine-readable `code`
 * plus an HTTP `status` and `expose` flag following the http-errors
 * convention.
 */
class UploadError extends Error {
    constructor(message, code, status = 400, expose = true) {
        super(message);
        this.name = 'UploadError';
        Object.assign(this, { code, status, expose });
        // Repair the prototype chain for runtimes/transpilers that break
        // `class extends Error`, keeping `instanceof UploadError` reliable.
        Object.setPrototypeOf(this, UploadError.prototype);
    }
    /** Converts to a GraphQLError carrying `code` and `status` extensions. */
    toGraphQLError() {
        const extensions = {
            code: this.code,
            status: this.status,
        };
        return new graphql.GraphQLError(this.message, { extensions });
    }
}
class FileTooLargeError extends UploadError {
constructor(maxSize) {
super(`File truncated as it exceeds the ${maxSize} byte size limit.`, exports.UploadErrorCode.FILE_TOO_LARGE, 413);
}
}
class TooManyFilesError extends UploadError {
constructor(maxFiles) {
super(`${maxFiles} max file uploads exceeded.`, exports.UploadErrorCode.TOO_MANY_FILES, 413);
}
}
class FieldTooLargeError extends UploadError {
constructor(fieldName, maxSize) {
super(`The '${fieldName}' multipart field value exceeds the ${maxSize} byte size limit.`, exports.UploadErrorCode.FIELD_TOO_LARGE, 413);
}
}
/** 400: the multipart request was malformed (caller supplies the detail). */
class InvalidMultipartError extends UploadError {
    constructor(message) {
        super(message, exports.UploadErrorCode.INVALID_MULTIPART);
    }
}
class MissingOperationsError extends UploadError {
constructor() {
super(`Missing multipart field 'operations' (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`, exports.UploadErrorCode.MISSING_OPERATIONS);
}
}
class MissingMapError extends UploadError {
constructor() {
super(`Missing multipart field 'map' (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`, exports.UploadErrorCode.MISSING_MAP);
}
}
class InvalidJSONError extends UploadError {
constructor(fieldName) {
super(`Invalid JSON in the '${fieldName}' multipart field (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`, exports.UploadErrorCode.INVALID_JSON);
}
}
class InvalidMapError extends UploadError {
constructor(message) {
super(`${message} (${GRAPHQL_MULTIPART_REQUEST_SPEC_URL}).`, exports.UploadErrorCode.INVALID_MAP);
}
}
class FileMissingError extends UploadError {
constructor() {
super('File missing in the request.', exports.UploadErrorCode.FILE_MISSING);
}
}
/** 500: an upload stream failed (caller supplies the detail). */
class StreamError extends UploadError {
    constructor(message) {
        super(message, exports.UploadErrorCode.STREAM_ERROR, 500);
    }
}
/** 499 (client closed request): the connection dropped mid-parse. */
class RequestDisconnectedError extends UploadError {
    constructor() {
        super('Request disconnected during file upload stream parsing.', exports.UploadErrorCode.REQUEST_DISCONNECTED, 499);
    }
}
// Spec link used by the UploadError subclass messages above.
const GRAPHQL_MULTIPART_REQUEST_SPEC_URL = 'https://github.com/jaydenseric/graphql-multipart-request-spec';
// Public CommonJS surface of this bundle. (`exports.UploadErrorCode` is
// assigned earlier by the enum initializer.)
Object.assign(exports, {
    DEFAULT_MAX_FIELD_SIZE,
    DEFAULT_MAX_FILES,
    DEFAULT_MAX_FILE_SIZE,
    FieldTooLargeError,
    FileMissingError,
    FileTooLargeError,
    GRAPHQL_MULTIPART_REQUEST_SPEC_URL: GRAPHQL_MULTIPART_REQUEST_SPEC_URL$1,
    GraphQLUpload,
    InvalidJSONError,
    InvalidMapError,
    InvalidMultipartError,
    MissingMapError,
    MissingOperationsError,
    ReadAfterDestroyedError,
    ReadAfterReleasedError,
    ReadStream,
    RequestDisconnectedError,
    StreamError,
    TooManyFilesError,
    Upload,
    UploadError,
    WriteStream,
    graphqlUploadBunway,
    graphqlUploadExpress,
    graphqlUploadKoa,
    ignoreStream,
    processRequest,
    sanitizeFilename,
    validateFileExtension,
    validateMimeType,
    validateOptions,
});
//# sourceMappingURL=index.js.map