"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.HandlerLookup = exports.JsonRpcResponseHandler = exports.TimeoutHandler = exports.ResetConnectionHandler = exports.CloseConnectionHandler = exports.PassThroughHandler = exports.FileHandler = exports.StreamHandler = exports.CallbackHandler = exports.SimpleHandler = exports.AbortError = void 0;
const _ = require("lodash");
const url = require("url");
const tls = require("tls");
const http = require("http");
const https = require("https");
const fs = require("fs/promises");
const h2Client = require("http2-wrapper");
const cacheable_lookup_1 = require("cacheable-lookup");
const base64_arraybuffer_1 = require("base64-arraybuffer");
const stream_1 = require("stream");
const common_tags_1 = require("common-tags");
const typed_error_1 = require("typed-error");
const request_utils_1 = require("../../util/request-utils");
const header_utils_1 = require("../../util/header-utils");
const buffer_utils_1 = require("../../util/buffer-utils");
const socket_util_1 = require("../../util/socket-util");
const serialization_1 = require("../../serialization/serialization");
const body_serialization_1 = require("../../serialization/body-serialization");
const dns_1 = require("../../util/dns");
const error_1 = require("../../util/error");
const rule_parameters_1 = require("../rule-parameters");
const http_agents_1 = require("../http-agents");
const passthrough_handling_1 = require("../passthrough-handling");
const request_handler_definitions_1 = require("./request-handler-definitions");
// An error that indicates that the handler is aborting the request.
// This could be intentional, or an upstream server aborting the request.
class AbortError extends typed_error_1.TypedError {
constructor(message, code) {
super(message);
this.code = code;
}
}
exports.AbortError = AbortError;
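// Detects a Buffer that has been serialized to JSON (Buffer.prototype.toJSON
// produces { type: 'Buffer', data: [...] }), so it can be rehydrated before use.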
function isSerializedBuffer(obj) {
return obj && obj.type === 'Buffer' && !!obj.data;
}
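// Writes a fixed response: the configured status, status message, headers & body.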
class SimpleHandler extends request_handler_definitions_1.SimpleHandlerDefinition {
async handle(_request, response) {
if (this.headers)
(0, request_utils_1.dropDefaultHeaders)(response);
(0, request_utils_1.writeHead)(response, this.status, this.statusMessage, this.headers);
if (isSerializedBuffer(this.data)) {
this.data = Buffer.from(this.data);
}
response.end(this.data || "");
}
}
exports.SimpleHandler = SimpleHandler;
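// Normalizes a callback result and writes it to the response: a 'json' field is
// stringified and given a JSON content-type, a 'body' is encoded to match any
// content-encoding header (unless a rawBody is provided, which takes priority),
// and the status defaults to 200.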
async function writeResponseFromCallback(result, response) {
if (result.json !== undefined) {
result.headers = _.assign(result.headers || {}, { 'Content-Type': 'application/json' });
result.body = JSON.stringify(result.json);
delete result.json;
}
if (result.headers) {
(0, request_utils_1.dropDefaultHeaders)(response);
validateCustomHeaders({}, result.headers);
}
if (result.body && !result.rawBody) {
// rawBody takes priority if both are set (useful for backward compat), but if not then
// the body is automatically encoded to match the content-encoding header.
result.rawBody = await (0, request_utils_1.encodeBodyBuffer)(Buffer.from(result.body), result.headers ?? {});
}
(0, request_utils_1.writeHead)(response, result.statusCode || result.status || 200, result.statusMessage, result.headers);
response.end(result.rawBody || "");
}
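// Builds the response by calling a user-provided callback with the completed
// request. The callback can return response data, or 'close'/'reset' to kill
// the client connection instead of responding.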
class CallbackHandler extends request_handler_definitions_1.CallbackHandlerDefinition {
async handle(request, response) {
let req = await (0, request_utils_1.waitForCompletedRequest)(request);
let outResponse;
try {
outResponse = await this.callback(req);
}
catch (error) {
(0, request_utils_1.writeHead)(response, 500, 'Callback handler threw an exception');
response.end((0, error_1.isErrorLike)(error) ? error.toString() : error);
return;
}
if (outResponse === 'close') {
request.socket.end();
throw new AbortError('Connection closed intentionally by rule');
}
else if (outResponse === 'reset') {
(0, socket_util_1.requireSocketResetSupport)();
(0, socket_util_1.resetOrDestroy)(request);
throw new AbortError('Connection reset intentionally by rule');
}
else {
await writeResponseFromCallback(outResponse, response);
}
}
/**
* @internal
*/
static deserialize({ name, version }, channel) {
const rpcCallback = async (request) => {
const callbackResult = await channel.request({ args: [
(version || -1) >= 2
? (0, body_serialization_1.withSerializedBodyReader)(request)
: request // Backward compat: old handlers
] });
if (typeof callbackResult === 'string') {
return callbackResult;
}
else {
return (0, body_serialization_1.withDeserializedCallbackBuffers)(callbackResult);
}
};
// Pass across the name from the real callback, for explain()
Object.defineProperty(rpcCallback, "name", { value: name });
// Call the client's callback (via stream), and save a handler on our end for
// the response that comes back.
return new CallbackHandler(rpcCallback);
}
}
exports.CallbackHandler = CallbackHandler;
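// Streams the response body from a provided stream. Streams can only be read
// once, so this handler intentionally refuses to handle more than one request.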
class StreamHandler extends request_handler_definitions_1.StreamHandlerDefinition {
async handle(_request, response) {
if (!this.stream.done) {
if (this.headers)
(0, request_utils_1.dropDefaultHeaders)(response);
(0, request_utils_1.writeHead)(response, this.status, undefined, this.headers);
this.stream.pipe(response);
this.stream.done = true;
}
else {
throw new Error((0, common_tags_1.stripIndent) `
Stream request handler called more than once - this is not supported.
Streams can typically only be read once, so all subsequent requests would be empty.
To mock repeated stream requests, call 'thenStream' repeatedly with multiple streams.
(Have a better way to handle this? Open an issue at ${require('../../../package.json').bugs.url})
`);
}
}
/**
* @internal
*/
static deserialize(handlerData, channel) {
const handlerStream = new stream_1.Transform({
objectMode: true,
transform: function (message, encoding, callback) {
const { event, content } = message;
let deserializedEventData = content && (content.type === 'string' ? content.value :
content.type === 'buffer' ? Buffer.from(content.value, 'base64') :
content.type === 'arraybuffer' ? Buffer.from((0, base64_arraybuffer_1.decode)(content.value)) :
content.type === 'nil' && undefined);
if (event === 'data' && deserializedEventData) {
this.push(deserializedEventData);
}
else if (event === 'end') {
this.end();
}
callback();
}
});
// When we get piped (i.e. to a live request), ping upstream to start streaming, and then
// pipe the resulting data into our live stream (which is streamed to the request, like normal)
handlerStream.once('resume', () => {
channel.pipe(handlerStream);
channel.write({});
});
return new StreamHandler(handlerData.status, handlerStream, handlerData.headers);
}
}
exports.StreamHandler = StreamHandler;
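// Responds with the contents of a file, re-read from disk for every request.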
class FileHandler extends request_handler_definitions_1.FileHandlerDefinition {
async handle(_request, response) {
// Read the file first, to ensure we error cleanly if it's unavailable
const fileContents = await fs.readFile(this.filePath);
if (this.headers)
(0, request_utils_1.dropDefaultHeaders)(response);
(0, request_utils_1.writeHead)(response, this.status, this.statusMessage, this.headers);
response.end(fileContents);
}
}
exports.FileHandler = FileHandler;
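// Rejects callback/transform results that try to set HTTP/2 pseudo-headers
// (':'-prefixed), unless the value is unchanged from the original headers or
// the header name is explicitly whitelisted.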
function validateCustomHeaders(originalHeaders, modifiedHeaders, headerWhitelist = []) {
if (!modifiedHeaders)
return;
// We ignore most returned pseudo headers, so we error if you try to manually set them
const invalidHeaders = _(modifiedHeaders)
.pickBy((value, name) => name.toString().startsWith(':') &&
// We allow returning a preexisting header value - that's ignored
// silently, so that mutating & returning the provided headers is always safe.
value !== originalHeaders[name] &&
// In some cases, specific custom pseudoheaders may be allowed, e.g. requests
// can have custom :scheme and :authority headers set.
!headerWhitelist.includes(name))
.keys();
if (invalidHeaders.size() > 0) {
throw new Error(`Cannot set custom ${invalidHeaders.join(', ')} pseudoheader values`);
}
}
// Used in merging as a marker for values to omit, because lodash ignores undefined values.
const OMIT_SYMBOL = Symbol('omit-value');
// We play some games to preserve undefined values during serialization, because we differentiate them
// in some transforms from null/not-present keys.
const mapOmitToUndefined = (input) => _.mapValues(input, (v) => v === request_handler_definitions_1.SERIALIZED_OMIT || v === OMIT_SYMBOL
? undefined // Replace our omit placeholders with actual undefineds
: v);
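// Proxies requests upstream to their original target (or a configured forwarding
// address), optionally rewriting the request and/or response, either via the
// declarative transformRequest/transformResponse options or via the
// beforeRequest/beforeResponse callbacks.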
class PassThroughHandler extends request_handler_definitions_1.PassThroughHandlerDefinition {
async trustedCACertificates() {
if (!this.extraCACertificates.length)
return undefined;
if (!this._trustedCACertificates) {
this._trustedCACertificates = Promise.all(tls.rootCertificates
.concat(this.extraCACertificates.map(certObject => {
if ('cert' in certObject) {
return certObject.cert.toString('utf8');
}
else {
return fs.readFile(certObject.certPath, 'utf8');
}
})));
}
return this._trustedCACertificates;
}
lookup() {
if (!this.lookupOptions) {
if (!this._cacheableLookupInstance) {
// By default, use 10s caching of hostnames, just to avoid paying the
// ~10ms DNS query delay for 'localhost' with every single request.
this._cacheableLookupInstance = new dns_1.CachedDns(10000);
}
return this._cacheableLookupInstance.lookup;
}
else {
if (!this._cacheableLookupInstance) {
this._cacheableLookupInstance = new cacheable_lookup_1.default({
maxTtl: this.lookupOptions.maxTtl,
errorTtl: this.lookupOptions.errorTtl,
// As little caching of "use the fallback server" as possible:
fallbackDuration: 0
});
if (this.lookupOptions.servers) {
this._cacheableLookupInstance.servers = this.lookupOptions.servers;
}
}
return this._cacheableLookupInstance.lookup;
}
}
async handle(clientReq, clientRes) {
// Don't let Node add any default standard headers - we want full control
(0, request_utils_1.dropDefaultHeaders)(clientRes);
// Capture raw request data:
let { method, url: reqUrl, rawHeaders } = clientReq;
let { protocol, hostname, port, path } = url.parse(reqUrl);
// We have to capture the request stream immediately, to make sure nothing is lost if it
// goes past its max length (truncating the data) before we start sending upstream.
const clientReqBody = clientReq.body.asStream();
const isH2Downstream = (0, request_utils_1.isHttp2)(clientReq);
if ((0, socket_util_1.isLocalhostAddress)(hostname) && clientReq.remoteIpAddress && !(0, socket_util_1.isLocalhostAddress)(clientReq.remoteIpAddress)) {
// If we're proxying localhost traffic from another remote machine, then we should really be proxying
// back to that machine, not back to ourselves! Best example is docker containers: if we capture & inspect
// their localhost traffic, it should still be sent back into that docker container.
hostname = clientReq.remoteIpAddress;
// We don't update the host header - from the POV of the target, it's still localhost traffic.
}
if (this.forwarding) {
const { targetHost, updateHostHeader } = this.forwarding;
if (!targetHost.includes('/')) {
// We're forwarding to a bare hostname
[hostname, port] = targetHost.split(':');
}
else {
// We're forwarding to a fully specified URL; override the host etc, but never the path.
({ protocol, hostname, port } = url.parse(targetHost));
}
const hostHeaderName = isH2Downstream ? ':authority' : 'host';
let hostHeader = (0, header_utils_1.findRawHeader)(rawHeaders, hostHeaderName);
if (!hostHeader) {
// Should never happen really, but just in case:
hostHeader = [hostHeaderName, hostname];
rawHeaders.unshift(hostHeader);
}
if (updateHostHeader === undefined || updateHostHeader === true) {
// If updateHostHeader is true, or just not specified, match the new target
hostHeader[1] = hostname + (port ? `:${port}` : '');
}
else if (updateHostHeader) {
// If it's an explicit custom value, use that directly.
hostHeader[1] = updateHostHeader;
} // Otherwise: falsey means don't touch it.
}
// Check if this request is a request loop:
if ((0, socket_util_1.isSocketLoop)(this.outgoingSockets, clientReq.socket)) {
throw new Error((0, common_tags_1.oneLine) `
Passthrough loop detected. This probably means you're sending a request directly
to a passthrough endpoint, which is forwarding it to the target URL, which is a
passthrough endpoint, which is forwarding it to the target URL, which is a
passthrough endpoint...` +
'\n\n' + (0, common_tags_1.oneLine) `
You should either explicitly mock a response for this URL (${reqUrl}), or use
the server as a proxy, instead of making requests to it directly.
`);
}
// Override the request details, if a transform or callback is specified:
let reqBodyOverride;
// Set during modification here - if set, we allow overriding certain H2 headers so that manual
// modification of the supported headers works as expected.
let headersManuallyModified = false;
if (this.transformRequest) {
let headers = (0, header_utils_1.rawHeadersToObject)(rawHeaders);
const { replaceMethod, updateHeaders, replaceHeaders, replaceBody, replaceBodyFromFile, updateJsonBody, matchReplaceBody } = this.transformRequest;
if (replaceMethod) {
method = replaceMethod;
}
if (updateHeaders) {
headers = {
...headers,
...updateHeaders
};
headersManuallyModified = true;
}
else if (replaceHeaders) {
headers = { ...replaceHeaders };
headersManuallyModified = true;
}
if (replaceBody) {
// Note that we're replacing the body without actually waiting for the real one, so
// this can result in sending a request much more quickly!
reqBodyOverride = (0, buffer_utils_1.asBuffer)(replaceBody);
}
else if (replaceBodyFromFile) {
reqBodyOverride = await fs.readFile(replaceBodyFromFile);
}
else if (updateJsonBody) {
const { body: realBody } = await (0, request_utils_1.waitForCompletedRequest)(clientReq);
if (await realBody.getJson() === undefined) {
throw new Error("Can't transform non-JSON request body");
}
const updatedBody = _.mergeWith(await realBody.getJson(), updateJsonBody, (_oldValue, newValue) => {
// We want to remove values set to undefined, but Lodash ignores
// undefined return values here. Fortunately, JSON.stringify
// ignores Symbols, omitting them from the result.
if (newValue === undefined)
return OMIT_SYMBOL;
});
reqBodyOverride = (0, buffer_utils_1.asBuffer)(JSON.stringify(updatedBody));
}
else if (matchReplaceBody) {
const { body: realBody } = await (0, request_utils_1.waitForCompletedRequest)(clientReq);
const originalBody = await realBody.getText();
if (originalBody === undefined) {
throw new Error("Can't match & replace non-decodeable request body");
}
let replacedBody = originalBody;
for (let [match, result] of matchReplaceBody) {
replacedBody = replacedBody.replace(match, result);
}
if (replacedBody !== originalBody) {
reqBodyOverride = (0, buffer_utils_1.asBuffer)(replacedBody);
}
}
if (reqBodyOverride) {
// We always re-encode the body to match the resulting content-encoding header:
reqBodyOverride = await (0, request_utils_1.encodeBodyBuffer)(reqBodyOverride, headers);
headers['content-length'] = (0, passthrough_handling_1.getContentLengthAfterModification)(reqBodyOverride, clientReq.headers, (updateHeaders && updateHeaders['content-length'] !== undefined)
? headers // Iff you replaced the content length
: replaceHeaders);
}
if (headersManuallyModified || reqBodyOverride) {
// If the headers have been updated (implicitly or explicitly) we need to regenerate them. We avoid
// this if possible, because it normalizes headers, which is slightly lossy (e.g. they're lowercased).
rawHeaders = (0, header_utils_1.objectHeadersToRaw)(headers);
}
}
else if (this.beforeRequest) {
const completedRequest = await (0, request_utils_1.waitForCompletedRequest)(clientReq);
const modifiedReq = await this.beforeRequest({
...completedRequest,
headers: _.cloneDeep(completedRequest.headers),
rawHeaders: _.cloneDeep(completedRequest.rawHeaders)
});
if (modifiedReq?.response) {
if (modifiedReq.response === 'close') {
const socket = clientReq.socket;
socket.end();
throw new AbortError('Connection closed intentionally by rule');
}
else if (modifiedReq.response === 'reset') {
(0, socket_util_1.requireSocketResetSupport)();
(0, socket_util_1.resetOrDestroy)(clientReq);
throw new AbortError('Connection reset intentionally by rule');
}
else {
// The callback has provided a full response: don't passthrough at all, just use it.
await writeResponseFromCallback(modifiedReq.response, clientRes);
return;
}
}
method = modifiedReq?.method || method;
reqUrl = modifiedReq?.url || reqUrl;
headersManuallyModified = !!modifiedReq?.headers;
let headers = modifiedReq?.headers || clientReq.headers;
Object.assign(headers, isH2Downstream
? (0, passthrough_handling_1.getH2HeadersAfterModification)(reqUrl, clientReq.headers, modifiedReq?.headers)
: { 'host': (0, passthrough_handling_1.getHostAfterModification)(reqUrl, clientReq.headers, modifiedReq?.headers) });
validateCustomHeaders(clientReq.headers, modifiedReq?.headers, passthrough_handling_1.OVERRIDABLE_REQUEST_PSEUDOHEADERS // These are handled by getCorrectPseudoheaders above
);
reqBodyOverride = await (0, passthrough_handling_1.buildOverriddenBody)(modifiedReq, headers);
if (reqBodyOverride) {
// Automatically match the content-length to the body, unless it was explicitly overridden.
headers['content-length'] = (0, passthrough_handling_1.getContentLengthAfterModification)(reqBodyOverride, clientReq.headers, modifiedReq?.headers);
}
// Reparse the new URL, if necessary
if (modifiedReq?.url) {
if (!(0, request_utils_1.isAbsoluteUrl)(modifiedReq?.url))
throw new Error("Overridden request URLs must be absolute");
({ protocol, hostname, port, path } = url.parse(reqUrl));
}
rawHeaders = (0, header_utils_1.objectHeadersToRaw)(headers);
}
const strictHttpsChecks = (0, passthrough_handling_1.shouldUseStrictHttps)(hostname, port, this.ignoreHostHttpsErrors);
// Use a client cert if it's listed for the host+port or whole hostname
const hostWithPort = `${hostname}:${port}`;
const clientCert = this.clientCertificateHostMap[hostWithPort] ||
this.clientCertificateHostMap[hostname] ||
{};
const trustedCerts = await this.trustedCACertificates();
const caConfig = trustedCerts
? { ca: trustedCerts }
: {};
// We only do H2 upstream for HTTPS. Http2-wrapper doesn't support H2C, it's rarely used
// and we can't use ALPN to detect HTTP/2 support cleanly.
let shouldTryH2Upstream = isH2Downstream && protocol === 'https:';
const effectivePort = !!port
? parseInt(port, 10)
: (protocol === 'https:' ? 443 : 80);
let family;
if (hostname === 'localhost') {
// Annoying special case: some localhost servers listen only on either ipv4 or ipv6.
// Very specific situation, but a very common one for development use.
// We need to work out which family to use ourselves, as Node sometimes makes bad choices.
if (await (0, socket_util_1.isLocalPortActive)('::1', effectivePort))
family = 6;
else
family = 4;
}
// Remote clients might configure a passthrough rule with a parameter reference for the proxy,
// delegating proxy config to the admin server. That's fine initially, but you can't actually
// handle a request in that case - make sure our proxyConfig is always dereferenced before use.
const proxySettingSource = (0, rule_parameters_1.assertParamDereferenced)(this.proxyConfig);
// Mirror the keep-alive-ness of the incoming request in our outgoing request
const agent = await (0, http_agents_1.getAgent)({
protocol: (protocol || undefined),
hostname: hostname,
port: effectivePort,
tryHttp2: shouldTryH2Upstream,
keepAlive: (0, request_utils_1.shouldKeepAlive)(clientReq),
proxySettingSource
});
if (agent && !('http2' in agent)) {
// I.e. only use HTTP/2 if we're using an HTTP/2-compatible agent
shouldTryH2Upstream = false;
}
let makeRequest = (shouldTryH2Upstream
? (options, cb) => h2Client.auto(options, cb).catch((e) => {
// If an error occurs during auto-detection via ALPN: a TypeError implies
// it's an invalid HTTP/2 request that was rejected, while anything else
// implies an upstream HTTP/2 issue.
e.causedByUpstreamError = !(e instanceof TypeError);
throw e;
})
// HTTP/1 + TLS
: protocol === 'https:'
? https.request
// HTTP/1 plaintext:
: http.request);
if (isH2Downstream && shouldTryH2Upstream) {
// We drop all incoming pseudoheaders, and regenerate them (except legally modified ones)
rawHeaders = rawHeaders.filter(([key]) => !key.toString().startsWith(':') ||
(headersManuallyModified &&
passthrough_handling_1.OVERRIDABLE_REQUEST_PSEUDOHEADERS.includes(key.toLowerCase())));
}
else if (isH2Downstream && !shouldTryH2Upstream) {
rawHeaders = (0, header_utils_1.h2HeadersToH1)(rawHeaders);
}
// Drop the proxy-connection header. This is almost always intended for us, not for upstream
// servers, and forwarding it causes problems (most notably, it triggers weird-traffic blocks,
// e.g. from Cloudflare).
rawHeaders = rawHeaders.filter(([key]) => key.toLowerCase() !== 'proxy-connection');
let serverReq;
return new Promise((resolve, reject) => (async () => {
serverReq = await makeRequest({
protocol,
method,
hostname,
port,
family,
path,
headers: shouldTryH2Upstream
? (0, header_utils_1.rawHeadersToObjectPreservingCase)(rawHeaders)
: (0, header_utils_1.flattenPairedRawHeaders)(rawHeaders),
lookup: this.lookup(),
// ^ Cast required to handle __promisify__ type hack in the official Node types
agent,
// TLS options:
...passthrough_handling_1.UPSTREAM_TLS_OPTIONS,
minVersion: strictHttpsChecks ? tls.DEFAULT_MIN_VERSION : 'TLSv1',
rejectUnauthorized: strictHttpsChecks,
...clientCert,
...caConfig
}, (serverRes) => (async () => {
serverRes.on('error', (e) => {
e.causedByUpstreamError = true;
reject(e);
});
let serverStatusCode = serverRes.statusCode;
let serverStatusMessage = serverRes.statusMessage;
let serverRawHeaders = (0, header_utils_1.pairFlatRawHeaders)(serverRes.rawHeaders);
let resBodyOverride;
if (isH2Downstream) {
serverRawHeaders = (0, header_utils_1.h1HeadersToH2)(serverRawHeaders);
}
if (this.transformResponse) {
let serverHeaders = (0, header_utils_1.rawHeadersToObject)(serverRawHeaders);
const { replaceStatus, updateHeaders, replaceHeaders, replaceBody, replaceBodyFromFile, updateJsonBody, matchReplaceBody } = this.transformResponse;
if (replaceStatus) {
serverStatusCode = replaceStatus;
serverStatusMessage = undefined; // Reset to default
}
if (updateHeaders) {
serverHeaders = {
...serverHeaders,
...updateHeaders
};
}
else if (replaceHeaders) {
serverHeaders = { ...replaceHeaders };
}
if (replaceBody) {
// Note that we're replacing the body without actually waiting for the real one, so
// this can result in sending a request much more quickly!
resBodyOverride = (0, buffer_utils_1.asBuffer)(replaceBody);
}
else if (replaceBodyFromFile) {
resBodyOverride = await fs.readFile(replaceBodyFromFile);
}
else if (updateJsonBody) {
const rawBody = await (0, buffer_utils_1.streamToBuffer)(serverRes);
const realBody = (0, request_utils_1.buildBodyReader)(rawBody, serverRes.headers);
if (await realBody.getJson() === undefined) {
throw new Error("Can't transform non-JSON response body");
}
const updatedBody = _.mergeWith(await realBody.getJson(), updateJsonBody, (_oldValue, newValue) => {
// We want to remove values set to undefined, but Lodash ignores
// undefined return values here. Fortunately, JSON.stringify
// ignores Symbols, omitting them from the result.
if (newValue === undefined)
return OMIT_SYMBOL;
});
resBodyOverride = (0, buffer_utils_1.asBuffer)(JSON.stringify(updatedBody));
}
else if (matchReplaceBody) {
const rawBody = await (0, buffer_utils_1.streamToBuffer)(serverRes);
const realBody = (0, request_utils_1.buildBodyReader)(rawBody, serverRes.headers);
const originalBody = await realBody.getText();
if (originalBody === undefined) {
throw new Error("Can't match & replace non-decodeable response body");
}
let replacedBody = originalBody;
for (let [match, result] of matchReplaceBody) {
replacedBody = replacedBody.replace(match, result);
}
if (replacedBody !== originalBody) {
resBodyOverride = (0, buffer_utils_1.asBuffer)(replacedBody);
}
}
if (resBodyOverride) {
// We always re-encode the body to match the resulting content-encoding header:
resBodyOverride = await (0, request_utils_1.encodeBodyBuffer)(resBodyOverride, serverHeaders);
serverHeaders['content-length'] = (0, passthrough_handling_1.getContentLengthAfterModification)(resBodyOverride, serverRes.headers, (updateHeaders && updateHeaders['content-length'] !== undefined)
? serverHeaders // Iff you replaced the content length
: replaceHeaders, method === 'HEAD' // HEAD responses are allowed mismatched content-length
);
}
serverRawHeaders = (0, header_utils_1.objectHeadersToRaw)(serverHeaders);
}
else if (this.beforeResponse) {
let modifiedRes;
let body;
body = await (0, buffer_utils_1.streamToBuffer)(serverRes);
let serverHeaders = (0, header_utils_1.rawHeadersToObject)(serverRawHeaders);
modifiedRes = await this.beforeResponse({
id: clientReq.id,
statusCode: serverStatusCode,
statusMessage: serverRes.statusMessage,
headers: serverHeaders,
rawHeaders: _.cloneDeep(serverRawHeaders),
body: (0, request_utils_1.buildBodyReader)(body, serverHeaders)
});
if (modifiedRes === 'close') {
// Dump the real response data and kill the client socket:
serverRes.resume();
clientReq.socket.end();
throw new AbortError('Connection closed intentionally by rule');
}
else if (modifiedRes === 'reset') {
(0, socket_util_1.requireSocketResetSupport)();
// Dump the real response data and kill the client socket:
serverRes.resume();
(0, socket_util_1.resetOrDestroy)(clientReq);
throw new AbortError('Connection reset intentionally by rule');
}
validateCustomHeaders(serverHeaders, modifiedRes?.headers);
serverStatusCode = modifiedRes?.statusCode ||
modifiedRes?.status ||
serverStatusCode;
serverStatusMessage = modifiedRes?.statusMessage ||
serverStatusMessage;
serverHeaders = modifiedRes?.headers || serverHeaders;
resBodyOverride = await (0, passthrough_handling_1.buildOverriddenBody)(modifiedRes, serverHeaders);
if (resBodyOverride) {
serverHeaders['content-length'] = (0, passthrough_handling_1.getContentLengthAfterModification)(resBodyOverride, serverRes.headers, modifiedRes?.headers, method === 'HEAD' // HEAD responses are allowed mismatched content-length
);
}
else {
// If you don't specify a body override, we need to use the real body
// anyway, because we've already read it, so streaming it to the
// response won't work.
resBodyOverride = body;
}
serverRawHeaders = (0, header_utils_1.objectHeadersToRaw)(serverHeaders);
}
(0, request_utils_1.writeHead)(clientRes, serverStatusCode, serverStatusMessage, serverRawHeaders
.filter(([key, value]) => {
if (key === ':status')
return false;
if (!(0, request_utils_1.validateHeader)(key, value)) {
console.warn(`Not forwarding invalid header: "${key}: ${value}"`);
// Nothing else we can do in this case regardless - setHeaders will
// throw within Node if we try to set this value.
return false;
}
return true;
}));
if (resBodyOverride) {
// Return the override data to the client:
clientRes.end(resBodyOverride);
// Dump the real response data:
serverRes.resume();
resolve();
}
else {
serverRes.pipe(clientRes);
serverRes.once('end', resolve);
}
})().catch(reject));
serverReq.once('socket', (socket) => {
// This event can fire multiple times for keep-alive sockets, which are used to
// make multiple requests. If/when that happens, we don't need more event listeners.
if (this.outgoingSockets.has(socket))
return;
// Add this socket to our list of outgoing sockets, once it's connected (before then it has no local port)
if (socket.connecting) {
socket.once('connect', () => {
this.outgoingSockets.add(socket);
});
}
else if (socket.localPort !== undefined) {
this.outgoingSockets.add(socket);
}
// Remove this socket from our list of outgoing sockets when it's closed.
// This is called for both clean closes & errors.
socket.once('close', () => this.outgoingSockets.delete(socket));
});
if (reqBodyOverride) {
clientReqBody.resume(); // Dump any remaining real request body
if (reqBodyOverride.length > 0)
serverReq.end(reqBodyOverride);
else
serverReq.end(); // http2-wrapper fails given an empty buffer for methods that aren't allowed a body
}
else {
// asStream includes all content, including the body before this call
clientReqBody.pipe(serverReq);
clientReqBody.on('error', () => serverReq.abort());
}
// If the downstream connection aborts, before the response has been completed,
// we also abort the upstream connection. Important to avoid unnecessary connections,
// and to correctly proxy client connection behaviour to the upstream server.
function abortUpstream() {
serverReq.abort();
}
// Handle the case where the downstream connection is prematurely closed before
// fully sending the request or receiving the response.
clientReq.on('aborted', abortUpstream);
clientRes.on('close', abortUpstream);
// Disable the upstream request abort handlers once the response has been received.
clientRes.once('finish', () => {
clientReq.off('aborted', abortUpstream);
clientRes.off('close', abortUpstream);
});
serverReq.on('error', (e) => {
e.causedByUpstreamError = true;
reject(e);
});
// We always start upstream connections *immediately*. This might be less efficient, but it
// ensures that we're accurately mirroring downstream, which has indeed already connected.
serverReq.flushHeaders();
// For similar reasons, we don't want any buffering on outgoing data at all if possible:
serverReq.setNoDelay(true);
})().catch(reject)).catch((e) => {
// All errors anywhere above (thrown or from explicit reject()) should end up here.
// We tag the response with the error code, for debugging from events:
clientRes.tags.push('passthrough-error:' + e.code);
// Tag responses, so programmatic examination can react to this
// event, without having to parse response data or similar.
const tlsAlertMatch = /SSL alert number (\d+)/.exec(e.message ?? '');
if (tlsAlertMatch) {
clientRes.tags.push('passthrough-tls-error:ssl-alert-' + tlsAlertMatch[1]);
}
if (e.causedByUpstreamError && !serverReq?.aborted) {
if (e.code === 'ECONNRESET' || e.code === 'ECONNREFUSED' || this.simulateConnectionErrors) {
// The upstream socket failed: forcibly break the downstream stream to match. This could
// happen due to a reset, TLS or DNS failures, or anything - but critically it's a
// connection-level issue, so we try to create connection issues downstream.
(0, socket_util_1.resetOrDestroy)(clientReq);
throw new AbortError(`Upstream connection error: ${e.message ?? e}`, e.code);
}
else {
e.statusCode = 502;
e.statusMessage = 'Error communicating with upstream server';
throw e;
}
}
else {
throw e;
}
});
}
/**
* @internal
*/
static deserialize(data, channel, ruleParams) {
let beforeRequest;
if (data.hasBeforeRequestCallback) {
beforeRequest = async (req) => {
const result = (0, body_serialization_1.withDeserializedCallbackBuffers)(await channel.request('beforeRequest', {
args: [(0, body_serialization_1.withSerializedBodyReader)(req)]
}));
if (result.response && typeof result.response !== 'string') {
result.response = (0, body_serialization_1.withDeserializedCallbackBuffers)(result.response);
}
return result;
};
}
let beforeResponse;
if (data.hasBeforeResponseCallback) {
beforeResponse = async (res) => {
const callbackResult = await channel.request('beforeResponse', {
args: [(0, body_serialization_1.withSerializedBodyReader)(res)]
});
if (callbackResult && typeof callbackResult !== 'string') {
return (0, body_serialization_1.withDeserializedCallbackBuffers)(callbackResult);
}
else {
return callbackResult;
}
};
}
return new PassThroughHandler({
beforeRequest,
beforeResponse,
proxyConfig: (0, serialization_1.deserializeProxyConfig)(data.proxyConfig, channel, ruleParams),
transformRequest: data.transformRequest ? {
...data.transformRequest,
...(data.transformRequest?.replaceBody !== undefined ? {
replaceBody: (0, serialization_1.deserializeBuffer)(data.transformRequest.replaceBody)
} : {}),
...(data.transformRequest?.updateHeaders !== undefined ? {
updateHeaders: mapOmitToUndefined(JSON.parse(data.transformRequest.updateHeaders))
} : {}),
...(data.transformRequest?.updateJsonBody !== undefined ? {
updateJsonBody: mapOmitToUndefined(JSON.parse(data.transformRequest.updateJsonBody))
} : {}),
...(data.transformRequest?.matchReplaceBody !== undefined ? {
matchReplaceBody: data.transformRequest.matchReplaceBody.map(([match, result]) => [
!_.isString(match) && 'regexSource' in match
? new RegExp(match.regexSource, match.flags)
: match,
result
])
} : {})
} : undefined,
transformResponse: data.transformResponse ? {
...data.transformResponse,
...(data.transformResponse?.replaceBody !== undefined ? {
replaceBody: (0, serialization_1.deserializeBuffer)(data.transformResponse.replaceBody)
} : {}),
...(data.transformResponse?.updateHeaders !== undefined ? {
updateHeaders: mapOmitToUndefined(JSON.parse(data.transformResponse.updateHeaders))
} : {}),
...(data.transformResponse?.updateJsonBody !== undefined ? {
updateJsonBody: mapOmitToUndefined(JSON.parse(data.transformResponse.updateJsonBody))
} : {}),
...(data.transformResponse?.matchReplaceBody !== undefined ? {
matchReplaceBody: data.transformResponse.matchReplaceBody.map(([match, result]) => [
!_.isString(match) && 'regexSource' in match
? new RegExp(match.regexSource, match.flags)
: match,
result
])
} : {})
} : undefined,
// Backward compat for old clients:
...data.forwardToLocation ? {
forwarding: { targetHost: data.forwardToLocation }
} : {},
forwarding: data.forwarding,
lookupOptions: data.lookupOptions,
simulateConnectionErrors: !!data.simulateConnectionErrors,
ignoreHostHttpsErrors: data.ignoreHostCertificateErrors,
trustAdditionalCAs: data.extraCACertificates,
clientCertificateHostMap: _.mapValues(data.clientCertificateHostMap, ({ pfx, passphrase }) => ({ pfx: (0, serialization_1.deserializeBuffer)(pfx), passphrase })),
});
}
}
exports.PassThroughHandler = PassThroughHandler;
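// Cleanly closes the client connection without sending any response.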
class CloseConnectionHandler extends request_handler_definitions_1.CloseConnectionHandlerDefinition {
async handle(request) {
const socket = request.socket;
socket.end();
throw new AbortError('Connection closed intentionally by rule');
}
}
exports.CloseConnectionHandler = CloseConnectionHandler;
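// Resets the client connection (a TCP RST, where supported) without sending any response.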
class ResetConnectionHandler extends request_handler_definitions_1.ResetConnectionHandlerDefinition {
constructor() {
super();
(0, socket_util_1.requireSocketResetSupport)();
}
async handle(request) {
(0, socket_util_1.requireSocketResetSupport)();
(0, socket_util_1.resetOrDestroy)(request);
throw new AbortError('Connection reset intentionally by rule');
}
/**
* @internal
*/
static deserialize() {
(0, socket_util_1.requireSocketResetSupport)();
return new ResetConnectionHandler();
}
}
exports.ResetConnectionHandler = ResetConnectionHandler;
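// Accepts the request but never responds, leaving the socket hanging indefinitely.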
class TimeoutHandler extends request_handler_definitions_1.TimeoutHandlerDefinition {
async handle() {
// Do nothing, leaving the socket open but never sending a response.
return new Promise(() => { });
}
}
exports.TimeoutHandler = TimeoutHandler;
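// Responds to a valid JSON-RPC 2.0 request with a 200 JSON response, echoing the
// request id and merging in the configured result.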
class JsonRpcResponseHandler extends request_handler_definitions_1.JsonRpcResponseHandlerDefinition {
async handle(request, response) {
const data = await request.body.asJson()
.catch(() => { }); // Handle parsing errors with the check below
if (!data || data.jsonrpc !== '2.0' || !('id' in data)) { // N.B. id can be null
throw new Error("Can't send a JSON-RPC response to an invalid JSON-RPC request");
}
response.writeHead(200, {
'content-type': 'application/json'
});
response.end(JSON.stringify({
jsonrpc: '2.0',
id: data.id,
...this.result
}));
}
}
exports.JsonRpcResponseHandler = JsonRpcResponseHandler;
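// Maps serialized handler type keys to their handler classes, used to look up
// the right class when deserializing handler data (each class provides a static
// deserialize method).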
exports.HandlerLookup = {
'simple': SimpleHandler,
'callback': CallbackHandler,
'stream': StreamHandler,
'file': FileHandler,
'passthrough': PassThroughHandler,
'close-connection': CloseConnectionHandler,
'reset-connection': ResetConnectionHandler,
'timeout': TimeoutHandler,
'json-rpc-response': JsonRpcResponseHandler
};
//# sourceMappingURL=request-handlers.js.map