// mockttp
// Mock HTTP server for testing HTTP clients and stubbing webservices
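//
// For context, the step implementations below back mockttp's rule-builder API. A rough
// usage sketch (based on the public mockttp API - getLocal(), forGet(), thenReply(),
// etc. - not on anything defined in this file):
//
//   const mockttp = require('mockttp');
//   const server = mockttp.getLocal();
//
//   await server.start();
//   await server.forGet('/mocked-path').thenReply(200, 'Mocked response');
//   // Point your HTTP client at server.url, make requests, then:
//   await server.stop();
//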
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.StepLookup = exports.DelayStepImpl = exports.JsonRpcResponseStepImpl = exports.TimeoutStepImpl = exports.ResetConnectionStepImpl = exports.CloseConnectionStepImpl = exports.PassThroughStepImpl = exports.FileStepImpl = exports.StreamStepImpl = exports.CallbackStepImpl = exports.FixedResponseStepImpl = exports.AbortError = void 0;
const buffer_1 = require("buffer");
const url = require("url");
const http = require("http");
const https = require("https");
const _ = require("lodash");
const fs = require("fs/promises");
const h2Client = require("http2-wrapper");
const base64_arraybuffer_1 = require("base64-arraybuffer");
const stream_1 = require("stream");
const common_tags_1 = require("common-tags");
const typed_error_1 = require("typed-error");
const fast_json_patch_1 = require("fast-json-patch");
const util_1 = require("@httptoolkit/util");
const url_1 = require("../../util/url");
const request_utils_1 = require("../../util/request-utils");
const header_utils_1 = require("../../util/header-utils");
const buffer_utils_1 = require("../../util/buffer-utils");
const socket_util_1 = require("../../util/socket-util");
const match_replace_1 = require("../match-replace");
const serialization_1 = require("../../serialization/serialization");
const body_serialization_1 = require("../../serialization/body-serialization");
const rule_parameters_1 = require("../rule-parameters");
const http_agents_1 = require("../http-agents");
const passthrough_handling_1 = require("../passthrough-handling");
const request_step_definitions_1 = require("./request-step-definitions");
// An error that indicates that the step is aborting the request.
// This could be intentional, or an upstream server aborting the request.
class AbortError extends typed_error_1.TypedError {
constructor(message, code) {
super(message);
this.code = code;
}
}
exports.AbortError = AbortError;
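// Detects a Buffer that was JSON-serialized into its { type: 'Buffer', data: [...] } form: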
function isSerializedBuffer(obj) {
return obj?.type === 'Buffer' && !!obj.data;
}
class FixedResponseStepImpl extends request_step_definitions_1.FixedResponseStep {
async handle(_request, response) {
if (this.headers)
(0, header_utils_1.dropDefaultHeaders)(response);
(0, request_utils_1.writeHead)(response, this.status, this.statusMessage, this.headers);
if (isSerializedBuffer(this.data)) {
this.data = buffer_1.Buffer.from(this.data);
}
if (this.trailers) {
response.addTrailers(this.trailers);
}
response.end(this.data || "");
}
}
exports.FixedResponseStepImpl = FixedResponseStepImpl;
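// Writes a callback result to the response: serializes `json` if provided, encodes `body`
// to match any content-encoding header (unless a pre-encoded `rawBody` is given), then
// writes the head, trailers and body.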
async function writeResponseFromCallback(result, response) {
if (result.json !== undefined) {
result.headers = Object.assign(result.headers || {}, {
'Content-Type': 'application/json'
});
result.body = JSON.stringify(result.json);
delete result.json;
}
if (result.headers) {
(0, header_utils_1.dropDefaultHeaders)(response);
validateCustomHeaders({}, result.headers);
}
if (result.body && !result.rawBody) {
// rawBody takes priority if both are set (useful for backward compat), but if only
// body is set, it's automatically encoded to match the content-encoding header.
result.rawBody = await (0, request_utils_1.encodeBodyBuffer)(
// Separate string case mostly required due to TS type issues:
typeof result.body === 'string'
? buffer_1.Buffer.from(result.body, "utf8")
: buffer_1.Buffer.from(result.body), result.headers ?? {});
}
(0, request_utils_1.writeHead)(response, result.statusCode || 200, result.statusMessage, result.headers);
if (result.trailers)
response.addTrailers(result.trailers);
response.end(result.rawBody || "");
}
class CallbackStepImpl extends request_step_definitions_1.CallbackStep {
async handle(request, response) {
let req = await (0, request_utils_1.waitForCompletedRequest)(request);
let outResponse;
try {
outResponse = await this.callback(req);
}
catch (error) {
(0, request_utils_1.writeHead)(response, 500, 'Callback step threw an exception');
console.warn(`Callback step exception: ${error.message ?? error}`);
response.end((0, util_1.isErrorLike)(error) ? error.toString() : error);
return;
}
if (outResponse === 'close') {
request.socket.end();
throw new AbortError('Connection closed intentionally by rule', 'E_RULE_CB_CLOSE');
}
else if (outResponse === 'reset') {
(0, socket_util_1.requireSocketResetSupport)();
(0, socket_util_1.resetOrDestroy)(request);
throw new AbortError('Connection reset intentionally by rule', 'E_RULE_CB_RESET');
}
else {
await writeResponseFromCallback(outResponse, response);
}
}
/**
* @internal
*/
static deserialize({ name }, channel, options) {
const rpcCallback = async (request) => {
const callbackResult = await channel.request({ args: [await (0, body_serialization_1.withSerializedBodyReader)(request, options.bodySerializer)] });
if (typeof callbackResult === 'string') {
return callbackResult;
}
else {
return (0, body_serialization_1.withDeserializedCallbackBuffers)(callbackResult);
}
};
// Pass across the name from the real callback, for explain()
Object.defineProperty(rpcCallback, "name", { value: name });
// Call the client's callback (via stream), and save a step on our end for
// the response that comes back.
return new request_step_definitions_1.CallbackStep(rpcCallback);
}
}
exports.CallbackStepImpl = CallbackStepImpl;
class StreamStepImpl extends request_step_definitions_1.StreamStep {
async handle(_request, response) {
if (!this.stream.done) {
if (this.headers)
(0, header_utils_1.dropDefaultHeaders)(response);
(0, request_utils_1.writeHead)(response, this.status, undefined, this.headers);
response.flushHeaders();
this.stream.pipe(response);
this.stream.done = true;
this.stream.on('error', (e) => response.destroy(e));
}
else {
throw new Error((0, common_tags_1.stripIndent) `
Stream request step called more than once - this is not supported.
Streams can typically only be read once, so all subsequent requests would be empty.
To mock repeated stream requests, call 'thenStream' repeatedly with multiple streams.
(Have a better way to handle this? Open an issue at ${require('../../../package.json').bugs.url})
`);
}
}
/**
* @internal
*/
static deserialize(stepData, channel) {
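// Build a Transform stream that turns serialized stream messages from the client
// ({ event, content }) back into raw data chunks for the live response: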
const stepStream = new stream_1.Transform({
objectMode: true,
transform: function (message, encoding, callback) {
const { event, content } = message;
let deserializedEventData = content && (content.type === 'string' ? content.value :
content.type === 'buffer' ? buffer_1.Buffer.from(content.value, 'base64') :
content.type === 'arraybuffer' ? buffer_1.Buffer.from((0, base64_arraybuffer_1.decode)(content.value)) :
content.type === 'nil' && undefined);
if (event === 'data' && deserializedEventData) {
this.push(deserializedEventData);
}
else if (event === 'end') {
this.end();
}
callback();
}
});
// When we get piped (i.e. to a live request), ping upstream to start streaming, and then
// pipe the resulting data into our live stream (which is streamed to the request, like normal)
stepStream.once('resume', () => {
channel.pipe(stepStream);
channel.write({});
});
return new request_step_definitions_1.StreamStep(stepData.status, stepStream, stepData.headers);
}
}
exports.StreamStepImpl = StreamStepImpl;
class FileStepImpl extends request_step_definitions_1.FileStep {
async handle(_request, response) {
// Read the file first, to ensure we error cleanly if it's unavailable
const fileContents = await fs.readFile(this.filePath);
if (this.headers)
(0, header_utils_1.dropDefaultHeaders)(response);
(0, request_utils_1.writeHead)(response, this.status, this.statusMessage, this.headers);
response.end(fileContents);
}
}
exports.FileStepImpl = FileStepImpl;
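// Throws if callback-modified headers try to set new pseudo-header (':'-prefixed) values,
// other than values unchanged from the original request or explicitly whitelisted: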
function validateCustomHeaders(originalHeaders, modifiedHeaders, headerWhitelist = []) {
if (!modifiedHeaders)
return;
// We ignore most returned pseudo headers, so we error if you try to manually set them
const invalidHeaders = _(modifiedHeaders)
.pickBy((value, name) => name.toString().startsWith(':') &&
// We allow returning a preexisting header value - that's ignored
// silently, so that mutating & returning the provided headers is always safe.
value !== originalHeaders[name] &&
// In some cases, specific custom pseudoheaders may be allowed, e.g. requests
// can have custom :scheme and :authority headers set.
!headerWhitelist.includes(name))
.keys();
if (invalidHeaders.size() > 0) {
throw new Error(`Cannot set custom ${invalidHeaders.join(', ')} pseudoheader values`);
}
}
// Used in merging as a marker for values to omit, because lodash ignores undefineds.
const OMIT_SYMBOL = Symbol('omit-value');
// We play some games to preserve undefined values during serialization, because we differentiate them
// in some transforms from null/not-present keys.
const mapOmitToUndefined = (input) => _.mapValues(input, (v) => v === request_step_definitions_1.SERIALIZED_OMIT || v === OMIT_SYMBOL
? undefined // Replace our omit placeholders with actual undefineds
: v);
class PassThroughStepImpl extends request_step_definitions_1.PassThroughStep {
async trustedCACertificates() {
if (!this.extraCACertificates.length)
return undefined;
if (!this._trustedCACertificates) {
this._trustedCACertificates = (0, passthrough_handling_1.getTrustedCAs)(undefined, this.extraCACertificates);
}
return this._trustedCACertificates;
}
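// Proxies the request on to its destination, applying any configured request/response
// transforms or beforeRequest/beforeResponse callbacks along the way: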
async handle(clientReq, clientRes, options) {
// Don't let Node add any default standard headers - we want full control
(0, header_utils_1.dropDefaultHeaders)(clientRes);
// Capture raw request data:
let { method, url: reqUrl, rawHeaders, destination } = clientReq;
let { protocol, pathname, search: query } = url.parse(reqUrl);
const clientSocket = clientReq.socket;
// Actual IP address or hostname
let hostAddress = destination.hostname;
// Same as hostAddress, unless it's an IP, in which case it's our best guess of the
// functional 'name' for the host (from Host header or SNI).
let hostname = (0, passthrough_handling_1.getEffectiveHostname)(hostAddress, clientSocket, rawHeaders);
let port = destination.port.toString();
// Check if this request is a request loop:
if ((0, socket_util_1.isSocketLoop)(this.outgoingSockets, clientSocket)) {
throw new Error((0, common_tags_1.oneLine) `
Passthrough loop detected. This probably means you're sending a request directly
to a passthrough endpoint, which is forwarding it to the target URL, which is a
passthrough endpoint, which is forwarding it to the target URL, which is a
passthrough endpoint...` +
'\n\n' + (0, common_tags_1.oneLine) `
You should either explicitly mock a response for this URL (${reqUrl}), or use
the server as a proxy, instead of making requests to it directly.
`);
}
// We have to capture the request stream immediately, to make sure nothing is lost if it
// goes past its max length (truncating the data) before we start sending upstream.
const clientReqBody = clientReq.body.asStream();
const isH2Downstream = (0, request_utils_1.isHttp2)(clientReq);
hostAddress = await (0, passthrough_handling_1.getClientRelativeHostname)(hostAddress, clientReq.remoteIpAddress, (0, passthrough_handling_1.getDnsLookupFunction)(this.lookupOptions));
// Override the request details, if a transform or callback is specified:
let reqBodyOverride;
if (this.transformRequest) {
const { replaceMethod, updateHeaders, replaceHeaders, replaceBody, replaceBodyFromFile, updateJsonBody, patchJsonBody, matchReplaceBody } = this.transformRequest;
const originalHostname = hostname;
({
reqUrl,
protocol,
hostname,
port,
pathname,
query,
rawHeaders
} = (0, passthrough_handling_1.applyDestinationTransforms)(this.transformRequest, {
isH2Downstream,
rawHeaders,
port,
protocol,
hostname,
pathname,
query
}));
// If you modify the hostname, we also treat that as modifying the
// resulting destination in turn:
if (hostname !== originalHostname) {
hostAddress = hostname;
}
if (replaceMethod) {
method = replaceMethod;
}
if (updateHeaders) {
rawHeaders = (0, header_utils_1.updateRawHeaders)(rawHeaders, updateHeaders);
}
else if (replaceHeaders) {
rawHeaders = (0, header_utils_1.objectHeadersToRaw)(replaceHeaders);
}
if (replaceBody) {
// Note that we're replacing the body without actually waiting for the real one, so
// this can result in sending a request much more quickly!
reqBodyOverride = (0, buffer_utils_1.asBuffer)(replaceBody);
}
else if (replaceBodyFromFile) {
reqBodyOverride = await fs.readFile(replaceBodyFromFile);
}
else if (updateJsonBody) {
const { body: realBody } = await (0, request_utils_1.waitForCompletedRequest)(clientReq);
const jsonBody = await realBody.getJson();
if (jsonBody === undefined) {
throw new Error("Can't update JSON in non-JSON request body");
}
const updatedBody = _.mergeWith(jsonBody, updateJsonBody, (_oldValue, newValue) => {
// We want undefined values in the update to remove the original values, but
// Lodash ignores undefined return values here. Fortunately, JSON.stringify
// ignores Symbols, omitting them from the result.
if (newValue === undefined)
return OMIT_SYMBOL;
});
reqBodyOverride = (0, buffer_utils_1.asBuffer)(JSON.stringify(updatedBody));
}
else if (patchJsonBody) {
const { body: realBody } = await (0, request_utils_1.waitForCompletedRequest)(clientReq);
const jsonBody = await realBody.getJson();
if (jsonBody === undefined) {
throw new Error("Can't patch JSON in non-JSON request body");
}
(0, fast_json_patch_1.applyPatch)(jsonBody, patchJsonBody, true); // Mutates the JSON body returned above
reqBodyOverride = (0, buffer_utils_1.asBuffer)(JSON.stringify(jsonBody));
}
else if (matchReplaceBody) {
const { body: realBody } = await (0, request_utils_1.waitForCompletedRequest)(clientReq);
const originalBody = await realBody.getText();
if (originalBody === undefined) {
throw new Error("Can't match & replace non-decodeable request body");
}
const replacedBody = (0, match_replace_1.applyMatchReplace)(originalBody, matchReplaceBody);
if (replacedBody !== originalBody) {
reqBodyOverride = (0, buffer_utils_1.asBuffer)(replacedBody);
}
}
if (reqBodyOverride) { // Can't check framing without body changes, since we won't have the body yet
// We always re-encode the body to match the resulting content-encoding header:
reqBodyOverride = await (0, request_utils_1.encodeBodyBuffer)(reqBodyOverride, rawHeaders);
const updatedCLHeader = (0, passthrough_handling_1.getRequestContentLengthAfterModification)(reqBodyOverride, clientReq.headers, (updateHeaders && ((0, header_utils_1.getHeaderValue)(updateHeaders, 'content-length') !== undefined ||
(0, header_utils_1.getHeaderValue)(updateHeaders, 'transfer-encoding')?.includes('chunked')))
? rawHeaders // Iff you replaced the relevant headers
: replaceHeaders, { httpVersion: isH2Downstream ? 2 : 1 });
if (updatedCLHeader !== undefined) {
rawHeaders = (0, header_utils_1.updateRawHeaders)(rawHeaders, {
'content-length': updatedCLHeader
});
}
}
}
else if (this.beforeRequest) {
const clientRawHeaders = rawHeaders;
const clientHeaders = (0, header_utils_1.rawHeadersToObject)(clientRawHeaders);
const completedRequest = await (0, request_utils_1.waitForCompletedRequest)(clientReq);
const modifiedReq = await this.beforeRequest({
...completedRequest,
url: reqUrl, // May have been overwritten by forwarding
headers: _.cloneDeep(clientHeaders),
rawHeaders: _.cloneDeep(clientRawHeaders)
});
if (modifiedReq?.response) {
if (modifiedReq.response === 'close') {
clientSocket.end();
throw new AbortError('Connection closed intentionally by rule', 'E_RULE_BREQ_CLOSE');
}
else if (modifiedReq.response === 'reset') {
(0, socket_util_1.requireSocketResetSupport)();
(0, socket_util_1.resetOrDestroy)(clientReq);
throw new AbortError('Connection reset intentionally by rule', 'E_RULE_BREQ_RESET');
}
else {
// The callback has provided a full response: don't passthrough at all, just use it.
await writeResponseFromCallback(modifiedReq.response, clientRes);
return;
}
}
method = modifiedReq?.method || method;
// Reparse the new URL, if necessary
if (modifiedReq?.url) {
if (!(0, url_1.isAbsoluteUrl)(modifiedReq?.url))
throw new Error("Overridden request URLs must be absolute");
reqUrl = modifiedReq.url;
const parsedUrl = url.parse(reqUrl);
({ protocol, port, pathname, search: query } = parsedUrl);
hostname = parsedUrl.hostname;
hostAddress = hostname;
}
let headers = modifiedReq?.headers || clientHeaders;
// We need to make sure the Host/:authority header is updated correctly - following the user's returned value if
// they provided one, but updating it if not to match the effective target URL of the request:
Object.assign(headers, isH2Downstream
? (0, passthrough_handling_1.getH2HeadersAfterModification)(reqUrl, clientHeaders, modifiedReq?.headers)
: { 'host': (0, passthrough_handling_1.getHostAfterModification)(reqUrl, clientHeaders, modifiedReq?.headers) });
validateCustomHeaders(clientHeaders, modifiedReq?.headers, passthrough_handling_1.MODIFIABLE_PSEUDOHEADERS // These are handled by getH2HeadersAfterModification above
);
reqBodyOverride = await (0, passthrough_handling_1.buildOverriddenBody)(modifiedReq, headers);
if (reqBodyOverride || modifiedReq?.headers) {
// Automatically match the content-length to the body:
const updatedCLHeader = (0, passthrough_handling_1.getRequestContentLengthAfterModification)(reqBodyOverride || completedRequest.body.buffer, clientHeaders, modifiedReq?.headers, { httpVersion: isH2Downstream ? 2 : 1 });
if (updatedCLHeader !== undefined) {
headers['content-length'] = updatedCLHeader;
}
}
rawHeaders = (0, header_utils_1.objectHeadersToRaw)(headers);
}
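// Work out the effective port for the upstream connection (falling back to the
// protocol default if no port was set explicitly):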
const effectivePort = (0, url_1.getEffectivePort)({ protocol, port });
const trustedCAs = await this.trustedCACertificates();
// We only do H2 upstream for HTTPS. Http2-wrapper doesn't support H2C, it's rarely used
// and we can't use ALPN to detect HTTP/2 support cleanly.
let shouldTryH2Upstream = isH2Downstream && protocol === 'https:';
let family;
if (hostname === 'localhost') {
// Annoying special case: some localhost servers listen only on either ipv4 or ipv6.
// Very specific situation, but a very common one for development use.
// We need to work out which family to use ourselves, as Node sometimes makes bad choices.
if (await (0, socket_util_1.isLocalPortActive)('::1', effectivePort))
family = 6;
else
family = 4;
}
// Remote clients might configure a passthrough rule with a parameter reference for the proxy,
// delegating proxy config to the admin server. That's fine initially, but you can't actually
// handle a request in that case - make sure our proxyConfig is always dereferenced before use.
const proxySettingSource = (0, rule_parameters_1.assertParamDereferenced)(this.proxyConfig);
// Mirror the keep-alive-ness of the incoming request in our outgoing request
const agent = await (0, http_agents_1.getAgent)({
protocol: (protocol || undefined),
hostname: hostname,
port: effectivePort,
tryHttp2: shouldTryH2Upstream,
keepAlive: (0, request_utils_1.shouldKeepAlive)(clientReq),
proxySettingSource
});
if (agent && !('http2' in agent)) {
// I.e. only use HTTP/2 if we're using an HTTP/2-compatible agent
shouldTryH2Upstream = false;
}
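// Pick how we'll make the upstream request: ALPN-based HTTP/2 auto-negotiation where
// we might use H2 upstream, or plain node https/http requests otherwise: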
let makeRequest = (shouldTryH2Upstream
? (options, cb) => h2Client.auto(options, cb).catch((e) => {
// If an error occurs during ALPN auto-detection, work out whether upstream is at fault:
// a TypeError implies it's an invalid HTTP/2 request that was rejected, while
// anything else implies an upstream HTTP/2 issue.
e.causedByUpstreamError = !(e instanceof TypeError);
throw e;
})
// HTTP/1 + TLS
: protocol === 'https:'
? https.request
// HTTP/1 plaintext:
: http.request);
if (isH2Downstream && shouldTryH2Upstream) {
// We drop all incoming pseudoheaders, and regenerate them (except legally modified ones)
rawHeaders = rawHeaders.filter(([key]) => !key.toString().startsWith(':') ||
passthrough_handling_1.MODIFIABLE_PSEUDOHEADERS.includes(key.toLowerCase()));
}
else if (isH2Downstream && !shouldTryH2Upstream) {
rawHeaders = (0, header_utils_1.h2HeadersToH1)(rawHeaders, method);
}
let serverReq;
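// Make the upstream request, and stream the (possibly transformed) response back down
// to the client: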
return new Promise((resolve, reject) => (async () => {
serverReq = await makeRequest({
protocol,
method,
hostname: hostAddress,
port,
family,
path: `${pathname || '/'}${query || ''}`,
headers: shouldTryH2Upstream
? (0, header_utils_1.rawHeadersToObjectPreservingCase)(rawHeaders)
: (0, header_utils_1.flattenPairedRawHeaders)(rawHeaders),
setDefaultHeaders: shouldTryH2Upstream, // For now, we need this for unexpected H2->H1 header fallback
lookup: (0, passthrough_handling_1.getDnsLookupFunction)(this.lookupOptions),
// ^ Cast required to handle __promisify__ type hack in the official Node types
agent,
// TLS options:
...(0, passthrough_handling_1.getUpstreamTlsOptions)({
hostname,
port: effectivePort,
ignoreHostHttpsErrors: this.ignoreHostHttpsErrors,
clientCertificateHostMap: this.clientCertificateHostMap,
trustedCAs
})
}, (serverRes) => (async () => {
serverRes.on('error', (e) => {
reportUpstreamAbort(e);
reject(e);
});
// Forward server trailers, if we receive any:
serverRes.on('end', () => {
if (!serverRes.rawTrailers?.length)
return;
const trailersToForward = (0, header_utils_1.pairFlatRawHeaders)(serverRes.rawTrailers)
.filter(([key, value]) => {
if (!(0, header_utils_1.validateHeader)(key, value)) {
console.warn(`Not forwarding invalid trailer: "${key}: ${value}"`);
// Nothing else we can do in this case regardless - setHeaders will
// throw within Node if we try to set this value.
return false;
}
return true;
});
try {
clientRes.addTrailers((0, request_utils_1.isHttp2)(clientReq)
// HTTP/2 compat doesn't support raw headers here (yet)
? (0, header_utils_1.rawHeadersToObjectPreservingCase)(trailersToForward)
: trailersToForward);
}
catch (e) {
console.warn(`Failed to forward response trailers: ${e}`);
}
});
let serverStatusCode = serverRes.statusCode;
let serverStatusMessage = serverRes.statusMessage;
let serverRawHeaders = (0, header_utils_1.pairFlatRawHeaders)(serverRes.rawHeaders);
// This is only set if we need to read the body here, for a callback or similar. If so,
// we keep the buffer in case we need it afterwards (if the cb doesn't replace it).
let originalBody;
// This is set when we override the body data. Note that this doesn't mean we actually
// read & buffered the original data! With a fixed replacement body we can skip that.
let resBodyOverride;
if (options.emitEventCallback) {
options.emitEventCallback('passthrough-response-head', {
statusCode: serverStatusCode,
statusMessage: serverStatusMessage,
httpVersion: serverRes.httpVersion,
rawHeaders: serverRawHeaders
});
}
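// If the client connection is HTTP/2, translate the upstream response headers into a
// form valid for forwarding over HTTP/2: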
if (isH2Downstream) {
serverRawHeaders = (0, header_utils_1.h1HeadersToH2)(serverRawHeaders);
}
if (this.transformResponse) {
const { replaceStatus, updateHeaders, replaceHeaders, replaceBody, replaceBodyFromFile, updateJsonBody, patchJsonBody, matchReplaceBody } = this.transformResponse;
if (replaceStatus) {
serverStatusCode = replaceStatus;
serverStatusMessage = undefined; // Reset to default
}
if (updateHeaders) {
serverRawHeaders = (0, header_utils_1.updateRawHeaders)(serverRawHeaders, updateHeaders);
}
else if (replaceHeaders) {
serverRawHeaders = (0, header_utils_1.objectHeadersToRaw)(replaceHeaders);
}
if (replaceBody) {
// Note that we're replacing the body without actually waiting for the real one, so
// this can result in sending a response much more quickly!
resBodyOverride = (0, buffer_utils_1.asBuffer)(replaceBody);
}
else if (replaceBodyFromFile) {
resBodyOverride = await fs.readFile(replaceBodyFromFile);
}
else if (updateJsonBody) {
originalBody = await (0, buffer_utils_1.streamToBuffer)(serverRes);
const realBody = (0, request_utils_1.buildBodyReader)(originalBody, serverRes.headers);
const jsonBody = await realBody.getJson();
if (jsonBody === undefined) {
throw new Error("Can't update JSON in non-JSON response body");
}
const updatedBody = _.mergeWith(jsonBody, updateJsonBody, (_oldValue, newValue) => {
// We want undefined values in the update to remove the original values, but
// Lodash ignores undefined return values here. Fortunately, JSON.stringify
// ignores Symbols, omitting them from the result.
if (newValue === undefined)
return OMIT_SYMBOL;
});
resBodyOverride = (0, buffer_utils_1.asBuffer)(JSON.stringify(updatedBody));
}
else if (patchJsonBody) {
originalBody = await (0, buffer_utils_1.streamToBuffer)(serverRes);
const realBody = (0, request_utils_1.buildBodyReader)(originalBody, serverRes.headers);
const jsonBody = await realBody.getJson();
if (jsonBody === undefined) {
throw new Error("Can't patch JSON in non-JSON response body");
}
(0, fast_json_patch_1.applyPatch)(jsonBody, patchJsonBody, true); // Mutates the JSON body returned above
resBodyOverride = (0, buffer_utils_1.asBuffer)(JSON.stringify(jsonBody));
}
else if (matchReplaceBody) {
originalBody = await (0, buffer_utils_1.streamToBuffer)(serverRes);
const realBody = (0, request_utils_1.buildBodyReader)(originalBody, serverRes.headers);
const originalBodyText = await realBody.getText();
if (originalBodyText === undefined) {
throw new Error("Can't match & replace non-decodeable response body");
}
let replacedBody = originalBodyText;
for (let [match, result] of matchReplaceBody) {
replacedBody = replacedBody.replace(match, result);
}
if (replacedBody !== originalBodyText) {
resBodyOverride = (0, buffer_utils_1.asBuffer)(replacedBody);
}
}
if (resBodyOverride) { // Can't check framing without body changes, since we won't have the body yet
// In the above cases, the overriding data is assumed to always be in decoded form,
// so we re-encode the body to match the resulting content-encoding header:
resBodyOverride = await (0, request_utils_1.encodeBodyBuffer)(resBodyOverride, serverRawHeaders);
const updatedCLHeader = (0, passthrough_handling_1.getResponseContentLengthAfterModification)(resBodyOverride, serverRes.headers, (updateHeaders && (0, header_utils_1.getHeaderValue)(updateHeaders, 'content-length') !== undefined)
? serverRawHeaders // Iff you replaced the content length
: replaceHeaders, { httpMethod: method, httpVersion: serverRes.httpVersion.startsWith('1.') ? 1 : 2 });
if (updatedCLHeader !== undefined) {
serverRawHeaders = (0, header_utils_1.updateRawHeaders)(serverRawHeaders, {
'content-length': updatedCLHeader
});
}
}
}
else if (this.beforeResponse) {
let modifiedRes;
originalBody = await (0, buffer_utils_1.streamToBuffer)(serverRes);
let serverHeaders = (0, header_utils_1.rawHeadersToObject)(serverRawHeaders);
let reqHeader = (0, header_utils_1.rawHeadersToObjectPreservingCase)(rawHeaders);
modifiedRes = await this.beforeResponse({
id: clientReq.id,
statusCode: serverStatusCode,
statusMessage: serverRes.statusMessage,
headers: serverHeaders,
rawHeaders: _.cloneDeep(serverRawHeaders),
body: (0, request_utils_1.buildBodyReader)(originalBody, serverHeaders)
}, {
id: clientReq.id,
protocol: protocol?.replace(':', '') ?? '',
method: method,
httpVersion: serverRes.httpVersion,
url: reqUrl,
destination: {
hostname: hostname || 'localhost',
port: effectivePort
},
path: `${pathname || '/'}${query || ''}`,
headers: reqHeader,
rawHeaders: rawHeaders,
timingEvents: clientReq.timingEvents,
tags: clientReq.tags,
body: (0, request_utils_1.buildBodyReader)(reqBodyOverride ? buffer_1.Buffer.from(reqBodyOverride.buffer) : await clientReq.body.asDecodedBuffer(), reqHeader),
rawTrailers: clientReq.rawTrailers ?? [],
trailers: (0, header_utils_1.rawHeadersToObject)(clientReq.rawTrailers ?? []),
});
if (modifiedRes === 'close' || modifiedRes === 'reset') {
// If you kill the connection, we need to fire an upstream event separately here, since
// this means the body won't be delivered in normal response events.
if (options.emitEventCallback) {
options.emitEventCallback('passthrough-response-body', {
overridden: true,
rawBody: originalBody
});
}
if (modifiedRes === 'close') {
clientSocket.end();
}
else if (modifiedRes === 'reset') {
(0, socket_util_1.requireSocketResetSupport)();
(0, socket_util_1.resetOrDestroy)(clientReq);
}
throw new AbortError(`Connection ${modifiedRes === 'close' ? 'closed' : 'reset'} intentionally by rule`, `E_RULE_BRES_${modifiedRes.toUpperCase()}`);
}
validateCustomHeaders(serverHeaders, modifiedRes?.headers);
serverStatusCode = modifiedRes?.statusCode ||
serverStatusCode;
serverStatusMessage = modifiedRes?.statusMessage ||
serverStatusMessage;
serverHeaders = modifiedRes?.headers || serverHeaders;
resBodyOverride = await (0, passthrough_handling_1.buildOverriddenBody)(modifiedRes, serverHeaders);
if (resBodyOverride || modifiedRes?.headers) {
const updatedContentLength = (0, passthrough_handling_1.getResponseContentLengthAfterModification)(resBodyOverride || originalBody, serverRes.headers, modifiedRes?.headers, {
httpMethod: method,
httpVersion: serverRes.httpVersion.startsWith('1.') ? 1 : 2
});
if (updatedContentLength !== undefined) {
serverHeaders['content-length'] = updatedContentLength;
}
}
serverRawHeaders = (0, header_utils_1.objectHeadersToRaw)(serverHeaders);
}
(0, request_utils_1.writeHead)(clientRes, serverStatusCode, serverStatusMessage, serverRawHeaders
.filter(([key, value]) => {
if (key === ':status')
return false;
if (!(0, header_utils_1.validateHeader)(key, value)) {
console.warn(`Not forwarding invalid header: "${key}: ${value}"`);
// Nothing else we can do in this case regardless - setHeaders will
// throw within Node if we try to set this value.
return false;
}
return true;
}));
if (resBodyOverride) {
// Return the override data to the client:
clientRes.end(resBodyOverride);
// Dump the real response data, in case that body wasn't read yet:
serverRes.resume();
resolve();
}
else if (originalBody) {
// If the original body was read, and not overridden, then send it
// onward directly:
clientRes.end(originalBody);
resolve();
}
else {
// Otherwise the body hasn't been read - stream it live:
serverRes.pipe(clientRes);
serverRes.once('end', resolve);
}
if (options.emitEventCallback) {
if (!!resBodyOverride) {
(originalBody
? Promise.resolve(originalBody)
: (0, buffer_utils_1.streamToBuffer)(serverRes)).then((upstreamBody) => {
options.emitEventCallback('passthrough-response-body', {
overridden: true,
rawBody: upstreamBody
});
}).catch((e) => reportUpstreamAbort(e));
}
else {
options.emitEventCallback('passthrough-response-body', {
overridden: false
// We don't bother buffering & re-sending the body if
// it's the same as the one being sent to the client.
});
}
}
})().catch(reject));
serverReq.once('socket', (socket) => {
// This event can fire multiple times for keep-alive sockets, which are used to
// make multiple requests. If/when that happens, we don't need more event listeners.
if (this.outgoingSockets.has(socket))
return;
// Add this port to our list of active ports, once it's connected (before then it has no port)
if (socket.connecting) {
socket.once('connect', () => {
this.outgoingSockets.add(socket);
});
}
else if (socket.localPort !== undefined) {
this.outgoingSockets.add(socket);
}
// Remove this port from our list of active ports when it's closed
// This is called for both clean closes & errors.
socket.once('close', () => this.outgoingSockets.delete(socket));
});
// Forward any request trailers received from the client:
const forwardTrailers = () => {
if (clientReq.rawTrailers?.length) {
if (serverReq.addTrailers) {
serverReq.addTrailers(clientReq.rawTrailers);
}
else {
// See https://github.com/szmarczak/http2-wrapper/issues/103
console.warn('Not forwarding request trailers - not yet supported for HTTP/2');
}
}
};
// This has to be above the pipe setup below, or we end the stream before adding the
// trailers, and they're lost.
if (clientReqBody.readableEnded) {
forwardTrailers();
}
else {
clientReqBody.once('end', forwardTrailers);
}
// Forward the request body to the upstream server:
if (reqBodyOverride) {
clientReqBody.resume(); // Dump any remaining real request body
if (reqBodyOverride.length > 0)
serverReq.end(reqBodyOverride);
else
serverReq.end(); // http2-wrapper fails given an empty buffer for methods that aren't allowed a body
}
else {
// asStream includes all content, including the body before this call
clientReqBody.pipe(serverReq);
clientReqBody.on('error', () => serverReq.abort());
}
// If the downstream connection aborts, before the response has been completed,
// we also abort the upstream connection. Important to avoid unnecessary connections,
// and to correctly proxy client connection behaviour to the upstream server.
function abortUpstream() {
serverReq.abort();
}
// If the upstream fails, for any reason, we need to fire an event to any rule
// listeners who might be present (although only the first time)
let reportedUpstreamError = false;
function reportUpstreamAbort(e) {
e.causedByUpstreamError = true;
if (!options.emitEventCallback)
return;
if (reportedUpstreamError)
return;
reportedUpstreamError = true;
options.emitEventCallback('passthrough-abort', {
downstreamAborted: !!(serverReq?.aborted),
tags: [
...clientReq.tags,
(0, passthrough_handling_1.buildUpstreamErrorTags)(e)
],
error: {
name: e.name,
code: e.code,
message: e.message,
stack: e.stack
}
});
}
// Handle the case where the downstream connection is prematurely closed before
// fully sending the request or receiving the response.
clientReq.on('aborted', abortUpstream);
clientRes.on('close', abortUpstream);
// Disable the upstream request abort handlers once the response has been received.
clientRes.once('finish', () => {
clientReq.off('aborted', abortUpstream);
clientRes.off('close', abortUpstream);
});
serverReq.on('error', (e) => {
reportUpstreamAbort(e);
reject(e);
});
// We always start upstream connections *immediately*. This might be less efficient, but it
// ensures that we're accurately mirroring downstream, which has indeed already connected.
serverReq.flushHeaders();
// For similar reasons, we don't want any buffering on outgoing data at all if possible:
serverReq.setNoDelay(true);
// Fire rule events, to allow in-depth debugging of upstream traffic & modifications,
// so anybody interested can see _exactly_ what we're sending upstream here:
if (options.emitEventCallback) {
options.emitEventCallback('passthrough-request-head', {
method,
protocol: protocol.replace(/:$/, ''),
hostname,
port,
path: `${pathname || '/'}${query || ''}`,
rawHeaders
});
if (!!reqBodyOverride) {
options.emitEventCallback('passthrough-request-body', {
overridden: true,
rawBody: reqBodyOverride
});
}
else {
options.emitEventCallback('passthrough-request-body', {
overridden: false
});
}
}
})().catch(reject)).catch((e) => {
clientRes.tags.push(...(0, passthrough_handling_1.buildUpstreamErrorTags)(e));
if (e.causedByUpstreamError && !serverReq?.aborted) {
if (this.simulateConnectionErrors) {
// The upstream socket failed: forcibly break the downstream stream to match. This could
// happen due to a reset, TLS or DNS failures, or anything - but critically it's a
// connection-level issue, so we try to create connection issues downstream.
(0, socket_util_1.resetOrDestroy)(clientReq);
// Aggregate errors can be thrown if multiple (IPv4/6) addresses were tested. Note that
// AggregateError only exists in Node 15+. If that happens, we need to combine errors:
const errorMessage = typeof AggregateError !== 'undefined' && (e instanceof AggregateError)
? e.errors.map(e => e.message).join(', ')
: (e.message ?? e.code ?? e);
throw new AbortError(`Upstream connection error: ${errorMessage}`, e.code || 'E_MIRRORED_FAILURE');
}
else {
e.statusCode = 502;
e.statusMessage = 'Error communicating with upstream server';
throw e;
}
}
else {
throw e;
}
});
}
/**
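* Recreates the step from serialized rule data, rebinding any beforeRequest/beforeResponse
* callbacks to RPC calls over the serialization channel.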
* @internal
*/
static deserialize(data, channel, { ruleParams, bodySerializer }) {
let beforeRequest;
if (data.hasBeforeRequestCallback) {
beforeRequest = async (req) => {
const result = (0, body_serialization_1.withDeserializedCallbackBuffers)(await channel.request('beforeRequest', {
args: [await (0, body_serialization_1.withSerializedBodyReader)(req, bodySerializer)]
}));
if (result.response && typeof result.response !== 'string') {
result.response = (0, body_serialization_1.withDeserializedCallbackBuffers)(result.response);
}
return result;
};
}
let beforeResponse;
if (data.hasBeforeResponseCallback) {
beforeResponse = async (res, req) => {
const callbackResult = await channel.request('beforeResponse', {
args: [
await (0, body_serialization_1.withSerializedBodyReader)(res, bodySerializer),
await (0, body_serialization_1.withSerializedBodyReader)(req, bodySerializer)
]
});
if (callbackResult && typeof callbackResult !== 'string') {
return (0, body_serialization_1.withDeserializedCallbackBuffers)(callbackResult);
}
else {
return callbackResult;
}
};
}
// Backward compat for old clients:
if (data.forwarding && !data.transformRequest?.replaceHost) {
const [targetHost, setProtocol] = data.forwarding.targetHost.split('://').reverse();
data.transformRequest ?? (data.transformRequest = {});
data.transformRequest.replac