"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.getDnsLookupFunction = exports.MODIFIABLE_PSEUDOHEADERS = void 0;
exports.getUpstreamTlsOptions = getUpstreamTlsOptions;
exports.getTrustedCAs = getTrustedCAs;
exports.buildOverriddenBody = buildOverriddenBody;
exports.getEffectiveHostname = getEffectiveHostname;
exports.applyDestinationTransforms = applyDestinationTransforms;
exports.getHostAfterModification = getHostAfterModification;
exports.getH2HeadersAfterModification = getH2HeadersAfterModification;
exports.getRequestContentLengthAfterModification = getRequestContentLengthAfterModification;
exports.getResponseContentLengthAfterModification = getResponseContentLengthAfterModification;
exports.getClientRelativeHostname = getClientRelativeHostname;
exports.buildUpstreamErrorTags = buildUpstreamErrorTags;
const buffer_1 = require("buffer");
const fs = require("fs/promises");
const tls = require("tls");
const url = require("url");
const _ = require("lodash");
const common_tags_1 = require("common-tags");
const cacheable_lookup_1 = require("cacheable-lookup");
const semver = require("semver");
const util_1 = require("@httptoolkit/util");
const util_2 = require("../util/util");
const buffer_utils_1 = require("../util/buffer-utils");
const ip_utils_1 = require("../util/ip-utils");
const dns_1 = require("../util/dns");
const request_utils_1 = require("../util/request-utils");
const openssl_compat_1 = require("../util/openssl-compat");
const header_utils_1 = require("../util/header-utils");
const url_1 = require("../util/url");
const socket_extensions_1 = require("../util/socket-extensions");
const request_step_impls_1 = require("./requests/request-step-impls");
const match_replace_1 = require("./match-replace");
// TLS settings for proxied connections, intended to avoid TLS fingerprint blocking
// issues so far as possible, by closely emulating a Firefox Client Hello:
const NEW_CURVES_SUPPORTED = (0, openssl_compat_1.areFFDHECurvesSupported)(process.versions.openssl);
const SSL_OP_LEGACY_SERVER_CONNECT = 1 << 2;
const SSL_OP_TLSEXT_PADDING = 1 << 4;
const SSL_OP_NO_ENCRYPT_THEN_MAC = 1 << 19;
// All settings are designed to exactly match Firefox v103, since that's a good baseline
// that seems to be widely accepted and is easy to emulate from Node.js.
function getUpstreamTlsOptions({ hostname, port, ignoreHostHttpsErrors, clientCertificateHostMap, trustedCAs }) {
const strictHttpsChecks = shouldUseStrictHttps(hostname, port, ignoreHostHttpsErrors);
const hostWithPort = `${hostname}:${port}`;
const clientCert = clientCertificateHostMap[hostWithPort] ||
clientCertificateHostMap[hostname] ||
clientCertificateHostMap['*'] ||
{};
return {
servername: hostname && !(0, ip_utils_1.isIP)(hostname)
? hostname
: undefined, // Can't send IPs in SNI
// We precisely control the various TLS parameters here to limit TLS fingerprinting issues:
ecdhCurve: [
'X25519',
'prime256v1', // N.B. Equivalent to secp256r1
'secp384r1',
'secp521r1',
...(NEW_CURVES_SUPPORTED
? [
'ffdhe2048',
'ffdhe3072'
] : [])
].join(':'),
sigalgs: [
'ecdsa_secp256r1_sha256',
'ecdsa_secp384r1_sha384',
'ecdsa_secp521r1_sha512',
'rsa_pss_rsae_sha256',
'rsa_pss_rsae_sha384',
'rsa_pss_rsae_sha512',
'rsa_pkcs1_sha256',
'rsa_pkcs1_sha384',
'rsa_pkcs1_sha512',
'ECDSA+SHA1',
'rsa_pkcs1_sha1'
].join(':'),
ciphers: [
'TLS_AES_128_GCM_SHA256',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_256_GCM_SHA384',
'ECDHE-ECDSA-AES128-GCM-SHA256',
'ECDHE-RSA-AES128-GCM-SHA256',
'ECDHE-ECDSA-CHACHA20-POLY1305',
'ECDHE-RSA-CHACHA20-POLY1305',
'ECDHE-ECDSA-AES256-GCM-SHA384',
'ECDHE-RSA-AES256-GCM-SHA384',
'ECDHE-ECDSA-AES256-SHA',
'ECDHE-ECDSA-AES128-SHA',
'ECDHE-RSA-AES128-SHA',
'ECDHE-RSA-AES256-SHA',
'AES128-GCM-SHA256',
'AES256-GCM-SHA384',
'AES128-SHA',
'AES256-SHA',
// This magic cipher is the very obtuse way that OpenSSL downgrades the overall
// security level to allow various legacy settings, protocols & ciphers:
...(!strictHttpsChecks
? ['@SECLEVEL=0']
: [])
].join(':'),
secureOptions: strictHttpsChecks
? SSL_OP_TLSEXT_PADDING | SSL_OP_NO_ENCRYPT_THEN_MAC
: SSL_OP_TLSEXT_PADDING | SSL_OP_NO_ENCRYPT_THEN_MAC | SSL_OP_LEGACY_SERVER_CONNECT,
...{
// Valid, but not included in Node.js TLS module types:
requestOCSP: true
},
// Trust intermediate certificates from the trusted CA list too. Without this, trusted CAs
// are only used when they are self-signed root certificates. Seems to cause issues in Node v20
// in HTTP/2 tests, so this is only enabled from Node v22.9.0 onwards.
allowPartialTrustChain: semver.satisfies(process.version, '>=22.9.0'),
// Allow TLSv1, if !strict:
minVersion: strictHttpsChecks ? tls.DEFAULT_MIN_VERSION : 'TLSv1',
// Skip certificate validation entirely, if not strict:
rejectUnauthorized: strictHttpsChecks,
// Override the set of trusted CAs, if configured to do so:
...(trustedCAs ? {
ca: trustedCAs
} : {}),
// Use a client cert, if one matches for this hostname+port:
...clientCert
};
}
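/*
 * Illustrative sketch (not part of the module): one way the options above might feed into an
 * upstream TLS connection. The hostname, port and empty option values here are hypothetical
 * placeholders, not values taken from this codebase.
 *
 *   const tlsOptions = getUpstreamTlsOptions({
 *       hostname: 'example.com',
 *       port: 443,
 *       ignoreHostHttpsErrors: false,
 *       clientCertificateHostMap: {},
 *       trustedCAs: undefined
 *   });
 *   const upstreamSocket = tls.connect({ host: 'example.com', port: 443, ...tlsOptions });
 */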
async function getTrustedCAs(trustedCAs, additionalTrustedCAs) {
if (trustedCAs && additionalTrustedCAs?.length) {
throw new Error(`trustedCAs and additionalTrustedCAs options are mutually exclusive`);
}
if (trustedCAs) {
return Promise.all(trustedCAs.map((caDefinition) => getCA(caDefinition)));
}
if (additionalTrustedCAs) {
const CAs = await Promise.all(additionalTrustedCAs.map((caDefinition) => getCA(caDefinition)));
return tls.rootCertificates.concat(CAs);
}
}
const getCA = async (caDefinition) => {
return 'certPath' in caDefinition
? await fs.readFile(caDefinition.certPath, 'utf8')
: 'cert' in caDefinition
? caDefinition.cert.toString('utf8')
: (0, util_1.unreachableCheck)(caDefinition);
};
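/*
 * Illustrative sketch (hypothetical path and PEM variable): CA definitions can be given either
 * as a file path or as inline certificate content, and the two top-level options are mutually
 * exclusive.
 *
 *   await getTrustedCAs([{ certPath: './my-ca.pem' }], undefined);   // replaces the CA list
 *   await getTrustedCAs(undefined, [{ cert: myCaPem }]);             // extends Node's root CAs
 *   // Passing both arguments at once throws an error.
 */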
// --- Various helpers for deriving parts of request/response data given partial overrides: ---
/**
* Takes a callback result and some headers, and returns a ready to send body, using the headers
* (and potentially modifying them) to match the content type & encoding.
*/
async function buildOverriddenBody(callbackResult, headers) {
// Raw bodies are easy: use them as is.
if (callbackResult?.rawBody)
return callbackResult?.rawBody;
// In the json/body case, we need to get the body and transform it into a buffer
// for consistent handling later, and encode it to match the headers.
let replacementBody;
if (callbackResult?.json) {
headers['content-type'] = 'application/json';
replacementBody = JSON.stringify(callbackResult?.json);
}
else {
replacementBody = callbackResult?.body;
}
if (replacementBody === undefined)
return replacementBody;
let rawBuffer;
if ((0, request_utils_1.isMockttpBody)(replacementBody)) {
// It's our own bodyReader instance. That's not supposed to happen, but
// it's ok, we just need to use the buffer data instead of the whole object
rawBuffer = buffer_1.Buffer.from(replacementBody.buffer);
}
else if (replacementBody === '') {
// For empty bodies, it's slightly more convenient if they're truthy
rawBuffer = buffer_1.Buffer.alloc(0);
}
else {
rawBuffer = (0, buffer_utils_1.asBuffer)(replacementBody);
}
return await (0, request_utils_1.encodeBodyBuffer)(rawBuffer, headers);
}
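/*
 * Illustrative sketch (hypothetical values, assuming no content-encoding header is set):
 *
 *   const headers = { 'content-type': 'text/plain' };
 *   await buildOverriddenBody({ json: { ok: true } }, headers);
 *   // => Buffer containing '{"ok":true}'; headers['content-type'] is now 'application/json'
 *   await buildOverriddenBody({ body: '' }, headers);   // => empty (but truthy) Buffer
 *   await buildOverriddenBody(undefined, headers);      // => undefined (no body override)
 */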
/**
* Effectively match the slightly-different-context logic in MockttpServer for generating a 'name'
* for a request's destination (e.g. in the URL). We prioritise domain names over IPs, and
* derive the most appropriate name available. In this method we consider only hostnames, so we
* drop the port, as that's always specified elsewhere.
*/
function getEffectiveHostname(destinationHostname, socket, rawHeaders) {
return destinationHostname && !(0, ip_utils_1.isIP)(destinationHostname)
? destinationHostname
: ( // Use header info rather than raw IPs, if we can:
(0, header_utils_1.getHeaderValue)(rawHeaders, ':authority') ??
(0, header_utils_1.getHeaderValue)(rawHeaders, 'host') ??
socket[socket_extensions_1.TlsMetadata]?.sniHostname ??
destinationHostname ?? // Use bare IP destination if we have nothing else
'localhost').replace(/:\d+$/, '');
}
/**
* If you override some headers, they have implications for the effective URL we send the
* request to. If you override that and the URL at the same time, it gets complicated.
*
* This method calculates the correct header value we should use: prioritising the header
* value you provide, printing a warning if it's contradictory, or returning the URL-inferred
* value to override the header correctly if you didn't specify one.
*/
function deriveUrlLinkedHeader(originalHeaders, replacementHeaders, headerName, expectedValue // The inferred 'correct' value from the URL
) {
const replacementValue = replacementHeaders?.[headerName];
if (replacementValue !== undefined) {
if (replacementValue !== expectedValue && replacementValue === originalHeaders[headerName]) {
// If you rewrite the URL-based header wrongly, by explicitly setting it to the
// existing value, we accept it but print a warning. This would be easy to
// do if you mutate the existing headers, for example, and ignore the host.
console.warn((0, common_tags_1.oneLine) `
Passthrough callback set the URL and the ${headerName} header
to mismatched values, which may be a mistake. The URL implies
${expectedValue}, whilst the header was set to ${replacementValue}.
`);
}
// Whatever happens, if you explicitly set a value, we use it.
return replacementValue;
}
// If you didn't override the header at all, then we ensure the correct
// value is set automatically.
return expectedValue;
}
function applyDestinationTransforms(transform, { isH2Downstream, rawHeaders, port, protocol, hostname, pathname, query }) {
const { setProtocol, replaceHost, matchReplaceHost, matchReplacePath, matchReplaceQuery, } = transform;
if (setProtocol) {
const wasDefaultPort = port === null || (0, url_1.getDefaultPort)(protocol || 'http') === parseInt(port, 10);
protocol = setProtocol + ':';
// If we were on the default port, update that accordingly:
if (wasDefaultPort) {
port = (0, url_1.getDefaultPort)(protocol).toString();
}
}
if (replaceHost) {
const { targetHost } = replaceHost;
[hostname, port] = targetHost.split(':');
}
if (matchReplaceHost) {
const result = (0, match_replace_1.applyMatchReplace)(port === null ? hostname : `${hostname}:${port}`, matchReplaceHost.replacements);
[hostname, port] = result.split(':');
}
if ((replaceHost?.updateHostHeader ?? matchReplaceHost?.updateHostHeader) !== false) {
const updateHostHeader = replaceHost?.updateHostHeader ?? matchReplaceHost?.updateHostHeader;
const hostHeaderName = isH2Downstream ? ':authority' : 'host';
let hostHeaderIndex = (0, header_utils_1.findRawHeaderIndex)(rawHeaders, hostHeaderName);
let hostHeader;
if (hostHeaderIndex === -1) {
// Should never happen really, but just in case:
hostHeader = [hostHeaderName, hostname];
hostHeaderIndex = rawHeaders.length;
}
else {
// Clone this - we don't want to modify the original headers, as they're used for events
hostHeader = _.clone(rawHeaders[hostHeaderIndex]);
}
rawHeaders[hostHeaderIndex] = hostHeader;
if (updateHostHeader === undefined || updateHostHeader === true) {
// If updateHostHeader is true, or just not specified, match the new target
hostHeader[1] = hostname + (port ? `:${port}` : '');
}
else if (updateHostHeader) {
// If it's an explicit custom value, use that directly.
hostHeader[1] = updateHostHeader;
} // Otherwise: falsey means don't touch it.
}
if (matchReplacePath) {
pathname = (0, match_replace_1.applyMatchReplace)(pathname || '/', matchReplacePath);
}
if (matchReplaceQuery) {
query = (0, match_replace_1.applyMatchReplace)(query || '', matchReplaceQuery);
}
return {
reqUrl: new URL(`${protocol}//${hostname}${(port ? `:${port}` : '')}${pathname || '/'}${query || ''}`).toString(),
protocol,
hostname,
port,
pathname,
query,
rawHeaders
};
}
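/*
 * Illustrative sketch (hypothetical values): a replaceHost transform rewrites the URL and, by
 * default, the host header too (set updateHostHeader: false to leave the header untouched).
 *
 *   const result = applyDestinationTransforms(
 *       { replaceHost: { targetHost: 'localhost:8000' } },
 *       {
 *           isH2Downstream: false,
 *           rawHeaders: [['host', 'example.com']],
 *           protocol: 'http:',
 *           hostname: 'example.com',
 *           port: null,
 *           pathname: '/api',
 *           query: '?id=1'
 *       }
 *   );
 *   // result.reqUrl     => 'http://localhost:8000/api?id=1'
 *   // result.rawHeaders => [['host', 'localhost:8000']]
 */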
/**
* Autocorrect the host header only in the case that you didn't explicitly
* override it yourself for some reason (e.g. if you're testing bad behaviour).
*/
function getHostAfterModification(reqUrl, originalHeaders, replacementHeaders) {
return deriveUrlLinkedHeader(originalHeaders, replacementHeaders, 'host', url.parse(reqUrl).host);
}
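/*
 * Illustrative sketch (hypothetical headers):
 *
 *   getHostAfterModification('https://example.com/path', { host: 'old.test' }, undefined);
 *   // => 'example.com' (auto-corrected to match the URL)
 *   getHostAfterModification('https://example.com/path', { host: 'old.test' }, { host: 'spoofed.test' });
 *   // => 'spoofed.test' (explicit overrides are always respected)
 */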
// These pseudoheaders are modifiable, in that they are independent from the other HTTP
// request params: you can send plain HTTP but set :scheme to https, and you can send
// to one hostname but set another hostname as the authority.
exports.MODIFIABLE_PSEUDOHEADERS = [
':authority',
':scheme'
];
/**
* Automatically update the :scheme and :authority headers to match the updated URL,
* as long as they weren't explicitly overridden themselves, in which case let them
* be set to any invalid value you like (e.g. to send a request to one server but
* pretend it was sent to another).
*/
function getH2HeadersAfterModification(reqUrl, originalHeaders, replacementHeaders) {
const parsedUrl = url.parse(reqUrl);
return {
':scheme': deriveUrlLinkedHeader(originalHeaders, replacementHeaders, ':scheme', parsedUrl.protocol.slice(0, -1)),
':authority': deriveUrlLinkedHeader(originalHeaders, replacementHeaders, ':authority', parsedUrl.host)
};
}
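/*
 * Illustrative sketch (hypothetical URL): with no explicit overrides, both pseudoheaders are
 * derived from the request URL.
 *
 *   getH2HeadersAfterModification('https://example.com/x', {}, undefined);
 *   // => { ':scheme': 'https', ':authority': 'example.com' }
 */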
// When modifying requests, we ensure you always have correct framing, as it's impossible
// to send a request with framing that doesn't match the body.
function getRequestContentLengthAfterModification(body, originalHeaders, replacementHeaders, context) {
// If there was a content-length header, it might now be wrong, and it's annoying
// to need to set your own content-length override when you just want to change
// the body. To help out, if you override the body in a way that results in invalid
// content-length headers, we fix them for you.
// For HTTP/2, framing is optional/advisory so we can just skip this entirely.
if (context.httpVersion !== 1)
return undefined;
const resultingHeaders = replacementHeaders || originalHeaders;
if ((0, header_utils_1.getHeaderValue)(resultingHeaders, 'transfer-encoding')?.includes('chunked')) {
return undefined; // No content-length header games needed
}
const expectedLength = (0, util_2.byteLength)(body).toString();
const contentLengthHeader = (0, header_utils_1.getHeaderValue)(resultingHeaders, 'content-length');
if (contentLengthHeader === expectedLength)
return undefined;
if (contentLengthHeader === undefined)
return expectedLength; // Differs from responses
// The content-length header is set, but it doesn't match the new body.
// If there is a wrong content-length set, and it's not just leftover from the original headers (i.e.
// you intentionally set it) then we show a warning since we're ignoring your (invalid) instructions.
if (contentLengthHeader && contentLengthHeader !== (0, header_utils_1.getHeaderValue)(originalHeaders, 'content-length')) {
console.warn(`Invalid request content-length header was ignored - resetting from ${contentLengthHeader} to ${expectedLength}`);
}
return expectedLength;
}
// When modifying responses, we ensure you always have correct framing, but in a slightly more
// relaxed way than for requests: we allow missing framing and HEAD responses, we just correct invalid values.
function getResponseContentLengthAfterModification(body, originalHeaders, replacementHeaders, context) {
// For HEAD requests etc, you can set an arbitrary content-length header regardless
// of the empty body, so we don't bother checking anything. For HTTP/2, framing is
// optional/advisory so we can just skip this entirely.
if (context.httpVersion !== 1 || context.httpMethod === 'HEAD')
return undefined;
const resultingHeaders = replacementHeaders || originalHeaders;
if ((0, header_utils_1.getHeaderValue)(resultingHeaders, 'transfer-encoding')?.includes('chunked')) {
return undefined; // No content-length header games needed
}
const expectedLength = (0, util_2.byteLength)(body).toString();
const contentLengthHeader = (0, header_utils_1.getHeaderValue)(resultingHeaders, 'content-length');
if (contentLengthHeader === expectedLength)
return undefined;
if (contentLengthHeader === undefined)
return undefined; // Differs from requests - we do allow this for responses
// The content-length is set, but it's wrong.
// If there is a wrong content-length set, and it's not just leftover from the original headers (i.e.
// you intentionally set it) then we show a warning since we're ignoring your (invalid) instructions.
if (contentLengthHeader && contentLengthHeader !== (0, header_utils_1.getHeaderValue)(originalHeaders, 'content-length')) {
console.warn(`Invalid response content-length header was ignored - resetting from ${contentLengthHeader} to ${expectedLength}`);
}
return expectedLength;
}
// Checks whether we should use strict HTTPS validation (rather than skipping HTTPS errors)
// for the given hostname and port, based on the given config.
function shouldUseStrictHttps(hostname, port, ignoreHostHttpsErrors) {
let skipHttpsErrors = false;
if (ignoreHostHttpsErrors === true) {
// Ignore cert errors if `ignoreHostHttpsErrors` is set to true, or
skipHttpsErrors = true;
}
else if (Array.isArray(ignoreHostHttpsErrors) && (
// if the whole hostname or host+port is whitelisted
_.includes(ignoreHostHttpsErrors, hostname) ||
_.includes(ignoreHostHttpsErrors, `${hostname}:${port}`))) {
skipHttpsErrors = true;
}
return !skipHttpsErrors;
}
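/*
 * Illustrative sketch (hypothetical values):
 *
 *   shouldUseStrictHttps('example.com', 443, []);                // => true
 *   shouldUseStrictHttps('example.com', 443, ['example.com']);   // => false
 *   shouldUseStrictHttps('example.com', 443, true);              // => false
 */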
exports.getDnsLookupFunction = _.memoize((lookupOptions) => {
if (!lookupOptions) {
// By default, use 10s caching of hostnames, just to avoid repeatedly paying the
// ~10ms query delay for 'localhost' with every single request.
return new dns_1.CachedDns(10000).lookup;
}
else {
// Or if options are provided, use those to configure advanced DNS cases:
const cacheableLookup = new cacheable_lookup_1.default({
maxTtl: lookupOptions.maxTtl,
errorTtl: lookupOptions.errorTtl,
// As little caching of "use the fallback server" as possible:
fallbackDuration: 0
});
if (lookupOptions.servers) {
cacheableLookup.servers = lookupOptions.servers;
}
return cacheableLookup.lookup;
}
});
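/*
 * Illustrative sketch (hypothetical options): the result is a node-style lookup function,
 * memoized per options value, that can be passed wherever a `lookup` option is accepted.
 *
 *   const defaultLookup = exports.getDnsLookupFunction(undefined);   // 10s-cached DNS
 *   const customLookup = exports.getDnsLookupFunction({ servers: ['1.1.1.1'], maxTtl: 60 });
 */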
async function getClientRelativeHostname(hostname, remoteIp, lookupFn) {
if (!remoteIp || (0, ip_utils_1.isLocalhostAddress)(remoteIp))
return hostname;
// Otherwise, we have a request from a different machine (or Docker container/VM/etc) and we need
// to make sure that 'localhost' means _that_ machine, not ourselves.
// This check must be run before req modifications. If a modification changes the address to localhost,
// then presumably it really does mean *this* localhost.
if (
// If the hostname is a known localhost address, we're done:
(0, ip_utils_1.isLocalhostAddress)(hostname) ||
// Otherwise, we look up the IP, so we can accurately check for localhost-bound requests. This adds a little
// delay, but since it's cached we save the equivalent delay in request lookup later, so it should be
// effectively free. We ignore errors here, delegating unresolvable hostnames etc. to request processing later.
(0, ip_utils_1.isLocalhostAddress)(await (0, dns_1.dnsLookup)(lookupFn, hostname).catch(() => null))) {
return (0, ip_utils_1.normalizeIP)(remoteIp);
// Note that we just redirect - we don't update the host header. From the POV of the target, it's still
// 'localhost' traffic that should appear identical to normal.
}
else {
return hostname;
}
}
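/*
 * Illustrative sketch (hypothetical addresses, assuming lookupFn resolves 'localhost' to a
 * loopback address):
 *
 *   await getClientRelativeHostname('example.com', '127.0.0.1', lookupFn);   // => 'example.com'
 *   await getClientRelativeHostname('localhost', '172.17.0.2', lookupFn);    // => '172.17.0.2'
 *   // i.e. 'localhost' requested from a remote client is redirected back to that client.
 */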
function buildUpstreamErrorTags(e) {
const tags = [];
// OpenSSL can throw all sorts of weird & wonderful errors here, and rarely exposes a
// useful error code from them. To handle that, we try to detect the most common cases,
// notably including the useless but common 'unsupported' error that covers all
// OpenSSL-unsupported (e.g. legacy) configurations.
if (!e.code && e.stack?.split('\n')[1]?.includes('node:internal/tls/secure-context')) {
let tlsErrorTag;
if (e.message === 'unsupported') {
e.code = 'ERR_TLS_CONTEXT_UNSUPPORTED';
tlsErrorTag = 'context-unsupported';
e.message = 'Unsupported TLS configuration';
}
else {
e.code = 'ERR_TLS_CONTEXT_UNKNOWN';
tlsErrorTag = 'context-unknown';
e.message = `TLS context error: ${e.message}`;
}
tags.push(`passthrough-tls-error:${tlsErrorTag}`);
}
// All raw error codes are included in the tags:
tags.push('passthrough-error:' + e.code);
// We build tags for SSL alerts, for easier recognition elsewhere:
const tlsAlertMatch = /SSL alert number (\d+)/.exec(e.message ?? '');
if (tlsAlertMatch) {
tags.push('passthrough-tls-error:ssl-alert-' + tlsAlertMatch[1]);
}
if (e instanceof request_step_impls_1.AbortError) {
tags.push('passthrough-error:mockttp-abort');
}
return tags;
}
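/*
 * Illustrative sketch (hypothetical error): error codes and recognised TLS alerts both
 * become tags.
 *
 *   const err = new Error('upstream failure: SSL alert number 40');
 *   err.code = 'ECONNRESET';
 *   buildUpstreamErrorTags(err);
 *   // => ['passthrough-error:ECONNRESET', 'passthrough-tls-error:ssl-alert-40']
 */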
//# sourceMappingURL=passthrough-handling.js.map