// @graphql-hive/gateway
// Version:
// 1,540 lines (1,514 loc) • 50.7 kB
// JavaScript
import cluster from 'node:cluster';
import module from 'node:module';
import { availableParallelism, freemem, platform, release } from 'node:os';
import { join, isAbsolute, resolve, dirname } from 'node:path';
import { Option, Command, InvalidArgumentError } from '@commander-js/extra-typings';
import { process as process$1 } from '@graphql-mesh/cross-helpers';
import { LogLevel, DefaultLogger, registerTerminateHandler, isUrl, PubSub } from '@graphql-mesh/utils';
import { getGraphQLWSOptions, handleLoggingConfig as handleLoggingConfig$1, createGatewayRuntime } from '@graphql-hive/gateway-runtime';
import { lstat } from 'node:fs/promises';
import { pathToFileURL } from 'node:url';
import { promises } from 'node:fs';
import { createServer as createServer$1 } from 'node:http';
import { createServer } from 'node:https';
import { isValidPath, asArray } from '@graphql-tools/utils';
import { CodeFileLoader } from '@graphql-tools/code-file-loader';
import { GraphQLFileLoader } from '@graphql-tools/graphql-file-loader';
import { loadTypedefs } from '@graphql-tools/load';
// ---------------------------------------------------------------------------
// Tiny vendored duration-string parser ("parse-duration"-style helper).
// `unit` maps every accepted unit spelling to its length in milliseconds.
// A null-prototype object is used so lookups never hit Object.prototype keys.
// ---------------------------------------------------------------------------
const unit = Object.create(null);
// Base multipliers in milliseconds: minute, hour, day, year (365.25 days).
const m = 60000, h = m * 60, d = h * 24, y = d * 365.25;
unit.year = unit.yr = unit.y = y;
unit.month = unit.mo = unit.mth = y / 12;
unit.week = unit.wk = unit.w = d * 7;
unit.day = unit.d = d;
unit.hour = unit.hr = unit.h = h;
unit.minute = unit.min = unit.m = m;
unit.second = unit.sec = unit.s = 1000;
unit.millisecond = unit.millisec = unit.ms = 1;
unit.microsecond = unit.microsec = unit.us = unit.µs = 1e-3;
unit.nanosecond = unit.nanosec = unit.ns = 1e-6;
// These three entries are parser configuration, not units: digit-group and
// placeholder characters are stripped, and `decimal` is normalized to '.'
// before matching.
unit.group = ',';
unit.decimal = '.';
unit.placeholder = ' _';
// One "<number><unit>" pair: a decimal number (optional exponent) followed by
// up to 14 Unicode letters (so e.g. "µs" matches). The /g flag lets
// String#replace walk every pair in the input.
const durationRE = /((?:\d{1,16}(?:\.\d{1,16})?|\.\d{1,16})(?:[eE][-+]?\d{1,4})?)\s?([\p{L}]{0,14})/gu;
parse.unit = unit;
/**
 * convert `str` to ms
 *
 * Sums every "<number><unit>" pair found in `str` (e.g. "1h30m" -> 5400000)
 * and converts the total into `format` units. A pair without a unit inherits
 * the next-smaller unit than the previous pair ("1h30" -> "1h30m"), or
 * `format` for the first pair. Only a LEADING '-' negates the result.
 *
 * @param {string} str duration text; may contain several "<n><unit>" pairs
 * @param {string} format key in `parse.unit` for the returned value
 * @return {number} total expressed in `format` units, or null if no pair matched
 */
function parse(str = '', format = 'ms') {
  let result = null, prevUnits;
  String(str)
    .replace(new RegExp(`(\\d)[${parse.unit.placeholder}${parse.unit.group}](\\d)`, 'g'), '$1$2') // clean up group separators / placeholders
    .replace(parse.unit.decimal, '.') // normalize decimal separator
    .replace(durationRE, (_, n, units) => {
      // if no units, find next smallest units or fall back to format value
      // eg. 1h30 -> 1h30m
      if (!units) {
        if (prevUnits) {
          // `unit` is ordered largest-to-smallest, so the first entry whose
          // multiplier is below prevUnits is the next-smaller unit.
          for (const u in parse.unit) if (parse.unit[u] < prevUnits) { units = u; break }
        }
        else units = format;
      }
      else units = units.toLowerCase();
      // Unknown spellings also try the singular form ("mins" -> "min").
      prevUnits = units = parse.unit[units] || parse.unit[units.replace(/s$/, '')];
      if (units) result = (result || 0) + n * units;
    });
  return result && ((result / (parse.unit[format] || 1)) * (str[0] === '-' ? -1 : 1))
}
// Taken from graphql-js
// https://github.com/graphql/graphql-js/blob/main/src/jsutils/inspect.ts
const MAX_RECURSIVE_DEPTH = 3;
/**
 * Render an arbitrary value as a human-readable string for error messages.
 */
function inspect(value) {
  const seenValues = [];
  return formatValue(value, seenValues);
}
/**
 * Dispatch on the runtime type of `value`: strings are JSON-quoted,
 * functions are shown as "[function name]", objects are delegated to
 * formatObjectValue, everything else falls back to String().
 */
function formatValue(value, seenValues) {
  const kind = typeof value;
  if (kind === 'string') {
    return JSON.stringify(value);
  }
  if (kind === 'function') {
    return value.name ? `[function ${value.name}]` : '[function]';
  }
  if (kind === 'object') {
    return formatObjectValue(value, seenValues);
  }
  return String(value);
}
/**
 * Format an Error for display. GraphQL errors already render a complete,
 * printable message from toString(); other errors include the stack trace.
 *
 * @param {Error} value
 * @returns {string}
 */
function formatError(value) {
  // Fixed: this previously read `value.name = 'GraphQLError'` (assignment),
  // which clobbered every error's name and made the stack branch below
  // unreachable. Upstream graphql-js compares with `===`.
  if (value.name === 'GraphQLError') {
    return value.toString();
  }
  return `${value.name}: ${value.message};\n ${value.stack}`;
}
/**
 * Stringify a non-primitive value for error output: handles null, Errors
 * (including AggregateError fan-out), circular references, toJSON()
 * conversions, arrays and plain objects.
 */
function formatObjectValue(value, previouslySeenValues) {
  if (value === null) {
    return 'null';
  }
  if (value instanceof Error) {
    // AggregateError is detected by name BEFORE delegating to formatError.
    if (value.name === 'AggregateError') {
      return (
        formatError(value) + '\n' + formatArray(value.errors, previouslySeenValues)
      );
    }
    return formatError(value);
  }
  if (previouslySeenValues.includes(value)) {
    return '[Circular]';
  }
  const seenValues = [...previouslySeenValues, value];
  if (isJSONable(value)) {
    const jsonValue = value.toJSON();
    // Guard against toJSON() returning the receiver itself (infinite loop).
    if (jsonValue !== value) {
      return typeof jsonValue === 'string' ? jsonValue : formatValue(jsonValue, seenValues);
    }
  } else if (Array.isArray(value)) {
    return formatArray(value, seenValues);
  }
  return formatObject(value, seenValues);
}
/**
 * True when `value` exposes a callable toJSON() (Dates, GraphQL errors, …).
 * Throws on null/undefined, same as the property access it performs.
 */
function isJSONable(value) {
  const serializer = value.toJSON;
  return typeof serializer === 'function';
}
/**
 * Render an object as "{ key: value, … }", collapsing to "[Tag]" once the
 * recursion depth limit is exceeded.
 */
function formatObject(object, seenValues) {
  const entries = Object.entries(object);
  if (entries.length === 0) {
    return '{}';
  }
  if (seenValues.length > MAX_RECURSIVE_DEPTH) {
    return '[' + getObjectTag(object) + ']';
  }
  const body = entries
    .map(([key, val]) => `${key}: ${formatValue(val, seenValues)}`)
    .join(', ');
  return `{ ${body} }`;
}
/**
 * Render an array as "[a, b, …]", collapsing to "[Array]" once the
 * recursion depth limit is exceeded.
 */
function formatArray(array, seenValues) {
  if (array.length === 0) {
    return '[]';
  }
  if (seenValues.length > MAX_RECURSIVE_DEPTH) {
    return '[Array]';
  }
  const items = array.map((item) => formatValue(item, seenValues));
  return '[' + items.join(', ') + ']';
}
/**
 * Best-effort type tag for an object: the Object.prototype.toString tag
 * ("[object Foo]" -> "Foo"), refined with the constructor name for plain
 * "Object" tags when a non-empty constructor name exists.
 */
function getObjectTag(object) {
  // "[object Foo]" — strip the fixed 8-char prefix and trailing "]".
  const tag = Object.prototype.toString.call(object).slice(8, -1);
  if (tag === 'Object' && typeof object.constructor === 'function') {
    const { name } = object.constructor;
    if (typeof name === 'string' && name !== '') {
      return name;
    }
  }
  return tag;
}
/**
 * Loose "is this flag on?" check for env-style values: accepts the boolean
 * true, the number 1, and the strings "1"/"t"/"true"/"y"/"yes".
 */
function truthy(val) {
  if (val === true || val === 1) {
    return true;
  }
  const enabled = ['1', 't', 'true', 'y', 'yes'];
  return enabled.includes(String(val));
}
/**
 * Structured logger that emits one JSON object per line containing the
 * level, an ISO timestamp, the (optional) logger name, bound metadata,
 * joined message text, and any non-mergeable extras.
 *
 * Level resolution: an explicit `opts.level` wins; otherwise the level
 * defaults to `LogLevel.info` and is raised to `debug` when the DEBUG
 * environment variable (or `globalThis.DEBUG`) is truthy or mentions this
 * logger's name.
 */
class JSONLogger {
  name;     // display name; child loggers append to it
  meta;     // key/value pairs merged into every emitted line
  logLevel; // numeric threshold; messages below it are dropped
  console;  // sink, swappable for testing
  constructor(opts) {
    this.name = opts?.name;
    this.console = opts?.console || console;
    this.meta = opts?.meta || {};
    const debugStrs = [process$1.env["DEBUG"], globalThis.DEBUG];
    if (opts?.level != null) {
      this.logLevel = opts.level;
    } else {
      this.logLevel = LogLevel.info;
      for (const debugStr of debugStrs) {
        if (debugStr) {
          // DEBUG=1/true/yes… enables debug logging globally…
          if (truthy(debugStr)) {
            this.logLevel = LogLevel.debug;
            break;
          }
          // …and DEBUG may also list specific logger names.
          if (opts?.name) {
            if (debugStr?.toString()?.includes(opts.name)) {
              this.logLevel = LogLevel.debug;
              break;
            }
          }
        }
      }
    }
  }
  /** Alias of info(): `log` writes at the info level. */
  log(...messageArgs) {
    if (this.logLevel > LogLevel.info) {
      return;
    }
    const finalMessage = this.prepareFinalMessage("info", messageArgs);
    this.console.log(finalMessage);
  }
  warn(...messageArgs) {
    if (this.logLevel > LogLevel.warn) {
      return;
    }
    const finalMessage = this.prepareFinalMessage("warn", messageArgs);
    this.console.warn(finalMessage);
  }
  info(...messageArgs) {
    if (this.logLevel > LogLevel.info) {
      return;
    }
    const finalMessage = this.prepareFinalMessage("info", messageArgs);
    this.console.info(finalMessage);
  }
  error(...messageArgs) {
    if (this.logLevel > LogLevel.error) {
      return;
    }
    const finalMessage = this.prepareFinalMessage("error", messageArgs);
    this.console.error(finalMessage);
  }
  debug(...messageArgs) {
    if (this.logLevel > LogLevel.debug) {
      return;
    }
    const finalMessage = this.prepareFinalMessage("debug", messageArgs);
    this.console.debug(finalMessage);
  }
  /**
   * Create a derived logger: a string argument extends the name, an object
   * argument extends the bound metadata. Level and sink are inherited.
   */
  child(nameOrMeta) {
    let newName;
    let newMeta;
    if (typeof nameOrMeta === "string") {
      newName = this.name ? `${this.name}, ${nameOrMeta}` : nameOrMeta;
      newMeta = this.meta;
    } else if (typeof nameOrMeta === "object") {
      newName = this.name;
      newMeta = { ...this.meta, ...nameOrMeta };
    } else {
      throw new Error("Invalid argument type");
    }
    return new JSONLogger({
      name: newName,
      meta: newMeta,
      level: this.logLevel,
      console: this.console
    });
  }
  /** Like child(), but mutates this logger in place and returns it. */
  addPrefix(prefix) {
    if (typeof prefix === "string") {
      this.name = this.name ? `${this.name}, ${prefix}` : prefix;
    } else if (typeof prefix === "object") {
      this.meta = { ...this.meta, ...prefix };
    }
    return this;
  }
  /**
   * Flatten the message arguments and serialize them, together with the
   * bound metadata, into a single JSON line:
   * - functions are invoked lazily and replaced by their return value
   * - values with toJSON() are converted first
   * - AggregateErrors contribute their individual errors
   * - strings/numbers/booleans are joined into `msg`
   * - Errors contribute message + stack
   * - plain objects are merged into the output line
   * - anything else (Map, Set, tagged instances, …) is rendered with
   *   inspect() and collected under `extras`
   */
  prepareFinalMessage(level, messageArgs) {
    const flattenedMessageArgs = messageArgs.flat(Infinity).flatMap((messageArg) => {
      if (typeof messageArg === "function") {
        messageArg = messageArg();
      }
      if (messageArg?.toJSON) {
        messageArg = messageArg.toJSON();
      }
      if (messageArg instanceof AggregateError) {
        return messageArg.errors;
      }
      return messageArg;
    });
    const finalMessage = {
      ...this.meta,
      level,
      time: (/* @__PURE__ */ new Date()).toISOString()
    };
    if (this.name) {
      finalMessage["name"] = this.name;
    }
    const extras = [];
    for (const messageArg of flattenedMessageArgs) {
      if (messageArg == null) {
        continue;
      }
      const typeofMessageArg = typeof messageArg;
      if (typeofMessageArg === "string" || typeofMessageArg === "number" || typeofMessageArg === "boolean") {
        finalMessage["msg"] = finalMessage["msg"] ? finalMessage["msg"] + ", " + messageArg : messageArg;
      } else if (typeofMessageArg === "object") {
        if (messageArg instanceof Error) {
          finalMessage["msg"] = finalMessage["msg"] ? finalMessage["msg"] + ", " + messageArg.message : messageArg.message;
          finalMessage["stack"] = messageArg.stack;
        } else if (Object.prototype.toString.call(messageArg) === "[object Object]") {
          // Fixed: the previous check used startsWith("[object"), which is
          // true for EVERY value and made the `extras` branch unreachable.
          // Only plain objects are merged into the log line.
          Object.assign(finalMessage, messageArg);
        } else {
          extras.push(messageArg);
        }
      }
    }
    if (extras.length) {
      if (extras.length === 1) {
        finalMessage["extras"] = inspect(extras[0]);
      } else {
        finalMessage["extras"] = extras.map((extra) => inspect(extra));
      }
    }
    return JSON.stringify(finalMessage);
  }
}
/**
 * Pick the logger implementation: LOG_FORMAT=json|pretty (env or global)
 * takes priority, then NODE_ENV=production implies JSON; otherwise use the
 * pretty DefaultLogger.
 */
function getDefaultLogger(opts) {
  const format = process$1.env["LOG_FORMAT"] || globalThis.LOG_FORMAT;
  if (format) {
    const normalized = format.toLowerCase();
    if (normalized === "json") {
      return new JSONLogger(opts);
    }
    if (normalized === "pretty") {
      return new DefaultLogger(opts?.name, opts?.level);
    }
  }
  const nodeEnv = process$1.env["NODE_ENV"] || globalThis.NODE_ENV;
  return nodeEnv === "production"
    ? new JSONLogger(opts)
    : new DefaultLogger(opts?.name, opts?.level);
}
// Extensions probed (in order) when searching for a config file.
const defaultConfigExtensions = [".ts", ".mts", ".cts", ".js", ".mjs", ".cjs"];
const defaultConfigFileName = "gateway.config";
/** Build the candidate file names by appending every supported extension. */
function createDefaultConfigPaths(configFileName) {
  return defaultConfigExtensions.map((ext) => configFileName + ext);
}
/**
 * Load the gateway config: either from an explicit `opts.configPath`
 * (throws when missing or when it exports no `gatewayConfig`), or by
 * probing the default config file names in the working directory (missing
 * files are tolerated). Returns {} when nothing was loaded.
 */
async function loadConfig(opts) {
  const { log, quiet } = opts;
  const fileExists = (path) => lstat(path).then(() => true).catch(() => false);
  const importGatewayConfig = async (absolutePath) => {
    const mod = await import(pathToFileURL(absolutePath).toString());
    return Object(mod).gatewayConfig || null;
  };
  let importedConfig = null;
  if (opts.configPath) {
    const configPath = isAbsolute(opts.configPath) ? opts.configPath : join(process.cwd(), opts.configPath);
    !quiet && log.info(`Loading config file at path ${configPath}`);
    if (!(await fileExists(configPath))) {
      throw new Error(`Cannot find config file at ${configPath}`);
    }
    importedConfig = await importGatewayConfig(configPath);
    if (!importedConfig) {
      throw new Error(
        `No "gatewayConfig" exported from config file at ${configPath}`
      );
    }
  } else {
    !quiet && log.debug(`Searching for default config files`);
    const configPaths = [
      ...createDefaultConfigPaths(defaultConfigFileName),
      ...createDefaultConfigPaths(opts.configFileName),
      // For backwards compatibility of Mesh Compose users
      ...createDefaultConfigPaths("mesh.config")
    ];
    for (const configPath of configPaths) {
      const absoluteConfigPath = join(process.cwd(), configPath);
      if (!(await fileExists(absoluteConfigPath))) {
        continue;
      }
      !quiet && log.info(`Found default config file ${absoluteConfigPath}`);
      importedConfig = await importGatewayConfig(absoluteConfigPath);
      if (!importedConfig) {
        !quiet && log.warn(
          `No "gatewayConfig" exported from config file at ${absoluteConfigPath}`
        );
      }
      // Stop at the first existing candidate, even without a valid export.
      break;
    }
  }
  if (importedConfig) {
    !quiet && log.info("Loaded config");
  } else {
    !quiet && log.debug("No config loaded");
  }
  return importedConfig || {};
}
/**
 * Instantiate the optional built-in plugins that are enabled in `config`.
 * Each plugin package is imported lazily, so disabled features never load.
 */
async function getBuiltinPluginsFromConfig(config, ctx) {
  const { jwt, prometheus, openTelemetry, rateLimiting, jit, awsSigv4 } = config;
  const plugins = [];
  if (jwt) {
    const { useJWT } = await import('@graphql-mesh/plugin-jwt-auth');
    plugins.push(useJWT(jwt));
  }
  if (prometheus) {
    const { default: useMeshPrometheus } = await import('@graphql-mesh/plugin-prometheus');
    plugins.push(useMeshPrometheus(prometheus));
  }
  if (openTelemetry) {
    const { useOpenTelemetry } = await import('@graphql-mesh/plugin-opentelemetry');
    plugins.push(useOpenTelemetry({ logger: ctx.logger, ...openTelemetry }));
  }
  if (rateLimiting) {
    const { default: useMeshRateLimit } = await import('@graphql-mesh/plugin-rate-limit');
    plugins.push(useMeshRateLimit({ ...rateLimiting, cache: ctx.cache }));
  }
  if (jit) {
    const { useJIT } = await import('@graphql-mesh/plugin-jit');
    plugins.push(useJIT());
  }
  if (awsSigv4) {
    const { useAWSSigv4 } = await import('@graphql-hive/plugin-aws-sigv4');
    plugins.push(useAWSSigv4(awsSigv4));
  }
  return plugins;
}
/**
 * Resolve the cache implementation for the gateway. `config.cache` may be a
 * factory function, a typed config object (redis / cfw-kv / upstash-redis /
 * localforage), an already-constructed cache instance, or absent (falls
 * back to localforage). All backends are imported lazily.
 */
async function getCacheInstanceFromConfig(config, ctx) {
  if (typeof config.cache === "function") {
    // Factory form: the user builds the cache from the runtime context.
    // Called as a method of `config` to preserve `this`.
    return config.cache(ctx);
  }
  if (config.cache && "type" in config.cache) {
    const cacheConfig = config.cache;
    if (cacheConfig.type === "redis") {
      const { default: RedisCache } = await import('@graphql-mesh/cache-redis');
      return new RedisCache({ ...ctx, ...cacheConfig });
    }
    if (cacheConfig.type === "cfw-kv") {
      const { default: CloudflareKVCacheStorage } = await import('@graphql-mesh/cache-cfw-kv');
      return new CloudflareKVCacheStorage({ ...ctx, ...cacheConfig });
    }
    if (cacheConfig.type === "upstash-redis") {
      const { default: UpstashRedisCache } = await import('@graphql-mesh/cache-upstash-redis');
      return new UpstashRedisCache({ ...ctx, ...cacheConfig });
    }
    if (cacheConfig.type !== "localforage") {
      ctx.logger.warn(
        "Unknown cache type, falling back to localforage",
        cacheConfig
      );
    }
    const { default: LocalforageCache2 } = await import('@graphql-mesh/cache-localforage');
    return new LocalforageCache2({ ...ctx, ...cacheConfig });
  }
  if (config.cache) {
    // Already a cache instance — use it as-is.
    return config.cache;
  }
  const { default: LocalforageCache } = await import('@graphql-mesh/cache-localforage');
  return new LocalforageCache(ctx);
}
/**
 * Serve the gateway runtime with Bun's built-in server (Bun.serve).
 * Wires SSL credentials, optional GraphQL-over-WebSocket support, and
 * registers the server on the runtime's disposable stack for shutdown.
 */
async function startBunServer(gwRuntime, opts) {
  const serverOptions = {
    fetch: gwRuntime,
    port: opts.port || defaultOptions.port,
    hostname: opts.host || defaultOptions.host,
    // Allow several processes to bind the same port (SO_REUSEPORT).
    reusePort: true,
    idleTimeout: opts.requestTimeout
  };
  if (opts.sslCredentials) {
    // Credential values are file paths; hand Bun lazy file handles.
    if (opts.sslCredentials.ca_file_name) {
      serverOptions.ca = Bun.file(opts.sslCredentials.ca_file_name);
    }
    if (opts.sslCredentials.cert_file_name) {
      serverOptions.cert = Bun.file(opts.sslCredentials.cert_file_name);
    }
    if (opts.sslCredentials.dh_params_file_name) {
      serverOptions.dhParamsFile = opts.sslCredentials.dh_params_file_name;
    }
    if (opts.sslCredentials.key_file_name) {
      serverOptions.key = Bun.file(opts.sslCredentials.key_file_name);
    }
    if (opts.sslCredentials.passphrase) {
      serverOptions.passphrase = opts.sslCredentials.passphrase;
    }
    // NOTE(review): ssl_ciphers is checked but the empty statement below
    // silently ignores it on Bun — presumably there is no equivalent
    // Bun.serve option; confirm this is intentional.
    if (opts.sslCredentials.ssl_ciphers) ;
    if (opts.sslCredentials.ssl_prefer_low_memory_usage) {
      serverOptions.lowMemoryMode = opts.sslCredentials.ssl_prefer_low_memory_usage;
    }
  }
  if (!opts.disableWebsockets) {
    const { makeHandler } = await import('graphql-ws/use/bun');
    serverOptions.websocket = makeHandler(
      getGraphQLWSOptions(gwRuntime, (ctx) => ({
        socket: ctx.extra.socket,
        ...ctx.extra.socket.data || {}
      }))
    );
    // Upgrade WebSocket handshakes ourselves; every other request falls
    // through to the gateway runtime.
    serverOptions.fetch = function(request, server2) {
      if (request.headers.has("Sec-WebSocket-Key") && server2.upgrade(request, {
        data: {
          request
        }
      })) {
        // Returning undefined signals that the connection was upgraded.
        return void 0;
      }
      return gwRuntime.handleRequest(request, server2);
    };
  }
  const server = Bun.serve(serverOptions);
  opts.log.info(`Listening on ${server.url}`);
  // Dispose the server together with the runtime.
  gwRuntime.disposableStack.use(server);
}
/**
 * Serve the gateway runtime with Node's http/https server. Reads the SSL
 * credential files when provided, optionally attaches a graphql-ws
 * WebSocket server, and registers shutdown handlers on the runtime's
 * disposable stack. Resolves once the server is listening; rejects when
 * the port/host cannot be bound.
 */
async function startNodeHttpServer(gwRuntime, opts) {
  const {
    log,
    host = defaultOptions.host,
    port = defaultOptions.port,
    sslCredentials,
    maxHeaderSize,
    disableWebsockets,
    requestTimeout
  } = opts;
  let server;
  let protocol;
  if (sslCredentials) {
    protocol = "https";
    // Each *_file_name credential is a file path; read only those supplied.
    const sslOptionsForNodeHttp = {};
    if (sslCredentials.ca_file_name) {
      sslOptionsForNodeHttp.ca = await promises.readFile(
        sslCredentials.ca_file_name
      );
    }
    if (sslCredentials.cert_file_name) {
      sslOptionsForNodeHttp.cert = await promises.readFile(
        sslCredentials.cert_file_name
      );
    }
    if (sslCredentials.dh_params_file_name) {
      sslOptionsForNodeHttp.dhparam = await promises.readFile(
        sslCredentials.dh_params_file_name
      );
    }
    if (sslCredentials.key_file_name) {
      sslOptionsForNodeHttp.key = await promises.readFile(
        sslCredentials.key_file_name
      );
    }
    if (sslCredentials.passphrase) {
      sslOptionsForNodeHttp.passphrase = sslCredentials.passphrase;
    }
    if (sslCredentials.ssl_ciphers) {
      sslOptionsForNodeHttp.ciphers = sslCredentials.ssl_ciphers;
    }
    if (sslCredentials.ssl_prefer_low_memory_usage) {
      // Mapped to honorCipherOrder: prefer the server's cipher order over
      // the client's during TLS negotiation.
      sslOptionsForNodeHttp.honorCipherOrder = true;
    }
    server = createServer(
      {
        ...sslOptionsForNodeHttp,
        maxHeaderSize,
        requestTimeout
      },
      gwRuntime
    );
  } else {
    protocol = "http";
    server = createServer$1(
      {
        maxHeaderSize,
        requestTimeout
      },
      gwRuntime
    );
  }
  // Show a friendlier URL when bound to all interfaces.
  const url = `${protocol}://${host}:${port}`.replace("0.0.0.0", "localhost");
  log.debug(`Starting server on ${url}`);
  if (!disableWebsockets) {
    log.debug("Setting up WebSocket server");
    const { WebSocketServer } = await import('ws');
    const wsServer = new WebSocketServer({
      path: gwRuntime.graphqlEndpoint,
      server
    });
    const { useServer } = await import('graphql-ws/use/ws');
    useServer(
      getGraphQLWSOptions(gwRuntime, (ctx) => ({
        req: ctx.extra?.request,
        socket: ctx.extra?.socket
      })),
      wsServer
    );
    // Close the WebSocket server when the runtime is disposed.
    gwRuntime.disposableStack.defer(
      () => new Promise((resolve, reject) => {
        log.info(`Stopping the WebSocket server`);
        wsServer.close((err) => {
          if (err) {
            return reject(err);
          }
          log.info(`Stopped the WebSocket server successfully`);
          return resolve();
        });
      })
    );
  }
  return new Promise((resolve, reject) => {
    // Surface bind errors (EADDRINUSE, EACCES, …) to the caller.
    server.once("error", reject);
    server.listen(port, host, () => {
      log.info(`Listening on ${url}`);
      // On disposal: drop open connections, then close the listener.
      gwRuntime.disposableStack.defer(
        () => new Promise((resolve2) => {
          process.stderr.write("\n");
          log.info(`Stopping the server`);
          server.closeAllConnections();
          server.close(() => {
            log.info(`Stopped the server successfully`);
            return resolve2();
          });
        })
      );
      return resolve();
    });
  });
}
/**
 * Start the HTTP(S) server for a gateway runtime, choosing Bun's server
 * when running under Bun and the Node http/https server otherwise. Also
 * wires the IPC "invalidateUnifiedGraph" message (sent by the fork
 * primary) to the runtime.
 */
function startServerForRuntime(runtime, {
  log,
  host = defaultOptions.host,
  port = defaultOptions.port,
  sslCredentials,
  maxHeaderSize = 16384,
  disableWebsockets = false,
  requestTimeout
}) {
  process.on("message", (message) => {
    if (message === "invalidateUnifiedGraph") {
      log.info(`Invalidating Supergraph`);
      runtime.invalidateUnifiedGraph();
    }
  });
  const serverOpts = {
    log,
    host,
    port,
    maxHeaderSize,
    disableWebsockets,
    // Fix: requestTimeout was previously dropped here even though both
    // startBunServer and startNodeHttpServer read opts.requestTimeout.
    ...(requestTimeout != null ? { requestTimeout } : {}),
    ...sslCredentials ? { sslCredentials } : {}
  };
  const startServer = globalThis.Bun ? startBunServer : startNodeHttpServer;
  return startServer(runtime, serverOpts);
}
/**
 * Fork `config.fork` worker processes when running as the cluster primary.
 * Returns true when this process is the primary of a fork setup (so the
 * caller should not start a server itself) and false otherwise, including
 * when forking fails.
 */
function handleFork(log, config) {
  try {
    if (cluster.isPrimary && config.fork && config.fork > 1) {
      const activeWorkers = new Set();
      let shuttingDown = false;
      log.debug(`Forking ${config.fork} workers`);
      for (let i = 0; i < config.fork; i++) {
        const worker = cluster.fork();
        const workerLogger = log.child({ worker: worker.id });
        worker.once("exit", (code, signal) => {
          const logData = { signal };
          if (code != null) {
            logData["code"] = code;
          }
          if (shuttingDown) {
            workerLogger.debug("exited", logData);
          } else {
            workerLogger.error(
              "exited unexpectedly. A restart is recommended to ensure the stability of the service",
              logData
            );
          }
          activeWorkers.delete(worker);
          // If every worker died without a shutdown request, give up.
          if (!shuttingDown && activeWorkers.size === 0) {
            log.error(`All workers exited unexpectedly. Exiting`, logData);
            process.exit(1);
          }
        });
        activeWorkers.add(worker);
      }
      registerTerminateHandler((signal) => {
        log.info("Killing workers", { signal });
        shuttingDown = true;
        for (const worker of activeWorkers) {
          worker.kill(signal);
        }
      });
      return true;
    }
  } catch (e) {
    log.error(`Error while forking workers: `, e);
  }
  return false;
}
/**
 * Apply a logging section from the config file, replacing ctx.log with the
 * logger produced by the runtime's handleLoggingConfig implementation.
 */
function handleLoggingConfig(loggingConfig, ctx) {
  const nextLog = handleLoggingConfig$1(loggingConfig, ctx.log);
  ctx.log = nextLog;
}
/**
 * Normalize reporting settings from the config file and CLI flags into a
 * single reporting object ("hive" or "graphos"), or null when reporting is
 * not configured through these options. Invalid flag combinations log an
 * error and terminate the process.
 *
 * NOTE(review): cliOpts keys that are present but undefined still override
 * confOpts in the `{ ...confOpts, ...cliOpts }` merge — confirm that this
 * precedence (CLI always wins, even when unset) is intended.
 */
function handleReportingConfig(ctx, loadedConfig, cliOpts) {
  const fail = (message) => {
    ctx.log.error(message);
    process.exit(1);
  };
  const reportingConf = loadedConfig.reporting;
  const confOpts = {};
  if (reportingConf?.type === "hive") {
    confOpts.hiveRegistryToken = reportingConf.token;
    confOpts.hiveUsageTarget = reportingConf.target;
    confOpts.hiveUsageAccessToken = reportingConf.token;
  } else if (reportingConf?.type === "graphos") {
    confOpts.apolloGraphRef = reportingConf.graphRef;
    confOpts.apolloKey = reportingConf.apiKey;
  }
  const opts = { ...confOpts, ...cliOpts };
  if (cliOpts.hiveRegistryToken && cliOpts.hiveUsageAccessToken) {
    fail(
      `Cannot use "--hive-registry-token" with "--hive-usage-access-token". Please use "--hive-usage-target" and "--hive-usage-access-token" or the config instead.`
    );
  }
  if (cliOpts.hiveRegistryToken && opts.hiveUsageTarget) {
    fail(
      `Cannot use "--hive-registry-token" with a target. Please use "--hive-usage-target" and "--hive-usage-access-token" or the config instead.`
    );
  }
  if (opts.hiveUsageTarget && !opts.hiveUsageAccessToken) {
    fail(
      `Hive usage target needs an access token. Please provide it through the "--hive-usage-access-token <token>" option or the config.`
    );
  }
  if (opts.hiveUsageAccessToken && !opts.hiveUsageTarget) {
    fail(
      `Hive usage access token needs a target. Please provide it through the "--hive-usage-target <target>" option or the config.`
    );
  }
  const hiveUsageAccessToken = opts.hiveUsageAccessToken || opts.hiveRegistryToken;
  if (hiveUsageAccessToken) {
    if (opts.hiveUsageTarget) {
      ctx.log.info(`Configuring Hive usage reporting`);
    } else {
      ctx.log.info(`Configuring Hive registry reporting`);
    }
    return {
      ...loadedConfig.reporting,
      type: "hive",
      token: hiveUsageAccessToken,
      target: opts.hiveUsageTarget
    };
  }
  if (opts.apolloKey) {
    ctx.log.info(`Configuring Apollo GraphOS registry reporting`);
    if (!opts.apolloGraphRef?.includes("@")) {
      fail(
        `Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant>. Please provide a valid graph ref ${opts.apolloGraphRef ? `not ${opts.apolloGraphRef}` : ""}.`
      );
    }
    return {
      ...loadedConfig.reporting,
      type: "graphos",
      apiKey: opts.apolloKey,
      graphRef: opts.apolloGraphRef
    };
  }
  return null;
}
const addCommand$2 = (ctx, cli) => cli.command("proxy").description(
"serve a proxy to a GraphQL API and add additional features such as monitoring/tracing, caching, rate limiting, security, and more"
).argument("[endpoint]", "URL of the endpoint GraphQL API to proxy").option(
"--schema <schemaPathOrUrl>",
"path to the GraphQL schema file or a url from where to pull the schema"
).action(async function proxy(endpoint) {
const {
hiveCdnEndpoint,
hiveCdnKey,
hiveRegistryToken,
hiveUsageTarget,
hiveUsageAccessToken,
maskedErrors,
hivePersistedDocumentsEndpoint,
hivePersistedDocumentsToken,
...opts
} = this.optsWithGlobals();
const loadedConfig = await loadConfig({
log: ctx.log,
configPath: opts.configPath,
quiet: !cluster.isPrimary,
configFileName: ctx.configFileName
});
let proxy2;
if (endpoint) {
proxy2 = { endpoint };
} else if ("proxy" in loadedConfig) {
proxy2 = loadedConfig.proxy;
}
if (!proxy2) {
ctx.log.error(
"Proxy endpoint not defined. Please provide it in the [endpoint] argument or in the config file."
);
process.exit(1);
}
let schema;
const hiveCdnEndpointOpt = (
// TODO: take schema from optsWithGlobals once https://github.com/commander-js/extra-typings/pull/76 is merged
this.opts().schema || hiveCdnEndpoint
);
const hiveCdnLogger = ctx.log.child({ source: "Hive CDN" });
if (hiveCdnEndpointOpt) {
if (hiveCdnKey) {
if (!isUrl(hiveCdnEndpointOpt)) {
hiveCdnLogger.error(
"Endpoint must be a URL when providing --hive-cdn-key but got " + hiveCdnEndpointOpt
);
process.exit(1);
}
schema = {
type: "hive",
endpoint: hiveCdnEndpointOpt,
// see validation above
key: hiveCdnKey
};
} else {
schema = this.opts().schema;
}
} else if ("schema" in loadedConfig) {
schema = loadedConfig.schema;
}
if (hiveCdnKey && !schema) {
process.stderr.write(
`error: option '--schema <schemaPathOrUrl>' is required when providing '--hive-cdn-key <key>'
`
);
process.exit(1);
}
const registryConfig = {};
const reporting = handleReportingConfig(ctx, loadedConfig, {
hiveRegistryToken,
hiveUsageTarget,
hiveUsageAccessToken,
// proxy can only do reporting to hive registry
apolloGraphRef: void 0,
apolloKey: void 0
});
if (reporting) {
registryConfig.reporting = reporting;
}
const pubsub = loadedConfig.pubsub || new PubSub();
const cwd = loadedConfig.cwd || process.cwd();
if (loadedConfig.logging != null) {
handleLoggingConfig(loadedConfig.logging, ctx);
}
const cache = await getCacheInstanceFromConfig(loadedConfig, {
pubsub,
logger: ctx.log,
cwd
});
const builtinPlugins = await getBuiltinPluginsFromConfig(
{
...loadedConfig,
...opts
},
{
logger: ctx.log,
cache}
);
const config = {
...defaultOptions,
...loadedConfig,
...opts,
pollingInterval: opts.polling || ("pollingInterval" in loadedConfig ? loadedConfig.pollingInterval : void 0) || defaultOptions.pollingInterval,
...registryConfig,
proxy: proxy2,
schema,
logging: ctx.log,
productName: ctx.productName,
productDescription: ctx.productDescription,
productPackageName: ctx.productPackageName,
productLink: ctx.productLink,
...ctx.productLogo ? { productLogo: ctx.productLogo } : {},
pubsub,
cache,
plugins(ctx2) {
const userPlugins = loadedConfig.plugins?.(ctx2) ?? [];
return [...builtinPlugins, ...userPlugins];
}
};
if (hivePersistedDocumentsEndpoint) {
const token = hivePersistedDocumentsToken || loadedConfig.persistedDocuments && "token" in loadedConfig.persistedDocuments && loadedConfig.persistedDocuments.token;
if (!token) {
ctx.log.error(
`Hive persisted documents needs a CDN token. Please provide it through the "--hive-persisted-documents-token <token>" option or the config.`
);
process.exit(1);
}
config.persistedDocuments = {
...loadedConfig.persistedDocuments,
type: "hive",
endpoint: hivePersistedDocumentsEndpoint,
token
};
}
if (maskedErrors != null) {
config.maskedErrors = maskedErrors;
}
if (typeof config.pollingInterval === "number" && config.pollingInterval < 1e4) {
process.stderr.write(
`error: polling interval duration too short ${config.pollingInterval}, use at least 10 seconds
`
);
process.exit(1);
}
return runProxy(ctx, config);
});
/**
 * Boot the gateway in proxy mode: fork workers when configured (the fork
 * primary serves nothing itself), otherwise create the runtime and start
 * the HTTP(S) server.
 */
async function runProxy({ log }, config) {
  const isForkPrimary = handleFork(log, config);
  if (isForkPrimary) {
    return;
  }
  log.info(`Proxying requests to ${config.proxy.endpoint}`);
  const runtime = createGatewayRuntime(config);
  const serverOpts = { ...config, log };
  await startServerForRuntime(runtime, serverOpts);
}
const addCommand$1 = (ctx, cli) => cli.command("subgraph").description(
"serve a Federation subgraph that can be used with any Federation compatible router like Apollo Router/Gateway"
).argument(
"[schemaPathOrUrl]",
'path to the subgraph schema file or a url from where to pull the subgraph schema (default: "subgraph.graphql")'
).action(async function subgraph(schemaPathOrUrl) {
const {
maskedErrors,
hiveRegistryToken,
hiveUsageTarget,
hiveUsageAccessToken,
hivePersistedDocumentsEndpoint,
hivePersistedDocumentsToken,
...opts
} = this.optsWithGlobals();
const loadedConfig = await loadConfig({
log: ctx.log,
configPath: opts.configPath,
quiet: !cluster.isPrimary,
configFileName: ctx.configFileName
});
let subgraph2 = "subgraph.graphql";
if (schemaPathOrUrl) {
subgraph2 = schemaPathOrUrl;
} else if ("subgraph" in loadedConfig) {
subgraph2 = loadedConfig.subgraph;
}
const registryConfig = {};
const reporting = handleReportingConfig(ctx, loadedConfig, {
hiveRegistryToken,
hiveUsageTarget,
hiveUsageAccessToken,
// subgraph can only do reporting to hive registry
apolloGraphRef: void 0,
apolloKey: void 0
});
if (reporting) {
registryConfig.reporting = reporting;
}
const pubsub = loadedConfig.pubsub || new PubSub();
const cwd = loadedConfig.cwd || process.cwd();
if (loadedConfig.logging != null) {
handleLoggingConfig(loadedConfig.logging, ctx);
}
const cache = await getCacheInstanceFromConfig(loadedConfig, {
pubsub,
logger: ctx.log,
cwd
});
const builtinPlugins = await getBuiltinPluginsFromConfig(
{
...loadedConfig,
...opts
},
{
logger: ctx.log,
cache}
);
const config = {
...defaultOptions,
...loadedConfig,
...opts,
pollingInterval: opts.polling || ("pollingInterval" in loadedConfig ? loadedConfig.pollingInterval : void 0) || defaultOptions.pollingInterval,
...registryConfig,
subgraph: subgraph2,
logging: loadedConfig.logging ?? ctx.log,
productName: ctx.productName,
productDescription: ctx.productDescription,
productPackageName: ctx.productPackageName,
productLink: ctx.productLink,
productLogo: ctx.productLogo,
pubsub,
cache,
plugins(ctx2) {
const userPlugins = loadedConfig.plugins?.(ctx2) ?? [];
return [...builtinPlugins, ...userPlugins];
}
};
if (hivePersistedDocumentsEndpoint) {
const token = hivePersistedDocumentsToken || loadedConfig.persistedDocuments && "token" in loadedConfig.persistedDocuments && loadedConfig.persistedDocuments.token;
if (!token) {
ctx.log.error(
`Hive persisted documents needs a CDN token. Please provide it through the "--hive-persisted-documents-token <token>" option or the config.`
);
process.exit(1);
}
config.persistedDocuments = {
...loadedConfig.persistedDocuments,
type: "hive",
endpoint: hivePersistedDocumentsEndpoint,
token
};
}
if (maskedErrors != null) {
config.maskedErrors = maskedErrors;
}
if (typeof config.pollingInterval === "number" && config.pollingInterval < 1e4) {
process.stderr.write(
`error: polling interval duration too short ${config.pollingInterval}, use at least 10 seconds
`
);
process.exit(1);
}
return runSubgraph(ctx, config);
});
/**
 * Boot the gateway in subgraph mode. When the subgraph is a local file
 * path, resolve it and fail fast if the file does not exist; then fork
 * workers when configured, create the runtime, and start the server.
 */
async function runSubgraph({ log }, config) {
  let absSchemaPath = null;
  const source = config.subgraph;
  if (typeof source === "string" && isValidPath(source) && !isUrl(source)) {
    absSchemaPath = isAbsolute(source) ? String(source) : resolve(process.cwd(), source);
    const exists = await lstat(absSchemaPath).then(() => true, () => false);
    if (!exists) {
      throw new Error(`Subgraph schema at ${absSchemaPath} does not exist`);
    }
  }
  // The fork primary serves nothing itself.
  if (handleFork(log, config)) {
    return;
  }
  const runtime = createGatewayRuntime(config);
  if (absSchemaPath) {
    log.info(`Serving local subgraph from ${absSchemaPath}`);
  } else if (isUrl(String(source))) {
    log.info(`Serving remote subgraph from ${source}`);
  } else {
    log.info("Serving subgraph from config");
  }
  await startServerForRuntime(runtime, { ...config, log });
}
const addCommand = (ctx, cli) => cli.command("supergraph").description(
"serve a Federation supergraph provided by a compliant composition tool such as Mesh Compose or Apollo Rover"
).argument(
"[schemaPathOrUrl]",
'path to the composed supergraph schema file or a url from where to pull the supergraph schema (default: "supergraph.graphql")'
).addOption(
new Option(
"--apollo-uplink <uplink>",
"The URL of the managed federation up link. When retrying after a failure, you should cycle through the default up links using this option."
).env("APOLLO_SCHEMA_CONFIG_DELIVERY_ENDPOINT")
).action(async function supergraph(schemaPathOrUrl) {
const {
hiveCdnEndpoint,
hiveCdnKey,
hiveRegistryToken,
hiveUsageTarget,
hiveUsageAccessToken,
maskedErrors,
apolloGraphRef,
apolloKey,
hivePersistedDocumentsEndpoint,
hivePersistedDocumentsToken,
...opts
} = this.optsWithGlobals();
const { apolloUplink } = this.opts();
const loadedConfig = await loadConfig({
log: ctx.log,
configPath: opts.configPath,
quiet: !cluster.isPrimary,
configFileName: ctx.configFileName
});
let supergraph2 = "supergraph.graphql";
if (schemaPathOrUrl) {
ctx.log.info(`Supergraph will be loaded from ${schemaPathOrUrl}`);
if (hiveCdnKey) {
ctx.log.info(`Using Hive CDN key`);
if (!isUrl(schemaPathOrUrl)) {
ctx.log.error(
"Hive CDN endpoint must be a URL when providing --hive-cdn-key but got " + schemaPathOrUrl
);
process.exit(1);
}
supergraph2 = {
type: "hive",
endpoint: schemaPathOrUrl,
key: hiveCdnKey
};
} else if (apolloKey) {
ctx.log.info(`Using GraphOS API key`);
if (!schemaPathOrUrl.includes("@")) {
ctx.log.error(
`Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant> when providing --apollo-key. Please provide a valid graph ref not ${schemaPathOrUrl}.`
);
process.exit(1);
}
supergraph2 = {
type: "graphos",
apiKey: apolloKey,
graphRef: schemaPathOrUrl,
...apolloUplink ? { upLink: apolloUplink } : {}
};
} else {
supergraph2 = schemaPathOrUrl;
}
} else if (hiveCdnEndpoint) {
if (!isUrl(hiveCdnEndpoint)) {
ctx.log.error(
`Hive CDN endpoint must be a valid URL but got ${hiveCdnEndpoint}. Please provide a valid URL.`
);
process.exit(1);
}
if (!hiveCdnKey) {
ctx.log.error(
`Hive CDN requires an API key. Please provide an API key using the --hive-cdn-key option.Learn more at https://the-guild.dev/graphql/hive/docs/features/high-availability-cdn#cdn-access-tokens`
);
process.exit(1);
}
ctx.log.info(`Using Hive CDN endpoint: ${hiveCdnEndpoint}`);
supergraph2 = {
type: "hive",
endpoint: hiveCdnEndpoint,
key: hiveCdnKey
};
} else if (apolloGraphRef) {
if (!apolloGraphRef.includes("@")) {
ctx.log.error(
`Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant>. Please provide a valid graph ref not ${apolloGraphRef}.`
);
process.exit(1);
}
if (!apolloKey) {
ctx.log.error(
`Apollo GraphOS requires an API key. Please provide an API key using the --apollo-key option.`
);
process.exit(1);
}
ctx.log.info(`Using Apollo Graph Ref: ${apolloGraphRef}`);
supergraph2 = {
type: "graphos",
apiKey: apolloKey,
graphRef: apolloGraphRef,
upLink: apolloUplink
};
} else if ("supergraph" in loadedConfig) {
supergraph2 = loadedConfig.supergraph;
} else {
ctx.log.info(`Using default supergraph location: ${supergraph2}`);
}
const registryConfig = {};
const reporting = handleReportingConfig(ctx, loadedConfig, {
hiveRegistryToken,
hiveUsageTarget,
hiveUsageAccessToken,
apolloGraphRef: apolloGraphRef || schemaPathOrUrl,
apolloKey
});
if (reporting) {
registryConfig.reporting = reporting;
}
const pubsub = loadedConfig.pubsub || new PubSub();
const cwd = loadedConfig.cwd || process.cwd();
if (loadedConfig.logging != null) {
handleLoggingConfig(loadedConfig.logging, ctx);
}
const cache = await getCacheInstanceFromConfig(loadedConfig, {
pubsub,
logger: ctx.log,
cwd
});
const builtinPlugins = await getBuiltinPluginsFromConfig(
{
...loadedConfig,
...opts
},
{
logger: ctx.log,
cache}
);
const config = {
...defaultOptions,
...loadedConfig,
...opts,
pollingInterval: opts.polling || ("pollingInterval" in loadedConfig ? loadedConfig.pollingInterval : void 0) || defaultOptions.pollingInterval,
...registryConfig,
supergraph: supergraph2,
logging: ctx.log,
productName: ctx.productName,
productDescription: ctx.productDescription,
productPackageName: ctx.productPackageName,
productLink: ctx.productLink,
productLogo: ctx.productLogo,
pubsub,
cache,
plugins(ctx2) {
const userPlugins = loadedConfig.plugins?.(ctx2) ?? [];
return [...builtinPlugins, ...userPlugins];
}
};
if (hivePersistedDocumentsEndpoint) {
const token = hivePersistedDocumentsToken || loadedConfig.persistedDocuments && "token" in loadedConfig.persistedDocuments && loadedConfig.persistedDocuments.token;
if (!token) {
ctx.log.error(
`Hive persisted documents needs a CDN token. Please provide it through the "--hive-persisted-documents-token <token>" option or the config.`
);
process.exit(1);
}
config.persistedDocuments = {
...loadedConfig.persistedDocuments,
type: "hive",
endpoint: hivePersistedDocumentsEndpoint,
token
};
}
if (maskedErrors != null) {
config.maskedErrors = maskedErrors;
}
if (typeof config.pollingInterval === "number" && config.pollingInterval < 1e4) {
process.stderr.write(
`error: polling interval duration too short ${config.pollingInterval}, use at least 10 seconds
`
);
process.exit(1);
}
return runSupergraph(ctx, config);
}).allowUnknownOption(process.env.NODE_ENV === "test").allowExcessArguments(process.env.NODE_ENV === "test");
/**
 * Boots the gateway for a resolved supergraph configuration: resolves a
 * local supergraph file (when `config.supergraph` is a filesystem path),
 * optionally watches that file to hot-reload the unified graph, loads any
 * additional type definitions, creates the gateway runtime and starts the
 * HTTP server.
 *
 * @param {{ log: object }} ctx - CLI context; only the logger is used here.
 * @param {object} config - Fully merged gateway configuration.
 */
async function runSupergraph({ log }, config) {
  let absSchemaPath = null;
  if (typeof config.supergraph === "string" && isValidPath(config.supergraph) && !isUrl(config.supergraph)) {
    const supergraphPath = config.supergraph;
    absSchemaPath = isAbsolute(supergraphPath) ? String(supergraphPath) : resolve(process.cwd(), supergraphPath);
    log.info(`Reading supergraph from ${absSchemaPath}`);
    try {
      // Fail fast with a friendly message when the file is missing.
      await lstat(absSchemaPath);
    } catch {
      log.error(
        `Could not read supergraph from ${absSchemaPath}. Make sure the file exists.`
      );
      process.exit(1);
    }
  }
  if (absSchemaPath) {
    // A local file is reloaded through the watcher below, not by polling.
    delete config.pollingInterval;
    if (cluster.isPrimary) {
      let watcher;
      try {
        // Optional dependency; hot reloading is simply skipped when absent.
        watcher = await import('@parcel/watcher');
      } catch (e) {
        if (Object(e).code !== "MODULE_NOT_FOUND") {
          log.debug("Problem while importing @parcel/watcher", e);
        }
        log.warn(
          `If you want to enable hot reloading when ${absSchemaPath} changes, make sure "@parcel/watcher" is available`
        );
      }
      if (watcher) {
        try {
          log.info(`Watching ${absSchemaPath} for changes`);
          // Subscribe to the parent directory and filter the event stream
          // down to updates of the supergraph file itself.
          const absSupergraphDirname = dirname(absSchemaPath);
          const subscription = await watcher.subscribe(
            absSupergraphDirname,
            (err, events) => {
              if (err) {
                log.error(err);
                return;
              }
              if (events.some(
                (event) => event.path === absSchemaPath && event.type === "update"
              )) {
                log.info(
                  `${absSchemaPath} changed. Invalidating supergraph...`
                );
                if (config.fork && config.fork > 1) {
                  // Forked workers own their own runtimes; notify each one.
                  for (const workerId in cluster.workers) {
                    cluster.workers[workerId].send("invalidateUnifiedGraph");
                  }
                } else {
                  // `runtime` is assigned later in this function, but this
                  // callback only fires after the watcher is active, by which
                  // time the runtime exists.
                  runtime.invalidateUnifiedGraph();
                }
              }
            }
          );
          registerTerminateHandler((signal) => {
            log.info(`Closing watcher for ${absSchemaPath} on ${signal}`);
            return subscription.unsubscribe().catch((err) => {
              log.error(`Failed to close watcher for ${absSchemaPath}!`, err);
            });
          });
        } catch (err) {
          log.error(`Failed to watch ${absSchemaPath}!`);
          throw err;
        }
      }
    }
  }
  // NOTE(review): handleFork appears to return truthy in the primary once
  // workers are forked, so only workers (or a single-process run) continue
  // past this point — confirm against handleFork's implementation.
  if (handleFork(log, config)) {
    return;
  }
  if (config.additionalTypeDefs) {
    const loaders = [new GraphQLFileLoader(), new CodeFileLoader()];
    const additionalTypeDefsArr = asArray(config.additionalTypeDefs);
    // FIX: the previous `flatMap(async ...)` could not flatten anything —
    // the async callback returns Promises, which flatMap does not unwrap —
    // so file pointers left nested arrays in the result after Promise.all.
    // Map, await, then flatten one level instead.
    const resolvedTypeDefs = await Promise.all(
      additionalTypeDefsArr.map(async (ptr) => {
        // Short strings that look like paths are loaded from disk and may
        // expand to several sources; anything else is passed through as-is.
        if (typeof ptr === "string" && ptr.length <= 255 && isValidPath(ptr)) {
          const sources = await loadTypedefs(ptr, {
            loaders
          });
          return sources.map((source) => {
            const typeSource = source.document || source.rawSDL || source.schema;
            if (!typeSource) {
              throw new Error(`Invalid source ${source.location || ptr}`);
            }
            return typeSource;
          });
        }
        return ptr;
      })
    );
    config.additionalTypeDefs = resolvedTypeDefs.flat();
  }
  const runtime = createGatewayRuntime(config);
  if (absSchemaPath) {
    log.info(`Serving local supergraph from ${absSchemaPath}`);
  } else if (isUrl(String(config.supergraph))) {
    log.info(`Serving remote supergraph from ${config.supergraph}`);
  } else if (typeof config.supergraph === "object" && "type" in config.supergraph && config.supergraph.type === "hive") {
    log.info(
      `Serving supergraph from Hive CDN at ${config.supergraph.endpoint}`
    );
  } else {
    log.info("Serving supergraph from config");
  }
  await startServerForRuntime(runtime, {
    ...config,
    log
  });
}
/**
 * Registers every gateway CLI command on the commander program, in the
 * same order the registrars are declared.
 */
const addCommands = (ctx, cli) => {
  const registrars = [addCommand, addCommand$1, addCommand$2];
  for (const register of registrars) {
    register(ctx, cli);
  }
};
/**
 * Returns the currently free system memory in gibibytes.
 *
 * `freemem()` reports bytes, so the divisor must be `1024 ** 3`; the
 * previous `1024 ** 2` actually produced mebibytes, which inflated the
 * memory-based fork cap and effectively disabled it.
 *
 * @returns {number} free memory in GiB (fractional)
 */
function getFreeMemInGb() {
  return freemem() / 1024 ** 3;
}
/**
 * Memory-based cap on the worker count: one worker per whole unit of free
 * memory reported by `getFreeMemInGb()`.
 *
 * Uses `Math.floor` instead of the previous `parseInt(String(...))` round
 * trip, which is non-idiomatic and fragile (numbers that stringify in
 * scientific notation parse incorrectly).
 *
 * @returns {number} truncated free-memory figure (may be 0 when memory is low)
 */
function getMaxConcurrencyPerMem() {
  return Math.floor(getFreeMemInGb());
}
/**
 * CPU-based cap on the worker count: one worker per logical core the
 * process may use, as reported by Node's `availableParallelism()`.
 *
 * @returns {number} available logical parallelism
 */
function getMaxConcurrencyPerCpu() {
  const cores = availableParallelism();
  return cores;
}
/**
 * Highest worker count this machine can sustain: the lower of the
 * memory-based and CPU-based caps, but never fewer than one worker.
 *
 * @returns {number} maximum usable concurrency (>= 1)
 */
function getMaxConcurrency() {
  const capped = Math.min(getMaxConcurrencyPerMem(), getMaxConcurrencyPerCpu());
  return capped < 1 ? 1 : capped;
}
/**
 * Identity helper for user gateway config files: returns its argument
 * unchanged at runtime, existing purely so that config authors get type
 * inference / editor completion when wrapping their configuration.
 *
 * @param {object} config - the gateway configuration object
 * @returns {object} the exact same object, untouched
 */
function defineConfig(config) {
  return config;
}
// Hard ceiling for the --fork option on this machine (memory/CPU based).
const maxFork = getMaxConcurrency();
// Baseline gateway options; CLI flags and the config file override these.
const defaultOptions = {
  // Fork up to the machine's capacity in production, single process otherwise.
  fork: process.env["NODE_ENV"] === "production" ? maxFork : 1,
  // Windows and WSL bind to loopback; everything else binds all interfaces.
  host: platform().toLowerCase() === "win32" || // is WSL?
  release().toLowerCase().includes("microsoft") ? "127.0.0.1" : "0.0.0.0",
  port: 4e3,
  // Milliseconds (10s) — also the enforced minimum for --polling.
  pollingInterval: 1e4
};
// Root commander program shared by all gateway commands; the global options
// declared here are shown in every command's help output.
let cli = new Command().configureHelp({
  // will print help of global options for each command
  showGlobalOptions: true
}).addOption(
  new Option(
    "--fork <count>",
    `count of workers to spawn. uses "${maxFork}" (available parallelism) workers when NODE_ENV is "production", otherwise "1" (the main) worker (default: ${defaultOptions.fork})`
  ).env("FORK").argParser((v) => {
    const count = parseInt(v);
    if (isNaN(count)) {
      throw new InvalidArgumentError("not a number.");
    }
    // Refuse more workers than the machine can sustain.
    if (count > maxFork) {
      throw new InvalidArgumentError(
        `exceedes number of available parallelism "${maxFork}".`
      );
    }
    return count;
  })
).addOption(
  new Option(
    "-c, --config-path <path>",
    `path to the configuration file. defaults to the following files respectively in the current working directory: ${createDefaultConfigPaths("gateway").join(", ")}`
  ).env("CONFIG_PATH")
).addOption(
  new Option(
    "-h, --host <hostname>",
    `host to use for serving (default: ${defaultOptions.host})`
  )
).addOption(
  new Option(
    "-p, --port <number>",
    `port to use for serving (default: ${defaultOptions.port})`
  ).env("PORT").argParser((v) => {
    const port = parseInt(v);
    if (isNaN(port)) {
      throw new InvalidArgumentError("not a number.");
    }
    return port;
  })
).addOption(
  new Option(
    "--polling <duration>",
    `schema polling interval in human readable duration (default: 10s)`
  ).env("POLLING").argParser((v) => {
    // Human-readable durations ("10s", "5m", ...) parsed to milliseconds.
    const interval = parse(v);
    if (!interval) {
      throw new InvalidArgumentError("not a duration.");
    }
    return interval;
  })
).option("--no-masked-errors", "don't mask unexpected errors in responses").option(
  "--masked-errors",
  "mask unexpected errors in responses (default: true)",
  // we use "null" intentionally so that we know when the user provided the flag vs when not
  // see here https://github.com/tj/commander.js/blob/970ecae402b253de691e6a9066fea22f38fe7431/lib/command.js#L655
  // @ts-expect-error
  null
).addOption(
  // --- Hive usage reporting ---
  new Option(
    "--hive-registry-token <token>",
    '[DEPRECATED: please use "--hive-usage-target" and "--hive-usage-access-token"] Hive registry token for usage metrics reporting'
  ).env("HIVE_REGISTRY_TOKEN")
).addOption(
  new Option(
    "--hive-usage-target <target>",
    'Hive registry target to which the usage data should be reported to. requires the "--hive-usage-access-token <token>" option'
  ).env("HIVE_USAGE_TARGET")
).addOption(
  new Option(
    "--hive-usage-access-token <token>",
    'Hive registry access token for usage metrics reporting. requires the "--hive-usage-target <target>" option'
  ).env("HIVE_USAGE_ACCESS_TOKEN")
).option(
  "--hive-persisted-documents-endpoint <endpoint>",
  '[EXPERIMENTAL] Hive CDN endpoint for fetching the persisted documents. requires the "--hive-persisted-documents-token <token>" option'
).option(
  "--hive-persisted-documents-token <token>",
  '[EXPERIMENTAL] Hive persisted documents CDN endpoint token. requires the "--hive-persisted-documents-endpoint <endpoint>" option'
).addOption(
  // --- Hive CDN schema source ---
  new Option(
    "--hive-cdn-endpoint <endpoint>",
    "Hive CDN endpoint for fetching the schema"
  ).env("HIVE_CDN_ENDPOINT")
).addOption(
  new Option(
    "--hive-cdn-key <key>",
    'Hive CDN API key for fetching the schema. implies that the "schemaPathOrUrl" argument is a url'
  ).env("HIVE_CDN_KEY")
).addOption(
  // --- Apollo GraphOS managed federation ---
  new Option(
    "--apollo-graph-ref <graphRef>",
    "Apollo graph ref of the managed federation graph (<YOUR_GRAPH_ID>@<VARIANT>)"
  ).env("APOLLO_GRAPH_REF")
).addOption(
  new Option(
    "--apollo-key <apiKey>",
    "Apollo API key to use to authenticate with the managed federation up link"
  ).env("APOLLO_KEY")
).option("--disable-websockets", "Disable WebSockets support").addOption(
  new Option(
    "--jit",
    "Enable Just-In-Time compilation of GraphQL documents"
  ).env("JIT")
);
/**
 * Entry point of the gateway CLI.
 *
 * Merges the caller-provided context over the Hive Gateway defaults,
 * brands the commander program with the resulting name/description/version,
 * scopes the logger to the cluster worker when running as a fork, registers
 * all commands and finally parses `process.argv`.
 *
 * @param {object} userCtx - partial context; any field overrides the defaults
 * @returns {Promise<unknown>} resolves once commander has finished parsing
 */
async function run(userCtx) {
  const defaults = {
    log: userCtx.log || getDefaultLogger(),
    productName: "Hive Gateway",
    productDescription: "Federated GraphQL Gateway",
    productPackageName: "@graphql-hive/gateway",
    productLink: "https://the-guild.dev/graphql/hive/docs/gateway",
    binName: "hive-gateway",
    configFileName: "gateway.config",
    version: globalThis.__VERSION__ || "dev"
  };
  const ctx = { ...defaults, ...userCtx };
  cli = cli.name(ctx.binName).description(ctx.productDescription).version(ctx.version);
  const workerId = cluster.worker?.id;
  if (workerId) {
    // Tag every log line with the worker id so forked output is attributable.
    ctx.log = ctx.log.child({ worker: workerId });
  }
  addCommands(ctx, cli);
  return cli.parseAsync();
}
function handleNodeWarnings() {
const originalProcessEmitWarning