// @graphql-hive/gateway
import cluster from 'node:cluster';
import module from 'node:module';
import { availableParallelism, freemem, platform, release } from 'node:os';
import { join, isAbsolute, resolve } from 'node:path';
import { Option, Command, InvalidArgumentError } from '@commander-js/extra-typings';
import { LegacyLogger, Logger } from '@graphql-hive/logger';
import { renderGraphiQL } from '@graphql-yoga/render-graphiql';
import { getGraphQLWSOptions, createLoggerFromLogging, createGatewayRuntime } from '@graphql-hive/gateway-runtime';
import { MemPubSub } from '@graphql-hive/pubsub';
import { registerTerminateHandler, isUrl } from '@graphql-mesh/utils';
import { lstat, watch } from 'node:fs/promises';
import { pathToFileURL } from 'node:url';
import { promises } from 'node:fs';
import { createServer as createServer$1 } from 'node:http';
import { createServer } from 'node:https';
import { fakePromise, isValidPath, asArray } from '@graphql-tools/utils';
import { BatchSpanProcessor } from '@opentelemetry/sdk-trace-base';
import { CodeFileLoader } from '@graphql-tools/code-file-loader';
import { GraphQLFileLoader } from '@graphql-tools/graphql-file-loader';
import { loadTypedefs } from '@graphql-tools/load';
function getEnvStr(key, opts = {}) {
const globalThat = opts.globalThis ?? globalThis;
let variable = globalThat.process?.env?.[key] || // @ts-expect-error can exist in wrangler and maybe other runtimes
globalThat.env?.[key] || // @ts-expect-error can exist in deno
globalThat.Deno?.env?.get(key) || // @ts-expect-error could be set directly on the global scope
globalThat[key];
if (variable != null) {
variable += "";
} else {
variable = void 0;
}
return variable?.trim();
}
function getEnvBool(key, opts = {}) {
return strToBool(getEnvStr(key, opts));
}
function getNodeEnv(opts = {}) {
return getEnvStr("NODE_ENV", opts);
}
function strToBool(str) {
return ["1", "t", "true", "y", "yes", "on", "enabled"].includes(
(str || "").toLowerCase()
);
}
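// Quick sanity check (illustrative): strToBool is case-insensitive and
// treats anything outside the allowlist, including undefined, as false:
//   strToBool("YES") === true
//   strToBool("0")   === false
//   strToBool()      === false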
function isDebug() {
return getEnvBool("DEBUG");
}
function isNode() {
return typeof process !== "undefined" && process.versions && process.versions.node && typeof Bun === "undefined";
}
const unit = Object.create(null);
const m = 60000, h = m * 60, d = h * 24, y = d * 365.25;
unit.year = unit.yr = unit.y = y;
unit.month = unit.mo = unit.mth = y / 12;
unit.week = unit.wk = unit.w = d * 7;
unit.day = unit.d = d;
unit.hour = unit.hr = unit.h = h;
unit.minute = unit.min = unit.m = m;
unit.second = unit.sec = unit.s = 1000;
unit.millisecond = unit.millisec = unit.ms = 1;
unit.microsecond = unit.microsec = unit.us = unit.µs = 1e-3;
unit.nanosecond = unit.nanosec = unit.ns = 1e-6;
unit.group = ',';
unit.decimal = '.';
unit.placeholder = ' _';
const durationRE = /((?:\d{1,16}(?:\.\d{1,16})?|\.\d{1,16})(?:[eE][-+]?\d{1,4})?)\s?([\p{L}]{0,14})/gu;
parse.unit = unit;
/**
* convert `str` to ms
*
* @param {string} str
* @param {string} format
* @return {number|null}
*/
function parse(str = '', format = 'ms') {
let result = null, prevUnits;
String(str)
.replace(new RegExp(`(\\d)[${parse.unit.placeholder}${parse.unit.group}](\\d)`, 'g'), '$1$2') // clean up group separators / placeholders
.replace(parse.unit.decimal, '.') // normalize decimal separator
.replace(durationRE, (_, n, units) => {
// if no units, find next smallest units or fall back to format value
// eg. 1h30 -> 1h30m
if (!units) {
if (prevUnits) {
for (const u in parse.unit) if (parse.unit[u] < prevUnits) { units = u; break }
}
else units = format;
}
else units = units.toLowerCase();
prevUnits = units = parse.unit[units] || parse.unit[units.replace(/s$/, '')];
if (units) result = (result || 0) + n * units;
});
return result && ((result / (parse.unit[format] || 1)) * (str[0] === '-' ? -1 : 1))
}
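// Worked examples (derivable from the unit table above; the default output
// format is "ms"):
//   parse("1h30m")    === 5400000   // 3600000 + 30 * 60000
//   parse("1h30")     === 5400000   // a bare trailing number inherits the next smaller unit
//   parse("90s", "m") === 1.5       // result converted into minutes
//   parse("-2 hours") === -7200000  // a leading "-" negates the result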
const defaultConfigExtensions = [
".ts",
".mts",
".cts",
".js",
".mjs",
".cjs"
];
const defaultConfigFileName = "gateway.config";
function createDefaultConfigPaths(configFileName) {
return defaultConfigExtensions.map((ext) => `${configFileName}${ext}`);
}
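// For example, createDefaultConfigPaths("gateway.config") yields
// ["gateway.config.ts", "gateway.config.mts", "gateway.config.cts",
//  "gateway.config.js", "gateway.config.mjs", "gateway.config.cjs"].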
async function loadConfig(opts) {
let importedConfig = null;
if (!opts.configPath) {
!opts.quiet && opts.log.debug(`Searching for default config files`);
const configPaths = [
...createDefaultConfigPaths(defaultConfigFileName),
...createDefaultConfigPaths(opts.configFileName),
// For backwards compatibility of Mesh Compose users
...createDefaultConfigPaths("mesh.config")
];
for (const configPath of configPaths) {
const absoluteConfigPath = join(process.cwd(), configPath);
const exists = await lstat(absoluteConfigPath).then(() => true).catch(() => false);
if (exists) {
!opts.quiet && opts.log.info(`Found default config file ${absoluteConfigPath}`);
const importUrl = pathToFileURL(absoluteConfigPath).toString();
const module = await import(importUrl);
importedConfig = Object(module).gatewayConfig || null;
if (!importedConfig && !configPath.includes("mesh.config")) {
!opts.quiet && opts.log.warn(
`No "gatewayConfig" exported from config file at ${absoluteConfigPath}`
);
}
break;
}
}
} else {
const configPath = isAbsolute(opts.configPath) ? opts.configPath : join(process.cwd(), opts.configPath);
!opts.quiet && opts.log.info(`Loading config file at path ${configPath}`);
const exists = await lstat(configPath).then(() => true).catch(() => false);
if (!exists) {
throw new Error(`Cannot find config file at ${configPath}`);
}
const importUrl = pathToFileURL(configPath).toString();
const module = await import(importUrl);
importedConfig = Object(module).gatewayConfig || null;
if (!importedConfig) {
throw new Error(
`No "gatewayConfig" exported from config file at ${configPath}`
);
}
}
if (importedConfig) {
!opts.quiet && opts.log.info("Loaded config");
} else {
!opts.quiet && opts.log.debug("No config loaded");
}
return importedConfig || {};
}
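// A minimal config file this loader accepts (a sketch; the only hard
// requirement visible above is a named "gatewayConfig" export):
//
//   // gateway.config.ts
//   import { defineConfig } from "@graphql-hive/gateway";
//   export const gatewayConfig = defineConfig({ pollingInterval: 30_000 });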
async function getBuiltinPluginsFromConfig(config, ctx) {
const plugins = [];
if (config.jwt) {
const { useJWT } = await import('@graphql-mesh/plugin-jwt-auth');
plugins.push(useJWT(config.jwt));
}
if (config.prometheus) {
const { default: useMeshPrometheus } = await import('@graphql-mesh/plugin-prometheus');
plugins.push(useMeshPrometheus(config.prometheus));
}
if (config.openTelemetry) {
const { useOpenTelemetry } = await import('@graphql-hive/plugin-opentelemetry');
plugins.push(useOpenTelemetry({ ...config.openTelemetry, log: ctx.log }));
}
if (config.rateLimiting) {
const { default: useMeshRateLimit } = await import('@graphql-mesh/plugin-rate-limit');
plugins.push(
useMeshRateLimit({
config: Array.isArray(config.rateLimiting) ? config.rateLimiting : typeof config.rateLimiting === "object" ? config.rateLimiting.config : [],
cache: ctx.cache
})
);
}
if (config.jit) {
const { useJIT } = await import('@graphql-mesh/plugin-jit');
plugins.push(useJIT());
}
if (config.awsSigv4) {
const { useAWSSigv4 } = await import('@graphql-hive/plugin-aws-sigv4');
plugins.push(useAWSSigv4(config.awsSigv4));
}
if (config.maxTokens) {
const { maxTokensPlugin: useMaxTokens } = await import('@escape.tech/graphql-armor-max-tokens');
const maxTokensPlugin = useMaxTokens({
n: typeof config.maxTokens === "number" ? config.maxTokens : 1e3
});
plugins.push(
// @ts-expect-error the armor plugin does not inherit the context
maxTokensPlugin
);
}
if (config.maxDepth) {
const { maxDepthPlugin: useMaxDepth } = await import('@escape.tech/graphql-armor-max-depth');
const maxDepthPlugin = useMaxDepth({
n: typeof config.maxDepth === "number" ? config.maxDepth : 6
});
plugins.push(
// @ts-expect-error the armor plugin does not inherit the context
maxDepthPlugin
);
}
if (config.blockFieldSuggestions) {
const { blockFieldSuggestionsPlugin: useBlockFieldSuggestions } = await import('@escape.tech/graphql-armor-block-field-suggestions');
const blockFieldSuggestionsPlugin = useBlockFieldSuggestions();
plugins.push(
// @ts-expect-error the armor plugin does not inherit the context
blockFieldSuggestionsPlugin
);
}
return plugins;
}
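// Illustrative call (hypothetical option values): each truthy config key
// lazily imports its plugin package, so only enabled plugins are loaded:
//
//   const plugins = await getBuiltinPluginsFromConfig(
//     { jit: true, maxDepth: 8 },
//     { log, cache },
//   );
//   // -> [useJIT(), maxDepthPlugin({ n: 8 })]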
async function getCacheInstanceFromConfig(config, ctx) {
if (typeof config.cache === "function") {
return config.cache(ctx);
}
if (config.cache && "type" in config.cache) {
switch (config.cache.type) {
case "redis": {
const { default: RedisCache } = await import('@graphql-mesh/cache-redis');
return new RedisCache({
...ctx,
...config.cache,
// TODO: use new logger
logger: LegacyLogger.from(ctx.log)
});
}
case "cfw-kv": {
const { default: CloudflareKVCacheStorage } = await import('@graphql-mesh/cache-cfw-kv');
return new CloudflareKVCacheStorage({
...ctx,
...config.cache
});
}
case "upstash-redis": {
const { default: UpstashRedisCache } = await import('@graphql-mesh/cache-upstash-redis');
return new UpstashRedisCache({
...ctx,
...config.cache
});
}
}
if (config.cache.type !== "localforage") {
ctx.log.warn(
"Unknown cache type, falling back to localforage",
config.cache
);
}
const { default: LocalforageCache2 } = await import('@graphql-mesh/cache-localforage');
return new LocalforageCache2({
...ctx,
...config.cache
});
}
if (config.cache) {
return config.cache;
}
const { default: LocalforageCache } = await import('@graphql-mesh/cache-localforage');
return new LocalforageCache(ctx);
}
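// Resolution order, as implemented above: a function config is invoked with
// the context; a { type } object selects redis, cfw-kv, or upstash-redis
// (unrecognized types log a warning and fall back to localforage); any other
// truthy value is used as-is; and no cache at all defaults to localforage.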
async function startBunServer(gwRuntime, opts) {
const serverOptions = {
fetch: gwRuntime,
port: opts.port || defaultOptions.port,
hostname: opts.host || defaultOptions.host,
reusePort: true,
idleTimeout: opts.requestTimeout
};
if (opts.sslCredentials) {
if (opts.sslCredentials.ca_file_name) {
serverOptions.ca = Bun.file(opts.sslCredentials.ca_file_name);
}
if (opts.sslCredentials.cert_file_name) {
serverOptions.cert = Bun.file(opts.sslCredentials.cert_file_name);
}
if (opts.sslCredentials.dh_params_file_name) {
serverOptions.dhParamsFile = opts.sslCredentials.dh_params_file_name;
}
if (opts.sslCredentials.key_file_name) {
serverOptions.key = Bun.file(opts.sslCredentials.key_file_name);
}
if (opts.sslCredentials.passphrase) {
serverOptions.passphrase = opts.sslCredentials.passphrase;
}
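// note: ssl_ciphers is accepted but not applied; the empty statement below
// leaves it as a no-op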
if (opts.sslCredentials.ssl_ciphers) ;
if (opts.sslCredentials.ssl_prefer_low_memory_usage) {
serverOptions.lowMemoryMode = opts.sslCredentials.ssl_prefer_low_memory_usage;
}
}
if (!opts.disableWebsockets) {
const { makeHandler } = await import('graphql-ws/use/bun');
serverOptions.websocket = makeHandler(
getGraphQLWSOptions(gwRuntime, (ctx) => ({
socket: ctx.extra.socket,
...ctx.extra.socket.data || {}
}))
);
serverOptions.fetch = function(request, server2) {
if (request.headers.has("Sec-WebSocket-Key") && server2.upgrade(request, {
data: {
request
}
})) {
return void 0;
}
return gwRuntime.handleRequest(request, server2);
};
}
const server = Bun.serve(serverOptions);
opts.log.info(`Listening on ${server.url}`);
gwRuntime.disposableStack.use(server);
}
async function startNodeHttpServer(gwRuntime, opts) {
const {
log,
host = defaultOptions.host,
port = defaultOptions.port,
sslCredentials,
maxHeaderSize,
disableWebsockets,
requestTimeout
} = opts;
let server;
let protocol;
if (sslCredentials) {
protocol = "https";
const sslOptionsForNodeHttp = {};
if (sslCredentials.ca_file_name) {
sslOptionsForNodeHttp.ca = await promises.readFile(
sslCredentials.ca_file_name
);
}
if (sslCredentials.cert_file_name) {
sslOptionsForNodeHttp.cert = await promises.readFile(
sslCredentials.cert_file_name
);
}
if (sslCredentials.dh_params_file_name) {
sslOptionsForNodeHttp.dhparam = await promises.readFile(
sslCredentials.dh_params_file_name
);
}
if (sslCredentials.key_file_name) {
sslOptionsForNodeHttp.key = await promises.readFile(
sslCredentials.key_file_name
);
}
if (sslCredentials.passphrase) {
sslOptionsForNodeHttp.passphrase = sslCredentials.passphrase;
}
if (sslCredentials.ssl_ciphers) {
sslOptionsForNodeHttp.ciphers = sslCredentials.ssl_ciphers;
}
if (sslCredentials.ssl_prefer_low_memory_usage) {
sslOptionsForNodeHttp.honorCipherOrder = true;
}
server = createServer(
{
...sslOptionsForNodeHttp,
maxHeaderSize,
requestTimeout
},
gwRuntime
);
} else {
protocol = "http";
server = createServer$1(
{
maxHeaderSize,
requestTimeout
},
gwRuntime
);
}
const url = `${protocol}://${host}:${port}`.replace("0.0.0.0", "localhost");
log.debug(`Starting server on ${url}`);
if (!disableWebsockets) {
log.debug("Setting up WebSocket server");
const { WebSocketServer } = await import('ws');
const wsServer = new WebSocketServer({
path: gwRuntime.graphqlEndpoint,
server
});
const { useServer } = await import('graphql-ws/use/ws');
useServer(
getGraphQLWSOptions(gwRuntime, (ctx) => ({
req: ctx.extra?.request,
socket: ctx.extra?.socket
})),
wsServer
);
gwRuntime.disposableStack.defer(
() => new Promise((resolve, reject) => {
log.info("Stopping the WebSocket server");
wsServer.close((err) => {
if (err) {
return reject(err);
}
log.info("Stopped the WebSocket server successfully");
return resolve();
});
})
);
}
return new Promise((resolve, reject) => {
server.once("error", reject);
server.listen(port, host, () => {
log.info(`Listening on ${url}`);
gwRuntime.disposableStack.defer(
() => new Promise((resolve2) => {
process.stderr.write("\n");
log.info("Stopping the server");
server.closeAllConnections();
server.close(() => {
log.info("Stopped the server successfully");
return resolve2();
});
})
);
return resolve();
});
});
}
function startServerForRuntime(runtime, {
log,
host = defaultOptions.host,
port = defaultOptions.port,
sslCredentials,
maxHeaderSize = 16384,
disableWebsockets = false
}) {
process.on("message", (message) => {
if (message === "invalidateUnifiedGraph") {
log.info("Invalidating Supergraph");
runtime.invalidateUnifiedGraph();
}
});
const serverOpts = {
log,
host,
port,
maxHeaderSize,
disableWebsockets,
...sslCredentials ? { sslCredentials } : {}
};
const startServer = globalThis.Bun ? startBunServer : startNodeHttpServer;
return startServer(runtime, serverOpts);
}
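// Runtime selection is a plain global sniff: under Bun, Bun.serve handles
// HTTP (and WebSocket upgrades in-process); otherwise the node:http(s)
// servers above are used. Both paths register their teardown on
// gwRuntime.disposableStack, so shutdown behaves the same either way.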
function handleFork(log, config) {
try {
if (cluster.isPrimary && config.fork && config.fork > 1) {
const workers = /* @__PURE__ */ new Set();
let expectedToExit = false;
log.debug(`Forking ${config.fork} workers`);
for (let i = 0; i < config.fork; i++) {
const worker = cluster.fork();
const workerLogger = log.child({ worker: worker.id });
worker.once("exit", (code, signal) => {
const logData = {
signal
};
if (code != null) {
logData["code"] = code;
}
if (expectedToExit) {
workerLogger.debug(logData, "exited");
} else {
workerLogger.error(
logData,
"Exited unexpectedly. A restart is recommended to ensure the stability of the service"
);
}
workers.delete(worker);
if (!expectedToExit && workers.size === 0) {
log.error(logData, "All workers exited unexpectedly. Exiting...");
process.exit(1);
}
});
workers.add(worker);
}
registerTerminateHandler((signal) => {
log.info(`Killing workers on ${signal}`);
expectedToExit = true;
workers.forEach((w) => {
w.kill(signal);
});
});
return true;
}
} catch (e) {
log.error(
// @ts-expect-error very likely an instanceof error
e,
"Error while forking workers"
);
}
return false;
}
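// Resulting topology (a sketch, assuming e.g. "--fork 4"): the primary
// process forks 4 workers via node:cluster, each worker re-executes this
// entrypoint, handleFork() returns false there, and the worker then starts
// its own server on the shared port.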
async function handleOpenTelemetryCLIOpts(ctx, cliOpts) {
const accessToken = cliOpts.hiveTraceAccessToken;
const traceEndpoint = cliOpts.hiveTraceEndpoint;
const target = cliOpts.hiveTarget;
const openTelemetry = cliOpts.openTelemetry;
const exporterType = cliOpts.openTelemetryExporterType ?? "otlp-http";
const log = ctx.log.child("[OpenTelemetry] ");
if (openTelemetry || accessToken) {
log.debug(
{ openTelemetry, exporterType, target, traceEndpoint },
"Initializing OpenTelemetry SDK"
);
return fakePromise().then(async () => {
const { openTelemetrySetup, HiveTracingSpanProcessor } = await import('@graphql-hive/plugin-opentelemetry/setup');
const processors = [];
const logAttributes = {
traceEndpoints: [],
contextManager: false
};
let integrationName;
if (openTelemetry) {
const otelEndpoint = typeof openTelemetry === "string" ? openTelemetry : getEnvStr("OTEL_EXPORTER_OTLP_ENDPOINT");
log.debug({ exporterType, otelEndpoint }, "Setting up OTLP Exporter");
integrationName = "OpenTelemetry";
logAttributes.traceEndpoints.push({
url: otelEndpoint ?? null,
type: exporterType
});
log.debug({ type: exporterType }, "Loading OpenTelemetry exporter");
const { OTLPTraceExporter } = await import(`@opentelemetry/exporter-trace-${exporterType}`);
processors.push(
new BatchSpanProcessor(new OTLPTraceExporter({ url: otelEndpoint }))
);
}
if (accessToken) {
log.debug({ target, traceEndpoint }, "Setting up Hive Tracing");
integrationName ??= "Hive Tracing";
if (!target) {
ctx.log.error(
'Hive tracing needs a target. Please provide it through "--hive-target <target>"'
);
process.exit(1);
}
logAttributes.traceEndpoints.push({
url: traceEndpoint,
type: "hive tracing",
target
});
processors.push(
new HiveTracingSpanProcessor({
accessToken,
target,
endpoint: traceEndpoint
})
);
}
log.debug("Trying to load AsyncLocalStorage based Context Manager");
const contextManager = await import('@opentelemetry/context-async-hooks').then((module) => {
logAttributes.contextManager = true;
return new module.AsyncLocalStorageContextManager();
}).catch(() => null);
openTelemetrySetup({
log,
traces: { processors },
resource: await detectResource().catch((err) => {
if (err && typeof err === "object" && "code" in err && err.code === "ERR_MODULE_NOT_FOUND") {
ctx.log.warn(
err,
`NodeJS modules necessary for environment detection are missing, please install them to auto-detect the environment`
);
return void 0;
}
throw err;
}),
contextManager
});
log.info(logAttributes, `${integrationName} integration is enabled`);
return true;
});
}
return false;
}
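// Illustrative flags (hypothetical endpoint): "--opentelemetry
// http://localhost:4318/v1/traces" registers a BatchSpanProcessor with the
// matching OTLP exporter, while "--hive-trace-access-token <token>
// --hive-target <target>" appends a HiveTracingSpanProcessor alongside it.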
async function detectResource() {
if (isNode()) {
const autoInstrumentationsNodeName = "@opentelemetry/auto-instrumentations-node";
const { getResourceDetectors } = await import(autoInstrumentationsNodeName);
const resourcesName = "@opentelemetry/resources";
const { detectResources } = await import(resourcesName);
return detectResources({ detectors: getResourceDetectors() });
}
return void 0;
}
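// The package names are routed through variables so bundlers cannot
// statically resolve these optional OpenTelemetry dependencies; if they are
// not installed, the import rejects and the caller above downgrades resource
// detection to a warning instead of failing startup.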
function handleReportingConfig(ctx, loadedConfig, cliOpts) {
const confOpts = {
...loadedConfig.reporting?.type === "hive" ? {
hiveRegistryToken: loadedConfig.reporting.token,
hiveTarget: loadedConfig.reporting.target,
hiveUsageAccessToken: loadedConfig.reporting.token
} : {},
...loadedConfig.reporting?.type === "graphos" ? {
apolloGraphRef: loadedConfig.reporting.graphRef,
apolloKey: loadedConfig.reporting.apiKey
} : {}
};
const opts = {
...confOpts,
...Object.entries(cliOpts).reduce((acc, [key, val]) => {
if (val != null) {
return { ...acc, [key]: val };
}
return acc;
}, {}),
hiveTarget: (
// cli arguments always take precedence over config
confOpts.hiveTarget ?? cliOpts.hiveTarget ?? cliOpts.hiveUsageTarget
)
};
if (cliOpts.hiveRegistryToken && cliOpts.hiveUsageAccessToken) {
ctx.log.error(
'Cannot use "--hive-registry-token" with "--hive-usage-access-token". Please use "--hive-usage-target" and "--hive-usage-access-token" or the config instead.'
);
process.exit(1);
}
if (cliOpts.hiveUsageTarget && cliOpts.hiveTarget) {
ctx.log.error(
'Cannot use "--hive-usage-target" with "--hive-target". Please only use "--hive-target"'
);
process.exit(1);
}
if (cliOpts.hiveRegistryToken && opts.hiveTarget) {
ctx.log.error(
'Cannot use "--hive-registry-token" with a target. Please use "--hive-usage-target" and "--hive-usage-access-token" or the config instead.'
);
process.exit(1);
}
if (opts.hiveTarget && !opts.hiveAccessToken && !opts.hiveUsageAccessToken && !opts.hiveTraceAccessToken) {
ctx.log.error(
'Hive usage target needs an access token. Please provide it through "--hive-access-token <token>", the more specific "--hive-usage-access-token <token>" and "--hive-trace-access-token" options, or the config.'
);
process.exit(1);
}
if (!opts.hiveRegistryToken && (opts.hiveAccessToken || opts.hiveUsageAccessToken || opts.hiveTraceAccessToken) && !opts.hiveTarget) {
ctx.log.error(
'Hive access token needs a target. Please provide it through the "--hive-target <target>" option or the config.'
);
process.exit(1);
}
const hiveUsageAccessToken = opts.hiveAccessToken || opts.hiveUsageAccessToken || opts.hiveRegistryToken;
if (hiveUsageAccessToken) {
if (opts.hiveUsageTarget) {
ctx.log.info("Configuring Hive usage reporting");
} else {
ctx.log.info("Configuring Hive registry reporting");
}
return {
...loadedConfig.reporting,
type: "hive",
token: hiveUsageAccessToken,
target: opts.hiveTarget
};
}
if (opts.apolloKey) {
ctx.log.info("Configuring Apollo GraphOS registry reporting");
if (!opts.apolloGraphRef?.includes("@")) {
ctx.log.error(
`Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant>. Please provide a valid graph ref${opts.apolloGraphRef ? `, not ${opts.apolloGraphRef}` : ""}.`
);
process.exit(1);
}
return {
...loadedConfig.reporting,
type: "graphos",
apiKey: opts.apolloKey,
graphRef: opts.apolloGraphRef
};
}
return null;
}
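// Illustrative outcomes (hypothetical flag values):
//   --hive-target my-org/my-project/my-target --hive-access-token t1
//     -> { type: "hive", token: "t1", target: "my-org/my-project/my-target" }
//   --apollo-key k1 --apollo-graph-ref my-graph@current
//     -> { type: "graphos", apiKey: "k1", graphRef: "my-graph@current" }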
const addCommand$2 = (ctx, cli) => cli.command("proxy").description(
"serve a proxy to a GraphQL API and add additional features such as monitoring/tracing, caching, rate limiting, security, and more"
).argument("[endpoint]", "URL of the endpoint GraphQL API to proxy").option(
"--schema <schemaPathOrUrl>",
"path to the GraphQL schema file or a url from where to pull the schema"
).action(async function proxy(endpoint) {
const {
opentelemetry,
opentelemetryExporterType,
hiveCdnEndpoint,
hiveCdnKey,
hiveRegistryToken,
hiveTarget,
hiveUsageTarget,
hiveAccessToken,
hiveUsageAccessToken,
hiveTraceAccessToken,
hiveTraceEndpoint,
maskedErrors,
hivePersistedDocumentsEndpoint,
hivePersistedDocumentsToken,
...opts
} = this.optsWithGlobals();
ctx.log.info(`Starting ${ctx.productName} ${ctx.version} in proxy mode`);
const openTelemetryEnabledByCLI = await handleOpenTelemetryCLIOpts(ctx, {
openTelemetry: opentelemetry,
openTelemetryExporterType: opentelemetryExporterType,
hiveTarget,
hiveTraceAccessToken,
hiveTraceEndpoint
});
const loadedConfig = await loadConfig({
log: ctx.log,
configPath: opts.configPath,
quiet: !cluster.isPrimary,
configFileName: ctx.configFileName
});
let proxy2;
if (endpoint) {
proxy2 = { endpoint };
} else if ("proxy" in loadedConfig) {
proxy2 = loadedConfig.proxy;
}
if (!proxy2) {
ctx.log.error(
"Proxy endpoint not defined. Please provide it in the [endpoint] argument or in the config file."
);
process.exit(1);
}
let schema;
const hiveCdnEndpointOpt = (
// TODO: take schema from optsWithGlobals once https://github.com/commander-js/extra-typings/pull/76 is merged
this.opts().schema || hiveCdnEndpoint
);
if (hiveCdnEndpointOpt) {
if (hiveCdnKey) {
if (!isUrl(hiveCdnEndpointOpt)) {
ctx.log.error(
"Endpoint must be a URL when providing --hive-cdn-key but got " + hiveCdnEndpointOpt
);
process.exit(1);
}
schema = {
type: "hive",
endpoint: hiveCdnEndpointOpt,
// see validation above
key: hiveCdnKey
};
} else {
schema = this.opts().schema;
}
} else if ("schema" in loadedConfig) {
schema = loadedConfig.schema;
}
if (hiveCdnKey && !schema) {
process.stderr.write(
`error: option '--schema <schemaPathOrUrl>' is required when providing '--hive-cdn-key <key>'
`
);
process.exit(1);
}
const registryConfig = {};
const reporting = handleReportingConfig(ctx, loadedConfig, {
hiveRegistryToken,
hiveTarget,
hiveUsageTarget,
hiveAccessToken,
hiveUsageAccessToken,
hiveTraceAccessToken,
// proxy can only do reporting to hive registry
apolloGraphRef: void 0,
apolloKey: void 0
});
if (reporting) {
registryConfig.reporting = reporting;
}
const pubsub = loadedConfig.pubsub || new MemPubSub();
const cwd = loadedConfig.cwd || process.cwd();
if (loadedConfig.logging != null) {
ctx.log = createLoggerFromLogging(loadedConfig.logging);
}
const cache = await getCacheInstanceFromConfig(loadedConfig, {
pubsub,
log: ctx.log,
cwd
});
const builtinPlugins = await getBuiltinPluginsFromConfig(
{
...loadedConfig,
...opts,
openTelemetry: openTelemetryEnabledByCLI ? { ...loadedConfig.openTelemetry, traces: true } : loadedConfig.openTelemetry
},
{
log: ctx.log,
cache
}
);
const config = {
...defaultOptions,
...loadedConfig,
...opts,
pollingInterval: opts.polling || ("pollingInterval" in loadedConfig ? loadedConfig.pollingInterval : void 0) || defaultOptions.pollingInterval,
...registryConfig,
proxy: proxy2,
schema,
logging: ctx.log,
productName: ctx.productName,
productDescription: ctx.productDescription,
productPackageName: ctx.productPackageName,
productLink: ctx.productLink,
...ctx.productLogo ? { productLogo: ctx.productLogo } : {},
pubsub,
cache,
plugins(ctx2) {
const userPlugins = loadedConfig.plugins?.(ctx2) ?? [];
return [...builtinPlugins, ...userPlugins];
}
};
if (hivePersistedDocumentsEndpoint) {
const token = hivePersistedDocumentsToken || loadedConfig.persistedDocuments && "token" in loadedConfig.persistedDocuments && loadedConfig.persistedDocuments.token;
if (!token) {
ctx.log.error(
`Hive persisted documents needs a CDN token. Please provide it through the "--hive-persisted-documents-token <token>" option or the config.`
);
process.exit(1);
}
config.persistedDocuments = {
...loadedConfig.persistedDocuments,
type: "hive",
endpoint: hivePersistedDocumentsEndpoint,
token
};
}
if (maskedErrors != null) {
config.maskedErrors = maskedErrors;
}
if (typeof config.pollingInterval === "number" && config.pollingInterval < 1e4) {
process.stderr.write(
`error: polling interval duration too short ${config.pollingInterval}, use at least 10 seconds
`
);
process.exit(1);
}
return runProxy(ctx, config);
});
async function runProxy({ log }, config) {
if (handleFork(log, config)) {
return;
}
const runtime = createGatewayRuntime(config);
log.info({ endpoint: config.proxy.endpoint }, "Loading schema");
await runtime.getSchema();
log.info({ endpoint: config.proxy.endpoint }, "Proxying requests");
await startServerForRuntime(runtime, {
...config,
log
});
}
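// Illustrative invocation (hypothetical endpoint; assumes the published
// "hive-gateway" bin name):
//   hive-gateway proxy https://example.com/graphql --port 4000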
const addCommand$1 = (ctx, cli) => cli.command("subgraph").description(
"serve a Federation subgraph that can be used with any Federation compatible router like Apollo Router/Gateway"
).argument(
"[schemaPathOrUrl]",
'path to the subgraph schema file or a url from where to pull the subgraph schema (default: "subgraph.graphql")'
).action(async function subgraph(schemaPathOrUrl) {
const {
opentelemetry,
opentelemetryExporterType,
maskedErrors,
hiveRegistryToken,
hiveTarget,
hiveUsageTarget,
hiveAccessToken,
hiveUsageAccessToken,
hiveTraceAccessToken,
hiveTraceEndpoint,
hivePersistedDocumentsEndpoint,
hivePersistedDocumentsToken,
...opts
} = this.optsWithGlobals();
ctx.log.info(`Starting ${ctx.productName} ${ctx.version} as subgraph`);
const openTelemetryEnabledByCLI = await handleOpenTelemetryCLIOpts(ctx, {
openTelemetry: opentelemetry,
openTelemetryExporterType: opentelemetryExporterType,
hiveTarget,
hiveTraceAccessToken,
hiveTraceEndpoint
});
const loadedConfig = await loadConfig({
log: ctx.log,
configPath: opts.configPath,
quiet: !cluster.isPrimary,
configFileName: ctx.configFileName
});
let subgraph2 = "subgraph.graphql";
if (schemaPathOrUrl) {
subgraph2 = schemaPathOrUrl;
} else if ("subgraph" in loadedConfig) {
subgraph2 = loadedConfig.subgraph;
}
const registryConfig = {};
const reporting = handleReportingConfig(ctx, loadedConfig, {
hiveRegistryToken,
hiveTarget,
hiveUsageTarget,
hiveAccessToken,
hiveUsageAccessToken,
hiveTraceAccessToken,
// subgraph can only do reporting to hive registry
apolloGraphRef: void 0,
apolloKey: void 0
});
if (reporting) {
registryConfig.reporting = reporting;
}
const pubsub = loadedConfig.pubsub || new MemPubSub();
const cwd = loadedConfig.cwd || process.cwd();
if (loadedConfig.logging != null) {
ctx.log = createLoggerFromLogging(loadedConfig.logging);
}
const cache = await getCacheInstanceFromConfig(loadedConfig, {
pubsub,
log: ctx.log,
cwd
});
const builtinPlugins = await getBuiltinPluginsFromConfig(
{
...loadedConfig,
...opts,
openTelemetry: openTelemetryEnabledByCLI ? { ...loadedConfig.openTelemetry, traces: true } : loadedConfig.openTelemetry
},
{
log: ctx.log,
cache
}
);
const config = {
...defaultOptions,
...loadedConfig,
...opts,
pollingInterval: opts.polling || ("pollingInterval" in loadedConfig ? loadedConfig.pollingInterval : void 0) || defaultOptions.pollingInterval,
...registryConfig,
subgraph: subgraph2,
logging: loadedConfig.logging ?? ctx.log,
productName: ctx.productName,
productDescription: ctx.productDescription,
productPackageName: ctx.productPackageName,
productLink: ctx.productLink,
productLogo: ctx.productLogo,
pubsub,
cache,
plugins(ctx2) {
const userPlugins = loadedConfig.plugins?.(ctx2) ?? [];
return [...builtinPlugins, ...userPlugins];
}
};
if (hivePersistedDocumentsEndpoint) {
const token = hivePersistedDocumentsToken || loadedConfig.persistedDocuments && "token" in loadedConfig.persistedDocuments && loadedConfig.persistedDocuments.token;
if (!token) {
ctx.log.error(
`Hive persisted documents needs a CDN token. Please provide it through the "--hive-persisted-documents-token <token>" option or the config.`
);
process.exit(1);
}
config.persistedDocuments = {
...loadedConfig.persistedDocuments,
type: "hive",
endpoint: hivePersistedDocumentsEndpoint,
token
};
}
if (maskedErrors != null) {
config.maskedErrors = maskedErrors;
}
if (typeof config.pollingInterval === "number" && config.pollingInterval < 1e4) {
process.stderr.write(
`error: polling interval duration too short ${config.pollingInterval}, use at least 10 seconds
`
);
process.exit(1);
}
return runSubgraph(ctx, config);
});
async function runSubgraph({ log }, config) {
let absSchemaPath = null;
if (typeof config.subgraph === "string" && isValidPath(config.subgraph) && !isUrl(config.subgraph)) {
const subgraphPath = config.subgraph;
absSchemaPath = isAbsolute(subgraphPath) ? String(subgraphPath) : resolve(process.cwd(), subgraphPath);
try {
await lstat(absSchemaPath);
} catch {
throw new Error(`Subgraph schema at ${absSchemaPath} does not exist`);
}
}
if (handleFork(log, config)) {
return;
}
const runtime = createGatewayRuntime(config);
if (absSchemaPath) {
log.info(`Loading local subgraph from ${absSchemaPath}`);
} else if (isUrl(String(config.subgraph))) {
log.info(`Loading remote subgraph from ${config.subgraph}`);
} else {
log.info("Loading subgraph from config");
}
await runtime.getSchema();
await startServerForRuntime(runtime, {
...config,
log
});
}
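// Illustrative invocation (assumes the published "hive-gateway" bin name):
//   hive-gateway subgraph ./subgraph.graphql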
const addCommand = (ctx, cli) => cli.command("supergraph").description(
"serve a Federation supergraph provided by a compliant composition tool such as Mesh Compose or Apollo Rover"
).argument(
"[schemaPathOrUrl]",
'path to the composed supergraph schema file or a url from where to pull the supergraph schema (default: "supergraph.graphql")'
).addOption(
new Option(
"--apollo-uplink <uplink>",
"The URL of the managed federation up link. When retrying after a failure, you should cycle through the default up links using this option."
).env("APOLLO_SCHEMA_CONFIG_DELIVERY_ENDPOINT")
).action(async function supergraph(schemaPathOrUrl) {
const {
opentelemetry,
opentelemetryExporterType,
hiveCdnEndpoint,
hiveCdnKey,
hiveRegistryToken,
hiveUsageTarget,
hiveTarget,
hiveAccessToken,
hiveUsageAccessToken,
hiveTraceAccessToken,
hiveTraceEndpoint,
maskedErrors,
apolloGraphRef,
apolloKey,
hivePersistedDocumentsEndpoint,
hivePersistedDocumentsToken,
...opts
} = this.optsWithGlobals();
const { apolloUplink } = this.opts();
ctx.log.info(
`Starting ${ctx.productName} ${ctx.version} with supergraph`
);
const openTelemetryEnabledByCLI = await handleOpenTelemetryCLIOpts(ctx, {
openTelemetry: opentelemetry,
openTelemetryExporterType: opentelemetryExporterType,
hiveTarget,
hiveTraceAccessToken,
hiveTraceEndpoint
});
const loadedConfig = await loadConfig({
log: ctx.log,
configPath: opts.configPath,
quiet: !cluster.isPrimary,
configFileName: ctx.configFileName
});
let supergraph2 = "./supergraph.graphql";
if (schemaPathOrUrl) {
ctx.log.info(`Supergraph will be loaded from "${schemaPathOrUrl}"`);
if (hiveCdnKey) {
ctx.log.info("Using Hive CDN key");
if (!isUrl(schemaPathOrUrl)) {
ctx.log.error(
`Hive CDN endpoint must be a URL when providing --hive-cdn-key but got "${schemaPathOrUrl}"`
);
process.exit(1);
}
supergraph2 = {
type: "hive",
endpoint: schemaPathOrUrl,
key: hiveCdnKey
};
} else if (apolloKey) {
ctx.log.info("Using GraphOS API key");
if (!schemaPathOrUrl.includes("@")) {
ctx.log.error(
`Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant> when providing --apollo-key. Please provide a valid graph ref, not "${schemaPathOrUrl}".`
);
process.exit(1);
}
supergraph2 = {
type: "graphos",
apiKey: apolloKey,
graphRef: schemaPathOrUrl,
...apolloUplink ? { upLink: apolloUplink } : {}
};
} else {
supergraph2 = schemaPathOrUrl;
}
} else if (hiveCdnEndpoint) {
if (!isUrl(hiveCdnEndpoint)) {
ctx.log.error(
`Hive CDN endpoint must be a valid URL but got ${hiveCdnEndpoint}. Please provide a valid URL.`
);
process.exit(1);
}
if (!hiveCdnKey) {
ctx.log.error(
`Hive CDN requires an API key. Please provide an API key using the --hive-cdn-key option. Learn more at https://the-guild.dev/graphql/hive/docs/features/high-availability-cdn#cdn-access-tokens`
);
process.exit(1);
}
ctx.log.info(`Using Hive CDN endpoint ${hiveCdnEndpoint}`);
supergraph2 = {
type: "hive",
endpoint: hiveCdnEndpoint,
key: hiveCdnKey
};
} else if (apolloGraphRef) {
if (!apolloGraphRef.includes("@")) {
ctx.log.error(
`Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant>. Please provide a valid graph ref, not ${apolloGraphRef}.`
);
process.exit(1);
}
if (!apolloKey) {
ctx.log.error(
"Apollo GraphOS requires an API key. Please provide an API key using the --apollo-key option."
);
process.exit(1);
}
ctx.log.info(`Using Apollo Graph Ref ${apolloGraphRef}`);
supergraph2 = {
type: "graphos",
apiKey: apolloKey,
graphRef: apolloGraphRef,
upLink: apolloUplink
};
} else if ("supergraph" in loadedConfig) {
supergraph2 = loadedConfig.supergraph;
} else {
ctx.log.info(`Using default supergraph location "${supergraph2}"`);
}
const registryConfig = {};
const reporting = handleReportingConfig(ctx, loadedConfig, {
hiveTarget,
hiveAccessToken,
hiveTraceAccessToken,
hiveRegistryToken,
hiveUsageTarget,
hiveUsageAccessToken,
apolloGraphRef: apolloGraphRef || schemaPathOrUrl,
apolloKey
});
if (reporting) {
registryConfig.reporting = reporting;
}
const pubsub = loadedConfig.pubsub || new MemPubSub();
const cwd = loadedConfig.cwd || process.cwd();
if (loadedConfig.logging != null) {
ctx.log = createLoggerFromLogging(loadedConfig.logging);
}
const cache = await getCacheInstanceFromConfig(loadedConfig, {
pubsub,
log: ctx.log,
cwd
});
const builtinPlugins = await getBuiltinPluginsFromConfig(
{
...loadedConfig,
...opts,
openTelemetry: openTelemetryEnabledByCLI ? { ...loadedConfig.openTelemetry, traces: true } : loadedConfig.openTelemetry
},
{
log: ctx.log,
cache
}
);
const config = {
...defaultOptions,
...loadedConfig,
...opts,
pollingInterval: opts.polling || ("pollingInterval" in loadedConfig ? loadedConfig.pollingInterval : void 0) || defaultOptions.pollingInterval,
...registryConfig,
supergraph: supergraph2,
logging: ctx.log,
productName: ctx.productName,
productDescription: ctx.productDescription,
productPackageName: ctx.productPackageName,
productLink: ctx.productLink,
productLogo: ctx.productLogo,
pubsub,
cache,
plugins(ctx2) {
const userPlugins = loadedConfig.plugins?.(ctx2) ?? [];
return [...builtinPlugins, ...userPlugins];
}
};
if (hivePersistedDocumentsEndpoint) {
const token = hivePersistedDocumentsToken || loadedConfig.persistedDocuments && "token" in loadedConfig.persistedDocuments && loadedConfig.persistedDocuments.token;
if (!token) {
ctx.log.error(
'Hive persisted documents needs a CDN token. Please provide it through the "--hive-persisted-documents-token <token>" option or the config.'
);
process.exit(1);
}
config.persistedDocuments = {
...loadedConfig.persistedDocuments,
type: "hive",
endpoint: hivePersistedDocumentsEndpoint,
token
};
}
if (maskedErrors != null) {
config.maskedErrors = maskedErrors;
}
if (typeof config.pollingInterval === "number" && config.pollingInterval < 1e4) {
process.stderr.write(
`error: polling interval duration too short ${config.pollingInterval}, use at least 10 seconds
`
);
process.exit(1);
}
return runSupergraph(ctx, config);
}).allowUnknownOption(getNodeEnv() === "test").allowExcessArguments(getNodeEnv() === "test");
async function runSupergraph({ log }, config) {
let absSchemaPath = null;
if (typeof config.supergraph === "string" && isValidPath(config.supergraph) && !isUrl(config.supergraph)) {
const supergraphPath = config.supergraph;
absSchemaPath = isAbsolute(supergraphPath) ? String(supergraphPath) : resolve(process.cwd(), supergraphPath);
try {
await lstat(absSchemaPath);
} catch (err) {
log.error(
{ path: absSchemaPath, err },
"Could not find supergraph. Make sure the file exists."
);
process.exit(1);
}
}
if (absSchemaPath) {
delete config.pollingInterval;
if (cluster.isPrimary) {
log.info({ path: absSchemaPath }, "Watching supergraph file for changes");
const ctrl = new AbortController();
registerTerminateHandler((signal) => {
log.info(
{ path: absSchemaPath },
`Closing watcher for supergraph on ${signal}`
);
return ctrl.abort(`Process terminated on ${signal}`);
});
(async function watcher() {
for await (const f of watch(absSchemaPath, {
signal: ctrl.signal
})) {
if (f.eventType === "rename") {
throw new Error(`Supergraph file was renamed to "${f.filename}"`);
}
log.info(
{ path: absSchemaPath },
"Supergraph changed. Invalidating..."
);
if (config.fork && config.fork > 1) {
for (const workerId in cluster.workers) {
cluster.workers[workerId].send("invalidateUnifiedGraph");
}
} else {
runtime.invalidateUnifiedGraph();
}
}
})().catch((e) => {
if (e.name === "AbortError") return;
log.error(
{ path: absSchemaPath, err: e },
"Supergraph watcher closed with an error"
);
}).then(() => {
log.info(
{ path: absSchemaPath },
"Supergraph watcher successfuly closed"
);
});
}
}
if (handleFork(log, config)) {
return;
}
if (config.additionalTypeDefs) {
const loaders = [new GraphQLFileLoader(), new CodeFileLoader()];
const additionalTypeDefsArr = asArray(config.additionalTypeDefs);
config.additionalTypeDefs = await Promise.all(
additionalTypeDefsArr.flatMap(async (ptr) => {
if (typeof ptr === "string" && ptr.length <= 255 && isValidPath(ptr)) {
const sources = await loadTypedefs(ptr, {
loaders
});
return sources.map((source) => {
const typeSource = source.document || source.rawSDL || source.schema;
if (!typeSource) {
throw new Error(`Invalid source ${source.location || ptr}`);
}
return typeSource;
});
}
return ptr;
})
);
}
const runtime = createGatewayRuntime(config);
if (absSchemaPath) {
log.info({ path: absSchemaPath }, "Loading local supergraph");
} else if (isUrl(String(config.supergraph))) {
log.info({ url: config.supergraph }, "Loading remote supergraph");
} else if (typeof config.supergraph === "object" && "type" in config.supergraph && config.supergraph.type === "hive") {
log.info(
{ endpoint: config.supergraph.endpoint },
"Loading supergraph from Hive CDN"
);
} else {
log.info("Loading supergraph from config");
}
await runtime.getSchema();
await startServerForRuntime(runtime, {
...config,
log
});
}
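// Illustrative invocation (assumes the published "hive-gateway" bin name):
//   hive-gateway supergraph ./supergraph.graphql --fork 4 --polling 30s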
const addCommands = (ctx, cli) => {
addCommand(ctx, cli);
addCommand$1(ctx, cli);
addCommand$2(ctx, cli);
};
function getFreeMemInGb() {
// freemem() reports bytes; 1024 ** 3 converts to GiB, matching the name and
// the one-worker-per-GiB heuristic below
return freemem() / 1024 ** 3;
}
function getMaxConcurrencyPerMem() {
return Math.floor(getFreeMemInGb());
}
function getMaxConcurrencyPerCpu() {
return availableParallelism();
}
function getMaxConcurrency() {
const result = Math.min(getMaxConcurrencyPerMem(), getMaxConcurrencyPerCpu());
if (result < 1) {
return 1;
}
return result;
}
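// Example (illustrative): 8 available CPUs but ~3 GiB of free memory caps
// the worker count at 3; the clamp above guarantees at least 1.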
function defineConfig(config) {
return config;
}
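// defineConfig is an identity helper, presumably for config-file type
// inference, e.g.:
//   export const gatewayConfig = defineConfig({ maskedErrors: true });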
const defaultOptions = {
fork: 1,
host: platform().toLowerCase() === "win32" || // is WSL?
release().toLowerCase().includes("microsoft") ? "127.0.0.1" : "0.0.0.0",
port: 4e3,
pollingInterval: 1e4,
renderGraphiQL
};
let cli = new Command().configureHelp({
// will print help of global options for each command
showGlobalOptions: true
}).addOption(
new Option(
"--fork <number>",
`number of workers to spawn. (default: ${defaultOptions.fork})`
).env("FORK").argParser((v) => {
const number = parseInt(v);
if (isNaN(number)) {
throw new InvalidArgumentError("not a number.");
}
const maxConcurrency = getMaxConcurrency();
if (number > maxConcurrency) {
throw new InvalidArgumentError(
`exceeds the available concurrency "${maxConcurrency}".`
);
}
return number;
})
).addOption(
new Option(
"-c, --config-path <path>",
`path to the configuration file. defaults to the following files (in order) in the current working directory: ${createDefaultConfigPaths("gateway").join(", ")}`
).env("CONFIG_PATH")
).addOption(
new Option(
"-h, --host <hostname>",
`host to use for serving (default: ${defaultOptions.host})`
)
).addOption(
new Option(
"-p, --port <number>",
`port to use for serving (default: ${defaultOptions.port})`
).env("PORT").argParser((v) => {
const port = parseInt(v);
if (isNaN(port)) {
throw new InvalidArgumentError("not a number.");
}
return port;
})
).addOption(
new Option(
"--polling <duration>",
`schema polling interval as a human-readable duration (default: 10s)`
).env("POLLING").argParser((v) => {
const interval = parse(v);
if (!interval) {
throw new InvalidArgumentError("not a duration.");
}
return interval;
})
).option("--no-masked-errors", "don't mask unexpected errors in responses").option(
"--masked-errors",
"mask unexpected errors in responses (default: true)",
// we use "null" intentionally so that we know when the user provided the flag vs when not
// see here https://github.com/tj/commander.js/blob/970ecae402b253de691e6a9066fea22f38fe7431/lib/command.js#L655
// @ts-expect-error
null
).addOption(
new Option(
"--opentelemetry [exporter-endpoint]",
`Enable OpenTelemetry integration with an exporter using this option's value as the endpoint. By default, it uses OTLP HTTP; use "--opentelemetry-exporter-type" to change the default.`
).env("OPENTELEMETRY")
).addOption(
new Option(
"--opentelemetry-exporter-type <type>",
`OpenTelemetry exporter type to use when setting up OpenTelemetry integration. Requires "--opentelemetry" to set the endpoint.`
).choices(["otlp-http", "otlp-grpc"]).default("otlp-http").env("OPENTELEMETRY_EXPORTER_TYPE")
).addOption(
new Option(
"--hive-registry-token <token>",
'[DEPRECATED] please use "--hive-target" and "--hive-access-token"'
).env("HIVE_REGISTRY_TOKEN")
).addOption(
new Option(
"--hive-usage-target <target>",
"[DEPRECATED] please use --hive-target instead."
).env("HIVE_USAGE_TARGET")
).addOption(
new Option(
"--hive-target <target>",
'Hive registry target to which usage and tracing data should be reported. Requires the "--hive-access-token <token>", "--hive-usage-access-token <token>", or "--hive-trace-access-token" option'
).env("HIVE_TARGET")
).addOption(
new Option(
"--hive-access-token <token>",
'Hive registry access token for usage metrics reporting and tracing. Enables both usage reporting and tracing. Requires the "--hive-target <target>" option'
).env("HIVE_ACCESS_TOKEN")
).addOption(
new Option(
"--hive-usage-access-token <token>",
`Hive registry access token for usage reporting. Enables Hive usage reporting. Requires the "--hive-target <target>" option. It can't be used together with "--hive-access-token"`
).env("HIVE_USAGE_ACCESS_TOKEN")
).addOption(
new Option(
"--hive-trace-access-token <token>",
`Hive registry access token for tracing. Enables Hive tracing. Requires the "--hive-target <target>" option. It can't be used together with "--hive-access-token"`
).env("HIVE_TRACE_ACCESS_TOKEN")
).addOption(
new Option(
"--hive-trace-endpoint <endpoint>",
`Hive registry tracing endpoint.`
).env("HIVE_TRACE_ENDPOINT").default(`https://api.graphql-hive.com/otel/v1/traces`)
).option(
"--hive-persisted-documents-endpoint <endpoint>",
'[EXPERIMENTAL] Hive CDN endpoint for fetching the persisted documents. Requires the "--hive-persisted-documents-token <token>" option'
).option(
"--hive-persisted-documents-token <token>",
'[EXPERIMENTAL] Hive persisted documents CDN endpoint token