nice-grpc-prometheus

Prometheus monitoring for nice-grpc

server.js (96 lines, 4.22 kB)
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.prometheusServerMiddleware = prometheusServerMiddleware; const abort_controller_x_1 = require("abort-controller-x"); const nice_grpc_common_1 = require("nice-grpc-common"); const prom_client_1 = require("prom-client"); const common_1 = require("./common"); const registry_1 = require("./registry"); const defaultStartedMetric = new prom_client_1.Counter({ registers: [registry_1.registry], name: 'grpc_server_started_total', help: 'Total number of RPCs started on the server.', labelNames: common_1.labelNames, }); const defaultHandledMetric = new prom_client_1.Counter({ registers: [registry_1.registry], name: 'grpc_server_handled_total', help: 'Total number of RPCs completed on the server, regardless of success or failure.', labelNames: common_1.labelNamesWithCode, }); const defaultStreamMsgReceivedMetric = new prom_client_1.Counter({ registers: [registry_1.registry], name: 'grpc_server_msg_received_total', help: 'Total number of RPC stream messages received by the server.', labelNames: common_1.labelNames, }); const defaultStreamMsgSentMetric = new prom_client_1.Counter({ registers: [registry_1.registry], name: 'grpc_server_msg_sent_total', help: 'Total number of gRPC stream messages sent by the server.', labelNames: common_1.labelNames, }); const defaultHandlingSecondsMetric = new prom_client_1.Histogram({ registers: [registry_1.registry], name: 'grpc_server_handling_seconds', help: 'Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.', labelNames: common_1.labelNamesWithCode, buckets: common_1.latencySecondsBuckets, }); function prometheusServerMiddleware(options) { const serverStartedMetric = options?.serverStartedMetric || defaultStartedMetric; const serverHandledMetric = options?.serverHandledMetric || defaultHandledMetric; const serverStreamMsgReceivedMetric = options?.serverStreamMsgReceivedMetric || defaultStreamMsgReceivedMetric; const serverStreamMsgSentMetric = options?.serverStreamMsgSentMetric || defaultStreamMsgSentMetric; const serverHandlingSecondsMetric = options?.serverHandlingSecondsMetric || defaultHandlingSecondsMetric; return async function* prometheusServerMiddlewareGenerator(call, context) { const labels = (0, common_1.getLabels)(call.method); serverStartedMetric.inc(labels); const stopTimer = serverHandlingSecondsMetric.startTimer(labels); let settled = false; let status = nice_grpc_common_1.Status.OK; try { let request; if (!call.requestStream) { request = call.request; } else { request = (0, common_1.incrementStreamMessagesCounter)(call.request, serverStreamMsgReceivedMetric.labels(labels)); } if (!call.responseStream) { const response = yield* call.next(request, context); settled = true; return response; } else { yield* (0, common_1.incrementStreamMessagesCounter)(call.next(request, context), serverStreamMsgSentMetric.labels(labels)); settled = true; return; } } catch (err) { settled = true; if (err instanceof nice_grpc_common_1.ServerError) { status = err.code; } else if ((0, abort_controller_x_1.isAbortError)(err)) { status = nice_grpc_common_1.Status.CANCELLED; } else { status = nice_grpc_common_1.Status.UNKNOWN; } throw err; } finally { if (!settled) { status = nice_grpc_common_1.Status.CANCELLED; } stopTimer({ [common_1.codeLabel]: nice_grpc_common_1.Status[status] }); serverHandledMetric.inc({ ...labels, [common_1.codeLabel]: nice_grpc_common_1.Status[status], }); } }; } //# sourceMappingURL=server.js.map