@graphql-hive/plugin-opentelemetry

import { LogWriter, LogLevel, Attributes, Logger as Logger$1 } from '@graphql-hive/logger';
import { Context, TracerProvider, ContextManager, TextMapPropagator } from '@opentelemetry/api';
import { Resource } from '@opentelemetry/resources';
import { BufferConfig, SpanProcessor, Span, SpanLimits, SpanExporter, Sampler, GeneralLimits } from '@opentelemetry/sdk-trace-base';
export { S as SEMATTRS_GRAPHQL_DOCUMENT, b as SEMATTRS_GRAPHQL_OPERATION_NAME, a as SEMATTRS_GRAPHQL_OPERATION_TYPE, g as SEMATTRS_HIVE_GATEWAY_OPERATION_SUBGRAPH_NAMES, f as SEMATTRS_HIVE_GATEWAY_UPSTREAM_SUBGRAPH_NAME, e as SEMATTRS_HIVE_GRAPHQL_ERROR_CODES, d as SEMATTRS_HIVE_GRAPHQL_ERROR_COUNT, c as SEMATTRS_HIVE_GRAPHQL_OPERATION_HASH } from './attributes-mikIPKnv.js';
import { Logger, SeverityNumber } from '@opentelemetry/api-logs';
import { LoggerProvider, LogRecordLimits, LogRecordProcessor, LogRecordExporter } from '@opentelemetry/sdk-logs';
export { ATTR_SERVICE_NAME, ATTR_SERVICE_VERSION, SEMATTRS_HTTP_CLIENT_IP, SEMATTRS_HTTP_HOST, SEMATTRS_HTTP_METHOD, SEMATTRS_HTTP_ROUTE, SEMATTRS_HTTP_SCHEME, SEMATTRS_HTTP_SERVER_NAME, SEMATTRS_HTTP_STATUS_CODE, SEMATTRS_HTTP_URL, SEMATTRS_HTTP_USER_AGENT, SEMATTRS_NET_HOST_NAME } from '@opentelemetry/semantic-conventions';

type ProcessorOptions = {
    forceFlushTimeoutMillis?: number;
    logRecordLimits?: LogRecordLimits;
    resource?: Resource;
    console?: boolean;
};
type OpenTelemetryLogWriterSetupOptions = {
    logger: Logger;
} | {
    provider: LoggerProvider;
} | (ProcessorOptions & ({
    processors: LogRecordProcessor[];
    exporter?: never;
} | {
    exporter: LogRecordExporter;
    batching?: boolean | BufferConfig;
    processors?: never;
} | {
    console: boolean;
    processors?: never;
    exporter?: never;
}));
type OpenTelemetryLogWriterOptions = OpenTelemetryLogWriterSetupOptions & {
    useContextManager?: boolean;
};
declare class OpenTelemetryLogWriter implements LogWriter {
    private logger;
    private useContextManager;
    constructor(options: OpenTelemetryLogWriterOptions);
    flush(): void | Promise<void>;
    write(level: LogLevel, attrs: Attributes | null | undefined, msg: string | null | undefined): void | Promise<void>;
}
declare const HIVE_LOG_LEVEL_NUMBERS: {
    trace: SeverityNumber;
    debug: SeverityNumber;
    info: SeverityNumber;
    warn: SeverityNumber;
    error: SeverityNumber;
};
declare function getContextForRequest(attributes?: {
    requestId?: string;
}): Context;

type HiveTracingSpanProcessorOptions = {
    target: string;
    accessToken: string;
    endpoint: string;
    batching?: BufferConfig;
    processor?: never;
} | {
    processor: SpanProcessor;
};
declare class HiveTracingSpanProcessor implements SpanProcessor {
    private traceStateById;
    private processor;
    constructor(config: HiveTracingSpanProcessorOptions);
    onStart(span: Span, parentContext: Context): void;
    onEnd(span: Span): void;
    forceFlush(): Promise<void>;
    shutdown(): Promise<void>;
}

type TracingOptions = {
    traces?: {
        /**
         * A custom Trace Provider.
         */
        tracerProvider: TracerProvider;
    } | (TracerOptions & ({
        /**
         * The span processors that will be used to process recorded spans.
         * All processors will receive all recorded spans.
         */
        processors: SpanProcessor[];
        tracerProvider?: never;
        exporter?: never;
    } | {
        /**
         * The exporter that will be used to send spans.
         */
        exporter: SpanExporter;
        /**
         * The batching options. By default, spans are batched using the default BatchProcessor.
         * You can pass `false` to disable batching entirely (not recommended for production).
         */
        batching?: BatchingConfig | boolean;
        tracerProvider?: never;
        processors?: never;
    } | {
        tracerProvider?: never;
        processors?: never;
        exporter?: never;
    }));
};
type TracerOptions = {
    /**
     * If true, adds a Console Exporter that writes all spans to stdout.
     * This can be used for debugging purposes if you struggle to receive spans.
     */
    console?: boolean;
    /**
     * The limits of the Span API, such as span and attribute sizes.
     */
    spanLimits?: SpanLimits;
};
type SamplingOptions = {
    /**
     * A custom sampling strategy.
     */
    sampler: Sampler;
    samplingRate?: never;
} | {
    sampler?: never;
    /**
     * A sampling rate based on a Parent First and Trace Id consistent probabilistic strategy.
     * Set to 1 to record all traces, 0 to record none.
     */
    samplingRate?: number;
};
type OpentelemetrySetupOptions = TracingOptions & SamplingOptions & {
    /**
     * The Resource that will be used to create the Trace Provider.
     * Can be either a Resource instance, or a simple object with service name and version.
     */
    resource?: Resource | {
        serviceName: string;
        serviceVersion: string;
    };
    /**
     * The Context Manager to be used to track OTEL Context.
     * If possible, use `AsyncLocalStorageContextManager` from `@opentelemetry/context-async-hooks`.
     */
    contextManager: ContextManager | null;
    /**
     * A custom list of propagators that will replace the default ones (Trace Context and Baggage).
     */
    propagators?: TextMapPropagator[];
    /**
     * The general limits of OTEL attributes.
     */
    generalLimits?: GeneralLimits;
    /**
     * The Logger to be used by this utility.
     * A child of this logger will be used for the OTEL diag API, unless `configureDiagLogger` is false.
     */
    log?: Logger$1;
    /**
     * Configure the OpenTelemetry `diag` API to use the Gateway's logger.
     *
     * @default true
     *
     * Note: Logger configuration respects the OTEL environment variables standard.
     * This means that the logger will be enabled only if the `OTEL_LOG_LEVEL` variable is set.
     */
    configureDiagLogger?: boolean;
};
declare function openTelemetrySetup(options: OpentelemetrySetupOptions): void;

type HiveTracingOptions = {
    target?: string;
} & ({
    accessToken?: string;
    batching?: BufferConfig;
    processor?: never;
    endpoint?: string;
} | {
    processor: SpanProcessor;
});
declare function hiveTracingSetup(config: HiveTracingOptions & {
    contextManager: ContextManager | null;
    log?: Logger$1;
}): void;

type BatchingConfig = boolean | BufferConfig;

export { type BatchingConfig, HIVE_LOG_LEVEL_NUMBERS, type HiveTracingOptions, HiveTracingSpanProcessor, type HiveTracingSpanProcessorOptions, OpenTelemetryLogWriter, type OpenTelemetryLogWriterOptions, type OpenTelemetryLogWriterSetupOptions, getContextForRequest, hiveTracingSetup, openTelemetrySetup };
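
// ---------------------------------------------------------------------------
// Usage sketch (not part of the declaration file above): a minimal, hedged
// example of calling `openTelemetrySetup` with the options it declares. Spans
// are exported over OTLP/HTTP and the OTEL context is tracked with
// `AsyncLocalStorageContextManager`, as the `contextManager` docs recommend.
// The service name, service version, and exporter URL are placeholders chosen
// for illustration, not values required by the package.
// ---------------------------------------------------------------------------
import { AsyncLocalStorageContextManager } from '@opentelemetry/context-async-hooks';
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http';
import { openTelemetrySetup } from '@graphql-hive/plugin-opentelemetry';

openTelemetrySetup({
    // Either a full Resource instance or this simple service descriptor is accepted.
    resource: { serviceName: 'my-gateway', serviceVersion: '1.0.0' },
    // Tracks the OTEL Context across async boundaries.
    contextManager: new AsyncLocalStorageContextManager(),
    traces: {
        // Spans are batched by default; `batching: false` disables batching (not recommended for production).
        exporter: new OTLPTraceExporter({ url: 'http://localhost:4318/v1/traces' }),
    },
    // Probabilistic sampling rate: 1 records all traces, 0 records none.
    samplingRate: 1,
});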
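
// ---------------------------------------------------------------------------
// Usage sketch (not part of the declaration file above): a hedged example of
// `hiveTracingSetup` and `OpenTelemetryLogWriter`. The target and access token
// values are placeholders, and the `console: true` log-writer variant is
// assumed to print records for debugging, by analogy with the tracing
// `console` option documented above. The writer implements `LogWriter` from
// `@graphql-hive/logger`; how it is attached to that package's Logger is not
// shown here.
// ---------------------------------------------------------------------------
import { AsyncLocalStorageContextManager } from '@opentelemetry/context-async-hooks';
import { hiveTracingSetup, OpenTelemetryLogWriter } from '@graphql-hive/plugin-opentelemetry';

hiveTracingSetup({
    // Placeholder Hive registry target and access token; substitute your own values.
    target: '<organization>/<project>/<target>',
    accessToken: '<hive-registry-access-token>',
    // Same context-manager recommendation as for openTelemetrySetup.
    contextManager: new AsyncLocalStorageContextManager(),
});

// The `console: true` setup variant with the optional `useContextManager` flag.
const logWriter = new OpenTelemetryLogWriter({ console: true, useContextManager: true });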