UNPKG

autotel

Version:
692 lines (671 loc) • 28.1 kB
export { A as AutotelConfig, i as init, a as isLoggerLocked, l as lockLogger } from './init-CMuTaFAV.cjs';
import { Span, Context } from '@opentelemetry/api';
export { Context, ROOT_CONTEXT, Span, SpanContext, SpanKind, Link as SpanLink, SpanStatusCode, TextMapGetter, TextMapSetter, Tracer, context, trace as otelTrace, propagation } from '@opentelemetry/api';
import { SpanProcessor, ReadableSpan } from '@opentelemetry/sdk-trace-base';
export { FilteringSpanProcessor, FilteringSpanProcessorOptions, SpanFilterPredicate } from './filtering-span-processor.cjs';
export { NORMALIZER_PATTERNS, NORMALIZER_PRESETS, SpanNameNormalizerConfig, SpanNameNormalizerFn, SpanNameNormalizerPreset, SpanNameNormalizingProcessor, SpanNameNormalizingProcessorOptions } from './span-name-normalizer.cjs';
import { AttributeRedactorConfig, AttributeRedactorPreset } from './attribute-redacting-processor.cjs';
export { AttributeRedactingProcessor, AttributeRedactingProcessorOptions, AttributeRedactorFn, REDACTOR_PATTERNS, REDACTOR_PRESETS, ValuePatternConfig, createAttributeRedactor, createRedactedSpan } from './attribute-redacting-processor.cjs';
export { InstrumentOptions, SpanOptions, WithBaggageOptions, WithNewContextOptions, ctx, instrument, span, trace, withBaggage, withNewContext, withTracing } from './functional.cjs';
import { EventSubscriber, EventAttributes, AutotelEventContext } from './event-subscriber.cjs';
export { FunnelStatus, OutcomeStatus } from './event-subscriber.cjs';
export { CORRELATION_ID_BAGGAGE_KEY, generateCorrelationId, getCorrelationId, getOrCreateCorrelationId, runWithCorrelationId, setCorrelationId, setCorrelationIdInBaggage } from './correlation-id.cjs';
import { T as TraceContext, A as AttributeValue } from './trace-context-t5X1AP-e.cjs';
export { d as defineBaggageSchema } from './trace-context-t5X1AP-e.cjs';
export { ParsedError, parseError } from './parse-error.cjs';
export { DrainPipelineOptions, PipelineDrainFn, createDrainPipeline } from './drain-pipeline.cjs';
export { AUTOTEL_SAMPLING_TAIL_EVALUATED, AUTOTEL_SAMPLING_TAIL_KEEP, AdaptiveSampler, AlwaysSampler, NeverSampler, RandomSampler, Sampler, SamplingContext, SamplingPreset, UserIdSampler, createLinkFromHeaders, extractLinksFromBatch, resolveSamplingPreset, samplingPresets } from './sampling.cjs';
export { Event, EventsOptions, getEvents, resetEvents } from './event.cjs';
export { Metric, MetricsOptions, getMetrics, resetMetrics } from './metric.cjs';
export { createCounter, createHistogram, createObservableGauge, createUpDownCounter, getMeter } from './metric-helpers.cjs';
import { ViewOptions } from '@opentelemetry/sdk-metrics';
export { TraceContext as OtelTraceContext, createDeterministicTraceId, enrichWithTraceContext, finalizeSpan, flattenMetadata, getActiveContext, getActiveSpan, getTraceContext, getTracer, isTracing, resolveTraceUrl, runWithSpan } from './trace-helpers.cjs';
export { getAutotelTracer, getAutotelTracerProvider, setAutotelTracerProvider } from './tracer-provider.cjs';
export { DBConfig, HTTPConfig, LLMConfig, MessagingConfig, traceDB, traceHTTP, traceLLM, traceMessaging } from './semantic-helpers.cjs';
export { HTTPAttributes, ServiceAttributes, URLAttributes, httpRequestHeaderAttribute, httpResponseHeaderAttribute } from './semantic-conventions.cjs';
export { a as AttributeGuardrails, A as AttributePolicy, C as ClientAttrs, b as CloudAttrs, c as CodeAttrs, d as ContainerAttrs, D as DBAttrs, e as DeploymentAttrs, f as DeviceAttrs, E as ErrorAttrs, g as ExceptionAttrs, F as FaaSAttrs, h as FeatureFlagAttrs, G as GenAIAttrs, i as GraphQLAttrs, H as HTTPClientAttrs, j as HTTPServerAttrs, K as K8sAttrs, M as MessagingAttrs, N as NetworkAttrs, O as OTelAttrs, P as PeerAttrs, k as ProcessAttrs, R as RPCAttrs, l as ServerAddressAttrs, m as ServiceAttrs, S as SessionAttrs, T as TLSAttrs, n as ThreadAttrs, o as URLAttrs, U as UserAttrs, p as attrs, q as autoRedactPII, s as dbClient, u as httpClient, v as httpServer, w as identify, x as mergeAttrs, y as mergeServiceResource, z as request, B as safeSetAttributes, I as setDevice, J as setError, L as setException, Q as setSession, V as setUser, W as validateAttribute } from './utils-CbUkl8r1.cjs';
export { ConsumerConfig, ConsumerContext, LagMetricsConfig, MessagingOperation, MessagingSystem, ProducerConfig, ProducerContext, traceConsumer, traceProducer } from './messaging.cjs';
export { BaggageError, BaggageFieldDefinition, BaggageFieldType, BusinessBaggage, BusinessBaggageValues, SafeBaggageOptions, SafeBaggageSchema, createSafeBaggageSchema } from './business-baggage.cjs';
export { StepConfig, StepContext, StepStatus, WorkflowConfig, WorkflowContext, WorkflowStatus, getCurrentWorkflowContext, isInWorkflow, traceStep, traceWorkflow } from './workflow.cjs';
import '@opentelemetry/sdk-node';
import '@opentelemetry/resources';
import './logger.cjs';
import 'pino';
import '@opentelemetry/sdk-logs';
import './processors.cjs';
import 'node:async_hooks';
import './event-testing.cjs';
import './metric-testing.cjs';

/** Standalone string redaction for use outside the span processor pipeline. */
type StringRedactor = (value: string) => string;
declare function createStringRedactor(config: AttributeRedactorConfig | AttributeRedactorPreset): StringRedactor;

/**
 * Span processor that copies baggage entries to span attributes
 *
 * This makes baggage visible in trace UIs without manual attribute setting.
 * Enabled via init({ baggage: true }) or init({ baggage: 'custom-prefix' })
 */
interface BaggageSpanProcessorOptions {
    /**
     * Prefix for baggage attributes
     * @default 'baggage.'
     */
    prefix?: string;
}
/**
 * Span processor that automatically copies baggage entries to span attributes
 *
 * This makes baggage visible in trace UIs (Jaeger, Grafana, DataDog, etc.)
 * without manually calling ctx.setAttribute() for each baggage entry.
 *
 * @example Enable in init()
 * ```typescript
 * init({
 *   service: 'my-app',
 *   baggage: true // Uses default 'baggage.' prefix
 * });
 *
 * // Now baggage automatically appears as span attributes
 * await withBaggage({
 *   baggage: { 'tenant.id': 't1', 'user.id': 'u1' },
 *   fn: async () => {
 *     // Span has baggage.tenant.id and baggage.user.id attributes!
 *   }
 * });
 * ```
 *
 * @example Custom prefix
 * ```typescript
 * init({
 *   service: 'my-app',
 *   baggage: 'ctx' // Uses 'ctx.' prefix
 * });
 * // Creates attributes: ctx.tenant.id, ctx.user.id
 * ```
 */
declare class BaggageSpanProcessor implements SpanProcessor {
    private readonly prefix;
    constructor(options?: BaggageSpanProcessorOptions);
    onStart(span: Span, parentContext: Context): void;
    onEnd(_span: ReadableSpan): void;
    shutdown(): Promise<void>;
    forceFlush(): Promise<void>;
}

/**
 * Operation context tracking using AsyncLocalStorage
 *
 * This module provides a way to track operation names across async boundaries
 * so they can be automatically captured in emitted events.
 *
 * We cannot read span attributes from OpenTelemetry's API (it's write-only),
 * so we maintain our own async context storage.
 */
/**
 * Operation context that flows through async operations
 */
interface OperationContext {
    /**
     * The name of the current operation
     * This is set by trace() or span() and can be read by events
     */
    name: string;
}
/**
 * Get the current operation context (if any)
 *
 * @returns The current operation context, or undefined if not in an operation
 *
 * @example
 * ```typescript
 * const ctx = getOperationContext();
 * if (ctx) {
 *   console.log('Current operation:', ctx.name);
 * }
 * ```
 */
declare function getOperationContext(): OperationContext | undefined;
/**
 * Run a function within an operation context
 *
 * This sets the operation name for the duration of the function execution,
 * including all async operations spawned from it.
 *
 * @param name - The operation name to set
 * @param fn - The function to execute within the context
 * @returns The result of the function
 *
 * @example
 * ```typescript
 * const result = await runInOperationContext('user.create', async () => {
 *   // Any events.trackEvent() calls here will automatically capture
 *   // 'operation.name': 'user.create'
 *   await createUser();
 *   return 'success';
 * });
 * ```
 */
declare function runInOperationContext<T>(name: string, fn: () => T): T;

/**
 * Token bucket rate limiter for event subscribers
 *
 * Prevents overwhelming downstream event platforms with too many events.
 * Uses token bucket algorithm for smooth rate limiting with burst capacity.
 */
interface RateLimiterConfig {
    /** Maximum events per second (default: 100) */
    maxEventsPerSecond: number;
    /** Burst capacity - max events in a single burst (default: 2x rate) */
    burstCapacity?: number;
}

/**
 * Event queue with batching, backpressure, retry logic, rate limiting, and OTel metrics
 *
 * Exposes delivery pipeline metrics for observability:
 * - autotel.event_delivery.queue.size - Current queue size
 * - autotel.event_delivery.queue.oldest_age_ms - Age of oldest event in queue
 * - autotel.event_delivery.queue.delivered - Successfully delivered events
 * - autotel.event_delivery.queue.failed - Failed event deliveries
 * - autotel.event_delivery.queue.dropped - Dropped events with reason
 * - autotel.event_delivery.queue.latency_ms - Delivery latency histogram
 * - autotel.event_delivery.subscriber.health - Subscriber health (1=healthy, 0=unhealthy)
 */
interface EventData {
    name: string;
    attributes?: EventAttributes;
    timestamp: number;
    /** Internal: correlation ID for debug breadcrumbs */
    _correlationId?: string;
    /** Internal: trace ID for debug breadcrumbs */
    _traceId?: string;
    /** Autotel context for trace correlation (passed to subscribers) */
    autotel?: AutotelEventContext;
}
interface QueueConfig {
    maxSize: number;
    batchSize: number;
    flushInterval: number;
    maxRetries: number;
    rateLimit?: RateLimiterConfig;
}
/**
 * Event queue with batching and backpressure
 *
 * Features:
 * - Batches events for efficient sending
 * - Bounded queue with drop-oldest policy (prod) or blocking (dev)
 * - Exponential backoff retry
 * - Rate limiting to prevent overwhelming subscribers
 * - Graceful flush on shutdown
 */
declare class EventQueue {
    private queue;
    private flushTimer;
    private readonly config;
    private readonly subscribers;
    private readonly rateLimiter;
    private flushPromise;
    private isShuttingDown;
    private metrics;
    private observableCleanups;
    private subscriberHealthy;
    constructor(subscribers: EventSubscriber[], config?: Partial<QueueConfig>);
    /**
     * Initialize OTel metrics for queue observability
     */
    private initMetrics;
    /**
     * Record a dropped event with reason and emit debug breadcrumb
     */
    private recordDropped;
    /**
     * Record permanent delivery failure (after all retries exhausted)
     * Increments failed counter and logs error
     */
    private recordFailed;
    /**
     * Mark subscriber as unhealthy on transient failure (without incrementing failed counter)
     * Used during retry attempts - only recordFailed should increment the counter
     */
    private markSubscriberUnhealthy;
    /**
     * Record successful delivery
     */
    private recordDelivered;
    /**
     * Enqueue an event for sending
     *
     * Backpressure policy:
     * - Drops oldest event and logs warning if queue is full (same behavior in all environments)
     */
    enqueue(event: EventData): void;
    /**
     * Schedule a batch flush if not already scheduled
     */
    private scheduleBatchFlush;
    /**
     * Flush a batch of events
     * Uses promise-based concurrency control to prevent race conditions
     */
    private flushBatch;
    /**
     * Internal flush implementation
     */
    private doFlushBatch;
    /**
     * Send events with exponential backoff retry
     * Tracks per-event, per-subscriber failures so failed counter reflects actual failed deliveries.
     * On retry, only failed (event, subscriber) pairs are re-sent to avoid double-counting delivered.
     */
    private sendWithRetry;
    /**
     * Send events to configured subscribers with rate limiting and metrics.
     * When subscribersByEventIndex is provided (retry path), only those subscribers are tried per event.
     * Returns per-event, per-subscriber failures (empty if all succeeded).
     */
    private sendToSubscribers;
    /**
     * Send a single event to subscribers.
     * - When subscriberNames is undefined (initial attempt): send to all subscribers.
     * - When subscriberNames is provided (retry): send only to those subscribers (never re-send to healthy ones).
     * Returns list of subscribers that failed (empty if all succeeded).
     */
    private sendEventToSubscribers;
    /**
     * Flush all remaining events. Queue remains usable after flush (e.g. for
     * auto-flush at root span end). Use shutdown() when tearing down the queue.
     */
    flush(): Promise<void>;
    /**
     * Flush remaining events and permanently disable the queue (reject new events).
     * Use for process/SDK shutdown; use flush() for periodic or span-end drain.
     */
    shutdown(): Promise<void>;
    /**
     * Cleanup observable metric callbacks to prevent memory leaks
     * Call this when destroying the EventQueue instance
     */
    cleanup(): void;
    /**
     * Get queue size (for testing/debugging)
     */
    size(): number;
    /**
     * Get subscriber health status (for testing/debugging)
     */
    getSubscriberHealth(): Map<string, boolean>;
    /**
     * Check if a specific subscriber is healthy
     */
    isSubscriberHealthy(subscriberName: string): boolean;
    /**
     * Manually mark a subscriber as healthy or unhealthy
     * (used for circuit breaker integration)
     */
    setSubscriberHealth(subscriberName: string, healthy: boolean): void;
}

/**
 * Global track() function for business events
 *
 * Simple, no instantiation needed, auto-attaches trace context
 */
/**
 * Track a business event
 *
 * Features:
 * - Auto-attaches traceId and spanId if in active span
 * - Batched sending with retry
 * - Type-safe with optional generic
 * - No-op if init() not called or no subscribers configured
 *
 * @example Basic usage
 * ```typescript
 * track('user.signup', { userId: '123', plan: 'pro' })
 * ```
 *
 * @example With type safety
 * ```typescript
 * interface EventDatas {
 *   'user.signup': { userId: string; plan: string }
 *   'plan.upgraded': { userId: string; revenue: number }
 * }
 *
 * track<EventDatas>('user.signup', { userId: '123', plan: 'pro' })
 * ```
 *
 * @example Trace correlation (automatic)
 * ```typescript
 * @Instrumented()
 * class UserService {
 *   async createUser(data: CreateUserData) {
 *     // This track call automatically includes traceId + spanId
 *     track('user.signup', { userId: data.id })
 *   }
 * }
 * ```
 */
declare function track<Events extends Record<string, any> = Record<string, any>>(event: keyof Events & string, data?: Events[typeof event]): void;
/**
 * Get the event queue (for flush/shutdown)
 * @internal
 */
declare function getEventQueue(): EventQueue | null;

/**
 * Graceful shutdown with flush and cleanup
 */
/**
 * Flush all pending telemetry
 *
 * Flushes both events and OpenTelemetry spans to their destinations.
 * Includes timeout protection to prevent hanging in serverless environments.
 *
 * Safe to call multiple times.
 *
 * @param options - Optional configuration
 * @param options.timeout - Timeout in milliseconds (default: 2000ms)
 * @param options.forShutdown - If true, permanently disables the event queue after flush (used internally by shutdown())
 *
 * @example Manual flush in serverless
 * ```typescript
 * import { flush } from 'autotel';
 *
 * export const handler = async (event) => {
 *   // ... process event
 *   await flush(); // Flush before function returns
 *   return result;
 * };
 * ```
 *
 * @example With custom timeout
 * ```typescript
 * await flush({ timeout: 5000 }); // 5 second timeout
 * ```
 */
declare function flush(options?: {
    timeout?: number;
    forShutdown?: boolean;
}): Promise<void>;
/**
 * Shutdown telemetry and cleanup resources
 *
 * - Flushes all pending data
 * - Shuts down OpenTelemetry SDK
 * - Cleans up resources
 *
 * Call this before process exit.
 *
 * Always performs cleanup even if flush fails, preventing resource leaks
 * in serverless handlers or tests.
 *
 * @example Express server
 * ```typescript
 * const server = app.listen(3000)
 *
 * process.on('SIGTERM', async () => {
 *   await server.close()
 *   await shutdown()
 *   process.exit(0)
 * })
 * ```
 */
declare function shutdown(): Promise<void>;

declare function runWithRequestContext<T>(ctx: TraceContext, fn: () => T): T;
interface RequestLogger {
    set(fields: Record<string, unknown>): void;
    info(message: string, fields?: Record<string, unknown>): void;
    warn(message: string, fields?: Record<string, unknown>): void;
    error(error: Error | string, fields?: Record<string, unknown>): void;
    getContext(): Record<string, unknown>;
    emitNow(overrides?: Record<string, unknown>): RequestLogSnapshot;
    fork(label: string, fn: () => void | Promise<void>): void;
}
interface RequestLogSnapshot {
    timestamp: string;
    traceId: string;
    spanId: string;
    correlationId: string;
    context: Record<string, unknown>;
}
interface RequestLoggerOptions {
    /** Callback invoked by emitNow() for manual fan-out. */
    onEmit?: (snapshot: RequestLogSnapshot) => void | Promise<void>;
}
declare function getRequestLogger(ctx?: TraceContext, options?: RequestLoggerOptions): RequestLogger;

interface StructuredErrorInput {
    message: string;
    why?: string;
    fix?: string;
    link?: string;
    code?: string | number;
    status?: number;
    cause?: unknown;
    details?: Record<string, unknown>;
    name?: string;
    /** Backend-only context. Omitted from toJSON() and never serialized to clients. */
    internal?: Record<string, unknown>;
}
interface StructuredError extends Error {
    why?: string;
    fix?: string;
    link?: string;
    code?: string | number;
    status?: number;
    details?: Record<string, unknown>;
    /** Backend-only context. Omitted from toJSON() and never serialized to clients. */
    readonly internal?: Record<string, unknown>;
}
declare function createStructuredError(input: StructuredErrorInput): StructuredError;
declare function structuredErrorToJSON(error: StructuredError): Record<string, unknown>;
declare function getStructuredErrorAttributes(error: Error): Record<string, AttributeValue>;
declare function recordStructuredError(ctx: Pick<TraceContext, 'recordException' | 'setAttributes' | 'setStatus'>, error: Error): void;

/**
 * Convert an unknown value to an OTel-compatible AttributeValue.
 * Returns undefined when the value cannot be represented.
 */
declare function toAttributeValue(value: unknown): AttributeValue | undefined;
/**
 * Recursively flatten a nested object into dot-notation OTel attributes.
 * Includes circular reference protection via WeakSet.
 */
declare function flattenToAttributes(fields: Record<string, unknown>, prefix?: string): Record<string, AttributeValue>;
/**
 * Format milliseconds into a human-readable duration string.
 *
 * @example
 * formatDuration(45) // "45ms"
 * formatDuration(1234) // "1.2s"
 * formatDuration(65000) // "1m 5s"
 */
declare function formatDuration(ms: number): string;

/**
 * LLM-tuned histogram buckets.
 *
 * Default OpenTelemetry histogram buckets target HTTP latency (0ms–10s)
 * and small counter values. LLM workloads have very different shapes:
 *
 * - **Duration**: single-token prompts can be fast (50ms), long
 *   generations and reasoning models can run for minutes. Default buckets
 *   crush everything above 10s into one bucket.
 * - **Token usage**: heavily right-skewed. A single request can range
 *   from tens of tokens to the million-token context windows.
 * - **Cost (USD)**: per-request values are tiny (fractions of a cent),
 *   so linear buckets waste resolution at the low end.
 *
 * This module exposes empirically-chosen bucket arrays and a View helper
 * so users can apply them to their `MeterProvider` without knowing the
 * exact instrument names emitted by OpenAI/Anthropic/Traceloop plugins.
 *
 * @example
 * ```typescript
 * import { NodeSDK } from '@opentelemetry/sdk-node';
 * import { genAiMetricViews } from 'autotel';
 *
 * const sdk = new NodeSDK({
 *   serviceName: 'my-agent',
 *   views: [...genAiMetricViews()],
 * });
 * sdk.start();
 * ```
 */
/**
 * Duration buckets for LLM operations, in **seconds**. Covers fast
 * completions (50ms) through long-running reasoning jobs (5 min).
 *
 * Aligns with the OTel GenAI semantic conventions' published advice for
 * `gen_ai.client.operation.duration`.
 */
declare const GEN_AI_DURATION_BUCKETS_SECONDS: readonly number[];
/**
 * Token-count buckets for prompt, completion, and total token histograms.
 * Ranges from tiny prompts to million-token context windows.
 *
 * Aligns with the OTel GenAI semantic conventions' published advice for
 * `gen_ai.client.token.usage`.
 */
declare const GEN_AI_TOKEN_USAGE_BUCKETS: readonly number[];
/**
 * USD cost buckets. Sub-cent resolution at the low end (fractions of a
 * cent per small call) up to tens of dollars (batch jobs, Opus/o1 runs).
 */
declare const GEN_AI_COST_USD_BUCKETS: readonly number[];
/**
 * Instrument-level advice object for `createHistogram(name, advice)`.
 * Use when you control the instrument creation (e.g. custom business
 * LLM metrics); `genAiMetricViews()` is better when the metric comes
 * from a third-party plugin.
 */
declare function llmHistogramAdvice(kind: 'duration' | 'tokens' | 'cost'): {
    advice: {
        explicitBucketBoundaries: number[];
    };
};
/**
 * Returns `View`s that re-bucket the standard OTel GenAI histograms. Pass
 * the result to your `MeterProvider`'s `views` option.
 *
 * Matches instrument names emitted by:
 * - OpenTelemetry GenAI autoinstrumentation
 * - OpenInference / OpenLLMetry (traceloop)
 * - Arize Phoenix, LangSmith, etc. that follow the OTel spec
 *
 * Add more instrument patterns via the `extra` argument if you emit
 * custom LLM metrics.
 */
declare function genAiMetricViews(extra?: {
    instrumentName: string;
    kind: 'duration' | 'tokens' | 'cost';
}[]): ViewOptions[];

/**
 * Span event helpers for LLM lifecycle, aligned with the OpenTelemetry
 * GenAI semantic conventions.
 *
 * Span events are timestamped points within a span — they render as dots
 * on the trace timeline in Jaeger / Tempo / Langfuse / Arize. Use them
 * to mark lifecycle moments the span attributes alone can't express:
 *
 * - When the prompt was sent (vs. when the first token arrived)
 * - When each retry attempt started, and why
 * - When a streaming response produced its first token (TTFT)
 * - When a tool was invoked
 *
 * Every helper pins the event name + attribute keys to the published
 * spec so downstream tooling (autotel-mcp, Langfuse, vendor UIs) can
 * render them consistently.
 *
 * @example
 * ```typescript
 * import { trace, recordPromptSent, recordResponseReceived, recordRetry } from 'autotel';
 *
 * export const chat = trace('chat', ctx => async (prompt: string) => {
 *   recordPromptSent(ctx, { model: 'gpt-4o', messageCount: 1 });
 *
 *   for (let attempt = 1; attempt <= 3; attempt++) {
 *     try {
 *       const res = await openai.chat.completions.create({...});
 *       recordResponseReceived(ctx, {
 *         model: res.model,
 *         promptTokens: res.usage?.prompt_tokens,
 *         completionTokens: res.usage?.completion_tokens,
 *         finishReasons: res.choices.map(c => c.finish_reason),
 *       });
 *       return res;
 *     } catch (err) {
 *       recordRetry(ctx, { attempt, reason: 'rate_limit', delayMs: 500 });
 *       await sleep(500 * attempt);
 *     }
 *   }
 * });
 * ```
 */
/** Attributes expected on a `gen_ai.prompt.sent` event. */
interface PromptSentEvent {
    /** Model the caller intends to invoke (may differ from response model). */
    model?: string;
    /** Estimated input token count, when known before the call. */
    promptTokens?: number;
    /** Number of messages in a chat request (system + user + assistant). */
    messageCount?: number;
    /** Free-form operation kind — `chat` / `completion` / `embedding`. */
    operation?: string;
}
/** Attributes expected on a `gen_ai.response.received` event. */
interface ResponseReceivedEvent {
    /** Model the provider actually served (may be more specific than requested). */
    model?: string;
    promptTokens?: number;
    completionTokens?: number;
    totalTokens?: number;
    /** `stop`, `length`, `content_filter`, `tool_calls`, etc. */
    finishReasons?: string[];
}
/** Attributes expected on a `gen_ai.retry` event. */
interface RetryEvent {
    attempt: number;
    /** `rate_limit` | `timeout` | `provider_error` | custom label. */
    reason?: string;
    /** How long we'll wait before the next attempt. */
    delayMs?: number;
    /** HTTP status that triggered the retry, when applicable. */
    statusCode?: number;
}
/** Attributes expected on a `gen_ai.tool.call` event. */
interface ToolCallEvent {
    toolName: string;
    /** Call identifier so responses can be correlated back to calls. */
    toolCallId?: string;
    /** Pre-serialised tool arguments; omit if sensitive. */
    arguments?: string;
}
/** Attributes expected on a `gen_ai.stream.first_token` event. */
interface StreamFirstTokenEvent {
    /** Tokens streamed so far, if the caller tracks that. */
    tokensSoFar?: number;
}
/**
 * Record that a prompt was dispatched to the provider. Typically called
 * before `await provider.chat.completions.create(...)`.
 */
declare function recordPromptSent(ctx: TraceContext, event?: PromptSentEvent): void;
/**
 * Record a successful provider response. Call after the response arrives
 * (for non-streaming) or after the stream completes.
 */
declare function recordResponseReceived(ctx: TraceContext, event?: ResponseReceivedEvent): void;
/**
 * Record a retry attempt on an LLM call. Call *before* sleeping for
 * `delayMs` so the event timestamp accurately marks when the retry
 * decision was made.
 */
declare function recordRetry(ctx: TraceContext, event: RetryEvent): void;
/**
 * Record a tool / function call made in the course of an agent step.
 * Emits an event rather than a child span because many frameworks fire
 * several tool calls within a single provider response.
 */
declare function recordToolCall(ctx: TraceContext, event: ToolCallEvent): void;
/**
 * Record the time-to-first-token for a streaming response. Pair with
 * `recordResponseReceived` at the end so the span carries both the TTFT
 * marker and the final usage numbers.
 */
declare function recordStreamFirstToken(ctx: TraceContext, event?: StreamFirstTokenEvent): void;

export { AttributeRedactorConfig, AttributeRedactorPreset, BaggageSpanProcessor, type BaggageSpanProcessorOptions, EventAttributes, EventSubscriber, GEN_AI_COST_USD_BUCKETS, GEN_AI_DURATION_BUCKETS_SECONDS, GEN_AI_TOKEN_USAGE_BUCKETS, type OperationContext, type PromptSentEvent, type RequestLogSnapshot, type RequestLogger, type RequestLoggerOptions, type ResponseReceivedEvent, type RetryEvent, type StreamFirstTokenEvent, type StringRedactor, type StructuredError, type StructuredErrorInput, type ToolCallEvent, TraceContext, createStringRedactor, createStructuredError, flattenToAttributes, flush, formatDuration, genAiMetricViews, getEventQueue, getOperationContext, getRequestLogger, getStructuredErrorAttributes, llmHistogramAdvice, recordPromptSent, recordResponseReceived, recordRetry, recordStreamFirstToken, recordStructuredError, recordToolCall, runInOperationContext, runWithRequestContext, shutdown, structuredErrorToJSON, toAttributeValue, track };