// spinal-obs-node — WithSpinal cost-aware OpenTelemetry SDK for Node.js (bundled CommonJS build).
;
// esbuild CommonJS bundle helpers: lazy module init, ESM interop, export wiring.
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Wraps a module body so it runs at most once; `fn = 0` releases the closure after init.
var __esm = (fn, res) => function __init() {
return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
};
// Defines live getters on `target` for every key of `all` (supports late-bound exports).
var __export = (target, all) => {
for (var name in all)
__defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties of `from` onto `to` as getters, skipping `except`
// and keys already present on `to`; preserves enumerability of the source.
var __copyProps = (to, from, except, desc) => {
if (from && typeof from === "object" || typeof from === "function") {
for (let key of __getOwnPropNames(from))
if (!__hasOwnProp.call(to, key) && key !== except)
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
}
return to;
};
// Builds an ESM-style namespace object around a CommonJS module.
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
// If the importer is in node compatibility mode or this is not an ESM
// file that has been converted to a CommonJS file using a Babel-
// compatible transform (i.e. "__esModule" has not been set), then set
// "default" to the CommonJS "module.exports" for node compatibility.
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
mod
));
// Marks the namespace as an ES module and copies its exports onto module.exports.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// src/runtime/config.ts
// Lazy namespace object for the config module; populated via __export getters
// so `configure`/`getConfig` resolve even before init_config has run.
var config_exports = {};
__export(config_exports, {
configure: () => configure,
getConfig: () => getConfig
});
/**
 * Builds and installs the global Spinal configuration.
 *
 * Precedence for every setting: explicit option > environment variable > default.
 * Throws when local mode is disabled without an API key, when no endpoint is
 * resolvable, or when cloud mode is selected without an API key.
 * Side effect: installs `console` as the OpenTelemetry diag logger.
 *
 * @param {object} [opts] partial configuration overrides.
 * @returns the frozen-in-module global configuration object.
 */
function configure(opts = {}) {
  // Parses a base-10 integer from an env var, falling back when unset or
  // malformed (previously a garbage env value propagated NaN into batch sizing).
  const envInt = (value, fallback) => {
    const parsed = Number.parseInt(value ?? "", 10);
    return Number.isNaN(parsed) ? fallback : parsed;
  };
  const endpoint = opts.endpoint ?? process.env.SPINAL_TRACING_ENDPOINT ?? "https://cloud.withspinal.com";
  const apiKey = opts.apiKey ?? process.env.SPINAL_API_KEY ?? "";
  const disableLocalMode = opts.disableLocalMode ?? process.env.SPINAL_DISABLE_LOCAL_MODE === "true";
  // Explicit mode wins; otherwise the presence of an API key selects cloud mode.
  const mode = (opts.mode ?? process.env.SPINAL_MODE) || (apiKey ? "cloud" : "local");
  if (disableLocalMode && !apiKey) {
    throw new Error("Cannot disable local mode without providing an API key for cloud mode");
  }
  // Cloud mode injects the API key header on top of any user-supplied headers.
  const headers = mode === "cloud" ? { ...opts.headers ?? {}, "X-SPINAL-API-KEY": apiKey } : { ...opts.headers ?? {} };
  const timeoutMs = opts.timeoutMs ?? 5e3;
  const maxQueueSize = opts.maxQueueSize ?? envInt(process.env.SPINAL_PROCESS_MAX_QUEUE_SIZE, 2048);
  const maxExportBatchSize = opts.maxExportBatchSize ?? envInt(process.env.SPINAL_PROCESS_MAX_EXPORT_BATCH_SIZE, 512);
  const scheduleDelayMs = opts.scheduleDelayMs ?? envInt(process.env.SPINAL_PROCESS_SCHEDULE_DELAY, 5000);
  const exportTimeoutMs = opts.exportTimeoutMs ?? envInt(process.env.SPINAL_PROCESS_EXPORT_TIMEOUT, 30000);
  const scrubber = opts.scrubber ?? new DefaultScrubber();
  const opentelemetryLogLevel = opts.opentelemetryLogLevel ?? import_api.DiagLogLevel.ERROR;
  const localStorePath = opts.localStorePath ?? process.env.SPINAL_LOCAL_STORE_PATH ?? import_path.default.join(process.cwd(), ".spinal", "spans.jsonl");
  if (!endpoint) throw new Error("Spinal endpoint must be provided");
  if (mode === "cloud" && !apiKey) throw new Error("No API key provided. Set SPINAL_API_KEY or pass to configure().");
  import_api.diag.setLogger(console, opentelemetryLogLevel);
  globalConfig = {
    endpoint,
    apiKey,
    headers,
    timeoutMs,
    maxQueueSize,
    maxExportBatchSize,
    scheduleDelayMs,
    exportTimeoutMs,
    scrubber,
    opentelemetryLogLevel,
    mode,
    localStorePath,
    disableLocalMode
  };
  return globalConfig;
}
/** Returns the active configuration, building the defaults on first use. */
function getConfig() {
  return globalConfig ? globalConfig : configure();
}
var import_api, import_path, DefaultScrubber, globalConfig;
var init_config = __esm({
  "src/runtime/config.ts"() {
    "use strict";
    import_api = require("@opentelemetry/api");
    import_path = __toESM(require("path"), 1);
    // Default attribute scrubber: replaces values of sensitive-looking keys
    // with "[Scrubbed]" and recurses into nested objects/arrays.
    DefaultScrubber = class {
      // Key patterns whose values are redacted.
      sensitive = [
        /password/i,
        /passwd/i,
        /secret/i,
        /api[._-]?key/i,
        /auth[._-]?token/i,
        /access[._-]?token/i,
        /private[._-]?key/i,
        /encryption[._-]?key/i,
        /bearer/i,
        /credential/i,
        /user[._-]?name/i,
        /first[._-]?name/i,
        /last[._-]?name/i,
        /email/i,
        /email[._-]?address/i,
        /phone[._-]?number/i,
        /ip[._-]?address/i
      ];
      // NOTE(review): not referenced in this bundle — presumably consumed by
      // custom scrubber implementations; confirm before removing.
      protected = [/^attributes$/i, /^spinal\./i];
      /** Returns a scrubbed copy of `attributes`; the input is not mutated. */
      scrubAttributes(attributes) {
        const redacted = {};
        // Recursively cleans a single value (arrays and nested objects).
        const cleanse = (value) => {
          if (Array.isArray(value)) {
            return value.map((item) => typeof item === "object" && item !== null ? this.scrubAttributes(item) : item);
          }
          if (typeof value === "object" && value !== null) {
            return this.scrubAttributes(value);
          }
          return value;
        };
        for (const [key, value] of Object.entries(attributes ?? {})) {
          redacted[key] = this.sensitive.some((pattern) => pattern.test(key)) ? "[Scrubbed]" : cleanse(value);
        }
        return redacted;
      }
    };
  }
});
// src/pricing/index.ts
// Lazy namespace object for the pricing module.
var pricing_exports = {};
__export(pricing_exports, {
estimateCost: () => estimateCost
});
/**
 * Estimates the USD cost of one model call from its token counts.
 * Unknown models silently fall back to the first catalog entry (gpt-4o-mini).
 * @param {{model?: string, inputTokens?: number, outputTokens?: number}} params
 * @returns {number} cost rounded to 4 decimal places.
 */
function estimateCost(params) {
  const { model = "openai:gpt-4o-mini", inputTokens = 0, outputTokens = 0 } = params;
  const pricing = catalog.find((candidate) => candidate.model === model) ?? catalog[0];
  const inputCost = inputTokens / 1e3 * pricing.inputPer1K;
  const outputCost = outputTokens / 1e3 * pricing.outputPer1K;
  return roundUSD(inputCost + outputCost);
}
/** Rounds a dollar amount to 4 decimal places (hundredths of a cent). */
function roundUSD(n) {
  const tenThousandths = n * 1e4;
  return Math.round(tenThousandths) / 1e4;
}
// Model price catalog in USD per 1K tokens; the first entry doubles as the
// fallback for unknown models (see estimateCost).
var catalog;
var init_pricing = __esm({
"src/pricing/index.ts"() {
"use strict";
catalog = [
{ model: "openai:gpt-4o-mini", inputPer1K: 0.15, outputPer1K: 0.6 },
{ model: "openai:gpt-4o", inputPer1K: 2.5, outputPer1K: 10 }
];
}
});
// src/index.ts
// Public package surface, published through CommonJS module.exports.
var index_exports = {};
__export(index_exports, {
Analytics: () => Analytics,
configure: () => configure2,
displayLocalData: () => displayLocalData,
estimateCost: () => estimateCost,
forceFlush: () => forceFlush,
instrumentHTTP: () => instrumentHTTP2,
instrumentOpenAI: () => instrumentOpenAI2,
shutdown: () => shutdown,
tag: () => tag2
});
module.exports = __toCommonJS(index_exports);
// src/public.ts
// Eagerly initialize the config module so the public API is usable immediately.
init_config();
// src/runtime/tag.ts
var import_api3 = require("@opentelemetry/api");
// src/runtime/tracer.ts
var import_sdk_trace_base = require("@opentelemetry/sdk-trace-base");
var import_sdk_trace_node = require("@opentelemetry/sdk-trace-node");
var import_api2 = require("@opentelemetry/api");
init_config();
// node_modules/@opentelemetry/core/build/esm/ExportResult.js
// Inlined copy of @opentelemetry/core's ExportResultCode, written out as the
// reverse-mapped object a TypeScript numeric enum compiles to.
var ExportResultCode = {
  SUCCESS: 0,
  FAILED: 1,
  0: "SUCCESS",
  1: "FAILED"
};
// src/runtime/exporter.ts
// Exporter dependencies: config, filesystem for local JSONL, undici for HTTP.
init_config();
var import_fs = __toESM(require("fs"), 1);
var import_path2 = __toESM(require("path"), 1);
var import_undici = require("undici");
// Span exporter: appends spans to a local JSONL file in "local" mode, or POSTs
// them as {spans: [...]} to the configured Spinal endpoint in "cloud" mode.
var SpinalExporter = class {
  // Once set, every subsequent export() reports FAILED immediately.
  shutdownFlag = false;
  /**
   * Exports a batch of finished spans.
   * @param spans ReadableSpan[] handed over by the span processor.
   * @param resultCallback invoked exactly once with SUCCESS or FAILED.
   */
  async export(spans, resultCallback) {
    if (this.shutdownFlag) return resultCallback({ code: ExportResultCode.FAILED });
    try {
      const cfg = getConfig();
      const payload = spans.map((s) => this.toJSON(s));
      if (cfg.mode === "local") {
        await this.writeLocal(cfg.localStorePath, payload);
        resultCallback({ code: ExportResultCode.SUCCESS });
        return;
      }
      const body = { spans: payload };
      const res = await (0, import_undici.request)(cfg.endpoint, {
        method: "POST",
        headers: { "content-type": "application/json", ...cfg.headers },
        body: JSON.stringify(body),
        bodyTimeout: cfg.timeoutMs,
        headersTimeout: cfg.timeoutMs
      });
      // Drain the response body so undici can release the connection back to
      // the pool; an unconsumed body pins the socket until the body timeout.
      await res.body.dump().catch(() => {});
      if (res.statusCode >= 200 && res.statusCode < 300) {
        resultCallback({ code: ExportResultCode.SUCCESS });
      } else {
        resultCallback({ code: ExportResultCode.FAILED });
      }
    } catch (err) {
      resultCallback({ code: ExportResultCode.FAILED, error: err });
    }
  }
  /** Marks the exporter shut down; in-flight exports are not awaited. */
  shutdown() {
    this.shutdownFlag = true;
    return Promise.resolve();
  }
  /**
   * Serializes a ReadableSpan into the wire/JSONL shape; attributes are passed
   * through the configured scrubber. start_time/end_time stay as OTel HrTime
   * [seconds, nanoseconds] tuples.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  toJSON(span) {
    const cfg = getConfig();
    const attributes = { ...span.attributes ?? {} };
    const scrubbed = cfg.scrubber.scrubAttributes(attributes);
    return {
      name: span.name,
      trace_id: span.spanContext().traceId,
      span_id: span.spanContext().spanId,
      // NOTE(review): parentSpanId is deprecated in newer OTel SDKs — confirm on upgrade.
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      parent_span_id: span.parentSpanId ?? null,
      start_time: span.startTime,
      end_time: span.endTime,
      // status may legitimately serialize as null; consumers must guard for it.
      status: span.status ?? null,
      attributes: scrubbed,
      events: (span.events ?? []).map((e) => ({ name: e.name, timestamp: e.time, attributes: e.attributes ?? {} })),
      links: (span.links ?? []).map((l) => ({
        context: { trace_id: l.context.traceId, span_id: l.context.spanId },
        attributes: l.attributes ?? {}
      })),
      // Older SDKs expose instrumentationLibrary; newer ones instrumentationScope.
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      instrumentation_info: span.instrumentationLibrary || span.instrumentationScope ? {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        name: span.instrumentationLibrary?.name || span.instrumentationScope?.name,
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        version: span.instrumentationLibrary?.version || span.instrumentationScope?.version
      } : null
    };
  }
  /** Appends one JSON line per span; best-effort (failures are logged, not thrown). */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  async writeLocal(filePath, payload) {
    if (payload.length === 0) {
      return;
    }
    try {
      await import_fs.default.promises.mkdir(import_path2.default.dirname(filePath), { recursive: true });
      const lines = payload.map((p) => JSON.stringify(p)).join("\n") + "\n";
      await import_fs.default.promises.appendFile(filePath, lines, "utf8");
    } catch (error) {
      console.warn(`Failed to write local spans to ${filePath}:`, error);
    }
  }
};
// src/runtime/tracer.ts
// Baggage keys under this namespace are copied onto spans at start time.
var SPINAL_NAMESPACE = "spinal";
// Batching span processor that (a) filters for spans Spinal cares about and
// (b) stamps spinal.* baggage entries onto them as attributes.
var SpinalSpanProcessor = class extends import_sdk_trace_base.BatchSpanProcessor {
exporter;
constructor() {
// Batch sizing/timing is driven by the global Spinal config.
const cfg = getConfig();
const exporter = new SpinalExporter();
super(exporter, {
maxQueueSize: cfg.maxQueueSize,
scheduledDelayMillis: cfg.scheduleDelayMs,
maxExportBatchSize: cfg.maxExportBatchSize,
exportTimeoutMillis: cfg.exportTimeoutMs
});
this.exporter = exporter;
}
// Hosts whose HTTP spans are dropped. SPINAL_EXCLUDED_HOSTS (comma-separated)
// replaces the defaults wholesale; SPINAL_EXCLUDE_OPENAI=true additionally
// excludes api.openai.com.
excludedHosts = (() => {
const defaultHosts = ["api.anthropic.com", "api.azure.com"];
const override = process.env.SPINAL_EXCLUDED_HOSTS?.trim();
const list = override && override.length > 0 ? override.split(",").map((s) => s.trim()).filter(Boolean) : defaultHosts;
const set = new Set(list);
if (process.env.SPINAL_EXCLUDE_OPENAI === "true") {
set.add("api.openai.com");
}
return set;
})();
// Relevance filter: spinal-* scopes always pass; http scopes pass unless the
// request host is excluded (unparsable http.url still passes); openai/anthropic
// scopes pass; everything else is ignored.
shouldProcess(span) {
const scopeName = span.instrumentationLibrary?.name || span.instrumentationScope?.name || "";
if (!scopeName) return false;
if (scopeName.includes("spinal-")) return true;
if (scopeName.includes("http")) {
const url = span.attributes?.["http.url"];
try {
if (url) {
const host = new URL(url).host;
if (this.excludedHosts.has(host)) return false;
}
} catch {
}
return true;
}
if (scopeName.includes("openai") || scopeName.includes("anthropic") || scopeName.includes("openai_agents")) return true;
return false;
}
// Copies spinal.* baggage entries from the parent context onto the new span.
// NOTE(review): super.onStart is not called — assumed to be a no-op in
// BatchSpanProcessor; confirm when upgrading OpenTelemetry.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
onStart(span, parentContext) {
if (!this.shouldProcess(span)) return;
const bag = import_api2.propagation.getBaggage(parentContext ?? import_api2.context.active());
if (!bag) return;
const entries = bag.getAllEntries();
entries.forEach(([key, entry]) => {
if (key.startsWith(`${SPINAL_NAMESPACE}.`)) {
span.setAttribute(key, String(entry.value));
}
});
}
};
// Module-level singleton so all instrumentation entry points share one provider.
var providerSingleton;
/** Lazily creates, registers, and caches the dedicated NodeTracerProvider. */
function getIsolatedProvider() {
  if (providerSingleton) return providerSingleton;
  const fresh = new import_sdk_trace_node.NodeTracerProvider({
    sampler: new import_sdk_trace_base.AlwaysOnSampler(),
    spanProcessors: [new SpinalSpanProcessor()]
  });
  fresh.register();
  providerSingleton = fresh;
  return fresh;
}
/** Shuts down the singleton provider, if one was ever created. */
async function shutdown() {
  const provider = providerSingleton;
  if (provider) {
    await provider.shutdown();
  }
}
/** Flushes pending spans on the singleton provider, if one was ever created. */
async function forceFlush() {
  const provider = providerSingleton;
  if (provider) {
    await provider.forceFlush();
  }
}
// src/runtime/tag.ts
var SPINAL_NAMESPACE2 = "spinal";
// Emits a "spinal.tag" marker span carrying the given tags and seeds baggage
// so child spans (via SpinalSpanProcessor.onStart) inherit the same keys.
// `aggregationId` maps to spinal.aggregation_id; other keys pass through as
// spinal.<key>; undefined values are skipped.
function tag(tags = {}) {
let ctx = import_api3.context.active();
const entries = [];
if (tags.aggregationId) {
entries.push([`${SPINAL_NAMESPACE2}.aggregation_id`, String(tags.aggregationId)]);
}
for (const [k, v] of Object.entries(tags)) {
if (k === "aggregationId") continue;
if (v === void 0) continue;
entries.push([`${SPINAL_NAMESPACE2}.${k}`, String(v)]);
}
const currentBaggage = import_api3.propagation.getBaggage(ctx) ?? import_api3.propagation.createBaggage();
const updated = entries.reduce((bag, [k, v]) => bag.setEntry(k, { value: v }), currentBaggage);
ctx = import_api3.propagation.setBaggage(ctx, updated);
const provider = getIsolatedProvider();
const tracer = provider.getTracer("spinal-tag");
const span = tracer.startSpan("spinal.tag", void 0, ctx);
entries.forEach(([key, value]) => {
span.setAttribute(key, value);
});
span.end();
// NOTE(review): `context.attach`/`context.detach` are not part of the standard
// @opentelemetry/api ContextAPI, so `token` is most likely undefined and
// dispose() a no-op — confirm against the API version in use.
const token = import_api3.context.attach?.(ctx) ?? void 0;
return {
dispose() {
if (token && import_api3.context.detach) import_api3.context.detach(token);
}
};
}
// src/providers/openai.ts
var import_api4 = require("@opentelemetry/api");
// Maps fetch Response objects to their captured body text and span.
// Only written in this chunk — presumably read by downstream consumers; confirm.
var responseDataMap = /* @__PURE__ */ new WeakMap();
/**
 * Monkey-patches global fetch (and, best-effort, http/https.request) to record
 * OpenAI API calls as "openai-api-call" spans with model/token/response attributes.
 *
 * Fixes over the previous version:
 * - the identical http/https response-capture logic is shared in helpers;
 * - the 3-argument form `http.request(url, options, cb)` no longer silently
 *   drops the callback (the old 2-parameter wrapper discarded trailing args);
 * - a string/URL first argument is now matched against api.openai.com;
 * - core modules are loaded with require(): ES module namespace objects reject
 *   property writes, so assigning `http.request` on a dynamic import was a
 *   silent no-op in sloppy mode.
 */
async function instrumentOpenAI() {
  getIsolatedProvider();
  // Parses an OpenAI response body, records usage/model/response attributes,
  // then closes the span with OK status. Non-JSON bodies record no attributes
  // but still close the span OK (matches previous behavior on all paths).
  const recordResponse = (span, bodyText, captureMethod) => {
    try {
      const parsed = JSON.parse(bodyText);
      if (parsed.usage) {
        span.setAttribute("spinal.input_tokens", parsed.usage.prompt_tokens);
        span.setAttribute("spinal.output_tokens", parsed.usage.completion_tokens);
        span.setAttribute("spinal.total_tokens", parsed.usage.total_tokens);
      }
      if (parsed.model) {
        span.setAttribute("spinal.model", `openai:${parsed.model}`);
      }
      span.setAttribute("spinal.response.binary_data", bodyText);
      span.setAttribute("spinal.response.size", bodyText.length);
      span.setAttribute("spinal.response.capture_method", captureMethod);
    } catch {
      // Body was not JSON (e.g. an SSE stream): record nothing extra.
    }
    span.setStatus({ code: import_api4.SpanStatusCode.OK });
    span.end();
  };
  const originalFetch = global.fetch;
  global.fetch = async function(input, init) {
    const url = typeof input === "string" ? input : input.toString();
    if (!url.includes("api.openai.com")) {
      return originalFetch(input, init);
    }
    const tracer = import_api4.trace.getTracer("spinal-openai");
    const span = tracer.startSpan("openai-api-call");
    try {
      if (init?.body) {
        try {
          const bodyStr = typeof init.body === "string" ? init.body : JSON.stringify(init.body);
          const parsed = JSON.parse(bodyStr);
          if (parsed.model) {
            span.setAttribute("spinal.model", `openai:${parsed.model}`);
          }
        } catch {
          // Request body not JSON; model is taken from the response instead.
        }
      }
      const response = await originalFetch(input, init);
      // NOTE(review): clone().text() waits for the full body, so streamed
      // responses are fully buffered before the caller sees them — confirm
      // this is acceptable for SSE/streaming usage.
      const clonedResponse = response.clone();
      const responseText = await clonedResponse.text();
      responseDataMap.set(response, { body: responseText, span });
      recordResponse(span, responseText, "fetch_clone");
      return response;
    } catch (error) {
      span.setStatus({ code: import_api4.SpanStatusCode.ERROR, message: error.message });
      span.end();
      throw error;
    }
  };
  // Builds a wrapper around http(s).request that preserves the full argument
  // list and injects a response-capturing callback for OpenAI-bound requests.
  const instrumentRequest = (originalRequest, captureMethod) => function(...args) {
    const target = args[0];
    const host = typeof target === "string" || target instanceof URL
      ? String(target)
      : String(target?.hostname || target?.host || "");
    if (host.includes("api.openai.com")) {
      const tracer = import_api4.trace.getTracer("spinal-openai");
      const span = tracer.startSpan("openai-api-call");
      const callbackIndex = typeof args[args.length - 1] === "function" ? args.length - 1 : -1;
      const originalCallback = callbackIndex === -1 ? void 0 : args[callbackIndex];
      const wrappedCallback = function(res) {
        const chunks = [];
        res.on("data", (chunk) => chunks.push(chunk));
        res.on("end", () => {
          recordResponse(span, Buffer.concat(chunks).toString(), captureMethod);
        });
        if (originalCallback) originalCallback(res);
      };
      if (callbackIndex === -1) {
        args.push(wrappedCallback);
      } else {
        args[callbackIndex] = wrappedCallback;
      }
    }
    return originalRequest.apply(this, args);
  };
  try {
    const http = require("http");
    const https = require("https");
    http.request = instrumentRequest(http.request, "http_stream");
    https.request = instrumentRequest(https.request, "https_stream");
  } catch {
    // Best effort: the fetch-based instrumentation above still applies.
  }
}
// src/providers/http.ts
var import_instrumentation_http = require("@opentelemetry/instrumentation-http");
/**
 * Enables @opentelemetry/instrumentation-http with a request hook that tags
 * OpenAI-bound requests with provider and (when parseable) model attributes.
 */
function instrumentHTTP() {
  getIsolatedProvider();
  const httpInstr = new import_instrumentation_http.HttpInstrumentation({
    // Intercept request to capture OpenAI-specific data
    requestHook: (span, request2) => {
      const url = request2.url || request2.path || request2.href || "";
      if (typeof url !== "string" || !url.includes("api.openai.com")) return;
      span.setAttribute("spinal.provider", "openai");
      const body = request2.body;
      if (!body) return;
      try {
        const serialized = typeof body === "string" ? body : JSON.stringify(body);
        const parsed = JSON.parse(serialized);
        if (parsed.model) {
          span.setAttribute("spinal.model", `openai:${parsed.model}`);
        }
      } catch {
        // Request body was not JSON; skip model tagging.
      }
    }
  });
  httpInstr.enable();
}
// src/public.ts
init_pricing();
// Public aliases (the bundler renames to avoid module-scope name collisions).
var configure2 = configure;
var tag2 = tag;
var instrumentOpenAI2 = instrumentOpenAI;
var instrumentHTTP2 = instrumentHTTP;
/**
 * Prints the locally collected spans from the JSONL store.
 * @param {{limit?: number, format?: "table"|"json"|"summary"}} [options]
 *   limit: how many most-recent spans to show (default 10);
 *   format: "table" (default), "json", or "summary".
 */
async function displayLocalData(options = {}) {
  const { getConfig: getConfig2 } = await Promise.resolve().then(() => (init_config(), config_exports));
  const { estimateCost: estimateCost2 } = await Promise.resolve().then(() => (init_pricing(), pricing_exports));
  const fs2 = await import("fs");
  // Estimated cost of one span from its spinal.* token attributes.
  const spanCost = (span) => {
    const attrs = span.attributes || {};
    return estimateCost2({
      model: String(attrs["spinal.model"] || "openai:gpt-4o-mini"),
      inputTokens: Number(attrs["spinal.input_tokens"] || 0),
      outputTokens: Number(attrs["spinal.output_tokens"] || 0)
    });
  };
  // Spans are persisted with OTel HrTime tuples [seconds, nanoseconds] (see
  // SpinalExporter.toJSON and Analytics.getSpanDuration). The previous code
  // subtracted the arrays directly, which always produced NaN.
  const durationMs = (start, end) => {
    if (Array.isArray(start) && Array.isArray(end)) {
      return (end[0] - start[0]) * 1e3 + (end[1] - start[1]) / 1e6;
    }
    // Fallback for plain numeric timestamps (assumed nanoseconds, as before).
    return (Number(end) - Number(start)) / 1e6;
  };
  const cfg = getConfig2();
  const file = cfg.localStorePath;
  if (!fs2.existsSync(file)) {
    console.log("No local data found. Start your application with Spinal configured to collect data.");
    return;
  }
  const raw = await fs2.promises.readFile(file, "utf8");
  const lines = raw.trim().length ? raw.trim().split("\n") : [];
  if (lines.length === 0) {
    console.log("No spans collected yet. Start your application with Spinal configured to collect data.");
    return;
  }
  const spans = [];
  for (const line of lines) {
    try {
      spans.push(JSON.parse(line));
    } catch {
      // Skip corrupt or partially written lines.
    }
  }
  const limit = options.limit || 10;
  const format = options.format || "table";
  const displaySpans = spans.slice(-limit);
  if (format === "json") {
    console.log(JSON.stringify(displaySpans, null, 2));
  } else if (format === "summary") {
    const summary = {
      totalSpans: spans.length,
      uniqueTraces: new Set(spans.map((s) => s.trace_id)).size,
      spanTypes: spans.reduce((acc, span) => {
        const type = span.name || "unknown";
        acc[type] = (acc[type] || 0) + 1;
        return acc;
      }, {}),
      estimatedCost: spans.reduce((total, span) => total + spanCost(span), 0)
    };
    console.log(JSON.stringify(summary, null, 2));
  } else {
    console.log(`
\u{1F4CA} Spinal Local Data (showing last ${displaySpans.length} of ${spans.length} spans)
`);
    console.log("\u2500".repeat(120));
    console.log(`${"Name".padEnd(30)} ${"Trace ID".padEnd(32)} ${"Duration (ms)".padEnd(12)} ${"Status".padEnd(8)} ${"Model".padEnd(15)} ${"Cost ($)".padEnd(8)}`);
    console.log("\u2500".repeat(120));
    for (const span of displaySpans) {
      const name = (span.name || "unknown").substring(0, 29).padEnd(30);
      // trace_id may be absent on malformed records; don't crash the listing.
      const traceId = String(span.trace_id || "unknown").substring(0, 31).padEnd(32);
      const ms = span.end_time && span.start_time ? durationMs(span.start_time, span.end_time) : NaN;
      const duration = Number.isFinite(ms) ? ms.toFixed(1).padEnd(12) : "N/A".padEnd(12);
      const status = String(span.status?.code || "UNSET").padEnd(8);
      const attrs = span.attributes || {};
      const model = (attrs["spinal.model"] || "N/A").toString().substring(0, 14).padEnd(15);
      const inputTokens = Number(attrs["spinal.input_tokens"] || 0);
      const outputTokens = Number(attrs["spinal.output_tokens"] || 0);
      const cost = inputTokens > 0 || outputTokens > 0 ? spanCost(span).toFixed(4).padEnd(8) : "N/A".padEnd(8);
      console.log(`${name} ${traceId} ${duration} ${status} ${model} ${cost}`);
    }
    console.log("\u2500".repeat(120));
    const totalCost = spans.reduce((total, span) => total + spanCost(span), 0);
    console.log(`
\u{1F4B0} Total estimated cost: $${totalCost.toFixed(4)}`);
    console.log(`\u{1F4C8} Total spans collected: ${spans.length}`);
    console.log(`\u{1F50D} Unique traces: ${new Set(spans.map((s) => s.trace_id)).size}`);
  }
}
// src/analytics/index.ts
var import_fs2 = require("fs");
var import_path3 = require("path");
init_pricing();
// Offline analytics over the locally persisted span store (.spinal/spans.jsonl).
var Analytics = class {
// In-memory copy of the most recently loaded spans.
spans = [];
// Path to the JSONL span store.
spansPath;
/** @param spansPath Optional override; defaults to <cwd>/.spinal/spans.jsonl. */
constructor(spansPath) {
this.spansPath = spansPath || (0, import_path3.join)(process.cwd(), ".spinal", "spans.jsonl");
}
loadSpans() {
try {
const raw = (0, import_fs2.readFileSync)(this.spansPath, "utf8");
const lines = raw.trim().length ? raw.trim().split("\n") : [];
const spans = [];
for (const line of lines) {
try {
const span = JSON.parse(line);
spans.push(span);
} catch {
}
}
return spans;
} catch {
return [];
}
}
filterSpansByTime(spans, since) {
if (!since) return spans;
const now = Date.now();
const timeMap = {
"1h": 60 * 60 * 1e3,
"24h": 24 * 60 * 60 * 1e3,
"7d": 7 * 24 * 60 * 60 * 1e3,
"30d": 30 * 24 * 60 * 60 * 1e3,
"90d": 90 * 24 * 60 * 60 * 1e3,
"1y": 365 * 24 * 60 * 60 * 1e3
};
const cutoff = now - (timeMap[since] || 0);
return spans.filter((span) => {
const spanTime = span.start_time[0] * 1e3 + span.start_time[1] / 1e6;
return spanTime >= cutoff;
});
}
isOpenAISpan(span) {
return span.name === "openai-api-call" || span.attributes?.["spinal.provider"] === "openai" || span.instrumentation_info?.name === "spinal-openai";
}
getSpanDuration(span) {
const start = span.start_time[0] * 1e3 + span.start_time[1] / 1e6;
const end = span.end_time[0] * 1e3 + span.end_time[1] / 1e6;
return end - start;
}
/**
 * Aggregates estimated OpenAI spend over the optional `since` window.
 * @param {{since?: string}} [options] window key understood by filterSpansByTime.
 * @returns totals plus per-model / per-aggregation breakdowns (with percentage
 *   shares) and trend data from calculateCostTrends (defined elsewhere in this module).
 */
analyzeCosts(options = {}) {
this.spans = this.loadSpans();
const filteredSpans = this.filterSpansByTime(this.spans, options.since);
const openAISpans = filteredSpans.filter((span) => this.isOpenAISpan(span));
let totalCost = 0;
const totalCalls = openAISpans.length;
const costByModel = {};
const costByAggregation = {};
for (const span of openAISpans) {
const attrs = span.attributes || {};
const inputTokens = Number(attrs["spinal.input_tokens"] || 0);
const outputTokens = Number(attrs["spinal.output_tokens"] || 0);
// Missing model falls back to the cheapest catalog entry.
const model = String(attrs["spinal.model"] || "openai:gpt-4o-mini");
const aggregationId = String(attrs["spinal.aggregation_id"] || "unknown");
const cost = estimateCost({ model, inputTokens, outputTokens });
totalCost += cost;
if (!costByModel[model]) {
costByModel[model] = { cost: 0, calls: 0, percentage: 0 };
}
costByModel[model].cost += cost;
costByModel[model].calls += 1;
if (!costByAggregation[aggregationId]) {
costByAggregation[aggregationId] = { cost: 0, calls: 0, percentage: 0 };
}
costByAggregation[aggregationId].cost += cost;
costByAggregation[aggregationId].calls += 1;
}
// Convert accumulated costs into percentage shares of the total.
Object.values(costByModel).forEach((model) => {
model.percentage = totalCost > 0 ? model.cost / totalCost * 100 : 0;
});
Object.values(costByAggregation).forEach((agg) => {
agg.percentage = totalCost > 0 ? agg.cost / totalCost * 100 : 0;
});
const costTrends = this.calculateCostTrends(filteredSpans);
return {
totalCost,
totalCalls,
averageCostPerCall: totalCalls > 0 ? totalCost / totalCalls : 0,
costByModel,
costByAggregation,
costTrends
};
}
/**
 * Aggregates token usage over the optional `since` window.
 * Percentages are call-count shares (not token shares) of the total.
 * @param {{since?: string}} [options] window key understood by filterSpansByTime.
 */
analyzeUsage(options = {}) {
this.spans = this.loadSpans();
const filteredSpans = this.filterSpansByTime(this.spans, options.since);
const openAISpans = filteredSpans.filter((span) => this.isOpenAISpan(span));
const totalCalls = openAISpans.length;
let totalTokens = 0;
let inputTokens = 0;
let outputTokens = 0;
const usageByModel = {};
const usageByAggregation = {};
for (const span of openAISpans) {
const attrs = span.attributes || {};
const spanInputTokens = Number(attrs["spinal.input_tokens"] || 0);
const spanOutputTokens = Number(attrs["spinal.output_tokens"] || 0);
const spanTotalTokens = Number(attrs["spinal.total_tokens"] || 0);
const model = String(attrs["spinal.model"] || "openai:gpt-4o-mini");
const aggregationId = String(attrs["spinal.aggregation_id"] || "unknown");
inputTokens += spanInputTokens;
outputTokens += spanOutputTokens;
totalTokens += spanTotalTokens;
if (!usageByModel[model]) {
usageByModel[model] = { calls: 0, tokens: 0, percentage: 0 };
}
usageByModel[model].calls += 1;
usageByModel[model].tokens += spanTotalTokens;
if (!usageByAggregation[aggregationId]) {
usageByAggregation[aggregationId] = { calls: 0, tokens: 0, percentage: 0 };
}
usageByAggregation[aggregationId].calls += 1;
usageByAggregation[aggregationId].tokens += spanTotalTokens;
}
Object.values(usageByModel).forEach((model) => {
model.percentage = totalCalls > 0 ? model.calls / totalCalls * 100 : 0;
});
Object.values(usageByAggregation).forEach((agg) => {
agg.percentage = totalCalls > 0 ? agg.calls / totalCalls * 100 : 0;
});
return {
totalCalls,
totalTokens,
inputTokens,
outputTokens,
usageByModel,
usageByAggregation,
tokenEfficiency: {
averageInputTokensPerCall: totalCalls > 0 ? inputTokens / totalCalls : 0,
averageOutputTokensPerCall: totalCalls > 0 ? outputTokens / totalCalls : 0,
tokenRatio: inputTokens > 0 ? outputTokens / inputTokens : 0
}
};
}
analyzePerformance(options = {}) {
this.spans = this.loadSpans();
const filteredSpans = this.filterSpansByTime(this.spans, options.since);
const openAISpans = filteredSpans.filter((span) => this.isOpenAISpan(span));
const totalRequests = openAISpans.length;
const successful = openAISpans.filter((span) => span.status.code === 1).length;
const failed = totalRequests - successful;
const successRate = totalRequests > 0 ? successful / totalRequests * 100 : 0;
const responseTimes = openAISpans.map((span) => this.getSpanDuration(span)).sort((a, b) => a - b);
const average = responseTimes.length > 0 ? responseTimes.reduce((a, b) => a + b, 0) / responseTimes.length : 0;
const median = responseTimes.length > 0 ? responseTimes[Math.floor(responseTimes.length / 2)] : 0;
const p95 = responseTimes.length > 0 ? responseTimes[Math.floor(responseTimes.length * 0.95)] : 0;
const fastest = responseTimes.length > 0 ? responseTimes[0] : 0;
const slowest = responseTimes.length > 0 ? responseTimes[responseTimes.length - 1] : 0;
const errors = {
rateLimit: 0,
authentication: 0,
network: 0,
other: failed
};
return {
totalRequests,
successful,
failed,
successRate,
responseTimes: {
average,
median,
p95,
fastest,
slowest
},
errors
};
}
analyzeModels(options = {}) {
this.spans = this.loadSpans();
const filteredSpans = this.filterSpansByTime(this.spans, options.since);
const openAISpans = filteredSpans.filter((span) => this.isOpenAISpan(span));
const models = {};
for (const span of openAISpans) {
const attrs = span.attributes || {};
const model = String(attrs["spinal.model"] || "openai:gpt-4o-mini");
const inputTokens = Number(attrs["spinal.input_tokens"] || 0);
const outputTokens = Number(attrs["spinal.output_tokens"] || 0);
const totalTokens = Number(attrs["spinal.total_tokens"] || 0);
const cost = estimateCost({ model, inputTokens, outputTokens });
const responseTime = this.getSpanDuration(span);
const isSuccess = span.status.code === 1;
if (!models[model]) {
models[model] = {
calls: 0,
totalCost: 0,
avgCostPerCall: 0,
avgResponseTime: 0,
successRate: 0,
totalTokens: 0
};
}
models[model].calls += 1;
models[model].totalCost += cost;
models[model].totalTokens += totalTokens;
models[model].avgResponseTime = (models[model].avgResponseTime * (models[model].calls - 1) + responseTime) / models[model].calls;
models[model].avgCostPerCall = models[model].totalCost / models[model].calls;
models[model].successRate = (models[model].successRate * (models[model].calls - 1) + (isSuccess ? 100 : 0)) / models[model].calls;
}
return { models };
}
analyzeAggregations(options = {}) {
this.spans = this.loadSpans();
const filteredSpans = this.filterSpansByTime(this.spans, options.since);
const openAISpans = filteredSpans.filter((span) => this.isOpenAISpan(span));
const aggregations = {};
for (const span of openAISpans) {
const attrs = span.attributes || {};
const aggregationId = String(attrs["spinal.aggregation_id"] || "unknown");
const inputTokens = Number(attrs["spinal.input_tokens"] || 0);
const outputTokens = Number(attrs["spinal.output_tokens"] || 0);
const totalTokens = Number(attrs["spinal.total_tokens"] || 0);
const model = String(attrs["spinal.model"] || "openai:gpt-4o-mini");
const cost = estimateCost({ model, inputTokens, outputTokens });
const isSuccess = span.status.code === 1;
if (!aggregations[aggregationId]) {
aggregations[aggregationId] = {
calls: 0,
totalCost: 0,
avgCostPerCall: 0,
successRate: 0,
totalTokens: 0
};
}
aggregations[aggregationId].calls += 1;
aggregations[aggregationId].totalCost += cost;
aggregations[aggregationId].totalTokens += totalTokens;
aggregations[aggregationId].avgCostPerCall = aggregations[aggregationId].totalCost / aggregations[aggregationId].calls;
aggregations[aggregationId].successRate = (aggregations[aggregationId].successRate * (aggregations[aggregationId].calls - 1) + (isSuccess ? 100 : 0)) / aggregations[aggregationId].calls;
}
return { aggregations };
}
analyzeTrends(options = {}) {
this.spans = this.loadSpans();
const filteredSpans = this.filterSpansByTime(this.spans, options.since);
const openAISpans = filteredSpans.filter((span) => this.isOpenAISpan(span));
const dailyCalls = openAISpans.length / 30;
const peakUsage = { date: "unknown", calls: Math.max(...openAISpans.map(() => 1)) };
const growthRate = 0;
const totalCost = openAISpans.reduce((total, span) => {
const attrs = span.attributes || {};
const inputTokens = Number(attrs["spinal.input_tokens"] || 0);
const outputTokens = Number(attrs["spinal.output_tokens"] || 0);
const model = String(attrs["spinal.model"] || "openai:gpt-4o-mini");
return total + estimateCost({ model, inputTokens, outputTokens });
}, 0);
const dailyCost = totalCost / 30;
const peakCost = { date: "unknown", cost: totalCost };
const costPerCallTrend = "stable";
return {
usageTrends: {
dailyAverageCalls: dailyCalls,
peakUsage,
growthRate
},
costTrends: {
dailyAverageCost: dailyCost,
peakCost,
costPerCallTrend
},
performanceTrends: {
responseTimeTrend: "stable",
errorRateTrend: "stable",
successRateTrend: "stable"
}
};
}
getOptimizationRecommendations(options = {}) {
const costAnalysis = this.analyzeCosts(options);
const usageAnalysis = this.analyzeUsage(options);
const performanceAnalysis = this.analyzePerformance(options);
const modelAnalysis = this.analyzeModels(options);
const recommendations = {
costOptimization: [],
performanceOptimization: [],
usageOptimization: []
};
if (costAnalysis.averageCostPerCall > 0.02) {
recommendations.costOptimization.push("Consider using gpt-4o-mini for simple tasks to reduce costs");
}
if (costAnalysis.totalCost > 10) {
recommendations.costOptimization.push("Monitor token usage and optimize prompts to reduce costs");
}
if (performanceAnalysis.responseTimes.average > 3e3) {
recommendations.performanceOptimization.push("Consider implementing caching for repeated queries");
}
if (performanceAnalysis.successRate < 99) {
recommendations.performanceOptimization.push("Monitor error rates and implement retry logic");
}
const gpt4Usage = Object.entries(modelAnalysis.models).find(([model]) => model.includes("gpt-4o") && !model.includes("mini"));
if (gpt4Usage && gpt4Usage[1].calls > usageAnalysis.totalCalls * 0.5) {
recommendations.usageOptimization.push("Consider using gpt-4o-mini for simple tasks to reduce costs");
}
return recommendations;
}
analyzeResponses(options = {}) {
this.spans = this.loadSpans();
const filteredSpans = this.filterSpansByTime(this.spans, options.since);
const openAISpans = filteredSpans.filter((span) => this.isOpenAISpan(span));
let totalResponses = 0;
let totalResponseSize = 0;
const responseSizeDistribution = { small: 0, medium: 0, large: 0 };
const responseTypes = { success: 0, error: 0, truncated: 0 };
const errorTypes = {};
const errorMessages = [];
const modelResponseQuality = {};
const allResponseLengths = [];
for (const span of openAISpans) {
const attrs = span.attributes || {};
const responseData = attrs["spinal.response.binary_data"];
const responseSize = Number(attrs["spinal.response.size"] || 0);
const model = String(attrs["spinal.model"] || "unknown");
const isSuccess = span.status.code === 1;
if (responseData) {
totalResponses++;
totalResponseSize += responseSize;
if (responseSize < 500) responseSizeDistribution.small++;
else if (responseSize < 2e3) responseSizeDistribution.medium++;
else responseSizeDistribution.large++;
try {
const parsed = JSON.parse(responseData);
if (parsed.error) {
responseTypes.error++;
const errorType = parsed.error.type || "unknown";
errorTypes[errorType] = (errorTypes[errorType] || 0) + 1;
errorMessages.push(parsed.error.message || "Unknown error");
} else if (parsed.choices && parsed.choices.length > 0) {
responseTypes.success++;
const choice = parsed.choices[0];
const content = choice.message?.content || "";
const responseLength = content.length;
allResponseLengths.push(responseLength);
if (!modelResponseQuality[model]) {
modelResponseQuality[model] = {
averageResponseLength: 0,
averageResponseSize: 0,
successRate: 0,
commonErrors: [],
totalResponses: 0,
totalSize: 0,
totalLength: 0,
successful: 0
};
}
modelResponseQuality[model].totalResponses++;
modelResponseQuality[model].totalSize += responseSize;
modelResponseQuality[model].totalLength += responseLength;
modelResponseQuality[model].successful += isSuccess ? 1 : 0;
}
} catch {
responseTypes.truncated++;
}
}
}
const averageResponseSize = totalResponses > 0 ? totalResponseSize / totalResponses : 0;
const averageResponseLength = allResponseLengths.length > 0 ? allResponseLengths.reduce((a, b) => a + b, 0) / allResponseLengths.length : 0;
Object.keys(modelResponseQuality).forEach((model) => {
const data = modelResponseQuality[model];
data.averageResponseLength = data.totalResponses > 0 ? data.totalLength / data.totalResponses : 0;
data.averageResponseSize = data.totalResponses > 0 ? data.totalSize / data.totalResponses : 0;
data.successRate = data.totalResponses > 0 ? data.successful / data.totalResponses * 100 : 0;
});
return {
totalResponses,
averageResponseSize,
responseSizeDistribution,
contentPatterns: {
averageResponseLength,
commonPhrases: [],
// Could be enhanced with NLP analysis
responseTypes
},
errorAnalysis: {
totalErrors: responseTypes.error,
errorTypes,
errorMessages: [...new Set(errorMessages)],
// Remove duplicates
successRate: totalResponses > 0 ? responseTypes.success / totalResponses * 100 : 0
},
modelResponseQuality
};
}
getContentInsights(options = {}) {
this.spans = this.loadSpans();
const filteredSpans = this.filterSpansByTime(this.spans, options.since);
const openAISpans = filteredSpans.filter((span) => this.isOpenAISpan(span));
const responsePatterns = { shortResponses: 0, mediumResponses: 0, longResponses: 0 };
const finishReasons = {};
const commonErrors = { rateLimit: 0, authentication: 0, modelNotFound: 0, other: 0 };
let totalOutputTokens = 0;
let totalResponseSize = 0;
let totalResponseLength = 0;
for (const span of openAISpans) {
const attrs = span.attributes || {};
const responseData = attrs["spinal.response.binary_data"];
const responseSize = Number(attrs["spinal.response.size"] || 0);
const outputTokens = Number(attrs["spinal.output_tokens"] || 0);
if (responseData) {
totalOutputTokens += outputTokens;
totalResponseSize += responseSize;
try {
const parsed = JSON.parse(responseData);
if (parsed.error) {
const errorType = parsed.error.type || "unknown";
if (errorType.includes("rate_limit")) commonErrors.rateLimit++;
else if (errorType.includes("auth")) commonErrors.authentication++;
else if (errorType.includes("model")) commonErrors.modelNotFound++;
else commonErrors.other++;
} else if (parsed.choices && parsed.choices.length > 0) {
const choice = parsed.choices[0];
const content = choice.message?.content || "";
const responseLength = content.length;
totalResponseLength += responseLength;
if (responseLength < 50) responsePatterns.shortResponses++;
else if (responseLength < 200) responsePatterns.mediumResponses++;
else responsePatterns.longResponses++;
const finishReason = choice.finish_reason || "unknown";
finishReasons[finishReason] = (finishReasons[finishReason] || 0) + 1;
}
} catch {
}
}
}
return {
responsePatterns,
finishReasons,
responseQuality: {
averageTokensPerCharacter: totalResponseLength > 0 ? totalOutputTokens / totalResponseLength : 0,
responseEfficiency: totalResponseSize > 0 ? totalOutputTokens / totalResponseSize : 0
},
commonErrors
};
}
calculateCostTrends(spans) {
const openAISpans = spans.filter((span) => this.isOpenAISpan(span));
if (openAISpans.length === 0) return [];
const totalCost = openAISpans.reduce((total, span) => {
const attrs = span.attributes || {};
const inputTokens = Number(attrs["spinal.input_tokens"] || 0);
const outputTokens = Number(attrs["spinal.output_tokens"] || 0);
const model = String(attrs["spinal.model"] || "openai:gpt-4o-mini");
return total + estimateCost({ model, inputTokens, outputTokens });
}, 0);
return [
{ date: "Today", cost: totalCost, calls: openAISpans.length }
];
}
};
// Annotate the CommonJS export names for ESM import in node:
// NOTE: the `0 &&` prefix makes this expression dead code at runtime; the
// bundler emits it only so static analyzers (e.g. Node's CJS named-export
// detection) can see the export names and allow `import { configure } ...`
// from ESM. Do not "simplify" or remove it.
0 && (module.exports = {
Analytics,
configure,
displayLocalData,
estimateCost,
forceFlush,
instrumentHTTP,
instrumentOpenAI,
shutdown,
tag
});
//# sourceMappingURL=index.cjs.map