UNPKG

@sentry/core

Version:
281 lines (247 loc) 9.83 kB
Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });

const genAiAttributes = require('../ai/gen-ai-attributes.js');
const utils = require('../ai/utils.js');
const constants = require('./constants.js');
const vercelAiAttributes = require('./vercel-ai-attributes.js');

/**
 * Accumulates token data from a span to its parent in the token accumulator map.
 *
 * Extracts token usage from the current span and adds it to the accumulated
 * totals for its parent span. Spans without a parent, or without numeric token
 * counts, are ignored.
 *
 * @param span - The span whose token usage should be rolled up to its parent.
 * @param tokenAccumulator - Map of parent span id -> accumulated token totals; mutated in place.
 */
function accumulateTokensForParent(span, tokenAccumulator) {
  const parentSpanId = span.parent_span_id;
  if (!parentSpanId) {
    return;
  }

  const inputTokens = span.data[genAiAttributes.GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE];
  const outputTokens = span.data[genAiAttributes.GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE];

  if (typeof inputTokens === 'number' || typeof outputTokens === 'number') {
    const existing = tokenAccumulator.get(parentSpanId) || { inputTokens: 0, outputTokens: 0 };
    if (typeof inputTokens === 'number') {
      existing.inputTokens += inputTokens;
    }
    if (typeof outputTokens === 'number') {
      existing.outputTokens += outputTokens;
    }
    tokenAccumulator.set(parentSpanId, existing);
  }
}

/**
 * Applies accumulated token data to the `gen_ai.invoke_agent` span.
 *
 * Only immediate children of the `gen_ai.invoke_agent` span are considered,
 * since aggregation will automatically occur for each parent span. Attributes
 * are only written for totals greater than zero, and a combined total is
 * written whenever either count is positive.
 *
 * @param spanOrTrace - The span (or trace root) receiving the accumulated totals.
 * @param tokenAccumulator - Map of span id -> accumulated token totals produced by `accumulateTokensForParent`.
 */
function applyAccumulatedTokens(spanOrTrace, tokenAccumulator) {
  const accumulated = tokenAccumulator.get(spanOrTrace.span_id);
  if (!accumulated || !spanOrTrace.data) {
    return;
  }

  if (accumulated.inputTokens > 0) {
    spanOrTrace.data[genAiAttributes.GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = accumulated.inputTokens;
  }
  if (accumulated.outputTokens > 0) {
    spanOrTrace.data[genAiAttributes.GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = accumulated.outputTokens;
  }
  if (accumulated.inputTokens > 0 || accumulated.outputTokens > 0) {
    spanOrTrace.data['gen_ai.usage.total_tokens'] = accumulated.inputTokens + accumulated.outputTokens;
  }
}

/**
 * Builds a map of tool name -> description from all spans with available_tools.
 * This avoids O(n²) iteration and repeated JSON parsing.
 *
 * The first description seen for a given tool name wins; spans whose
 * available_tools attribute is missing, non-string, or unparsable are skipped.
 *
 * @param spans - The spans to scan for `gen_ai.request.available_tools` data.
 * @returns Map of tool name to tool description.
 */
function buildToolDescriptionMap(spans) {
  const toolDescriptions = new Map();

  for (const span of spans) {
    const availableTools = span.data[genAiAttributes.GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE];
    if (typeof availableTools !== 'string') {
      continue;
    }

    try {
      const tools = JSON.parse(availableTools);
      for (const tool of tools) {
        if (tool.name && tool.description && !toolDescriptions.has(tool.name)) {
          toolDescriptions.set(tool.name, tool.description);
        }
      }
    } catch {
      // ignore parse errors — best-effort enrichment only
    }
  }

  return toolDescriptions;
}

/**
 * Applies tool descriptions and accumulated tokens to spans in a single pass.
 *
 * - For `gen_ai.execute_tool` spans: looks up tool description from
 *   `gen_ai.request.available_tools` on sibling spans
 * - For `gen_ai.invoke_agent` spans: applies accumulated token data from children
 *
 * @param spans - All spans of the event being processed.
 * @param tokenAccumulator - Map of span id -> accumulated token totals.
 */
function applyToolDescriptionsAndTokens(spans, tokenAccumulator) {
  // Build lookup map once to avoid O(n²) iteration and repeated JSON parsing
  const toolDescriptions = buildToolDescriptionMap(spans);

  for (const span of spans) {
    if (span.op === 'gen_ai.execute_tool') {
      const toolName = span.data[genAiAttributes.GEN_AI_TOOL_NAME_ATTRIBUTE];
      if (typeof toolName === 'string') {
        const description = toolDescriptions.get(toolName);
        if (description) {
          span.data[genAiAttributes.GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE] = description;
        }
      }
    }

    if (span.op === 'gen_ai.invoke_agent') {
      applyAccumulatedTokens(span, tokenAccumulator);
    }
  }
}

/**
 * Get the span context associated with a tool call ID.
 *
 * @param toolCallId - The tool call identifier to look up.
 * @returns The stored span context, or `undefined` if none is registered.
 */
function _INTERNAL_getSpanContextForToolCallId(toolCallId) {
  return constants.toolCallSpanContextMap.get(toolCallId);
}

/**
 * Clean up the span mapping for a tool call ID.
 *
 * @param toolCallId - The tool call identifier whose mapping should be removed.
 */
function _INTERNAL_cleanupToolCallSpanContext(toolCallId) {
  constants.toolCallSpanContextMap.delete(toolCallId);
}

/**
 * Convert an array of tool strings to a JSON string.
 *
 * Entries that are JSON strings are parsed into objects first; entries that
 * fail to parse are kept verbatim as strings.
 *
 * @param tools - Tool entries, each either an object or a (possibly JSON) string.
 * @returns A JSON string of the normalized tool array.
 */
function convertAvailableToolsToJsonString(tools) {
  const toolObjects = tools.map(tool => {
    if (typeof tool === 'string') {
      try {
        return JSON.parse(tool);
      } catch {
        return tool;
      }
    }
    return tool;
  });
  return JSON.stringify(toolObjects);
}

/**
 * Filter out invalid entries in messages array.
 *
 * A valid message is a non-null object carrying both a `role` and a `content`
 * property (values themselves are not validated here).
 *
 * @param input - The input array to filter
 * @returns The filtered array
 */
function filterMessagesArray(input) {
  return input.filter(
    (m) => !!m && typeof m === 'object' && 'role' in m && 'content' in m,
  );
}

/**
 * Normalize the user input (stringified object with prompt, system, messages) to messages array.
 *
 * Handles, in priority order: a top-level `system` string (prepended as a
 * system message), a `messages` array (possibly itself JSON-stringified), a
 * `prompt` array, and a `prompt` string (wrapped as a user message). Returns
 * an empty array when the input cannot be parsed or yields no messages.
 *
 * @param userInput - JSON string with optional `prompt`, `system`, `messages` fields.
 * @returns Normalized array of `{ role, content }` messages.
 */
function convertUserInputToMessagesFormat(userInput) {
  try {
    const p = JSON.parse(userInput);
    if (!!p && typeof p === 'object') {
      let { messages } = p;
      const { prompt, system } = p;
      const result = [];

      // prepend top-level system instruction if present
      if (typeof system === 'string') {
        result.push({ role: 'system', content: system });
      }

      // stringified messages array
      if (typeof messages === 'string') {
        try {
          messages = JSON.parse(messages);
        } catch {
          // ignore parse errors — falls through to prompt handling below
        }
      }

      // messages array format: { messages: [...] }
      if (Array.isArray(messages)) {
        result.push(...filterMessagesArray(messages));
        return result;
      }

      // prompt array format: { prompt: [...] }
      if (Array.isArray(prompt)) {
        result.push(...filterMessagesArray(prompt));
        return result;
      }

      // prompt string format: { prompt: "..." }
      if (typeof prompt === 'string') {
        result.push({ role: 'user', content: prompt });
      }

      if (result.length > 0) {
        return result;
      }
    }
    // eslint-disable-next-line no-empty
  } catch {}
  return [];
}

/**
 * Extracts system instructions from `messages` onto the span, then writes the
 * (optionally truncated) remaining messages JSON to both the given primary
 * attribute key and the gen_ai input-messages attributes.
 *
 * Private helper shared by both branches of `requestMessagesFromPrompt` to
 * avoid duplicating the normalization sequence.
 *
 * @param span - The span receiving the attributes.
 * @param messages - Normalized `{ role, content }` messages array.
 * @param primaryAttributeKey - The vercel-ai attribute to mirror the messages JSON onto.
 * @param enableTruncation - Whether to truncate the serialized messages JSON.
 */
function _setNormalizedMessageAttributes(span, messages, primaryAttributeKey, enableTruncation) {
  const { systemInstructions, filteredMessages } = utils.extractSystemInstructions(messages);
  if (systemInstructions) {
    span.setAttribute(genAiAttributes.GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, systemInstructions);
  }

  const filteredLength = Array.isArray(filteredMessages) ? filteredMessages.length : 0;
  const messagesJson = enableTruncation
    ? utils.getTruncatedJsonString(filteredMessages)
    : utils.getJsonString(filteredMessages);

  span.setAttributes({
    [primaryAttributeKey]: messagesJson,
    [genAiAttributes.GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: messagesJson,
    [genAiAttributes.GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: filteredLength,
  });
}

/**
 * Generate a request.messages JSON array from the prompt field in the
 * invoke_agent op.
 *
 * @param span - The span to write normalized message attributes onto.
 * @param attributes - The raw span attributes to read prompt data from.
 * @param enableTruncation - Whether to truncate the serialized messages JSON.
 */
function requestMessagesFromPrompt(span, attributes, enableTruncation) {
  if (
    typeof attributes[vercelAiAttributes.AI_PROMPT_ATTRIBUTE] === 'string' &&
    !attributes[genAiAttributes.GEN_AI_INPUT_MESSAGES_ATTRIBUTE] &&
    !attributes[vercelAiAttributes.AI_PROMPT_MESSAGES_ATTRIBUTE]
  ) {
    // No messages array is present, so we need to convert the prompt to the proper messages format
    // This is the case for ai.generateText spans
    // The ai.prompt attribute is a stringified object with prompt, system, messages attributes
    // The format of these is described in the vercel docs, for instance: https://ai-sdk.dev/docs/reference/ai-sdk-core/stream-object#parameters
    const userInput = attributes[vercelAiAttributes.AI_PROMPT_ATTRIBUTE];
    const messages = convertUserInputToMessagesFormat(userInput);
    if (messages.length) {
      _setNormalizedMessageAttributes(span, messages, vercelAiAttributes.AI_PROMPT_ATTRIBUTE, enableTruncation);
    }
  } else if (typeof attributes[vercelAiAttributes.AI_PROMPT_MESSAGES_ATTRIBUTE] === 'string') {
    // In this case we already get a properly formatted messages array, this is the preferred way to get the messages
    // This is the case for ai.generateText.doGenerate spans
    try {
      const messages = JSON.parse(attributes[vercelAiAttributes.AI_PROMPT_MESSAGES_ATTRIBUTE]);
      if (Array.isArray(messages)) {
        _setNormalizedMessageAttributes(span, messages, vercelAiAttributes.AI_PROMPT_MESSAGES_ATTRIBUTE, enableTruncation);
      }
      // eslint-disable-next-line no-empty
    } catch {}
  }
}

exports._INTERNAL_cleanupToolCallSpanContext = _INTERNAL_cleanupToolCallSpanContext;
exports._INTERNAL_getSpanContextForToolCallId = _INTERNAL_getSpanContextForToolCallId;
exports.accumulateTokensForParent = accumulateTokensForParent;
exports.applyAccumulatedTokens = applyAccumulatedTokens;
exports.applyToolDescriptionsAndTokens = applyToolDescriptionsAndTokens;
exports.convertAvailableToolsToJsonString = convertAvailableToolsToJsonString;
exports.convertUserInputToMessagesFormat = convertUserInputToMessagesFormat;
exports.requestMessagesFromPrompt = requestMessagesFromPrompt;
//# sourceMappingURL=utils.js.map