// @sentry/core — base implementation for all Sentry JavaScript SDKs
// Tag the exports object as an ES module for bundler/interop tooling.
Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
// Core SDK plumbing (scopes, exception capture, span helpers).
const currentScopes = require('../../currentScopes.js');
const exports$1 = require('../../exports.js');
const semanticAttributes = require('../../semanticAttributes.js');
const spanstatus = require('../spanstatus.js');
const trace = require('../trace.js');
// Shared gen-AI attribute constants and helpers, plus OpenAI-specific utils.
const genAiAttributes = require('../ai/gen-ai-attributes.js');
const utils$1 = require('../ai/utils.js');
const streaming = require('./streaming.js');
const utils = require('./utils.js');
/**
 * Extract request attributes from method arguments.
 *
 * Always records the gen-AI system, operation name, and Sentry origin.
 * When the first argument is a params object, also records the requested
 * model, the available tools (including `web_search_options`, which the
 * Chat Completions API accepts alongside `tools` — we fold it into the
 * tool list so all tool calls are captured), and any sampling/stream/
 * embedding options present on the params.
 *
 * @param {unknown[]} args - Arguments the instrumented method was called with.
 * @param {string} methodPath - Dot-separated path of the instrumented method.
 * @returns {Record<string, unknown>} Span attributes describing the request.
 */
function extractRequestAttributes(args, methodPath) {
  const attributes = {
    [genAiAttributes.GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
    [genAiAttributes.GEN_AI_OPERATION_NAME_ATTRIBUTE]: utils.getOperationName(methodPath),
    [semanticAttributes.SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
  };
  // Single params guard shared by tool extraction and option extraction
  // (previously this exact check was duplicated in two separate blocks).
  if (args.length > 0 && typeof args[0] === 'object' && args[0] !== null) {
    const params = args[0];
    const tools = Array.isArray(params.tools) ? params.tools : [];
    const hasWebSearchOptions = params.web_search_options && typeof params.web_search_options === 'object';
    const webSearchOptions = hasWebSearchOptions
      ? [{ type: 'web_search_options', ...params.web_search_options }]
      : [];
    const availableTools = [...tools, ...webSearchOptions];
    if (availableTools.length > 0) {
      attributes[genAiAttributes.GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] = JSON.stringify(availableTools);
    }
    attributes[genAiAttributes.GEN_AI_REQUEST_MODEL_ATTRIBUTE] = params.model ?? 'unknown';
    // Copy optional request knobs only when the caller explicitly set them.
    if ('temperature' in params) attributes[genAiAttributes.GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE] = params.temperature;
    if ('top_p' in params) attributes[genAiAttributes.GEN_AI_REQUEST_TOP_P_ATTRIBUTE] = params.top_p;
    if ('frequency_penalty' in params) {
      attributes[genAiAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE] = params.frequency_penalty;
    }
    if ('presence_penalty' in params) {
      attributes[genAiAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE] = params.presence_penalty;
    }
    if ('stream' in params) attributes[genAiAttributes.GEN_AI_REQUEST_STREAM_ATTRIBUTE] = params.stream;
    if ('encoding_format' in params) {
      attributes[genAiAttributes.GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE] = params.encoding_format;
    }
    if ('dimensions' in params) attributes[genAiAttributes.GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE] = params.dimensions;
  } else {
    // No params object at all: still record a model so span names stay well-formed.
    attributes[genAiAttributes.GEN_AI_REQUEST_MODEL_ATTRIBUTE] = 'unknown';
  }
  return attributes;
}
/**
 * Add response attributes to a span.
 *
 * Supports Chat Completion, Responses API, and Embeddings responses; each
 * response shape is detected via the corresponding `utils.is*Response`
 * predicate and handled by its dedicated attribute helper. Response text is
 * only attached when `recordOutputs` is enabled.
 */
function addResponseAttributes(span, result, recordOutputs) {
  if (!result || typeof result !== 'object') return;
  const response = result;
  if (utils.isChatCompletionResponse(response)) {
    utils.addChatCompletionAttributes(span, response, recordOutputs);
    if (recordOutputs && response.choices?.length) {
      // One entry per choice; missing message content becomes ''.
      const texts = response.choices.map(choice => choice.message?.content || '');
      span.setAttributes({ [genAiAttributes.GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: JSON.stringify(texts) });
    }
    return;
  }
  if (utils.isResponsesApiResponse(response)) {
    utils.addResponsesApiAttributes(span, response, recordOutputs);
    if (recordOutputs && response.output_text) {
      span.setAttributes({ [genAiAttributes.GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: response.output_text });
    }
    return;
  }
  if (utils.isEmbeddingsResponse(response)) {
    utils.addEmbeddingsAttributes(span, response);
  }
}
/**
 * Record AI request inputs on the span, if present. Intentionally separate
 * from response-attribute handling.
 *
 * Both the Chat Completions `messages` field and the Responses API `input`
 * field are written to the same messages attribute; when both are present,
 * `input` is processed last and wins (matching the original ordering).
 */
function addRequestAttributes(span, params) {
  for (const field of ['messages', 'input']) {
    if (field in params) {
      const truncated = utils$1.getTruncatedJsonString(params[field]);
      span.setAttributes({ [genAiAttributes.GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: truncated });
    }
  }
}
/**
 * Instrument a method with Sentry spans.
 * Following Sentry AI Agents Manual Instrumentation conventions.
 *
 * @param originalMethod - The OpenAI SDK method being wrapped.
 * @param methodPath - Dot-separated method path (used for span op/name and error mechanism data).
 * @param context - The `this` value the original method is invoked with.
 * @param options - `{ recordInputs, recordOutputs }` flags controlling input/output capture.
 * @returns An async wrapper that runs the original method inside a span.
 * @see https://docs.sentry.io/platforms/javascript/guides/node/tracing/instrumentation/ai-agents-module/#manual-instrumentation
 */
function instrumentMethod(
originalMethod,
methodPath,
context,
options,
) {
return async function instrumentedMethod(...args) {
const requestAttributes = extractRequestAttributes(args, methodPath);
// extractRequestAttributes always sets the model attribute; `|| 'unknown'` is a belt-and-braces fallback.
const model = (requestAttributes[genAiAttributes.GEN_AI_REQUEST_MODEL_ATTRIBUTE] ) || 'unknown';
const operationName = utils.getOperationName(methodPath);
const params = args[0] ;
// Streaming is only treated as requested when `stream` is literally `true`.
const isStreamRequested = params && typeof params === 'object' && params.stream === true;
if (isStreamRequested) {
// For streaming responses, use manual span management to properly handle the async generator lifecycle:
// the span must stay open until the consumer finishes iterating, so instrumentStream owns ending it.
return trace.startSpanManual(
{
name: `${operationName} ${model} stream-response`,
op: utils.getSpanOperation(methodPath),
attributes: requestAttributes ,
},
async (span) => {
try {
if (options.recordInputs && params) {
addRequestAttributes(span, params);
}
const result = await originalMethod.apply(context, args);
// Hand the stream and the open span to the streaming wrapper, which records
// output chunks (when enabled) and ends the span when the stream completes.
return streaming.instrumentStream(
result ,
span,
options.recordOutputs ?? false,
) ;
} catch (error) {
// For streaming requests that fail before stream creation, we still want to record
// them as streaming requests but end the span gracefully.
span.setStatus({ code: spanstatus.SPAN_STATUS_ERROR, message: 'internal_error' });
exports$1.captureException(error, {
mechanism: {
handled: false,
type: 'auto.ai.openai.stream',
data: {
function: methodPath,
},
},
});
// Manual span: we must end it ourselves before rethrowing to the caller.
span.end();
throw error;
}
},
);
} else {
// Non-streaming responses: startSpan ends the span automatically when the callback settles.
return trace.startSpan(
{
name: `${operationName} ${model}`,
op: utils.getSpanOperation(methodPath),
attributes: requestAttributes ,
},
async (span) => {
try {
if (options.recordInputs && params) {
addRequestAttributes(span, params);
}
const result = await originalMethod.apply(context, args);
addResponseAttributes(span, result, options.recordOutputs);
return result;
} catch (error) {
// Capture with a distinct mechanism type (no '.stream' suffix), then rethrow unchanged.
exports$1.captureException(error, {
mechanism: {
handled: false,
type: 'auto.ai.openai',
data: {
function: methodPath,
},
},
});
throw error;
}
},
);
}
};
}
/**
 * Create a deep proxy for OpenAI client instrumentation.
 *
 * Property reads are intercepted: instrumentable functions are wrapped with
 * Sentry spans, other functions are bound to the target, and nested objects
 * are recursively proxied so the whole client surface is covered.
 */
function createDeepProxy(target, currentPath = '', options) {
  const handler = {
    get(obj, prop) {
      const value = obj[prop];
      const methodPath = utils.buildMethodPath(currentPath, String(prop));
      if (typeof value === 'function') {
        // Bind non-instrumented functions to preserve the original `this` context,
        // which is required for accessing private class fields (e.g. #baseURL) in OpenAI SDK v5.
        return utils.shouldInstrument(methodPath)
          ? instrumentMethod(value, methodPath, obj, options)
          : value.bind(obj);
      }
      if (value && typeof value === 'object') {
        return createDeepProxy(value, methodPath, options);
      }
      return value;
    },
  };
  return new Proxy(target, handler);
}
/**
 * Instrument an OpenAI client with Sentry tracing.
 * Can be used across Node.js, Cloudflare Workers, and Vercel Edge.
 */
function instrumentOpenAiClient(client, options) {
  // Default both recording flags to the SDK-wide sendDefaultPii setting;
  // explicitly passed options always take precedence.
  const sentryClient = currentScopes.getClient();
  const sendDefaultPii = Boolean(sentryClient?.getOptions().sendDefaultPii);
  const resolvedOptions = {
    recordInputs: sendDefaultPii,
    recordOutputs: sendDefaultPii,
    ...options,
  };
  return createDeepProxy(client, '', resolvedOptions);
}
// Public entry point: wrap an OpenAI client so its calls emit Sentry spans.
exports.instrumentOpenAiClient = instrumentOpenAiClient;
//# sourceMappingURL=index.js.map