@ai-sdk/openai
Version:
The **[OpenAI provider](https://ai-sdk.dev/providers/ai-sdk-providers/openai)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model support for the OpenAI chat and completion APIs and embedding model support for the OpenAI embeddings API.
58 lines (53 loc) • 1.48 kB
text/typescript
import { LanguageModelV3Usage } from '@ai-sdk/provider';
/**
 * Token-usage payload as reported by the OpenAI chat completions API.
 *
 * Every field is optional and nullable: callers treat a missing or null
 * value as "not reported" (see `convertOpenAIChatUsage`, which defaults
 * them to 0 when the payload itself is present).
 */
export type OpenAIChatUsage = {
  /** Tokens consumed by the prompt (input). */
  prompt_tokens?: number | null;
  /** Tokens produced in the completion (output). */
  completion_tokens?: number | null;
  /** Sum of prompt and completion tokens. */
  total_tokens?: number | null;
  prompt_tokens_details?: {
    /** Portion of `prompt_tokens` served from the prompt cache. */
    cached_tokens?: number | null;
  } | null;
  completion_tokens_details?: {
    /** Portion of `completion_tokens` spent on reasoning. */
    reasoning_tokens?: number | null;
    /** Predicted-output tokens that were accepted (not used in conversion below). */
    accepted_prediction_tokens?: number | null;
    /** Predicted-output tokens that were rejected (not used in conversion below). */
    rejected_prediction_tokens?: number | null;
  } | null;
};
/**
 * Converts an OpenAI chat usage payload into the AI SDK's
 * `LanguageModelV3Usage` shape.
 *
 * @param usage - Raw usage object from the OpenAI response, or
 *   `null`/`undefined` when the response carried none.
 * @returns The normalized usage. When `usage` is nullish, every field is
 *   `undefined` (unknown). Otherwise missing numeric fields default to 0,
 *   `noCache` is computed as total input minus cached input, and `text` as
 *   total output minus reasoning output. The original payload is passed
 *   through on `raw`.
 */
export function convertOpenAIChatUsage(
  usage: OpenAIChatUsage | undefined | null,
): LanguageModelV3Usage {
  // No usage reported at all: signal "unknown" rather than zero.
  if (usage == null) {
    return {
      inputTokens: {
        total: undefined,
        noCache: undefined,
        cacheRead: undefined,
        cacheWrite: undefined,
      },
      outputTokens: {
        total: undefined,
        text: undefined,
        reasoning: undefined,
      },
      raw: undefined,
    };
  }

  // A present payload with absent fields is treated as 0 tokens.
  const input = usage.prompt_tokens ?? 0;
  const output = usage.completion_tokens ?? 0;
  const cacheRead = usage.prompt_tokens_details?.cached_tokens ?? 0;
  const reasoning = usage.completion_tokens_details?.reasoning_tokens ?? 0;

  return {
    inputTokens: {
      total: input,
      // Input tokens that were NOT served from the prompt cache.
      noCache: input - cacheRead,
      cacheRead,
      // OpenAI chat usage does not report cache writes.
      cacheWrite: undefined,
    },
    outputTokens: {
      total: output,
      // Plain text output = everything that was not reasoning.
      text: output - reasoning,
      reasoning,
    },
    raw: usage,
  };
}