ai-sdk-token-usage
Version:
A lightweight TypeScript library to track and visualize token usage across multiple AI model providers.
78 lines (73 loc) • 3.17 kB
TypeScript
import { LanguageModelUsage, FinishReason, TextStreamPart, ToolSet } from 'ai';
/**
 * Token usage metadata attached to a completed message.
 *
 * Pairs the provider-reported total token usage with the canonical
 * model identifier so that per-message cost can be derived later.
 *
 * Intended to be returned from the `messageMetadata` callback of
 * `toUIMessageStreamResponse`, which persists it alongside each
 * completed model response.
 */
type TokenUsageMetadata = {
  /** Total token usage for the model response, exactly as reported by the provider. */
  totalUsage: LanguageModelUsage;
  /** The canonical model identifier (e.g., "openai/gpt-5"). */
  canonicalSlug: string;
};
/**
 * A single streamed part of a model response, paired with the
 * canonical model identifier.
 *
 * The `part` may be any variant of the stream (`start`, `text-delta`,
 * `finish`, etc.), so the response may or may not be finished yet —
 * use {@link toTokenUsageMetadata} to extract usage only once a
 * `finish` part arrives.
 */
type TokenUsagePart = {
  /** The streamed message part (start, text-delta, finish etc.). */
  part: TextStreamPart<ToolSet>;
  /** The canonical model identifier (e.g., "openai/gpt-5"). */
  canonicalSlug: string;
};
/**
 * The `finish` part of a model response, paired with the canonical
 * model identifier.
 *
 * Carries the total token usage and the reason the model stopped;
 * available only after the model output is complete.
 *
 * NOTE(review): the inline object appears to mirror the `finish`
 * variant of `TextStreamPart<ToolSet>` from the `ai` package —
 * confirm it stays in sync when upgrading the SDK.
 */
type TokenUsageFinishPart = {
  /** The completed model response part containing total usage info. */
  part: {
    type: "finish";
    finishReason: FinishReason;
    totalUsage: LanguageModelUsage;
  };
  /** The canonical model identifier (e.g., "openai/gpt-5"). */
  canonicalSlug: string;
};
/**
 * Generates token usage metadata for a completed model response.
 *
 * Use this helper when you already hold a `finish` part (e.g., at the
 * end of a streamed AI response) and want to attach token usage
 * information to the message metadata.
 *
 * @param options - A destructured {@link TokenUsageFinishPart}:
 *   `part` is the final `finish` part of the model response containing
 *   total token usage details, and `canonicalSlug` is the canonical
 *   model identifier (e.g., "openai/gpt-5").
 * @returns A {@link TokenUsageMetadata} object containing total token usage and model identifier metadata.
 */
declare function getTokenUsageMetadata({ part, canonicalSlug }: TokenUsageFinishPart): TokenUsageMetadata;
/**
 * Generates token usage metadata conditionally for a message part.
 *
 * Typically passed to the `messageMetadata` callback of
 * `toUIMessageStreamResponse`. It returns metadata only for `finish`
 * parts of a streamed response, ensuring that token usage is attached
 * only once the model output is complete.
 *
 * @param options - A destructured {@link TokenUsagePart}:
 *   `part` is any streamed message part (start, text-delta, finish,
 *   etc.), and `canonicalSlug` is the canonical model identifier
 *   (e.g., "openai/gpt-5").
 * @returns A {@link TokenUsageMetadata} object if the part type is `finish`, otherwise `undefined`.
 */
declare function toTokenUsageMetadata({ part, canonicalSlug }: TokenUsagePart): TokenUsageMetadata | undefined;
// Public API: the two metadata helpers plus the types they consume and produce.
export { type TokenUsageFinishPart, type TokenUsageMetadata, type TokenUsagePart, getTokenUsageMetadata, toTokenUsageMetadata };