UNPKG

@unified-llm/core

Version:

Unified LLM interface (in-memory).

70 lines 3.16 kB
"use strict";

// Token prices below are quoted per one million tokens.
const TOKENS_PER_MILLION = 1_000_000;

// USD price per million tokens, keyed by model id.
// Frozen: this table is read-only configuration.
const MODEL_PRICING = Object.freeze({
    "gpt-5.1": { input: 1.25, cachedInput: 0.125, output: 10 },
    "gpt-5": { input: 1.25, cachedInput: 0.125, output: 10 },
    "gpt-5-mini": { input: 0.25, cachedInput: 0.025, output: 2 },
    "gpt-5-nano": { input: 0.05, cachedInput: 0.005, output: 0.4 },
});

/**
 * Coerce a possibly-missing token count to a finite, non-negative number.
 * `undefined`, `null`, `NaN`, `Infinity`, and negatives all collapse to 0,
 * so a bad count can never poison the cost arithmetic.
 *
 * @param {unknown} value - raw token count from a usage object or options
 * @returns {number} finite count >= 0
 */
function toNonNegativeCount(value) {
    const n = Number(value);
    return Number.isFinite(n) && n > 0 ? n : 0;
}

/**
 * Compute the cost of one request for a known model.
 *
 * Cached input tokens are billed at the discounted `cachedInput` rate and
 * subtracted from the tokens billed at the regular `input` rate.
 *
 * @param {{inputTokens: number, outputTokens: number, cachedInputTokens?: number}} usage
 *   token counts for the request
 * @param {string} model - key into MODEL_PRICING
 * @param {{cachedInputTokens?: number, currencyMultiplier?: number}} [options]
 *   `cachedInputTokens` overrides `usage.cachedInputTokens`;
 *   `currencyMultiplier` scales the USD result (ignored unless finite)
 * @returns {number} total cost (USD times `currencyMultiplier`)
 * @throws {Error} when `model` has no entry in MODEL_PRICING
 */
function calculateUsageCost(usage, model, options) {
    const pricing = MODEL_PRICING[model];
    if (!pricing) {
        throw new Error(`Unsupported model for cost calculation: ${model}`);
    }
    // Fix: clamp every token count to a finite non-negative number, so a
    // NaN/undefined count returns a real cost instead of NaN (previously
    // only currencyMultiplier was finite-checked).
    const cachedInputTokens = toNonNegativeCount(
        options?.cachedInputTokens ?? usage.cachedInputTokens,
    );
    const inputTokens = toNonNegativeCount(usage.inputTokens);
    const outputTokens = toNonNegativeCount(usage.outputTokens);
    const currencyMultiplier = Number.isFinite(options?.currencyMultiplier)
        ? options.currencyMultiplier
        : 1;
    // Cached tokens are not billed twice: remove them from the regular bill.
    const billableInputTokens = Math.max(inputTokens - cachedInputTokens, 0);
    const inputCost = (billableInputTokens / TOKENS_PER_MILLION) * pricing.input;
    const cachedInputCost =
        (cachedInputTokens / TOKENS_PER_MILLION) * pricing.cachedInput;
    const outputCost = (outputTokens / TOKENS_PER_MILLION) * pricing.output;
    return (inputCost + cachedInputCost + outputCost) * currencyMultiplier;
}

// ---------------------------------------------------------
// Token usage accumulation helper
// ---------------------------------------------------------

/**
 * Fold one raw provider `usage` payload into running totals (mutates `totals`).
 *
 * Accepts both field-name dialects seen in SOURCE:
 * `input_tokens`/`output_tokens` with `input_tokens_details.cached_tokens`,
 * and `prompt_tokens`/`completion_tokens` with
 * `prompt_tokens_details.cached_tokens`. Non-numeric fields are skipped;
 * a non-object `usage` is ignored entirely.
 *
 * @param {{inputTokens: number, outputTokens: number, totalTokens: number,
 *          cachedInputTokens?: number}} totals - accumulator, mutated in place
 * @param {object|null|undefined} usage - raw usage object from the API
 * @returns {void}
 */
function accumulateUsage(totals, usage) {
    if (!usage || typeof usage !== "object") return;
    const input = Number(usage.input_tokens ?? usage.prompt_tokens);
    const output = Number(usage.output_tokens ?? usage.completion_tokens);
    const total = Number(usage.total_tokens);
    const cachedInput = Number(
        usage.input_tokens_details?.cached_tokens ??
            usage.prompt_tokens_details?.cached_tokens,
    );
    if (Number.isFinite(input)) totals.inputTokens += input;
    if (Number.isFinite(output)) totals.outputTokens += output;
    if (Number.isFinite(cachedInput)) {
        // cachedInputTokens may be absent on first use; start it at 0.
        totals.cachedInputTokens = (totals.cachedInputTokens ?? 0) + cachedInput;
    }
    if (Number.isFinite(total)) {
        totals.totalTokens += total;
    } else if (Number.isFinite(input) && Number.isFinite(output)) {
        // Provider sent no total: derive it from the parts we did get.
        totals.totalTokens += input + output;
    }
}

// CommonJS export wiring (interface unchanged). Guarded so the module also
// loads in environments where `exports` is not defined (e.g. evaluated as ESM).
if (typeof exports !== "undefined") {
    Object.defineProperty(exports, "__esModule", { value: true });
    exports.calculateUsageCost = calculateUsageCost;
    exports.accumulateUsage = accumulateUsage;
}