UNPKG

openai-mock-api

Version:

A mock OpenAI API server for testing LLM applications

60 lines 2.06 kB
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.TokenCounter = void 0;
const tiktoken_1 = require("tiktoken");
/**
 * Computes OpenAI-style token usage (prompt/completion/total) for chat
 * requests using a tiktoken encoding.
 */
class TokenCounter {
    /**
     * @param logger - logger exposing an `error(message, err)` method
     * @param encodingName - tiktoken encoding name (defaults to 'cl100k_base')
     */
    constructor(logger, encodingName = 'cl100k_base') {
        this.logger = logger;
        this.encoding = (0, tiktoken_1.get_encoding)(encodingName);
    }
    /**
     * Builds a usage object for a request/response pair. On any encoding
     * failure the error is logged and an all-zero usage object is returned.
     */
    calculateTokens(request, responseContent) {
        try {
            // Prompt tokens come from the flattened message list.
            const serialized = this.formatMessagesForTokenCount(request.messages);
            const promptCount = this.countTokens(serialized);
            // Completion tokens come from the canned response content.
            const completionCount = this.countTokens(responseContent);
            return {
                prompt_tokens: promptCount,
                completion_tokens: completionCount,
                total_tokens: promptCount + completionCount,
            };
        }
        catch (error) {
            this.logger.error('Error calculating tokens', error);
            // Return reasonable defaults if token calculation fails
            return { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 };
        }
    }
    // Flattens each message (role, content, tool-call metadata) into one
    // newline-joined string whose tokens approximate the prompt cost.
    formatMessagesForTokenCount(messages) {
        const parts = [];
        for (const msg of messages) {
            let piece = `${msg.role}: `;
            if (msg.content) {
                piece += msg.content;
            }
            if (msg.tool_calls) {
                piece += ` [tool_calls: ${JSON.stringify(msg.tool_calls)}]`;
            }
            if (msg.tool_call_id) {
                piece += ` [tool_call_id: ${msg.tool_call_id}]`;
            }
            parts.push(piece);
        }
        return parts.join('\n');
    }
    // Token count for one string; empty or missing text counts as zero.
    countTokens(text) {
        if (!text)
            return 0;
        return this.encoding.encode(text).length;
    }
    // Releases the native resources held by the tiktoken encoding.
    dispose() {
        this.encoding.free();
    }
}
exports.TokenCounter = TokenCounter;
//# sourceMappingURL=token-counter.js.map