giga-code / token-counter.js

A personal AI CLI assistant powered by Grok for local development.

"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.createTokenCounter = exports.TokenCounter = void 0; const tiktoken_1 = require("tiktoken"); class TokenCounter { constructor(model = 'gpt-4') { try { // Try to get encoding for specific model this.encoder = (0, tiktoken_1.encoding_for_model)(model); } catch { // Fallback to cl100k_base (used by GPT-4 and most modern models) this.encoder = (0, tiktoken_1.get_encoding)('cl100k_base'); } } /** * Count tokens in a string */ countTokens(text) { if (!text) return 0; return this.encoder.encode(text).length; } /** * Count tokens in messages array (for chat completions) */ countMessageTokens(messages) { let totalTokens = 0; for (const message of messages) { // Every message follows <|start|>{role/name}\n{content}<|end|\>\n totalTokens += 3; // Base tokens per message if (message.content && typeof message.content === 'string') { totalTokens += this.countTokens(message.content); } if (message.role) { totalTokens += this.countTokens(message.role); } // Add extra tokens for tool calls if present if (message.tool_calls) { totalTokens += this.countTokens(JSON.stringify(message.tool_calls)); } } totalTokens += 3; // Every reply is primed with <|start|>assistant<|message|> return totalTokens; } /** * Estimate tokens for streaming content * This is an approximation since we don't have the full response yet */ estimateStreamingTokens(accumulatedContent) { return this.countTokens(accumulatedContent); } /** * Clean up resources */ dispose() { this.encoder.free(); } } exports.TokenCounter = TokenCounter; /** * Create a token counter instance */ function createTokenCounter(model) { return new TokenCounter(model); } exports.createTokenCounter = createTokenCounter; //# sourceMappingURL=token-counter.js.map