// aiwrapper — A Universal AI Wrapper for JavaScript & TypeScript
// Rough estimate: 1 token ≈ 4 chars for English text
/**
 * Estimate the token count of a piece of text.
 * @param {string|null|undefined} text - Text to measure; null/undefined
 *        (e.g. messages without a text body) counts as 0 tokens instead
 *        of throwing on `text.length`.
 * @returns {number} Estimated token count (ceiling of length / 4).
 */
function estimateTokens(text) {
    return Math.ceil((text?.length ?? 0) / 4);
}
/**
 * Calculate the maximum number of tokens available for the model's response.
 *
 * @param {{ context: { type: string, outputIsFixed?: number, maxOutput?: number, total?: number } }} model
 *        Model descriptor; only token-based contexts are computed precisely.
 * @param {Array<{ content?: string|null }>} messages - Conversation so far,
 *        used to estimate how much of a shared context window the input uses.
 * @param {number} [maxTokens] - Optional caller-requested cap on response size.
 * @returns {number} A non-negative token budget for the response.
 */
export function calculateModelResponseTokens(model, messages, maxTokens) {
    // Non-token contexts aren't handled; return user maxTokens or a reasonable default.
    if (model.context.type !== "token") {
        return maxTokens || 2000;
    }
    const context = model.context;
    // Models with fixed output capacity (flagged in metadata with outputIsFixed === 1):
    // the output budget is independent of input length.
    if (context.outputIsFixed === 1 && context.maxOutput) {
        // If user specified maxTokens, clamp it to the model's hard output limit.
        return maxTokens ? Math.min(maxTokens, context.maxOutput) : context.maxOutput;
    }
    // Models whose output capacity shares one context window with the input:
    // the budget is whatever the input leaves over, capped at maxOutput.
    if (context.total && context.maxOutput) {
        // Rough estimate: 1 token ≈ 4 chars, plus 4 tokens of per-message
        // overhead. Messages without text content (e.g. tool calls) count
        // only the overhead instead of throwing on `content.length`.
        const inputTokens = messages.reduce((sum, message) => {
            return sum + Math.ceil((message.content?.length ?? 0) / 4) + 4;
        }, 0);
        // Remaining tokens in the context window after the input.
        const remainingTokens = context.total - inputTokens;
        const budget = maxTokens
            ? Math.min(maxTokens, context.maxOutput, remainingTokens)
            : Math.min(context.maxOutput, remainingTokens);
        // Never return a negative budget when the input already overflows.
        return Math.max(0, budget);
    }
    // Not enough metadata to compute a budget.
    return maxTokens || context.maxOutput || 2000;
}
//# sourceMappingURL=token-calculator.js.map