@nanocollective/nanocoder
Version:
A local-first CLI coding agent that brings the power of agentic coding tools like Claude Code and Gemini CLI to local models or controlled APIs like OpenRouter
71 lines • 2.97 kB
JavaScript
import { useEffect, useRef } from 'react';
import { getModelContextLimit } from '../models/index.js';
import { calculateTokenBreakdown, calculateToolDefinitionsTokens, } from '../usage/calculator.js';
import { processPromptTemplate } from '../utils/prompt-processor.js';
/**
 * React hook that keeps "percent of model context window used" in sync.
 *
 * Effect 1 resolves the (async) context limit whenever `currentModel`
 * changes; Effect 2 recomputes the percentage whenever the conversation,
 * streaming token count, or resolved limit changes.
 *
 * @param {object} params
 * @param {string} params.currentModel - Active model identifier ('' / falsy when none).
 * @param {Array} params.messages - Conversation messages (system prompt is added here).
 * @param {object} params.tokenizer - Tokenizer with a `countTokens(message)` method.
 * @param {Function} params.getMessageTokens - Cached per-message token lookup.
 * @param {object|null} params.toolManager - Exposes `getToolRegistry()`; may be absent.
 * @param {number} params.streamingTokenCount - Tokens of the in-flight response.
 * @param {number|null} params.contextLimit - Parent-held limit state; listed in Effect 2's
 *   deps purely to re-trigger the calculation once the async limit resolves.
 * @param {Function} params.setContextPercentUsed - Setter for the computed percentage.
 * @param {Function} params.setContextLimit - Setter for the resolved limit.
 */
export function useContextPercentage({ currentModel, messages, tokenizer, getMessageTokens, toolManager, streamingTokenCount, contextLimit, setContextPercentUsed, setContextLimit, }) {
    // Ref mirrors the resolved limit so Effect 2 can read it without a
    // stale-closure problem; `contextLimit` (state) drives re-runs instead.
    const contextLimitRef = useRef(null);
    const lastModelRef = useRef('');
    // Effect 1: Resolve context limit when model changes
    useEffect(() => {
        if (!currentModel) {
            contextLimitRef.current = null;
            setContextLimit(null);
            setContextPercentUsed(null);
            return;
        }
        // Skip the async lookup if the model hasn't actually changed.
        if (currentModel === lastModelRef.current)
            return;
        lastModelRef.current = currentModel;
        let cancelled = false;
        void getModelContextLimit(currentModel)
            .then(limit => {
            if (cancelled)
                return;
            contextLimitRef.current = limit;
            setContextLimit(limit);
            if (!limit) {
                setContextPercentUsed(null);
            }
        })
            .catch(() => {
            // Lookup failure: treat as "limit unknown" rather than leaving an
            // unhandled rejection and stale state behind.
            if (cancelled)
                return;
            contextLimitRef.current = null;
            setContextLimit(null);
            setContextPercentUsed(null);
        });
        return () => {
            cancelled = true;
        };
    }, [currentModel, setContextLimit, setContextPercentUsed]);
    // Effect 2: Recalculate percentage when messages, streaming tokens, or context limit change
    useEffect(() => {
        const limit = contextLimitRef.current;
        if (!limit) {
            setContextPercentUsed(null);
            return;
        }
        // Include system prompt in calculation (same as /usage command)
        const systemPrompt = processPromptTemplate();
        const systemMessage = {
            role: 'system',
            content: systemPrompt,
        };
        const breakdown = calculateTokenBreakdown([systemMessage, ...messages], tokenizer, (message) => {
            // System message won't be in the cache, use tokenizer directly
            if (message.role === 'system') {
                return tokenizer.countTokens(message);
            }
            return getMessageTokens(message);
        });
        // Include tool definition overhead (same as /usage command)
        const toolDefTokens = toolManager
            ? calculateToolDefinitionsTokens(Object.keys(toolManager.getToolRegistry()).length)
            : 0;
        const total = breakdown.total + toolDefTokens + streamingTokenCount;
        const percent = Math.round((total / limit) * 100);
        setContextPercentUsed(percent);
        // contextLimit is included to re-trigger calculation after async limit resolution
        // (the effect itself reads contextLimitRef, which the lint rule can't track).
        // eslint-disable-next-line react-hooks/exhaustive-deps
    }, [
        messages,
        tokenizer,
        getMessageTokens,
        toolManager,
        streamingTokenCount,
        contextLimit,
        setContextPercentUsed,
    ]);
}
//# sourceMappingURL=useContextPercentage.js.map