/**
 * @nanocollective/nanocoder
 * A local-first CLI coding agent that brings the power of agentic coding
 * tools like Claude Code and Gemini CLI to local models or controlled APIs
 * like OpenRouter.
 * (Registry metadata: 163 lines • 7.49 kB • JavaScript — kept here as a
 * comment; as bare text these lines made the module unparseable.)
 */
import { jsx as _jsx } from "react/jsx-runtime";
import React from 'react';
import { ConversationStateManager } from '../../app/utils/conversation-state.js';
import UserMessage from '../../components/user-message.js';
import { CommandIntegration } from '../../custom-commands/command-integration.js';
import { promptHistory } from '../../prompt-history.js';
import { MessageBuilder } from '../../utils/message-builder.js';
import { assemblePrompt, processPromptTemplate } from '../../utils/prompt-processor.js';
import { processAssistantResponse } from './conversation/conversation-loop.js';
import { createResetStreamingState } from './state/streaming-state.js';
import { displayError as displayErrorHelper } from './utils/message-helpers.js';
/**
* Main chat handler hook that manages LLM conversations and tool execution.
* Orchestrates streaming responses, tool calls, and conversation state.
*/
export function useChatHandler({ client, toolManager, customCommandLoader, messages, setMessages, currentProvider, currentModel, setIsCancelling, addToChatQueue, getNextComponentKey, abortController, setAbortController, developmentMode = 'normal', nonInteractiveMode = false, onStartToolConfirmationFlow, onConversationComplete, compactToolDisplayRef, onSetCompactToolCounts, compactToolCountsRef, }) {
    // Conversation state manager for enhanced context. Held in a ref so it
    // survives re-renders without re-triggering effects.
    const conversationStateManager = React.useRef(new ConversationStateManager());
    // Track when the current conversation started for elapsed time display.
    const conversationStartTimeRef = React.useRef(Date.now());
    // Memoize CommandIntegration to avoid recreating on every message.
    // Null when either dependency is unavailable (system prompt is then used as-is).
    const commandIntegration = React.useMemo(() => {
        if (!toolManager || !customCommandLoader)
            return null;
        return new CommandIntegration(customCommandLoader, toolManager);
    }, [toolManager, customCommandLoader]);
    // State for streaming message content.
    const [streamingContent, setStreamingContent] = React.useState('');
    const [isGenerating, setIsGenerating] = React.useState(false);
    const [tokenCount, setTokenCount] = React.useState(0);
    // Helper to reset all streaming state.
    // FIX: the dependency array was previously empty, which froze the initial
    // `setIsCancelling`/`setAbortController` props in a stale closure — if the
    // parent ever supplied new callbacks, resets kept invoking the old ones.
    // Listing every captured setter satisfies exhaustive-deps; the local
    // useState setters are referentially stable, so this memoizes identically
    // when the parent props are stable.
    const resetStreamingState = React.useCallback(createResetStreamingState(setIsCancelling, setAbortController, setIsGenerating, setStreamingContent, setTokenCount), [setIsCancelling, setAbortController, setIsGenerating, setStreamingContent, setTokenCount]);
    // Helper to display errors in chat queue.
    const displayError = React.useCallback((error, keyPrefix) => {
        displayErrorHelper(error, keyPrefix, addToChatQueue, getNextComponentKey);
    }, [addToChatQueue, getNextComponentKey]);
    // Reset conversation state when messages are cleared (e.g. /clear).
    React.useEffect(() => {
        if (messages.length === 0) {
            conversationStateManager.current.reset();
        }
    }, [messages.length]);
    /**
     * Wrapper for processAssistantResponse that includes error handling.
     * Ensures streaming state is always reset and that completion is signalled
     * on failure so non-interactive mode does not hang.
     *
     * @param {object} systemMessage - {role: 'system', content} message.
     * @param {Array} msgs - Full conversation history to send to the model.
     */
    const processAssistantResponseWithErrorHandling = React.useCallback(async (systemMessage, msgs) => {
        if (!client)
            return;
        try {
            await processAssistantResponse({
                systemMessage,
                messages: msgs,
                client,
                toolManager,
                abortController,
                setAbortController,
                setIsGenerating,
                setStreamingContent,
                setTokenCount,
                setMessages,
                addToChatQueue,
                getNextComponentKey,
                currentProvider,
                currentModel,
                developmentMode,
                nonInteractiveMode,
                conversationStateManager,
                onStartToolConfirmationFlow,
                onConversationComplete,
                conversationStartTime: conversationStartTimeRef.current,
                compactToolDisplayRef,
                onSetCompactToolCounts,
                compactToolCountsRef,
            });
        }
        catch (error) {
            displayError(error, 'chat-error');
            // Signal completion on error to avoid hanging in non-interactive mode.
            onConversationComplete?.();
        }
        finally {
            resetStreamingState();
        }
    }, [
        client,
        toolManager,
        abortController,
        setAbortController,
        setMessages,
        addToChatQueue,
        getNextComponentKey,
        currentProvider,
        currentModel,
        developmentMode,
        nonInteractiveMode,
        onStartToolConfirmationFlow,
        onConversationComplete,
        compactToolDisplayRef,
        compactToolCountsRef,
        onSetCompactToolCounts,
        displayError,
        resetStreamingState,
    ]);
    /**
     * Handle chat message processing: echoes the user message to the chat
     * queue, appends it to conversation history, then runs the assistant loop.
     *
     * @param {string} message - Fully assembled user prompt text.
     */
    const handleChatMessage = async (message) => {
        if (!client || !toolManager)
            return;
        // Record conversation start time for elapsed time display.
        conversationStartTimeRef.current = Date.now();
        // For display purposes, try to get the placeholder version from history.
        // This preserves the nice placeholder display in chat history.
        // Only use history entry if the assembled prompt matches the current message
        // (VS Code prompts bypass history, so we shouldn't use stale history entries).
        const history = promptHistory.getHistory();
        const lastEntry = history[history.length - 1];
        const assembledFromHistory = lastEntry
            ? assemblePrompt(lastEntry)
            : undefined;
        const displayMessage = assembledFromHistory === message ? lastEntry.displayValue : message;
        // Add user message to chat using display version (with placeholders).
        // Pass the full assembled message for accurate token counting.
        addToChatQueue(_jsx(UserMessage, { message: displayMessage, tokenContent: message }, `user-${getNextComponentKey()}`));
        // Add user message to conversation history (immutably rebuilt).
        const builder = new MessageBuilder(messages);
        builder.addUserMessage(message);
        const updatedMessages = builder.build();
        setMessages(updatedMessages);
        // Initialize conversation state if this is a new conversation.
        if (messages.length === 0) {
            conversationStateManager.current.initializeState(message);
        }
        // Create abort controller for cancellation.
        // NOTE(review): the fresh `controller` is only published via
        // setAbortController; the processing call below still reads the
        // `abortController` prop, which React will not have updated yet in this
        // closure. Presumably the conversation loop re-reads the current
        // controller via setAbortController — confirm against
        // conversation-loop.js.
        const controller = new AbortController();
        setAbortController(controller);
        try {
            // Load and process system prompt.
            let systemPrompt = processPromptTemplate();
            // Enhance with relevant commands (progressive disclosure).
            if (commandIntegration) {
                systemPrompt = commandIntegration.enhanceSystemPrompt(systemPrompt, message);
            }
            // Create stream request.
            const systemMessage = {
                role: 'system',
                content: systemPrompt,
            };
            // Use the conversation loop.
            await processAssistantResponseWithErrorHandling(systemMessage, updatedMessages);
        }
        catch (error) {
            displayError(error, 'chat-error');
        }
        finally {
            resetStreamingState();
        }
    };
    return {
        handleChatMessage,
        processAssistantResponse: processAssistantResponseWithErrorHandling,
        isGenerating,
        streamingContent,
        tokenCount,
    };
}
//# sourceMappingURL=useChatHandler.js.map