@nanocollective/nanocoder
Version:
A local-first CLI coding agent that brings the power of agentic coding tools like Claude Code and Gemini CLI to local models or controlled APIs like OpenRouter.
38 lines • 1.82 kB
TypeScript
import React from 'react';
import type { ConversationStateManager } from '../../../app/utils/conversation-state.js';
import type { ToolManager } from '../../../tools/tool-manager.js';
import type { LLMClient, Message, ToolCall } from '../../../types/core.js';
/**
 * Dependency bundle consumed by {@link processAssistantResponse}.
 * Groups the LLM client, tool infrastructure, streaming/UI state setters,
 * and conversation bookkeeping needed to drive one assistant turn.
 */
interface ProcessAssistantResponseParams {
  /** System prompt message for the conversation. */
  systemMessage: Message;
  /** Conversation history sent to the LLM. */
  messages: Message[];
  /** Client used to request the assistant response. */
  client: LLMClient;
  /** Tool manager, or null when no tools are available. */
  toolManager: ToolManager | null;
  /** Controller for cancelling the in-flight request, if any. */
  abortController: AbortController | null;
  /** Replaces (or clears) the active abort controller. */
  setAbortController: (controller: AbortController | null) => void;
  /** Toggles the "response is being generated" state. */
  setIsGenerating: (generating: boolean) => void;
  /** Updates the partial content shown while streaming. */
  setStreamingContent: (content: string) => void;
  /** Updates the token counter (presumably tokens streamed so far — confirm in implementation). */
  setTokenCount: (count: number) => void;
  /** Replaces the conversation message list. */
  setMessages: (messages: Message[]) => void;
  /** Appends a React component to the chat display queue. */
  addToChatQueue: (component: React.ReactNode) => void;
  /** Returns a unique key for the next queued component. */
  getNextComponentKey: () => number;
  /** Name of the active LLM provider. */
  currentProvider: string;
  /** Name of the active model. */
  currentModel: string;
  /** Mode governing how tool execution is handled (e.g. auto-accept skips confirmation — verify against implementation). */
  developmentMode: 'normal' | 'auto-accept' | 'plan' | 'scheduler';
  /** True when running without interactive user prompts. */
  nonInteractiveMode: boolean;
  /** Mutable ref holding the conversation state manager. */
  conversationStateManager: React.MutableRefObject<ConversationStateManager>;
  /** Invoked to begin the user confirmation flow for pending tool calls. */
  onStartToolConfirmationFlow: (toolCalls: ToolCall[], updatedMessages: Message[], assistantMsg: Message, systemMessage: Message) => void;
  /** Optional callback fired when the conversation turn completes. */
  onConversationComplete?: () => void;
  /** Optional epoch timestamp marking when the conversation started — units (ms vs s) not visible here; confirm. */
  conversationStartTime?: number;
}
/**
 * Main conversation loop that processes assistant responses and handles tool calls.
 * This function orchestrates the entire conversation flow including:
 * - Streaming responses from the LLM
 * - Parsing and validating tool calls
 * - Executing or requesting confirmation for tools
 * - Handling errors and self-correction
 * - Managing the conversation state
 *
 * @param params - Bundle of client, tool, UI-state, and conversation
 *   dependencies; see {@link ProcessAssistantResponseParams}.
 * @returns A promise that resolves when the assistant turn has been fully
 *   processed (including any tool-call handling).
 */
export declare const processAssistantResponse: (params: ProcessAssistantResponseParams) => Promise<void>;
export {};
//# sourceMappingURL=conversation-loop.d.ts.map