capsule-ai-cli
The AI Model Orchestrator - Intelligent multi-model workflows with device-locked licensing
98 lines • 4.65 kB
JavaScript
import { chatService } from './chat.js';
import { toolRegistry } from '../tools/registry.js';
import { toolExecutor } from '../tools/executor.js';
import { contextManager } from './context.js';
import { stateService } from './state.js';
import chalk from 'chalk';
export class ToolChatService {
  async chatWithTools(input, options) {
    const mode = options?.mode || 'chat';
    // Auto-execution defaults to on only in agent mode; callers can override it.
    const autoExecute = options?.autoExecute ?? (mode === 'agent');
    // Only agent and fusion modes expose tools to the model.
    const tools = mode === 'agent' || mode === 'fusion'
      ? toolRegistry.getEnabled()
      : [];
    const provider = stateService.getProvider();
    // Format the registry's tool definitions for the active provider when any tools are enabled.
    const formattedTools = tools.length > 0
      ? toolRegistry.formatForProvider(provider)
      : undefined;
    const response = await chatService.chat(input, {
      ...options,
      stream: false
    });
    if (response.toolCalls && response.toolCalls.length > 0) {
      // Plan mode only reports which tools would run; nothing is executed.
      if (mode === 'plan') {
        this.displayPlannedTools(response.toolCalls);
        return response;
      }
      if (autoExecute) {
        const toolResults = await this.executeTools(response.toolCalls);
        // Record each tool result in the conversation context so the follow-up turn can see it.
        toolResults.forEach((result, index) => {
          contextManager.addMessage({
            role: 'tool_result',
            content: JSON.stringify(result.result?.output || result.result?.error),
            tool_call_id: response.toolCalls[index].id,
            metadata: {
              model: stateService.getModel(),
              provider: stateService.getProvider()
            }
          });
        });
        // Let the model continue with the tool results in context, then merge
        // the two turns' content and token usage into a single response.
        const followUpResponse = await chatService.chat('', {
          ...options,
          stream: false
        });
        return {
          ...followUpResponse,
          content: response.content + '\n\n' + followUpResponse.content,
          usage: {
            promptTokens: response.usage.promptTokens + followUpResponse.usage.promptTokens,
            completionTokens: response.usage.completionTokens + followUpResponse.usage.completionTokens,
            totalTokens: response.usage.totalTokens + followUpResponse.usage.totalTokens
          }
        };
      }
      else {
        console.log(chalk.yellow('\n⚠️ Tool execution disabled. Enable with --auto-execute or switch to agent mode.'));
        this.displayPlannedTools(response.toolCalls);
      }
    }
    return response;
  }
  async executeTools(toolCalls) {
    // Run the tool calls in order from the current working directory, without prompting for approval.
    const executions = await toolExecutor.executeSequence(toolCalls, {
      workingDirectory: process.cwd(),
      requireApproval: false
    });
    return executions;
  }
  displayPlannedTools(toolCalls) {
    console.log(chalk.blue('\n📋 Planned tool usage:'));
    toolCalls.forEach((call, index) => {
      const tool = toolRegistry.get(call.name);
      const icon = tool?.icon || '🔧';
      console.log(` ${index + 1}. ${icon} ${tool?.displayName || call.name}`);
      Object.entries(call.arguments).forEach(([key, value]) => {
        console.log(chalk.dim(` ${key}: ${JSON.stringify(value)}`));
      });
    });
  }
  getEnhancedSystemMessage(mode) {
    const baseMessage = this.getBaseSystemMessage(mode);
    const toolDescriptions = toolRegistry.getToolDescriptions();
    if (toolDescriptions && (mode === 'agent' || mode === 'fusion')) {
      return `${baseMessage}\n\n${toolDescriptions}`;
    }
    return baseMessage;
  }
  getBaseSystemMessage(mode) {
    const systemMessages = {
      chat: 'You are a helpful AI assistant.',
      agent: 'You are an AI agent that can break down tasks and execute them step by step. When you need to perform actions like reading files, writing files, or running commands, use the available tools.',
      plan: 'You are an AI architect that creates detailed plans and architectures. When relevant, mention which tools would be used to implement each step.',
      fusion: 'You are an AI orchestrator that can coordinate multiple models and approaches. You can use tools to gather information and execute tasks.'
    };
    return systemMessages[mode] || systemMessages.chat;
  }
}
export const toolChatService = new ToolChatService();
//# sourceMappingURL=tool-chat.js.map
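
For orientation, here is a minimal usage sketch (not part of the published file). It assumes the surrounding CLI has already configured chatService, the tool registry, and stateService with a provider and model; the option names mirror the ones chatWithTools reads above, and the prompt text is purely illustrative.

// Hypothetical call site for the exported singleton.
import { toolChatService } from './tool-chat.js';

// One agent-mode turn: any tool calls the model emits are auto-executed,
// their results are added to context, and a merged response comes back.
const result = await toolChatService.chatWithTools('Summarize the files in src/', {
  mode: 'agent',     // 'chat' | 'agent' | 'plan' | 'fusion', per getBaseSystemMessage
  autoExecute: true  // defaults to true only when mode === 'agent'
});

console.log(result.content);
console.log(`total tokens: ${result.usage.totalTokens}`);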