// capsule-ai-cli — The AI Model Orchestrator
// Intelligent multi-model workflows with device-locked licensing.
import chalk from 'chalk';
import ora from 'ora';
import { render } from 'ink';
import React from 'react';
import { InteractiveChat } from '../ui/InteractiveChat.js';
import { providerRegistry } from '../providers/base.js';
import { ProviderFactory } from '../providers/factory.js';
import { configManager } from '../core/config.js';
import { logger } from '../utils/logger.js';
/**
 * Implements the `chat` command: one-shot prompt completions and an
 * ink-based interactive chat session against the registered AI providers.
 */
export class ChatCommand {
  // Conversation history shared by single-prompt and interactive modes.
  messages = [];
  // Defaults used when the caller does not pass --provider / --model.
  currentProvider = 'openai';
  currentModel = 'gpt-4o';

  /**
   * Entry point. Runs interactive mode when --interactive is set or no
   * prompt was given; otherwise answers the single prompt.
   * Exits the process with code 1 on any failure.
   * @param {string|undefined} prompt - Optional one-shot user prompt.
   * @param {{interactive?: boolean, provider?: string, model?: string, stream?: boolean}} options
   */
  async execute(prompt, options) {
    try {
      await this.initializeProviders();
      if (options.interactive || !prompt) {
        return this.runInteractiveMode();
      }
      return this.runSinglePrompt(prompt, options);
    } catch (error) {
      logger.error('Chat command failed:', error);
      // The thrown value may not be an Error; don't assume `.message` exists.
      const message = error instanceof Error ? error.message : String(error);
      console.error(chalk.red('Error:'), message);
      process.exit(1);
    }
  }

  /**
   * Tries to construct and register every known provider. Providers whose
   * configuration is missing are skipped (logged at debug level).
   * @throws {Error} when no provider could be registered at all.
   */
  async initializeProviders() {
    const providers = ['openai', 'anthropic', 'google', 'cohere', 'mistral'];
    for (const providerName of providers) {
      try {
        const provider = await ProviderFactory.create(providerName);
        providerRegistry.register(provider);
      } catch (error) {
        // Expected for providers without API keys configured — best effort.
        logger.debug(`Provider ${providerName} not available: ${error}`);
      }
    }
    if (providerRegistry.list().length === 0) {
      throw new Error('No AI providers configured. Run "capsule config set providers.<name>.apiKey YOUR_KEY" to set up API keys.');
    }
  }

  /**
   * Clears the terminal, shows the welcome screen briefly, then renders the
   * ink InteractiveChat component seeded with any existing history.
   */
  async runInteractiveMode() {
    console.clear();
    // Explicit .js extension: Node ESM resolution requires it for relative
    // imports (all other relative imports in this file carry it).
    const { displayWelcomeScreen } = await import('../ui/welcome.js');
    displayWelcomeScreen();
    // Brief pause so the welcome screen is visible before the chat UI mounts.
    await new Promise((resolve) => setTimeout(resolve, 2000));
    const { waitUntilExit } = render(React.createElement(InteractiveChat, {
      onSendMessage: async (message) => {
        await this.handleMessage(message);
      },
      initialMessages: this.messages
        .filter((m) => m.role === 'user' || m.role === 'assistant' || m.role === 'system')
        .map((m) => ({
          role: m.role,
          // Multi-part content is flattened to its text segments only.
          content: typeof m.content === 'string'
            ? m.content
            : m.content
                .filter((c) => c.type === 'text')
                .map((c) => c.text)
                .join(' '),
          timestamp: new Date(),
          model: m.metadata?.model,
          cost: m.metadata?.cost
        }))
    }));
    await waitUntilExit();
  }

  /**
   * Answers a single prompt, streaming the reply when requested and
   * supported. Appends both the user and assistant turns to history.
   * @param {string} prompt - The user's message.
   * @param {{provider?: string, model?: string, stream?: boolean}} options
   * @throws {Error} when the requested provider is not registered.
   */
  async runSinglePrompt(prompt, options) {
    const spinner = ora({
      text: 'Thinking...',
      color: 'cyan'
    }).start();
    try {
      this.messages.push({
        role: 'user',
        content: prompt
      });
      // Resolve the provider name once so the error message names the
      // provider actually looked up (not `undefined` when falling back).
      const providerName = options.provider || this.currentProvider;
      const provider = providerRegistry.get(providerName);
      if (!provider) {
        throw new Error(`Provider ${providerName} not found`);
      }
      const model = options.model || this.currentModel;
      if (options.stream && provider.supportsStreaming) {
        spinner.stop();
        console.log(chalk.cyan('\n🤖 Assistant:'));
        const stream = provider.stream(this.messages, { model });
        let fullResponse = '';
        let totalCost = 0;
        for await (const chunk of stream) {
          process.stdout.write(chunk.delta);
          fullResponse += chunk.delta;
          // Usage typically arrives on the final chunk; keep the last cost.
          if (chunk.usage) {
            const cost = provider.calculateCost(chunk.usage, model);
            totalCost = cost.amount;
          }
        }
        console.log('\n');
        if (configManager.getConfig().ui.showCosts) {
          console.log(chalk.gray(`Cost: $${totalCost.toFixed(4)} | Model: ${model}`));
        }
        this.messages.push({
          role: 'assistant',
          content: fullResponse
        });
      } else {
        const response = await provider.complete(this.messages, { model });
        spinner.stop();
        console.log(chalk.cyan('\n🤖 Assistant:'));
        console.log(response.content);
        const cost = provider.calculateCost(response.usage, model);
        if (configManager.getConfig().ui.showCosts) {
          console.log(chalk.gray(`\nCost: $${cost.amount.toFixed(4)} | Model: ${model}`));
        }
        this.messages.push({
          role: 'assistant',
          content: response.content
        });
      }
    } catch (error) {
      // Stop the spinner before propagating so it doesn't keep animating.
      spinner.stop();
      throw error;
    }
  }

  /**
   * Handles one interactive-mode message: records the user turn, gets a
   * completion from the current provider, records the assistant turn, and
   * logs the cost/token usage.
   * @param {string} message - The user's chat input.
   * @throws {Error} when the current provider is not registered.
   */
  async handleMessage(message) {
    this.messages.push({
      role: 'user',
      content: message
    });
    const provider = providerRegistry.get(this.currentProvider);
    if (!provider) {
      throw new Error('No provider available');
    }
    const response = await provider.complete(this.messages, {
      model: this.currentModel
    });
    this.messages.push({
      role: 'assistant',
      content: response.content
    });
    const cost = provider.calculateCost(response.usage, this.currentModel);
    logger.info(`Chat response`, {
      provider: this.currentProvider,
      model: this.currentModel,
      cost: cost.amount,
      tokens: response.usage.totalTokens
    });
  }
}
//# sourceMappingURL=chat.js.map