capsule-ai-cli
The AI Model Orchestrator - Intelligent multi-model workflows with device-locked licensing
JavaScript
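// /local — detect locally hosted model servers (Ollama, LM Studio, llama.cpp)
// and switch the active provider/model to one of them.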
import { stateService } from '../../services/state.js';
import { localModelsService } from '../../services/local-models.js';
import { ProviderFactory } from '../../providers/factory.js';
import chalk from 'chalk';

export const localCommand = {
  name: 'local',
  description: 'Use local AI models (Ollama, LM Studio, llama.cpp)',
  alias: ['l', 'ollama', 'lmstudio'],
  async execute(args) {
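    // Probe the known local servers and keep only those that responded.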
    const serverStatus = localModelsService.getServerStatus();
    const availableServers = serverStatus.filter(s => s.available);
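    // Nothing reachable: return setup instructions for each supported server.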
    if (availableServers.length === 0) {
      return {
        success: false,
        message: chalk.yellow('⚠️ No local model servers detected\n\n') +
          chalk.white('To use local models, start one of these servers:\n\n') +
          chalk.cyan('Ollama:') + '\n' +
          ' 1. Install: curl -fsSL https://ollama.ai/install.sh | sh\n' +
          ' 2. Start: ollama serve\n' +
          ' 3. Pull a model: ollama pull llama3.3\n\n' +
          chalk.cyan('LM Studio:') + '\n' +
          ' 1. Download from: https://lmstudio.ai\n' +
          ' 2. Load a model and start the server\n\n' +
          chalk.cyan('llama.cpp:') + '\n' +
          ' 1. Build llama.cpp\n' +
          ' 2. Run: ./llama-server -m model.gguf --port 8080'
      };
    }
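    // An explicit argument (e.g. `/local ollama`) selects a provider directly.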
    if (args && args.length > 0) {
      const provider = args[0].toLowerCase();
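      // Accept common aliases and spellings for each canonical provider id.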
      const providerMap = {
        'ollama': 'ollama',
        'lm': 'lmstudio',
        'lmstudio': 'lmstudio',
        'lm-studio': 'lmstudio',
        'llamacpp': 'llamacpp',
        'llama.cpp': 'llamacpp',
        'llama': 'llamacpp',
        'local': 'local'
      };
      const mappedProvider = providerMap[provider];
      if (!mappedProvider) {
        return {
          success: false,
          message: `Unknown local provider: ${provider}\nAvailable: ollama, lmstudio, llamacpp`
        };
      }
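      // Map the provider id back to the display name used by getServerStatus().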
      const serverName = mappedProvider === 'ollama' ? 'Ollama' :
        mappedProvider === 'lmstudio' ? 'LM Studio' :
        mappedProvider === 'llamacpp' ? 'llama.cpp' : 'Local Server';
      const server = serverStatus.find(s => s.name === serverName);
      if (!server?.available) {
        return {
          success: false,
          message: `${serverName} is not running at ${server?.url || 'default port'}`
        };
      }
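      // Create the provider, register it, and make it the active one.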
      try {
        const { providerRegistry } = await import('../../providers/base.js');
        const providerInstance = await ProviderFactory.create(mappedProvider);
        providerRegistry.register(providerInstance);
        stateService.setProvider(mappedProvider);
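        // Default to the first model the server reports, if any.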
        const models = await stateService.getAvailableModels(mappedProvider);
        const modelCount = models.length;
        const currentModel = models.length > 0 ? models[0] : 'No models available';
        if (models.length > 0) {
          stateService.setModel(currentModel);
        }
        return {
          success: true,
          message: chalk.green(`✓ Switched to ${serverName}\n`) +
            chalk.gray(` Server: ${server.url}\n`) +
            chalk.gray(` Models: ${modelCount} available\n`) +
            chalk.gray(` Current: ${currentModel}`)
        };
      } catch (error) {
        return {
          success: false,
          message: `Failed to connect to ${serverName}: ${error instanceof Error ? error.message : String(error)}`
        };
      }
    }
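    // Exactly one server is up: connect to it automatically.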
    if (availableServers.length === 1) {
      const server = availableServers[0];
      const providerName = server.name === 'Ollama' ? 'ollama' :
        server.name === 'LM Studio' ? 'lmstudio' :
        server.name === 'llama.cpp' ? 'llamacpp' : 'local';
      try {
        const { providerRegistry } = await import('../../providers/base.js');
        const providerInstance = await ProviderFactory.create(providerName);
        providerRegistry.register(providerInstance);
        stateService.setProvider(providerName);
        const models = await stateService.getAvailableModels(providerName);
        const currentModel = models.length > 0 ? models[0] : 'No models available';
        if (models.length > 0) {
          stateService.setModel(currentModel);
        }
        return {
          success: true,
          message: chalk.green(`✓ Connected to ${server.name}\n`) +
            chalk.gray(` Server: ${server.url}\n`) +
            chalk.gray(` Models: ${server.models} available\n`) +
            chalk.gray(` Current: ${currentModel}`)
        };
      } catch (error) {
        return {
          success: false,
          message: `Failed to connect: ${error instanceof Error ? error.message : String(error)}`
        };
      }
    }
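    // Multiple servers are up: return the list so the UI can prompt for a choice.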
    return {
      success: true,
      action: 'none',
      data: {
        type: 'local-server-select',
        servers: availableServers.map(s => ({
          name: s.name,
          url: s.url,
          models: s.models,
          provider: s.name === 'Ollama' ? 'ollama' :
            s.name === 'LM Studio' ? 'lmstudio' :
            s.name === 'llama.cpp' ? 'llamacpp' : 'local'
        }))
      },
      message: chalk.cyan('Multiple local servers detected:\n') +
        availableServers.map(s => ` • ${s.name} at ${s.url} (${s.models} models)`).join('\n') +
        '\n\nUse /local <name> to select one'
    };
  }
};