scai
Version:
> AI-powered CLI tools for smart commit messages, auto generated comments, and readme files — all powered by local models.
97 lines (96 loc) • 3.61 kB
JavaScript
import { spawn, execSync } from 'child_process';
import * as readline from 'readline';
// Local Ollama server port and the models this CLI depends on.
const MODEL_PORT = 11434; // Default Ollama HTTP API port (http://localhost:11434)
const REQUIRED_MODELS = ['llama3', 'mistral']; // Add more if needed; tags are stripped when comparing (e.g. 'llama3:latest' matches 'llama3')
// Ensure Ollama is running
/**
 * Ensures the Ollama server is reachable on MODEL_PORT.
 *
 * If a health-check request fails, starts `ollama serve` as a detached
 * background process and polls until the server answers (or a short
 * timeout elapses). Never throws; logs and continues on failure.
 *
 * @returns {Promise<void>} resolves once the server answered, or after the
 *   startup grace period expired.
 */
async function ensureOllamaRunning() {
  try {
    const res = await fetch(`http://localhost:${MODEL_PORT}`);
    if (res.ok) {
      console.log('✅ Ollama is already running.');
      return;
    }
    // A non-OK response still means something answered on the port;
    // fall through and try to (re)start the server anyway.
  } catch {
    // fetch rejected — nothing is listening on the port yet.
  }

  console.error('🟡 Ollama is not running. Starting it in the background...');
  const child = spawn('ollama', ['serve'], {
    detached: true,
    stdio: 'ignore',
    windowsHide: true,
  });
  // Without this handler, a spawn failure (e.g. `ollama` not on PATH) is
  // emitted as an uncaught 'error' event and crashes the whole process,
  // bypassing the surrounding try/catch.
  child.on('error', (err) => {
    console.error('❌ Failed to start Ollama:', err.message);
  });
  child.unref(); // Don't let the detached server keep Node's event loop alive.

  // Poll for readiness instead of a single fixed 3 s sleep: up to 10
  // attempts, 500 ms apart (same worst-case wait as before, usually faster).
  for (let attempt = 0; attempt < 10; attempt++) {
    await new Promise((resolve) => setTimeout(resolve, 500));
    try {
      await fetch(`http://localhost:${MODEL_PORT}`);
      console.log('✅ Ollama started.');
      return;
    } catch {
      // Not ready yet; keep waiting.
    }
  }
  console.error('🟡 Ollama did not become ready in time; continuing anyway.');
}
// Get installed models via ollama list
/**
 * Returns the subset of REQUIRED_MODELS that `ollama list` reports as
 * installed. Version tags are stripped before comparison, so
 * 'llama3:latest' matches 'llama3'.
 *
 * Skips the table header row and blank lines that `ollama list` emits —
 * previously those were parsed as model names and only filtered out by the
 * coincidence that they never match REQUIRED_MODELS.
 *
 * @returns {Promise<string[]>} installed required model names; [] if the
 *   `ollama` CLI is unavailable or the command fails.
 */
async function getInstalledModels() {
  try {
    const result = execSync('ollama list', { encoding: 'utf-8' });
    return result
      .split('\n')
      .slice(1) // drop the NAME / ID / SIZE / MODIFIED header row
      .map((line) => line.trim())
      .filter((line) => line.length > 0) // ignore blank lines
      .map((line) => line.split(/\s+/)[0].split(':')[0]) // 'llama3:latest' -> 'llama3'
      .filter((model) => REQUIRED_MODELS.includes(model));
  } catch (error) {
    console.error('❌ Failed to fetch installed models:', error instanceof Error ? error.message : error);
    return [];
  }
}
// Prompt user for input
/**
 * Shows a question on stdout and resolves with the user's answer,
 * whitespace-trimmed. The readline interface is closed after one answer.
 *
 * @param {string} question - prompt text shown to the user
 * @returns {Promise<string>} the trimmed line read from stdin
 */
function promptUser(question) {
  const ui = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
  });
  return new Promise((resolve) => {
    ui.question(question, (reply) => {
      ui.close();
      resolve(reply.trim());
    });
  });
}
// Ensure all required models are downloaded
/**
 * Verifies every model in REQUIRED_MODELS is installed locally, prompting
 * the user to pull any that are missing.
 *
 * Exits the process with code 1 if the user declines the download or if
 * any `ollama pull` fails.
 *
 * @returns {Promise<void>}
 */
async function ensureModelsDownloaded() {
  const installedModels = await getInstalledModels();
  const missingModels = REQUIRED_MODELS.filter((model) => !installedModels.includes(model));
  if (missingModels.length === 0) {
    console.log('✅ All required models are already installed.');
    return;
  }

  console.log(`🟡 Missing models: ${missingModels.join(', ')}`);
  const answer = await promptUser('Do you want to download the missing models now? (y/N): ');
  // Accept both 'y' and 'yes' (case-insensitive) — the old strict `!== 'y'`
  // check rejected users who typed "yes" at a "(y/N)" prompt.
  if (!['y', 'yes'].includes(answer.toLowerCase())) {
    console.log('🚫 Missing models not downloaded. Exiting.');
    process.exit(1);
  }

  for (const model of missingModels) {
    try {
      console.log(`⬇️ Pulling model: ${model} ...`);
      // stdio: 'inherit' streams Ollama's download progress to the terminal.
      execSync(`ollama pull ${model}`, { stdio: 'inherit' });
      console.log(`✅ Successfully pulled ${model}.`);
    } catch (err) {
      console.error(`❌ Failed to pull ${model}:`, err instanceof Error ? err.message : err);
      process.exit(1);
    }
  }
}
// Initialize the application
/**
 * Application entry point: ensures the Ollama server is up and all
 * required models are present before any CLI work begins.
 * Exits the process with code 1 if initialization fails.
 *
 * @returns {Promise<void>}
 */
export async function bootstrap() {
  try {
    await ensureOllamaRunning(); // start the server once, up front
    await ensureModelsDownloaded(); // then verify/pull models once
    // CLI logic can proceed from here...
  } catch (error) {
    const message = error instanceof Error ? error.message : error;
    console.error('❌ Error during initialization:', message);
    process.exit(1);
  }
}