scai
Version:
> AI-powered CLI tool for commit messages **and** pull request reviews — using local models.
172 lines (171 loc) • 5.85 kB
JavaScript
import { spawn, execSync } from 'child_process';
import * as readline from 'readline';
import * as fs from 'fs';
import * as path from 'path';
import chalk from 'chalk';
import { platform } from 'os';
import { getDbForRepo } from './db/client.js';
import { readConfig, writeConfig } from './config.js';
import { CONFIG_PATH } from './constants.js';
// Constants
const MODEL_PORT = 11434; // port probed by isOllamaRunning() — Ollama's default HTTP port
const REQUIRED_MODELS = ['llama3', 'mistral']; // models checked/pulled by ensureModelsDownloaded()
const OLLAMA_URL = 'https://ollama.com/download'; // opened in the browser when Ollama is missing
// Non-interactive mode: --yes flag or SCAI_YES=1 makes promptUser() auto-answer 'y'.
const isYesMode = process.argv.includes('--yes') || process.env.SCAI_YES === '1';
// Module-level guards so the Ollama startup check runs at most once per process.
let ollamaChecked = false;
let ollamaAvailable = false;
// 🧠 Auto init config/db if missing
/**
 * First-run setup: create the config file if absent, and initialize the
 * repo DB for the active repo if its scai.db file does not exist yet.
 * Safe to call on every invocation (idempotent).
 */
export async function autoInitIfNeeded() {
  const cfg = readConfig();
  if (!fs.existsSync(CONFIG_PATH)) {
    console.log(chalk.green('🛠️ Config not found. Initializing...'));
    writeConfig({});
  }
  // Optional chaining on cfg.repos: a fresh/empty config ({}) has no `repos`
  // key, and the previous `cfg.repos[cfg.activeRepo]` threw a TypeError here.
  const activeRepo = cfg.activeRepo && cfg.repos?.[cfg.activeRepo];
  if (activeRepo) {
    const dbPath = path.join(activeRepo.indexDir, 'scai.db');
    if (!fs.existsSync(dbPath)) {
      console.log(chalk.green('📦 DB not found. Initializing...'));
      // Side effect only — presumably creates the DB for the active repo.
      getDbForRepo();
    }
  }
}
// 🗨 Prompt user, auto-resolving after a timeout (default 10 seconds)
/**
 * Ask a question on stdin/stdout and resolve with the trimmed answer.
 * Resolves with 'y' immediately in --yes mode, and with '' (treated as
 * "no" by callers) if the user does not answer within `timeoutMs`.
 *
 * @param {string} question - Prompt text shown to the user.
 * @param {number} [timeoutMs=10000] - Milliseconds to wait before giving up.
 * @returns {Promise<string>} The trimmed answer, 'y', or ''.
 */
function promptUser(question, timeoutMs = 10000) {
  if (isYesMode) return Promise.resolve('y');
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  return new Promise((resolve) => {
    // If the user walks away, close the interface and resolve empty so the
    // CLI never blocks forever on an unattended terminal.
    const timer = setTimeout(() => {
      rl.close();
      resolve('');
    }, timeoutMs);
    rl.question(question, (answer) => {
      clearTimeout(timer);
      rl.close();
      resolve(answer.trim());
    });
  });
}
// 🧭 Cross-platform browser opener
/**
 * Open `url` in the system default browser; on failure, print the URL so
 * the user can open it manually.
 *
 * @param {string} url - URL to open.
 */
function openBrowser(url) {
  // Quote the URL so shell metacharacters (?, &, =) don't break the command.
  // On Windows, `start` treats its first *quoted* argument as a window
  // title, so an empty title ("") must precede the quoted URL.
  const command = platform() === 'win32'
    ? `start "" "${url}"`
    : platform() === 'darwin'
      ? `open "${url}"`
      : `xdg-open "${url}"`;
  try {
    execSync(command, { stdio: 'ignore' });
  } catch {
    console.log(chalk.yellow('🔗 Please manually open:'), url);
  }
}
// 🌐 Check if Ollama is responding on its local port
/**
 * Probe the local Ollama HTTP endpoint.
 *
 * @returns {Promise<boolean>} true if the server answered with a 2xx status;
 *   false on any error (connection refused, timeout, etc.).
 */
async function isOllamaRunning() {
  try {
    // Abort after 3s — without a signal, fetch against a wedged server
    // could hang the CLI indefinitely.
    const res = await fetch(`http://localhost:${MODEL_PORT}`, {
      signal: AbortSignal.timeout(3000),
    });
    return res.ok;
  } catch {
    return false;
  }
}
// 🚀 Ensure Ollama server is running (start it, or guide the user to install it)
/**
 * One-shot check (guarded by `ollamaChecked`): verify Ollama is up, try to
 * start it, and if the binary is missing walk the user through installing
 * it. Sets `ollamaAvailable` on success; exits the process on hard failure.
 */
async function ensureOllamaRunning() {
  if (ollamaChecked) return;
  ollamaChecked = true;

  if (await isOllamaRunning()) {
    console.log(chalk.green('✅ Ollama is already running.'));
    ollamaAvailable = true;
    return;
  }

  console.log(chalk.yellow('⚙️ Ollama is not running. Attempting to start it...'));

  // NOTE: spawn() reports a missing binary via the child's asynchronous
  // 'error' event, NOT a synchronous throw — the old try/catch around
  // spawn() could never observe ENOENT and always waited the full 10s.
  // Listen for the event so a missing binary skips the wait entirely.
  const spawnFailed = await new Promise((resolve) => {
    try {
      const child = spawn('ollama', ['serve'], {
        detached: true,
        stdio: 'ignore',
        windowsHide: true,
      });
      child.once('error', (err) => {
        if (err.code !== 'ENOENT') {
          console.log(chalk.red('❌ Unexpected error starting Ollama.'));
          process.exit(1);
        }
        resolve(true); // binary not found
      });
      child.unref();
      // No error fired: give the server time to come up.
      setTimeout(() => resolve(false), 10000);
    } catch (err) {
      // Defensive: keep the old synchronous path just in case.
      if (err.code !== 'ENOENT') {
        console.log(chalk.red('❌ Unexpected error starting Ollama.'));
        process.exit(1);
      }
      resolve(true);
    }
  });

  if (!spawnFailed && (await isOllamaRunning())) {
    console.log(chalk.green('✅ Ollama started successfully.'));
    ollamaAvailable = true;
    return;
  }

  // If we get here, Ollama likely isn't installed
  console.log(chalk.red('❌ Ollama is not installed or not in PATH.'));
  console.log(chalk.yellow(`📦 Ollama is required to run local AI models.`));
  const answer = await promptUser('🌐 Would you like to open the download page in your browser? (y/N): ');
  if (answer.toLowerCase() === 'y') {
    openBrowser(OLLAMA_URL);
  }
  console.log(chalk.yellow('⏳ Waiting for you to install Ollama and press Enter to continue...'));
  await promptUser('👉 Press Enter once Ollama is installed and ready: ');

  // Retry once
  if (await isOllamaRunning()) {
    console.log(chalk.green('✅ Ollama detected. Continuing...'));
    ollamaAvailable = true;
  } else {
    console.log(chalk.red('❌ Ollama still not detected. Please check your installation.'));
    process.exit(1);
  }
}
// 🧰 List installed models
/**
 * Run `ollama list` and return the installed model names that appear in
 * REQUIRED_MODELS. Returns [] when the command fails (e.g. ollama missing).
 *
 * @returns {Promise<string[]>} Subset of REQUIRED_MODELS found installed.
 */
async function getInstalledModels() {
  try {
    const output = execSync('ollama list', { encoding: 'utf-8' });
    // Each row's first whitespace-delimited column is "name:tag";
    // keep only the bare model name before the colon.
    const names = output.split('\n').map((row) => {
      const [firstColumn] = row.split(/\s+/);
      return firstColumn.split(':')[0];
    });
    return names.filter((name) => REQUIRED_MODELS.includes(name));
  } catch {
    // Command unavailable or failed — report nothing installed.
    return [];
  }
}
// 📥 Download missing models
/**
 * Compare installed models against REQUIRED_MODELS and, with the user's
 * consent, pull any that are missing. Exits the process if the user
 * declines or a pull fails. No-op when Ollama itself isn't available.
 */
async function ensureModelsDownloaded() {
  if (!ollamaAvailable) return;

  const installed = await getInstalledModels();
  const missing = REQUIRED_MODELS.filter((model) => !installed.includes(model));
  if (missing.length === 0) {
    console.log(chalk.green('✅ All required models are installed.'));
    return;
  }

  console.log(chalk.yellow(`📦 Missing models: ${missing.join(', ')}`));
  const answer = await promptUser('⬇️ Do you want to download them now? (y/N): ');
  if (answer.toLowerCase() !== 'y') {
    console.log(chalk.red('🚫 Aborting due to missing models.'));
    process.exit(1);
  }

  // Pull each missing model in turn; abort on the first failure.
  for (const model of missing) {
    try {
      console.log(`⬇️ Pulling ${model}...`);
      execSync(`ollama pull ${model}`, { stdio: 'inherit' });
      console.log(chalk.green(`✅ Pulled ${model}`));
    } catch {
      console.log(chalk.red(`❌ Failed to pull ${model}.`));
      process.exit(1);
    }
  }
}
// 🏁 Main bootstrap logic
/**
 * Entry point: run the setup phases strictly in order — config/DB init,
 * then Ollama startup, then model downloads (each depends on the last).
 */
export async function bootstrap() {
  const phases = [autoInitIfNeeded, ensureOllamaRunning, ensureModelsDownloaded];
  for (const phase of phases) {
    await phase();
  }
}