scai
Version:
> **A local-first AI CLI for understanding, querying, and iterating on large codebases.**
> **100% local • No token costs • No cloud • No prompt injection • Private by design**
100 lines (99 loc) • 3.75 kB
JavaScript
// src/utils/checkModel.ts
import { spawn } from 'node:child_process';
import { existsSync } from 'node:fs';
// Base URL of the local Ollama HTTP API (Ollama's default port is 11434).
// Trailing slash is intentional: endpoint paths below are appended directly.
const OLLAMA_URL = 'http://127.0.0.1:11434/';
/* ──────────────────────────────────────────────── */
/* Public entry point (called from main.ts) */
/* ──────────────────────────────────────────────── */
/**
 * Ensure a local Ollama server is available, launching one if needed.
 *
 * Fast path: if the server already answers, report and list loaded models.
 * Slow path: spawn `ollama serve` detached, poll until it responds, then
 * log the server version and the currently loaded models.
 */
export async function startModelProcess() {
    const alreadyUp = await isModelRunning();
    if (alreadyUp) {
        console.log('✅ Model already running');
        await logRunningModels();
        return;
    }

    console.log('🚀 Serving model…');
    spawnOllamaSilently();
    await waitForModelReady();

    await logOllamaVersion();
    await logRunningModels();
    console.log('✅ Model ready');
}
/* ──────────────────────────────────────────────── */
/* Internals */
/* ──────────────────────────────────────────────── */
/**
 * Launch `ollama serve` as a detached background process with all of its
 * output suppressed, so the CLI's own console stays clean.
 *
 * Fix: the binary path was hard-coded to the macOS app bundle, which made
 * the CLI fail on Linux/WSL (and on macOS installs done via Homebrew).
 * We now prefer the app-bundle binary when it exists — preserving the
 * original macOS behavior — and otherwise fall back to `ollama` on PATH.
 */
function spawnOllamaSilently() {
    const macAppBinary = '/Applications/Ollama.app/Contents/Resources/ollama';
    const ollamaBin = existsSync(macAppBinary) ? macAppBinary : 'ollama';
    const child = spawn(ollamaBin, ['serve'], {
        stdio: 'ignore', // 👈 suppress ALL Ollama output
        detached: true,  // let the server outlive this CLI invocation
    });
    child.unref(); // don't keep the event loop alive waiting on the child
}
/**
 * Probe the Ollama HTTP endpoint.
 * Resolves true when the server answers with a 2xx status; resolves false
 * on any non-OK response or network failure (connection refused, etc.).
 */
async function isModelRunning() {
    try {
        const response = await fetch(OLLAMA_URL, { method: 'GET' });
        return response.ok;
    } catch {
        return false; // server not up (or unreachable) — treat as "not running"
    }
}
/**
 * Poll the Ollama server until it responds, or give up.
 *
 * @param timeoutMs - maximum time to keep polling (default 30 s)
 * @throws Error when the server is still unreachable at the deadline
 */
async function waitForModelReady(timeoutMs = 30000) {
    const deadline = Date.now() + timeoutMs;
    while (Date.now() < deadline) {
        if (await isModelRunning()) {
            return; // server answered — ready
        }
        await sleep(300); // brief pause between probes
    }
    throw new Error('Model failed to become ready');
}
/**
 * Promise-based delay: resolves after roughly `ms` milliseconds.
 */
function sleep(ms) {
    return new Promise((done) => setTimeout(done, ms));
}
/**
 * Print family / parameter-size / quantization / context details for a
 * named model, fetched from the Ollama `api/show` endpoint.
 * Best-effort: any failure is reduced to a single warning.
 *
 * @param model - model name as known to Ollama (e.g. "llama3:8b")
 */
async function logModelInfo(model) {
    try {
        const response = await fetch(`${OLLAMA_URL}api/show`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ name: model }),
        });
        const info = await response.json();

        console.log(`🧠 Model info: ${model}`);
        console.log(` • family: ${info.details?.family}`);
        console.log(` • params: ${info.details?.parameter_size}`);
        console.log(` • quantization: ${info.details?.quantization_level}`);
        console.log(` • context: ${info.parameters?.num_ctx ?? 'unknown'}`);
    } catch {
        console.warn(`⚠️ Failed to fetch info for model ${model}`);
    }
}
/**
 * Log the running Ollama server's version, fetched from `api/version`.
 * Best-effort: failures are reduced to a single warning.
 *
 * Fix: the catch clause bound an `err` variable it never used — dropped
 * the binding for consistency with every other handler in this file.
 */
async function logOllamaVersion() {
    try {
        const res = await fetch(`${OLLAMA_URL}api/version`);
        const json = await res.json();
        console.log(`🦙 Ollama version: ${json.version}`);
    }
    catch {
        console.warn('⚠️ Failed to fetch Ollama version');
    }
}
/**
 * List models currently loaded by the Ollama server (`api/ps` endpoint),
 * with parameter count, quantization level, and approximate resident size.
 * Best-effort: any failure is reduced to a single warning.
 */
async function logRunningModels() {
    try {
        const res = await fetch(`${OLLAMA_URL}api/ps`);
        const json = await res.json();
        const models = json.models;

        if (!models?.length) {
            console.log('🧊 No models currently loaded');
            return;
        }

        console.log('🔥 Running models:');
        for (const model of models) {
            // Bytes → GiB, one decimal place.
            const sizeGB = (model.size / 1024 ** 3).toFixed(1);
            console.log(
                ` • ${model.name} | ${model.details?.parameter_size ?? '??'} params` +
                ` | q=${model.details?.quantization_level ?? '?'} | ~${sizeGB} GB in RAM`
            );
        }
    } catch {
        console.warn('⚠️ Failed to fetch running models');
    }
}