jay-code
Streamlined AI CLI orchestration engine with mathematical rigor and enterprise-grade reliability
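// jay-code installer: probes GPU/RAM capacity, optionally installs Ollama with
// Llama 3.3 70B for local inference, writes ~/.jay-code/config.json, and then
// installs the jay-code CLI globally via npm.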
import os from 'node:os';
import path from 'node:path';
import fs from 'node:fs';
import { spawn, exec } from 'node:child_process';
import { promisify } from 'node:util';
const execAsync = promisify(exec);
console.log('Jay-Code Installation: Enterprise-grade AI orchestration with quality controls');
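// Detect available GPU memory (in MB): sum nvidia-smi per-GPU totals on Linux,
// or estimate 60% of unified memory on Apple Silicon; returns 0 when no usable
// GPU is found so the installer falls back to the API-only path.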
async function detectVRAM() {
  try {
    if (process.platform === 'linux') {
      try {
        const { stdout } = await execAsync('nvidia-smi --query-gpu=memory.total --format=csv,noheader,nounits');
        const totalVRAM = stdout.trim().split('\n')
          .map(line => parseInt(line, 10))
          .reduce((sum, vram) => sum + vram, 0);
        return totalVRAM;
      } catch (error) {
        console.log('NVIDIA GPU not detected, checking alternatives...');
        return 0;
      }
    } else if (process.platform === 'darwin') {
      // Apple Silicon unified memory detection
      const { stdout } = await execAsync('sysctl hw.memsize');
      const memBytes = parseInt(stdout.match(/\d+/)[0], 10);
      const memGB = Math.floor(memBytes / 1024 / 1024 / 1024);
      // Conservative estimate: 60% of unified memory available for models
      return Math.floor(memGB * 1024 * 0.6);
    }
    return 0;
  } catch (error) {
    console.log('VRAM detection failed, assuming API-only configuration');
    return 0;
  }
}
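// Compare detected VRAM/RAM against the minimums for running Llama 3.3 70B
// locally (40 GB VRAM, 64 GB RAM) and report whether local mode is viable.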
async function checkSystemRequirements() {
  const totalVRAM = await detectVRAM();
  const totalRAM = Math.floor(os.totalmem() / 1024 / 1024); // MB
  console.log(`System Analysis:`);
  console.log(`- VRAM: ${totalVRAM}MB`);
  console.log(`- RAM: ${totalRAM}MB`);
  console.log(`- Platform: ${process.platform}`);
  const meetsRequirements = totalVRAM >= 40000 && totalRAM >= 64000;
  if (meetsRequirements) {
    console.log('System meets requirements for local Llama 3.3 70B');
    return { canRunLocal: true, totalVRAM, totalRAM };
  } else {
    console.log('Insufficient resources for local model, configuring Claude API fallback');
    console.log('Requirements: 40GB+ VRAM, 64GB+ RAM');
    return { canRunLocal: false, totalVRAM, totalRAM };
  }
}
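// Ensure Ollama is available when local mode was selected: reuse an existing
// install if found, otherwise run the official install script, start the
// service, and pull the Llama 3.3 70B Instruct model.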
async function installOllama(canRunLocal) {
  if (!canRunLocal) {
    console.log('Skipping Ollama installation (using API-only configuration)');
    return true;
  }
  try {
    const { stdout } = await execAsync('ollama --version');
    console.log('Ollama found:', stdout.trim());
    return true;
  } catch (error) {
    console.log('Installing Ollama for local model support...');
    try {
      await execAsync('curl -fsSL https://ollama.com/install.sh | sh');
      // Start the Ollama service in the background; unref() so the detached
      // child does not keep the installer process alive after it finishes
      spawn('ollama', ['serve'], { detached: true, stdio: 'ignore' }).unref();
      await new Promise(resolve => setTimeout(resolve, 5000));
      // Install Llama 3.3 70B
      console.log('Downloading Llama 3.3 70B Instruct (this may take 30+ minutes)...');
      await execAsync('ollama pull llama3.3:70b-instruct', { timeout: 1800000 }); // 30 min timeout
      console.log('Local model installed successfully');
      return true;
    } catch (installError) {
      console.error('Ollama installation failed:', installError.message);
      return false;
    }
  }
}
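// Write ~/.jay-code/config.json: local-primary mode with a Claude fallback when
// the hardware check passed, otherwise an API-only Claude configuration, plus
// shared planning/QA agent settings. The '${ANTHROPIC_API_KEY}' value is left as
// a literal placeholder, presumably resolved by jay-code from the environment at
// runtime.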
async function setupConfiguration(canRunLocal) {
  const configDir = path.join(os.homedir(), '.jay-code');
  const configFile = path.join(configDir, 'config.json');
  try {
    await fs.promises.mkdir(configDir, { recursive: true });
    let config;
    if (canRunLocal) {
      config = {
        models: {
          primary: {
            type: 'ollama',
            endpoint: 'http://localhost:11434',
            model: 'llama3.3:70b-instruct',
            timeout: 120000
          },
          fallback: {
            type: 'anthropic',
            model: 'claude-3-5-sonnet-20241022',
            apiKey: '${ANTHROPIC_API_KEY}'
          }
        },
        mode: 'local-primary',
        systemRequirements: { met: true, vramRequired: 40000 }
      };
    } else {
      config = {
        models: {
          primary: {
            type: 'anthropic',
            model: 'claude-3-5-sonnet-20241022',
            apiKey: '${ANTHROPIC_API_KEY}'
          }
        },
        mode: 'api-only',
        systemRequirements: { met: false, reason: 'insufficient-vram' }
      };
    }
    // Add common configuration
    config.agents = {
      planning: { enabled: true, complexityThreshold: 0.7 },
      qa: { enabled: true, passThreshold: 0.8, strictMode: true }
    };
    await fs.promises.writeFile(configFile, JSON.stringify(config, null, 2));
    console.log('Configuration created:', configFile);
    return true;
  } catch (error) {
    console.error('Configuration setup failed:', error.message);
    return false;
  }
}
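// Installation pipeline: check hardware, install the local model stack if
// possible (falling back to API mode on failure), write the configuration,
// and install the jay-code CLI globally.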
async function main() {
  console.log('\n=== Jay-Code Quality-Controlled Installation ===\n');
  // Step 1: Check system capabilities
  const systemInfo = await checkSystemRequirements();
  // Step 2: Install based on capabilities
  if (systemInfo.canRunLocal) {
    console.log('\nInstalling local model configuration...');
    const ollamaOk = await installOllama(true);
    if (!ollamaOk) {
      console.log('Local installation failed, falling back to API configuration');
      systemInfo.canRunLocal = false;
    }
  }
  // Step 3: Setup configuration
  const configOk = await setupConfiguration(systemInfo.canRunLocal);
  if (!configOk) {
    console.log('Warning: configuration file could not be written; manual setup may be required');
  }
  // Step 4: Install Jay-Code
  console.log('\nInstalling Jay-Code CLI...');
  try {
    await execAsync('npm install -g jay-code@alpha');
    console.log('Jay-Code installed successfully');
  } catch (error) {
    console.error('Installation failed:', error.message);
    process.exit(1);
  }
  // Summary
  console.log('\n=== Installation Complete ===');
  if (systemInfo.canRunLocal) {
    console.log('Mode: Local Llama 3.3 70B Instruct');
    console.log('Performance: Maximum quality with full privacy');
    console.log('Note: Set ANTHROPIC_API_KEY for fallback support');
  } else {
    console.log('Mode: Claude API (quality-controlled)');
    console.log('Required: Set ANTHROPIC_API_KEY environment variable');
    console.log('Upgrade path: Add 40GB+ VRAM for local model support');
  }
  console.log('\nQuick start:');
  console.log(' export ANTHROPIC_API_KEY=your_api_key');
  console.log(' jay-code generate "Create a TypeScript REST API"');
  console.log(' jay-code health');
}

main().catch(error => {
  console.error('Installation failed:', error.message);
  process.exit(1);
});