// automata-metaverse
// Automaton execution engines for self-referential CanvasL/JSONL systems
// 308 lines • 14.1 kB — JavaScript
/**
* Scalable Automaton with GPU Acceleration and Multi-Core Support
* Dynamically scales based on available resources (CPU cores, GPU, memory)
*/
import * as fs from 'fs';
import { createRequire } from 'module';
import * as os from 'os';
import * as path from 'path';
import { fileURLToPath, pathToFileURL } from 'url';
import { MemoryOptimizedAutomaton } from './automaton-memory-optimized.js';
/**
 * Scales automaton execution across CPU cores (Promise-based parallel
 * streams) and, when the optional `gpu.js` package is installed and enabled,
 * a GPU. Wraps a MemoryOptimizedAutomaton and periodically runs batched
 * self-modifications, auto-scaling from process memory pressure.
 */
class ScalableAutomaton {
    /**
     * @param {string} filePath - JSONL automaton file handed to the
     *     underlying MemoryOptimizedAutomaton.
     * @param {object} [config] - Optional overrides for the scaling knobs
     *     (maxWorkers, workerInterval, enableGPU, autoScale, ...).
     */
    constructor(filePath, config) {
        this.workers = [];
        this.workerResults = new Map();
        this.activeWorkers = 0;
        this.gpuAvailable = false;
        this.cpuCores = os.cpus().length;
        // `??` (not `||`) so an explicit 0 (e.g. workerInterval: 0) is honored.
        this.config = {
            maxWorkers: config?.maxWorkers ?? Math.max(1, this.cpuCores - 1), // leave 1 core for main
            workerInterval: config?.workerInterval ?? 100, // ms between worker executions
            enableWorkerThreads: config?.enableWorkerThreads ?? true,
            enableGPU: config?.enableGPU ?? false,
            gpuBatchSize: config?.gpuBatchSize ?? 1000,
            autoScale: config?.autoScale ?? true,
            scaleUpThreshold: config?.scaleUpThreshold ?? 0.3, // scale up below 30% memory
            scaleDownThreshold: config?.scaleDownThreshold ?? 0.1, // scale down above 10% memory
            minWorkers: config?.minWorkers ?? 1,
            maxWorkersLimit: config?.maxWorkersLimit ?? this.cpuCores * 2,
            parallelModifications: config?.parallelModifications ?? this.cpuCores,
            executionBatchSize: config?.executionBatchSize ?? 100,
        };
        // Main automaton tuned with higher limits for scaled workloads.
        this.mainAutomaton = new MemoryOptimizedAutomaton(filePath, {
            maxObjects: 10000, // increased for scalability
            maxExecutionHistory: 2000,
            gcInterval: 10000,
            trimInterval: 20000,
            memoryPressureThreshold: 500, // higher threshold for scaling
            enableGC: true,
        });
        this.checkGPUAvailability();
        if (this.config.enableWorkerThreads) {
            this.initializeWorkers();
        }
        console.log('🚀 Scalable Automaton initialized');
        console.log(`   CPU Cores: ${this.cpuCores}`);
        console.log(`   Workers: ${this.config.maxWorkers}`);
        console.log(`   GPU: ${this.gpuAvailable ? '✅ Available' : '❌ Not available'}`);
        console.log(`   Parallel Modifications: ${this.config.parallelModifications}`);
        console.log(`   Auto-Scale: ${this.config.autoScale ? '✅ Enabled' : '❌ Disabled'}`);
    }
    /**
     * Detects the optional `gpu.js` dependency and records the result in
     * `this.gpuAvailable`. Never throws: every failure path logs and simply
     * disables GPU support. This file is an ES module, so the CommonJS
     * `require`/`__dirname` it needs are rebuilt from `import.meta.url`.
     */
    checkGPUAvailability() {
        const require = createRequire(import.meta.url);
        try {
            // Resolve first; throws when gpu.js is not installed at all.
            require.resolve('gpu.js');
            try {
                const GPU = require('gpu.js');
                if (GPU && typeof GPU === 'function') {
                    // Instantiate in CPU mode just to prove the module works.
                    const testGPU = new GPU({ mode: 'cpu' });
                    void testGPU;
                    this.gpuAvailable = true;
                    console.log('✅ GPU.js detected and working - GPU acceleration available');
                }
                else {
                    throw new Error('GPU.js module loaded but constructor not available');
                }
            }
            catch (loadError) {
                this.gpuAvailable = false;
                console.log('⚠️ GPU.js found but failed to initialize:', loadError.message);
                console.log('   Continuing without GPU acceleration');
            }
        }
        catch (resolveError) {
            this.gpuAvailable = false;
            // Distinguish "declared in package.json but not installed" from
            // "not declared at all" so the hint printed is actionable.
            const moduleDir = path.dirname(fileURLToPath(import.meta.url));
            const packageJsonPath = path.join(moduleDir, '../../package.json');
            if (fs.existsSync(packageJsonPath)) {
                try {
                    const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf-8'));
                    const hasGpuJs = (packageJson.dependencies && packageJson.dependencies['gpu.js']) ||
                        (packageJson.optionalDependencies && packageJson.optionalDependencies['gpu.js']);
                    if (hasGpuJs) {
                        console.log('⚠️ GPU.js is in package.json but not installed (may require native build tools)');
                        console.log('   Install build tools or run: npm install gpu.js');
                    }
                    else {
                        console.log('⚠️ GPU.js not found - Install with: npm install gpu.js');
                    }
                }
                catch {
                    console.log('⚠️ GPU.js not found - Install with: npm install gpu.js');
                }
            }
            else {
                console.log('⚠️ GPU.js not found - Install with: npm install gpu.js');
            }
            console.log('   Continuing without GPU acceleration');
        }
    }
    /**
     * Worker setup. Real worker_threads would need compiled JS entry files,
     * so this implementation deliberately relies on Promise-based
     * parallelism instead (see executeParallelModifications).
     */
    initializeWorkers() {
        console.log(`✅ Using parallel execution (${this.config.maxWorkers} parallel streams)`);
    }
    /**
     * Runs `parallelModifications` self-modifications concurrently on the
     * main automaton, each staggered by 10ms to avoid a thundering herd.
     * @returns {Promise<void>} resolves once every modification has run.
     */
    async executeParallelModifications() {
        const tasks = Array.from({ length: this.config.parallelModifications }, (_, i) => new Promise((resolve) => {
            setTimeout(() => {
                this.mainAutomaton.executeSelfModification();
                resolve();
            }, i * 10); // stagger executions slightly
        }));
        await Promise.all(tasks);
    }
    /**
     * Dispatches an 'execute' message to each live worker thread. Because
     * initializeWorkers never populates `this.workers`, this is currently a
     * no-op; kept for a future worker_threads implementation.
     * NOTE(review): `activeWorkers` is incremented but never decremented
     * here — workers would need to report completion to balance it.
     * @returns {Promise<void>}
     */
    async executeWorkerModifications() {
        if (this.workers.length === 0) {
            return;
        }
        const promises = [];
        for (let i = 0; i < this.workers.length; i++) {
            const worker = this.workers[i];
            if (!worker || !worker.threadId)
                continue;
            this.activeWorkers++;
            promises.push(new Promise((resolve) => {
                worker.postMessage({
                    type: 'execute',
                    workerId: i,
                });
                // Workers respond asynchronously; resolve after a grace period.
                setTimeout(() => resolve(), this.config.workerInterval);
            }));
        }
        await Promise.all(promises);
    }
    /**
     * Runs a demonstration batch on the GPU (doubles each element of a
     * gpuBatchSize-long sequence). No-op unless GPU support is both
     * available and enabled. Errors are logged, never thrown.
     * @returns {Promise<void>}
     */
    async executeGPUBatch() {
        if (!this.gpuAvailable || !this.config.enableGPU) {
            return;
        }
        try {
            // ESM has no ambient `require`; rebuild it for the CJS-only gpu.js.
            const require = createRequire(import.meta.url);
            const GPU = require('gpu.js');
            const gpu = new GPU();
            // Example: GPU-accelerated Church encoding computation.
            const churchEncode = gpu.createKernel(function (data) {
                // GPU.js provides the thread context via `this`.
                const index = this.thread.x;
                const value = data[index];
                return value !== undefined ? value * 2 : 0;
            }).setOutput([this.config.gpuBatchSize]);
            const input = Array.from({ length: this.config.gpuBatchSize }, (_, i) => i);
            const result = churchEncode(input);
            console.log(`🔥 GPU processed ${result.length} operations`);
        }
        catch (error) {
            console.error('GPU execution error:', error);
        }
    }
    /**
     * @returns {number} Fraction of total system memory used by this
     *     process's V8 heap (0..1). Heap-vs-total is a rough proxy, not RSS.
     */
    getMemoryUsage() {
        const mem = process.memoryUsage();
        return mem.heapUsed / os.totalmem();
    }
    /**
     * Adjusts the (logical) worker count based on memory pressure. Scale-up
     * and scale-down are mutually exclusive per tick — the original
     * overlapping conditions could log both for mid-range usage.
     * NOTE(review): currently only logs intent; the worker pool itself is
     * never actually resized.
     */
    scaleWorkers() {
        if (!this.config.autoScale) {
            return;
        }
        const memoryUsage = this.getMemoryUsage();
        const currentWorkers = this.workers.length;
        if (memoryUsage < this.config.scaleUpThreshold &&
            currentWorkers < this.config.maxWorkersLimit) {
            // Headroom available: add workers proportionally to the slack.
            const workersToAdd = Math.min(Math.floor((this.config.scaleUpThreshold - memoryUsage) * 10), this.config.maxWorkersLimit - currentWorkers);
            if (workersToAdd > 0) {
                console.log(`📈 Scaling up: Adding ${workersToAdd} workers (Memory: ${(memoryUsage * 100).toFixed(1)}%)`);
                // Add workers (simplified - would need to recreate worker pool)
            }
        }
        else if (memoryUsage > this.config.scaleDownThreshold &&
            currentWorkers > this.config.minWorkers) {
            // Memory pressure: shed workers proportionally to the overshoot.
            const workersToRemove = Math.min(Math.floor((memoryUsage - this.config.scaleDownThreshold) * 10), currentWorkers - this.config.minWorkers);
            if (workersToRemove > 0) {
                console.log(`📉 Scaling down: Removing ${workersToRemove} workers (Memory: ${(memoryUsage * 100).toFixed(1)}%)`);
                // Remove workers (simplified - would need to recreate worker pool)
            }
        }
    }
    /**
     * One full execution tick: parallel modifications, worker dispatch,
     * optional GPU batch, then auto-scaling, followed by a timing report.
     * @returns {Promise<void>}
     */
    async executeScalable() {
        const startTime = Date.now();
        await this.executeParallelModifications();
        if (this.config.enableWorkerThreads) {
            await this.executeWorkerModifications();
        }
        if (this.config.enableGPU && this.gpuAvailable) {
            await this.executeGPUBatch();
        }
        this.scaleWorkers();
        const duration = Date.now() - startTime;
        const objects = (this.mainAutomaton.objects || []).length;
        const memUsage = process.memoryUsage();
        console.log(`⚡ Scalable execution completed in ${duration}ms`);
        console.log(`   Objects: ${objects}`);
        console.log(`   Memory: ${(memUsage.heapUsed / 1024 / 1024).toFixed(2)}MB`);
        console.log(`   Active Workers: ${this.activeWorkers}/${this.workers.length}`);
    }
    /**
     * Starts the periodic execution loop and installs a SIGINT handler that
     * tears everything down before exiting.
     * @param {number} [intervalMs=1000] - Milliseconds between ticks.
     */
    start(intervalMs = 1000) {
        console.log(`🚀 Starting scalable automaton (interval: ${intervalMs}ms)`);
        const interval = setInterval(async () => {
            try {
                await this.executeScalable();
            }
            catch (error) {
                // Keep the loop alive: one bad tick must not kill the process.
                console.error('Scalable execution error:', error);
            }
        }, intervalMs);
        process.on('SIGINT', () => {
            console.log('\n🛑 Stopping scalable automaton...');
            clearInterval(interval);
            this.destroy();
            process.exit(0);
        });
    }
    /** Releases the underlying automaton; parallel streams need no cleanup. */
    destroy() {
        this.mainAutomaton.destroy();
        console.log('✅ Scalable automaton destroyed');
    }
    /**
     * @returns {object} Snapshot of CPU/worker/GPU/memory/object metrics,
     *     suitable for periodic logging (see the CLI block below).
     */
    getStats() {
        const memUsage = process.memoryUsage();
        const objects = (this.mainAutomaton.objects || []).length;
        return {
            cpuCores: this.cpuCores,
            workers: {
                total: this.config.maxWorkers,
                active: this.activeWorkers,
            },
            gpu: {
                available: this.gpuAvailable,
                enabled: this.config.enableGPU,
            },
            memory: {
                heapUsed: memUsage.heapUsed / 1024 / 1024,
                heapTotal: memUsage.heapTotal / 1024 / 1024,
                rss: memUsage.rss / 1024 / 1024,
                usagePercent: (this.getMemoryUsage() * 100).toFixed(1),
            },
            objects: {
                count: objects,
                modifications: this.mainAutomaton.selfModificationCount || 0,
            },
            config: {
                parallelModifications: this.config.parallelModifications,
                executionBatchSize: this.config.executionBatchSize,
                autoScale: this.config.autoScale,
            },
        };
    }
}
// Export for use in other modules
export { ScalableAutomaton };
// If run directly, start scalable automaton
if (require.main === module) {
const args = process.argv.slice(2);
const enableGPU = args.includes('--gpu');
const workers = args.find(arg => arg.startsWith('--workers='))?.split('=')[1];
const interval = args.find(arg => arg.startsWith('--interval='))?.split('=')[1];
const noAutoScale = args.includes('--no-auto-scale');
const automaton = new ScalableAutomaton('./automaton.jsonl', {
maxWorkers: workers ? parseInt(workers) : undefined,
enableGPU,
autoScale: !noAutoScale,
parallelModifications: os.cpus().length * 2, // 2x CPU cores for parallel modifications
});
automaton.start(interval ? parseInt(interval) : 1000);
// Print stats every 10 seconds
setInterval(() => {
const stats = automaton.getStats();
console.log('\nš Scalability Stats:');
console.log(` CPU Cores: ${stats.cpuCores}`);
console.log(` Workers: ${stats.workers.active}/${stats.workers.total}`);
console.log(` GPU: ${stats.gpu.enabled ? 'ā
' : 'ā'} (${stats.gpu.available ? 'Available' : 'Not Available'})`);
console.log(` Memory: ${stats.memory.heapUsed.toFixed(2)}MB / ${stats.memory.heapTotal.toFixed(2)}MB (${stats.memory.usagePercent}%)`);
console.log(` Objects: ${stats.objects.count} (${stats.objects.modifications} modifications)`);
console.log(` Parallel Modifications: ${stats.config.parallelModifications}`);
}, 10000);
console.log('\nš” Usage:');
console.log(' --gpu Enable GPU acceleration');
console.log(' --workers=N Set number of worker threads');
console.log(' --interval=N Set execution interval (ms)');
console.log(' --no-auto-scale Disable auto-scaling');
}
//# sourceMappingURL=automaton-scalable.js.map