codecrucible-synth
Production-Ready AI Development Platform with Multi-Voice Synthesis, Smithery MCP Integration, Enterprise Security, and Zero-Timeout Reliability
/**
* Secure Council Decision Engine - Enhanced with isolation and security
* Implements enterprise-grade multi-agent collaboration with security boundaries
*/
import { CouncilDecisionEngine } from './council-decision-engine.js';
import { EnterpriseSecurityFramework } from '../security/enterprise-security-framework.js';
import { logger } from '../logger.js';
export var IsolationLevel;
(function (IsolationLevel) {
IsolationLevel["NONE"] = "none";
IsolationLevel["BASIC"] = "basic";
IsolationLevel["SANDBOX"] = "sandbox";
IsolationLevel["CONTAINER"] = "container";
IsolationLevel["VM"] = "vm";
})(IsolationLevel || (IsolationLevel = {}));
export class SecureCouncilDecisionEngine extends CouncilDecisionEngine {
isolationManager; // manages per-voice isolated execution contexts
auditLogger; // structured security audit trail
activeCouncils = new Map(); // sessionId -> in-flight session metadata
voiceSystemRef;
securityFrameworkRef;
constructor(voiceSystem, modelClient) {
super(voiceSystem, modelClient);
this.voiceSystemRef = voiceSystem;
this.securityFrameworkRef = new EnterpriseSecurityFramework();
this.isolationManager = new SubAgentIsolationSystem();
this.auditLogger = new CouncilAuditLogger();
}
async conductSecureCouncilSession(prompt, voices, config, securityContext) {
const sessionId = this.generateSessionId();
// Register the session so the cleanup in both the success and failure paths has something to remove
this.activeCouncils.set(sessionId, { prompt, voices, config, startedAt: new Date() });
logger.info('Starting secure council session', {
sessionId,
voices,
isolationLevel: config.isolationLevel,
});
await this.auditLogger.logEvent({
timestamp: new Date(),
event: 'council_session_started',
details: { sessionId, prompt: prompt.substring(0, 100), voices, config },
securityLevel: 'info',
});
try {
// Security validation of council request
await this.validateCouncilRequest(prompt, voices, securityContext);
// Create isolated execution contexts
const isolatedContexts = await this.createIsolatedContexts(voices, config);
// Execute council session with security boundaries
const decision = await this.executeSecureSession(sessionId, prompt, isolatedContexts, config, securityContext);
// Comprehensive security audit
await this.auditCouncilDecision(decision);
this.activeCouncils.delete(sessionId);
logger.info('Secure council session completed', {
sessionId,
consensusReached: decision.consensusReached,
qualityScore: decision.qualityScore,
});
return decision;
}
catch (error) {
await this.auditLogger.logEvent({
timestamp: new Date(),
event: 'council_session_failed',
details: { sessionId, error: error.message },
securityLevel: 'error',
});
this.activeCouncils.delete(sessionId);
throw error;
}
}
async validateCouncilRequest(prompt, voices, securityContext) {
// Validate prompt for malicious content
const promptValidation = await this.securityFrameworkRef.validateAgentAction('council-coordinator', {
type: 'code_generation',
agentId: 'council-coordinator',
payload: { prompt },
timestamp: new Date(),
}, securityContext);
if (!promptValidation.allowed) {
throw new Error(`Council request failed security validation: ${promptValidation.violations.map((v) => v.description).join(', ')}`);
}
// Validate voice participation
for (const voiceId of voices) {
if (!this.isVoiceAuthorized(voiceId, securityContext)) {
throw new Error(`Voice ${voiceId} is not authorized for this security context`);
}
}
}
async createIsolatedContexts(voices, config) {
const contexts = new Map();
for (const voiceId of voices) {
const permissions = await this.getVoicePermissions(voiceId);
const context = await this.isolationManager.createContext({
agentId: voiceId,
isolationLevel: config.isolationLevel,
permissions,
securityPolicy: config.securityPolicy,
memoryLimit: this.calculateMemoryLimit(voiceId, config),
cpuLimit: this.calculateCpuLimit(voiceId, config),
timeoutMs: config.timeoutMs,
});
contexts.set(voiceId, context);
logger.debug('Created isolated context for voice', { voiceId, contextId: context.contextId });
}
return contexts;
}
async executeSecureSession(sessionId, prompt, contexts, config, securityContext) {
const perspectives = [];
const executionMetrics = [];
const auditTrail = [];
// Execute voices in parallel with isolation
const voicePromises = Array.from(contexts.entries()).map(async ([voiceId, context]) => {
const startTime = performance.now();
try {
await this.auditLogger.logEvent({
timestamp: new Date(),
event: 'voice_execution_started',
voiceId,
details: { contextId: context.contextId },
securityLevel: 'debug',
});
const perspective = await this.executeVoiceInIsolation(voiceId, prompt, context, config);
const duration = performance.now() - startTime;
perspectives.push(perspective);
executionMetrics.push({
voiceId,
duration,
success: true,
memoryUsed: context.resourceUsage.memoryUsed,
securityViolations: context.securityViolations,
});
await this.auditLogger.logEvent({
timestamp: new Date(),
event: 'voice_execution_completed',
voiceId,
details: { duration, qualityMetrics: perspective.qualityMetrics },
securityLevel: 'debug',
});
}
catch (error) {
executionMetrics.push({
voiceId,
duration: performance.now() - startTime,
success: false,
memoryUsed: context.resourceUsage.memoryUsed,
securityViolations: context.securityViolations,
error: error.message,
});
await this.auditLogger.logEvent({
timestamp: new Date(),
event: 'voice_execution_failed',
voiceId,
details: { error: error.message },
securityLevel: 'error',
});
}
});
await Promise.allSettled(voicePromises);
// Synthesize decision with security considerations
const decision = await this.synthesizeSecureDecision(perspectives, executionMetrics, config, securityContext);
// Add session metadata
decision.id = sessionId;
decision.prompt = prompt;
decision.participants = Array.from(contexts.entries()).map(([voiceId, context]) => ({
voiceId,
isolatedContext: context,
permissions: this.getVoicePermissionsSync(voiceId),
securityLevel: this.getVoiceSecurityLevel(voiceId),
}));
decision.executionMetrics = executionMetrics;
decision.auditTrail = auditTrail;
decision.timestamp = new Date();
return decision;
}
async executeVoiceInIsolation(voiceId, prompt, context, config) {
const startTime = performance.now();
// Monitor resource usage
const resourceMonitor = this.isolationManager.createResourceMonitor(context);
try {
// Execute voice with isolation
const response = await this.isolationManager.executeInContext(context, async () => {
return await this.voiceSystemRef.generateSingleVoiceResponse(voiceId, prompt, {});
});
// Security assessment of response
const securityAssessment = await this.assessResponseSecurity(response, voiceId, context);
// Quality metrics calculation
const qualityMetrics = this.calculateQualityMetrics(response, prompt);
return {
voiceId,
response: response.content,
confidence: response.confidence ?? 0.8, // ?? so an explicit confidence of 0 is preserved
reasoning: response.reasoning || 'No reasoning provided',
securityAssessment,
executionTime: performance.now() - startTime,
resourcesUsed: resourceMonitor.getUsage(),
qualityMetrics,
};
}
finally {
resourceMonitor.stop();
}
}
async synthesizeSecureDecision(perspectives, executionMetrics, config, securityContext) {
// Filter out perspectives with critical security violations
const safePerspectives = perspectives.filter(p => p.securityAssessment.threatLevel !== 'critical');
if (safePerspectives.length === 0) {
throw new Error('All perspectives failed security validation');
}
// Calculate consensus
const consensusReached = this.calculateConsensusBoolean(safePerspectives, config.requireConsensus);
// Synthesize final decision
const synthesizedDecision = await this.synthesizeFromPerspectives(safePerspectives);
// Overall security validation
const securityValidation = await this.validateFinalDecision(synthesizedDecision, perspectives, securityContext);
// Quality score calculation
const qualityScore = this.calculateOverallQuality(safePerspectives);
return {
id: '', // Will be set by caller
prompt: '', // Will be set by caller
participants: [], // Will be set by caller
perspectives: safePerspectives,
synthesizedDecision,
consensusReached,
confidence: this.calculateOverallConfidence(safePerspectives),
securityValidation,
executionMetrics,
qualityScore,
auditTrail: [], // Will be set by caller
timestamp: new Date(),
};
}
async assessResponseSecurity(response, voiceId, context) {
const violations = [];
const mitigations = [];
const recommendations = [];
// Check for sensitive information in response
if (typeof response.content === 'string') {
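// Coarse substring screen (illustrative); it will also flag benign words such as "tokens" or "secretary"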
const sensitivePatterns = ['password', 'secret', 'api_key', 'private_key', 'token'];
for (const pattern of sensitivePatterns) {
if (response.content.toLowerCase().includes(pattern)) {
violations.push({
type: 'sensitive_data_exposure',
severity: 'high',
description: `Response contains potential sensitive data: ${pattern}`,
timestamp: new Date(),
});
mitigations.push(`Sanitize ${pattern} from response`);
}
}
}
// Check context violations
violations.push(...context.securityViolations);
// Determine threat level
const threatLevel = this.calculateThreatLevel(violations);
// Generate recommendations
if (violations.length > 0) {
recommendations.push('Review voice permissions and isolation settings');
recommendations.push('Implement additional content filtering');
}
return {
threatLevel,
violations,
mitigations,
recommendations,
};
}
calculateQualityMetrics(response, prompt) {
// Simplified quality calculation - in production would use ML models
const relevance = this.calculateRelevance(response.content, prompt);
const clarity = this.calculateClarity(response.content);
const completeness = this.calculateCompleteness(response.content, prompt);
const accuracy = response.confidence ?? 0.8;
const innovation = this.calculateInnovation(response.content);
return {
relevance,
clarity,
completeness,
accuracy,
innovation,
};
}
// Implement our own consensus calculation since base method is private
calculateConsensusInternal(perspectives) {
if (perspectives.length === 0)
return 0;
const avgConfidence = perspectives.reduce((sum, p) => sum + (p.confidence || 0), 0) / perspectives.length;
return Math.min(avgConfidence, 1.0);
}
calculateConsensusBoolean(perspectives, requireConsensus) {
if (!requireConsensus)
return true;
// Simple consensus calculation based on pairwise similarity of responses
const responses = perspectives.map(p => p.response);
// A single perspective trivially agrees with itself; returning early also
// avoids dividing by zero when there are no pairs to compare
if (responses.length < 2)
return true;
const similarities = [];
for (let i = 0; i < responses.length; i++) {
for (let j = i + 1; j < responses.length; j++) {
similarities.push(this.calculateSimilarity(responses[i], responses[j]));
}
}
const averageSimilarity = similarities.reduce((sum, sim) => sum + sim, 0) / similarities.length;
return averageSimilarity > 0.7; // 70% similarity threshold
}
async validateFinalDecision(decision, perspectives, securityContext) {
const allViolations = perspectives.flatMap(p => p.securityAssessment.violations);
const criticalViolations = allViolations.filter(v => v.severity === 'critical');
const highViolations = allViolations.filter(v => v.severity === 'high');
const overallRisk = criticalViolations.length > 0
? 'critical'
: highViolations.length > 2
? 'high'
: allViolations.length > 5
? 'medium'
: 'low';
return {
validated: overallRisk !== 'critical',
overallRisk,
violations: allViolations,
mitigations: perspectives.flatMap(p => p.securityAssessment.mitigations),
recommendations: perspectives.flatMap(p => p.securityAssessment.recommendations),
};
}
// Helper methods
isVoiceAuthorized(voiceId, securityContext) {
// Simplified implementation: every voice is allowed. A production build
// would check the voice's permissions against the security context.
return true;
}
async getVoicePermissions(voiceId) {
// Return permissions based on voice role
const basePermissions = {
canReadFiles: true,
canWriteFiles: false,
canExecuteCommands: false,
canAccessNetwork: false,
allowedTools: ['read', 'analyze'],
restrictedTools: ['write', 'execute', 'delete'],
};
// Enhance permissions based on voice type
switch (voiceId) {
case 'security':
return {
...basePermissions,
allowedTools: [...basePermissions.allowedTools, 'security-scan', 'vulnerability-check'],
};
case 'implementor':
return {
...basePermissions,
canWriteFiles: true,
allowedTools: [...basePermissions.allowedTools, 'write', 'edit'],
};
default:
return basePermissions;
}
}
getVoicePermissionsSync(voiceId) {
// Synchronous version for metadata
return {
canReadFiles: true,
canWriteFiles: false,
canExecuteCommands: false,
canAccessNetwork: false,
allowedTools: ['read', 'analyze'],
restrictedTools: ['write', 'execute', 'delete'],
};
}
getVoiceSecurityLevel(voiceId) {
const securityLevels = {
security: 'critical',
guardian: 'high',
implementor: 'medium',
analyzer: 'medium',
explorer: 'low',
};
return securityLevels[voiceId] || 'medium';
}
calculateMemoryLimit(voiceId, config) {
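// Per-voice memory budgets; the unit is assumed to be MB (not documented in this file)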
const baseLimits = {
security: 256,
analyzer: 512,
implementor: 384,
default: 256,
};
return baseLimits[voiceId] || baseLimits.default;
}
calculateCpuLimit(voiceId, config) {
return 50; // 50% CPU limit
}
calculateThreatLevel(violations) {
if (violations.some(v => v.severity === 'critical'))
return 'critical';
if (violations.some(v => v.severity === 'high'))
return 'high';
if (violations.some(v => v.severity === 'medium'))
return 'medium';
if (violations.length > 0)
return 'low';
return 'none';
}
async auditCouncilDecision(decision) {
await this.auditLogger.logEvent({
timestamp: new Date(),
event: 'council_decision_completed',
details: {
sessionId: decision.id,
consensusReached: decision.consensusReached,
qualityScore: decision.qualityScore,
securityRisk: decision.securityValidation.overallRisk,
participantCount: decision.participants.length,
},
securityLevel: 'info',
});
}
generateSessionId() {
return `council_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
}
// Simplified implementations for quality metrics
calculateRelevance(content, prompt) {
return 0.8;
}
calculateClarity(content) {
return 0.85;
}
calculateCompleteness(content, prompt) {
return 0.8;
}
calculateInnovation(content) {
return 0.75;
}
calculateSimilarity(response1, response2) {
return 0.7;
}
calculateOverallConfidence(perspectives) {
return perspectives.reduce((sum, p) => sum + p.confidence, 0) / perspectives.length;
}
calculateOverallQuality(perspectives) {
const avgQuality = perspectives.reduce((sum, p) => {
const q = p.qualityMetrics;
return sum + (q.relevance + q.clarity + q.completeness + q.accuracy + q.innovation) / 5;
}, 0) / perspectives.length;
return avgQuality * 100;
}
async synthesizeFromPerspectives(perspectives) {
return perspectives.map(p => p.response).join('\n\n--- SYNTHESIS ---\n\n');
}
}
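/*
 * calculateSimilarity above is stubbed to a constant 0.7, which makes the 70%
 * consensus threshold in calculateConsensusBoolean effectively vacuous. The
 * sketch below shows one cheap, dependency-free alternative: token-level
 * Jaccard overlap. It is an illustrative assumption, not the package's
 * actual similarity metric.
 */
function jaccardSimilarity(a, b) {
    // Tokenize on non-word characters and deduplicate
    const tokens = s => new Set(String(s).toLowerCase().split(/\W+/).filter(Boolean));
    const setA = tokens(a);
    const setB = tokens(b);
    if (setA.size === 0 && setB.size === 0)
        return 1; // two empty responses are treated as identical
    let shared = 0;
    for (const t of setA)
        if (setB.has(t))
            shared++;
    // |A intersect B| / |A union B|
    return shared / (setA.size + setB.size - shared);
}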
// Supporting classes
class SubAgentIsolationSystem {
async createContext(config) {
return {
contextId: `ctx_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`,
memoryLimit: config.memoryLimit || 256,
cpuLimit: config.cpuLimit || 50,
networkAccess: false,
fileSystemAccess: [],
allowedOperations: config.permissions?.allowedTools || [],
securityViolations: [],
resourceUsage: {
memoryUsed: 0,
cpuUsed: 0,
executionTime: 0,
networkRequests: 0,
fileOperations: 0,
},
};
}
async executeInContext(context, fn) {
// In production, this would execute in actual isolation (container, sandbox, etc.)
return await fn();
}
createResourceMonitor(context) {
return {
getUsage: () => context.resourceUsage,
stop: () => { },
};
}
}
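/*
 * executeInContext above is a pass-through. A minimal sketch of one
 * in-process guard it could add, enforcing the configured wall-clock timeout
 * with Promise.race, is shown below. This is illustrative only: the
 * container/VM levels in IsolationLevel imply out-of-process isolation that
 * a timeout alone does not provide, and the 30000 ms default is an
 * assumption rather than a documented value.
 */
async function executeWithTimeout(context, fn, timeoutMs = 30000) {
    let timer;
    const timeout = new Promise((_, reject) => {
        timer = setTimeout(() => reject(new Error(`Context ${context.contextId} timed out after ${timeoutMs}ms`)), timeoutMs);
    });
    try {
        // Whichever settles first wins; a timeout rejects and surfaces as a voice failure
        return await Promise.race([fn(), timeout]);
    }
    finally {
        clearTimeout(timer); // avoid keeping the event loop alive after completion
    }
}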
class CouncilAuditLogger {
async logEvent(entry) {
logger.info('Council audit event', entry);
}
}
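/*
 * End-to-end usage sketch (assumed, not from the package docs): it presumes
 * a voiceSystem exposing generateSingleVoiceResponse and a modelClient, as
 * the constructor requires, plus a securityContext whose exact shape this
 * file never pins down; the fields shown for it are hypothetical. The
 * config keys (isolationLevel, requireConsensus, timeoutMs, securityPolicy)
 * are the ones the engine actually reads.
 *
 * const engine = new SecureCouncilDecisionEngine(voiceSystem, modelClient);
 * const decision = await engine.conductSecureCouncilSession(
 *     'Design a rate limiter for the public API',
 *     ['security', 'implementor', 'analyzer'],
 *     { isolationLevel: IsolationLevel.SANDBOX, requireConsensus: true, timeoutMs: 30000, securityPolicy: {} },
 *     { userId: 'dev-1', roles: ['engineer'] } // hypothetical securityContext
 * );
 * console.log(decision.consensusReached, decision.qualityScore, decision.securityValidation.overallRisk);
 */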
//# sourceMappingURL=secure-council-engine.js.map