codecrucible-synth
Version:
Production-Ready AI Development Platform with Multi-Voice Synthesis, Smithery MCP Integration, Enterprise Security, and Zero-Timeout Reliability
757 lines (661 loc) • 23 kB
text/typescript
/**
 * Secure Council Decision Engine - extends the base council engine with per-voice
 * isolation, security boundaries, and audit logging for enterprise-grade
 * multi-agent collaboration.
 */
import { CouncilDecisionEngine } from './council-decision-engine.js';
import {
  EnterpriseSecurityFramework,
  SecurityContext,
  AgentAction,
} from '../security/enterprise-security-framework.js';
import { logger } from '../logger.js';

export interface SecureCouncilConfig {
  maxParticipants: number;
  timeoutMs: number;
  requireConsensus: boolean;
  isolationLevel: IsolationLevel;
  securityPolicy: AgentSecurityPolicy;
  qualityThreshold: number;
  auditLevel: 'basic' | 'detailed' | 'comprehensive';
}

export interface SecureCouncilDecision {
  id: string;
  prompt: string;
  participants: VoiceParticipant[];
  perspectives: VoicePerspective[];
  synthesizedDecision: string;
  consensusReached: boolean;
  confidence: number;
  securityValidation: SecurityValidation;
  executionMetrics: ExecutionMetric[];
  qualityScore: number;
  auditTrail: CouncilAuditEntry[];
  timestamp: Date;
}

export interface VoiceParticipant {
  voiceId: string;
  isolatedContext: IsolatedContext;
  permissions: VoicePermissions;
  securityLevel: 'low' | 'medium' | 'high' | 'critical';
}

export interface IsolatedContext {
  contextId: string;
  memoryLimit: number;
  cpuLimit: number;
  networkAccess: boolean;
  fileSystemAccess: string[];
  allowedOperations: string[];
  securityViolations: SecurityViolation[];
  resourceUsage: ResourceUsage;
}

export interface VoicePermissions {
  canReadFiles: boolean;
  canWriteFiles: boolean;
  canExecuteCommands: boolean;
  canAccessNetwork: boolean;
  allowedTools: string[];
  restrictedTools: string[];
}

export interface VoicePerspective {
  voiceId: string;
  response: string;
  confidence: number;
  reasoning: string;
  securityAssessment: SecurityAssessment;
  executionTime: number;
  resourcesUsed: ResourceUsage;
  qualityMetrics: QualityMetrics;
}

export interface SecurityAssessment {
  threatLevel: 'none' | 'low' | 'medium' | 'high' | 'critical';
  violations: SecurityViolation[];
  mitigations: string[];
  recommendations: string[];
}

export interface SecurityViolation {
  type: string;
  severity: 'low' | 'medium' | 'high' | 'critical';
  description: string;
  timestamp: Date;
}

export interface ResourceUsage {
  memoryUsed: number;
  cpuUsed: number;
  executionTime: number;
  networkRequests: number;
  fileOperations: number;
}

export interface QualityMetrics {
  relevance: number;
  clarity: number;
  completeness: number;
  accuracy: number;
  innovation: number;
}

export interface ExecutionMetric {
  voiceId: string;
  duration: number;
  success: boolean;
  memoryUsed: number;
  securityViolations: SecurityViolation[];
  error?: string;
}

export interface CouncilAuditEntry {
  timestamp: Date;
  event: string;
  voiceId?: string;
  details: any;
  securityLevel: string;
}

export interface SecurityValidation {
  validated: boolean;
  overallRisk: 'low' | 'medium' | 'high' | 'critical';
  violations: SecurityViolation[];
  mitigations: string[];
  recommendations: string[];
}

export interface AgentSecurityPolicy {
  isolationRequired: boolean;
  maxExecutionTime: number;
  maxMemoryUsage: number;
  allowedOperations: string[];
  blockedOperations: string[];
  auditRequired: boolean;
}

export enum IsolationLevel {
  NONE = 'none',
  BASIC = 'basic',
  SANDBOX = 'sandbox',
  CONTAINER = 'container',
  VM = 'vm',
}

export class SecureCouncilDecisionEngine extends CouncilDecisionEngine {
  private isolationManager: SubAgentIsolationSystem;
  private auditLogger: CouncilAuditLogger;
  private activeCouncils: Map<string, SecureCouncilSession> = new Map();
  protected voiceSystemRef: any;
  protected securityFrameworkRef: any;

  constructor(voiceSystem: any, modelClient: any) {
    super(voiceSystem, modelClient);
    this.voiceSystemRef = voiceSystem;
    this.securityFrameworkRef = new EnterpriseSecurityFramework();
    this.isolationManager = new SubAgentIsolationSystem();
    this.auditLogger = new CouncilAuditLogger();
  }

  async conductSecureCouncilSession(
    prompt: string,
    voices: string[],
    config: SecureCouncilConfig,
    securityContext: SecurityContext
  ): Promise<SecureCouncilDecision> {
    const sessionId = this.generateSessionId();
    logger.info('Starting secure council session', {
      sessionId,
      voices,
      isolationLevel: config.isolationLevel,
    });
    // Track the active session so it can be cleaned up on completion or failure
    this.activeCouncils.set(sessionId, {
      id: sessionId,
      startTime: new Date(),
      participants: voices,
      config,
    });
    await this.auditLogger.logEvent({
      timestamp: new Date(),
      event: 'council_session_started',
      details: { sessionId, prompt: prompt.substring(0, 100), voices, config },
      securityLevel: 'info',
    });
    try {
      // Security validation of council request
      await this.validateCouncilRequest(prompt, voices, securityContext);
      // Create isolated execution contexts
      const isolatedContexts = await this.createIsolatedContexts(voices, config);
      // Execute council session with security boundaries
      const decision = await this.executeSecureSession(
        sessionId,
        prompt,
        isolatedContexts,
        config,
        securityContext
      );
      // Comprehensive security audit
      await this.auditCouncilDecision(decision);
      this.activeCouncils.delete(sessionId);
      logger.info('Secure council session completed', {
        sessionId,
        consensusReached: decision.consensusReached,
        qualityScore: decision.qualityScore,
      });
      return decision;
    } catch (error: any) {
      await this.auditLogger.logEvent({
        timestamp: new Date(),
        event: 'council_session_failed',
        details: { sessionId, error: error.message },
        securityLevel: 'error',
      });
      this.activeCouncils.delete(sessionId);
      throw error;
    }
  }

  private async validateCouncilRequest(
    prompt: string,
    voices: string[],
    securityContext: SecurityContext
  ): Promise<void> {
    // Validate prompt for malicious content
    const promptValidation = await this.securityFrameworkRef.validateAgentAction(
      'council-coordinator',
      {
        type: 'code_generation',
        agentId: 'council-coordinator',
        payload: { prompt },
        timestamp: new Date(),
      },
      securityContext
    );
    if (!promptValidation.allowed) {
      throw new Error(
        `Council request failed security validation: ${promptValidation.violations.map((v: any) => v.description).join(', ')}`
      );
    }
    // Validate voice participation
    for (const voiceId of voices) {
      if (!this.isVoiceAuthorized(voiceId, securityContext)) {
        throw new Error(`Voice ${voiceId} is not authorized for this security context`);
      }
    }
  }

  private async createIsolatedContexts(
    voices: string[],
    config: SecureCouncilConfig
  ): Promise<Map<string, IsolatedContext>> {
    const contexts = new Map<string, IsolatedContext>();
    for (const voiceId of voices) {
      const permissions = await this.getVoicePermissions(voiceId);
      const context = await this.isolationManager.createContext({
        agentId: voiceId,
        isolationLevel: config.isolationLevel,
        permissions,
        securityPolicy: config.securityPolicy,
        memoryLimit: this.calculateMemoryLimit(voiceId, config),
        cpuLimit: this.calculateCpuLimit(voiceId, config),
        timeoutMs: config.timeoutMs,
      });
      contexts.set(voiceId, context);
      logger.debug('Created isolated context for voice', { voiceId, contextId: context.contextId });
    }
    return contexts;
  }

  private async executeSecureSession(
    sessionId: string,
    prompt: string,
    contexts: Map<string, IsolatedContext>,
    config: SecureCouncilConfig,
    securityContext: SecurityContext
  ): Promise<SecureCouncilDecision> {
    const perspectives: VoicePerspective[] = [];
    const executionMetrics: ExecutionMetric[] = [];
    const auditTrail: CouncilAuditEntry[] = [];
    // Execute voices in parallel with isolation
    const voicePromises = Array.from(contexts.entries()).map(async ([voiceId, context]) => {
      const startTime = performance.now();
      try {
        await this.auditLogger.logEvent({
          timestamp: new Date(),
          event: 'voice_execution_started',
          voiceId,
          details: { contextId: context.contextId },
          securityLevel: 'debug',
        });
        const perspective = await this.executeVoiceInIsolation(voiceId, prompt, context, config);
        const duration = performance.now() - startTime;
        perspectives.push(perspective);
        executionMetrics.push({
          voiceId,
          duration,
          success: true,
          memoryUsed: context.resourceUsage.memoryUsed,
          securityViolations: context.securityViolations,
        });
        await this.auditLogger.logEvent({
          timestamp: new Date(),
          event: 'voice_execution_completed',
          voiceId,
          details: { duration, qualityScore: perspective.qualityMetrics },
          securityLevel: 'debug',
        });
      } catch (error: any) {
        executionMetrics.push({
          voiceId,
          duration: performance.now() - startTime,
          success: false,
          memoryUsed: context.resourceUsage.memoryUsed,
          securityViolations: context.securityViolations,
          error: error.message,
        });
        await this.auditLogger.logEvent({
          timestamp: new Date(),
          event: 'voice_execution_failed',
          voiceId,
          details: { error: error.message },
          securityLevel: 'error',
        });
      }
    });
    await Promise.allSettled(voicePromises);
    // Synthesize decision with security considerations
    const decision = await this.synthesizeSecureDecision(
      perspectives,
      executionMetrics,
      config,
      securityContext
    );
    // Add session metadata (the session id is passed in so logs and the decision share the same id)
    decision.id = sessionId;
    decision.prompt = prompt;
    decision.participants = Array.from(contexts.entries()).map(([voiceId, context]) => ({
      voiceId,
      isolatedContext: context,
      permissions: this.getVoicePermissionsSync(voiceId),
      securityLevel: this.getVoiceSecurityLevel(voiceId),
    }));
    decision.executionMetrics = executionMetrics;
    decision.auditTrail = auditTrail;
    decision.timestamp = new Date();
    return decision;
  }

  private async executeVoiceInIsolation(
    voiceId: string,
    prompt: string,
    context: IsolatedContext,
    config: SecureCouncilConfig
  ): Promise<VoicePerspective> {
    const startTime = performance.now();
    // Monitor resource usage
    const resourceMonitor = this.isolationManager.createResourceMonitor(context);
    try {
      // Execute voice with isolation
      const response = await this.isolationManager.executeInContext(context, async () => {
        return await this.voiceSystemRef.generateSingleVoiceResponse(voiceId, prompt, {});
      });
      // Security assessment of response
      const securityAssessment = await this.assessResponseSecurity(response, voiceId, context);
      // Quality metrics calculation
      const qualityMetrics = this.calculateQualityMetrics(response, prompt);
      return {
        voiceId,
        response: response.content,
        confidence: response.confidence || 0.8,
        reasoning: response.reasoning || 'No reasoning provided',
        securityAssessment,
        executionTime: performance.now() - startTime,
        resourcesUsed: resourceMonitor.getUsage(),
        qualityMetrics,
      };
    } finally {
      resourceMonitor.stop();
    }
  }

  private async synthesizeSecureDecision(
    perspectives: VoicePerspective[],
    executionMetrics: ExecutionMetric[],
    config: SecureCouncilConfig,
    securityContext: SecurityContext
  ): Promise<SecureCouncilDecision> {
    // Filter out perspectives with critical security violations
    const safePerspectives = perspectives.filter(
      p => p.securityAssessment.threatLevel !== 'critical'
    );
    if (safePerspectives.length === 0) {
      throw new Error('All perspectives failed security validation');
    }
    // Calculate consensus
    const consensusReached = this.calculateConsensusBoolean(
      safePerspectives,
      config.requireConsensus
    );
    // Synthesize final decision
    const synthesizedDecision = await this.synthesizeFromPerspectives(safePerspectives);
    // Overall security validation
    const securityValidation = await this.validateFinalDecision(
      synthesizedDecision,
      perspectives,
      securityContext
    );
    // Quality score calculation
    const qualityScore = this.calculateOverallQuality(safePerspectives);
    return {
      id: '', // Will be set by caller
      prompt: '', // Will be set by caller
      participants: [], // Will be set by caller
      perspectives: safePerspectives,
      synthesizedDecision,
      consensusReached,
      confidence: this.calculateOverallConfidence(safePerspectives),
      securityValidation,
      executionMetrics,
      qualityScore,
      auditTrail: [], // Will be set by caller
      timestamp: new Date(),
    };
  }

  private async assessResponseSecurity(
    response: any,
    voiceId: string,
    context: IsolatedContext
  ): Promise<SecurityAssessment> {
    const violations: SecurityViolation[] = [];
    const mitigations: string[] = [];
    const recommendations: string[] = [];
    // Check for sensitive information in response
    if (typeof response.content === 'string') {
      const sensitivePatterns = ['password', 'secret', 'api_key', 'private_key', 'token'];
      for (const pattern of sensitivePatterns) {
        if (response.content.toLowerCase().includes(pattern)) {
          violations.push({
            type: 'sensitive_data_exposure',
            severity: 'high',
            description: `Response contains potential sensitive data: ${pattern}`,
            timestamp: new Date(),
          });
          mitigations.push(`Sanitize ${pattern} from response`);
        }
      }
    }
    // Check context violations
    violations.push(...context.securityViolations);
    // Determine threat level
    const threatLevel = this.calculateThreatLevel(violations);
    // Generate recommendations
    if (violations.length > 0) {
      recommendations.push('Review voice permissions and isolation settings');
      recommendations.push('Implement additional content filtering');
    }
    return {
      threatLevel,
      violations,
      mitigations,
      recommendations,
    };
  }

  private calculateQualityMetrics(response: any, prompt: string): QualityMetrics {
    // Simplified quality calculation - in production this would use ML models
    const relevance = this.calculateRelevance(response.content, prompt);
    const clarity = this.calculateClarity(response.content);
    const completeness = this.calculateCompleteness(response.content, prompt);
    const accuracy = response.confidence || 0.8;
    const innovation = this.calculateInnovation(response.content);
    return {
      relevance,
      clarity,
      completeness,
      accuracy,
      innovation,
    };
  }

  // Implement our own consensus calculation since the base class method is private
  private calculateConsensusInternal(perspectives: any[]): number {
    if (perspectives.length === 0) return 0;
    const avgConfidence =
      perspectives.reduce((sum, p) => sum + (p.confidence || 0), 0) / perspectives.length;
    return Math.min(avgConfidence, 1.0);
  }

  private calculateConsensusBoolean(
    perspectives: VoicePerspective[],
    requireConsensus: boolean
  ): boolean {
    if (!requireConsensus) return true;
    // A single perspective trivially agrees with itself; also avoids dividing by zero below
    if (perspectives.length < 2) return true;
    // Simple consensus calculation based on similarity of responses
    const responses = perspectives.map(p => p.response);
    const similarities: number[] = [];
    for (let i = 0; i < responses.length; i++) {
      for (let j = i + 1; j < responses.length; j++) {
        similarities.push(this.calculateSimilarity(responses[i], responses[j]));
      }
    }
    const averageSimilarity = similarities.reduce((sum, sim) => sum + sim, 0) / similarities.length;
    return averageSimilarity > 0.7; // 70% similarity threshold
  }

  private async validateFinalDecision(
    decision: string,
    perspectives: VoicePerspective[],
    securityContext: SecurityContext
  ): Promise<SecurityValidation> {
    const allViolations = perspectives.flatMap(p => p.securityAssessment.violations);
    const criticalViolations = allViolations.filter(v => v.severity === 'critical');
    const highViolations = allViolations.filter(v => v.severity === 'high');
    const overallRisk: 'low' | 'medium' | 'high' | 'critical' =
      criticalViolations.length > 0
        ? 'critical'
        : highViolations.length > 2
          ? 'high'
          : allViolations.length > 5
            ? 'medium'
            : 'low';
    return {
      validated: overallRisk !== 'critical',
      overallRisk,
      violations: allViolations,
      mitigations: perspectives.flatMap(p => p.securityAssessment.mitigations),
      recommendations: perspectives.flatMap(p => p.securityAssessment.recommendations),
    };
  }

  // Helper methods
  private isVoiceAuthorized(voiceId: string, securityContext: SecurityContext): boolean {
    // Check if voice has required permissions for security context
    return true; // Simplified implementation
  }

  private async getVoicePermissions(voiceId: string): Promise<VoicePermissions> {
    // Return permissions based on voice role
    const basePermissions: VoicePermissions = {
      canReadFiles: true,
      canWriteFiles: false,
      canExecuteCommands: false,
      canAccessNetwork: false,
      allowedTools: ['read', 'analyze'],
      restrictedTools: ['write', 'execute', 'delete'],
    };
    // Enhance permissions based on voice type
    switch (voiceId) {
      case 'security':
        return {
          ...basePermissions,
          allowedTools: [...basePermissions.allowedTools, 'security-scan', 'vulnerability-check'],
        };
      case 'implementor':
        return {
          ...basePermissions,
          canWriteFiles: true,
          allowedTools: [...basePermissions.allowedTools, 'write', 'edit'],
        };
      default:
        return basePermissions;
    }
  }

  private getVoicePermissionsSync(voiceId: string): VoicePermissions {
    // Synchronous version for metadata
    return {
      canReadFiles: true,
      canWriteFiles: false,
      canExecuteCommands: false,
      canAccessNetwork: false,
      allowedTools: ['read', 'analyze'],
      restrictedTools: ['write', 'execute', 'delete'],
    };
  }

  private getVoiceSecurityLevel(voiceId: string): 'low' | 'medium' | 'high' | 'critical' {
    const securityLevels: Record<string, 'low' | 'medium' | 'high' | 'critical'> = {
      security: 'critical',
      guardian: 'high',
      implementor: 'medium',
      analyzer: 'medium',
      explorer: 'low',
    };
    return securityLevels[voiceId] || 'medium';
  }

  private calculateMemoryLimit(voiceId: string, config: SecureCouncilConfig): number {
    const baseLimits = {
      security: 256,
      analyzer: 512,
      implementor: 384,
      default: 256,
    };
    return (baseLimits as any)[voiceId] || baseLimits.default;
  }

  private calculateCpuLimit(voiceId: string, config: SecureCouncilConfig): number {
    return 50; // 50% CPU limit
  }

  private calculateThreatLevel(
    violations: SecurityViolation[]
  ): 'none' | 'low' | 'medium' | 'high' | 'critical' {
    if (violations.some(v => v.severity === 'critical')) return 'critical';
    if (violations.some(v => v.severity === 'high')) return 'high';
    if (violations.some(v => v.severity === 'medium')) return 'medium';
    if (violations.length > 0) return 'low';
    return 'none';
  }

  private async auditCouncilDecision(decision: SecureCouncilDecision): Promise<void> {
    await this.auditLogger.logEvent({
      timestamp: new Date(),
      event: 'council_decision_completed',
      details: {
        sessionId: decision.id,
        consensusReached: decision.consensusReached,
        qualityScore: decision.qualityScore,
        securityRisk: decision.securityValidation.overallRisk,
        participantCount: decision.participants.length,
      },
      securityLevel: 'info',
    });
  }

  protected override generateSessionId(): string {
    return `council_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
  }

  // Simplified implementations for quality metrics
  private calculateRelevance(content: string, prompt: string): number {
    return 0.8;
  }

  private calculateClarity(content: string): number {
    return 0.85;
  }

  private calculateCompleteness(content: string, prompt: string): number {
    return 0.8;
  }

  private calculateInnovation(content: string): number {
    return 0.75;
  }

  private calculateSimilarity(response1: string, response2: string): number {
    return 0.7;
  }

  private calculateOverallConfidence(perspectives: VoicePerspective[]): number {
    return perspectives.reduce((sum, p) => sum + p.confidence, 0) / perspectives.length;
  }

  private calculateOverallQuality(perspectives: VoicePerspective[]): number {
    const avgQuality =
      perspectives.reduce((sum, p) => {
        const q = p.qualityMetrics;
        return sum + (q.relevance + q.clarity + q.completeness + q.accuracy + q.innovation) / 5;
      }, 0) / perspectives.length;
    return avgQuality * 100;
  }

  private async synthesizeFromPerspectives(perspectives: VoicePerspective[]): Promise<string> {
    return perspectives.map(p => p.response).join('\n\n--- SYNTHESIS ---\n\n');
  }
}
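
// --- Hypothetical usage sketch (not part of the original module) ---
// A minimal example of how the secure council engine might be invoked, assuming the
// caller already holds a voice system, a model client, and a SecurityContext. The voice
// names and config values below are illustrative, not required by the API.
export async function exampleSecureCouncilRun(
  voiceSystem: any,
  modelClient: any,
  securityContext: SecurityContext
): Promise<SecureCouncilDecision> {
  const engine = new SecureCouncilDecisionEngine(voiceSystem, modelClient);
  const config: SecureCouncilConfig = {
    maxParticipants: 3,
    timeoutMs: 60_000,
    requireConsensus: true,
    isolationLevel: IsolationLevel.SANDBOX,
    securityPolicy: {
      isolationRequired: true,
      maxExecutionTime: 60_000,
      maxMemoryUsage: 512,
      allowedOperations: ['read', 'analyze'],
      blockedOperations: ['execute', 'delete'],
      auditRequired: true,
    },
    qualityThreshold: 0.7,
    auditLevel: 'detailed',
  };
  return engine.conductSecureCouncilSession(
    'Review the proposed module for security issues',
    ['security', 'analyzer', 'implementor'],
    config,
    securityContext
  );
}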

// Supporting classes
class SubAgentIsolationSystem {
  async createContext(config: any): Promise<IsolatedContext> {
    return {
      contextId: `ctx_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`,
      memoryLimit: config.memoryLimit || 256,
      cpuLimit: config.cpuLimit || 50,
      networkAccess: false,
      fileSystemAccess: [],
      allowedOperations: config.permissions?.allowedTools || [],
      securityViolations: [],
      resourceUsage: {
        memoryUsed: 0,
        cpuUsed: 0,
        executionTime: 0,
        networkRequests: 0,
        fileOperations: 0,
      },
    };
  }

  async executeInContext<T>(context: IsolatedContext, fn: () => Promise<T>): Promise<T> {
    // In production, this would execute in actual isolation (container, sandbox, etc.)
    return await fn();
  }

  createResourceMonitor(context: IsolatedContext): any {
    return {
      getUsage: () => context.resourceUsage,
      stop: () => {},
    };
  }
}
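
// --- Illustrative sketch (not part of the original module) ---
// executeInContext above simply runs the callback; a real isolation layer would use a
// container, worker, or sandbox. As a minimal example of one boundary such a layer could
// enforce, the helper below races the callback against a timeout. The helper name and its
// use here are assumptions for illustration only; nothing in the module calls it.
export async function runWithTimeout<T>(fn: () => Promise<T>, timeoutMs: number): Promise<T> {
  let timer: ReturnType<typeof setTimeout> | undefined;
  const timeout = new Promise<never>((_, reject) => {
    timer = setTimeout(
      () => reject(new Error(`Isolated execution exceeded ${timeoutMs}ms`)),
      timeoutMs
    );
  });
  try {
    // Whichever settles first wins; a rejection from either side propagates to the caller
    return await Promise.race([fn(), timeout]);
  } finally {
    if (timer) clearTimeout(timer);
  }
}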

class CouncilAuditLogger {
  async logEvent(entry: CouncilAuditEntry): Promise<void> {
    logger.info('Council audit event', entry);
  }
}

interface SecureCouncilSession {
  id: string;
  startTime: Date;
  participants: string[];
  config: SecureCouncilConfig;
}