/*
 * agent-animate
 * Version: (unspecified in source listing)
 * AI-powered cinematic animations from workflow transcripts —
 * Jony Ive precision meets Hans Zimmer timing.
 * (Package listing stats: 415 lines, 348 LOC, ~15 kB, JavaScript.)
 */
/**
 * MultiAgentOrchestrator - Coordinates multiple AI agents for scene creation.
 *
 * Routes a workflow transcript either to a combined single-agent pipeline or
 * to a creation → validation → coordination multi-agent pipeline, resolves
 * conflicts between agent outputs, and tracks per-approach performance
 * metrics so the two approaches can be compared over time.
 */
class MultiAgentOrchestrator {
  constructor() {
    // Specialized agents; `optimization` is a reserved slot.
    this.agents = {
      creation: new SceneCreationAgent(),
      validation: new SceneValidationAgent(),
      optimization: null // Future: dedicated optimization agent
    };
    // Rolling 0-1 scores per approach, overwritten after each run.
    this.performanceMetrics = {
      singleAgent: { quality: 0, speed: 0, consistency: 0 },
      multiAgent: { quality: 0, speed: 0, consistency: 0 }
    };
    // Chronological record of log messages and coordination results.
    this.coordinationLog = [];
  }

  /**
   * Main orchestration method - decides between single vs multi-agent approach
   * and runs the chosen pipeline.
   * @param {string} transcript - Raw workflow transcript text.
   * @param {object} [options] - Tuning knobs (qualityTarget, enableLearning, ...).
   * @returns {Promise<object>} Scene result annotated with approach metadata.
   */
  async createOptimalScenes(transcript, options = {}) {
    this.log("🎭 Starting multi-agent orchestration");
    const approach = this.selectOptimalApproach(transcript, options);
    this.log(`Selected approach: ${approach}`);
    if (approach === 'single_agent') {
      return await this.runSingleAgentApproach(transcript, options);
    }
    return await this.runMultiAgentApproach(transcript, options);
  }

  /**
   * Analyzes when multi-agent provides benefits vs overhead.
   *
   * Multi-agent is beneficial when:
   * 1. High complexity requiring specialized reasoning
   * 2. High quality requirement needing validation
   * 3. Long-term learning/memory requirements
   * 4. Need for diverse perspectives
   *
   * NOTE(review): `options.maxProcessingTime` was previously read here but
   * never consulted by the scoring, so the dead local has been removed;
   * wire it into calculateMultiAgentBenefit if latency should matter.
   * @returns {'single_agent'|'multi_agent'}
   */
  selectOptimalApproach(transcript, options) {
    const complexity = this.analyzeComplexity(transcript);
    // ?? (not ||) so an explicit qualityTarget of 0 is respected.
    const qualityRequirement = options.qualityTarget ?? 0.8;
    const multiAgentScore = this.calculateMultiAgentBenefit(
      complexity,
      qualityRequirement,
      transcript.length,
      options
    );
    this.log(`Multi-agent benefit score: ${multiAgentScore}/1.0`);
    return multiAgentScore > 0.6 ? 'multi_agent' : 'single_agent';
  }

  /**
   * Runs the combined single-agent pipeline (creation + validation in one
   * reasoning chain) and records its performance metrics.
   */
  async runSingleAgentApproach(transcript, options) {
    this.log("🤖 Running single-agent approach");
    const startTime = Date.now();
    // Enhanced single agent with combined reasoning
    const enhancedAgent = new EnhancedSingleAgent();
    const result = await enhancedAgent.createScenesWithIntegratedValidation(transcript);
    const processingTime = Date.now() - startTime;
    this.updateMetrics('singleAgent', result, processingTime);
    return {
      ...result,
      approach: 'single_agent',
      processingTime: processingTime,
      reasoning: enhancedAgent.getIntegratedReasoning()
    };
  }

  /**
   * Runs the three-step multi-agent pipeline: creation, validation, then
   * coordinated conflict resolution. Agents run sequentially.
   */
  async runMultiAgentApproach(transcript, options) {
    this.log("🎭 Running multi-agent approach");
    const startTime = Date.now();
    // Step 1: Creation agent generates initial scenes
    this.log("Agent 1: Scene creation");
    const creationResult = await this.agents.creation.createScenesFromTranscript(transcript);
    // Step 2: Validation agent analyzes and provides feedback
    this.log("Agent 2: Scene validation");
    const validationResult = await this.agents.validation.validateScenes(
      creationResult,
      transcript,
      this.agents.creation.getReasoningLog()
    );
    // Step 3: Coordination and final optimization
    this.log("Coordination: Final optimization");
    const finalResult = await this.coordinateAgentFeedback(
      creationResult,
      validationResult,
      transcript
    );
    const processingTime = Date.now() - startTime;
    this.updateMetrics('multiAgent', finalResult, processingTime);
    return {
      ...finalResult,
      approach: 'multi_agent',
      processingTime: processingTime,
      agentReports: {
        creation: this.agents.creation.getReasoningLog(),
        // NOTE(review): validation agent is accessed via its raw
        // `reasoningLog` property, unlike creation's getter — confirm
        // SceneValidationAgent exposes a getReasoningLog() to match.
        validation: this.agents.validation.reasoningLog
      },
      coordination: this.coordinationLog
    };
  }

  /**
   * Reconciles creation and validation outputs: identifies conflicts,
   * resolves each one, applies the resulting decisions to the validated
   * scenes, and archives the coordination record.
   * @returns {Promise<{scenes: Array, qualityScore: number, coordination: object}>}
   */
  async coordinateAgentFeedback(creationResult, validationResult, transcript) {
    this.log("🤝 Coordinating agent feedback");
    const coordination = {
      conflicts: [],
      agreements: [],
      finalDecisions: [],
      qualityScore: validationResult.overallScore
    };
    // Analyze agent agreement/disagreement
    const conflicts = this.identifyAgentConflicts(creationResult, validationResult);
    coordination.conflicts = conflicts;
    // Resolve conflicts through reasoning
    for (const conflict of conflicts) {
      const resolution = await this.resolveConflict(conflict, transcript);
      coordination.finalDecisions.push(resolution);
    }
    // Apply coordinated improvements
    const finalScenes = this.applyCoordinatedImprovements(
      validationResult.optimizedScenes,
      coordination.finalDecisions
    );
    this.coordinationLog.push(coordination);
    return {
      scenes: finalScenes,
      qualityScore: coordination.qualityScore,
      coordination: coordination
    };
  }

  /**
   * Analysis: Multi-Agent Benefits vs Overhead.
   * Static reference material returned verbatim for reporting.
   */
  analyzeMultiAgentBenefits() {
    return {
      benefits: {
        specialization: "Each agent can focus on specific expertise (creation vs validation)",
        qualityAssurance: "Built-in validation prevents single-point-of-failure reasoning",
        diversePerspectives: "Multiple reasoning paths increase robustness",
        scalability: "Can add specialized agents (layout, timing, narrative)",
        debugging: "Easier to identify which reasoning step failed",
        learning: "Agents can learn from each other's feedback",
        parallelization: "Future: Run agents in parallel for speed"
      },
      overhead: {
        complexity: "More coordination logic and potential conflicts",
        latency: "Sequential agent calls increase processing time",
        consistency: "Need to ensure agents don't contradict each other",
        resourceUsage: "More memory and computation for multiple agents",
        debuggingComplexity: "Harder to trace multi-agent decision chains"
      },
      optimalUse: {
        highComplexity: "Complex workflows benefit from specialized reasoning",
        qualityCritical: "High-stakes scenarios need validation",
        iterativeImprovement: "Learning systems benefit from feedback loops",
        domainExpertise: "Different agents can have domain-specific knowledge"
      }
    };
  }

  /**
   * Scores the expected benefit of the multi-agent approach in [0, 1].
   * Band contributions: complexity (0-0.3), quality target (0-0.3),
   * transcript length (0-0.2), learning flag (0-0.2).
   */
  calculateMultiAgentBenefit(complexity, qualityTarget, transcriptLength, options) {
    let score = 0;
    // Complexity benefit (0-0.3)
    if (complexity > 0.7) score += 0.3;
    else if (complexity > 0.4) score += 0.15;
    // Quality requirement benefit (0-0.3)
    if (qualityTarget > 0.85) score += 0.3;
    else if (qualityTarget > 0.7) score += 0.15;
    // Content length benefit (0-0.2)
    if (transcriptLength > 1000) score += 0.2;
    else if (transcriptLength > 500) score += 0.1;
    // Learning requirement (0-0.2)
    if (options.enableLearning) score += 0.2;
    return Math.min(score, 1.0);
  }

  /**
   * Scores transcript complexity in [0, 1] as a weighted sum of four
   * factors (weights 0.2 / 0.3 / 0.3 / 0.2). Each ratio is clamped to 1
   * so a single extreme dimension (e.g. a transcript mentioning "api"
   * hundreds of times) cannot push the total past 1.0 — previously only
   * the length factor was capped.
   */
  analyzeComplexity(transcript) {
    const factors = {
      length: Math.min(transcript.length / 2000, 1) * 0.2,
      technicalTerms: Math.min(this.countTechnicalTerms(transcript) / 50, 1) * 0.3,
      integrations: Math.min(this.countIntegrations(transcript) / 20, 1) * 0.3,
      workflows: Math.min(this.countWorkflowSteps(transcript) / 15, 1) * 0.2
    };
    return Object.values(factors).reduce((sum, val) => sum + val, 0);
  }

  /**
   * Compares the validation agent's findings against the creation agent's
   * output and returns a list of conflict descriptors (possibly empty).
   * Optional chaining guards against a validation result that omits a
   * detailedAnalysis section instead of throwing mid-coordination.
   */
  identifyAgentConflicts(creationResult, validationResult) {
    const conflicts = [];
    // Check for timing conflicts
    const timingIssues = validationResult.detailedAnalysis?.timing?.issues ?? [];
    if (timingIssues.length > 0) {
      conflicts.push({
        type: 'timing',
        description: 'Validation agent suggests timing adjustments',
        severity: 'medium',
        creationReasoning: "Creation agent optimized for narrative flow",
        validationConcern: timingIssues
      });
    }
    // Check for layout conflicts
    const spatialIssues = validationResult.detailedAnalysis?.layout?.spatialIssues ?? [];
    if (spatialIssues.length > 0) {
      conflicts.push({
        type: 'layout',
        description: 'Validation agent found spatial issues',
        severity: 'high',
        creationReasoning: "Creation agent focused on semantic grouping",
        validationConcern: spatialIssues
      });
    }
    return conflicts;
  }

  /**
   * Meta-reasoning: decides which agent's perspective wins for a given
   * conflict, or constructs a compromise.
   * @returns {Promise<{conflictType: string, decision: string, reasoning: string, compromise: ?object}>}
   */
  async resolveConflict(conflict, transcript) {
    this.log(`Resolving ${conflict.type} conflict`);
    const resolution = {
      conflictType: conflict.type,
      decision: '',
      reasoning: '',
      compromise: null
    };
    if (conflict.type === 'layout' && conflict.severity === 'high') {
      // Validation agent wins on critical spatial issues
      resolution.decision = 'validation_agent';
      resolution.reasoning = 'Spatial clarity is critical for user comprehension';
    } else if (conflict.type === 'timing') {
      // Try to find compromise between narrative flow and optimal timing
      resolution.decision = 'compromise';
      resolution.compromise = this.findTimingCompromise(conflict);
      resolution.reasoning = 'Balance narrative flow with optimal pacing';
    } else {
      // Unknown or low-severity conflicts default to the creation agent's
      // output rather than leaving the decision blank.
      resolution.decision = 'creation_agent';
      resolution.reasoning = 'No critical validation concern; keeping original scenes';
    }
    return resolution;
  }

  /** Finds middle ground between creation and validation timing preferences. */
  findTimingCompromise(conflict) {
    return {
      adjustmentFactor: 0.5, // 50% toward validation suggestion
      preserveNarrativeFlow: true
    };
  }

  /**
   * Records quality/speed/consistency metrics for one approach.
   * @param {'singleAgent'|'multiAgent'} approach
   */
  updateMetrics(approach, result, processingTime) {
    this.performanceMetrics[approach] = {
      // ?? (not ||): a legitimate qualityScore of 0 must not be silently
      // replaced by the 0.8 fallback.
      quality: result.qualityScore ?? 0.8,
      speed: Math.max(0, 1 - processingTime / 10000), // Normalize to 0-1
      consistency: this.measureConsistency(result),
      lastUpdate: Date.now()
    };
  }

  /** Measures how consistent the result is (simplified heuristic). */
  measureConsistency(result) {
    return result.scenes ? 0.85 : 0.5;
  }

  /** Returns a report bundling metrics, a recommendation, and the static analysis. */
  getPerformanceComparison() {
    return {
      recommendation: this.getRecommendation(),
      metrics: this.performanceMetrics,
      analysis: this.analyzeMultiAgentBenefits()
    };
  }

  /** Recommends an approach from the recorded metrics. */
  getRecommendation() {
    const single = this.performanceMetrics.singleAgent;
    const multi = this.performanceMetrics.multiAgent;
    if (multi.quality > single.quality + 0.1) {
      return "Multi-agent approach recommended for higher quality";
    } else if (single.speed > multi.speed + 0.2) {
      return "Single-agent approach recommended for speed";
    } else {
      return "Choose based on specific requirements";
    }
  }

  // Helper methods

  /**
   * Counts occurrences of known technical terms (case-insensitive).
   * NOTE(review): substring matching — 'api' inside 'rapid' also counts;
   * add word boundaries if that matters.
   */
  countTechnicalTerms(transcript) {
    const techTerms = ['api', 'webhook', 'database', 'authentication', 'integration', 'workflow'];
    return techTerms.reduce((count, term) =>
      count + (transcript.toLowerCase().match(new RegExp(term, 'g')) || []).length, 0);
  }

  /** Counts DISTINCT known integrations mentioned (each at most once). */
  countIntegrations(transcript) {
    const integrations = ['gmail', 'slack', 'notion', 'airtable', 'zapier', 'salesforce'];
    return integrations.reduce((count, integration) =>
      count + (transcript.toLowerCase().includes(integration) ? 1 : 0), 0);
  }

  /** Counts occurrences of sequencing words as a proxy for workflow steps. */
  countWorkflowSteps(transcript) {
    const stepIndicators = ['first', 'then', 'next', 'finally', 'step', 'after'];
    return stepIndicators.reduce((count, indicator) =>
      count + (transcript.toLowerCase().match(new RegExp(indicator, 'g')) || []).length, 0);
  }

  /**
   * Applies the coordinated decisions to improve scenes. Returns a new
   * array; the input scenes array is not mutated.
   */
  applyCoordinatedImprovements(scenes, decisions) {
    let improvedScenes = [...scenes];
    for (const decision of decisions) {
      if (decision.conflictType === 'layout' && decision.decision === 'validation_agent') {
        improvedScenes = this.applyLayoutFixes(improvedScenes);
      }
      if (decision.conflictType === 'timing' && decision.compromise) {
        improvedScenes = this.applyTimingCompromise(improvedScenes, decision.compromise);
      }
    }
    return improvedScenes;
  }

  /** Applies layout fixes from validation agent: enforces minimum spacing of 200. */
  applyLayoutFixes(scenes) {
    return scenes.map(scene => ({
      ...scene,
      layout: {
        ...scene.layout,
        spacing: Math.max(scene.layout?.spacing || 150, 200),
        validated: true
      }
    }));
  }

  /**
   * Applies timing compromise between agents: scales each duration by up to
   * ±10% depending on how far adjustmentFactor is from the 0.5 midpoint.
   */
  applyTimingCompromise(scenes, compromise) {
    return scenes.map(scene => ({
      ...scene,
      duration: scene.duration * (1 + (compromise.adjustmentFactor - 0.5) * 0.2)
    }));
  }

  /** Logs to the console and appends a timestamped entry to coordinationLog. */
  log(message) {
    console.log(`🎭 Orchestrator: ${message}`);
    this.coordinationLog.push({
      timestamp: Date.now(),
      message: message
    });
  }
}
/**
 * Enhanced Single Agent - Combines creation and validation in one agent.
 *
 * Keeps an ordered trace of its reasoning steps so the orchestrator can
 * surface them alongside the produced scenes.
 */
class EnhancedSingleAgent {
  constructor() {
    // Ordered trace of reasoning steps taken during scene creation.
    this.integratedReasoning = [];
  }

  /**
   * Produces scenes with validation folded into one reasoning chain.
   * @param {string} transcript - Raw workflow transcript.
   * @returns {Promise<{scenes: Array, qualityScore: number}>}
   */
  async createScenesWithIntegratedValidation(transcript) {
    const scenes = await this.createAndValidateSimultaneously(transcript);
    return { scenes, qualityScore: 0.8 };
  }

  /**
   * Parses the transcript and assembles scenes, validating as it goes.
   * Currently a scaffold: parsing runs, but scene assembly is not yet
   * implemented, so an empty scene list is returned.
   */
  async createAndValidateSimultaneously(transcript) {
    this.reason("Creating scenes with integrated validation");
    const parser = new WorkflowParser();
    const parsedArchitecture = parser.parse(transcript);
    const scenes = [];
    // ... scene creation logic with built-in validation
    return scenes;
  }

  /** Records one timestamped reasoning step. */
  reason(message) {
    const entry = { timestamp: Date.now(), message };
    this.integratedReasoning.push(entry);
  }

  /** @returns {Array<{timestamp: number, message: string}>} the reasoning trace */
  getIntegratedReasoning() {
    return this.integratedReasoning;
  }
}