codecrucible-synth
Production-Ready AI Development Platform with Multi-Voice Synthesis, Smithery MCP Integration, Enterprise Security, and Zero-Timeout Reliability
/**
* Advanced Workflow Orchestrator implementing 9 industry-standard agentic patterns
* Based on research from Amazon Q CLI, ZCF, and emerging AI agent architectures
*/
import { EventEmitter } from 'events';
import { UnifiedModelClient } from '../../refactor/unified-model-client.js';
import { Logger } from '../logger.js';
import { getErrorMessage } from '../../utils/error-utils.js';
import {
JsonValue,
JsonObject,
ConfigurationObject,
TransformFunction,
PredicateFunction,
EvaluationFunction,
WorkflowContext,
MemoryItem,
GenericResult,
} from '../types.js';
// Utility Functions
/**
* Convert ModelResponse to JsonValue for workflow compatibility
*/
function modelResponseToJsonValue(response: any): JsonValue {
if (!response || typeof response !== 'object') {
return response;
}
// Convert ModelResponse to plain JsonObject
const jsonObject: JsonObject = {
content: response.content || '',
model: response.model || '',
provider: response.provider || '',
metadata: response.metadata || {},
tokens_used: response.tokens_used || 0,
usage: response.usage || {},
cached: response.cached || false,
streamed: response.streamed || false,
processingTime: response.processingTime || 0,
};
return jsonObject;
}
// Core Types
export interface ExecutionRequest {
id?: string;
prompt: string;
voice?: string;
model?: string;
maxTokens?: number;
temperature?: number;
stream?: boolean;
metadata?: Record<string, unknown>;
}
export interface ExecutionResponse {
success: boolean;
result?: unknown;
error?: string;
executionTime: number;
metadata?: Record<string, unknown>;
}
export interface ExecutionResult {
success: boolean;
content: unknown;
metadata?: Record<string, unknown>;
}
// Workflow Pattern Types
export enum WorkflowPatternType {
SEQUENTIAL = 'sequential',
PARALLEL = 'parallel',
HIERARCHICAL = 'hierarchical',
ADAPTIVE = 'adaptive',
FEEDBACK = 'feedback',
ITERATIVE = 'iterative',
BRANCHING = 'branching',
STREAMING = 'streaming',
MEMORY = 'memory',
}
// Pattern Interfaces
export interface PromptChaining {
steps: ChainStep[];
context: Map<string, unknown>;
}
export interface ChainStep {
id: string;
prompt: string;
voice?: string;
dependencies?: string[];
transform?: TransformFunction<JsonValue, JsonValue>;
}
export interface ParallelProcessing {
tasks: ParallelTask[];
concurrency: number;
aggregation: 'merge' | 'reduce' | 'select';
}
export interface ParallelTask {
id: string;
request: ExecutionRequest;
weight: number;
timeout?: number;
}
export interface MultiAgentOrchestration {
agents: AgentRole[];
coordinator: string;
communicationProtocol: 'direct' | 'broadcast' | 'pubsub';
}
export interface AgentRole {
id: string;
type: string;
capabilities: string[];
priority: number;
}
export interface DynamicWorkflow {
initialPlan: WorkflowNode[];
adaptationRules: AdaptationRule[];
learningRate: number;
}
export interface WorkflowNode {
id: string;
type: WorkflowPatternType;
config: ConfigurationObject;
transitions: Transition[];
}
export interface Transition {
to: string;
condition: PredicateFunction<WorkflowContext>;
probability?: number;
}
export interface AdaptationRule {
trigger: (context: WorkflowContext, history: JsonValue[]) => boolean;
action: (workflow: DynamicWorkflow) => void;
}
export interface HumanInTheLoop {
checkpoints: Checkpoint[];
escalationPolicy: EscalationPolicy;
feedbackLoop: FeedbackConfig;
}
export interface Checkpoint {
id: string;
condition: PredicateFunction<JsonValue>;
prompt: string;
timeout: number;
}
export interface EscalationPolicy {
maxRetries: number;
escalationPath: string[];
fallbackBehavior: 'skip' | 'default' | 'abort';
}
export interface FeedbackConfig {
collectMetrics: boolean;
learningEnabled: boolean;
persistFeedback: boolean;
}
export interface ReflectiveRefinement {
maxIterations: number;
qualityThreshold: number;
refinementStrategy: 'critique' | 'improve' | 'regenerate';
evaluationCriteria: EvaluationCriteria[];
}
export interface EvaluationCriteria {
name: string;
weight: number;
evaluate: (result: any) => number;
}
export interface ConditionalExecution {
branches: Branch[];
defaultBranch?: string;
evaluationOrder: 'sequential' | 'parallel';
}
export interface Branch {
id: string;
condition: (context: any) => boolean;
workflow: WorkflowNode;
priority: number;
}
export interface RealTimeProcessing {
streamingEnabled: boolean;
chunkSize: number;
bufferStrategy: 'window' | 'sliding' | 'circular';
onChunk: (chunk: any) => void;
transform?: (content: string) => string;
}
export interface ContextualContinuation {
memoryType: 'short' | 'long' | 'episodic' | 'semantic';
retentionPolicy: RetentionPolicy;
contextWindow: number;
}
export interface RetentionPolicy {
maxItems: number;
ttl: number;
importance: (item: any) => number;
}
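/*
 * Illustrative sketch of a ContextualContinuation config (consumed by the
 * memory pattern handler later in this file). The importance heuristic and
 * the numbers are assumptions, not values used elsewhere in the module.
 */
export const exampleMemoryConfig: ContextualContinuation = {
  memoryType: 'episodic',
  contextWindow: 8,
  retentionPolicy: {
    maxItems: 50,
    ttl: 60 * 60 * 1000, // retain items for one hour
    importance: item => (typeof item?.content === 'string' ? item.content.length / 1000 : 0),
  },
};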
// Main Orchestrator Class
export class WorkflowOrchestrator extends EventEmitter {
private modelClient: UnifiedModelClient;
private logger: Logger;
private executionHistory: Map<string, ExecutionResult[]>;
private activeWorkflows: Map<string, WorkflowExecution>;
private patternHandlers: Map<WorkflowPatternType, PatternHandler>;
private cleanupInterval: NodeJS.Timeout | null = null;
private isDisposed: boolean = false;
private readonly MAX_HISTORY_SIZE = 1000;
private readonly MAX_ACTIVE_WORKFLOWS = 100;
constructor(modelClient: UnifiedModelClient) {
super();
this.modelClient = modelClient;
this.logger = new Logger('WorkflowOrchestrator');
this.executionHistory = new Map();
this.activeWorkflows = new Map();
this.patternHandlers = this.initializeHandlers();
// Set up periodic cleanup to prevent memory leaks
this.setupCleanupInterval();
}
/**
* Initialize the workflow orchestrator
*/
async initialize(): Promise<void> {
this.logger.info('Initializing Workflow Orchestrator');
// Initialize any async resources if needed
// Currently, all initialization is done in constructor
return Promise.resolve();
}
private setupCleanupInterval(): void {
// Run cleanup every 10 minutes
this.cleanupInterval = setInterval(
() => {
if (this.isDisposed) {
if (this.cleanupInterval) {
clearInterval(this.cleanupInterval);
this.cleanupInterval = null;
}
return;
}
this.performCleanup();
},
10 * 60 * 1000
);
// Ensure cleanup interval doesn't prevent process exit
if (this.cleanupInterval?.unref) {
this.cleanupInterval.unref();
}
}
private performCleanup(): void {
// Basic cleanup - can be expanded
try {
const now = Date.now();
// Clean up old executions
for (const [key, execution] of this.activeWorkflows) {
if (now - execution.startTime > 3600000) {
// 1 hour
this.activeWorkflows.delete(key);
}
}
this.logger.info('🧹 Performed periodic cleanup');
} catch (error) {
this.logger.error('Failed to perform cleanup:', error);
}
}
private initializeHandlers(): Map<WorkflowPatternType, PatternHandler> {
const handlers = new Map<WorkflowPatternType, PatternHandler>();
handlers.set(WorkflowPatternType.SEQUENTIAL, new SequentialHandler(this.modelClient));
handlers.set(WorkflowPatternType.PARALLEL, new ParallelHandler(this.modelClient));
handlers.set(WorkflowPatternType.HIERARCHICAL, new HierarchicalHandler(this.modelClient));
handlers.set(WorkflowPatternType.ADAPTIVE, new AdaptiveHandler(this.modelClient));
handlers.set(WorkflowPatternType.FEEDBACK, new FeedbackHandler(this.modelClient));
handlers.set(WorkflowPatternType.ITERATIVE, new IterativeHandler(this.modelClient));
handlers.set(WorkflowPatternType.BRANCHING, new BranchingHandler(this.modelClient));
handlers.set(WorkflowPatternType.STREAMING, new StreamingHandler(this.modelClient));
handlers.set(WorkflowPatternType.MEMORY, new MemoryHandler(this.modelClient));
return handlers;
}
async executePattern(
pattern: WorkflowPatternType,
request: ExecutionRequest,
config?: any
): Promise<ExecutionResponse> {
const executionId = this.generateExecutionId();
const startTime = Date.now();
this.logger.info(`Starting ${pattern} workflow execution`, { executionId });
this.emit('workflow:start', { executionId, pattern, request });
try {
const handler = this.patternHandlers.get(pattern);
if (!handler) {
throw new Error(`No handler found for pattern: ${pattern}`);
}
const workflow = new WorkflowExecution(executionId, pattern, request, config);
this.activeWorkflows.set(executionId, workflow);
const result = await handler.execute(request, config, progress => {
this.emit('workflow:progress', { executionId, progress });
workflow.updateProgress(progress);
});
const executionTime = Date.now() - startTime;
const response: ExecutionResponse = {
success: true,
result,
executionTime,
metadata: {
pattern,
executionId,
...result.metadata,
},
};
this.recordExecution(executionId, result);
this.emit('workflow:complete', { executionId, response });
return response;
} catch (error: unknown) {
this.logger.error(`Workflow execution failed`, { executionId, error });
this.emit('workflow:error', { executionId, error });
return {
success: false,
error: getErrorMessage(error),
executionTime: Date.now() - startTime,
metadata: { pattern, executionId },
};
} finally {
this.activeWorkflows.delete(executionId);
}
}
async executeSequentialChain(request: ExecutionRequest): Promise<ExecutionResponse> {
return this.executePattern(WorkflowPatternType.SEQUENTIAL, request);
}
async executeParallelAgents(request: ExecutionRequest): Promise<ExecutionResponse> {
return this.executePattern(WorkflowPatternType.PARALLEL, request);
}
async executeAdaptiveWorkflow(request: ExecutionRequest): Promise<ExecutionResponse> {
return this.executePattern(WorkflowPatternType.ADAPTIVE, request);
}
async executeHierarchicalOrchestration(request: ExecutionRequest): Promise<ExecutionResponse> {
return this.executePattern(WorkflowPatternType.HIERARCHICAL, request);
}
async executeWithFeedback(request: ExecutionRequest): Promise<ExecutionResponse> {
return this.executePattern(WorkflowPatternType.FEEDBACK, request);
}
async executeIterativeRefinement(request: ExecutionRequest): Promise<ExecutionResponse> {
return this.executePattern(WorkflowPatternType.ITERATIVE, request);
}
async executeBranchingLogic(request: ExecutionRequest): Promise<ExecutionResponse> {
return this.executePattern(WorkflowPatternType.BRANCHING, request);
}
async executeStreamingWorkflow(request: ExecutionRequest): Promise<ExecutionResponse> {
return this.executePattern(WorkflowPatternType.STREAMING, request);
}
async executeWithMemory(request: ExecutionRequest): Promise<ExecutionResponse> {
return this.executePattern(WorkflowPatternType.MEMORY, request);
}
// Utility methods
private generateExecutionId(): string {
return `exec_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
}
private recordExecution(executionId: string, result: ExecutionResult): void {
const history = this.executionHistory.get(executionId) || [];
history.push(result);
this.executionHistory.set(executionId, history);
}
getExecutionHistory(executionId: string): ExecutionResult[] | undefined {
return this.executionHistory.get(executionId);
}
getActiveWorkflows(): WorkflowExecution[] {
return Array.from(this.activeWorkflows.values());
}
cancelWorkflow(executionId: string): boolean {
const workflow = this.activeWorkflows.get(executionId);
if (workflow) {
workflow.cancel();
this.activeWorkflows.delete(executionId);
this.emit('workflow:cancelled', { executionId });
return true;
}
return false;
}
}
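/*
 * Minimal usage sketch for WorkflowOrchestrator. The prompt, voice, and the
 * surrounding function are illustrative; a configured UnifiedModelClient is
 * assumed to be supplied by the caller.
 */
export async function runSequentialExample(
  modelClient: UnifiedModelClient
): Promise<ExecutionResponse> {
  const orchestrator = new WorkflowOrchestrator(modelClient);
  await orchestrator.initialize();
  // Progress events are emitted for each running workflow
  orchestrator.on('workflow:progress', ({ executionId, progress }) => {
    console.log(`[${executionId}] ${progress}%`);
  });
  return orchestrator.executePattern(WorkflowPatternType.SEQUENTIAL, {
    prompt: 'Summarize the latest build failures',
    voice: 'analyzer',
  });
}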
// Workflow Execution Class
class WorkflowExecution {
constructor(
public id: string,
public pattern: WorkflowPatternType,
public request: ExecutionRequest,
public config: any,
public startTime: number = Date.now(),
public status: 'running' | 'completed' | 'failed' | 'cancelled' = 'running',
public progress: number = 0
) {}
updateProgress(progress: number): void {
this.progress = Math.min(100, Math.max(0, progress));
}
cancel(): void {
this.status = 'cancelled';
}
}
// Abstract Pattern Handler
abstract class PatternHandler {
constructor(protected modelClient: UnifiedModelClient) {}
abstract execute(
request: ExecutionRequest,
config: any,
onProgress: (progress: number) => void
): Promise<ExecutionResult>;
}
// Sequential Handler Implementation
class SequentialHandler extends PatternHandler {
async execute(
request: ExecutionRequest,
config: PromptChaining,
onProgress: (progress: number) => void
): Promise<ExecutionResult> {
const results: any[] = [];
const context = config?.context || new Map();
const steps = config?.steps || [];
for (let i = 0; i < steps.length; i++) {
const step = steps[i];
onProgress((i / steps.length) * 100);
// Check dependencies
if (step.dependencies) {
for (const dep of step.dependencies) {
if (!context.has(dep)) {
throw new Error(`Missing dependency: ${dep}`);
}
}
}
// Execute step
const stepRequest = {
...request,
prompt: this.interpolatePrompt(step.prompt, context),
voice: step.voice || request.voice,
};
const result = await this.modelClient.synthesize(stepRequest);
// Transform and store result - convert to JsonValue for compatibility
const jsonResult = modelResponseToJsonValue(result);
const transformed = step.transform ? step.transform(jsonResult) : jsonResult;
context.set(step.id, transformed);
results.push(transformed);
}
onProgress(100);
return {
success: true,
content: results,
metadata: {
stepsCompleted: steps.length,
context: Object.fromEntries(context),
},
};
}
private interpolatePrompt(prompt: string, context: Map<string, any>): string {
let interpolated = prompt;
context.forEach((value, key) => {
interpolated = interpolated.replace(`{{${key}}}`, JSON.stringify(value));
});
return interpolated;
}
}
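/*
 * Sketch of a PromptChaining config consumed by SequentialHandler. Step ids
 * double as context keys, so "{{outline}}" is replaced with the stored result
 * of the "outline" step; the step ids and prompts are illustrative.
 */
export function buildExampleChain(): PromptChaining {
  return {
    context: new Map<string, unknown>(),
    steps: [
      { id: 'outline', prompt: 'Outline a test plan for the login flow' },
      {
        id: 'cases',
        prompt: 'Expand this outline into concrete test cases: {{outline}}',
        dependencies: ['outline'],
      },
    ],
  };
}
// e.g. orchestrator.executePattern(WorkflowPatternType.SEQUENTIAL, { prompt: 'login flow tests' }, buildExampleChain());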
// Parallel Handler Implementation
class ParallelHandler extends PatternHandler {
async execute(
request: ExecutionRequest,
config: ParallelProcessing,
onProgress: (progress: number) => void
): Promise<ExecutionResult> {
const tasks = config?.tasks || [];
const concurrency = config?.concurrency || 5;
const aggregation = config?.aggregation || 'merge';
const results = await this.executeWithConcurrency(tasks, concurrency, completed =>
onProgress((completed / tasks.length) * 100)
);
const aggregated = this.aggregateResults(results, aggregation);
return {
success: true,
content: aggregated,
metadata: {
tasksCompleted: tasks.length,
aggregationMethod: aggregation,
},
};
}
private async executeWithConcurrency(
tasks: ParallelTask[],
concurrency: number,
onTaskComplete: (completed: number) => void
): Promise<any[]> {
const results: any[] = new Array(tasks.length);
let completed = 0;
let nextIndex = 0;
// Worker pool: each worker pulls the next task index from a shared cursor,
// so at most `concurrency` requests are in flight at any time.
const worker = async (): Promise<void> => {
  while (nextIndex < tasks.length) {
    const taskIndex = nextIndex++;
    const task = tasks[taskIndex];
    try {
      results[taskIndex] = await this.modelClient.synthesize(task.request);
    } catch (error: unknown) {
      results[taskIndex] = { error: getErrorMessage(error) };
    }
    completed++;
    onTaskComplete(completed);
  }
};
const workers = Array.from({ length: Math.max(1, Math.min(concurrency, tasks.length)) }, () =>
  worker()
);
await Promise.all(workers);
return results;
}
private aggregateResults(results: any[], method: string): any {
switch (method) {
case 'merge':
return results.flat();
case 'reduce':
return results.reduce((acc, curr) => ({ ...acc, ...curr }), {});
case 'select':
return results.find(r => r && !r.error) || results[0];
default:
return results;
}
}
}
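/*
 * Sketch of a ParallelProcessing config for ParallelHandler: three independent
 * review tasks fanned out with a concurrency limit of 2 and merged afterwards.
 * Task ids, prompts, weights, and the timeout are illustrative.
 */
export function buildExampleParallelConfig(): ParallelProcessing {
  return {
    concurrency: 2,
    aggregation: 'merge',
    tasks: [
      { id: 'security', request: { prompt: 'Review the diff for security issues' }, weight: 2 },
      { id: 'style', request: { prompt: 'Review the diff for style issues' }, weight: 1 },
      { id: 'tests', request: { prompt: 'Suggest missing tests for the diff' }, weight: 1, timeout: 30_000 },
    ],
  };
}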
// Full Hierarchical Handler Implementation
class HierarchicalHandler extends PatternHandler {
async execute(
request: ExecutionRequest,
config: MultiAgentOrchestration,
onProgress: (progress: number) => void
): Promise<ExecutionResult> {
const agents = config?.agents || [];
const coordinator = config?.coordinator || 'coordinator';
const protocol = config?.communicationProtocol || 'broadcast';
// Create agent hierarchy
const agentMap = new Map<string, AgentRole>();
agents.forEach(agent => agentMap.set(agent.id, agent));
// Sort agents by priority
const sortedAgents = agents.sort((a, b) => b.priority - a.priority);
// Initialize communication channel
const messageQueue = new MessageQueue(protocol);
const results = new Map<string, any>();
// Coordinator processes request first
onProgress(10);
const coordinatorRequest = {
...request,
prompt: `As coordinator, analyze and delegate: ${request.prompt}`,
metadata: { role: 'coordinator' },
};
const coordinatorResponse = await this.modelClient.synthesize(coordinatorRequest);
const delegationPlan = this.parseDelegationPlan(coordinatorResponse);
// Execute delegated tasks
let progress = 20;
for (const delegation of delegationPlan) {
const agent = agentMap.get(delegation.agentId);
if (!agent) continue;
onProgress(progress);
// Check if agent has required capabilities
if (!this.hasCapabilities(agent, delegation.requiredCapabilities)) {
results.set(agent.id, { error: 'Missing capabilities' });
continue;
}
// Execute agent task
const agentRequest = {
...request,
prompt: delegation.task,
voice: agent.type,
metadata: {
role: agent.type,
capabilities: agent.capabilities,
},
};
try {
const agentResult = await this.modelClient.synthesize(agentRequest);
results.set(agent.id, agentResult);
// Broadcast results if needed
if (protocol === 'broadcast') {
messageQueue.broadcast(agent.id, agentResult);
}
} catch (error: unknown) {
results.set(agent.id, { error: getErrorMessage(error) });
}
progress += 60 / delegationPlan.length;
}
// Coordinator aggregates results
onProgress(90);
const aggregationRequest = {
...request,
prompt: `Aggregate these agent results: ${JSON.stringify(Object.fromEntries(results))}`,
metadata: { role: 'coordinator', phase: 'aggregation' },
};
const finalResult = await this.modelClient.synthesize(aggregationRequest);
onProgress(100);
return {
success: true,
content: finalResult,
metadata: {
pattern: 'hierarchical',
agentsUsed: results.size,
protocol,
results: Object.fromEntries(results),
},
};
}
private parseDelegationPlan(
response: any
): Array<{ agentId: string; task: string; requiredCapabilities: string[] }> {
// Parse delegation plan from coordinator response
// In production, use structured output or JSON mode
const plan: Array<{ agentId: string; task: string; requiredCapabilities: string[] }> = [];
const content = typeof response === 'string' ? response : String(response?.content ?? '');
// Simple parsing - in production use proper NLP or structured output
const lines = content.split('\n');
for (const line of lines) {
if (line.includes('->')) {
const [agentPart, taskPart] = line.split('->');
const agentId = agentPart.trim().toLowerCase();
const task = taskPart.trim();
plan.push({
agentId,
task,
requiredCapabilities: [],
});
}
}
return plan.length > 0
? plan
: [{ agentId: 'default', task: 'Process request', requiredCapabilities: [] }];
}
private hasCapabilities(agent: AgentRole, required: string[]): boolean {
return required.every(cap => agent.capabilities.includes(cap));
}
}
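/*
 * Sketch of a MultiAgentOrchestration config for HierarchicalHandler. Because
 * parseDelegationPlan expects "agentId -> task" lines from the coordinator,
 * agent ids are kept as lowercase single tokens; the roles, capabilities, and
 * priorities are illustrative.
 */
export function buildExampleHierarchy(): MultiAgentOrchestration {
  return {
    coordinator: 'coordinator',
    communicationProtocol: 'broadcast',
    agents: [
      { id: 'architect', type: 'architect', capabilities: ['design'], priority: 3 },
      { id: 'developer', type: 'developer', capabilities: ['code'], priority: 2 },
      { id: 'reviewer', type: 'reviewer', capabilities: ['review'], priority: 1 },
    ],
  };
}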
// Message Queue for inter-agent communication
class MessageQueue {
private messages: Map<string, any[]> = new Map();
constructor(private protocol: string) {}
broadcast(from: string, message: any): void {
if (this.protocol === 'broadcast') {
// Store message for all agents
this.messages.set('broadcast', [
...(this.messages.get('broadcast') || []),
{ from, message },
]);
}
}
send(from: string, to: string, message: any): void {
if (this.protocol === 'direct') {
const key = `${from}->${to}`;
this.messages.set(key, [...(this.messages.get(key) || []), message]);
}
}
getMessages(agentId: string): any[] {
if (this.protocol === 'broadcast') {
return this.messages.get('broadcast') || [];
}
return [];
}
}
class AdaptiveHandler extends PatternHandler {
private executionHistory: any[] = [];
private currentWorkflow: WorkflowNode[] = [];
async execute(
request: ExecutionRequest,
config: DynamicWorkflow,
onProgress: (progress: number) => void
): Promise<ExecutionResult> {
const initialPlan = config?.initialPlan || [];
const adaptationRules = config?.adaptationRules || [];
const learningRate = config?.learningRate || 0.1;
this.currentWorkflow = [...initialPlan];
let currentNodeId = initialPlan[0]?.id || 'start';
const context = {
nodeId: currentNodeId,
request,
history: [] as any[],
metrics: {
successRate: 1.0,
avgLatency: 0,
adaptations: 0,
},
};
let progress = 0;
const results = [];
while (currentNodeId && currentNodeId !== 'end') {
const node = this.currentWorkflow.find(n => n.id === currentNodeId);
if (!node) break;
onProgress(progress);
// Execute current node
const nodeResult = await this.executeNode(node, request, context);
results.push(nodeResult);
context.history.push(nodeResult);
// Check adaptation rules
for (const rule of adaptationRules) {
if (rule.trigger(context, this.executionHistory)) {
// Apply adaptation
rule.action({ initialPlan: this.currentWorkflow, adaptationRules, learningRate });
context.metrics.adaptations++;
// Re-evaluate current position
const adaptedNode = this.findBestNode(context, this.currentWorkflow);
if (adaptedNode) {
currentNodeId = adaptedNode.id;
context.nodeId = currentNodeId;
continue;
}
}
}
// Determine next node based on transitions
const nextNode = this.selectNextNode(node, context);
currentNodeId = nextNode?.id || 'end';
context.nodeId = currentNodeId;
// Update metrics
this.updateMetrics(context, nodeResult);
// Apply learning
this.applyLearning(node, nodeResult, learningRate);
progress = Math.min(90, progress + 90 / initialPlan.length);
}
onProgress(100);
return {
success: true,
content: results,
metadata: {
pattern: 'adaptive',
adaptations: context.metrics.adaptations,
nodesExecuted: results.length,
finalMetrics: context.metrics,
},
};
}
private async executeNode(
node: WorkflowNode,
request: ExecutionRequest,
context: any
): Promise<any> {
// Recursively execute workflow patterns
const orchestrator = new WorkflowOrchestrator(this.modelClient);
const nodeRequest = {
...request,
prompt: `${request.prompt} [Node: ${node.id}]`,
};
const result = await orchestrator.executePattern(node.type, nodeRequest, node.config);
return {
nodeId: node.id,
type: node.type,
result,
timestamp: Date.now(),
};
}
private selectNextNode(currentNode: WorkflowNode, context: any): WorkflowNode | null {
// Evaluate transitions
for (const transition of currentNode.transitions) {
if (transition.condition(context)) {
const nextNode = this.currentWorkflow.find(n => n.id === transition.to);
if (nextNode) return nextNode;
}
}
// Probabilistic selection if no conditions met
const validTransitions = currentNode.transitions.filter(t => t.probability);
if (validTransitions.length > 0) {
const random = Math.random();
let cumulative = 0;
for (const transition of validTransitions) {
cumulative += transition.probability || 0;
if (random <= cumulative) {
return this.currentWorkflow.find(n => n.id === transition.to) || null;
}
}
}
return null;
}
private findBestNode(context: any, workflow: WorkflowNode[]): WorkflowNode | null {
// Find best node based on current context
let bestNode = null;
let bestScore = -Infinity;
for (const node of workflow) {
const score = this.scoreNode(node, context);
if (score > bestScore) {
bestScore = score;
bestNode = node;
}
}
return bestNode;
}
private scoreNode(node: WorkflowNode, context: any): number {
// Score node based on context and history
let score = 0;
// Prefer unexplored nodes
const timesExecuted = context.history.filter((h: any) => h.nodeId === node.id).length;
score -= timesExecuted * 0.5;
// Consider success rate of similar nodes
const similarResults = this.executionHistory.filter(h => h.type === node.type);
if (similarResults.length > 0) {
const successRate = similarResults.filter(r => r.success).length / similarResults.length;
score += successRate * 2;
}
return score;
}
private updateMetrics(context: any, result: any): void {
const latency = result.result?.executionTime || 0;
const n = context.history.length;
context.metrics.avgLatency = (context.metrics.avgLatency * (n - 1) + latency) / n;
if (result.result?.success) {
context.metrics.successRate = (context.metrics.successRate * (n - 1) + 1) / n;
} else {
context.metrics.successRate = (context.metrics.successRate * (n - 1)) / n;
}
}
private applyLearning(node: WorkflowNode, result: any, learningRate: number): void {
// Update transition probabilities based on success
if (result.result?.success) {
for (const transition of node.transitions) {
if (transition.probability) {
transition.probability = Math.min(1, transition.probability * (1 + learningRate));
}
}
}
// Store in history for future adaptations
this.executionHistory.push({
nodeId: node.id,
type: node.type,
success: result.result?.success,
timestamp: Date.now(),
});
}
}
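/*
 * Sketch of a DynamicWorkflow config for AdaptiveHandler: a generation node
 * flowing into a review node, plus one adaptation rule that raises the
 * learning rate when the observed success rate drops. It assumes
 * PredicateFunction<T> is a plain (value: T) => boolean and that an empty
 * object satisfies ConfigurationObject; node ids and thresholds are
 * illustrative.
 */
export function buildExampleAdaptiveWorkflow(): DynamicWorkflow {
  const generate: WorkflowNode = {
    id: 'generate',
    type: WorkflowPatternType.SEQUENTIAL,
    config: {},
    transitions: [{ to: 'review', condition: () => true }],
  };
  const review: WorkflowNode = {
    id: 'review',
    type: WorkflowPatternType.ITERATIVE,
    config: {},
    transitions: [{ to: 'end', condition: () => true }],
  };
  return {
    initialPlan: [generate, review],
    learningRate: 0.1,
    adaptationRules: [
      {
        // Illustrative trigger: adapt once the success rate falls below 50%
        trigger: (context: any) => (context?.metrics?.successRate ?? 1) < 0.5,
        action: workflow => {
          workflow.learningRate = Math.min(1, workflow.learningRate * 1.5);
        },
      },
    ],
  };
}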
class FeedbackHandler extends PatternHandler {
private feedbackStore: Map<string, any> = new Map();
async execute(
request: ExecutionRequest,
config: HumanInTheLoop,
onProgress: (progress: number) => void
): Promise<ExecutionResult> {
const checkpoints = config?.checkpoints || [];
const escalationPolicy = config?.escalationPolicy || {
maxRetries: 3,
escalationPath: ['user', 'supervisor', 'admin'],
fallbackBehavior: 'default',
};
const feedbackConfig = config?.feedbackLoop || {
collectMetrics: true,
learningEnabled: true,
persistFeedback: true,
};
let result = await this.modelClient.synthesize(request);
let jsonResult = modelResponseToJsonValue(result);
let finalResult = result;
const checkpointsPassed = [];
const feedbackCollected = [];
let retryCount = 0;
onProgress(20);
// Check each checkpoint
for (let i = 0; i < checkpoints.length; i++) {
const checkpoint = checkpoints[i];
onProgress(20 + (i / checkpoints.length) * 60);
if (checkpoint.condition(jsonResult)) {
// Checkpoint triggered - request human feedback
const feedback = await this.requestHumanFeedback(
checkpoint,
result,
escalationPolicy,
retryCount
);
if (feedback.action === 'approve') {
checkpointsPassed.push(checkpoint.id);
continue;
} else if (feedback.action === 'reject') {
// Retry with feedback
if (retryCount < escalationPolicy.maxRetries) {
const improvedRequest = {
...request,
prompt: `${request.prompt}\n\nFeedback: ${feedback.message}\nPlease improve based on this feedback.`,
};
result = await this.modelClient.synthesize(improvedRequest);
jsonResult = modelResponseToJsonValue(result);
retryCount++;
i--; // Re-check same checkpoint
} else {
// Max retries reached - escalate or fallback
result = await this.handleEscalation(request, result, escalationPolicy, feedback);
jsonResult = modelResponseToJsonValue(result);
}
} else if (feedback.action === 'modify') {
// Direct modification
result = {
...result,
content: feedback.modifiedContent || result.content,
};
}
feedbackCollected.push(feedback);
// Learn from feedback if enabled
if (feedbackConfig.learningEnabled) {
await this.learnFromFeedback(checkpoint, feedback, result);
}
}
}
// Collect final metrics
if (feedbackConfig.collectMetrics) {
const metrics = this.collectMetrics(feedbackCollected, checkpointsPassed);
if (feedbackConfig.persistFeedback) {
this.persistFeedback(request, feedbackCollected, metrics);
}
finalResult = {
...result,
metadata: {
tokens: result.metadata?.tokens || 0,
latency: result.metadata?.latency || 0,
quality: result.metadata?.quality,
feedbackMetrics: metrics,
} as any,
};
}
onProgress(100);
return {
success: true,
content: finalResult,
metadata: {
pattern: 'feedback',
checkpointsPassed: checkpointsPassed.length,
feedbackCollected: feedbackCollected.length,
retries: retryCount,
metrics: feedbackConfig.collectMetrics
? this.collectMetrics(feedbackCollected, checkpointsPassed)
: undefined,
},
};
}
private async requestHumanFeedback(
checkpoint: Checkpoint,
result: any,
policy: EscalationPolicy,
retryCount: number
): Promise<any> {
// In production, this would integrate with a UI or messaging system
// For now, simulate feedback based on quality
const quality = result.metadata?.quality || Math.random();
// Simulate timeout
await new Promise(resolve => setTimeout(resolve, Math.min(checkpoint.timeout, 100)));
if (quality > 0.8) {
return {
action: 'approve',
message: 'Quality threshold met',
timestamp: Date.now(),
};
} else if (quality > 0.5) {
return {
action: 'modify',
message: 'Minor improvements needed',
modifiedContent: `${result.content} [Improved]`,
timestamp: Date.now(),
};
} else {
return {
action: 'reject',
message: 'Significant improvements required',
suggestions: ['Add more detail', 'Improve clarity', 'Check accuracy'],
timestamp: Date.now(),
};
}
}
private async handleEscalation(
request: ExecutionRequest,
result: any,
policy: EscalationPolicy,
feedback: any
): Promise<any> {
// Escalate through the chain
for (const escalationLevel of policy.escalationPath) {
const escalatedFeedback = await this.requestEscalatedFeedback(
escalationLevel,
request,
result,
feedback
);
if (escalatedFeedback.resolution) {
return escalatedFeedback.result;
}
}
// Apply fallback behavior
switch (policy.fallbackBehavior) {
case 'skip':
return result;
case 'default':
return { ...result, content: 'Default response due to escalation failure' };
case 'abort':
throw new Error('Escalation failed - aborting');
default:
return result;
}
}
private async requestEscalatedFeedback(
level: string,
request: ExecutionRequest,
result: any,
feedback: any
): Promise<any> {
// Simulate escalated feedback
return {
resolution: Math.random() > 0.5,
result: {
...result,
content: `${result.content} [Resolved by ${level}]`,
},
};
}
private async learnFromFeedback(
checkpoint: Checkpoint,
feedback: any,
result: any
): Promise<void> {
// Store feedback for learning
const key = `${checkpoint.id}_feedback`;
const history = this.feedbackStore.get(key) || [];
history.push({
feedback,
result,
timestamp: Date.now(),
});
this.feedbackStore.set(key, history);
// Adjust checkpoint conditions based on feedback patterns
// This would be more sophisticated in production
}
private collectMetrics(feedbackCollected: any[], checkpointsPassed: string[]): any {
  // Guard against division by zero when no feedback was collected
  const total = feedbackCollected.length || 1;
  return {
    totalFeedback: feedbackCollected.length,
    approvalRate: feedbackCollected.filter(f => f.action === 'approve').length / total,
    modificationRate: feedbackCollected.filter(f => f.action === 'modify').length / total,
    rejectionRate: feedbackCollected.filter(f => f.action === 'reject').length / total,
    checkpointsPassedRate:
      checkpointsPassed.length / (checkpointsPassed.length + feedbackCollected.length || 1),
    avgResponseTime: feedbackCollected.reduce((sum, f) => sum + (f.timestamp || 0), 0) / total,
  };
}
private persistFeedback(request: ExecutionRequest, feedback: any[], metrics: any): void {
// Store feedback for future analysis
const sessionId = `session_${Date.now()}`;
this.feedbackStore.set(sessionId, {
request,
feedback,
metrics,
timestamp: Date.now(),
});
}
}
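/*
 * Sketch of a HumanInTheLoop config for FeedbackHandler: one checkpoint that
 * fires when the synthesized content looks too short, three retries, and a
 * 'default' fallback. It assumes PredicateFunction<JsonValue> is a plain
 * (value: JsonValue) => boolean; the length threshold and prompt are
 * illustrative.
 */
export function buildExampleFeedbackConfig(): HumanInTheLoop {
  return {
    checkpoints: [
      {
        id: 'length-check',
        // Flag results whose content is suspiciously short
        condition: (result: any) => String(result?.content ?? '').length < 200,
        prompt: 'The draft looks too short. Approve, modify, or reject?',
        timeout: 30_000,
      },
    ],
    escalationPolicy: {
      maxRetries: 3,
      escalationPath: ['user', 'supervisor', 'admin'],
      fallbackBehavior: 'default',
    },
    feedbackLoop: {
      collectMetrics: true,
      learningEnabled: true,
      persistFeedback: true,
    },
  };
}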
class IterativeHandler extends PatternHandler {
async execute(
request: ExecutionRequest,
config: ReflectiveRefinement,
onProgress: (progress: number) => void
): Promise<ExecutionResult> {
const maxIterations = config?.maxIterations || 3;
const qualityThreshold = config?.qualityThreshold || 0.85;
const strategy = config?.refinementStrategy || 'improve';
const criteria = config?.evaluationCriteria || [
{ name: 'accuracy', weight: 0.4, evaluate: r => r.metadata?.accuracy || 0.7 },
{ name: 'completeness', weight: 0.3, evaluate: r => r.metadata?.completeness || 0.7 },
{ name: 'clarity', weight: 0.3, evaluate: r => r.metadata?.clarity || 0.7 },
];
let currentResult = await this.modelClient.synthesize(request);
let bestResult = currentResult;
let bestQuality = this.evaluateQuality(currentResult, criteria);
const iterations = [];
onProgress(20);
for (let i = 0; i < maxIterations; i++) {
const currentQuality = this.evaluateQuality(currentResult, criteria);
iterations.push({
iteration: i + 1,
quality: currentQuality,
result: currentResult,
});
// Check if quality threshold is met
if (currentQuality >= qualityThreshold) {
bestResult = currentResult;
bestQuality = currentQuality;
break;
}
onProgress(20 + (i / maxIterations) * 60);
// Apply refinement strategy
const refinementRequest = this.createRefinementRequest(
request,
currentResult,
strategy,
criteria,
currentQuality
);
const refinedResult = await this.modelClient.synthesize(refinementRequest);
const refinedQuality = this.evaluateQuality(refinedResult, criteria);
// Keep best result
if (refinedQuality > bestQuality) {
bestResult = refinedResult;
bestQuality = refinedQuality;
}
// Decide whether to continue from refined or try alternative approach
if (refinedQuality > currentQuality || strategy === 'regenerate') {
currentResult = refinedResult;
} else if (strategy === 'critique') {
// Get critique and incorporate
const critiqueRequest = {
...request,
prompt: `Critique this response and suggest improvements: ${currentResult.content}`,
};
const critique = await this.modelClient.synthesize(critiqueRequest);
currentResult = {
...currentResult,
content: `${currentResult.content}\n\nCritique: ${critique.content}`,
};
}
}
// Final evaluation
const finalEvaluation = this.performFinalEvaluation(bestResult, criteria, iterations);
onProgress(100);
return {
success: bestQuality >= qualityThreshold,
content: bestResult,
metadata: {
pattern: 'iterative',
iterations: iterations.length,
finalQuality: bestQuality,
qualityThreshold,
strategy,
evaluation: finalEvaluation,
improvements: this.calculateImprovements(iterations),
},
};
}
private evaluateQuality(result: any, criteria: EvaluationCriteria[]): number {
let totalScore = 0;
let totalWeight = 0;
for (const criterion of criteria) {
const score = criterion.evaluate(result);
totalScore += score * criterion.weight;
totalWeight += criterion.weight;
}
return totalWeight > 0 ? totalScore / totalWeight : 0;
}
private createRefinementRequest(
originalRequest: ExecutionRequest,
currentResult: any,
strategy: string,
criteria: EvaluationCriteria[],
currentQuality: number
): ExecutionRequest {
const weakestCriterion = this.findWeakestCriterion(currentResult, criteria);
let refinementPrompt = originalRequest.prompt;
switch (strategy) {
case 'improve':
refinementPrompt = `Improve this response, especially regarding ${weakestCriterion.name}:\n\nOriginal: ${currentResult.content}\n\nFocus on: ${weakestCriterion.name}`;
break;
case 'critique':
refinementPrompt = `Critically analyze and rewrite:\n\n${currentResult.content}\n\nCurrent quality: ${currentQuality}. Must exceed: ${currentQuality + 0.1}`;
break;
case 'regenerate':
refinementPrompt = `${originalRequest.prompt}\n\nPrevious attempt (quality: ${currentQuality}) needs improvement in ${weakestCriterion.name}. Generate a better response.`;
break;
}
return {
...originalRequest,
prompt: refinementPrompt,
metadata: {
...originalRequest.metadata,
refinementIteration: true,
targetImprovement: weakestCriterion.name,
},
};
}
private findWeakestCriterion(result: any, criteria: EvaluationCriteria[]): EvaluationCriteria {
let weakest = criteria[0];
let weakestScore = weakest.evaluate(result);
for (const criterion of criteria) {
const score = criterion.evaluate(result);
if (score < weakestScore) {
weakest = criterion;
weakestScore = score;
}
}
return weakest;
}
private performFinalEvaluation(
result: any,
criteria: EvaluationCriteria[],
iterations: any[]
): any {
return {
criteria: criteria.map(c => ({
name: c.name,
weight: c.weight,
score: c.evaluate(result),
})),
convergence: this.calculateConvergence(iterations),
efficiency: iterations.length > 0 ? iterations[0].quality / iterations.length : 0,
};
}
private calculateConvergence(iterations: any[]): number {
if (iterations.length < 2) return 1;
const improvements = [];
for (let i = 1; i < iterations.length; i++) {
improvements.push(iterations[i].quality - iterations[i - 1].quality);
}
const avgImprovement = improvements.reduce((a, b) => a + b, 0) / improvements.length;
return Math.max(0, Math.min(1, avgImprovement * 10));
}
private calculateImprovements(iterations: any[]): any[] {
const improvements = [];
for (let i = 1; i < iterations.length; i++) {
improvements.push({
from: i,
to: i + 1,
qualityDelta: iterations[i].quality - iterations[i - 1].quality,
percentImprovement:
((iterations[i].quality - iterations[i - 1].quality) / iterations[i - 1].quality) * 100,
});
}
return improvements;
}
}
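/*
 * Sketch of a ReflectiveRefinement config for IterativeHandler. The evaluators
 * read quality hints from response metadata and fall back to a neutral score
 * when a provider does not supply them; weights, threshold, and iteration
 * count are illustrative.
 */
export function buildExampleRefinementConfig(): ReflectiveRefinement {
  return {
    maxIterations: 4,
    qualityThreshold: 0.85,
    refinementStrategy: 'improve',
    evaluationCriteria: [
      { name: 'accuracy', weight: 0.5, evaluate: r => r?.metadata?.accuracy ?? 0.7 },
      { name: 'clarity', weight: 0.3, evaluate: r => r?.metadata?.clarity ?? 0.7 },
      { name: 'completeness', weight: 0.2, evaluate: r => r?.metadata?.completeness ?? 0.7 },
    ],
  };
}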
class BranchingHandler extends PatternHandler {
async execute(
request: ExecutionRequest,
config: ConditionalExecution,
onProgress: (progress: number) => void
): Promise<ExecutionResult> {
const branches = config?.branches || [];
const defaultBranch = config?.defaultBranch;
const evaluationOrder = config?.evaluationOrder || 'sequential';
const context = {
request,
timestamp: Date.now(),
environment: process.env,
previousResults: [],
};
onProgress(10);
// Sort branches by priority
const sortedBranches = [...branches].sort((a, b) => b.priority - a.priority);
let selectedBranch = null;
let evaluationResults = [];
if (evaluationOrder === 'sequential') {
// Evaluate branches sequentially
for (const branch of sortedBranches) {
onProgress(10 + (sortedBranches.indexOf(branch) / sortedBranches.length) * 30);
const evaluation = await this.evaluateBranch(branch, context);
evaluationResults.push(evaluation);
if (evaluation.matches) {
selectedBranch = branch;
break;
}
}
} else {
// Evaluate branches in parallel
const evaluations = await Promise.all(
sortedBranches.map(branch => this.evaluateBranch(branch, context))
);
evaluationResults = evaluations;
// Find first matching branch (already sorted by priority)
for (let i = 0; i < sortedBranches.length; i++) {
if (evaluations[i].matches) {
selectedBranch = sortedBranches[i];
break;
}
}
onProgress(40);
}
// Use default branch if no conditions matched
if (!selectedBranch && defaultBranch) {
selectedBranch = branches.find(b => b.id === defaultBranch);
}
if (!selectedBranch) {
return {
success: false,
content: 'No branch conditions matched and no default branch specified',
metadata: {
pattern: 'branching',
evaluationResults,
},
};
}
onProgress(50);
// Execute selected branch workflow
const branchResult = await this.executeBranchWorkflow(
selectedBranch.workflow,
request,
progress => onProgress(50 + progress * 0.4)
);
onProgress(100);
return {
success: true,
content: branchResult,
metadata: {
pattern: 'branching',
selectedBranch: selectedBranch.id,
evaluationOrder,
evaluationResults,
branchMetadata: branchResult.metadata,
},
};
}
private async evaluateBranch(
branch: Branch,
context: any
): Promise<{ branchId: string; matches: boolean; evaluation: any }> {
try {
// String conditions (e.g. loaded from external config) are delegated to the model
if (typeof branch.condition === 'string') {
  const evaluationRequest = {
    prompt: `Evaluate if this condition is true: ${branch.condition}\nContext: ${JSON.stringify(context)}`,
    maxTokens: 10,
  };
  const result = await this.modelClient.synthesize(evaluationRequest);
  const evaluation = result.content.toLowerCase().includes('true');
  return {
    branchId: branch.id,
    matches: evaluation,
    evaluation: result.content,
  };
}
// Otherwise evaluate the predicate directly
const matches = branch.condition(context);
return {
  branchId: branch.id,
  matches,
  evaluation: matches ? 'Condition met' : 'Condition not met',
};
} catch (error: unknown) {
return {
branchId: branch.id,
matches: false,
evaluation: `Error: ${getErrorMessage(error)}`,
};
}
}
private async executeBranchWorkflow(
workflow: WorkflowNode,
request: ExecutionRequest,
onProgress: (progress: number) => void
): Promise<any> {
// Execute the workflow associated with the branch
const orchestrator = new WorkflowOrchestrator(this.modelClient);
return await orchestrator.executePattern(workflow.type, request, workflow.config);
}
}
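/*
 * Sketch of a ConditionalExecution config for BranchingHandler: bug reports
 * are routed to an iterative-refinement workflow, everything else falls
 * through to a sequential default branch. The keyword match is illustrative
 * and an empty object is assumed to satisfy ConfigurationObject.
 */
export function buildExampleBranchingConfig(): ConditionalExecution {
  const bugFixNode: WorkflowNode = {
    id: 'bug-fix',
    type: WorkflowPatternType.ITERATIVE,
    config: {},
    transitions: [],
  };
  const generalNode: WorkflowNode = {
    id: 'general',
    type: WorkflowPatternType.SEQUENTIAL,
    config: {},
    transitions: [],
  };
  return {
    evaluationOrder: 'sequential',
    defaultBranch: 'general',
    branches: [
      {
        id: 'bug-report',
        priority: 2,
        condition: context => /bug|error|crash/i.test(context?.request?.prompt ?? ''),
        workflow: bugFixNode,
      },
      { id: 'general', priority: 1, condition: () => false, workflow: generalNode },
    ],
  };
}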
class StreamingHandler extends PatternHandler {
private streamBuffer: any[] = [];
private processedChunks: any[] = [];
async execute(
request: ExecutionRequest,
config: RealTimeProcessing,
onProgress: (progress: number) => void
): Promise<ExecutionResult> {
const streamingEnabled = config?.streamingEnabled !== false;
const chunkSize = config?.chunkSize || 100;
const bufferStrategy = config?.bufferStrategy || 'sliding';
const onChunk = config?.onChunk || (chunk => {});
if (!streamingEnabled) {
// Non-streaming execution
const result = await this.modelClient.synthesize(request);
onProgress(100);
return {
success: true,
content: result,
metadata: { pattern: 'streaming', streamed: false },
};
}
// Initialize buffer based on strategy
const buffer = this.createBuffer(bufferStrategy, chunkSize * 10);
// Create streaming request
const streamRequest = {
...request,
stream: true,
};
onProgress(10);
try {
// Get stream from model
const streamResponse = await this.modelClient.streamRequest(
streamRequest,
token => {
// Process streaming token
if (token.content) {
buffer.add(token.content);
}
},
{ workingDirectory: '.', config: {}, files: [] }
);
let totalChunks = 0;
let accumulatedContent = '';
const startTime = Date.now();
// The streaming is handled by the onToken callback above
// streamResponse contains the final result
totalChunks = 1;
accumulatedContent = streamResponse.content || '';
// Process final result
const processed = await this.processChunk(accumulatedContent, buffer, config);
this.processedChunks.push(processed);
// Call chunk handler
onChunk(processed);
// Update progress
const estimatedProgress = Math.min(90, 10 + totalChunks * 2);
onProgress(estimatedProgress);
// Apply backpressure if needed
if (buffer.isFull()) {
await this.applyBackpressure(buffer);
}
// Final processing
const finalResult = await this.finalizeStream(
accumulatedContent,
this.processedChunks,
buffer
);
onProgress(100);
return {
success: true,
content: finalResult,
metadata: {
pattern: 'streaming',
streamed: true,
totalChunks,
bufferStrategy,
processingTime: Date.now() - startTime,
averageChunkSize: accumulatedContent.length / totalChunks,
bufferStats: buffer.getStats(),
},
};
} catch (error: unknown) {
return {
success: false,
content: `Streaming error: ${getErrorMessage(error)}`,
metadata: {
pattern: 'streaming',
error: getErrorMessage(error),
chunksProcessed: this.processedChunks.length,
},
};
}
}
private createBuffer(strategy: string, maxSize: number): StreamBuffer {
switch (strategy) {
case 'window':
return new WindowBuffer(maxSize);
case 'sliding':
return new SlidingBuffer(maxSize);
case 'circular':
return new CircularBuffer(maxSize);
default:
return new SlidingBuffer(maxSize);
}
}
private async processChunk(
chunk: any,
buffer: StreamBuffer,
config: RealTimeProcessing
): Promise<any> {
// Process individual chunk
const processed = {
content: chunk.text || chunk.content || chunk,
timestamp: Date.now(),
size: JSON.stringify(chunk).length,
metadata: chunk.metadata || {},
};
// Apply any transformations
if (config.transform) {
processed.content = config.transform(processed.content);
}
return processed;
}
private async applyBackpressure(buffer: StreamBuffer): Promise<void> {
// Wait for buffer to have space
const waitTime = Math.min(100, buffer.size() / 10);
await new Promise(resolve => setTimeout(resolve, waitTime));
// Process some items from buffer
const toProcess = Math.min(10, buffer.size() / 4);
for (let i = 0; i < toProcess; i