@stackmemoryai/stackmemory
Version:
Lossless, project-scoped memory for AI coding tools. Durable context across sessions with 56 MCP tools, FTS5 search, conductor orchestrator, loop/watch monitoring, snapshot capture, pre-flight overlap checks, Claude/Codex/OpenCode wrappers, Linear sync, and more.
609 lines (608 loc) • 21.4 kB
JavaScript
import { fileURLToPath as __fileURLToPath } from 'url';
import { dirname as __pathDirname } from 'path';
const __filename = __fileURLToPath(import.meta.url);
const __dirname = __pathDirname(__filename);
import * as fs from "fs";
import * as path from "path";
import { logger } from "../core/monitoring/logger.js";
import { ParallelExecutor } from "../core/execution/parallel-executor.js";
import { RecursiveContextManager } from "../core/context/recursive-context-manager.js";
import { ClaudeCodeSubagentClient } from "../integrations/claude-code/subagent-client.js";
import { STRUCTURED_RESPONSE_SUFFIX } from "../orchestrators/multimodal/constants.js";
/**
 * Orchestrates recursive task decomposition and execution across specialized
 * Claude subagents (planning, code, testing, linting, review, improve,
 * context, publish). A task is planned into a tree of nodes, executed with
 * bounded parallelism and recursion depth, then passed through multi-stage
 * review/improve cycles until a quality threshold is met.
 */
class RecursiveAgentOrchestrator {
  frameManager;
  contextRetriever;
  taskStore;
  parallelExecutor;
  contextManager;
  subagentClient;
  // Subagent configurations, keyed by agent type string ("planning", "code", ...).
  subagentConfigs;
  // Execution tracking: executionId -> root task node of that in-flight run.
  activeExecutions = /* @__PURE__ */ new Map();
  // Completed execution results, in completion order.
  executionHistory = [];
  // Default options; execute() callers may override any field.
  defaultOptions = {
    maxParallel: 5,
    maxRecursionDepth: 4,
    maxTokensPerAgent: 3e4,
    maxTotalCost: 50,
    // Quality over cost
    timeoutPerAgent: 300,
    retryFailedAgents: true,
    shareContextRealtime: true,
    testGenerationMode: "all",
    reviewStages: 3,
    // Multi-stage review
    qualityThreshold: 0.85,
    verboseLogging: true
    // Full transparency
  };
  /**
   * @param frameManager - creates/closes memory frames around each execution
   * @param dualStackManager - passed through to the RecursiveContextManager
   * @param contextRetriever - context lookup service shared with the context manager
   * @param taskStore - task persistence (held but not used directly in this class)
   */
  constructor(frameManager, dualStackManager, contextRetriever, taskStore) {
    this.frameManager = frameManager;
    this.contextRetriever = contextRetriever;
    this.taskStore = taskStore;
    this.parallelExecutor = new ParallelExecutor(
      this.defaultOptions.maxParallel
    );
    this.contextManager = new RecursiveContextManager(
      dualStackManager,
      contextRetriever
    );
    this.subagentClient = new ClaudeCodeSubagentClient();
    this.subagentConfigs = this.initializeSubagentConfigs();
    logger.info("RLM Orchestrator initialized", {
      maxParallel: this.defaultOptions.maxParallel,
      maxRecursion: this.defaultOptions.maxRecursionDepth,
      reviewStages: this.defaultOptions.reviewStages
    });
  }
  /**
   * Initialize subagent configurations with specialized prompts.
   * Each config fixes the model, token budget, temperature, system prompt
   * (with a structured-response suffix appended), and capability tags.
   * @returns Map of agent type -> config
   */
  initializeSubagentConfigs() {
    const configs = /* @__PURE__ */ new Map();
    configs.set("planning", {
      type: "planning",
      model: "claude-sonnet-4-5-20250929",
      maxTokens: 2e4,
      temperature: 0.3,
      systemPrompt: `You decompose tasks into parallel/sequential subtask trees.
Output JSON: { subtasks: [{ id, description, agent, dependencies[], parallel: bool }] }
Rules:
- Maximize parallelism \u2014 independent tasks run concurrently
- Each subtask names its agent type: planning, code, testing, linting, review, improve, context, publish
- Include failure modes and rollback steps for risky operations
- Keep subtask descriptions actionable (verb + object + constraint)` + STRUCTURED_RESPONSE_SUFFIX,
      capabilities: ["decompose", "analyze", "strategize", "prioritize"]
    });
    configs.set("code", {
      type: "code",
      model: "claude-sonnet-4-5-20250929",
      maxTokens: 3e4,
      temperature: 0.2,
      systemPrompt: `You implement code changes. Read existing code before modifying.
Output JSON: { success: bool, filesChanged: string[], changes: string[], notes: string[] }
Rules:
- Follow existing project conventions (naming, imports, patterns)
- Add .js extensions to relative TypeScript imports (ESM)
- Return undefined over throwing; log+continue over crash
- No emojis, no unnecessary comments, functions under 20 lines
- Validate inputs at system boundaries only` + STRUCTURED_RESPONSE_SUFFIX,
      capabilities: ["implement", "refactor", "optimize", "document"]
    });
    configs.set("testing", {
      type: "testing",
      model: "claude-sonnet-4-5-20250929",
      maxTokens: 25e3,
      temperature: 0.1,
      systemPrompt: `You generate and run tests using the project's test framework.
Output JSON: { success: bool, tests: [{ name, type, file }], coverage: string, notes: string[] }
Rules:
- Use vitest (describe/it/expect) \u2014 check existing tests for patterns
- Prioritize: critical paths > edge cases > happy paths
- Each test should assert meaningful behavior, not implementation details
- Use parameterized tests (it.each) to consolidate similar cases
- Run tests after writing: npm run test:run` + STRUCTURED_RESPONSE_SUFFIX,
      capabilities: [
        "generate-tests",
        "validate",
        "coverage-analysis",
        "test-execution"
      ]
    });
    configs.set("linting", {
      type: "linting",
      model: "claude-haiku-4-5-20251001",
      maxTokens: 15e3,
      temperature: 0,
      systemPrompt: `You run lint checks and fix issues.
Output JSON: { success: bool, issuesFound: number, issuesFixed: number, remaining: string[] }
Rules:
- Run: npm run lint (ESLint + Prettier)
- Auto-fix: npm run lint:fix
- ESM imports require .js extension on relative paths
- Report unfixable issues with file:line format` + STRUCTURED_RESPONSE_SUFFIX,
      capabilities: ["lint", "format", "type-check", "security-scan"]
    });
    configs.set("review", {
      type: "review",
      model: "claude-sonnet-4-5-20250929",
      maxTokens: 25e3,
      temperature: 0.2,
      systemPrompt: `You review code changes for quality, security, and correctness.
Output JSON: { qualityScore: 0-1, issues: [{ severity, file, line, description, suggestion }], approved: bool }
Rules:
- Score 0.85+ = approved, below = needs improvement
- Flag: SQL injection, XSS, secret exposure, command injection
- Flag: functions > 20 lines, cyclomatic complexity > 5
- Flag: missing error handling at system boundaries
- Suggest specific fixes, not vague improvements` + STRUCTURED_RESPONSE_SUFFIX,
      capabilities: [
        "review",
        "critique",
        "suggest-improvements",
        "quality-scoring"
      ]
    });
    configs.set("improve", {
      type: "improve",
      model: "claude-sonnet-4-5-20250929",
      maxTokens: 3e4,
      temperature: 0.3,
      systemPrompt: `You implement review feedback and improve code quality.
Output JSON: { success: bool, improvements: string[], filesChanged: string[] }
Rules:
- Apply only the specific improvements requested \u2014 no scope creep
- Maintain backward compatibility unless explicitly breaking
- Run lint + tests after changes to verify nothing regressed
- Keep changes minimal and focused` + STRUCTURED_RESPONSE_SUFFIX,
      capabilities: ["enhance", "refactor", "optimize", "polish"]
    });
    configs.set("context", {
      type: "context",
      model: "claude-haiku-4-5-20251001",
      maxTokens: 1e4,
      temperature: 0,
      systemPrompt: `You retrieve relevant context from the codebase and specs.
Output JSON: { context: string, sources: string[], relevanceScore: 0-1 }
Rules:
- Check docs/specs/ for ONE_PAGER.md, DEV_SPEC.md, PROMPT_PLAN.md
- Check CLAUDE.md and AGENTS.md for project conventions
- Search src/ for relevant implementations
- Return concise summaries, not full file contents` + STRUCTURED_RESPONSE_SUFFIX,
      capabilities: ["search", "retrieve", "summarize", "contextualize"]
    });
    configs.set("publish", {
      type: "publish",
      model: "claude-haiku-4-5-20251001",
      maxTokens: 15e3,
      temperature: 0,
      systemPrompt: `You handle releases and publishing.
Output JSON: { success: bool, version: string, actions: string[] }
Rules:
- Verify lint + tests + build pass before any publish
- Follow semver: breaking=major, feature=minor, fix=patch
- Generate changelog from git log since last tag
- Never force-push or skip pre-publish hooks` + STRUCTURED_RESPONSE_SUFFIX,
      capabilities: ["publish-npm", "github-release", "deploy", "document"]
    });
    return configs;
  }
  /**
   * Execute a task with recursive decomposition.
   * Plans the task into a tree, executes it, runs multi-stage review, and
   * records the result in a memory frame and the in-process history.
   * @param task - natural-language task description
   * @param context - mutable context object shared across nodes
   * @param options - partial overrides of defaultOptions
   * @returns execution result summary (success, cost, tokens, counts)
   * @throws rethrows any planning/frame error after logging it
   */
  async execute(task, context, options) {
    const opts = { ...this.defaultOptions, ...options };
    const executionId = this.generateExecutionId();
    const startTime = Date.now();
    logger.info("Starting RLM execution", {
      executionId,
      task: task.slice(0, 100),
      options: opts
    });
    try {
      const rootFrameId = await this.createExecutionFrame(executionId, task);
      const rootNode = await this.planTask(task, context, opts);
      this.activeExecutions.set(executionId, rootNode);
      if (opts.verboseLogging) {
        this.logExecutionTree(rootNode);
      }
      await this.executeTaskTree(rootNode, context, opts, 0);
      const improvements = await this.performMultiStageReview(
        rootNode,
        opts.reviewStages,
        opts.qualityThreshold
      );
      const result = {
        success: rootNode.status === "completed",
        rootNode,
        totalTokens: this.calculateTotalTokens(rootNode),
        totalCost: this.calculateTotalCost(rootNode),
        duration: Date.now() - startTime,
        improvements,
        testsGenerated: this.countGeneratedTests(rootNode),
        issuesFound: this.countIssuesFound(rootNode),
        issuesFixed: this.countIssuesFixed(rootNode)
      };
      this.executionHistory.push(result);
      await this.updateExecutionFrame(rootFrameId, result);
      logger.info("RLM execution completed", {
        executionId,
        success: result.success,
        duration: result.duration,
        totalCost: result.totalCost,
        testsGenerated: result.testsGenerated,
        improvements: improvements.length
      });
      return result;
    } catch (error) {
      logger.error("RLM execution failed", { executionId, error });
      throw error;
    } finally {
      // Always release the live-execution slot, success or failure.
      this.activeExecutions.delete(executionId);
    }
  }
  /**
   * Plan task decomposition via the planning agent, then inject test and
   * review nodes into the resulting tree.
   * NOTE(review): parseTaskTree is currently a stub that ignores the planner
   * response and returns a fixed root node — confirm before relying on the
   * planner's subtask output here.
   */
  async planTask(task, context, options) {
    const response = await this.subagentClient.executeSubagent({
      type: "planning",
      task,
      context: {
        ...context,
        requirements: options
      }
    });
    const taskTree = this.parseTaskTree(JSON.stringify(response.result));
    this.injectTestGenerationNodes(taskTree, options.testGenerationMode);
    this.injectReviewStages(taskTree, options.reviewStages);
    return taskTree;
  }
  /**
   * Execute a task tree recursively with parallelization.
   * Parallel nodes fan out through the ParallelExecutor; sequential nodes run
   * children in order, feeding each child's result back into the shared
   * context; anything else executes as a leaf. Failed nodes are retried up to
   * 3 times when retryFailedAgents is set; a node that exhausts its retries is
   * marked "failed" without rethrowing (log+continue convention), so the
   * parent may still complete.
   */
  async executeTaskTree(node, context, options, depth) {
    if (depth >= options.maxRecursionDepth) {
      logger.warn("Max recursion depth reached", { nodeId: node.id, depth });
      node.status = "failed";
      node.error = new Error("Max recursion depth exceeded");
      return;
    }
    if (options.verboseLogging) {
      logger.info(`Executing node: ${node.description}`, {
        id: node.id,
        type: node.type,
        agent: node.agent,
        depth
      });
    }
    node.status = "running";
    node.startTime = /* @__PURE__ */ new Date();
    try {
      if (node.type === "parallel" && node.children) {
        await this.parallelExecutor.executeParallel(
          node.children,
          async (child) => {
            await this.executeTaskTree(child, context, options, depth + 1);
          }
        );
      } else if (node.type === "sequential" && node.children) {
        for (const child of node.children) {
          await this.executeTaskTree(child, context, options, depth + 1);
          if (child.result) {
            // Later siblings can read earlier siblings' results.
            context[`${child.id}_result`] = child.result;
          }
        }
      } else {
        await this.executeLeafNode(node, context, options);
      }
      node.status = "completed";
    } catch (error) {
      logger.error(`Node execution failed: ${node.description}`, { error });
      // FIX: guard with ?? 0 — a node created without an `attempts` field
      // previously compared `undefined < 3` (false) and was never retried.
      if (options.retryFailedAgents && (node.attempts ?? 0) < 3) {
        node.attempts = (node.attempts ?? 0) + 1;
        logger.info(`Retrying node: ${node.description}`, {
          attempt: node.attempts
        });
        // Retry at the same depth; the recursive call manages status itself.
        await this.executeTaskTree(node, context, options, depth);
      } else {
        node.status = "failed";
        node.error = error;
      }
    } finally {
      node.endTime = /* @__PURE__ */ new Date();
      if (options.verboseLogging) {
        const duration = node.endTime.getTime() - (node.startTime?.getTime() ?? 0);
        logger.info(`Completed node: ${node.description}`, {
          id: node.id,
          status: node.status,
          duration,
          tokens: node.tokens,
          cost: node.cost
        });
      }
    }
  }
  /**
   * Execute a leaf node with the appropriate agent: build a token-budgeted
   * context, dispatch to the subagent, and record result/tokens/cost on the
   * node.
   * @throws Error when node.agent has no registered config
   */
  async executeLeafNode(node, context, options) {
    const agentConfig = this.subagentConfigs.get(node.agent);
    if (!agentConfig) {
      throw new Error(`Unknown agent type: ${node.agent}`);
    }
    const agentContext = await this.contextManager.prepareAgentContext(
      node.agent,
      context,
      options.maxTokensPerAgent
    );
    const taskDescription = this.buildAgentPrompt(node, agentContext);
    const response = await this.subagentClient.executeSubagent({
      type: node.agent,
      task: taskDescription,
      context: agentContext
    });
    node.result = response.result;
    // Fall back to a length-based estimate when the client reports no usage.
    node.tokens = response.tokens || this.estimateTokens(JSON.stringify(response));
    node.cost = this.calculateNodeCost(node.tokens, agentConfig.model);
    if (options.shareContextRealtime) {
      await this.shareAgentResults(node);
    }
  }
  /**
   * Perform multi-stage review and improvement. Each stage runs a review
   * agent; if quality is below the threshold and stages remain, an improve
   * agent applies the feedback. Stops early once the threshold is met.
   * @returns collected improvement suggestions across all stages
   */
  async performMultiStageReview(rootNode, stages, qualityThreshold) {
    const improvements = [];
    let currentQuality = 0;
    for (let stage = 1; stage <= stages; stage++) {
      logger.info(`Starting review stage ${stage}/${stages}`);
      // Local bookkeeping node — not attached to the tree.
      const reviewNode = {
        id: `review-stage-${stage}`,
        type: "task",
        description: `Review stage ${stage}`,
        agent: "review",
        dependencies: [],
        context: { rootNode, stage },
        status: "pending",
        attempts: 0
      };
      const reviewResponse = await this.subagentClient.executeSubagent({
        type: "review",
        task: `Review stage ${stage}: Analyze code quality and suggest improvements`,
        context: { rootNode, stage }
      });
      reviewNode.result = reviewResponse.result;
      reviewNode.status = reviewResponse.success ? "completed" : "failed";
      const reviewResult = reviewResponse.result;
      // FIX: the review agent's declared schema emits `qualityScore`, but this
      // code read `quality`, so the score always defaulted to 0.5. Read the
      // declared field first with a legacy fallback, and use ?? so a
      // legitimate score of 0 is not clobbered.
      currentQuality = reviewResult.qualityScore ?? reviewResult.quality ?? 0.5;
      // FIX: per the declared schema, suggestions live on issues[].suggestion;
      // use them when a flat `suggestions` array is absent.
      const suggestions = Array.isArray(reviewResult.suggestions)
        ? reviewResult.suggestions
        : Array.isArray(reviewResult.issues)
          ? reviewResult.issues.map((issue) => issue?.suggestion).filter(Boolean)
          : [];
      if (suggestions.length > 0) {
        improvements.push(...suggestions);
      } else {
        improvements.push(
          `Stage ${stage}: Review completed with quality ${currentQuality}`
        );
      }
      logger.info(`Review stage ${stage} complete`, {
        quality: currentQuality,
        issues: reviewResult.issues?.length || 0,
        suggestions: suggestions.length
      });
      if (currentQuality >= qualityThreshold) {
        logger.info(
          `Quality threshold met: ${currentQuality} >= ${qualityThreshold}`
        );
        break;
      }
      if (stage < stages) {
        // Local bookkeeping node — not attached to the tree.
        const improveNode = {
          id: `improve-stage-${stage}`,
          type: "task",
          description: `Improvement stage ${stage}`,
          agent: "improve",
          dependencies: [reviewNode.id],
          context: { reviewResult, rootNode },
          status: "pending",
          attempts: 0
        };
        const improveResponse = await this.subagentClient.executeSubagent({
          type: "improve",
          task: `Improvement stage ${stage}: Implement suggested improvements`,
          context: { reviewResult, rootNode }
        });
        improveNode.result = improveResponse.result;
        improveNode.status = improveResponse.success ? "completed" : "failed";
        this.applyImprovements(rootNode, improveNode.result);
      }
    }
    return improvements;
  }
  /**
   * Helper methods
   */
  // Unique-enough id for one execution run (timestamp + 6 random base36 chars).
  generateExecutionId() {
    return `rlm-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
  }
  // Open a memory frame recording this execution's inputs.
  async createExecutionFrame(executionId, task) {
    return this.frameManager.createFrame({
      name: `RLM: ${task.slice(0, 50)}`,
      type: "task",
      inputs: { executionId, task, type: "rlm-execution" }
    });
  }
  // Close the execution frame with the serialized result.
  // FIX: await closeFrame so callers of this async method observe completion
  // (and any rejection) instead of leaving a floating promise.
  async updateExecutionFrame(frameId, result) {
    await this.frameManager.closeFrame(frameId, {
      type: "rlm-result",
      content: JSON.stringify(result, null, 2),
      success: result.success,
      duration: result.duration,
      totalTokens: result.totalTokens,
      totalCost: result.totalCost
    });
  }
  // Pretty-print the task tree to stdout with per-node status glyphs.
  logExecutionTree(node, depth = 0) {
    const indent = "  ".repeat(depth);
    const status = node.status === "completed" ? "\u2713" : node.status === "failed" ? "\u2717" : node.status === "running" ? "\u27F3" : "\u25CB";
    console.log(`${indent}${status} ${node.description} [${node.agent}]`);
    if (node.children) {
      for (const child of node.children) {
        this.logExecutionTree(child, depth + 1);
      }
    }
  }
  // Stub: ignores the planner response and returns a fixed sequential root.
  parseTaskTree(_response) {
    return {
      id: "root",
      type: "sequential",
      description: "Root task",
      agent: "planning",
      dependencies: [],
      context: {},
      status: "pending",
      attempts: 0,
      children: []
    };
  }
  // Append a testing node (dependent on `node`) to the node's children.
  injectTestGenerationNodes(node, _mode) {
    if (!node.children) {
      node.children = [];
    }
    const testNode = {
      id: `${node.id}-test`,
      type: "task",
      description: `Generate ${_mode} tests for ${node.description}`,
      agent: "testing",
      dependencies: [node.id],
      context: { testMode: _mode },
      status: "pending",
      attempts: 0
    };
    node.children.push(testNode);
  }
  // Stub: review stages are driven by performMultiStageReview instead.
  injectReviewStages(_node, _stages) {
  }
  // Read docs/specs/* files (truncated to 2000 chars each) into a markdown
  // section, or "" when the spec directory is absent.
  loadSpecContext() {
    const specDir = path.join(process.cwd(), "docs", "specs");
    if (!fs.existsSync(specDir)) return "";
    const specFiles = ["ONE_PAGER.md", "DEV_SPEC.md", "PROMPT_PLAN.md"];
    const sections = [];
    for (const file of specFiles) {
      const filePath = path.join(specDir, file);
      if (fs.existsSync(filePath)) {
        const content = fs.readFileSync(filePath, "utf-8");
        const truncated = content.length > 2e3 ? content.slice(0, 2e3) + "\n...[truncated]" : content;
        sections.push(`### ${file}
${truncated}`);
      }
    }
    return sections.length > 0 ? `
## Project Specs
${sections.join("\n\n")}` : "";
  }
  // Assemble the markdown prompt for a node: task, role, context, dependency
  // results, spec excerpts (planning/code agents only), and constraints.
  buildAgentPrompt(node, context) {
    // NOTE(review): activeExecutions is keyed by executionId, but `id` here is
    // a dependency *node* id, so this lookup likely never hits — confirm
    // whether nodes should be registered in a separate id -> node index.
    const depResults = node.dependencies.map((id) => {
      const dep = this.activeExecutions.get(id);
      if (!dep?.result) return null;
      return { id, agent: dep.agent, result: dep.result };
    }).filter(Boolean);
    const specContext = node.agent === "planning" || node.agent === "code" ? this.loadSpecContext() : "";
    return [
      `## Task`,
      node.description,
      "",
      `## Agent Role: ${node.agent}`,
      `Config: ${JSON.stringify(this.subagentConfigs.get(node.agent)?.capabilities || [])}`,
      "",
      `## Context`,
      JSON.stringify(context, null, 2),
      "",
      ...depResults.length > 0 ? [`## Dependency Results`, JSON.stringify(depResults, null, 2), ""] : [],
      ...specContext ? [specContext, ""] : [],
      `## Constraints`,
      `- ESM imports: use .js extensions on relative imports`,
      `- Testing: vitest (not jest)`,
      `- Lint: npm run lint (eslint + prettier)`,
      `- Output structured JSON when possible`
    ].join("\n");
  }
  // Rough token estimate: ~4 characters per token.
  estimateTokens(text) {
    return Math.ceil(text.length / 4);
  }
  // Stub hook for realtime result sharing between agents.
  async shareAgentResults(_node) {
    logger.debug("Sharing agent results", { nodeId: _node.id });
  }
  // Stub hook for applying improve-agent output to the tree.
  applyImprovements(_rootNode, improvements) {
    logger.debug("Applying improvements", { improvements });
  }
  // Sum `tokens` over the node and all descendants.
  calculateTotalTokens(node) {
    let total = node.tokens || 0;
    if (node.children) {
      for (const child of node.children) {
        total += this.calculateTotalTokens(child);
      }
    }
    return total;
  }
  // Sum `cost` over the node and all descendants.
  calculateTotalCost(node) {
    let total = node.cost || 0;
    if (node.children) {
      for (const child of node.children) {
        total += this.calculateTotalCost(child);
      }
    }
    return total;
  }
  // Cost in dollars: tokens * (price per million tokens). Unknown models
  // fall back to a conservative $10/M.
  calculateNodeCost(tokens, model) {
    const pricing = {
      "claude-sonnet-4-5-20250929": 15,
      "claude-haiku-4-5-20251001": 1,
      "claude-opus-4-6": 75,
      // External providers — much cheaper
      "llama-4-scout-17b-16e-instruct": 0.35,
      "THUDM/glm-4-9b-chat": 0.06
    };
    const modelName = typeof model === "string" ? model : model.model;
    return tokens / 1e6 * (pricing[modelName] || 10);
  }
  // Count tests reported by testing nodes across the tree.
  countGeneratedTests(node) {
    let count = 0;
    if (node.agent === "testing" && node.result?.tests) {
      count += node.result.tests.length;
    }
    if (node.children) {
      for (const child of node.children) {
        count += this.countGeneratedTests(child);
      }
    }
    return count;
  }
  // Count issues reported by review/linting nodes across the tree.
  countIssuesFound(node) {
    let count = 0;
    if ((node.agent === "review" || node.agent === "linting") && node.result?.issues) {
      count += node.result.issues.length;
    }
    if (node.children) {
      for (const child of node.children) {
        count += this.countIssuesFound(child);
      }
    }
    return count;
  }
  // Count fixes reported by improve nodes across the tree.
  countIssuesFixed(node) {
    let count = 0;
    if (node.agent === "improve" && node.result?.fixed) {
      count += node.result.fixed.length;
    }
    if (node.children) {
      for (const child of node.children) {
        count += this.countIssuesFixed(child);
      }
    }
    return count;
  }
}
export {
RecursiveAgentOrchestrator
};