@stackmemoryai/stackmemory
Version:
Project-scoped memory for AI coding tools. Durable context across sessions with MCP integration, frames, smart retrieval, Claude Code skills, and automatic hooks.
1,177 lines (1,157 loc) • 39.9 kB
JavaScript
import { fileURLToPath as __fileURLToPath } from 'url';
import { dirname as __pathDirname } from 'path';
const __filename = __fileURLToPath(import.meta.url);
const __dirname = __pathDirname(__filename);
import { logger } from "../core/monitoring/logger.js";
import {
RepoIngestionSkill
} from "./repo-ingestion-skill.js";
import {
RecursiveAgentOrchestrator
} from "./recursive-agent-orchestrator.js";
import { getAPISkill } from "./api-skill.js";
import { SpecGeneratorSkill } from "./spec-generator-skill.js";
import { LinearTaskRunner } from "./linear-task-runner.js";
import * as fs from "fs";
import * as path from "path";
import * as os from "os";
class HandoffSkill {
  /**
   * Hands a set of work frames off to another team member by copying them
   * onto a shared stack and notifying the target user via the handoff manager.
   */
  constructor(context) {
    this.context = context;
  }
  /**
   * Initiate a handoff to `targetUser`.
   *
   * Frames come from `options.frames` when given; otherwise (unless
   * `options.autoDetect === false`) the last five completed-or-errored frames
   * on the active stack are picked automatically.
   *
   * @param {string} targetUser - recipient user id / handle
   * @param {string} message - free-text handoff note (first 50 chars name a new shared stack if needed)
   * @param {object} [options] - { frames, autoDetect, priority }
   * @returns {Promise<object>} skill result: { success, message, data?, action? }
   */
  async execute(targetUser, message, options) {
    try {
      const stack = this.context.dualStackManager.getActiveStack();
      let frameIds = options?.frames || [];
      const shouldAutoDetect = options?.autoDetect !== false && frameIds.length === 0;
      if (shouldAutoDetect) {
        const allFrames = await stack.getAllFrames();
        // A frame is handoff-worthy if it finished, or if it produced an error output.
        const candidates = allFrames.filter((f) => {
          if (f.state === "completed") return true;
          return Boolean(
            f.outputs && Array.isArray(f.outputs) && f.outputs.some((o) => o.type === "error")
          );
        });
        frameIds = candidates.slice(-5).map((f) => f.frameId);
      }
      if (frameIds.length === 0) {
        return {
          success: false,
          message: "No frames to handoff. Specify frames or complete some work first."
        };
      }
      const details = await Promise.all(frameIds.map((id) => stack.getFrame(id)));
      const summary = this.generateHandoffSummary(details, message);
      const metadata = {
        initiatedAt: new Date(),
        initiatorId: this.context.userId,
        targetUserId: targetUser,
        frameContext: {
          totalFrames: frameIds.length,
          frameTypes: [...new Set(details.map((f) => f?.type || "unknown"))],
          estimatedSize: JSON.stringify(details).length,
          dependencies: this.extractDependencies(details)
        },
        businessContext: {
          priority: options?.priority || "medium",
          stakeholders: [targetUser]
        }
      };
      // Reuse an existing shared stack when available; create one otherwise.
      const stacks = await this.context.dualStackManager.getAvailableStacks();
      let sharedStackId = stacks.find((s) => s.type === "shared")?.stackId;
      if (!sharedStackId) {
        sharedStackId = await this.context.dualStackManager.createSharedStack(
          "team",
          `Handoff: ${message.slice(0, 50)}`,
          this.context.userId
        );
      }
      const handoffId = await this.context.handoffManager.initiateHandoff(
        sharedStackId,
        frameIds,
        metadata,
        targetUser,
        summary
      );
      return {
        success: true,
        message: `Handoff initiated to @${targetUser}`,
        data: {
          handoffId,
          frameCount: frameIds.length,
          priority: options?.priority || "medium",
          actionItems: this.generateActionItems(details),
          targetStack: sharedStackId
        },
        action: `Notified ${targetUser}. Handoff ID: ${handoffId}`
      };
    } catch (error) {
      logger.error("Handoff skill error:", error);
      return {
        success: false,
        message: `Failed to initiate handoff: ${error.message}`
      };
    }
  }
  /**
   * Render a markdown summary of the frames being handed off:
   * completed work, frames needing attention (error outputs), and AI context.
   */
  generateHandoffSummary(frames, message) {
    const done = frames.filter((f) => f?.state === "completed");
    const withErrors = frames.filter((f) => f?.outputs?.some((o) => o.type === "error"));
    const doneLines = done
      .map((f) => `- ${f.name}: ${f.digest_deterministic?.summary || "No summary"}`)
      .join("\n");
    const errorLines = withErrors
      .map((f) => `- ${f.name}: ${f.outputs.find((o) => o.type === "error")?.content || "Error"}`)
      .join("\n");
    const contextLines = frames
      .map((f) => f?.digest_ai?.context || "")
      .filter(Boolean)
      .join("\n");
    return `
## Handoff Summary
**Message**: ${message}
**Frames**: ${frames.length} total (${done.length} completed, ${withErrors.length} blocked)
### Work Completed:
${doneLines}
### Attention Required:
${errorLines || "None"}
### Context:
${contextLines}
`.trim();
  }
  /**
   * Collect unique dependency identifiers from the frames' declared inputs
   * and any "dependency"-typed outputs.
   */
  extractDependencies(frames) {
    const collected = new Set();
    for (const frame of frames) {
      const declared = frame?.inputs?.dependencies;
      if (Array.isArray(declared)) {
        for (const dep of declared) collected.add(dep);
      }
      if (frame?.outputs) {
        for (const output of frame.outputs) {
          if (output.type === "dependency") collected.add(output.content);
        }
      }
    }
    return [...collected];
  }
  /**
   * Derive a to-do list for the recipient: explicit TODO outputs,
   * unresolved errors, and missing tests for implementation frames.
   */
  generateActionItems(frames) {
    const actionItems = [];
    for (const frame of frames) {
      if (frame?.outputs) {
        for (const output of frame.outputs) {
          if (output.type === "todo" || output.content?.includes("TODO")) {
            actionItems.push(output.content);
          }
        }
      }
      if (frame?.outputs?.some((o) => o.type === "error")) {
        actionItems.push(`Resolve error in ${frame.name}`);
      }
      const needsTests =
        frame?.inputs?.tests === "pending" ||
        frame?.type === "implementation" ||
        Boolean(frame?.name && frame.name.toLowerCase().includes("implementation"));
      if (needsTests) {
        actionItems.push(`Write tests for ${frame.name}`);
      }
    }
    return actionItems;
  }
}
class CheckpointSkill {
  /**
   * Creates, restores, lists, and diffs point-in-time snapshots of the active
   * stack's frames (plus optional file backups). Checkpoints are stored as
   * JSON under ~/.stackmemory/checkpoints/<projectId>/.
   */
  constructor(context) {
    this.context = context;
    this.checkpointDir = path.join(
      os.homedir(),
      ".stackmemory",
      "checkpoints",
      context.projectId
    );
    // Ensure the per-project checkpoint directory exists up front.
    fs.mkdirSync(this.checkpointDir, { recursive: true });
  }
  checkpointDir;
  /**
   * Create a checkpoint of all frames on the active stack.
   *
   * @param {string} description - human-readable label
   * @param {object} [options] - { metadata, includeFiles, autoDetectRisky }
   * @returns {Promise<object>} { success, message, data?: { checkpointId, timestamp, frameCount, location } }
   */
  async create(description, options) {
    try {
      const timestamp = Date.now();
      const checkpointId = `checkpoint-${timestamp}-${Math.random().toString(36).slice(2, 8)}`;
      const activeStack = this.context.dualStackManager.getActiveStack();
      const currentContext = this.context.dualStackManager.getCurrentContext();
      const allFrames = await activeStack.getAllFrames();
      const checkpoint = {
        id: checkpointId,
        timestamp,
        description,
        context: {
          stackId: currentContext.stackId,
          stackType: currentContext.type,
          userId: this.context.userId,
          projectId: this.context.projectId
        },
        frames: allFrames,
        metadata: {
          ...options?.metadata,
          frameCount: allFrames.length,
          activeFrames: allFrames.filter((f) => f.state === "active").length,
          completedFrames: allFrames.filter((f) => f.state === "completed").length
        },
        files: options?.includeFiles || []
      };
      const checkpointPath = path.join(
        this.checkpointDir,
        `${checkpointId}.json`
      );
      // Back up requested files alongside the checkpoint JSON (flat, by basename).
      if (options?.includeFiles && options.includeFiles.length > 0) {
        const filesDir = path.join(this.checkpointDir, checkpointId, "files");
        fs.mkdirSync(filesDir, { recursive: true });
        for (const file of options.includeFiles) {
          if (fs.existsSync(file)) {
            const backupPath = path.join(filesDir, path.basename(file));
            fs.copyFileSync(file, backupPath);
          }
        }
      }
      if (options?.autoDetectRisky) {
        // Flag checkpoints taken around potentially destructive operations.
        const riskyPatterns = [
          "migration",
          "database",
          "deploy",
          "production",
          "delete",
          "remove",
          "drop",
          "migrate"
        ];
        const isRisky = allFrames.some((frame) => {
          const nameMatches = frame.name && riskyPatterns.some(
            (pattern) => frame.name.toLowerCase().includes(pattern)
          );
          const commandMatches = frame.inputs?.command && riskyPatterns.some(
            (pattern) => frame.inputs.command.toLowerCase().includes(pattern)
          );
          return nameMatches || commandMatches;
        });
        if (isRisky) {
          checkpoint.metadata.riskyOperation = true;
          checkpoint.metadata.autoCheckpoint = true;
        }
      }
      // Single write AFTER risky-detection so the metadata flags are persisted.
      // (Previously the file was written twice; the first write was redundant.)
      fs.writeFileSync(checkpointPath, JSON.stringify(checkpoint, null, 2));
      logger.info(`Created checkpoint: ${checkpointId}`);
      return {
        success: true,
        message: `Checkpoint created: ${description}`,
        data: {
          checkpointId,
          timestamp: new Date(timestamp).toISOString(),
          frameCount: checkpoint.metadata.frameCount,
          location: checkpointPath
        },
        action: `Saved checkpoint ${checkpointId}`
      };
    } catch (error) {
      logger.error("Checkpoint creation error:", error);
      return {
        success: false,
        message: `Failed to create checkpoint: ${error.message}`
      };
    }
  }
  /**
   * Restore a checkpoint: switch back to its stack and copy any backed-up
   * files over their original locations. Accepts a full or partial id.
   *
   * @param {string} checkpointId - exact id, or substring matched against filenames
   */
  async restore(checkpointId) {
    try {
      let checkpointPath = path.join(
        this.checkpointDir,
        `${checkpointId}.json`
      );
      if (!fs.existsSync(checkpointPath)) {
        // Fuzzy match on partial ids.
        const files = fs.readdirSync(this.checkpointDir);
        const match = files.find((f) => f.includes(checkpointId));
        if (!match) {
          return {
            success: false,
            message: `Checkpoint not found: ${checkpointId}`
          };
        }
        checkpointId = match.replace(".json", "");
        // BUG FIX: recompute the path after a fuzzy match — previously the
        // stale (non-existent) path was read, so partial-id restores failed.
        checkpointPath = path.join(this.checkpointDir, `${checkpointId}.json`);
      }
      const checkpoint = JSON.parse(fs.readFileSync(checkpointPath, "utf-8"));
      await this.context.dualStackManager.switchToStack(
        checkpoint.context.stackId
      );
      // Restore any file backups taken at create() time.
      const filesDir = path.join(this.checkpointDir, checkpointId, "files");
      if (fs.existsSync(filesDir)) {
        const files = fs.readdirSync(filesDir);
        for (const file of files) {
          const backupPath = path.join(filesDir, file);
          const originalPath = checkpoint.files.find(
            (f) => path.basename(f) === file
          );
          if (originalPath && fs.existsSync(backupPath)) {
            fs.copyFileSync(backupPath, originalPath);
          }
        }
      }
      logger.info(`Restored checkpoint: ${checkpointId}`);
      return {
        success: true,
        message: `Restored to checkpoint: ${checkpoint.description}`,
        data: {
          checkpointId,
          timestamp: new Date(checkpoint.timestamp).toISOString(),
          frameCount: checkpoint.metadata.frameCount,
          filesRestored: checkpoint.files.length
        },
        action: `Restored checkpoint from ${new Date(checkpoint.timestamp).toLocaleString()}`
      };
    } catch (error) {
      logger.error("Checkpoint restoration error:", error);
      return {
        success: false,
        message: `Failed to restore checkpoint: ${error.message}`
      };
    }
  }
  /**
   * List recent checkpoints, newest first.
   *
   * @param {object} [options] - { since?: Date, limit?: number (default 10) }
   */
  async list(options) {
    try {
      // Read each checkpoint file individually; skip unparsable files instead
      // of failing the whole listing on one corrupt entry.
      const checkpoints = [];
      for (const f of fs.readdirSync(this.checkpointDir)) {
        if (!f.endsWith(".json")) continue;
        const checkpointPath = path.join(this.checkpointDir, f);
        try {
          checkpoints.push(JSON.parse(fs.readFileSync(checkpointPath, "utf-8")));
        } catch (parseError) {
          logger.warn(`Skipping unreadable checkpoint file: ${f}`, parseError);
        }
      }
      const files = checkpoints
        .filter((c) => !options?.since || c.timestamp > options.since.getTime())
        .sort((a, b) => b.timestamp - a.timestamp)
        .slice(0, options?.limit || 10);
      return {
        success: true,
        message: `Found ${files.length} checkpoints`,
        data: files.map((c) => ({
          id: c.id,
          description: c.description,
          timestamp: new Date(c.timestamp).toISOString(),
          frameCount: c.metadata.frameCount,
          risky: c.metadata.riskyOperation || false
        }))
      };
    } catch (error) {
      logger.error("Checkpoint list error:", error);
      return {
        success: false,
        message: `Failed to list checkpoints: ${error.message}`
      };
    }
  }
  /**
   * Compare two checkpoints: frames added/removed/modified and elapsed time.
   */
  async diff(checkpoint1, checkpoint2) {
    try {
      const cp1 = await this.loadCheckpoint(checkpoint1);
      const cp2 = await this.loadCheckpoint(checkpoint2);
      if (!cp1 || !cp2) {
        return {
          success: false,
          message: "One or both checkpoints not found"
        };
      }
      const diff = {
        timeDiff: Math.abs(cp2.timestamp - cp1.timestamp),
        framesDiff: cp2.frames.length - cp1.frames.length,
        newFrames: cp2.frames.filter(
          (f2) => !cp1.frames.some((f1) => f1.frameId === f2.frameId)
        ),
        removedFrames: cp1.frames.filter(
          (f1) => !cp2.frames.some((f2) => f2.frameId === f1.frameId)
        ),
        // "Modified" = same frameId but any serialized difference.
        modifiedFrames: cp2.frames.filter((f2) => {
          const f1 = cp1.frames.find((f) => f.frameId === f2.frameId);
          return f1 && JSON.stringify(f1) !== JSON.stringify(f2);
        })
      };
      return {
        success: true,
        message: `Diff between ${cp1.description} and ${cp2.description}`,
        data: {
          timeDiff: `${Math.round(diff.timeDiff / 1e3 / 60)} minutes`,
          framesDiff: diff.framesDiff > 0 ? `+${diff.framesDiff}` : `${diff.framesDiff}`,
          newFrames: diff.newFrames.length,
          removedFrames: diff.removedFrames.length,
          modifiedFrames: diff.modifiedFrames.length,
          details: diff
        }
      };
    } catch (error) {
      logger.error("Checkpoint diff error:", error);
      return {
        success: false,
        message: `Failed to diff checkpoints: ${error.message}`
      };
    }
  }
  /**
   * Load a checkpoint by exact or partial id; returns null when not found.
   */
  async loadCheckpoint(checkpointId) {
    const checkpointPath = path.join(
      this.checkpointDir,
      `${checkpointId}.json`
    );
    if (fs.existsSync(checkpointPath)) {
      return JSON.parse(fs.readFileSync(checkpointPath, "utf-8"));
    }
    const files = fs.readdirSync(this.checkpointDir);
    const match = files.find((f) => f.includes(checkpointId));
    if (match) {
      // BUG FIX: this previously read `const path2 = path2.join(...)`, a
      // temporal-dead-zone ReferenceError — partial-id loads always threw.
      const matchedPath = path.join(this.checkpointDir, match);
      return JSON.parse(fs.readFileSync(matchedPath, "utf-8"));
    }
    return null;
  }
}
class ArchaeologistSkill {
  /**
   * Digs through historical project context: retrieves relevant frames via the
   * context retriever, then optionally extracts work patterns, decisions, and
   * an activity timeline, plus a markdown report.
   */
  constructor(context) {
    this.context = context;
  }
  /**
   * Search historical context for `query`.
   *
   * @param {string} query - semantic search query
   * @param {object} [options] - { depth: "30days"|"2weeks"|"all"|…, patterns, decisions, timeline }
   * @returns {Promise<object>} { success, message, data?: { totalResults, timeRange, topResults, patterns, decisions, timeline, summary } }
   */
  async dig(query, options) {
    try {
      const depth = this.parseDepth(options?.depth || "30days");
      // For "all", Date.now() - MAX_SAFE_INTEGER is outside the representable
      // Date range (Invalid Date): toISOString() would throw and every result
      // would be filtered out. Clamp to the epoch instead.
      const since = depth >= Number.MAX_SAFE_INTEGER ? new Date(0) : new Date(Date.now() - depth);
      const results = await this.context.contextRetriever.retrieve({
        query,
        projectId: this.context.projectId,
        limit: 50,
        minScore: 0.3
      });
      const filtered = results.filter((r) => new Date(r.timestamp) > since);
      let patterns = [];
      if (options?.patterns) {
        patterns = this.extractPatterns(filtered);
      }
      let decisions = [];
      if (options?.decisions) {
        decisions = this.extractDecisions(filtered);
      }
      let timeline = [];
      if (options?.timeline) {
        timeline = this.generateTimeline(filtered);
      }
      const topResults = filtered.slice(0, 10);
      const summary = this.generateArchaeologySummary(
        topResults,
        patterns,
        decisions,
        timeline
      );
      return {
        success: true,
        message: `Found ${filtered.length} relevant results`,
        data: {
          totalResults: filtered.length,
          timeRange: {
            from: since.toISOString(),
            to: new Date().toISOString()
          },
          topResults: topResults.map((r) => ({
            frameId: r.frameId,
            score: r.score,
            timestamp: r.timestamp,
            summary: r.content.slice(0, 100) + "..."
          })),
          patterns,
          decisions,
          timeline,
          summary
        },
        action: `Analyzed ${filtered.length} frames from ${options?.depth || "30days"} of history`
      };
    } catch (error) {
      logger.error("Archaeology skill error:", error);
      return {
        success: false,
        message: `Failed to dig through context: ${error.message}`
      };
    }
  }
  /**
   * Parse a depth string ("3days", "2weeks", "6months", "1year", "all")
   * into a millisecond window. Unrecognized input defaults to 30 days.
   */
  parseDepth(depth) {
    const DEFAULT_DEPTH_MS = 30 * 24 * 60 * 60 * 1e3;
    // BUG FIX: "all" was part of a regex that required leading digits
    // (^(\d+)(…|all)$), so a bare "all" never matched. Handle it first.
    if (/^all$/i.test(depth)) {
      return Number.MAX_SAFE_INTEGER;
    }
    const match = depth.match(/^(\d+)(days?|weeks?|months?|years?)$/i);
    if (!match) {
      return DEFAULT_DEPTH_MS;
    }
    const [, num, unit] = match;
    const value = parseInt(num, 10);
    switch (unit.toLowerCase()) {
      case "day":
      case "days":
        return value * 24 * 60 * 60 * 1e3;
      case "week":
      case "weeks":
        return value * 7 * 24 * 60 * 60 * 1e3;
      case "month":
      case "months":
        // Months approximated as 30 days.
        return value * 30 * 24 * 60 * 60 * 1e3;
      case "year":
      case "years":
        return value * 365 * 24 * 60 * 60 * 1e3;
      default:
        return DEFAULT_DEPTH_MS;
    }
  }
  /**
   * Count occurrences of known workflow patterns (TDD, debugging, …)
   * across result contents; sorted by frequency, descending.
   */
  extractPatterns(results) {
    const patterns = new Map();
    const patternTypes = [
      { regex: /test.*then.*implement/i, name: "TDD" },
      { regex: /refactor/i, name: "Refactoring" },
      { regex: /debug|fix|error|bug/i, name: "Debugging" },
      { regex: /implement.*feature/i, name: "Feature Development" },
      { regex: /review|code review/i, name: "Code Review" },
      { regex: /deploy|release/i, name: "Deployment" },
      { regex: /optimize|performance/i, name: "Optimization" }
    ];
    results.forEach((result) => {
      patternTypes.forEach((pattern) => {
        if (pattern.regex.test(result.content)) {
          patterns.set(pattern.name, (patterns.get(pattern.name) || 0) + 1);
        }
      });
    });
    return Array.from(patterns.entries()).map(([name, count]) => ({ name, count })).sort((a, b) => b.count - a.count);
  }
  /**
   * Heuristically pull out decision statements: first sentence containing a
   * decision keyword, capped at 10 decisions.
   */
  extractDecisions(results) {
    const decisions = [];
    const decisionKeywords = [
      "decided",
      "chose",
      "selected",
      "will use",
      "going with",
      "approach",
      "strategy",
      "solution"
    ];
    results.forEach((result) => {
      const content = result.content.toLowerCase();
      if (decisionKeywords.some((keyword) => content.includes(keyword))) {
        const sentences = result.content.split(/[.!?]+/);
        const decisionSentence = sentences.find(
          (s) => decisionKeywords.some((k) => s.toLowerCase().includes(k))
        );
        if (decisionSentence) {
          decisions.push({
            frameId: result.frameId,
            timestamp: result.timestamp,
            decision: decisionSentence.trim(),
            context: result.content.slice(0, 200)
          });
        }
      }
    });
    return decisions.slice(0, 10);
  }
  /**
   * Group results by calendar day (newest day first), with up to three
   * highlight snippets per day.
   */
  generateTimeline(results) {
    const timeline = new Map();
    results.forEach((result) => {
      const date = new Date(result.timestamp).toDateString();
      if (!timeline.has(date)) {
        timeline.set(date, []);
      }
      const dateItems = timeline.get(date);
      if (dateItems) {
        dateItems.push(result);
      }
    });
    return Array.from(timeline.entries()).map(([date, items]) => ({
      date,
      itemCount: items.length,
      highlights: items.slice(0, 3).map((item) => ({
        frameId: item.frameId,
        summary: item.content.slice(0, 50) + "..."
      }))
    })).sort((a, b) => new Date(b.date).getTime() - new Date(a.date).getTime());
  }
  /**
   * Assemble the markdown "Context Archaeology Report" from the computed
   * sections; empty sections are omitted.
   */
  generateArchaeologySummary(results, patterns, decisions, timeline) {
    let summary = "## Context Archaeology Report\n\n";
    if (results.length > 0) {
      summary += `### Most Relevant Context (${results.length} results)\n`;
      results.slice(0, 3).forEach((r) => {
        summary += `- **${new Date(r.timestamp).toLocaleDateString()}**: ${r.content.slice(0, 100)}...\n`;
      });
      summary += "\n";
    }
    if (patterns.length > 0) {
      summary += `### Detected Patterns\n`;
      patterns.slice(0, 5).forEach((p) => {
        summary += `- ${p.name}: ${p.count} occurrences\n`;
      });
      summary += "\n";
    }
    if (decisions.length > 0) {
      summary += `### Key Decisions\n`;
      decisions.slice(0, 5).forEach((d) => {
        summary += `- **${new Date(d.timestamp).toLocaleDateString()}**: ${d.decision}\n`;
      });
      summary += "\n";
    }
    if (timeline.length > 0) {
      summary += `### Activity Timeline\n`;
      timeline.slice(0, 5).forEach((t) => {
        summary += `- **${t.date}**: ${t.itemCount} activities\n`;
      });
    }
    return summary;
  }
}
/**
 * Central registry and dispatcher for Claude Code skills.
 *
 * Eagerly constructs the handoff/checkpoint/archaeologist/api/spec skills,
 * then asynchronously (fire-and-forget dynamic imports) wires up the
 * dashboard launcher and the RLM orchestrator + Linear task runner.
 * Skills backed by those async members report "not initialized" until the
 * imports resolve; `getAvailableSkills()` reflects the same availability.
 */
class ClaudeSkillsManager {
  constructor(context) {
    this.context = context;
    // Synchronously available skills.
    this.handoffSkill = new HandoffSkill(context);
    this.checkpointSkill = new CheckpointSkill(context);
    this.archaeologistSkill = new ArchaeologistSkill(context);
    this.apiSkill = getAPISkill();
    this.specGeneratorSkill = new SpecGeneratorSkill(context);
    // Fire-and-forget: dashboardLauncher stays undefined until this resolves.
    // NOTE(review): no .catch here — a failed import would surface as an
    // unhandled rejection; consider adding one like the RLM chain below.
    import("./dashboard-launcher.js").then((module) => {
      this.dashboardLauncher = new module.DashboardLauncherSkill();
      logger.info("Dashboard launcher initialized (manual launch required)");
    });
    // ChromaDB connection settings from environment; repo ingestion is only
    // enabled when both an API key and a tenant are configured.
    const chromaConfig = {
      apiKey: process.env["CHROMADB_API_KEY"] || "",
      tenant: process.env["CHROMADB_TENANT"] || "",
      database: process.env["CHROMADB_DATABASE"] || "stackmemory",
      collectionName: process.env["CHROMADB_COLLECTION"] || "stackmemory_repos"
    };
    if (chromaConfig.apiKey && chromaConfig.tenant) {
      this.repoIngestionSkill = new RepoIngestionSkill(
        chromaConfig,
        context.userId,
        process.env["CHROMADB_TEAM_ID"]
      );
      // Best-effort init: failure leaves the skill constructed but unready.
      this.repoIngestionSkill.initialize().catch((error) => {
        logger.warn("Repo ingestion skill initialization failed:", error);
      });
    }
    // Fire-and-forget: RLM orchestrator + Linear task runner; both stay null
    // until this import chain resolves (failures logged, not rethrown).
    import("../features/tasks/linear-task-manager.js").then((module) => {
      const taskStore = new module.LinearTaskManager();
      const frameManager = context.frameManager;
      if (!frameManager) {
        // Thrown inside the .then — lands in the .catch below as a warning.
        throw new Error(
          "FrameManager not provided in context - required for RLM orchestrator"
        );
      }
      this.rlmOrchestrator = new RecursiveAgentOrchestrator(
        frameManager,
        context.dualStackManager,
        context.contextRetriever,
        taskStore
      );
      this.linearTaskRunner = new LinearTaskRunner(
        taskStore,
        this.rlmOrchestrator,
        context,
        this.specGeneratorSkill
      );
      logger.info("RLM Orchestrator initialized");
    }).catch((error) => {
      logger.warn("RLM Orchestrator initialization failed:", error);
    });
  }
  // Skill instances. Members initialized to null (or left undefined, for
  // dashboardLauncher) are populated asynchronously / conditionally above.
  handoffSkill;
  checkpointSkill;
  archaeologistSkill;
  dashboardLauncher;
  repoIngestionSkill = null;
  rlmOrchestrator = null;
  apiSkill;
  specGeneratorSkill;
  linearTaskRunner = null;
  /**
   * Dispatch a skill invocation.
   *
   * @param {string} skillName - top-level skill ("handoff", "checkpoint", "dig", …)
   * @param {string[]} args - positional arguments; args[0] is often a subcommand
   * @param {object} [options] - parsed flags, forwarded to the target skill
   * @returns {Promise<object>} skill result object ({ success, message, … })
   */
  async executeSkill(skillName, args, options) {
    switch (skillName) {
      case "handoff":
        // args: [targetUser, message]
        return this.handoffSkill.execute(args[0], args[1], options);
      case "checkpoint":
        const subcommand = args[0];
        switch (subcommand) {
          case "create":
            return this.checkpointSkill.create(args[1], options);
          case "restore":
            return this.checkpointSkill.restore(args[1]);
          case "list":
            return this.checkpointSkill.list(options);
          case "diff":
            return this.checkpointSkill.diff(args[1], args[2]);
          default:
            return {
              success: false,
              message: `Unknown checkpoint subcommand: ${subcommand}`
            };
        }
      case "dig":
        return this.archaeologistSkill.dig(args[0], options);
      case "lint":
        // Lint is implemented as a constrained RLM run with only the
        // "linting" agent enabled.
        if (!this.rlmOrchestrator) {
          return {
            success: false,
            message: "RLM Orchestrator not initialized. Please wait a moment and try again."
          };
        }
        const lintPath = args[0] || process.cwd();
        const lintOptions = {
          ...options,
          // Force use of linting agent
          agents: ["linting"],
          maxParallel: 1,
          reviewStages: 1,
          verboseLogging: true
        };
        const lintTask = `Perform comprehensive linting on ${lintPath}: Check for syntax errors, type issues, formatting violations, security vulnerabilities, performance anti-patterns, and unused code. Provide actionable fixes.`;
        try {
          const result = await this.rlmOrchestrator.execute(
            lintTask,
            { path: lintPath, ...options },
            lintOptions
          );
          return {
            success: result.success,
            message: `Linting ${result.success ? "completed" : "failed"}`,
            data: {
              issuesFound: result.issuesFound,
              issuesFixed: result.issuesFixed,
              duration: `${result.duration}ms`,
              totalTokens: result.totalTokens,
              details: result.rootNode
            }
          };
        } catch (error) {
          return {
            success: false,
            message: `Linting failed: ${error.message}`
          };
        }
      case "rlm":
        if (!this.rlmOrchestrator) {
          return {
            success: false,
            message: "RLM Orchestrator not initialized. Please wait a moment and try again."
          };
        }
        // NOTE(review): called with 2 args here, but execute() is invoked
        // with (task, context, options) elsewhere in this file — confirm the
        // orchestrator tolerates the missing third argument.
        return this.rlmOrchestrator.execute(args[0], options);
      case "repo":
      case "ingest":
        if (!this.repoIngestionSkill) {
          return {
            success: false,
            message: "Repo ingestion skill not initialized. Please configure ChromaDB."
          };
        }
        const repoCommand = args[0];
        switch (repoCommand) {
          case "ingest":
            // Path/name default to the current working directory.
            const repoPath = args[1] || process.cwd();
            const repoName = args[2] || path.basename(repoPath);
            return await this.repoIngestionSkill.ingestRepository(
              repoPath,
              repoName,
              options
            );
          case "update":
            const updatePath = args[1] || process.cwd();
            const updateName = args[2] || path.basename(updatePath);
            return await this.repoIngestionSkill.updateRepository(
              updatePath,
              updateName,
              options
            );
          case "search":
            const query = args[1];
            if (!query) {
              return {
                success: false,
                message: "Search query required"
              };
            }
            const results = await this.repoIngestionSkill.searchCode(query, {
              repoName: options?.repoName,
              language: options?.language,
              limit: options?.limit,
              includeContext: options?.includeContext
            });
            return {
              success: true,
              message: `Found ${results.length} results`,
              data: results
            };
          case "stats":
            const stats = await this.repoIngestionSkill.getRepoStats(args[1]);
            return {
              success: true,
              message: "Repository statistics",
              data: stats
            };
          default:
            return {
              success: false,
              message: `Unknown repo command: ${repoCommand}. Use: ingest, update, search, or stats`
            };
        }
      case "dashboard":
        const dashboardCmd = args[0];
        if (!this.dashboardLauncher) {
          return {
            success: false,
            message: "Dashboard launcher not yet initialized"
          };
        }
        switch (dashboardCmd) {
          case "launch":
            await this.dashboardLauncher.launch();
            return {
              success: true,
              message: "Dashboard launched",
              action: "open-browser"
            };
          case "stop":
            await this.dashboardLauncher.stop();
            return {
              success: true,
              message: "Dashboard stopped"
            };
          default:
            // Any unrecognized (or missing) subcommand behaves like "launch".
            await this.dashboardLauncher.launch();
            return {
              success: true,
              message: "Dashboard launched",
              action: "open-browser"
            };
        }
      case "recursive":
        if (!this.rlmOrchestrator) {
          return {
            success: false,
            message: "RLM Orchestrator not initialized. Please wait a moment and try again."
          };
        }
        // Full task text is the joined argument list.
        const task = args.join(" ") || "Analyze and improve the current code";
        const rlmOptions = options;
        try {
          logger.info("Starting RLM execution", { task });
          const result = await this.rlmOrchestrator.execute(
            task,
            {
              files: rlmOptions.files || [],
              query: task
            },
            rlmOptions
          );
          return {
            success: result.success,
            message: `RLM execution ${result.success ? "completed" : "failed"}`,
            data: {
              duration: `${result.duration}ms`,
              totalTokens: result.totalTokens,
              totalCost: `$${result.totalCost.toFixed(2)}`,
              testsGenerated: result.testsGenerated,
              improvements: result.improvements.length,
              issuesFound: result.issuesFound,
              issuesFixed: result.issuesFixed,
              executionTree: result.rootNode
            }
          };
        } catch (error) {
          logger.error("RLM execution error:", error);
          return {
            success: false,
            message: `RLM execution failed: ${error.message}`
          };
        }
      case "api":
        const apiCmd = args[0];
        switch (apiCmd) {
          case "add":
            return this.apiSkill.add(args[1], args[2], {
              spec: options?.spec,
              authType: options?.authType,
              headerName: options?.headerName,
              envVar: options?.envVar
            });
          case "list":
            return this.apiSkill.list();
          case "describe":
            return this.apiSkill.describe(args[1], args[2]);
          case "exec":
            // Remaining args are parsed as --key value pairs starting at index 3.
            const execParams = {};
            for (let i = 3; i < args.length; i += 2) {
              if (args[i] && args[i + 1]) {
                execParams[args[i].replace("--", "")] = args[i + 1];
              }
            }
            return this.apiSkill.exec(args[1], args[2], execParams, {
              raw: options?.raw,
              filter: options?.filter
            });
          case "auth":
            return this.apiSkill.auth(args[1], {
              token: options?.token,
              envVar: options?.envVar,
              oauth: options?.oauth,
              scopes: options?.scopes?.split(",")
            });
          case "sync":
            return this.apiSkill.sync(args[1]);
          case "remove":
            return this.apiSkill.remove(args[1]);
          case "help":
          default:
            return {
              success: true,
              message: this.apiSkill.getHelp()
            };
        }
      case "spec": {
        const specCmd = args[0];
        switch (specCmd) {
          case "list":
            return this.specGeneratorSkill.list();
          case "update":
            return this.specGeneratorSkill.update(
              args[1] || "docs/specs/PROMPT_PLAN.md",
              args.slice(2).join(" ") || args[1]
            );
          case "validate":
            return this.specGeneratorSkill.validate(
              args[1] || "docs/specs/PROMPT_PLAN.md"
            );
          case "one-pager":
          case "dev-spec":
          case "prompt-plan":
          case "agents":
            return this.specGeneratorSkill.generate(
              specCmd,
              args.slice(1).join(" ") || "Untitled",
              { force: options?.force }
            );
          default:
            // NOTE(review): this branch repeats the four generate types that
            // the explicit cases above already handle, so it appears
            // unreachable for those values — left as-is.
            if (specCmd && ["one-pager", "dev-spec", "prompt-plan", "agents"].includes(
              specCmd
            )) {
              return this.specGeneratorSkill.generate(
                specCmd,
                args.slice(1).join(" ") || "Untitled",
                { force: options?.force }
              );
            }
            return {
              success: false,
              message: "Usage: spec <one-pager|dev-spec|prompt-plan|agents|list|update|validate> [args]"
            };
        }
      }
      case "linear-run": {
        if (!this.linearTaskRunner) {
          return {
            success: false,
            message: "Linear Task Runner not initialized. RLM Orchestrator may not be ready."
          };
        }
        const lrCmd = args[0];
        switch (lrCmd) {
          case "next":
            return this.linearTaskRunner.runNext(
              options
            );
          case "all":
            return this.linearTaskRunner.runAll(
              options
            );
          case "task":
            return this.linearTaskRunner.runTask(args[1]);
          case "preview":
            return this.linearTaskRunner.preview(args[1]);
          default:
            // Missing/unknown subcommand defaults to "next".
            return this.linearTaskRunner.runNext(
              options
            );
        }
      }
      default:
        return {
          success: false,
          message: `Unknown skill: ${skillName}`
        };
    }
  }
  /**
   * List the skill names currently usable: always-on skills plus those whose
   * async/conditional initialization has completed.
   */
  getAvailableSkills() {
    const skills = ["handoff", "checkpoint", "dig", "dashboard", "api", "spec"];
    if (this.repoIngestionSkill) {
      skills.push("repo");
    }
    if (this.rlmOrchestrator) {
      skills.push("rlm", "lint");
    }
    if (this.linearTaskRunner) {
      skills.push("linear-run");
    }
    return skills;
  }
  /**
   * Return the usage/help text for a skill name (static templates, except
   * "api" which delegates to the API skill's own help).
   */
  getSkillHelp(skillName) {
    switch (skillName) {
      case "handoff":
        return `
/handoff @user "message" [--priority high] [--frames frame1,frame2]
Streamline frame handoffs between team members
`;
      case "checkpoint":
        return `
/checkpoint create "description" [--files file1,file2] [--auto-detect-risky]
/checkpoint restore <id>
/checkpoint list [--limit 10] [--since "2024-01-01"]
/checkpoint diff <id1> <id2>
Create and manage recovery points
`;
      case "dig":
        return `
/dig "query" [--depth 6months] [--patterns] [--decisions] [--timeline]
Deep historical context retrieval across sessions
`;
      case "lint":
        return `
/lint [path] [options]
Perform comprehensive code linting and quality checks
Automatically checks for:
- Syntax errors and type issues
- Code formatting and style violations
- Security vulnerabilities
- Performance anti-patterns
- Unused imports and dead code
- Code smells and complexity issues
Usage:
  stackmemory skills lint              # Lint current directory
  stackmemory skills lint src/         # Lint specific directory
  stackmemory skills lint src/file.ts  # Lint specific file
Options:
  --fix          Automatically fix issues where possible
  --format       Focus on formatting issues
  --security     Focus on security vulnerabilities
  --performance  Focus on performance issues
  --verbose      Show detailed output
Examples:
  stackmemory skills lint --fix
  stackmemory skills lint src/ --security --verbose
`;
      case "rlm":
        return `
/rlm "task description" [options]
Execute complex tasks with recursive agent orchestration
Options:
  --max-parallel <n>         Max concurrent subagents (default: 5)
  --max-recursion <n>        Max recursion depth (default: 4)
  --max-tokens-per-agent <n> Token budget per agent (default: 30000)
  --review-stages <n>        Review iterations (default: 3)
  --quality-threshold <n>    Target quality 0-1 (default: 0.85)
  --test-mode <mode>         unit/integration/e2e/all (default: all)
  --verbose                  Show all operations
  --timeout-per-agent <s>    Timeout in seconds (default: 300)
Examples:
  stackmemory skills rlm "Generate tests for API endpoints"
  stackmemory skills rlm "Refactor auth system" --quality-threshold 0.95
`;
      case "dashboard":
        return `
/dashboard [launch|stop]
Launch the StackMemory web dashboard for real-time monitoring
- launch: Start the web dashboard and open in browser (default)
- stop: Stop the dashboard server
Auto-launches on new sessions when configured
`;
      case "repo":
        return `
/repo ingest [path] [name] [--incremental] [--include-tests] [--include-docs]
/repo update [path] [name] [--force-update]
/repo search "query" [--repo-name name] [--language lang] [--limit n]
/repo stats [repo-name]
Ingest and search code repositories in ChromaDB:
- ingest: Index a new repository (defaults to current directory)
- update: Update an existing repository with changes
- search: Semantic search across ingested code
- stats: View statistics about ingested repositories
Options:
- --incremental: Only process changed files
- --include-tests: Include test files in indexing
- --include-docs: Include documentation files
- --force-update: Force re-indexing of all files
- --language: Filter search by programming language
- --limit: Maximum search results (default: 20)
`;
      case "recursive":
        return `
/rlm "task description" [options]
Recursive Language Model orchestration using Claude Code's Task tool:
- Decomposes complex tasks into parallel/sequential subtasks
- Spawns specialized Claude subagents for each task type
- Automatic test generation and multi-stage review
- Handles large codebases through intelligent chunking
Subagent Types:
- Planning: Task decomposition and strategy
- Code: Implementation and refactoring
- Testing: Comprehensive test generation (unit/integration/E2E)
- Linting: Code quality and formatting
- Review: Multi-stage code review and quality scoring
- Improve: Implement review suggestions
- Context: Information retrieval
- Publish: NPM/GitHub releases
Options:
- --max-parallel N: Max concurrent subagents (default: 5)
- --max-recursion N: Max recursion depth (default: 4)
- --review-stages N: Number of review iterations (default: 3)
- --quality-threshold N: Target quality score 0-1 (default: 0.85)
- --test-mode [unit|integration|e2e|all]: Test generation mode (default: all)
- --verbose: Show all recursive operations
Examples:
/rlm "Refactor the authentication system with full test coverage"
/rlm "Generate comprehensive tests for the API endpoints" --test-mode integration
/rlm "Review and improve code quality" --review-stages 5 --quality-threshold 0.95
`;
      case "api":
        return this.apiSkill.getHelp();
      case "spec":
        return `
/spec <type> [title] [--force]
/spec list
/spec update <path> <changes>
/spec validate <path>
Generate iterative spec documents (VibeScaffold 4-doc system):
one-pager \u2192 docs/specs/ONE_PAGER.md
dev-spec \u2192 docs/specs/DEV_SPEC.md (reads ONE_PAGER)
prompt-plan \u2192 docs/specs/PROMPT_PLAN.md (reads ONE_PAGER + DEV_SPEC)
agents \u2192 AGENTS.md (reads all)
Examples:
/spec one-pager "Photo Captioner"
/spec dev-spec
/spec update prompt-plan "Initialize repository and tooling"
/spec validate prompt-plan
`;
      case "linear-run":
        return `
/linear-run next [--priority high] [--tag backend]
/linear-run all [--dry-run] [--maxConcurrent 3]
/linear-run task <id>
/linear-run preview [id]
Execute Linear tasks via RLM orchestrator:
next \u2014 Pull next todo task, execute, update status
all \u2014 Run all pending tasks iteratively
task \u2014 Execute a specific task by ID
preview \u2014 Show execution plan without running
Flow: Linear \u2192 RLM decompose \u2192 subagents execute \u2192 Linear status update
Auto-updates PROMPT_PLAN checkboxes when tasks complete.
`;
      default:
        return `Unknown skill: ${skillName}`;
    }
  }
}
export {
ArchaeologistSkill,
CheckpointSkill,
ClaudeSkillsManager,
HandoffSkill
};
//# sourceMappingURL=claude-skills.js.map