@stackmemoryai/stackmemory
Version:
Project-scoped memory for AI coding tools. Durable context across sessions with MCP integration, frames, smart retrieval, Claude Code skills, and automatic hooks.
278 lines (277 loc) • 8.2 kB
JavaScript
// CommonJS-compatibility shim emitted by the bundler: reconstructs the
// classic `__filename` / `__dirname` globals inside this ES module.
// NOTE(review): neither binding is referenced elsewhere in this file —
// presumably kept for downstream/bundled code; confirm before removing.
import { fileURLToPath as __fileURLToPath } from 'url';
import { dirname as __pathDirname } from 'path';
const __filename = __fileURLToPath(import.meta.url);
const __dirname = __pathDirname(__filename);
import { HybridDigestGenerator } from "./hybrid-digest-generator.js";
import { logger } from "../monitoring/logger.js";
/**
 * Default idle-detection thresholds, merged with caller overrides in the
 * EnhancedHybridDigestGenerator constructor. Frozen so the shared module-level
 * constant cannot be mutated by consumers (it is only ever spread-copied).
 */
const DEFAULT_IDLE_CONFIG = Object.freeze({
  noToolCallThreshold: 3e4, // 30 seconds without a tool call counts as idle
  noInputThreshold: 6e4, // 60 seconds without user input counts as idle
  processOnFrameClose: true, // run the digest queue immediately when a frame closes
  checkInterval: 1e4 // poll idle state every 10 seconds
});
/**
 * Digest generator that layers idle detection and frame-lifecycle hooks on top
 * of HybridDigestGenerator. It tracks tool-call and user-input timestamps,
 * periodically checks for idleness, and drains the digest queue when the
 * system is quiet or a frame closes.
 */
class EnhancedHybridDigestGenerator extends HybridDigestGenerator {
  /** Effective idle configuration: DEFAULT_IDLE_CONFIG merged with overrides. */
  idleConfig;
  /** Epoch ms of the most recent tool call. */
  lastToolCallTime = Date.now();
  /** Epoch ms of the most recent user input. */
  lastInputTime = Date.now();
  /** Timer handle from startIdleDetection(); cleared in shutdown(). */
  idleCheckInterval;
  /** IDs of frames currently open. */
  activeFrames = /* @__PURE__ */ new Set();
  /**
   * @param {object} db - Database handle exposing prepare(...).run(...).
   * @param {object} [config] - Base digest config; maxTokens is capped-defaulted
   *   to 200 and enableAIGeneration defaults to true.
   * @param {object} [llmProvider] - Optional provider with generateSummary().
   * @param {object} [idleConfig] - Partial overrides for DEFAULT_IDLE_CONFIG.
   */
  constructor(db, config = {}, llmProvider, idleConfig = {}) {
    const enhancedConfig = {
      ...config,
      // Keep under 200 tokens as per requirement.
      maxTokens: config.maxTokens || 200,
      enableAIGeneration: config.enableAIGeneration ?? true
    };
    super(db, enhancedConfig, llmProvider);
    this.idleConfig = { ...DEFAULT_IDLE_CONFIG, ...idleConfig };
    this.startIdleDetection();
  }
  /**
   * Start the periodic idle-state poll.
   */
  startIdleDetection() {
    this.idleCheckInterval = setInterval(() => {
      this.checkIdleState();
    }, this.idleConfig.checkInterval);
    // FIX: don't let the idle timer alone keep the Node process alive.
    // Optional-chained because unref() only exists on Node's Timeout.
    this.idleCheckInterval.unref?.();
  }
  /**
   * If either idle threshold has elapsed, kick off queue processing.
   * Errors from the async queue drain are logged, never thrown.
   */
  checkIdleState() {
    const now = Date.now();
    const toolCallIdle = now - this.lastToolCallTime > this.idleConfig.noToolCallThreshold;
    const inputIdle = now - this.lastInputTime > this.idleConfig.noInputThreshold;
    if (toolCallIdle || inputIdle) {
      logger.debug("Idle state detected, triggering digest processing", {
        toolCallIdle,
        inputIdle,
        timeSinceLastToolCall: now - this.lastToolCallTime,
        timeSinceLastInput: now - this.lastInputTime
      });
      this.processQueue().catch((error) => {
        logger.error(
          "Error processing digest queue during idle",
          error instanceof Error ? error : new Error(String(error))
        );
      });
    }
  }
  /**
   * Record tool-call activity (resets the no-tool-call idle clock).
   */
  recordToolCall() {
    this.lastToolCallTime = Date.now();
  }
  /**
   * Record user-input activity (resets the no-input idle clock).
   */
  recordUserInput() {
    this.lastInputTime = Date.now();
  }
  /**
   * Handle frame closure: drop it from the active set and, when configured,
   * prioritize its queued digest and drain the queue immediately.
   * @param {string} frameId
   */
  onFrameClosed(frameId) {
    this.activeFrames.delete(frameId);
    if (this.idleConfig.processOnFrameClose) {
      logger.info("Frame closed, triggering immediate digest processing", {
        frameId
      });
      this.prioritizeFrame(frameId);
      this.processQueue().catch((error) => {
        logger.error(
          "Error processing digest on frame close",
          error instanceof Error ? error : new Error(String(error))
        );
      });
    }
  }
  /**
   * Handle frame opened: track it in the active set.
   * @param {string} frameId
   */
  onFrameOpened(frameId) {
    this.activeFrames.add(frameId);
  }
  /**
   * Bump a frame's pending queue entries to high priority. DB failures are
   * logged and swallowed (best-effort).
   * @param {string} frameId
   */
  prioritizeFrame(frameId) {
    try {
      this.db.prepare(
        `
        UPDATE digest_queue
        SET priority = 'high', updated_at = unixepoch()
        WHERE frame_id = ? AND status = 'pending'
        `
      ).run(frameId);
    } catch (error) {
      // FIX: normalize non-Error throwables, consistent with every other
      // logger.error call site in this class.
      logger.error(
        "Failed to prioritize frame",
        error instanceof Error ? error : new Error(String(error))
      );
    }
  }
  /**
   * Generate the AI portion of a digest and enrich it with extracted
   * decisions, insights, next steps, patterns, and technical debt.
   * @throws {Error} when no LLM provider is configured.
   */
  async generateEnhancedAI(input, deterministic) {
    if (!this.llmProvider) {
      throw new Error("No LLM provider configured");
    }
    // FIX: the original built a prompt via buildEnhancedPrompt() and silently
    // discarded it; the dead computation is removed. NOTE(review): if
    // generateSummary() is meant to consume that prompt, wire it through —
    // confirm against the provider's interface.
    const response = await this.llmProvider.generateSummary(
      input,
      deterministic,
      this.config.maxTokens
    );
    const enhanced = {
      ...response,
      keyDecisions: this.extractKeyDecisions(response),
      insights: this.extractInsights(response),
      nextSteps: this.extractNextSteps(response),
      patterns: this.detectPatterns(input, deterministic),
      technicalDebt: this.identifyTechnicalDebt(input, deterministic)
    };
    return enhanced;
  }
  /**
   * Build the instruction prompt for AI digest generation.
   * @returns {string} newline-joined prompt text.
   */
  buildEnhancedPrompt(input, deterministic) {
    const parts = [
      `Analyze this development frame and provide insights (max ${this.config.maxTokens} tokens):`,
      "",
      `Frame: ${input.frame.name} (${input.frame.type})`,
      `Duration: ${deterministic.durationSeconds}s`,
      `Files Modified: ${deterministic.filesModified.length}`,
      `Tool Calls: ${deterministic.toolCallCount}`,
      `Errors: ${deterministic.errorsEncountered.length}`,
      "",
      "Provide:",
      "1. Key decisions made and why (2-3 items)",
      "2. Important insights or learnings (1-2 items)",
      "3. Suggested next steps (2-3 items)",
      "4. Any patterns or anti-patterns observed",
      "5. Technical debt or improvements needed",
      "",
      "Be concise and actionable. Focus on value, not description."
    ];
    return parts.join("\n");
  }
  /**
   * Extract key decisions from the AI response.
   * NOTE(review): stub — always returns []; presumably awaiting a parser.
   */
  extractKeyDecisions(response) {
    return [];
  }
  /**
   * Extract insights from the AI response (currently just the single
   * `insight` field, when present).
   */
  extractInsights(response) {
    const insights = [];
    if (response.insight) {
      insights.push(response.insight);
    }
    return insights;
  }
  /**
   * Extract next steps from the AI response.
   * NOTE(review): stub — always returns []; presumably awaiting a parser.
   */
  extractNextSteps(response) {
    return [];
  }
  /**
   * Heuristically detect activity patterns from deterministic frame stats:
   * repeated retries, TDD, and refactoring signatures.
   * @returns {string[]} human-readable pattern descriptions.
   */
  detectPatterns(input, deterministic) {
    const patterns = [];
    // Same error seen more than twice suggests retry loops.
    if (deterministic.errorsEncountered.some((e) => e.count > 2)) {
      patterns.push("Multiple retry attempts detected");
    }
    const hasTests = deterministic.testsRun.length > 0;
    const hasCodeChanges = deterministic.filesModified.some(
      (f) => f.operation === "modify" && !f.path.includes("test")
    );
    if (hasTests && hasCodeChanges) {
      patterns.push("Test-driven development pattern observed");
    }
    // Many edits with no new files reads as a refactor.
    const manyFileChanges = deterministic.filesModified.length > 5;
    const noNewFiles = !deterministic.filesModified.some(
      (f) => f.operation === "create"
    );
    if (manyFileChanges && noNewFiles) {
      patterns.push("Refactoring pattern detected");
    }
    return patterns;
  }
  /**
   * Identify technical-debt signals: untested changes, unresolved errors,
   * and TODOs recorded in decisions.
   * @returns {string[]} human-readable debt descriptions.
   */
  identifyTechnicalDebt(input, deterministic) {
    const debt = [];
    if (deterministic.filesModified.length > 3 && deterministic.testsRun.length === 0) {
      debt.push("Code changes without corresponding tests");
    }
    const unresolvedErrors = deterministic.errorsEncountered.filter(
      (e) => !e.resolved
    );
    if (unresolvedErrors.length > 0) {
      debt.push(`${unresolvedErrors.length} unresolved errors remain`);
    }
    if (deterministic.decisions.some((d) => d.toLowerCase().includes("todo"))) {
      debt.push("TODOs added to codebase");
    }
    return debt;
  }
  /**
   * Generate a digest (60/40 deterministic/AI split per the base class),
   * counting the call as tool activity and flagging pending AI enrichment.
   */
  generateDigest(input) {
    this.recordToolCall();
    const digest = super.generateDigest(input);
    if (this.config.enableAIGeneration && this.llmProvider) {
      digest.status = "ai_pending";
    }
    return digest;
  }
  /**
   * Handle a user interruption: reset both idle clocks and demote any
   * in-flight queue entries to low priority.
   */
  handleInterruption() {
    logger.info("User activity detected, pausing digest processing");
    this.recordUserInput();
    this.recordToolCall();
    // FIX: guard the DB write like prioritizeFrame() does — an interruption
    // handler must not throw out of a best-effort priority demotion.
    try {
      this.db.prepare(
        `
        UPDATE digest_queue
        SET priority = 'low'
        WHERE status = 'processing'
        `
      ).run();
    } catch (error) {
      logger.error(
        "Failed to demote processing digests on interruption",
        error instanceof Error ? error : new Error(String(error))
      );
    }
  }
  /**
   * Snapshot of the current idle state for diagnostics.
   * @returns {{isIdle: boolean, timeSinceLastToolCall: number,
   *            timeSinceLastInput: number, activeFrames: number}}
   */
  getIdleStatus() {
    const now = Date.now();
    return {
      isIdle: now - this.lastToolCallTime > this.idleConfig.noToolCallThreshold || now - this.lastInputTime > this.idleConfig.noInputThreshold,
      timeSinceLastToolCall: now - this.lastToolCallTime,
      timeSinceLastInput: now - this.lastInputTime,
      activeFrames: this.activeFrames.size
    };
  }
  /**
   * Stop idle detection. Safe to call repeatedly.
   */
  shutdown() {
    if (this.idleCheckInterval) {
      clearInterval(this.idleCheckInterval);
      // FIX: drop the stale handle so repeated shutdown() calls are no-ops.
      this.idleCheckInterval = undefined;
    }
  }
}
export {
EnhancedHybridDigestGenerator
};
//# sourceMappingURL=enhanced-hybrid-digest.js.map