@stackmemoryai/stackmemory
Version:
Lossless, project-scoped memory for AI coding tools. Durable context across sessions with 56 MCP tools, FTS5 search, conductor orchestrator, loop/watch monitoring, snapshot capture, pre-flight overlap checks, Claude/Codex/OpenCode wrappers, Linear sync, and more.
228 lines (227 loc) • 6.95 kB
JavaScript
// CommonJS-compatibility shim: reconstruct __filename/__dirname, which do not
// exist in ES modules, from import.meta.url. Neither binding is referenced in
// the visible portion of this file — presumably emitted by the bundler for
// code paths elsewhere; verify before removing.
import { fileURLToPath as __fileURLToPath } from 'url';
import { dirname as __pathDirname } from 'path';
const __filename = __fileURLToPath(import.meta.url);
const __dirname = __pathDirname(__filename);
import { logger } from "../../../core/monitoring/logger.js";
import { isFeatureEnabled } from "../../../core/config/feature-flags.js";
import {
createProvider
} from "../../../core/extensions/provider-adapter.js";
import {
getOptimalProvider
} from "../../../core/models/model-router.js";
import { scoreComplexity } from "../../../core/models/complexity-scorer.js";
import {
AnthropicBatchClient
} from "../../anthropic/batch-client.js";
/**
 * Wrap a structured error payload in an MCP text response.
 * @param {object} err - Error details to serialize (pretty-printed JSON).
 * @returns {{content: Array<{type: string, text: string}>}} MCP-style response.
 */
function errorResponse(err) {
  const text = JSON.stringify(err, null, 2);
  return { content: [{ type: "text", text }] };
}
/**
 * Classify a provider API error into a structured, actionable payload.
 *
 * Fixes over the previous version: the 3-digit-status regex is evaluated
 * once instead of twice, `parseInt` gets an explicit radix, and `undefined`
 * replaces `void 0`.
 *
 * @param {Error|{status?: number, message?: string}} error - Error thrown by a provider call.
 * @param {string} provider - Provider name, interpolated into the recommendation text.
 * @returns {{errorType: string, message: string, recommendation: string, provider: string}}
 */
function classifyApiError(error, provider) {
  const msg = error.message || String(error);
  // Prefer an explicit HTTP status on the error object; otherwise fall back
  // to the first run of exactly-3 digits embedded in the message text.
  const statusMatch = msg.match(/(\d{3})/);
  const status = error.status ?? (statusMatch ? Number.parseInt(statusMatch[1], 10) : undefined);
  if (status === 429) {
    return {
      errorType: "rate_limit",
      message: msg,
      recommendation: `Rate limited by ${provider}. Retry after a delay or switch to a different provider.`,
      provider
    };
  }
  if (status && status >= 500) {
    return {
      errorType: "server_error",
      message: msg,
      recommendation: `${provider} returned a server error. Try a different provider or retry later.`,
      provider
    };
  }
  // Anything else (4xx other than 429, or no detectable status).
  return {
    errorType: "api_error",
    message: msg,
    recommendation: `API call to ${provider} failed. Check the model name, API key, and base URL.`,
    provider
  };
}
/**
 * MCP tool handlers for multi-provider model delegation and Anthropic
 * Batch API jobs. Every handler returns an MCP-style response of the form
 * `{ content: [{ type: "text", text: <JSON string> }] }` and is gated on
 * the "multiProvider" feature flag.
 *
 * Fix over the previous version: the identical feature-flag guard and its
 * error payload were copy-pasted into all three handlers; they are now
 * deduplicated into #checkMultiProviderEnabled().
 */
class ProviderHandlers {
  // Lazily-created Anthropic Batch API client; see getBatchClient().
  batchClient;

  constructor(_deps) {
    // Dependencies are accepted for interface compatibility but unused here.
  }

  /**
   * Return the shared AnthropicBatchClient, creating it on first use.
   * @returns {AnthropicBatchClient}
   */
  getBatchClient() {
    if (!this.batchClient) {
      this.batchClient = new AnthropicBatchClient();
    }
    return this.batchClient;
  }

  /**
   * Shared feature-flag guard. Returns the standard "feature_disabled"
   * error response when multi-provider routing is off, or null when the
   * feature is enabled and the caller may proceed.
   * @returns {object|null}
   */
  #checkMultiProviderEnabled() {
    if (isFeatureEnabled("multiProvider")) {
      return null;
    }
    return errorResponse({
      errorType: "feature_disabled",
      message: "Multi-provider routing is disabled.",
      recommendation: "Set STACKMEMORY_MULTI_PROVIDER=true to enable multi-provider routing."
    });
  }

  /**
   * delegate_to_model — route a prompt to a specific provider+model.
   *
   * Picks the optimal provider via getOptimalProvider (honoring an optional
   * explicit `args.provider` preference), resolves the API key from the
   * environment, and runs a single completion through the provider adapter.
   *
   * @param {{prompt: string, taskType?: string, provider?: string, model?: string,
   *          maxTokens?: number, temperature?: number, system?: string}} args
   * @returns {Promise<object>} MCP response with the model output and usage,
   *   or a structured error response (never throws).
   */
  async handleDelegateToModel(args) {
    const disabled = this.#checkMultiProviderEnabled();
    if (disabled) {
      return disabled;
    }
    const taskType = args.taskType || "default";
    const preference = args.provider;
    // Complexity is scored for observability only; routing uses taskType.
    const complexity = scoreComplexity(args.prompt);
    const optimal = getOptimalProvider(taskType, preference, {
      task: args.prompt
    });
    logger.info("delegate_to_model routing", {
      taskType,
      complexity: complexity.tier,
      score: complexity.score,
      provider: optimal.provider
    });
    const providerModel = args.model || optimal.model;
    const apiKey = process.env[optimal.apiKeyEnv] || "";
    if (!apiKey) {
      return errorResponse({
        errorType: "missing_api_key",
        message: `No API key found for ${optimal.provider} (env: ${optimal.apiKeyEnv})`,
        recommendation: `Set the ${optimal.apiKeyEnv} environment variable or choose a different provider.`,
        provider: optimal.provider
      });
    }
    try {
      const adapter = createProvider(optimal.provider, {
        apiKey,
        baseUrl: optimal.baseUrl
      });
      const messages = [{ role: "user", content: args.prompt }];
      const result = await adapter.complete(messages, {
        model: providerModel,
        maxTokens: args.maxTokens || 4096,
        temperature: args.temperature,
        system: args.system
      });
      // Concatenate only the text parts of the adapter's content array.
      const text = result.content.filter((c) => c.type === "text").map((c) => c.text).join("");
      return {
        content: [
          {
            type: "text",
            text: JSON.stringify(
              {
                provider: optimal.provider,
                model: providerModel,
                response: text,
                usage: result.usage
              },
              null,
              2
            )
          }
        ]
      };
    } catch (error) {
      logger.error("delegate_to_model failed", { error: error.message });
      return errorResponse(classifyApiError(error, optimal.provider));
    }
  }

  /**
   * batch_submit — submit prompts to the Anthropic Batch API.
   *
   * @param {{prompts: Array<{id: string, prompt: string, model?: string,
   *          maxTokens?: number, system?: string}>, description?: string}} args
   * @returns {Promise<object>} MCP response with the new batchId and request
   *   count, or a structured error response (never throws).
   */
  async handleBatchSubmit(args) {
    const disabled = this.#checkMultiProviderEnabled();
    if (disabled) {
      return disabled;
    }
    try {
      const batchClient = this.getBatchClient();
      const requests = args.prompts.map((p) => ({
        custom_id: p.id,
        params: {
          // Per-prompt overrides with conservative defaults.
          model: p.model || "claude-sonnet-4-5-20250929",
          max_tokens: p.maxTokens || 4096,
          messages: [{ role: "user", content: p.prompt }],
          system: p.system
        }
      }));
      const batchId = await batchClient.submit(requests, args.description);
      return {
        content: [
          {
            type: "text",
            text: JSON.stringify(
              {
                batchId,
                status: "submitted",
                requestCount: requests.length
              },
              null,
              2
            )
          }
        ]
      };
    } catch (error) {
      return errorResponse({
        errorType: "batch_error",
        message: error.message,
        recommendation: "Check ANTHROPIC_API_KEY and batch request format."
      });
    }
  }

  /**
   * batch_check — poll a batch job's status and optionally retrieve results.
   *
   * @param {{batchId: string, retrieve?: boolean}} args - When `retrieve` is
   *   true AND the job has ended, the full results are fetched and included.
   * @returns {Promise<object>} MCP response with job status (and results when
   *   retrieved), or a structured error response (never throws).
   */
  async handleBatchCheck(args) {
    const disabled = this.#checkMultiProviderEnabled();
    if (disabled) {
      return disabled;
    }
    try {
      const batchClient = this.getBatchClient();
      const job = await batchClient.poll(args.batchId);
      if (args.retrieve && job.processing_status === "ended") {
        const results = await batchClient.retrieve(args.batchId);
        return {
          content: [
            {
              type: "text",
              text: JSON.stringify({ job, results }, null, 2)
            }
          ]
        };
      }
      return {
        content: [
          {
            type: "text",
            text: JSON.stringify(
              {
                batchId: args.batchId,
                status: job.processing_status,
                counts: job.request_counts,
                createdAt: job.created_at,
                endedAt: job.ended_at
              },
              null,
              2
            )
          }
        ]
      };
    } catch (error) {
      return errorResponse({
        errorType: "batch_error",
        message: error.message,
        recommendation: "Check the batchId is valid and ANTHROPIC_API_KEY is set."
      });
    }
  }
}
// Named export only — consumers import { ProviderHandlers } from this module.
export {
  ProviderHandlers
};