@stackmemoryai/stackmemory
Version:
Project-scoped memory for AI coding tools. Durable context across sessions with MCP integration, frames, smart retrieval, Claude Code skills, and automatic hooks.
156 lines (155 loc) • 4.37 kB
JavaScript
import { fileURLToPath as __fileURLToPath } from 'url';
import { dirname as __pathDirname } from 'path';
const __filename = __fileURLToPath(import.meta.url);
const __dirname = __pathDirname(__filename);
import {
SWEEP_STOP_TOKENS,
DEFAULT_SERVER_CONFIG
} from "./types.js";
import { buildSweepPrompt } from "./prompt-builder.js";
/**
 * HTTP client for a locally running Sweep prediction server.
 *
 * Wraps the server's OpenAI-style `/v1/completions` endpoint and maps
 * transport/protocol failures to structured result objects instead of
 * throwing, so callers can branch on `success` / `error`.
 */
class SweepPredictionClient {
  config;
  baseUrl;
  /**
   * @param {object} [config] - Partial server config; merged over
   *   DEFAULT_SERVER_CONFIG (later keys win). Must yield `host` and `port`.
   */
  constructor(config = {}) {
    this.config = { ...DEFAULT_SERVER_CONFIG, ...config };
    this.baseUrl = `http://${this.config.host}:${this.config.port}`;
  }
  /**
   * Check if the server is healthy.
   * @returns {Promise<boolean>} true iff GET /health answers 2xx within 2s.
   */
  async checkHealth() {
    try {
      const response = await fetch(`${this.baseUrl}/health`, {
        method: "GET",
        signal: AbortSignal.timeout(2e3)
      });
      return response.ok;
    } catch {
      // Network error or timeout — treat as unhealthy.
      return false;
    }
  }
  /**
   * Run a prediction using the Sweep model.
   *
   * Never rejects: all failures are returned as `{ success: false, error, message }`.
   *
   * @param {object} input
   * @param {string} input.file_path - Path of the file being predicted.
   * @param {string} input.current_content - Current file content.
   * @param {string} [input.original_content] - Baseline content; defaults to
   *   current_content only when null/undefined (empty string is a valid baseline).
   * @param {Array}  [input.recent_diffs=[]] - Recent edit history for context.
   * @param {object} [input.context_files] - Additional context files.
   * @param {number} [input.max_tokens=2048]
   * @param {number} [input.temperature=0.1] - 0 (greedy decoding) is valid.
   * @param {number} [input.top_k=40]
   * @returns {Promise<object>} On success: predicted_content, file_path,
   *   latency_ms, tokens_generated. On failure: error code + message.
   */
  async predict(input) {
    const startTime = Date.now();
    try {
      const prompt = buildSweepPrompt({
        filePath: input.file_path,
        // ?? (not ||): an empty-string original (e.g. a brand-new file) must
        // not be silently replaced by the current content.
        originalContent: input.original_content ?? input.current_content,
        currentContent: input.current_content,
        recentDiffs: input.recent_diffs ?? [],
        contextFiles: input.context_files
      });
      const request = {
        model: "sweep",
        prompt,
        // ?? so explicit falsy values are honored: with || a caller passing
        // temperature 0 or top_k 0 would silently get the default instead.
        max_tokens: input.max_tokens ?? 2048,
        temperature: input.temperature ?? 0.1,
        top_k: input.top_k ?? 40,
        stop: SWEEP_STOP_TOKENS,
        stream: false
      };
      const response = await fetch(`${this.baseUrl}/v1/completions`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json"
        },
        body: JSON.stringify(request),
        signal: AbortSignal.timeout(3e4)
      });
      if (!response.ok) {
        const errorText = await response.text();
        return {
          success: false,
          error: "server_error",
          message: `Server returned ${response.status}: ${errorText}`
        };
      }
      const data = await response.json();
      const latencyMs = Date.now() - startTime;
      if (!data.choices || data.choices.length === 0) {
        return {
          success: false,
          error: "no_choices",
          message: "Server returned no completion choices"
        };
      }
      const completionText = data.choices[0].text;
      if (!completionText || completionText.trim().length === 0) {
        // A blank/whitespace-only completion means the model predicts no edit;
        // this is a successful (empty) result, not an error.
        return {
          success: true,
          predicted_content: "",
          file_path: input.file_path,
          latency_ms: latencyMs,
          tokens_generated: 0,
          message: "No changes predicted"
        };
      }
      return {
        success: true,
        predicted_content: completionText,
        file_path: input.file_path,
        latency_ms: latencyMs,
        // ?? keeps a legitimate reported count of 0 distinct from "missing".
        tokens_generated: data.usage?.completion_tokens ?? 0
      };
    } catch (error) {
      const latencyMs = Date.now() - startTime;
      if (error instanceof Error) {
        if (error.name === "AbortError" || error.name === "TimeoutError") {
          return {
            success: false,
            error: "timeout",
            message: "Request timed out",
            latency_ms: latencyMs
          };
        }
        // Node's fetch (undici) wraps socket errors: the top-level message is
        // just "fetch failed" and the code lives on error.cause. Check both so
        // the actionable "server not running" hint actually fires on Node.
        if (
          error.message.includes("ECONNREFUSED") ||
          error.cause?.code === "ECONNREFUSED"
        ) {
          return {
            success: false,
            error: "connection_refused",
            message: "Server not running. Start with: stackmemory sweep start",
            latency_ms: latencyMs
          };
        }
        return {
          success: false,
          error: "request_error",
          message: error.message,
          latency_ms: latencyMs
        };
      }
      // Non-Error throw (string, object, …) — stringify for the message.
      return {
        success: false,
        error: "unknown_error",
        message: String(error),
        latency_ms: latencyMs
      };
    }
  }
  /**
   * Get server info from GET /v1/models.
   * @returns {Promise<object|null>} Parsed JSON body, or null on any failure
   *   (non-2xx status, network error, or 2s timeout).
   */
  async getServerInfo() {
    try {
      const response = await fetch(`${this.baseUrl}/v1/models`, {
        method: "GET",
        signal: AbortSignal.timeout(2e3)
      });
      if (response.ok) {
        return await response.json();
      }
      return null;
    } catch {
      return null;
    }
  }
}
/**
 * Factory for SweepPredictionClient instances.
 *
 * Convenience wrapper so callers don't need to use `new` directly.
 *
 * @param {object} [config] - Optional server config overrides, forwarded
 *   verbatim to the SweepPredictionClient constructor.
 * @returns {SweepPredictionClient} A freshly constructed client.
 */
function createPredictionClient(config) {
  const client = new SweepPredictionClient(config);
  return client;
}
export {
SweepPredictionClient,
createPredictionClient
};
//# sourceMappingURL=prediction-client.js.map