node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
JavaScript
// Given a planned VRAM usage and the current VRAM state, estimate how much of
// that usage is backed by unified memory (VRAM shared with system RAM).
export function getRamUsageFromUnifiedVram(vramUsage, vramState) {
    // VRAM that is dedicated, i.e. not shared with system RAM
    const onlyVramSize = vramState.total - vramState.unifiedSize;
    // VRAM already in use, assumed to fill dedicated VRAM first
    const existingUsage = Math.max(0, vramState.total - vramState.free);
    // Usage beyond the still-free dedicated VRAM spills into unified memory,
    // capped at the total unified-memory size
    const unifiedRamUsage = Math.min(vramState.unifiedSize, Math.max(0, vramUsage - Math.max(0, onlyVramSize - existingUsage)));
    return unifiedRamUsage;
}
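
For illustration, a hypothetical call might look like the following. The vramState shape (total, free, unifiedSize) is inferred from the function body; the byte values below are made-up assumptions for this sketch, not figures taken from the library.

JavaScript
// A GPU with 24 GiB of VRAM, 8 GiB of which is unified (shared with system RAM),
// and 4 GiB already in use, leaving roughly 12 GiB of dedicated VRAM free.
const GiB = 1024 ** 3;
const vramState = { total: 24 * GiB, free: 20 * GiB, unifiedSize: 8 * GiB };

// A 14 GiB allocation overflows the free dedicated VRAM by 2 GiB,
// so 2 GiB of it is expected to be backed by system RAM.
console.log(getRamUsageFromUnifiedVram(14 * GiB, vramState)); // 2147483648 (2 GiB)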