
node-llama-cpp

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.

consts.js (13 lines, 329 B)
const prettyBuildGpuNames = {
    metal: "Metal",
    cuda: "CUDA",
    vulkan: "Vulkan"
};
export function getPrettyBuildGpuName(gpu) {
    if (gpu == null)
        return "unknown GPU";
    if (gpu == false)
        return "no GPU";
    return prettyBuildGpuNames[gpu] ?? ('"' + gpu + '"');
}
//# sourceMappingURL=consts.js.map
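
For reference, a minimal usage sketch of this helper. The import path below is an assumption: it loads the compiled file directly from the package's dist folder, and getPrettyBuildGpuName may not be re-exported from the package's main entry point.

// Hypothetical direct import of the compiled module; adjust the path to
// wherever consts.js lives in the installed package.
import { getPrettyBuildGpuName } from "node-llama-cpp/dist/consts.js";

console.log(getPrettyBuildGpuName("metal"));  // "Metal"        (known GPU, pretty name)
console.log(getPrettyBuildGpuName(null));     // "unknown GPU"  (gpu == null)
console.log(getPrettyBuildGpuName(false));    // "no GPU"       (CPU-only build)
console.log(getPrettyBuildGpuName("rocm"));   // '"rocm"'       (unmapped value, quoted as-is)

The lookup table only covers metal, cuda, and vulkan; any other non-null, non-false value falls through the ?? operator and is returned wrapped in double quotes.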