node-llama-cpp
Run AI models locally on your machine with node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
JavaScript
export function normalizeGgufDownloadUrl(url) {
    const parsedUrl = new URL(url);

    // Only rewrite Hugging Face URLs; anything else is returned unchanged.
    if (parsedUrl.hostname === "huggingface.co" || parsedUrl.hostname === "hf.co") {
        const pathnameParts = parsedUrl.pathname.split("/");

        // A "blob" path (/<user>/<repo>/blob/<rev>/<file>) points to the file's
        // web page; swap that segment for "resolve", which serves the raw file.
        if (pathnameParts.length > 3 && pathnameParts[3] === "blob") {
            const newUrl = new URL(url);
            pathnameParts[3] = "resolve";
            newUrl.pathname = pathnameParts.join("/");

            // Ask Hugging Face to serve the file as a download.
            if (newUrl.searchParams.get("download") !== "true")
                newUrl.searchParams.set("download", "true");

            return newUrl.href;
        }
    }

    return url;
}
//# sourceMappingURL=normalizeGgufDownloadUrl.js.map
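Usage sketch (illustrative only; the import path, repository, and file names below are made-up examples, not part of the package's documented API): a Hugging Face "blob" page URL is rewritten to its direct "resolve" download URL with ?download=true appended, while URLs from other hosts pass through untouched.

import {normalizeGgufDownloadUrl} from "./normalizeGgufDownloadUrl.js";

// Blob page URL -> direct download URL.
normalizeGgufDownloadUrl("https://huggingface.co/user/repo/blob/main/model.gguf");
// => "https://huggingface.co/user/repo/resolve/main/model.gguf?download=true"

// Non-Hugging-Face URLs are returned as-is.
normalizeGgufDownloadUrl("https://example.com/models/model.gguf");
// => "https://example.com/models/model.gguf"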