node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
18 lines • 869 B
JavaScript
import { resolveHuggingFaceEndpoint } from "../../utils/modelDownloadEndpoints.js";
/**
 * Normalize a GGUF model download URL.
 *
 * For Hugging Face URLs (`huggingface.co`, `hf.co`, or the hostname of the
 * configured endpoint), rewrites a `/blob/` web-view path into a `/resolve/`
 * direct-download path and ensures `?download=true` is present.
 * Any other URL is returned unchanged.
 *
 * @param {string} url - The URL to normalize.
 * @param {object} [endpoints] - Endpoint overrides forwarded to
 *     `resolveHuggingFaceEndpoint`.
 * @returns {string} The normalized URL, or the original `url` when no
 *     rewrite applies.
 */
export function normalizeGgufDownloadUrl(url, endpoints) {
    const parsedUrl = new URL(url);
    // Keep the `||` chain lazy: `resolveHuggingFaceEndpoint` is only
    // consulted when the hostname is not one of the well-known HF hosts.
    const isHuggingFaceHost = parsedUrl.hostname === "huggingface.co" ||
        parsedUrl.hostname === "hf.co" ||
        parsedUrl.hostname === new URL(resolveHuggingFaceEndpoint(endpoints)).hostname;
    if (!isHuggingFaceHost)
        return url;
    // HF blob paths look like /<owner>/<repo>/blob/<rev>/<file>
    const segments = parsedUrl.pathname.split("/");
    if (segments.length <= 3 || segments[3] !== "blob")
        return url;
    const downloadUrl = new URL(url);
    segments[3] = "resolve";
    downloadUrl.pathname = segments.join("/");
    if (downloadUrl.searchParams.get("download") !== "true")
        downloadUrl.searchParams.set("download", "true");
    return downloadUrl.href;
}
//# sourceMappingURL=normalizeGgufDownloadUrl.js.map