node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp, and enforce a JSON schema on the model's output at the generation level.
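The schema enforcement mentioned in the tagline is separate from the file shown below, but a minimal sketch of it, using node-llama-cpp's v3 API (`getLlama`, `createGrammarForJsonSchema`; the model path is a placeholder), looks roughly like this:

JavaScript
import { getLlama, LlamaChatSession } from "node-llama-cpp";

const llama = await getLlama();
const model = await llama.loadModel({
    modelPath: "./models/my-model.gguf" // placeholder path
});
const context = await model.createContext();
const session = new LlamaChatSession({ contextSequence: context.getSequence() });

// Build a grammar from a JSON schema; generation is then constrained
// so the output always conforms to the schema
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: { type: "string" },
        confidence: { type: "number" }
    }
});

const res = await session.prompt("Is the sky blue?", { grammar });
console.log(grammar.parse(res)); // a schema-conforming object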
JavaScript
import process from "process";
import path from "path";
import os from "os";
import fs from "fs-extra";
import { isUrl } from "./isUrl.js";
// Builds the list of header sets to try when downloading a model file,
// adding a Hugging Face Authorization header for huggingface.co / hf.co URLs
export async function resolveModelFileAccessTokensTryHeaders(modelUrl, tokens, baseHeaders) {
    const res = [];
    if (tokens == null || !isUrl(modelUrl))
        return res;

    const parsedUrl = new URL(modelUrl);
    const { huggingFace } = tokens;

    if (parsedUrl.hostname === "huggingface.co" || parsedUrl.hostname === "hf.co") {
        // `resolveHfToken` is async, so its result must be awaited
        const hfToken = await resolveHfToken(huggingFace);

        // only attach an Authorization header when a token was actually resolved
        if (hfToken != null)
            res.push({
                ...(baseHeaders ?? {}),
                "Authorization": `Bearer ${hfToken}`
            });
    }

    return res;
}

// Resolves a Hugging Face token: an explicitly provided token wins,
// then the HF_TOKEN environment variable, then the CLI token file
async function resolveHfToken(providedToken) {
    // `!= null` covers both `null` and `undefined`
    if (providedToken != null)
        return providedToken;

    if (process.env.HF_TOKEN != null)
        return process.env.HF_TOKEN;

    const hfHomePath = process.env.HF_HOME ||
        path.join(process.env.XDG_CACHE_HOME || path.join(os.homedir(), ".cache"), "huggingface");
    const hfTokenPath = process.env.HF_TOKEN_PATH || path.join(hfHomePath, "token");

    try {
        if (await fs.pathExists(hfTokenPath)) {
            const token = (await fs.readFile(hfTokenPath, "utf8")).trim();
            if (token !== "")
                return token;
        }
    } catch (err) {
        // ignore errors reading the token file and fall through to `undefined`
    }

    return undefined;
}
//# sourceMappingURL=modelFileAccesTokens.js.map
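For context, a hedged sketch of how this helper might be invoked when downloading a gated Hugging Face model; the URL and base headers are illustrative, and the import path follows the source map name above:

JavaScript
import { resolveModelFileAccessTokensTryHeaders } from "./modelFileAccesTokens.js";

// Illustrative gated-model URL; any huggingface.co / hf.co URL triggers
// the Authorization header resolution above
const headersToTry = await resolveModelFileAccessTokensTryHeaders(
    "https://huggingface.co/org/model/resolve/main/model.gguf",
    { huggingFace: undefined }, // no explicit token: falls back to HF_TOKEN or the token file
    { "User-Agent": "model-downloader" } // illustrative base headers
);

// `headersToTry` lists header sets to attempt in order; it is empty
// when no token could be resolved for the host
for (const headers of headersToTry) {
    console.log(headers); // e.g. { "User-Agent": ..., "Authorization": "Bearer hf_..." }
}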