node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
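The schema enforcement works by compiling the JSON schema into a grammar that constrains token sampling during generation, so the model can only emit output matching the schema. A minimal sketch of that feature, based on the grammar API from the node-llama-cpp v3 documentation (the model path is a placeholder; treat exact option names as assumptions):

import { getLlama, LlamaChatSession } from "node-llama-cpp";

const llama = await getLlama();
const model = await llama.loadModel({
    modelPath: "path/to/model.gguf" // placeholder: any local GGUF model
});
const context = await model.createContext();
const session = new LlamaChatSession({
    contextSequence: context.getSequence()
});

// Compile a JSON schema into a generation-level grammar; sampled tokens
// are constrained so the output always conforms to the schema.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: { type: "string" },
        confidence: { type: "number" }
    }
});

const res = await session.prompt("Is the sky blue?", { grammar });
console.log(grammar.parse(res)); // parsed, schema-conforming object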
isLockfileActive.js (JavaScript)
import lockfile from "proper-lockfile";
import { isLockActive } from "lifecycle-utils";
import { lockfileLockScope } from "./withLockfile.js";

// Check whether a lock is currently held for the given resource,
// either by this process (via the in-memory lock scope) or by
// another process (via an on-disk lockfile from proper-lockfile).
export async function isLockfileActive({ resourcePath, staleDuration = 1000 * 10 }) {
    // Fast path: a lock held within this process.
    if (isLockActive(lockfileLockScope, resourcePath))
        return true;

    // Check the on-disk lockfile; lockfiles older than staleDuration are treated as stale.
    const lockfileActive = await lockfile.check(resourcePath, { stale: staleDuration, realpath: false });
    if (lockfileActive)
        return true;

    // Re-check the in-process scope: a lock may have been acquired
    // while the asynchronous filesystem check was pending.
    return isLockActive(lockfileLockScope, resourcePath);
}
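A brief usage sketch of this helper (the resource path and stale duration here are hypothetical):

import { isLockfileActive } from "./isLockfileActive.js";

// Hypothetical path to a file that another process may be working on.
const active = await isLockfileActive({
    resourcePath: "./models/model.gguf",
    staleDuration: 1000 * 30 // treat lockfiles older than 30 seconds as stale
});

if (active)
    console.log("Resource is locked; another operation may be in progress");
else
    console.log("Resource is free to use");

The final re-check of the in-process scope guards against a race: while the asynchronous filesystem check is pending, another part of the same process may acquire the lock, and returning the fresh in-process state catches that case.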