node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level
15 lines • 549 B
JavaScript
import fs from "fs-extra";
import { binariesGithubReleasePath } from "../../config.js";
/**
 * Reads the persisted binaries-release metadata file and returns the
 * release identifier stored in its `release` field.
 *
 * @returns {Promise<string>} the GitHub release tag recorded on disk
 */
export async function getBinariesGithubRelease() {
    const {release} = await fs.readJson(binariesGithubReleasePath);
    return release;
}
/**
 * Persists the given release identifier to the binaries-release metadata
 * file as pretty-printed JSON of the shape `{"release": "..."}`.
 *
 * @param {string} release - the GitHub release tag to record
 * @returns {Promise<void>}
 */
export async function setBinariesGithubRelease(release) {
    // 4-space indentation keeps the on-disk JSON human-readable and diff-friendly
    await fs.writeJson(binariesGithubReleasePath, {release}, {spaces: 4});
}
//# sourceMappingURL=binariesGithubRelease.js.map