node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforces a JSON schema on the model output at the generation level.
17 lines • 464 B
JavaScript
import fs from "fs-extra";
import { lastBuildInfoJsonPath } from "../../config.js";
/**
 * Reads the persisted last-build info from the JSON file at
 * `lastBuildInfoJsonPath`.
 *
 * This is a best-effort read: any failure (most commonly a missing file when
 * no build has happened yet) yields `null` instead of throwing.
 *
 * @returns {Promise<object|null>} the parsed build info, or `null` when the
 * file cannot be read or parsed
 */
export async function getLastBuildInfo() {
    try {
        return await fs.readJson(lastBuildInfoJsonPath);
    }
    catch {
        return null;
    }
}
/**
 * Persists the given build info to the JSON file at `lastBuildInfoJsonPath`,
 * pretty-printed with a 4-space indent.
 *
 * @param {object} buildInfo - serializable build metadata to store
 * @returns {Promise<void>}
 */
export async function setLastBuildInfo(buildInfo) {
    const writeOptions = { spaces: 4 };
    await fs.writeJson(lastBuildInfoJsonPath, buildInfo, writeOptions);
}
//# sourceMappingURL=lastBuildInfo.js.map