node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp, and enforce a JSON schema on the model output at the generation level.
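To illustrate the JSON-schema feature mentioned above, here is a minimal sketch using the v3-style API (getLlama, createGrammarForJsonSchema, LlamaChatSession); the model path is a placeholder for whatever local GGUF file you have:

import path from "path";
import { fileURLToPath } from "url";
import { getLlama, LlamaChatSession } from "node-llama-cpp";

const __dirname = path.dirname(fileURLToPath(import.meta.url));

const llama = await getLlama();
const model = await llama.loadModel({
    // hypothetical model file; point this at any local GGUF model
    modelPath: path.join(__dirname, "models", "model.gguf")
});
const context = await model.createContext();
const session = new LlamaChatSession({ contextSequence: context.getSequence() });

// The grammar constrains token sampling itself, so the output cannot violate the schema
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: { type: "string" },
        confidence: { type: "number" }
    }
});

const response = await session.prompt("What is the capital of France?", { grammar });
console.log(grammar.parse(response)); // a parsed object matching the schema

Because the schema is enforced during sampling rather than checked afterwards, the parsed result never needs retry-and-validate loops.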
testCmakeBinary.js (JavaScript)
import process from "process";
import { execFile } from "node:child_process";
import path from "path";
import { fileURLToPath } from "url";
import fs from "fs-extra";

const __dirname = path.dirname(fileURLToPath(import.meta.url));

// Checks that the given cmake binary exists on disk and can run `cmake --version` successfully.
// Resolves to true only when the process exits with code 0.
export async function testCmakeBinary(cmakeBinaryPath, { cwd = __dirname, env = process.env } = {}) {
    if (cmakeBinaryPath == null || !(await fs.pathExists(cmakeBinaryPath)))
        return false;

    return new Promise((resolve, reject) => {
        const child = execFile(cmakeBinaryPath, ["--version"], {
            cwd,
            env,
            windowsHide: true
        });

        child.on("exit", (code) => {
            if (code === 0)
                resolve(true);
            else
                resolve(false); // a non-zero exit means the binary is unusable, not an exceptional error
        });
        child.on("error", reject);
        child.on("disconnect", () => resolve(false));
        child.on("close", (code) => {
            if (code === 0)
                resolve(true);
            else
                resolve(false);
        });
    });
}
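For reference, a sketch of how this helper might be called; the cmake path below is an assumption, so substitute whichever binary you want to probe:

import { testCmakeBinary } from "./testCmakeBinary.js";

// "/usr/bin/cmake" is a hypothetical path to a cmake binary
const isUsable = await testCmakeBinary("/usr/bin/cmake");
console.log(isUsable ? "cmake binary works" : "cmake binary is missing or not runnable");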