node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp, and enforce a JSON schema on the model output at the generation level.
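As a quick illustration of the generation-level JSON schema enforcement mentioned above, here is a minimal sketch based on the library's documented v3 API; the model path is a placeholder, and exact names may differ between versions.

JavaScript
import { getLlama, LlamaChatSession } from "node-llama-cpp";

const llama = await getLlama();
// "model.gguf" is a placeholder; point this at a real GGUF model file
const model = await llama.loadModel({ modelPath: "model.gguf" });
const context = await model.createContext();
const session = new LlamaChatSession({ contextSequence: context.getSequence() });

// The grammar constrains token sampling so the output always matches the schema
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        positiveWordCount: { type: "number" }
    }
});

const res = await session.prompt("Count the positive words: good, bad, great", { grammar });
console.log(grammar.parse(res)); // guaranteed-valid JSON, parsed into an object

Below is one of the package's source files, which resolves the on-disk location of the native binding binary: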
JavaScript
import path from "path";
import fs from "fs-extra";
import { runningInElectron } from "../../utils/runtime.js";

export async function resolveActualBindingBinaryPath(binaryPath) {
    const absolutePath = path.resolve(binaryPath);

    // Outside of Electron the resolved path can be used as-is
    if (!runningInElectron)
        return absolutePath;

    // Inside an Electron app, a binary bundled in an ".asar" archive cannot be
    // loaded directly; rewrite the path to point at the ".asar.unpacked" copy
    const fixedAsarPath = absolutePath.replace(".asar" + path.sep, ".asar.unpacked" + path.sep);

    try {
        // Prefer the unpacked copy when it actually exists on disk
        if (await fs.pathExists(fixedAsarPath))
            return fixedAsarPath;

        return absolutePath;
    } catch (err) {
        // If the existence check fails, fall back to the original path
        return absolutePath;
    }
}
//# sourceMappingURL=resolveActualBindingBinaryPath.js.map
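Packaged Electron apps ship their code inside an app.asar archive, and native .node modules cannot be loaded from within it, which is why build tools unpack them to a sibling app.asar.unpacked directory; the helper above rewrites the path accordingly. A minimal usage sketch follows, with a hypothetical binding filename used only for illustration:

JavaScript
import { createRequire } from "module";
import { resolveActualBindingBinaryPath } from "./resolveActualBindingBinaryPath.js";

const require = createRequire(import.meta.url);

// "./bins/llama-addon.node" is a hypothetical path, not the package's real layout
const bindingPath = await resolveActualBindingBinaryPath("./bins/llama-addon.node");
const binding = require(bindingPath);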