node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
8 lines • 449 B
JavaScript
import path from "path";
import { fileURLToPath } from "url";

// Filesystem path of this module, resolved from its `file:` URL.
const currentModulePath = fileURLToPath(import.meta.url);

/** Whether the current process is running under Electron. */
export const runningInElectron = process.versions.electron != null;

/**
 * Whether this module was loaded from inside an Electron `.asar` archive
 * (its path contains a `.asar` directory segment). Only possible in Electron.
 */
export const runningInsideAsar = runningInElectron &&
    currentModulePath
        .toLowerCase()
        .includes(`.asar${path.sep}`);

/** Whether the current process is running under Bun. */
export const runningInBun = process.versions.bun != null;

/** Whether the current process is plain Node.js (neither Electron nor Bun). */
export const runningInNode = !runningInElectron && !runningInBun;
//# sourceMappingURL=runtime.js.map