node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
import { withLock } from "lifecycle-utils";
import { getLlamaForOptions } from "../getLlama.js";
import { LlamaLogLevel } from "../types.js";
// Lazily created shared instance, reused across all callers
let sharedLlamaWithoutBackend = null;
/**
 * This is used to access various methods on the addon side without actually loading a backend
 */
export async function getLlamaWithoutBackend() {
    // Fast path: reuse the shared instance if it has already been created
    if (sharedLlamaWithoutBackend != null)
        return sharedLlamaWithoutBackend;
    return await withLock(getLlamaWithoutBackend, "loadAddon", async () => {
        // Re-check inside the lock: another caller may have finished initialization while we waited
        if (sharedLlamaWithoutBackend != null)
            return sharedLlamaWithoutBackend;
        // Load the native addon from prebuilt binaries only, with GPU disabled,
        // minimal logging, no build step, and llama.cpp initialization skipped
        sharedLlamaWithoutBackend = await getLlamaForOptions({
            gpu: false,
            progressLogs: false,
            logLevel: LlamaLogLevel.error,
            build: "never",
            usePrebuiltBinaries: true,
            vramPadding: 0
        }, {
            skipLlamaInit: true
        });
return sharedLlamaWithoutBackend;
});
}
//# sourceMappingURL=getLlamaWithoutBackend.js.map
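
For context, a minimal usage sketch follows. The import path and the call site are assumptions for illustration; within node-llama-cpp this helper is invoked internally wherever addon-side utilities are needed without spinning up a compute backend. Repeated calls resolve to the same cached instance.

// Hypothetical call site (ESM module with top-level await); the import path is an assumption
import { getLlamaWithoutBackend } from "./getLlamaWithoutBackend.js";

// Obtain the shared backend-less instance for addon-side work that does not
// require GPU or CPU inference resources
const llama = await getLlamaWithoutBackend();

// Calling again returns the exact same cached instance
const sameLlama = await getLlamaWithoutBackend();
console.log(llama === sameLlama); // true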