node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
27 lines • 651 B
JavaScript
import which from "which";
import { asyncEvery } from "./asyncEvery.js";
/**
 * Checks whether every tool required to build llama.cpp from source is installed.
 * @returns {Promise<boolean>} `true` only when both `git` and `npm` are resolvable on the PATH.
 */
export async function hasBuildingFromSourceDependenciesInstalled() {
    const dependencyChecks = [
        hasGit(),
        hasNpm()
    ];
    return await asyncEvery(dependencyChecks);
}
/**
 * Resolves a command on the PATH, treating any lookup failure as absence.
 * Shared implementation for `hasGit` / `hasNpm`, which previously duplicated
 * this try/catch logic verbatim.
 * @param {string} command - Executable name to look up (e.g. `"git"`).
 * @returns {Promise<boolean>} `true` when the command resolves to a non-empty path.
 */
async function hasCommand(command) {
    try {
        const resolvedPath = await which(command);
        return resolvedPath !== "";
    }
    catch {
        // `which` throws when the command is not found; treat that as "not installed".
        return false;
    }
}
/**
 * Checks whether `git` is available on the PATH.
 * @returns {Promise<boolean>}
 */
export async function hasGit() {
    return await hasCommand("git");
}
/**
 * Checks whether `npm` is available on the PATH.
 * @returns {Promise<boolean>}
 */
export async function hasNpm() {
    return await hasCommand("npm");
}
//# sourceMappingURL=hasBuildingFromSourceDependenciesInstalled.js.map