
node-llama-cpp


Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.

logUsedGpuTypeOption.js
import chalk from "chalk";
import { getPrettyBuildGpuName } from "../../bindings/consts.js";

// Logs which GPU backend option is in use: either "disabled" or the
// human-readable name of the selected GPU type.
export function logUsedGpuTypeOption(gpu) {
    if (gpu == false)
        console.log(`${chalk.yellow("GPU:")} disabled`);
    else
        console.log(`${chalk.yellow("GPU:")} ${getPrettyBuildGpuName(gpu)}`);
}
//# sourceMappingURL=logUsedGpuTypeOption.js.map
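A minimal usage sketch, assuming the caller passes either false (GPU disabled) or a GPU type name such as "cuda" or "metal" that getPrettyBuildGpuName can format; the exact set of accepted values is an assumption here:

import { logUsedGpuTypeOption } from "./logUsedGpuTypeOption.js";

// Prints a yellow "GPU:" label followed by "disabled"
logUsedGpuTypeOption(false);

// Prints the pretty display name for the given GPU type (assumed example value)
logUsedGpuTypeOption("cuda");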