UNPKG

node-llama-cpp

Version:

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforces a JSON schema on the model output at the generation level.

22 lines 1.13 kB
import { removeUndefinedFields } from "../../utils/removeNullFields.js";
import { getExampleUsageCodeOfGetLlama } from "./getExampleUsageCodeOfGetLlama.js";

/**
 * Logs example `getLlama(...)` usage code for a binary the user has just built.
 *
 * The example's options object mirrors the build: the GPU type is included only
 * when `specifyGpuType` is set, and custom CMake options are included only when
 * at least one was used (sorted by key for stable, readable output). When the
 * resulting options object is empty, the example is printed with no options at all.
 *
 * @param buildOptions - build configuration; `gpu` and `customCmakeOptions` (a Map) are read
 * @param specifyGpuType - whether to pin the example to the GPU type used for the build
 * @param showLatestBuildUsageExample - when `true` (default), also prints a
 *     `getLlama("lastBuild")` example that always uses the most recent CLI build
 */
export function logBinaryUsageExampleToConsole(buildOptions, specifyGpuType, showLatestBuildUsageExample = true) {
    console.log("To use the binary you've just built, use this code:");

    const llamaOptions = removeUndefinedFields({
        gpu: specifyGpuType
            ? buildOptions.gpu
            : undefined,
        cmakeOptions: buildOptions.customCmakeOptions.size === 0
            ? undefined
            : Object.fromEntries(
                [...buildOptions.customCmakeOptions.entries()]
                    // sort by key so the printed example is deterministic
                    .sort(([keyA], [keyB]) => keyA.localeCompare(keyB))
            )
    });

    const hasOptions = Object.keys(llamaOptions).length !== 0;
    console.log(getExampleUsageCodeOfGetLlama(hasOptions ? llamaOptions : undefined));

    if (showLatestBuildUsageExample) {
        console.log();
        console.log("To always use the latest binary you build using a CLI command, use this code:");
        console.log(getExampleUsageCodeOfGetLlama("lastBuild"));
    }
}
//# sourceMappingURL=logBinaryUsageExampleToConsole.js.map