node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
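For example, JSON schema enforcement can constrain generation so the model's output is guaranteed to parse. The sketch below is based on the documented node-llama-cpp v3 API; the model path and the schema itself are placeholders:

import {fileURLToPath} from "url";
import path from "path";
import {getLlama, LlamaChatSession} from "node-llama-cpp";

const __dirname = path.dirname(fileURLToPath(import.meta.url));

const llama = await getLlama();
const model = await llama.loadModel({
    // placeholder path: point this at any local GGUF model file
    modelPath: path.join(__dirname, "models", "model.gguf")
});
const context = await model.createContext();
const session = new LlamaChatSession({
    contextSequence: context.getSequence()
});

// Build a grammar from a JSON schema; generation is then constrained
// at the token level, so the model can only emit output matching it
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: {type: "string"},
        confidence: {type: "number"}
    }
});

const response = await session.prompt("What is the capital of France?", {
    grammar
});
const parsed = grammar.parse(response); // object matching the schema
console.log(parsed.answer);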
getConsoleLogPrefix.js (JavaScript)
import chalk from "chalk";
import { getForceShowConsoleLogPrefix, getIsRunningFromCLI } from "../state.js";
// Returns the gray "[node-llama-cpp]" prefix for console output.
// The prefix is omitted when running from the CLI, unless it is
// forced via the forcePrefix argument or the global state flag.
export function getConsoleLogPrefix(forcePrefix = false, padEnd = true) {
    const isInCLI = getIsRunningFromCLI();
    const forceShowLogPrefix = getForceShowConsoleLogPrefix();

    if (!isInCLI || forceShowLogPrefix || forcePrefix)
        return chalk.gray("[node-llama-cpp]") + (padEnd ? " " : "");

    return "";
}
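A hypothetical call site, showing how the helper would typically be used inside the library (the log message is illustrative):

import { getConsoleLogPrefix } from "./getConsoleLogPrefix.js";

// Outside the CLI this prints: [node-llama-cpp] loading model...
// From the CLI the prefix is dropped, so only the message is printed.
console.log(getConsoleLogPrefix() + "loading model...");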