node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
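As a brief, hedged illustration of that generation-level JSON schema enforcement, the sketch below follows node-llama-cpp's v3 API (`getLlama`, `createGrammarForJsonSchema`, `LlamaChatSession`); the model path and prompt are placeholders you would replace with your own.

JavaScript
import { getLlama, LlamaChatSession } from "node-llama-cpp";

const llama = await getLlama();
// Placeholder path: point this at any GGUF model you have downloaded locally
const model = await llama.loadModel({ modelPath: "path/to/model.gguf" });
const context = await model.createContext();
const session = new LlamaChatSession({ contextSequence: context.getSequence() });

// The grammar constrains token generation so the output always matches this schema
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        title: { type: "string" },
        positiveWords: { type: "array", items: { type: "string" } }
    }
});

const res = await session.prompt("Summarize the sentiment of this review...", { grammar });
const parsed = grammar.parse(res); // object guaranteed to match the schema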
JavaScript
import chalk from "chalk";
import logSymbols from "log-symbols";
import { clockChar } from "../consts.js";
import { getConsoleLogPrefix } from "./getConsoleLogPrefix.js";

// Prints a "loading" line before running `callback`, then a success or failure
// line depending on the outcome. `messageAndOptions` is either a plain string
// (reused for every state) or an object with `loading`, `success`, and `fail`
// messages plus an optional `disableLogs` flag that skips logging entirely.
export default async function withStatusLogs(messageAndOptions, callback) {
    if (typeof messageAndOptions !== "string" && messageAndOptions.disableLogs)
        return await callback();

    console.log(getConsoleLogPrefix() + `${chalk.cyan(clockChar)} ${typeof messageAndOptions === "string" ? messageAndOptions : messageAndOptions.loading}`);

    try {
        const res = await callback();

        if (typeof messageAndOptions !== "string")
            console.log(getConsoleLogPrefix() + `${logSymbols.success} ${messageAndOptions.success}`);
        else
            console.log(getConsoleLogPrefix() + `${logSymbols.success} ${messageAndOptions}`);

        return res;
    } catch (er) {
        if (typeof messageAndOptions !== "string")
            console.log(getConsoleLogPrefix() + `${logSymbols.error} ${messageAndOptions.fail}`);
        else
            console.log(getConsoleLogPrefix() + `${logSymbols.error} ${messageAndOptions}`);

        throw er;
    }
}
//# sourceMappingURL=withStatusLogs.js.map
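For context, here is a minimal usage sketch of this helper covering both call forms it accepts. The relative import path, the fetch URL, and the `downloadModel` function are hypothetical; this utility is internal to node-llama-cpp, so adapt the path to where the file lives relative to your code.

JavaScript
import withStatusLogs from "./utils/withStatusLogs.js"; // hypothetical path

// String form: the same message is reused for the loading, success, and failure lines
const metadata = await withStatusLogs("Fetching model metadata", async () => {
    const res = await fetch("https://example.com/model.json"); // hypothetical URL
    return await res.json();
});

// Object form: distinct messages per state, plus an optional flag to silence all logging
await withStatusLogs({
    loading: "Downloading model...",
    success: "Model downloaded",
    fail: "Model download failed",
    disableLogs: process.env.QUIET === "1"
}, async () => {
    await downloadModel(); // hypothetical helper doing the actual work
});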