node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
13 lines (12 loc) • 405 B
TypeScript
/**
 * Prints a formatted info line.
 *
 * Accepts the same options object as {@link renderInfoLine} (derived via the
 * `Parameters` utility type, so the two signatures cannot drift apart).
 * Returns nothing.
 *
 * NOTE(review): presumably renders via `renderInfoLine` and writes the result
 * to stdout/console — the implementation is not visible here; confirm.
 */
export declare function printInfoLine(options: Parameters<typeof renderInfoLine>[0]): void;
/**
 * Renders a single info line as a string from a list of title/value pairs.
 *
 * Options:
 * - `title` — optional heading for the line as a whole.
 * - `padTitle` — optional number; presumably the width to pad `title` to — confirm against the implementation.
 * - `separateLines` — optional flag; NOTE(review): looks like it switches between one-line and multi-line output — confirm.
 * - `info` — the entries to render. Each entry has its own `title`, a `value`
 *   that is either a string or a lazy `() => string` (evaluated only when
 *   rendered), and an optional `show` flag — presumably entries with
 *   `show: false` are omitted; verify against the implementation.
 * - `maxWidth` — optional cap on the rendered width; exact truncation/wrapping
 *   behavior is not visible here.
 *
 * @returns the rendered line(s) as a single string.
 */
export declare function renderInfoLine({ title, padTitle, separateLines, info, maxWidth }: {
    title?: string;
    padTitle?: number;
    separateLines?: boolean;
    info: Array<{
        title: string;
        value: string | (() => string);
        show?: boolean;
    }>;
    maxWidth?: number;
}): string;