node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforces a JSON schema on the model output at the generation level.
12 lines (11 loc) • 520 B
TypeScript
import { GbnfTerminal } from "../GbnfTerminal.js";
import { GbnfGrammarGenerator } from "../GbnfGrammarGenerator.js";
import { GbnfJsonFormatStringSchema } from "../types.js";
/**
 * GBNF terminal that generates grammar rules for a JSON string constrained to a
 * specific string `format` (as declared by a {@link GbnfJsonFormatStringSchema}).
 */
export declare class GbnfFormatString extends GbnfTerminal {
    /** The string format this terminal enforces, taken from the JSON schema's `format` field. */
    readonly format: GbnfJsonFormatStringSchema["format"];
    /** @param format - The schema `format` value the generated grammar must enforce. */
    constructor(format: GbnfJsonFormatStringSchema["format"]);
    /** Returns the GBNF grammar text for this terminal, registered via the given generator. */
    getGrammar(grammarGenerator: GbnfGrammarGenerator): string;
    /** Returns the unique GBNF rule name used to reference this terminal's grammar. */
    protected getRuleName(): string;
    // NOTE(review): the helpers below presumably build grammar fragments for
    // date-like and time-like formats — implementation not visible here; confirm
    // against the corresponding .js/.ts implementation file.
    private _getDateGrammar;
    private _getTimeGrammar;
}