node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp, and enforce a JSON schema on the model output at the generation level.
10 lines (9 loc) • 423 B
TypeScript
import { GbnfTerminal } from "../GbnfTerminal.js";
import { GbnfGrammarGenerator } from "../GbnfGrammarGenerator.js";
/**
 * A GBNF terminal backed by a pre-written grammar, given either as a single
 * string or as an array of grammar lines.
 *
 * NOTE(review): this is a type declaration only — the runtime behavior below
 * is inferred from the signatures and should be confirmed against the
 * implementation.
 */
export declare class GbnfGrammar extends GbnfTerminal {
    /** The raw GBNF grammar text, as one string or as an array of lines. */
    readonly grammar: string | string[];
    /**
     * Flag controlling how this terminal is resolved.
     * Presumably, when `true`, `resolve` emits the raw grammar text directly
     * instead of referencing it through a generated rule — TODO confirm
     * against the implementation.
     */
    readonly resolveToRawGrammar: boolean;
    /**
     * @param grammar - GBNF grammar text, or an array of grammar lines.
     * @param resolveToRawGrammar - Optional; see `resolveToRawGrammar`.
     *   Default value is not visible in this declaration.
     */
    constructor(grammar: string | string[], resolveToRawGrammar?: boolean);
    /** Returns the grammar as a single string. */
    getGrammar(): string;
    /**
     * Resolves this terminal within the given grammar generator, returning
     * the GBNF text to embed at the use site (see `GbnfTerminal.resolve`).
     */
    resolve(grammarGenerator: GbnfGrammarGenerator): string;
}