node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
16 lines (15 loc) • 639 B
TypeScript
import { GbnfTerminal } from "../GbnfTerminal.js";
import { GbnfGrammarGenerator } from "../GbnfGrammarGenerator.js";
import { GbnfJsonSchema } from "../types.js";
/**
 * A GBNF terminal that refers to another grammar definition by name rather
 * than inlining it, deferring resolution of the referenced terminal until
 * grammar generation.
 *
 * NOTE(review): presumably models JSON schema `$defs`/`$ref` references in
 * the generated grammar — confirm against the implementation.
 */
export declare class GbnfRef extends GbnfTerminal {
/** Lazily resolves the terminal for the referenced definition's value (a thunk, likely to support recursive/forward references — confirm). */
readonly getValueTerminal: () => GbnfTerminal;
/** Name of the referenced definition. */
readonly defName: string;
/** The JSON schema of the referenced definition. */
readonly def: GbnfJsonSchema;
/**
 * @param options - Destructured options:
 *   `getValueTerminal` — thunk producing the referenced terminal;
 *   `defName` — name of the referenced definition;
 *   `def` — JSON schema of the referenced definition.
 */
constructor({ getValueTerminal, defName, def }: {
getValueTerminal: () => GbnfTerminal;
defName: string;
def: GbnfJsonSchema;
});
/** Returns the GBNF grammar text for this reference, using the generator's state. */
getGrammar(grammarGenerator: GbnfGrammarGenerator): string;
/** Produces the rule name under which this terminal is registered in the generated grammar (overrides the base-class naming — confirm base behavior). */
protected generateRuleName(grammarGenerator: GbnfGrammarGenerator): string;
}