node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level
23 lines • 711 B
JavaScript
import { GbnfTerminal } from "../GbnfTerminal.js";
/**
 * A GBNF terminal that wraps a raw grammar string, or an array of grammar
 * fragments that are joined with spaces.
 *
 * When `resolveToRawGrammar` is `true`, `resolve` returns the grammar text
 * directly instead of delegating to `GbnfTerminal.resolve`
 * (which presumably registers the grammar as a named rule — confirm
 * against GbnfTerminal).
 */
export class GbnfGrammar extends GbnfTerminal {
    grammar;
    resolveToRawGrammar;
    /**
     * @param {string | string[]} grammar - the grammar text, or an array of
     *     fragments to be joined with single spaces (empty fragments are dropped)
     * @param {boolean} [resolveToRawGrammar=false] - when `true`, `resolve`
     *     returns the raw grammar text instead of deferring to the base class
     */
    constructor(grammar, resolveToRawGrammar = false) {
        super();
        this.grammar = grammar;
        this.resolveToRawGrammar = resolveToRawGrammar;
    }
    /**
     * Returns the grammar as a single string.
     * Array input is joined with spaces, skipping empty-string fragments.
     * @returns {string}
     */
    getGrammar() {
        // Array.isArray instead of `instanceof Array`: also handles arrays
        // created in another realm, and is the idiomatic type check.
        if (Array.isArray(this.grammar))
            return this.grammar
                .filter((item) => item !== "")
                .join(" ");
        return this.grammar;
    }
    /**
     * Resolves to the raw grammar text when configured to do so;
     * otherwise defers to the base class implementation.
     * @returns {string}
     */
    resolve(grammarGenerator) {
        if (this.resolveToRawGrammar)
            return this.getGrammar();
        return super.resolve(grammarGenerator);
    }
}
//# sourceMappingURL=GbnfGrammar.js.map