node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
22 lines • 624 B
JavaScript
import { GbnfTerminal } from "../GbnfTerminal.js";
import { reservedRuleNames } from "./gbnfConsts.js";
/**
 * GBNF terminal matching a JSON boolean literal: `"true"` or `"false"`.
 *
 * Emits the alternation either wrapped in parentheses (safe for embedding
 * inside a larger grammar expression) or bare (used when the rule is
 * resolved by name and grouping is unnecessary).
 */
export class GbnfBoolean extends GbnfTerminal {
    /** @returns {string} the parenthesized grammar expression */
    getGrammar() {
        return this._getGrammar();
    }

    /** @returns {string} the bare alternation, without grouping parentheses */
    getGrammarFromResolve() {
        return this._getGrammar(false);
    }

    /**
     * Build the boolean alternation.
     * @param {boolean} [wrap=true] - whether to surround the alternation with `( ... )`
     * @returns {string} `( "true" | "false" )` when wrapped, `"true" | "false"` otherwise
     */
    _getGrammar(wrap = true) {
        const alternatives = '"true" | "false"';
        return wrap
            ? `( ${alternatives} )`
            : alternatives;
    }

    /** @returns {string} the reserved rule name shared by all boolean terminals */
    getRuleName() {
        return reservedRuleNames.boolean;
    }
}
//# sourceMappingURL=GbnfBoolean.js.map