node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
28 lines • 1 kB
JavaScript
import { GbnfTerminal } from "../GbnfTerminal.js";
import { GbnfGrammar } from "./GbnfGrammar.js";
import { GbnfWhitespace } from "./GbnfWhitespace.js";
import { reservedRuleNames } from "./gbnfConsts.js";
/**
 * GBNF terminal that emits a literal comma followed by scope-aware
 * whitespace (delegated to {@link GbnfWhitespace}).
 */
export class GbnfCommaWhitespace extends GbnfTerminal {
    scopeState;
    newLine;

    /**
     * @param scopeState - grammar scope state (settings and current nesting scope)
     * @param [options] - `newLine` controls newline placement relative to the comma; defaults to `"before"`
     */
    constructor(scopeState, options = {}) {
        super();
        const { newLine = "before" } = options;
        this.scopeState = scopeState;
        this.newLine = newLine;
    }

    /** Builds the GBNF fragment: a `","` literal, then this scope's whitespace grammar. */
    getGrammar() {
        const whitespace = new GbnfWhitespace(this.scopeState, { newLine: this.newLine });
        const parts = ['","', whitespace.getGrammar()];
        return new GbnfGrammar(parts).getGrammar();
    }

    /**
     * Reserved rule name for this terminal. Newline placement is forced to
     * `false` when the scope settings disallow newlines.
     */
    getRuleName() {
        const { settings, currentNestingScope } = this.scopeState;
        const newLine = settings.allowNewLines
            ? this.newLine
            : false;
        return reservedRuleNames.commaWhitespace({
            newLine,
            scopeSpaces: settings.scopePadSpaces,
            nestingScope: currentNestingScope
        });
    }
}
//# sourceMappingURL=GbnfCommaWhitespace.js.map