node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforces a JSON schema on the model output at the generation level.
90 lines • 2.46 kB
JavaScript
import { GbnfTerminal } from "../GbnfTerminal.js";
import { reservedRuleNames } from "./gbnfConsts.js";
import { GbnfGrammar } from "./GbnfGrammar.js";
import { GbnfString } from "./GbnfString.js";
/**
 * GBNF terminal that constrains generated strings to a JSON Schema `format`.
 *
 * Supported formats: `"date"`, `"time"` and `"date-time"` (ISO 8601 /
 * RFC 3339 shapes, quoted as JSON strings). Any other format value falls
 * back to an unconstrained JSON string grammar.
 */
export class GbnfFormatString extends GbnfTerminal {
    format;

    /**
     * @param format - the JSON Schema `format` keyword value to enforce
     */
    constructor(format) {
        super();
        this.format = format;
    }

    /**
     * Builds the GBNF grammar text for this format.
     * @param grammarGenerator - generator used to resolve the fallback string rule
     * @returns the grammar expression as a string
     */
    getGrammar(grammarGenerator) {
        // A literal double-quote terminal: the value is emitted as a JSON string.
        const quote = '"\\""';

        switch (this.format) {
            case "date":
                return new GbnfGrammar([
                    quote,
                    this._getDateGrammar(),
                    quote
                ]).getGrammar();
            case "time":
                return new GbnfGrammar([
                    quote,
                    this._getTimeGrammar(),
                    quote
                ]).getGrammar();
            case "date-time":
                // ISO 8601 combined form: <date> "T" <time>
                return new GbnfGrammar([
                    quote,
                    this._getDateGrammar(),
                    '"T"',
                    this._getTimeGrammar(),
                    quote
                ]).getGrammar();
            default:
                // Unknown format: fall back to a plain, unconstrained JSON string.
                return new GbnfString({
                    minLength: 0,
                    maxLength: 0
                }).resolve(grammarGenerator);
        }
    }

    /** @returns the reserved rule name for this format's string rule */
    getRuleName() {
        return reservedRuleNames.formatString(this.format);
    }

    /**
     * Grammar for a calendar date: YYYY-MM-DD with month 01-12 and day 01-31.
     * @returns the GBNF fragment as a string
     */
    _getDateGrammar() {
        const year = "[0-9]{4}";
        const month = or([
            '"0" [1-9]',
            '"1" [012]'
        ]);
        const day = or([
            '"0" [1-9]',
            "[12] [0-9]",
            '"3" [01]'
        ]);

        return new GbnfGrammar([year, '"-"', month, '"-"', day]).getGrammar();
    }

    /**
     * Grammar for a time of day: HH:MM:SS with optional `.mmm` milliseconds,
     * followed by either `Z` or a `±HH:MM` UTC offset.
     * @returns the GBNF fragment as a string
     */
    _getTimeGrammar() {
        const twoDigitHour = or([
            "[01] [0-9]",
            '"2" [0-3]'
        ]);
        const twoDigitMinuteOrSecond = "[0-5] [0-9]";
        const offset = new GbnfGrammar([
            or(['"+"', '"-"']),
            or([
                "[01] [0-9]",
                '"2" [0-3]'
            ]),
            '":"',
            "[0-5] [0-9]"
        ]).getGrammar();

        return new GbnfGrammar([
            twoDigitHour,
            '":"',
            twoDigitMinuteOrSecond,
            '":"',
            twoDigitMinuteOrSecond,
            '( "." [0-9]{3} )?',
            or([
                '"Z"',
                offset
            ])
        ]).getGrammar();
    }
}
/**
 * Joins GBNF alternatives into a single parenthesized alternation group.
 * @param {string[]} values - grammar fragments to combine
 * @returns {string} e.g. `("a" | "b")`
 */
function or(values) {
    return `(${values.join(" | ")})`;
}
//# sourceMappingURL=GbnfFormatString.js.map