node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
42 lines • 1.34 kB
JavaScript
/**
 * Allocates unique GBNF rule names and assembles a complete GBNF
 * grammar file from a root grammar plus the accumulated named rules.
 */
export class GbnfGrammarGenerator {
    rules = new Map();
    ruleContentToRuleName = new Map();
    literalValueRuleNames = new Map();
    ruleId = 0;
    valueRuleId = 0;

    /**
     * Allocate the next sequential rule name (`rule0`, `rule1`, ...).
     * @returns {string} a unique rule name
     */
    generateRuleName() {
        return `rule${this.ruleId++}`;
    }

    /**
     * Return a stable rule name for a literal value, creating and caching
     * a new `val<N>` name on first use so repeated values share one rule.
     * @param {*} value - the literal value used as the cache key
     * @returns {string} the rule name associated with this value
     */
    generateRuleNameForLiteralValue(value) {
        const cachedName = this.literalValueRuleNames.get(value);
        if (cachedName != null)
            return cachedName;
        const newName = `val${this.valueRuleId++}`;
        this.literalValueRuleNames.set(value, newName);
        return newName;
    }

    /**
     * Render the full GBNF file text: the `root` rule first, followed by
     * every registered rule (skipping null grammars), one per line.
     * @param {string} rootGrammar - grammar body for the `root` rule
     * @returns {string} the newline-joined GBNF file content
     */
    generateGbnfFile(rootGrammar) {
        const lines = [`root ::= ${rootGrammar}`];
        for (const [ruleName, grammar] of this.rules) {
            if (grammar == null)
                continue;
            lines.push(`${ruleName} ::= ${grammar}`);
        }
        return lines.join("\n");
    }

    /**
     * Length of the next literal-value rule name that would be generated,
     * without actually allocating it.
     * @returns {number} character count of `val<nextId>`
     */
    getProposedLiteralValueRuleNameLength() {
        return "val".length + String(this.valueRuleId).length;
    }
}
//# sourceMappingURL=GbnfGrammarGenerator.js.map