node-llama-cpp

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
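
A minimal usage sketch of the schema-enforcement feature, assuming the node-llama-cpp v3 API as documented (getLlama, createGrammarForJsonSchema, LlamaChatSession); the model path, schema, and prompt below are placeholders, not part of this file:

import {getLlama, LlamaChatSession} from "node-llama-cpp";

// Load a local GGUF model (placeholder path).
const llama = await getLlama();
const model = await llama.loadModel({modelPath: "path/to/model.gguf"});
const context = await model.createContext();
const session = new LlamaChatSession({contextSequence: context.getSequence()});

// Build a grammar from a JSON schema; the generated GBNF grammar constrains
// token sampling so the output conforms to the schema during generation.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: {type: "string"},
        confidence: {type: "number"}
    }
});

const response = await session.prompt("Summarize llama.cpp in one sentence.", {grammar});
console.log(grammar.parse(response)); // already valid JSON matching the schema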

export class GbnfTerminal {
    _ruleName = null;

    /** To be used only by `getRuleName` */
    generateRuleName(grammarGenerator) {
        return grammarGenerator.generateRuleName();
    }

    // Returns this terminal's rule name, generating and caching one on first use.
    getRuleName(grammarGenerator) {
        if (this._ruleName != null)
            return this._ruleName;

        const ruleName = this.generateRuleName(grammarGenerator);
        this._ruleName = ruleName;

        return ruleName;
    }

    getGrammarFromResolve(grammarGenerator) {
        return this.getGrammar(grammarGenerator);
    }

    // Registers this terminal's grammar text as a named rule and returns the rule name.
    // If another terminal already registered identical grammar text, its rule name is reused.
    resolve(grammarGenerator) {
        if (this._ruleName != null)
            return this._ruleName;

        const grammar = this.getGrammarFromResolve(grammarGenerator);

        const existingRuleName = grammarGenerator.ruleContentToRuleName.get(grammar);
        if (existingRuleName != null) {
            this._ruleName = existingRuleName;
            return existingRuleName;
        }

        const ruleName = this.getRuleName(grammarGenerator);

        // Avoid registering a rule whose body is identical to its own name.
        if (grammar === ruleName) {
            this._ruleName = ruleName;
            return ruleName;
        }

        if (!grammarGenerator.rules.has(ruleName)) {
            grammarGenerator.rules.set(ruleName, grammar);
            grammarGenerator.ruleContentToRuleName.set(grammar, ruleName);
        }

        this._ruleName = ruleName;
        return ruleName;
    }
}
//# sourceMappingURL=GbnfTerminal.js.map
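
GbnfTerminal is the base class for the terminals that make up a generated GBNF grammar: resolve() registers a terminal's grammar text under a rule name and reuses an existing rule whenever another terminal produced identical grammar text. The illustration below is a hypothetical sketch, not part of the library: the GbnfTrueFalse subclass, the import path, and the stand-in generator object are assumptions, with the generator's shape inferred only from the members this file touches (generateRuleName(), rules, ruleContentToRuleName).

import {GbnfTerminal} from "./GbnfTerminal.js"; // placeholder path

// Hypothetical terminal: subclasses are expected to provide getGrammar().
class GbnfTrueFalse extends GbnfTerminal {
    getGrammar() {
        return '("true" | "false")';
    }
}

// Stand-in for the library's internal grammar generator (assumed shape).
let ruleId = 0;
const grammarGenerator = {
    rules: new Map(),
    ruleContentToRuleName: new Map(),
    generateRuleName() {
        return "rule" + ruleId++;
    }
};

const first = new GbnfTrueFalse().resolve(grammarGenerator);
const second = new GbnfTrueFalse().resolve(grammarGenerator);
console.log(first === second);       // true: identical grammar text is deduplicated to one rule
console.log(grammarGenerator.rules); // Map(1) { 'rule0' => '("true" | "false")' }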