node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforces a JSON schema on the model output at the generation level.
11 lines • 812 B
JavaScript
import { getGbnfJsonTerminalForGbnfJsonSchema } from "./utils/getGbnfJsonTerminalForGbnfJsonSchema.js";
import { GbnfGrammarGenerator } from "./GbnfGrammarGenerator.js";
import { GbnfJsonScopeState } from "./utils/GbnfJsonScopeState.js";
/**
 * Generates a complete GBNF grammar file for the given GBNF JSON schema.
 *
 * Builds a terminal for the schema, renders its grammar, and appends a
 * trailing-newline terminal before assembling the final grammar file text.
 *
 * @param {object} schema - the GBNF JSON schema to build a grammar for
 * @param {object} [options]
 * @param {boolean} [options.allowNewLines=true] - whether the generated grammar permits newlines inside the JSON output
 * @param {number} [options.scopePadSpaces=4] - number of spaces used to pad nested scopes
 * @returns {string} the generated GBNF grammar file content
 */
export function getGbnfGrammarForGbnfJsonSchema(schema, { allowNewLines = true, scopePadSpaces = 4 } = {}) {
    const generator = new GbnfGrammarGenerator();
    const initialScopeState = new GbnfJsonScopeState({ allowNewLines, scopePadSpaces });

    const schemaTerminal = getGbnfJsonTerminalForGbnfJsonSchema(schema, generator, initialScopeState);
    const schemaGrammar = schemaTerminal.getGrammar(generator);

    // Append a GBNF terminal of four literal "\n" escapes, then a rule matching
    // any number of trailing newlines, so generation ends cleanly after the JSON.
    const trailingNewlinesGrammar = ` "${"\\n".repeat(4)}"` + " [\\n]*";

    return generator.generateGbnfFile(schemaGrammar + trailingNewlinesGrammar);
}
//# sourceMappingURL=getGbnfGrammarForGbnfJsonSchema.js.map