jison-gho
A parser generator with a Bison/YACC-similar API, derived from the zaach/jison repo (see the usage sketch below the file summary)
58 lines (54 loc) • 1.29 MB
JavaScript
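
For orientation, here is a minimal usage sketch of the programmatic API. It assumes this fork keeps the upstream zaach/jison interface; the require('jison-gho') entry point and the grammar shape shown are assumptions based on the upstream README, not taken from the bundled file below.

    // Minimal sketch (assumed API, mirroring upstream zaach/jison):
    // build a tiny addition parser from a combined lexer + BNF grammar object.
    var Jison = require('jison-gho');   // assumption: main export matches upstream `jison`

    var grammar = {
        lex: {
            rules: [
                ['\\s+',   '/* skip whitespace */'],
                ['[0-9]+', "return 'NUMBER';"],
                ['\\+',    "return '+';"],
                ['$',      "return 'EOF';"]
            ]
        },
        operators: [
            ['left', '+']                       // resolve the e '+' e ambiguity
        ],
        bnf: {
            expressions: [['e EOF', 'return $1;']],
            e: [
                ['e + e',  '$$ = $1 + $3;'],
                ['NUMBER', '$$ = Number(yytext);']
            ]
        }
    };

    var parser = new Jison.Parser(grammar);
    console.log(parser.parse('1 + 2 + 3'));    // expected output: 6

The bundle below is the generated command-line script shipped by the package; the sketch above is only illustrative and is not part of that file.
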
#!/usr/bin/env node
'use strict';
var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; };
var _templateObject = _taggedTemplateLiteral(['\n There\'s an error in your lexer regex rules or epilogue.\n Maybe you did not correctly separate the lexer sections with a \'%%\'\n on an otherwise empty line?\n The lexer spec file should have this structure:\n \n definitions\n %%\n rules\n %% // <-- optional!\n extra_module_code // <-- optional epilogue!\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n '], ['\n There\'s an error in your lexer regex rules or epilogue.\n Maybe you did not correctly separate the lexer sections with a \'%%\'\n on an otherwise empty line?\n The lexer spec file should have this structure:\n \n definitions\n %%\n rules\n %% // <-- optional!\n extra_module_code // <-- optional epilogue!\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject2 = _taggedTemplateLiteral(['\n There\'s probably an error in one or more of your lexer regex rules.\n The lexer rule spec should have this structure:\n \n regex action_code\n \n where \'regex\' is a lex-style regex expression (see the\n jison and jison-lex documentation) which is intended to match a chunk\n of the input to lex, while the \'action_code\' block is the JS code\n which will be invoked when the regex is matched. The \'action_code\' block\n may be any (indented!) set of JS statements, optionally surrounded\n by \'{...}\' curly braces or otherwise enclosed in a \'%{...%}\' block.\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n '], ['\n There\'s probably an error in one or more of your lexer regex rules.\n The lexer rule spec should have this structure:\n \n regex action_code\n \n where \'regex\' is a lex-style regex expression (see the\n jison and jison-lex documentation) which is intended to match a chunk\n of the input to lex, while the \'action_code\' block is the JS code\n which will be invoked when the regex is matched. The \'action_code\' block\n may be any (indented!) set of JS statements, optionally surrounded\n by \'{...}\' curly braces or otherwise enclosed in a \'%{...%}\' block.\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject3 = _taggedTemplateLiteral(['\n There\'s an error in your lexer epilogue a.k.a. \'extra_module_code\' block.\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n '], ['\n There\'s an error in your lexer epilogue a.k.a. \'extra_module_code\' block.\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject4 = _taggedTemplateLiteral(['\n Encountered an unsupported definition type: ', '.\n \n Erroneous area:\n ', '\n '], ['\n Encountered an unsupported definition type: ', '.\n \n Erroneous area:\n ', '\n ']),
_templateObject5 = _taggedTemplateLiteral(['\n The \'%{...%}\' lexer setup action code section does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n The \'%{...%}\' lexer setup action code section does not compile: ', '\n \n Erroneous area:\n ', '\n ']),
_templateObject6 = _taggedTemplateLiteral(['\n You did not specify a legal file path for the \'%import\' initialization code statement, which must have the format:\n %import qualifier_name file_path\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n '], ['\n You did not specify a legal file path for the \'%import\' initialization code statement, which must have the format:\n %import qualifier_name file_path\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject7 = _taggedTemplateLiteral(['\n %import name or source filename missing maybe?\n \n Note: each \'%import\'-ed initialization code section must be qualified by a name, e.g. \'required\' before the import path itself:\n %import qualifier_name file_path\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n '], ['\n %import name or source filename missing maybe?\n \n Note: each \'%import\'-ed initialization code section must be qualified by a name, e.g. \'required\' before the import path itself:\n %import qualifier_name file_path\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject8 = _taggedTemplateLiteral(['\n The \'%code ', '\' action code section does not compile: ', '\n \n ', '\n \n Erroneous area:\n ', '\n '], ['\n The \'%code ', '\' action code section does not compile: ', '\n \n ', '\n \n Erroneous area:\n ', '\n ']),
_templateObject9 = _taggedTemplateLiteral(['\n Each \'%code\' initialization code section must be qualified by a name, e.g. \'required\' before the action code itself:\n %code qualifier_name {action code}\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n '], ['\n Each \'%code\' initialization code section must be qualified by a name, e.g. \'required\' before the action code itself:\n %code qualifier_name {action code}\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject10 = _taggedTemplateLiteral(['\n Seems you made a mistake while specifying one of the lexer rules inside\n the start condition\n <', '> { rules... }\n block.\n \n Erroneous area:\n ', '\n \n Technical error report:\n ', '\n '], ['\n Seems you made a mistake while specifying one of the lexer rules inside\n the start condition\n <', '> { rules... }\n block.\n \n Erroneous area:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject11 = _taggedTemplateLiteral(['\n Seems you did not correctly bracket a lexer rules set inside\n the start condition\n <', '> { rules... }\n as a terminating curly brace \'}\' could not be found.\n \n Erroneous area:\n ', '\n \n Technical error report:\n ', '\n '], ['\n Seems you did not correctly bracket a lexer rules set inside\n the start condition\n <', '> { rules... }\n as a terminating curly brace \'}\' could not be found.\n \n Erroneous area:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject12 = _taggedTemplateLiteral(['\n The rule\'s action code section does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n The rule\'s action code section does not compile: ', '\n \n Erroneous area:\n ', '\n ']),
_templateObject13 = _taggedTemplateLiteral(['\n Lexer rule regex action code declaration error?\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n '], ['\n Lexer rule regex action code declaration error?\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject14 = _taggedTemplateLiteral(['\n Missing curly braces: seems you did not correctly bracket a lexer rule action block in curly braces: \'{ ... }\'.\n \n Offending action body:\n ', '\n '], ['\n Missing curly braces: seems you did not correctly bracket a lexer rule action block in curly braces: \'{ ... }\'.\n \n Offending action body:\n ', '\n ']),
_templateObject15 = _taggedTemplateLiteral(['\n Too many curly braces: seems you did not correctly bracket a lexer rule action block in curly braces: \'{ ... }\'.\n \n Offending action body:\n ', '\n '], ['\n Too many curly braces: seems you did not correctly bracket a lexer rule action block in curly braces: \'{ ... }\'.\n \n Offending action body:\n ', '\n ']),
_templateObject16 = _taggedTemplateLiteral(['\n You may place the \'%include\' instruction only at the start/front of a line.\n \n Its use is not permitted at this position:\n ', '\n '], ['\n You may place the \'%include\' instruction only at the start/front of a line.\n \n Its use is not permitted at this position:\n ', '\n ']),
_templateObject17 = _taggedTemplateLiteral(['\n Seems you did not correctly match curly braces \'{ ... }\' in a lexer rule action block.\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n '], ['\n Seems you did not correctly match curly braces \'{ ... }\' in a lexer rule action block.\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject18 = _taggedTemplateLiteral(['\n Seems you did not correctly terminate the start condition set <', ',???> with a terminating \'>\'\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n '], ['\n Seems you did not correctly terminate the start condition set <', ',???> with a terminating \'>\'\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject19 = _taggedTemplateLiteral(['\n Seems you did not correctly bracket a lex rule regex part in \'(...)\' braces.\n \n Unterminated regex part:\n ', '\n \n Technical error report:\n ', '\n '], ['\n Seems you did not correctly bracket a lex rule regex part in \'(...)\' braces.\n \n Unterminated regex part:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject20 = _taggedTemplateLiteral(['\n Seems you did not correctly bracket a lex rule regex set in \'[...]\' brackets.\n \n Unterminated regex set:\n ', '\n \n Technical error report:\n ', '\n '], ['\n Seems you did not correctly bracket a lex rule regex set in \'[...]\' brackets.\n \n Unterminated regex set:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject21 = _taggedTemplateLiteral(['\n Internal error: option "', '" value assignment failure.\n \n Erroneous area:\n ', '\n \n Technical error report:\n ', '\n '], ['\n Internal error: option "', '" value assignment failure.\n \n Erroneous area:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject22 = _taggedTemplateLiteral(['\n Expected a valid option name (with optional value assignment).\n \n Erroneous area:\n ', '\n \n Technical error report:\n ', '\n '], ['\n Expected a valid option name (with optional value assignment).\n \n Erroneous area:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject23 = _taggedTemplateLiteral(['\n The extra lexer module code section (a.k.a. \'epilogue\') does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n The extra lexer module code section (a.k.a. \'epilogue\') does not compile: ', '\n \n Erroneous area:\n ', '\n ']),
_templateObject24 = _taggedTemplateLiteral(['\n The source code %include-d into the extra lexer module code section (a.k.a. \'epilogue\') does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n The source code %include-d into the extra lexer module code section (a.k.a. \'epilogue\') does not compile: ', '\n \n Erroneous area:\n ', '\n ']),
_templateObject25 = _taggedTemplateLiteral(['\n %include MUST be followed by a valid file path.\n \n Erroneous path:\n ', '\n \n Technical error report:\n ', '\n '], ['\n %include MUST be followed by a valid file path.\n \n Erroneous path:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject26 = _taggedTemplateLiteral(['\n Module code declaration error?\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n '], ['\n Module code declaration error?\n \n Erroneous code:\n ', '\n \n Technical error report:\n ', '\n ']),
_templateObject27 = _taggedTemplateLiteral(['\n %include statements must occur on a line on their own and cannot occur inside an %{...%} action code block.\n Its use is not permitted at this position.\n\n Erroneous area:\n '], ['\n %include statements must occur on a line on their own and cannot occur inside an %{...%} action code block.\n Its use is not permitted at this position.\n\n Erroneous area:\n ']),
_templateObject28 = _taggedTemplateLiteral(['\n too many closing curly braces in lexer rule action block.\n\n Note: the action code chunk may be too complex for jison to parse\n easily; we suggest you wrap the action code chunk in \'%{...%}\'\n to help jison grok more or less complex action code chunks.\n\n Erroneous area:\n '], ['\n too many closing curly braces in lexer rule action block.\n\n Note: the action code chunk may be too complex for jison to parse\n easily; we suggest you wrap the action code chunk in \'%{...%}\'\n to help jison grok more or less complex action code chunks.\n\n Erroneous area:\n ']),
_templateObject29 = _taggedTemplateLiteral(['\n missing ', ' closing curly braces in lexer rule action block.\n\n Note: the action code chunk may be too complex for jison to parse\n easily; we suggest you wrap the action code chunk in \'%{...%}\'\n to help jison grok more or less complex action code chunks.\n\n Erroneous area:\n '], ['\n missing ', ' closing curly braces in lexer rule action block.\n\n Note: the action code chunk may be too complex for jison to parse\n easily; we suggest you wrap the action code chunk in \'%{...%}\'\n to help jison grok more or less complex action code chunks.\n\n Erroneous area:\n ']),
_templateObject30 = _taggedTemplateLiteral(['\n LEX: ignoring unsupported lexer option ', '\n while lexing in ', ' state.\n\n Erroneous area:\n '], ['\n LEX: ignoring unsupported lexer option ', '\n while lexing in ', ' state.\n\n Erroneous area:\n ']),
_templateObject31 = _taggedTemplateLiteral(['\n unterminated string constant in lexer rule action block.\n\n Erroneous area:\n '], ['\n unterminated string constant in lexer rule action block.\n\n Erroneous area:\n ']),
_templateObject32 = _taggedTemplateLiteral(['\n unterminated string constant in %options entry.\n\n Erroneous area:\n '], ['\n unterminated string constant in %options entry.\n\n Erroneous area:\n ']),
_templateObject33 = _taggedTemplateLiteral(['\n unterminated string constant encountered while lexing\n ', '.\n\n Erroneous area:\n '], ['\n unterminated string constant encountered while lexing\n ', '.\n\n Erroneous area:\n ']),
_templateObject34 = _taggedTemplateLiteral(['\n unsupported lexer input encountered while lexing\n ', ' (i.e. jison lex regexes).\n\n NOTE: When you want this input to be interpreted as a LITERAL part\n of a lex rule regex, you MUST enclose it in double or\n single quotes.\n\n If not, then know that this input is not accepted as a valid\n regex expression here in jison-lex ', '.\n\n Erroneous area:\n '], ['\n unsupported lexer input encountered while lexing\n ', ' (i.e. jison lex regexes).\n\n NOTE: When you want this input to be interpreted as a LITERAL part\n of a lex rule regex, you MUST enclose it in double or\n single quotes.\n\n If not, then know that this input is not accepted as a valid\n regex expression here in jison-lex ', '.\n\n Erroneous area:\n ']),
_templateObject35 = _taggedTemplateLiteral(['\n unsupported lexer input: ', '\n while lexing in ', ' state.\n\n Erroneous area:\n '], ['\n unsupported lexer input: ', '\n while lexing in ', ' state.\n\n Erroneous area:\n ']),
_templateObject36 = _taggedTemplateLiteral(['\n var __hacky_counter__ = 0;\n\n /**\n * @constructor\n * @nocollapse\n */\n function XRegExp(re, f) {\n this.re = re;\n this.flags = f;\n this._getUnicodeProperty = function (k) {};\n var fake = /./; // WARNING: this exact \'fake\' is also depended upon by the xregexp unit test!\n __hacky_counter__++;\n fake.__hacky_backy__ = __hacky_counter__;\n return fake;\n }\n '], ['\n var __hacky_counter__ = 0;\n\n /**\n * @constructor\n * @nocollapse\n */\n function XRegExp(re, f) {\n this.re = re;\n this.flags = f;\n this._getUnicodeProperty = function (k) {};\n var fake = /./; // WARNING: this exact \'fake\' is also depended upon by the xregexp unit test!\n __hacky_counter__++;\n fake.__hacky_backy__ = __hacky_counter__;\n return fake;\n }\n ']),
_templateObject37 = _taggedTemplateLiteral(['\n return ', ';\n'], ['\n return ', ';\n']),
_templateObject38 = _taggedTemplateLiteral(['\n // Code Generator Information Report\n // ---------------------------------\n //\n // Options:\n //\n // backtracking: .................... ', '\n // location.ranges: ................. ', '\n // location line+column tracking: ... ', '\n //\n //\n // Forwarded Parser Analysis flags:\n //\n // uses yyleng: ..................... ', '\n // uses yylineno: ................... ', '\n // uses yytext: ..................... ', '\n // uses yylloc: ..................... ', '\n // uses lexer values: ............... ', ' / ', '\n // location tracking: ............... ', '\n // location assignment: ............. ', '\n //\n //\n // Lexer Analysis flags:\n //\n // uses yyleng: ..................... ', '\n // uses yylineno: ................... ', '\n // uses yytext: ..................... ', '\n // uses yylloc: ..................... ', '\n // uses ParseError API: ............. ', '\n // uses yyerror: .................... ', '\n // uses location tracking & editing: ', '\n // uses more() API: ................. ', '\n // uses unput() API: ................ ', '\n // uses reject() API: ............... ', '\n // uses less() API: ................. ', '\n // uses display APIs pastInput(), upcomingInput(), showPosition():\n // ............................. ', '\n // uses describeYYLLOC() API: ....... ', '\n //\n // --------- END OF REPORT -----------\n\n '], ['\n // Code Generator Information Report\n // ---------------------------------\n //\n // Options:\n //\n // backtracking: .................... ', '\n // location.ranges: ................. ', '\n // location line+column tracking: ... ', '\n //\n //\n // Forwarded Parser Analysis flags:\n //\n // uses yyleng: ..................... ', '\n // uses yylineno: ................... ', '\n // uses yytext: ..................... ', '\n // uses yylloc: ..................... ', '\n // uses lexer values: ............... ', ' / ', '\n // location tracking: ............... ', '\n // location assignment: ............. ', '\n //\n //\n // Lexer Analysis flags:\n //\n // uses yyleng: ..................... ', '\n // uses yylineno: ................... ', '\n // uses yytext: ..................... ', '\n // uses yylloc: ..................... ', '\n // uses ParseError API: ............. ', '\n // uses yyerror: .................... ', '\n // uses location tracking & editing: ', '\n // uses more() API: ................. ', '\n // uses unput() API: ................ ', '\n // uses reject() API: ............... ', '\n // uses less() API: ................. ', '\n // uses display APIs pastInput(), upcomingInput(), showPosition():\n // ............................. ', '\n // uses describeYYLLOC() API: ....... ', '\n //\n // --------- END OF REPORT -----------\n\n ']),
_templateObject39 = _taggedTemplateLiteral(['\n var lexer = {\n '], ['\n var lexer = {\n ']),
_templateObject40 = _taggedTemplateLiteral([',\n JisonLexerError: JisonLexerError,\n performAction: ', ',\n simpleCaseActionClusters: ', ',\n rules: [\n ', '\n ],\n conditions: ', '\n };\n '], [',\n JisonLexerError: JisonLexerError,\n performAction: ', ',\n simpleCaseActionClusters: ', ',\n rules: [\n ', '\n ],\n conditions: ', '\n };\n ']),
_templateObject41 = _taggedTemplateLiteral(['\n /* lexer generated by jison-lex ', ' */\n\n /*\n * Returns a Lexer object of the following structure:\n *\n * Lexer: {\n * yy: {} The so-called "shared state" or rather the *source* of it;\n * the real "shared state" `yy` passed around to\n * the rule actions, etc. is a direct reference!\n *\n * This "shared context" object was passed to the lexer by way of \n * the `lexer.setInput(str, yy)` API before you may use it.\n *\n * This "shared context" object is passed to the lexer action code in `performAction()`\n * so userland code in the lexer actions may communicate with the outside world \n * and/or other lexer rules\' actions in more or less complex ways.\n *\n * }\n *\n * Lexer.prototype: {\n * EOF: 1,\n * ERROR: 2,\n *\n * yy: The overall "shared context" object reference.\n *\n * JisonLexerError: function(msg, hash),\n *\n * performAction: function lexer__performAction(yy, yyrulenumber, YY_START),\n *\n * The function parameters and `this` have the following value/meaning:\n * - `this` : reference to the `lexer` instance. \n * `yy_` is an alias for `this` lexer instance reference used internally.\n *\n * - `yy` : a reference to the `yy` "shared state" object which was passed to the lexer\n * by way of the `lexer.setInput(str, yy)` API before.\n *\n * Note:\n * The extra arguments you specified in the `%parse-param` statement in your\n * **parser** grammar definition file are passed to the lexer via this object\n * reference as member variables.\n *\n * - `yyrulenumber` : index of the matched lexer rule (regex), used internally.\n *\n * - `YY_START`: the current lexer "start condition" state.\n *\n * parseError: function(str, hash, ExceptionClass),\n *\n * constructLexErrorInfo: function(error_message, is_recoverable),\n * Helper function.\n * Produces a new errorInfo \'hash object\' which can be passed into `parseError()`.\n * See it\'s use in this lexer kernel in many places; example usage:\n *\n * var infoObj = lexer.constructParseErrorInfo(\'fail!\', true);\n * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError);\n *\n * options: { ... lexer %options ... },\n *\n * lex: function(),\n * Produce one token of lexed input, which was passed in earlier via the `lexer.setInput()` API.\n * You MAY use the additional `args...` parameters as per `%parse-param` spec of the **lexer** grammar:\n * these extra `args...` are added verbatim to the `yy` object reference as member variables.\n *\n * WARNING:\n * Lexer\'s additional `args...` parameters (via lexer\'s `%parse-param`) MAY conflict with\n * any attributes already added to `yy` by the **parser** or the jison run-time; \n * when such a collision is detected an exception is thrown to prevent the generated run-time \n * from silently accepting this confusing and potentially hazardous situation! \n *\n * cleanupAfterLex: function(do_not_nuke_errorinfos),\n * Helper function.\n *\n * This helper API is invoked when the **parse process** has completed: it is the responsibility\n * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. 
\n *\n * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected.\n *\n * setInput: function(input, [yy]),\n *\n *\n * input: function(),\n *\n *\n * unput: function(str),\n *\n *\n * more: function(),\n *\n *\n * reject: function(),\n *\n *\n * less: function(n),\n *\n *\n * pastInput: function(n),\n *\n *\n * upcomingInput: function(n),\n *\n *\n * showPosition: function(),\n *\n *\n * test_match: function(regex_match_array, rule_index),\n *\n *\n * next: function(),\n *\n *\n * begin: function(condition),\n *\n *\n * pushState: function(condition),\n *\n *\n * popState: function(),\n *\n *\n * topState: function(),\n *\n *\n * _currentRules: function(),\n *\n *\n * stateStackSize: function(),\n *\n *\n * performAction: function(yy, yy_, yyrulenumber, YY_START),\n *\n *\n * rules: [...],\n *\n *\n * conditions: {associative list: name ==> set},\n * }\n *\n *\n * token location info (`yylloc`): {\n * first_line: n,\n * last_line: n,\n * first_column: n,\n * last_column: n,\n * range: [start_number, end_number]\n * (where the numbers are indexes into the input string, zero-based)\n * }\n *\n * ---\n *\n * The `parseError` function receives a \'hash\' object with these members for lexer errors:\n *\n * {\n * text: (matched text)\n * token: (the produced terminal token, if any)\n * token_id: (the produced terminal token numeric ID, if any)\n * line: (yylineno)\n * loc: (yylloc)\n * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule\n * available for this particular error)\n * yy: (object: the current parser internal "shared state" `yy`\n * as is also available in the rule actions; this can be used,\n * for instance, for advanced error analysis and reporting)\n * lexer: (reference to the current lexer instance used by the parser)\n * }\n *\n * while `this` will reference the current lexer instance.\n *\n * When `parseError` is invoked by the lexer, the default implementation will\n * attempt to invoke `yy.parser.parseError()`; when this callback is not provided\n * it will try to invoke `yy.parseError()` instead. When that callback is also not\n * provided, a `JisonLexerError` exception will be thrown containing the error\n * message and `hash`, as constructed by the `constructLexErrorInfo()` API.\n *\n * Note that the lexer\'s `JisonLexerError` error class is passed via the\n * `ExceptionClass` argument, which is invoked to construct the exception\n * instance to be thrown, so technically `parseError` will throw the object\n * produced by the `new ExceptionClass(str, hash)` JavaScript expression.\n *\n * ---\n *\n * You can specify lexer options by setting / modifying the `.options` object of your Lexer instance.\n * These options are available:\n *\n * (Options are permanent.)\n * \n * yy: {\n * parseError: function(str, hash, ExceptionClass)\n * optional: overrides the default `parseError` function.\n * }\n *\n * lexer.options: {\n * pre_lex: function()\n * optional: is invoked before the lexer is invoked to produce another token.\n * `this` refers to the Lexer object.\n * post_lex: function(token) { return token; }\n * optional: is invoked when the lexer has produced a token `token`;\n * this function can override the returned token value by returning another.\n * When it does not return any (truthy) value, the lexer will return\n * the original `token`.\n * `this` refers to the Lexer object.\n *\n * WARNING: the next set of options are not meant to be changed. 
They echo the abilities of\n * the lexer as per when it was compiled!\n *\n * ranges: boolean\n * optional: `true` ==> token location info will include a .range[] member.\n * flex: boolean\n * optional: `true` ==> flex-like lexing behaviour where the rules are tested\n * exhaustively to find the longest match.\n * backtrack_lexer: boolean\n * optional: `true` ==> lexer regexes are tested in order and for invoked;\n * the lexer terminates the scan when a token is returned by the action code.\n * xregexp: boolean\n * optional: `true` ==> lexer rule regexes are "extended regex format" requiring the\n * `XRegExp` library. When this %option has not been specified at compile time, all lexer\n * rule regexes have been written as standard JavaScript RegExp expressions.\n * }\n */\n '], ['\n /* lexer generated by jison-lex ', ' */\n\n /*\n * Returns a Lexer object of the following structure:\n *\n * Lexer: {\n * yy: {} The so-called "shared state" or rather the *source* of it;\n * the real "shared state" \\`yy\\` passed around to\n * the rule actions, etc. is a direct reference!\n *\n * This "shared context" object was passed to the lexer by way of \n * the \\`lexer.setInput(str, yy)\\` API before you may use it.\n *\n * This "shared context" object is passed to the lexer action code in \\`performAction()\\`\n * so userland code in the lexer actions may communicate with the outside world \n * and/or other lexer rules\' actions in more or less complex ways.\n *\n * }\n *\n * Lexer.prototype: {\n * EOF: 1,\n * ERROR: 2,\n *\n * yy: The overall "shared context" object reference.\n *\n * JisonLexerError: function(msg, hash),\n *\n * performAction: function lexer__performAction(yy, yyrulenumber, YY_START),\n *\n * The function parameters and \\`this\\` have the following value/meaning:\n * - \\`this\\` : reference to the \\`lexer\\` instance. \n * \\`yy_\\` is an alias for \\`this\\` lexer instance reference used internally.\n *\n * - \\`yy\\` : a reference to the \\`yy\\` "shared state" object which was passed to the lexer\n * by way of the \\`lexer.setInput(str, yy)\\` API before.\n *\n * Note:\n * The extra arguments you specified in the \\`%parse-param\\` statement in your\n * **parser** grammar definition file are passed to the lexer via this object\n * reference as member variables.\n *\n * - \\`yyrulenumber\\` : index of the matched lexer rule (regex), used internally.\n *\n * - \\`YY_START\\`: the current lexer "start condition" state.\n *\n * parseError: function(str, hash, ExceptionClass),\n *\n * constructLexErrorInfo: function(error_message, is_recoverable),\n * Helper function.\n * Produces a new errorInfo \\\'hash object\\\' which can be passed into \\`parseError()\\`.\n * See it\\\'s use in this lexer kernel in many places; example usage:\n *\n * var infoObj = lexer.constructParseErrorInfo(\\\'fail!\\\', true);\n * var retVal = lexer.parseError(infoObj.errStr, infoObj, lexer.JisonLexerError);\n *\n * options: { ... lexer %options ... 
},\n *\n * lex: function(),\n * Produce one token of lexed input, which was passed in earlier via the \\`lexer.setInput()\\` API.\n * You MAY use the additional \\`args...\\` parameters as per \\`%parse-param\\` spec of the **lexer** grammar:\n * these extra \\`args...\\` are added verbatim to the \\`yy\\` object reference as member variables.\n *\n * WARNING:\n * Lexer\'s additional \\`args...\\` parameters (via lexer\'s \\`%parse-param\\`) MAY conflict with\n * any attributes already added to \\`yy\\` by the **parser** or the jison run-time; \n * when such a collision is detected an exception is thrown to prevent the generated run-time \n * from silently accepting this confusing and potentially hazardous situation! \n *\n * cleanupAfterLex: function(do_not_nuke_errorinfos),\n * Helper function.\n *\n * This helper API is invoked when the **parse process** has completed: it is the responsibility\n * of the **parser** (or the calling userland code) to invoke this method once cleanup is desired. \n *\n * This helper may be invoked by user code to ensure the internal lexer gets properly garbage collected.\n *\n * setInput: function(input, [yy]),\n *\n *\n * input: function(),\n *\n *\n * unput: function(str),\n *\n *\n * more: function(),\n *\n *\n * reject: function(),\n *\n *\n * less: function(n),\n *\n *\n * pastInput: function(n),\n *\n *\n * upcomingInput: function(n),\n *\n *\n * showPosition: function(),\n *\n *\n * test_match: function(regex_match_array, rule_index),\n *\n *\n * next: function(),\n *\n *\n * begin: function(condition),\n *\n *\n * pushState: function(condition),\n *\n *\n * popState: function(),\n *\n *\n * topState: function(),\n *\n *\n * _currentRules: function(),\n *\n *\n * stateStackSize: function(),\n *\n *\n * performAction: function(yy, yy_, yyrulenumber, YY_START),\n *\n *\n * rules: [...],\n *\n *\n * conditions: {associative list: name ==> set},\n * }\n *\n *\n * token location info (\\`yylloc\\`): {\n * first_line: n,\n * last_line: n,\n * first_column: n,\n * last_column: n,\n * range: [start_number, end_number]\n * (where the numbers are indexes into the input string, zero-based)\n * }\n *\n * ---\n *\n * The \\`parseError\\` function receives a \\\'hash\\\' object with these members for lexer errors:\n *\n * {\n * text: (matched text)\n * token: (the produced terminal token, if any)\n * token_id: (the produced terminal token numeric ID, if any)\n * line: (yylineno)\n * loc: (yylloc)\n * recoverable: (boolean: TRUE when the parser MAY have an error recovery rule\n * available for this particular error)\n * yy: (object: the current parser internal "shared state" \\`yy\\`\n * as is also available in the rule actions; this can be used,\n * for instance, for advanced error analysis and reporting)\n * lexer: (reference to the current lexer instance used by the parser)\n * }\n *\n * while \\`this\\` will reference the current lexer instance.\n *\n * When \\`parseError\\` is invoked by the lexer, the default implementation will\n * attempt to invoke \\`yy.parser.parseError()\\`; when this callback is not provided\n * it will try to invoke \\`yy.parseError()\\` instead. 
When that callback is also not\n * provided, a \\`JisonLexerError\\` exception will be thrown containing the error\n * message and \\`hash\\`, as constructed by the \\`constructLexErrorInfo()\\` API.\n *\n * Note that the lexer\\\'s \\`JisonLexerError\\` error class is passed via the\n * \\`ExceptionClass\\` argument, which is invoked to construct the exception\n * instance to be thrown, so technically \\`parseError\\` will throw the object\n * produced by the \\`new ExceptionClass(str, hash)\\` JavaScript expression.\n *\n * ---\n *\n * You can specify lexer options by setting / modifying the \\`.options\\` object of your Lexer instance.\n * These options are available:\n *\n * (Options are permanent.)\n * \n * yy: {\n * parseError: function(str, hash, ExceptionClass)\n * optional: overrides the default \\`parseError\\` function.\n * }\n *\n * lexer.options: {\n * pre_lex: function()\n * optional: is invoked before the lexer is invoked to produce another token.\n * \\`this\\` refers to the Lexer object.\n * post_lex: function(token) { return token; }\n * optional: is invoked when the lexer has produced a token \\`token\\`;\n * this function can override the returned token value by returning another.\n * When it does not return any (truthy) value, the lexer will return\n * the original \\`token\\`.\n * \\`this\\` refers to the Lexer object.\n *\n * WARNING: the next set of options are not meant to be changed. They echo the abilities of\n * the lexer as per when it was compiled!\n *\n * ranges: boolean\n * optional: \\`true\\` ==> token location info will include a .range[] member.\n * flex: boolean\n * optional: \\`true\\` ==> flex-like lexing behaviour where the rules are tested\n * exhaustively to find the longest match.\n * backtrack_lexer: boolean\n * optional: \\`true\\` ==> lexer regexes are tested in order and for invoked;\n * the lexer terminates the scan when a token is returned by the action code.\n * xregexp: boolean\n * optional: \\`true\\` ==> lexer rule regexes are "extended regex format" requiring the\n * \\`XRegExp\\` library. When this %option has not been specified at compile time, all lexer\n * rule regexes have been written as standard JavaScript RegExp expressions.\n * }\n */\n ']),
_templateObject42 = _taggedTemplateLiteral(['\n export {\n lexer,\n yylex as lex\n };\n '], ['\n export {\n lexer,\n yylex as lex\n };\n ']),
_templateObject43 = _taggedTemplateLiteral(['\n Maybe you did not correctly separate trailing code from the grammar rule set with a \'%%\' marker on an otherwise empty line?\n \n Erroneous area:\n ', '\n '], ['\n Maybe you did not correctly separate trailing code from the grammar rule set with a \'%%\' marker on an otherwise empty line?\n \n Erroneous area:\n ', '\n ']),
_templateObject44 = _taggedTemplateLiteral(['\n Maybe you did not correctly separate the parse \'header section\' (token definitions, options, lexer spec, etc.) from the grammar rule set with a \'%%\' on an otherwise empty line?\n \n Erroneous area:\n ', '\n '], ['\n Maybe you did not correctly separate the parse \'header section\' (token definitions, options, lexer spec, etc.) from the grammar rule set with a \'%%\' on an otherwise empty line?\n \n Erroneous area:\n ', '\n ']),
_templateObject45 = _taggedTemplateLiteral(['\n The extra parser module code section (a.k.a. \'epilogue\') does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n The extra parser module code section (a.k.a. \'epilogue\') does not compile: ', '\n \n Erroneous area:\n ', '\n ']),
_templateObject46 = _taggedTemplateLiteral(['\n action header code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n action header code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']),
_templateObject47 = _taggedTemplateLiteral(['\n declaration list error?\n \n Erroneous area:\n ', '\n '], ['\n declaration list error?\n \n Erroneous area:\n ', '\n ']),
_templateObject48 = _taggedTemplateLiteral(['\n action code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n action code block does not compile: ', '\n \n Erroneous area:\n ', '\n ']),
_templateObject49 = _taggedTemplateLiteral(['\n You did not specify a legal file path for the \'%import\' initialization code statement, which must have the format:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n '], ['\n You did not specify a legal file path for the \'%import\' initialization code statement, which must have the format:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n ']),
_templateObject50 = _taggedTemplateLiteral(['\n Each \'%import\'-ed initialization code section must be qualified by a name, e.g. \'required\' before the import path itself:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n '], ['\n Each \'%import\'-ed initialization code section must be qualified by a name, e.g. \'required\' before the import path itself:\n \n %import qualifier_name file_path\n \n Erroneous area:\n ', '\n ']),
_templateObject51 = _taggedTemplateLiteral(['\n %code "', '" initialization section action code block does not compile: ', '\n \n Erroneous area:\n ', '\n '], ['\n %code "',