// expexp
// Version: (unspecified)
// The EXPRESS model I/O and EXPRESS model and data representation.
// 1,958 lines (1,952 loc) • 264 kB — JavaScript
import nearley from 'nearley';
import moo from 'moo';
import jssStringify from 'json-stable-stringify';
// //////////////////////////////////////////////
// Syntax JSON format and tree traverse concept
// This is a description of the concepts, ideas and guidelines behind
// the form and structure of the parsed JSON.
// The tree traverse function depends on that structure. So they are
// described both together here.
//
// The traverse function keeps track and notifies in its events the
// variables: ii, path and json
// - json: Is the current part of the JSON that is in focus at the moment.
// Initially this is the whole JSON.
// - path: It is the chain of parent elements down to the root (the whole JSON)
// When afterFct or beforeFct are called for the current element,
// the path does not contain json.
// - ii: Stands for identifier or index. It names the context of
// the json object: If the parent is semantically an object, the
// type of the top element at ii is string, providing the name of
// the key. If the parent is semantically an array, the type of
// the element is JS number (0 or positive and whole), providing
// the index position.
//
// What does 'is semantically' mean?
// Even though the JSON contains certainly arrays, they always are
// represented as JS objects. This is to provide also a name for
// the type of array. Whether json stands for an array can be checked
// using the isArrSem(json) function. If so, there is - besides
// the 't' only one more key 's' of type JS array holding the actual
// array elements. The point is about having a format for tree unit
// test data and at the same time human readable path positions in
// the debug output. See the *.tlog files in the /test/units or
// test/u* dirs.
//
// Remarks:
// - The length of path and ii is identical.
// - Neither json nor path do at any time contain a JS array.
// Their content or elements are JS objects with a granted key 't'
// of type string.
// - The value of 't' unambiguously defines what other keys must
// be present.
// - Any object with 't':'*_dcl' (except 'sct_dcl') also has a key 'typ':{..} .
// - The 't':'typ' as well as any aggregation type has a key 'spc' defining
// the base type (of the elements) or being a reference.
// - A key 'id' always stands for a JS string but for a JS object
// in the following cases:
// For 't':'a_atr_ids':
// 'id':{t:'atr|qal_atr|qal_atr_rnmd', id:'..', ..} .
// For 't':'drv|inv':
// 'id':{t:'atr|qal_atr|qal_atr_rnmd', id:'..', ..} .
// For 't':'cst_dcl':
// 'id':{t:'cst', id:'..', ..} .
// For 't':'a_prm_ids':
// 'id':{t:'prm', id:'..', ..} .
// For 't':'a_lcl_ids':
// 'id':{t:'lcl', id:'..', ..} .
// - The 'id' key of an operation {'t':'o_*'} is always a string.
// - A key '*[Rr]ef' always stands for a JS object with {t:'*_ref', id:'..'} .
// - A key '*[Rr]efs' always stands for a JS array containing JS
// objects with {t:'*_ref', id:'..'} .
/**
 * Depth-first traversal over the parsed syntax JSON (format described in
 * the header comment above). Dispatches on the shape of `json`:
 * - JS array: plain iteration, pushing the numeric index onto `ii`;
 *   no before/after events are fired for the bare array itself.
 * - JS object: fires beforeFct(json, path, ii) while `path` does NOT yet
 *   contain `json`, pushes `json`, recurses into child objects, pops, and
 *   fires afterFct(json, path, ii).
 * Semantic-array objects ({t:'a_*', s:[..]}) receive their events for the
 * wrapper object, while the recursion descends into the raw `s` array.
 *
 * @param {Array} ii - stack of identifiers/indices (mutated in place)
 * @param {Array} path - stack of parent objects (mutated in place)
 * @param {*} json - current element (object or JS array)
 * @param {Function} afterFct - called after an object was traversed
 * @param {Function} beforeFct - called before an object is traversed
 * @param {?Array} keyOrder - REVERSED key priority list (traverseExternal
 *   reverses its copy before passing it here); falsy or empty means plain
 *   alphanumeric key order
 */
function traverseContent(ii, path, json, afterFct, beforeFct, keyOrder) {
  if (Array.isArray(json)) {
    // Simple and plane iteration without any event notification.
    for (let i = 0; i < json.length; i++) {
      ii.push(i);
      traverseContent(ii, path, json[i], afterFct, beforeFct, keyOrder);
      ii.pop(); // i
    }
  } else {
    // json is object: {t:'..', ..}
    beforeFct(json, path, ii);
    path.push(json);
    // But we have to check for our special array-objects.
    if (isArrSem(json) == false) {
      // Establish the defined key order for iterating objects.
      // 'p' and 't' are metadata and never descended into.
      const keys = Object.keys(json).filter(key => !IGNORE_KEY[key]);
      if (1 < keys.length) {
        // Initially the order was thought to be most-important-first sorted.
        // But to improve performance the whole thing is set up reverted.
        // We better sort only this short (even filtered) array instead
        // of the lengthy order array later.
        keys.sort(); // alphanumerically
        if (Array.isArray(keyOrder) && 0 < keyOrder.length) {
          keys.reverse();
          // The reverted sorting causes order.length-i later.
          const order = keys.concat(keyOrder);
          // const order = DEFAULT_OBJECT_KEY_TRAVERSE_ORDER.concat(keys)
          // order.reverse()
          const key2idx = {};
          order.forEach((elt, i) => {
            key2idx[elt] = order.length - i;
          }); // Index needs not be exact (missing -1).
          // A key, that is in the json as well as in the special array,
          // gets the lower index of the special array.
          keys.sort((a, b) => key2idx[a] - key2idx[b]);
        }
      }
      for (const key of keys) {
        const value = json[key];
        if (value !== null && typeof value == 'object' && Array.isArray(value) == false) {
          // Per design we do not have arrays here.
          ii.push(key);
          traverseContent(ii, path, value, afterFct, beforeFct, keyOrder);
          ii.pop(); // key
        }
      }
    } else {
      // The special array-object is the event arg, and
      // already triggered here, while traverseContent(..)
      // only handles the true array (value.s).
      traverseContent(ii, path, json.s, afterFct, beforeFct, keyOrder);
    }
    path.pop(); // json
    afterFct(json, path, ii);
  }
}
// Marker used as the first element of the ii stack (the root position).
const II_ROOT = '≡';
// Keys never descended into by traverseContent: 'p' (position info written
// by pNfo$1) and 't' (the node type tag).
const IGNORE_KEY = {
  'p': true,
  't': true
};
// Key priority list for object traversal; consumed (reversed) by
// traverseExternal/traverseContent. The exact ordering is functionally
// essential for some node kinds — see the remarks below.
const DEFAULT_OBJECT_KEY_TRAVERSE_ORDER = [
  // For most cases the order is a nice to have, but
  // for fct, xpr, itv { < < } and qry( <* | ): order src-var-cnd
  // the order fits op stack architecture and is functionally essential.
  'arg0', 'qals0', 'arg', 'arg1', 'qals1', 'op', 'id1',
  // a compare relation op id
  'arg2', 'id2',
  // a compare relation op id
  // Unfortunately there are some technical dependencies introduced
  // by the fact, that all must fit into one 'flat' order.
  // Through cnd(qry) also involved are: ntt and sct_dcl
  // Through whrs(ntt) also involved are: typ and rul
  // Through csts, lcls, ctts (rul) also: fun and prc
  // Through nttRef also involved are: inv, qal_atr, qal_atr_rnmd
  // Through ref(qal_atr_rnmd) also: ref_rnmd, enm_xtdd, slc_xtdd
  // Through refs(slc_xtdd) also: slc, use, ref, scm
  // Through xtd(enm_xtdd, slc_xtdd) also: enm, slc
  // Through vals(enm, enm_xtdd) also: agi, bif, coc
  'src',
  // qry
  'var',
  // qry, als, rpt
  'id',
  // ntt, sct_dcl, .. and really many more, but good, this always comes first.
  'spc',
  // typ, and really many more, but good, this comes after 'id' .
  'xtd',
  // enm, enm_xtdd, slc, slc_xtdd
  'nttOnly',
  // slc, slc_xtdd
  'agrTyp',
  // inv
  'agrBds',
  // inv
  'nttRef',
  // sct_dcl, inv, qal_atr, qal_atr_rnmd
  'ref',
  // qal_atr_rnmd, ref_rnmd, enm_xtdd, slc_xtdd, asg, als
  'refs',
  // slc, slc_xtdd, use, t:ref, scm
  'qals',
  // asg, als
  'nttRefs',
  // rul
  'invRef',
  // inv
  'vals',
  // enm, enm_xtdd, agi, bif, coc
  'abs',
  // ntt, sct_dcl
  'ttlNttRefs',
  // sct_dcl
  'cnd',
  // qry, ntt, sct_dcl, iff
  'sprs',
  // ntt
  'atrs',
  // ntt
  'drvs',
  // ntt
  'invs',
  // ntt
  'unqs',
  // ntt
  'itr',
  // rpt
  'whl',
  // rpt
  'utl',
  // rpt
  'prms',
  // fun, prc
  'res',
  // fun
  'ctts',
  // rul, fun, prc (algorithm_head order)
  'csts',
  // rul, scm, fun, prc (algorithm_head order)
  'lcls',
  // rul, fun, prc (algorithm_head order)
  'stms',
  // fun, prc, rul, cpd, als, rpt, iff
  'whrs',
  // ntt, typ, rul
  'elss',
  // iff
  'slr',
  // cas
  'caas',
  // cas
  'oth',
  // cas
  'xprs',
  // caa
  'stm',
  // caa
  'fr',
  // bds, itr
  'to',
  // bds, itr
  'by',
  // itr
  'scts' // scm
];
// Default no-op callback used when no beforeFct is supplied.
const noOp$1 = () => {};
/**
 * Public entry point for traversing parsed syntax JSON.
 * Copies and reverses keyOrder once up front (traverseContent expects the
 * reversed form, see the comments there), then starts the recursion.
 * A falsy json is silently ignored.
 */
function traverseExternal(json, afterFct, beforeFct = noOp$1, keyOrder = DEFAULT_OBJECT_KEY_TRAVERSE_ORDER, path = [], ii = [II_ROOT]) {
  if (!json) {
    return;
  }
  const preparedOrder = Array.isArray(keyOrder) ? [...keyOrder].reverse() : keyOrder;
  traverseContent(ii, path, json, afterFct, beforeFct, preparedOrder);
}
/**
 * Predicate: is `json` the object representation of a semantic array,
 * i.e. {t:'a_*', s:[..]} (see the format description above)?
 * Returns a real boolean (the original returned the last truthy operand,
 * e.g. null/undefined for missing input) and guards against a non-string
 * 't', which previously threw on `startsWith`.
 * @param {*} json - candidate node
 * @returns {boolean}
 */
function isArrSem(json) {
  return Boolean(json && typeof json.t === 'string' && json.t.startsWith('a_'));
}
/**
 * Render the traversal position as one human-readable path string for
 * debug output (see the *.tlog remarks in the header comment).
 * Each element contributes a label derived from `ii` plus a short node
 * description; semantic-array wrappers in the middle of the path render
 * empty because the following element's "[<arrType> <idx>]" label already
 * names them.
 * @param {Array} ii - identifier/index stack (parallel to path)
 * @param {Array} path - parent-object stack
 * @param {?Object} json - optional current element appended to the path
 * @returns {string} joined path without the leading root separator
 */
function path2str(ii, path, json) {
  let fullPath = path;
  if (json) {
    fullPath = path.slice();
    fullPath.push(json);
  }
  const strs = fullPath.map(function (elt, i, arr) {
    let lbl = ii[i];
    if (Number.isInteger(lbl)) {
      // Inside a semantic array: right-pad the index to the width of the
      // parent array's length so debug columns line up.
      const arrObj = arr[i - 1];
      const pArrLenStrLength = ` ${arrObj.s.length}`.length;
      lbl = String(lbl).padStart(pArrLenStrLength, ' ');
      lbl = `[${arrObj.t}${lbl}]`;
    } else {
      lbl = '.' + lbl;
    }
    let eltStr; // undefined
    if (isArrSem(elt)) {
      if (i < fullPath.length - 1) {
        // Intermediate array wrapper: name is shown by the child's label.
        eltStr = '';
      } else {
        // At the top.
        if (0 < elt.s.length) {
          eltStr = `[${elt.t}:${elt.s.length}]`;
        } else {
          eltStr = `[]`;
        }
      }
    } else {
      eltStr = elt.t;
      if (elt.id) {
        eltStr += ' ' + elt.id;
      }
      eltStr = `{${eltStr}}`;
    }
    return lbl + eltStr;
  });
  // slice(1) drops the leading separator of the root label
  // (String.prototype.substr is deprecated).
  return strs.join('').slice(1);
}
/**
 * Hand out a defensive copy of the default key traverse order so callers
 * may mutate it freely.
 * @returns {string[]} fresh copy of DEFAULT_OBJECT_KEY_TRAVERSE_ORDER
 */
function DEFAULT_ORDER() {
  return [...DEFAULT_OBJECT_KEY_TRAVERSE_ORDER];
}
/**
 * Key order meaning "no special order": with an empty keyOrder,
 * traverseContent falls back to plain alphanumeric key sorting.
 * @returns {Array} a fresh empty array
 */
function ALPHANUM_ORDER() {
  const none = [];
  return none;
}
// Generated automatically by nearley, version 2.20.1
// http://github.com/Hardmath123/nearley
// nearley's standard postprocessor: pass the first parse result through.
function id$1(x) {
  const [first] = x;
  return first;
}
// "The order of your rules matters. Earlier ones will take precedence." - for an JS object that sounds not very reliable.
// "Only rules from the current state can be matched. You need to copy your rule into all the states you want it to be matched in."
// From: https://github.com/no-context/moo#states
// Regex source fragments kept as strings so they can be composed.
// EXPRESS embedded remark: (* .. *), non-greedy, may span newlines.
const P_EMBEDDED_REMARK = '\\(\\*[^]*?\\*\\)';
// EXPRESS tail remark: '--' up to and including the line break.
const P_TAIL_REMARK = '--[^\\r\\n]*\\r?\\n';
// Optional embedded and/or tail remark, each followed by optional whitespace.
const P_ANY_REMARK_ANYWHERE = '(?:' + P_EMBEDDED_REMARK + '[ \t\r\n]*)?(?:' + P_TAIL_REMARK + '[ \t\r\n]*)?';
// Trailing whitespace/remarks at the end of a token; used by convStr$1.
const rxOptWsAtEnd$1 = new RegExp('\\s*' + P_ANY_REMARK_ANYWHERE + '\$', 'g');
/**
 * Convert a raw string-literal token: strip trailing whitespace/remarks,
 * then drop the surrounding quote characters.
 * Decoding (if needed) is done lazy.
 */
function convStr$1(str) {
  const cleaned = str.replace(rxOptWsAtEnd$1, '');
  return cleaned.slice(1, -1);
}
// EXPRESS simple identifier: a letter followed by letters/digits/underscores.
const P_SIMPLE_ID$1 = '[a-zA-Z][a-zA-Z0-9_]*';
// Real literal: decimal point required, exponent optional.
const P_REAL_VALUE$1 = '[+-]?[0-9]+\\.[0-9]*(?:[eE][+-]?[0-9]+)?';
// Integer literal with optional sign.
const P_INT_VALUE$1 = '[+-]?[0-9]+';
//const P_STR_VALUE = "'(?:[^']| |'')*'"
// Single-quoted string (\x27 = '); a doubled quote '' is the escape form.
const P_STR_VALUE$1 = '\x27(?:[^\x27]| |\x27\x27)*\x27';
// string: {match: /"(?:\\["\\]|[^\n"\\])*"/, value: s => s.slice(1, -1)}
// Encoded string: double quotes around groups of exactly 8 hex digits.
const P_ENCSTR_VALUE = '"(?:[0-9A-F]{8,8})*"';
// See: https://github.com/no-context/moo#value-vs-text
// Binary literal: double-quoted, leading digit 0-3, then hex digits.
const P_BIN_VALUE$1 = '"[0-3][0-9A-F]*"';
// moo lexer state table; only a single state 'main' is used.
// NOTE(review): the punctuation literals are listed with longer strings
// before their prefixes (':=:' before ':=', '<=' before '<', '||' before
// '|', ...) — per the moo quotes above, rule order matters; keep this
// ordering intact when editing.
const sts$1 = {};
sts$1['main'] = {};
// Whitespace and both remark forms are produced as ordinary tokens here
// and filtered out again in the patched lexer$1.next further below.
sts$1['main']['WS'] = {
  match: /[ \t\r\n]+/,
  lineBreaks: true
};
sts$1['main']['EMBEDDED_REMARK'] = {
  match: new RegExp(P_EMBEDDED_REMARK),
  lineBreaks: true
};
sts$1['main']['TAIL_REMARK'] = {
  match: new RegExp(P_TAIL_REMARK),
  lineBreaks: true
};
sts$1['main']['LB'] = '[';
sts$1['main']['RB'] = ']';
sts$1['main']['LP'] = '(';
sts$1['main']['RP'] = ')';
sts$1['main']['LC'] = '{';
sts$1['main']['RC'] = '}';
sts$1['main']['SNE'] = ':<>:';
sts$1['main']['SEQ'] = ':=:';
sts$1['main']['ASSIGN'] = ':=';
sts$1['main']['COLON'] = ':';
sts$1['main']['DOT'] = '.';
sts$1['main']['SEMI'] = ';';
sts$1['main']['COMMA'] = ',';
sts$1['main']['INDET'] = '?';
sts$1['main']['BACKSLASH'] = '\\';
sts$1['main']['DOUBLE_SOLID'] = '||';
sts$1['main']['SOLID'] = '|';
sts$1['main']['LESS_ASTERISK'] = '<*';
sts$1['main']['DOUBLE_ASTERISTK'] = '**';
sts$1['main']['ASTERISK'] = '*';
sts$1['main']['DIVIS'] = '/';
sts$1['main']['PLUS'] = '+';
sts$1['main']['MINUS'] = '-';
sts$1['main']['LE'] = '<=';
sts$1['main']['NE'] = '<>';
sts$1['main']['LT'] = '<';
sts$1['main']['GE'] = '>=';
sts$1['main']['GT'] = '>';
sts$1['main']['EQ'] = '=';
// Literal value tokens built from the pattern fragments above.
sts$1['main']['REAL_LIT'] = {
  match: new RegExp(P_REAL_VALUE$1)
};
sts$1['main']['INT'] = {
  match: new RegExp(P_INT_VALUE$1)
};
sts$1['main']['STR'] = {
  match: new RegExp(P_STR_VALUE$1)
};
sts$1['main']['ENCSTR'] = {
  match: new RegExp(P_ENCSTR_VALUE)
};
sts$1['main']['BIN'] = {
  match: new RegExp(P_BIN_VALUE$1)
};
// EXPRESS keyword list; the entries double as moo token type names.
// The '// before X' remarks mark keywords that are textual prefixes of
// others (e.g. TYPEOF before TYPE).
// NOTE(review): moo.keywords appears to do exact-match lookups, so the
// ordering remarks may be historical — keep the order anyway.
const kwds = [];
kwds.push('SCHEMA');
kwds.push('TYPEOF'); // before TYPE
kwds.push('TYPE');
kwds.push('ENTITY');
kwds.push('RULE');
kwds.push('FUNCTION');
kwds.push('PROCEDURE');
kwds.push('SUBTYPE_CONSTRAINT'); // before SUBTYPE
kwds.push('SUBTYPE');
kwds.push('SUPERTYPE');
kwds.push('GENERIC_ENTITY');
kwds.push('GENERIC');
kwds.push('RENAMED');
kwds.push('LOCAL');
kwds.push('WHERE');
kwds.push('DERIVE');
kwds.push('INVERSE');
kwds.push('ARRAY');
kwds.push('BAG');
kwds.push('LIST');
kwds.push('SET');
kwds.push('OPTIONAL');
kwds.push('UNIQUE');
kwds.push('AGGREGATE');
kwds.push('BINARY');
kwds.push('BOOLEAN');
kwds.push('INTEGER');
kwds.push('LOGICAL');
kwds.push('NUMBER');
kwds.push('REAL');
kwds.push('STRING');
kwds.push('FIXED');
kwds.push('REFERENCE');
kwds.push('CONSTANT');
kwds.push('FROM');
kwds.push('EXTENSIBLE');
kwds.push('ANDOR'); // before AND
kwds.push('AND');
kwds.push('ONEOF');
kwds.push('END_SCHEMA'); // before END
kwds.push('END_ENTITY'); // before END
kwds.push('END_SUBTYPE_CONSTRAINT'); // before END
kwds.push('END_TYPE'); // before END
kwds.push('END_FUNCTION'); // before END
kwds.push('END_PROCEDURE'); // before END
kwds.push('END_RULE'); // before END
kwds.push('END_LOCAL'); // before END
kwds.push('END_CONSTANT'); // before END
kwds.push('END_IF'); // before END
kwds.push('END_ALIAS'); // before END
kwds.push('END_REPEAT'); // before END
kwds.push('END_CASE'); // before END
kwds.push('END');
kwds.push('RETURN');
kwds.push('REMOVE');
kwds.push('INSERT');
kwds.push('SKIP');
kwds.push('ESCAPE');
kwds.push('CASE');
kwds.push('BEGIN');
kwds.push('ALIAS');
kwds.push('REPEAT');
kwds.push('IF');
kwds.push('ELSE');
kwds.push('ABSTRACT'); // before ABS
kwds.push('ABS');
kwds.push('CONST_E');
kwds.push('PI');
kwds.push('SELF');
kwds.push('ACOS');
kwds.push('ASIN');
kwds.push('ATAN');
kwds.push('BLENGTH');
kwds.push('COS');
kwds.push('EXISTS');
kwds.push('EXP');
kwds.push('FORMAT'); // before FOR
kwds.push('FOR');
kwds.push('HIBOUND');
kwds.push('HIINDEX');
kwds.push('LENGTH');
kwds.push('LOBOUND');
kwds.push('LOINDEX');
kwds.push('LOG2'); // before LOG
kwds.push('LOG10'); // before LOG
kwds.push('LOG');
kwds.push('NVL');
kwds.push('ODD');
kwds.push('ROLESOF');
kwds.push('SIN');
kwds.push('SIZEOF');
kwds.push('SQRT');
kwds.push('TAN');
kwds.push('USEDIN'); // before USE
kwds.push('USE');
kwds.push('VALUE_IN'); // before VALUE
kwds.push('VALUE_UNIQUE'); // before VALUE
kwds.push('VALUE');
kwds.push('QUERY');
kwds.push('XOR');
kwds.push('TRUE');
kwds.push('FALSE');
kwds.push('UNKNOWN');
kwds.push('LIKE');
kwds.push('NOT');
kwds.push('DIV');
kwds.push('MOD');
kwds.push('OTHERWISE');
kwds.push('WHILE');
kwds.push('UNTIL');
kwds.push('THEN');
kwds.push('SELECT');
kwds.push('ENUMERATION');
kwds.push('BASED_ON');
kwds.push('WITH');
kwds.push('VAR');
kwds.push('TOTAL_OVER'); // before TO
kwds.push('TO');
kwds.push('OR');
kwds.push('IN');
kwds.push('AS');
kwds.push('OF');
kwds.push('BY');
// Keyword name -> token type map (identity mapping); also used by the
// patched lexer$1.next below for case-insensitive keyword detection.
const kwm = Object.fromEntries(kwds.map(k => [k, k]));
// Identifiers: a SIMPLE_ID exactly matching a keyword is re-typed via
// moo's keyword transform.
sts$1['main']['SIMPLE_ID'] = {
  match: new RegExp(P_SIMPLE_ID$1),
  type: moo.keywords(kwm)
};
// Build the moo lexer from the single-state table assembled above.
const lexer$1 = moo.states(sts$1);
/**
 * Extract the position info of a moo token into the compact 'p' node
 * stored on parsed JSON elements (the token value itself is dropped).
 * @param {Object} token - moo token
 * @returns {{line:number, col:number, offset:number, lineBreaks:number}}
 */
function pNfo$1(token) {
  const { line, col, offset, lineBreaks } = token;
  return { line, col, offset, lineBreaks };
}
/**
 * Map a relational-operator token (by its type) to the short op code
 * used in the parsed JSON. Throws on anything unknown.
 */
function toRlo$1(dd) {
  const table = new Map([
    ['le', 'le'], ['ge', 'ge'], ['ne', 'ne'], ['eq', 'eq'],
    ['seq', 'se'], ['sne', 'sn'], ['lt', 'lt'], ['gt', 'gt'],
    ['in', 'in'], ['like', 'lik']
  ]);
  const raw = dd.type.toLowerCase();
  if (!table.has(raw)) {
    throw new Error(`Invalid relational operator "${raw}" error.`);
  }
  return table.get(raw);
}
/**
 * Map an addition-level operator token (+ - OR XOR) to its op code.
 * Throws on anything unknown.
 */
function toAdo$1(d) {
  const table = new Map([
    ['plus', 'pls'], ['minus', 'min'], ['or', 'oor'], ['xor', 'xor']
  ]);
  const raw = d[0][0].type.toLowerCase();
  if (!table.has(raw)) {
    throw new Error(`Invalid addition/subtraction (or/xor) operator "${raw}" error.`);
  }
  return table.get(raw);
}
/**
 * Map a multiplication-level operator token (* / DIV MOD AND ||) to its
 * op code. Throws on anything unknown.
 */
function toMpo$1(d) {
  const table = new Map([
    ['asterisk', 'ast'], ['divis', 'dvs'], ['div', 'div'],
    ['mod', 'mod'], ['and', 'and'], ['double_solid', 'll2']
  ]);
  const raw = d[0][0].type.toLowerCase();
  if (!table.has(raw)) {
    throw new Error(`Invalid multiplication/division (and and double solid) operator "${raw}" error.`);
  }
  return table.get(raw);
}
/**
 * Map a unary operator token (+ - NOT) to its op code.
 * Throws on anything unknown.
 */
function toUno$1(d) {
  const table = new Map([
    ['plus', 'psu'], ['minus', 'msu'], ['not', 'not']
  ]);
  const raw = d[0][0].type.toLowerCase();
  if (!table.has(raw)) {
    throw new Error(`Invalid unary operator "${raw}" error.`);
  }
  return table.get(raw);
}
/**
 * Map a built-in constant token (by its text value, case-insensitive)
 * to its three-letter code. Throws on anything unknown.
 */
function toBic$1(d) {
  const table = new Map([
    ['const_e', 'eul'], ['pi', 'pii'], ['self', 'slf'], ['?', 'idt'],
    ['true', 'tru'], ['false', 'fls'], ['unknown', 'ukn']
  ]);
  const raw = d[0][0].value.toLowerCase();
  if (!table.has(raw)) {
    throw new Error(`Invalid built in constant "${raw}" error.`);
  }
  return table.get(raw);
}
/**
 * Map a built-in function keyword token (by its text value,
 * case-insensitive) to its three-letter code. Throws on anything unknown.
 */
function toBif$1(d) {
  const table = new Map([
    ['abs', 'abs'], ['acos', 'acs'], ['asin', 'asn'], ['atan', 'atn'],
    ['blength', 'bln'], ['cos', 'cos'], ['exists', 'xst'], ['exp', 'exp'],
    ['format', 'fmt'], ['hibound', 'hbd'], ['hiindex', 'hdx'],
    ['length', 'len'], ['lobound', 'lbd'], ['loindex', 'ldx'],
    ['log', 'lgn'], ['log2', 'lg2'], ['log10', 'lgx'], ['nvl', 'nvl'],
    ['odd', 'odd'], ['rolesof', 'rlf'], ['sin', 'sin'], ['sizeof', 'szf'],
    ['sqrt', 'qrt'], ['tan', 'tan'], ['typeof', 'tpf'], ['usedin', 'usd'],
    ['value', 'vlu'], ['value_in', 'vln'], ['value_unique', 'vlq']
  ]);
  const raw = d[0][0].value.toLowerCase();
  const code = table.get(raw);
  if (code === undefined) {
    throw new Error(`Invalid built in function "${raw}" error.`);
  }
  return code;
}
/**
 * Map a built-in procedure token (INSERT/REMOVE, by its text value,
 * case-insensitive) to its three-letter code. Throws on anything unknown.
 */
function toBip(d) {
  const table = new Map([
    ['insert', 'ins'], ['remove', 'rmv']
  ]);
  const raw = d[0][0].value.toLowerCase();
  if (!table.has(raw)) {
    throw new Error(`Invalid built in procedure "${raw}" error.`);
  }
  return table.get(raw);
}
/**
 * Map a simple data type keyword token (by its text value,
 * case-insensitive) to its type tag. Throws on anything unknown.
 * REAL/STRING/BINARY are handled separately (see withWidthSpec).
 */
function toSim(d) {
  const table = new Map([
    ['boolean', 't_bol'], ['integer', 't_int'],
    ['logical', 't_lox'], ['number', 't_nbr']
  ]);
  const raw = d[0][0].value.toLowerCase();
  if (!table.has(raw)) {
    throw new Error(`Invalid simple data type "${raw}" error.`);
  }
  return table.get(raw);
}
// REAL: the single precision value of the width_spec goes into the
// primary bound 'fr' (primIsFrom = true).
function toReal(token) {
  return withWidthSpec(token, 't_rea', true);
}
// STRING: the width goes into the upper bound 'to'.
function toStr(token) {
  return withWidthSpec(token, 't_str');
}
// BINARY: the width goes into the upper bound 'to'.
function toBin(token) {
  return withWidthSpec(token, 't_bin');
}
/**
 * Build a {t:typeStr, bds:..} simple-type node from a parse result with an
 * optional width_spec in token[1].
 * token[1] (when present) is REWRITTEN IN PLACE into a 'bds' bounds node:
 * the single 'width' entry (either a {t:'width_ref'} reference or an
 * integer token) is wrapped in the usual expression nesting
 * (a_sxp_trms > a_trm_ftrs > ftr) and stored in the primary bound field;
 * with FIXED the identical expression object is shared into the secondary
 * field, otherwise the secondary field is null.
 *
 * @param {Array} token - rule result; token[1] is the width_spec or null
 * @param {string} typeStr - 't_rea' | 't_str' | 't_bin'
 * @param {boolean} primIsFrom - true for REAL (precision goes into 'fr');
 *   false for STRING/BINARY widths (which go into 'to')
 * @returns {Object} {t:typeStr, bds:<rewritten token[1] or null>}
 */
function withWidthSpec(token, typeStr, primIsFrom = false) {
  if (token[1]) {
    // cannot check token.length since token[1] is null
    let primFld = 'to';
    let scndFld = 'fr';
    if (primIsFrom) {
      primFld = 'fr';
      scndFld = 'to';
    }
    // See width_spec on how token[1] looks like.
    if ('width' in token[1]) {
      // console.log(token[1].width)
      if (token[1].width.t == 'width_ref') {
        // Width given as a named reference.
        token[1].t = 'bds';
        const fixed = token[1].fixed;
        const varRef = token[1].width.id;
        token[1][primFld] = {
          t: 'a_sxp_trms',
          s: [{
            t: 'a_trm_ftrs',
            s: [{
              t: 'ftr',
              arg0: {
                t: 'any_ref',
                id: varRef
              },
              qals0: null,
              arg1: null,
              qals1: null,
              p: pNfo$1(token[1].p)
            }]
          }]
        };
        if (fixed) {
          // FIXED: both bounds share the same expression object.
          token[1][scndFld] = token[1][primFld];
        } else {
          token[1][scndFld] = null;
        }
        delete token[1].width;
        delete token[1].fixed;
      } else {
        // Width given as an integer literal token.
        token[1].t = 'bds';
        const fixed = token[1].fixed;
        const parsed = parseInt(token[1].width.value);
        // NOTE(review): for a non-numeric or non-positive width nothing
        // further happens, leaving token[1] half-converted ('t' already
        // set to 'bds', 'width'/'fixed' still present) — confirm intended.
        if (!isNaN(parsed) && 0 < parsed) {
          token[1][primFld] = {
            t: 'a_sxp_trms',
            s: [{
              t: 'a_trm_ftrs',
              s: [{
                t: 'ftr',
                arg0: {
                  t: 'int',
                  val: parsed
                },
                qals0: null,
                arg1: null,
                qals1: null
              }]
            }]
          };
          if (fixed) {
            // FIXED: both bounds share the same expression object.
            token[1][scndFld] = token[1][primFld];
          } else {
            token[1][scndFld] = null;
          }
          delete token[1].width;
          delete token[1].fixed;
        }
      }
    }
    // TODO what about p:pNfo?
    return {
      t: typeStr,
      bds: token[1]
    };
  } else {
    // No width_spec given.
    return {
      t: typeStr,
      bds: null
    };
  }
}
/**
 * Wrap a non-empty JS array into the semantic-array object format
 * ({t:tName, s:[..]}, see header comment); empty or missing input
 * collapses to null.
 * @param {?Array} token - the raw elements
 * @param {string} tName - the semantic array's type name ('a_*')
 * @returns {?Object}
 */
function toArr$1(token, tName) {
  if (!token || token.length === 0) {
    return null;
  }
  return {
    t: tName,
    s: token
  };
}
/**
 * Normalize a bounds node (also used for t:'itr'): both bound expressions
 * are run through simplifyBndExpr so a bare '?' (INDET) collapses to null.
 * Originally this was just an undefined check, but the parser rules could
 * not be made to yield a simple INDET representation when the token is
 * just a question_mark — hence simplifyBndExpr(..).
 * @param {?Object} token - bounds node with 'fr'/'to', mutated in place
 * @returns {?Object} the same node, or null for missing input
 */
function toBds(token) {
  if (!token) {
    return null;
  }
  token.fr = simplifyBndExpr(token.fr);
  token.to = simplifyBndExpr(token.to);
  return token;
}
/**
 * Collapse a bound expression that is nothing but the built-in constant
 * '?' (bic 'idt') wrapped in the standard single-term/single-factor
 * expression nesting down to null; any other expression passes through
 * unchanged.
 */
function simplifyBndExpr(expr) {
  const term = expr && expr.t == 'a_sxp_trms' && expr.s.length == 1 ? expr.s[0] : null;
  const factor = term && term.t == 'a_trm_ftrs' && term.s.length == 1 ? term.s[0] : null;
  if (factor && factor.t == 'ftr' && factor.arg1 == null && factor.arg0.t == 'bic' && factor.arg0.id == 'idt') {
    return null;
  }
  return expr;
}
/**
 * Build the JSON node for an INVERSE attribute clause.
 * d layout (from the grammar rule): d[0] attribute id, d[2] optional
 * aggregation ([[SET|BAG token]] plus optional bounds), d[3] entity
 * reference token, d[5] token of the inverted attribute's id.
 * When an aggregation is given without explicit bounds, default bounds
 * [0:?] (lower bound 0, open upper bound) are synthesized.
 * NOTE(review): 'id' stores d[0] directly while pNfo$1(d[0]) reads token
 * position fields; per the format header the 'id' of an 'inv' is an
 * object — confirm d[0]'s exact shape against the grammar rule.
 */
function toInverse(d) {
  const res = {
    t: 'inv',
    id: d[0],
    agrTyp: null,
    agrBds: null,
    nttRef: toNttRef$1(d[3]),
    invRef: {
      t: 'any_ref',
      id: d[5].value,
      p: pNfo$1(d[5])
    },
    p: pNfo$1(d[0])
  };
  if (d[2]) {
    // Aggregation keyword, lower-cased (e.g. 'set', 'bag').
    res.agrTyp = d[2][0][0].value.toLowerCase();
    if (d[2][1]) {
      res.agrBds = toBds(d[2][1]);
    } else {
      // No explicit bounds: synthesize lower bound 0, open upper bound.
      res.agrBds = {
        t: 'bds',
        fr: {
          t: 'a_sxp_trms',
          s: [{
            t: 'a_trm_ftrs',
            s: [{
              t: 'ftr',
              arg0: {
                t: 'int',
                val: 0
              },
              qals0: null,
              arg1: null,
              qals1: null
            }]
          }]
        },
        to: null
      };
    }
  }
  return res;
}
/**
 * Build an 'any_ref' node from an entity_ref | type_ref token;
 * a token without a value yields null.
 */
function toEotRef(token) {
  if (!token.value) {
    return null;
  }
  return {
    t: 'any_ref',
    id: token.value,
    p: pNfo$1(token)
  };
}
/**
 * Build an 'ntt_ref' node from an entity_ref token; a missing token or
 * one without a value yields null.
 */
function toNttRef$1(token) {
  if (!token || !token.value) {
    return null;
  }
  return {
    t: 'ntt_ref',
    id: token.value,
    p: pNfo$1(token)
  };
}
// Make remarks (e.g. EMBEDDED_REMARK) not to be passed to the parser.
// The original lexer$1.next is wrapped: WS/remark tokens are skipped, and
// a SIMPLE_ID whose upper-cased value matches a keyword is re-typed to
// that keyword, so lower/mixed-case keyword spellings also lex as
// keywords (moo.keywords only matched the exact upper-case form).
lexer$1.next = (next => () => {
  let tok;
  while ((tok = next.call(lexer$1)) && (tok.type === 'WS' || tok.type === 'EMBEDDED_REMARK' || tok.type === 'TAIL_REMARK')) {}
  //if (tok) { // Out the result from the lexer.
  //  console.log(tok.type, tok.value, lexer.stack, lexer.state)
  //}
  if (tok && tok.type === 'SIMPLE_ID') {
    // Check if it is not a (partially) lower case keyword.
    const tType = kwm[tok.value.toUpperCase()];
    if (tType) {
      tok.type = tType;
    }
  }
  return tok;
})(lexer$1.next);
// Lexer handle consumed by the generated nearley grammar below.
let Lexer$1 = lexer$1;
let ParserRules$1 = [{
"name": "main$ebnf$1",
"symbols": [lexer$1.has("WS") ? {
type: "WS"
} : WS],
"postprocess": id$1
}, {
"name": "main$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "main",
"symbols": ["main$ebnf$1", "syntax"],
"postprocess": d => d[1]
}, {
"name": "type_decl$ebnf$1",
"symbols": ["where_clause"],
"postprocess": id$1
}, {
"name": "type_decl$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "type_decl",
"symbols": [lexer$1.has("TYPE") ? {
type: "TYPE"
} : TYPE, lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID, lexer$1.has("EQ") ? {
type: "EQ"
} : EQ, "underlying_type", lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI, "type_decl$ebnf$1", lexer$1.has("END_TYPE") ? {
type: "END_TYPE"
} : END_TYPE, lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI],
"postprocess": d => ({
t: 'typ',
id: d[1].value,
spc: d[3],
whrs: toArr$1(d[5], 'a_typ_whrs'),
p: pNfo$1(d[0])
})
}, {
"name": "underlying_type$subexpression$1",
"symbols": ["enumeration_type"]
}, {
"name": "underlying_type$subexpression$1",
"symbols": ["select_type"]
}, {
"name": "underlying_type$subexpression$1",
"symbols": ["general_aggregation_types"]
}, {
"name": "underlying_type$subexpression$1",
"symbols": ["simple_types"]
}, {
"name": "underlying_type$subexpression$1",
"symbols": ["named_types"]
}, {
"name": "underlying_type",
"symbols": ["underlying_type$subexpression$1"],
"postprocess": d => d[0][0]
}, {
"name": "enumeration_type$ebnf$1",
"symbols": [lexer$1.has("EXTENSIBLE") ? {
type: "EXTENSIBLE"
} : EXTENSIBLE],
"postprocess": id$1
}, {
"name": "enumeration_type$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "enumeration_type",
"symbols": ["enumeration_type$ebnf$1", lexer$1.has("ENUMERATION") ? {
type: "ENUMERATION"
} : ENUMERATION, lexer$1.has("OF") ? {
type: "OF"
} : OF, lexer$1.has("LP") ? {
type: "LP"
} : LP, "enum_id_list", lexer$1.has("RP") ? {
type: "RP"
} : RP],
"postprocess": d => ({
t: 'enm',
xtd: !!d[0],
vals: toArr$1(d[4], 'a_enm_vals'),
p: pNfo$1(d[1])
})
}, {
"name": "enumeration_type$ebnf$2",
"symbols": [lexer$1.has("EXTENSIBLE") ? {
type: "EXTENSIBLE"
} : EXTENSIBLE],
"postprocess": id$1
}, {
"name": "enumeration_type$ebnf$2",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "enumeration_type",
"symbols": ["enumeration_type$ebnf$2", lexer$1.has("ENUMERATION") ? {
type: "ENUMERATION"
} : ENUMERATION, lexer$1.has("BASED_ON") ? {
type: "BASED_ON"
} : BASED_ON, lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID, lexer$1.has("WITH") ? {
type: "WITH"
} : WITH, lexer$1.has("LP") ? {
type: "LP"
} : LP, "enum_id_list", lexer$1.has("RP") ? {
type: "RP"
} : RP],
"postprocess": d => ({
t: 'enm_xtdd',
xtd: !!d[0],
ref: {
t: 'typ_ref',
id: d[3].value
},
vals: toArr$1(d[6], 'a_enm_vals'),
p: pNfo$1(d[1])
})
}, {
"name": "enum_id_list$ebnf$1",
"symbols": []
}, {
"name": "enum_id_list$ebnf$1$subexpression$1",
"symbols": [lexer$1.has("COMMA") ? {
type: "COMMA"
} : COMMA, "enum_value"]
}, {
"name": "enum_id_list$ebnf$1",
"symbols": ["enum_id_list$ebnf$1", "enum_id_list$ebnf$1$subexpression$1"],
"postprocess": function arrpush(d) {
return d[0].concat([d[1]]);
}
}, {
"name": "enum_id_list",
"symbols": ["enum_value", "enum_id_list$ebnf$1"],
"postprocess": d => d[1][0] ? [d[0]].concat(d[1].map(e => e.slice(1)[0])) : [d[0]]
}, {
"name": "enum_value",
"symbols": [lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID],
"postprocess": d => ({
t: 'evl',
id: d[0].value,
p: pNfo$1(d[0])
})
}, {
"name": "select_type$ebnf$1$subexpression$1$ebnf$1",
"symbols": [lexer$1.has("GENERIC_ENTITY") ? {
type: "GENERIC_ENTITY"
} : GENERIC_ENTITY],
"postprocess": id$1
}, {
"name": "select_type$ebnf$1$subexpression$1$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "select_type$ebnf$1$subexpression$1",
"symbols": [lexer$1.has("EXTENSIBLE") ? {
type: "EXTENSIBLE"
} : EXTENSIBLE, "select_type$ebnf$1$subexpression$1$ebnf$1"]
}, {
"name": "select_type$ebnf$1",
"symbols": ["select_type$ebnf$1$subexpression$1"],
"postprocess": id$1
}, {
"name": "select_type$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "select_type",
"symbols": ["select_type$ebnf$1", lexer$1.has("SELECT") ? {
type: "SELECT"
} : SELECT, lexer$1.has("LP") ? {
type: "LP"
} : LP, "select_ref_list", lexer$1.has("RP") ? {
type: "RP"
} : RP],
"postprocess": d => ({
t: 'slc',
xtd: !!(d[0] && !!d[0][0]),
refs: toArr$1(d[3], 'a_slc_refs'),
nttOnly: !!(d[0] && !!d[0][1]),
p: pNfo$1(d[1])
})
}, {
"name": "select_type$ebnf$2$subexpression$1$ebnf$1",
"symbols": [lexer$1.has("GENERIC_ENTITY") ? {
type: "GENERIC_ENTITY"
} : GENERIC_ENTITY],
"postprocess": id$1
}, {
"name": "select_type$ebnf$2$subexpression$1$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "select_type$ebnf$2$subexpression$1",
"symbols": [lexer$1.has("EXTENSIBLE") ? {
type: "EXTENSIBLE"
} : EXTENSIBLE, "select_type$ebnf$2$subexpression$1$ebnf$1"]
}, {
"name": "select_type$ebnf$2",
"symbols": ["select_type$ebnf$2$subexpression$1"],
"postprocess": id$1
}, {
"name": "select_type$ebnf$2",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "select_type",
"symbols": ["select_type$ebnf$2", lexer$1.has("SELECT") ? {
type: "SELECT"
} : SELECT, lexer$1.has("BASED_ON") ? {
type: "BASED_ON"
} : BASED_ON, lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID, lexer$1.has("WITH") ? {
type: "WITH"
} : WITH, lexer$1.has("LP") ? {
type: "LP"
} : LP, "select_ref_list", lexer$1.has("RP") ? {
type: "RP"
} : RP],
"postprocess": d => ({
t: 'slc_xtdd',
xtd: !!(d[0] && !!d[0][0]),
ref: {
t: 'typ_ref',
id: d[3].value
},
refs: toArr$1(d[6], 'a_slc_refs'),
nttOnly: !!(d[0] && !!d[0][1]),
p: pNfo$1(d[1])
})
}, {
"name": "select_ref_list$ebnf$1",
"symbols": []
}, {
"name": "select_ref_list$ebnf$1$subexpression$1",
"symbols": [lexer$1.has("COMMA") ? {
type: "COMMA"
} : COMMA, "named_types"]
}, {
"name": "select_ref_list$ebnf$1",
"symbols": ["select_ref_list$ebnf$1", "select_ref_list$ebnf$1$subexpression$1"],
"postprocess": function arrpush(d) {
return d[0].concat([d[1]]);
}
}, {
"name": "select_ref_list",
"symbols": ["named_types", "select_ref_list$ebnf$1"],
"postprocess": d => d[1][0] ? [d[0]].concat(d[1].map(e => e.slice(1)[0])) : [d[0]]
}, {
"name": "function_decl$ebnf$1",
"symbols": ["formal_params"],
"postprocess": id$1
}, {
"name": "function_decl$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "function_decl$ebnf$2",
"symbols": []
}, {
"name": "function_decl$ebnf$2",
"symbols": ["function_decl$ebnf$2", "declaration"],
"postprocess": function arrpush(d) {
return d[0].concat([d[1]]);
}
}, {
"name": "function_decl$ebnf$3",
"symbols": ["constant_decl"],
"postprocess": id$1
}, {
"name": "function_decl$ebnf$3",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "function_decl$ebnf$4",
"symbols": ["local_decl"],
"postprocess": id$1
}, {
"name": "function_decl$ebnf$4",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "function_decl$ebnf$5",
"symbols": ["stmt"]
}, {
"name": "function_decl$ebnf$5",
"symbols": ["function_decl$ebnf$5", "stmt"],
"postprocess": function arrpush(d) {
return d[0].concat([d[1]]);
}
}, {
"name": "function_decl",
"symbols": [lexer$1.has("FUNCTION") ? {
type: "FUNCTION"
} : FUNCTION, lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID, "function_decl$ebnf$1", lexer$1.has("COLON") ? {
type: "COLON"
} : COLON, "parameter_type", lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI, "function_decl$ebnf$2", "function_decl$ebnf$3", "function_decl$ebnf$4", "function_decl$ebnf$5", lexer$1.has("END_FUNCTION") ? {
type: "END_FUNCTION"
} : END_FUNCTION, lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI],
"postprocess": d => ({
t: 'fun',
id: d[1].value,
prms: toArr$1(d[2], 'a_fun_prms'),
res: d[4],
ctts: toArr$1(d[6], 'a_fun_ctts'),
csts: toArr$1(d[7], 'a_fun_csts'),
lcls: toArr$1(d[8], 'a_fun_lcls'),
stms: toArr$1(d[9], 'a_fun_stms'),
p: pNfo$1(d[0])
})
}, {
"name": "procedure_decl$ebnf$1",
"symbols": ["p_formal_params"],
"postprocess": id$1
}, {
"name": "procedure_decl$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "procedure_decl$ebnf$2",
"symbols": []
}, {
"name": "procedure_decl$ebnf$2",
"symbols": ["procedure_decl$ebnf$2", "declaration"],
"postprocess": function arrpush(d) {
return d[0].concat([d[1]]);
}
}, {
"name": "procedure_decl$ebnf$3",
"symbols": ["constant_decl"],
"postprocess": id$1
}, {
"name": "procedure_decl$ebnf$3",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "procedure_decl$ebnf$4",
"symbols": ["local_decl"],
"postprocess": id$1
}, {
"name": "procedure_decl$ebnf$4",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "procedure_decl$ebnf$5",
"symbols": []
}, {
"name": "procedure_decl$ebnf$5",
"symbols": ["procedure_decl$ebnf$5", "stmt"],
"postprocess": function arrpush(d) {
return d[0].concat([d[1]]);
}
}, {
"name": "procedure_decl",
"symbols": [lexer$1.has("PROCEDURE") ? {
type: "PROCEDURE"
} : PROCEDURE, lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID, "procedure_decl$ebnf$1", lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI, "procedure_decl$ebnf$2", "procedure_decl$ebnf$3", "procedure_decl$ebnf$4", "procedure_decl$ebnf$5", lexer$1.has("END_PROCEDURE") ? {
type: "END_PROCEDURE"
} : END_PROCEDURE, lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI],
"postprocess": d => ({
t: 'prc',
id: d[1].value,
prms: toArr$1(d[2], 'a_prc_prms'),
ctts: toArr$1(d[4], 'a_prc_ctts'),
csts: toArr$1(d[5], 'a_prc_csts'),
lcls: toArr$1(d[6], 'a_prc_lcls'),
stms: toArr$1(d[7], 'a_prc_stms'),
p: pNfo$1(d[0])
})
}, {
// rule_decl$ebnf$1: zero or more nested "declaration" entries.
"name": "rule_decl$ebnf$1",
"symbols": []
}, {
"name": "rule_decl$ebnf$1",
"symbols": ["rule_decl$ebnf$1", "declaration"],
"postprocess": function arrpush(d) {
return d[0].concat([d[1]]);
}
}, {
// rule_decl$ebnf$2: optional constant_decl (null when absent).
// id$1 is nearley's identity postprocessor, defined elsewhere in this file.
"name": "rule_decl$ebnf$2",
"symbols": ["constant_decl"],
"postprocess": id$1
}, {
"name": "rule_decl$ebnf$2",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
// rule_decl$ebnf$3: optional local_decl (null when absent).
"name": "rule_decl$ebnf$3",
"symbols": ["local_decl"],
"postprocess": id$1
}, {
"name": "rule_decl$ebnf$3",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
// rule_decl$ebnf$4: zero or more "stmt".
"name": "rule_decl$ebnf$4",
"symbols": []
}, {
"name": "rule_decl$ebnf$4",
"symbols": ["rule_decl$ebnf$4", "stmt"],
"postprocess": function arrpush(d) {
return d[0].concat([d[1]]);
}
}, {
// rule_decl: "RULE id FOR ( entity refs ) ; ... where_clause END_RULE ;"
// -> 'rul' node. d indices follow the symbols array: d[4]=ntt_ref_list,
// d[7..10]=the four ebnf helpers above, d[11]=where_clause.
"name": "rule_decl",
"symbols": [lexer$1.has("RULE") ? {
type: "RULE"
} : RULE, lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID, lexer$1.has("FOR") ? {
type: "FOR"
} : FOR, lexer$1.has("LP") ? {
type: "LP"
} : LP, "ntt_ref_list", lexer$1.has("RP") ? {
type: "RP"
} : RP, lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI, "rule_decl$ebnf$1", "rule_decl$ebnf$2", "rule_decl$ebnf$3", "rule_decl$ebnf$4", "where_clause", lexer$1.has("END_RULE") ? {
type: "END_RULE"
} : END_RULE, lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI],
"postprocess": d => ({
t: 'rul',
id: d[1].value,
nttRefs: toArr$1(d[4], 'a_rul_refs'),
ctts: toArr$1(d[7], 'a_rul_ctts'),
csts: toArr$1(d[8], 'a_rul_csts'),
lcls: toArr$1(d[9], 'a_rul_lcls'),
stms: toArr$1(d[10], 'a_rul_stms'),
whrs: toArr$1(d[11], 'a_rul_whrs'),
p: pNfo$1(d[0])
})
}, {
// ntt_ref_list: comma-separated, one-or-more list of entity references,
// compiled as "ntt_ref (COMMA ntt_ref)*".
"name": "ntt_ref_list$ebnf$1",
"symbols": []
}, {
"name": "ntt_ref_list$ebnf$1$subexpression$1",
"symbols": [lexer$1.has("COMMA") ? {
type: "COMMA"
} : COMMA, "ntt_ref"]
}, {
"name": "ntt_ref_list$ebnf$1",
"symbols": ["ntt_ref_list$ebnf$1", "ntt_ref_list$ebnf$1$subexpression$1"],
"postprocess": function arrpush(d) {
return d[0].concat([d[1]]);
}
}, {
"name": "ntt_ref_list",
"symbols": ["ntt_ref", "ntt_ref_list$ebnf$1"],
// Flatten head + tail: each tail entry is [COMMA, ntt_ref], so slice(1)[0]
// drops the COMMA token and keeps only the reference node.
"postprocess": d => d[1][0] ? [d[0]].concat(d[1].map(e => e.slice(1)[0])) : [d[0]]
}, {
// ntt_ref: a single entity reference (SIMPLE_ID) -> 'ntt_ref' node.
"name": "ntt_ref",
"symbols": [lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID],
"postprocess": d => ({
t: 'ntt_ref',
id: d[0].value,
p: pNfo$1(d[0])
})
}, {
// declaration: union of the five declaration kinds; postprocess unwraps the
// doubly-nested subexpression result to the inner node.
"name": "declaration$subexpression$1",
"symbols": ["type_decl"]
}, {
"name": "declaration$subexpression$1",
"symbols": ["entity_decl"]
}, {
"name": "declaration$subexpression$1",
"symbols": ["subtype_constraint_decl"]
}, {
"name": "declaration$subexpression$1",
"symbols": ["function_decl"]
}, {
"name": "declaration$subexpression$1",
"symbols": ["procedure_decl"]
}, {
"name": "declaration",
"symbols": ["declaration$subexpression$1"],
"postprocess": d => d[0][0]
}, {
// local_decl$ebnf$1: one or more "local_variables" groups.
"name": "local_decl$ebnf$1",
"symbols": ["local_variables"]
}, {
"name": "local_decl$ebnf$1",
"symbols": ["local_decl$ebnf$1", "local_variables"],
"postprocess": function arrpush(d) {
return d[0].concat([d[1]]);
}
}, {
// local_decl: "LOCAL ... END_LOCAL ;" -> returns just the variable groups.
"name": "local_decl",
"symbols": [lexer$1.has("LOCAL") ? {
type: "LOCAL"
} : LOCAL, "local_decl$ebnf$1", lexer$1.has("END_LOCAL") ? {
type: "END_LOCAL"
} : END_LOCAL, lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI],
"postprocess": d => d[1]
}, {
// local_variables$ebnf$1: optional ":= expression" initializer.
"name": "local_variables$ebnf$1$subexpression$1",
"symbols": [lexer$1.has("ASSIGN") ? {
type: "ASSIGN"
} : ASSIGN, "expression"]
}, {
"name": "local_variables$ebnf$1",
"symbols": ["local_variables$ebnf$1$subexpression$1"],
"postprocess": id$1
}, {
"name": "local_variables$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
// local_variables: "ids : type [:= expr] ;" -> 'lcl_dcl' node.
// xpr takes d[3][1] (the expression after ASSIGN) or null when omitted.
// NOTE(review): p is pNfo$1(d[0]) where d[0] is the id-list result, not a
// token -- pNfo$1 apparently handles that shape; confirm at its definition.
"name": "local_variables",
"symbols": ["variable_id_list", lexer$1.has("COLON") ? {
type: "COLON"
} : COLON, "parameter_type", "local_variables$ebnf$1", lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI],
"postprocess": d => ({
t: 'lcl_dcl',
ids: toArr$1(d[0], 'a_lcl_ids'),
typ: d[2],
xpr: d[3] ? d[3][1] : null,
p: pNfo$1(d[0])
})
}, {
// variable_id_list: "variable_id (COMMA variable_id)*", flattened the same
// way as ntt_ref_list above.
"name": "variable_id_list$ebnf$1",
"symbols": []
}, {
"name": "variable_id_list$ebnf$1$subexpression$1",
"symbols": [lexer$1.has("COMMA") ? {
type: "COMMA"
} : COMMA, "variable_id"]
}, {
"name": "variable_id_list$ebnf$1",
"symbols": ["variable_id_list$ebnf$1", "variable_id_list$ebnf$1$subexpression$1"],
"postprocess": function arrpush(d) {
return d[0].concat([d[1]]);
}
}, {
"name": "variable_id_list",
"symbols": ["variable_id", "variable_id_list$ebnf$1"],
"postprocess": d => d[1][0] ? [d[0]].concat(d[1].map(e => e.slice(1)[0])) : [d[0]]
}, {
// variable_id: a single local variable name -> 'lcl' node.
"name": "variable_id",
"symbols": [lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID],
"postprocess": d => ({
t: 'lcl',
id: d[0].value,
p: pNfo$1(d[0])
})
}, {
// parameter_type: dispatch to one of the three type families; each
// alternative unwraps the subexpression nesting with d[0][0].
"name": "parameter_type$subexpression$1",
"symbols": ["generalized_types"]
}, {
"name": "parameter_type$subexpression$1",
"symbols": ["simple_types"]
}, {
"name": "parameter_type$subexpression$1",
"symbols": ["named_types"]
}, {
"name": "parameter_type",
"symbols": ["parameter_type$subexpression$1"],
"postprocess": d => d[0][0]
}, {
// generalized_types: aggregate / general aggregation / generic-entity /
// generic, again unwrapped via d[0][0].
"name": "generalized_types$subexpression$1",
"symbols": ["aggregate_type"]
}, {
"name": "generalized_types$subexpression$1",
"symbols": ["general_aggregation_types"]
}, {
"name": "generalized_types$subexpression$1",
"symbols": ["generic_entity_type"]
}, {
"name": "generalized_types$subexpression$1",
"symbols": ["generic_type"]
}, {
"name": "generalized_types",
"symbols": ["generalized_types$subexpression$1"],
"postprocess": d => d[0][0]
}, {
// general_aggregation_types: ARRAY / BAG / LIST / SET variants.
"name": "general_aggregation_types$subexpression$1",
"symbols": ["general_array_type"]
}, {
"name": "general_aggregation_types$subexpression$1",
"symbols": ["general_bag_type"]
}, {
"name": "general_aggregation_types$subexpression$1",
"symbols": ["general_list_type"]
}, {
"name": "general_aggregation_types$subexpression$1",
"symbols": ["general_set_type"]
}, {
"name": "general_aggregation_types",
"symbols": ["general_aggregation_types$subexpression$1"],
"postprocess": d => d[0][0]
}, {
// general_array_type$ebnf$1: optional bound_spec ("[lo : hi]").
"name": "general_array_type$ebnf$1",
"symbols": ["bound_spec"],
"postprocess": id$1
}, {
"name": "general_array_type$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
// general_array_type$ebnf$2: optional OPTIONAL keyword.
"name": "general_array_type$ebnf$2",
"symbols": [lexer$1.has("OPTIONAL") ? {
type: "OPTIONAL"
} : OPTIONAL],
"postprocess": id$1
}, {
"name": "general_array_type$ebnf$2",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
// general_array_type$ebnf$3: optional UNIQUE keyword.
"name": "general_array_type$ebnf$3",
"symbols": [lexer$1.has("UNIQUE") ? {
type: "UNIQUE"
} : UNIQUE],
"postprocess": id$1
}, {
"name": "general_array_type$ebnf$3",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
// general_array_type: "ARRAY [bounds] OF [OPTIONAL] [UNIQUE] type" -> 'arr'.
// opt/unq coerce the optional keyword tokens (or null) to booleans; toBds is
// a helper defined elsewhere that normalizes the bound_spec (or null).
"name": "general_array_type",
"symbols": [lexer$1.has("ARRAY") ? {
type: "ARRAY"
} : ARRAY, "general_array_type$ebnf$1", lexer$1.has("OF") ? {
type: "OF"
} : OF, "general_array_type$ebnf$2", "general_array_type$ebnf$3", "parameter_type"],
"postprocess": d => ({
t: 'arr',
bds: toBds(d[1]),
opt: !!d[3],
unq: !!d[4],
spc: d[5],
p: pNfo$1(d[0])
})
}, {
// general_bag_type: "BAG [bounds] OF type" -> 'bag' node.
"name": "general_bag_type$ebnf$1",
"symbols": ["bound_spec"],
"postprocess": id$1
}, {
"name": "general_bag_type$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "general_bag_type",
"symbols": [lexer$1.has("BAG") ? {
type: "BAG"
} : BAG, "general_bag_type$ebnf$1", lexer$1.has("OF") ? {
type: "OF"
} : OF, "parameter_type"],
"postprocess": d => ({
t: 'bag',
bds: toBds(d[1]),
spc: d[3],
p: pNfo$1(d[0])
})
}, {
// general_list_type: "LIST [bounds] OF [UNIQUE] type" -> 'lst' node.
"name": "general_list_type$ebnf$1",
"symbols": ["bound_spec"],
"postprocess": id$1
}, {
"name": "general_list_type$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "general_list_type$ebnf$2",
"symbols": [lexer$1.has("UNIQUE") ? {
type: "UNIQUE"
} : UNIQUE],
"postprocess": id$1
}, {
"name": "general_list_type$ebnf$2",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "general_list_type",
"symbols": [lexer$1.has("LIST") ? {
type: "LIST"
} : LIST, "general_list_type$ebnf$1", lexer$1.has("OF") ? {
type: "OF"
} : OF, "general_list_type$ebnf$2", "parameter_type"],
"postprocess": d => ({
t: 'lst',
bds: toBds(d[1]),
unq: !!d[3],
spc: d[4],
p: pNfo$1(d[0])
})
}, {
// general_set_type: "SET [bounds] OF type" -> 'set' node.
"name": "general_set_type$ebnf$1",
"symbols": ["bound_spec"],
"postprocess": id$1
}, {
"name": "general_set_type$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "general_set_type",
"symbols": [lexer$1.has("SET") ? {
type: "SET"
} : SET, "general_set_type$ebnf$1", lexer$1.has("OF") ? {
type: "OF"
} : OF, "parameter_type"],
"postprocess": d => ({
t: 'set',
bds: toBds(d[1]),
spc: d[3],
p: pNfo$1(d[0])
})
}, {
// bound_spec: "[ lo : hi ]" aggregate bounds -> 'bds' node with the two
// simple_expression results as fr/to.
"name": "bound_spec",
"symbols": [lexer$1.has("LB") ? {
type: "LB"
} : LB, "simple_expression", lexer$1.has("COLON") ? {
type: "COLON"
} : COLON, "simple_expression", lexer$1.has("RB") ? {
type: "RB"
} : RB],
"postprocess": d => ({
t: 'bds',
fr: d[1],
to: d[3],
p: pNfo$1(d[0])
})
}, {
// generic_type, unlabeled: bare "GENERIC" -> 'gen' node.
"name": "generic_type",
"symbols": [lexer$1.has("GENERIC") ? {
type: "GENERIC"
} : GENERIC],
"postprocess": d => ({
t: 'gen',
p: pNfo$1(d[0])
})
}, {
// generic_type, labeled: "GENERIC : label" -> 'gen_lbld' with the label id.
"name": "generic_type",
"symbols": [lexer$1.has("GENERIC") ? {
type: "GENERIC"
} : GENERIC, lexer$1.has("COLON") ? {
type: "COLON"
} : COLON, lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID],
"postprocess": d => ({
t: 'gen_lbld',
lbl: d[2].value,
p: pNfo$1(d[0])
})
}, {
// generic_entity_type, unlabeled: "GENERIC_ENTITY" -> 'gtt' node.
"name": "generic_entity_type",
"symbols": [lexer$1.has("GENERIC_ENTITY") ? {
type: "GENERIC_ENTITY"
} : GENERIC_ENTITY],
"postprocess": d => ({
t: 'gtt',
p: pNfo$1(d[0])
})
}, {
// generic_entity_type, labeled: "GENERIC_ENTITY : label" -> 'gtt_lbld'.
"name": "generic_entity_type",
"symbols": [lexer$1.has("GENERIC_ENTITY") ? {
type: "GENERIC_ENTITY"
} : GENERIC_ENTITY, lexer$1.has("COLON") ? {
type: "COLON"
} : COLON, lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID],
"postprocess": d => ({
t: 'gtt_lbld',
lbl: d[2].value,
p: pNfo$1(d[0])
})
}, {
// aggregate_type, unlabeled: "AGGREGATE OF type" -> 'agt' node.
"name": "aggregate_type",
"symbols": [lexer$1.has("AGGREGATE") ? {
type: "AGGREGATE"
} : AGGREGATE, lexer$1.has("OF") ? {
type: "OF"
} : OF, "parameter_type"],
"postprocess": d => ({
t: 'agt',
spc: d[2],
p: pNfo$1(d[0])
})
}, {
// aggregate_type, labeled: "AGGREGATE : label OF type" -> 'agt_lbld'.
"name": "aggregate_type",
"symbols": [lexer$1.has("AGGREGATE") ? {
type: "AGGREGATE"
} : AGGREGATE, lexer$1.has("COLON") ? {
type: "COLON"
} : COLON, lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID, lexer$1.has("OF") ? {
type: "OF"
} : OF, "parameter_type"],
"postprocess": d => ({
t: 'agt_lbld',
lbl: d[2].value,
spc: d[4],
p: pNfo$1(d[0])
})
}, {
// simple_types: BOOLEAN / INTEGER / LOGICAL / NUMBER keywords, plus the
// parameterizable REAL / STRING / BINARY variants below.
"name": "simple_types$subexpression$1",
"symbols": [lexer$1.has("BOOLEAN") ? {
type: "BOOLEAN"
} : BOOLEAN]
}, {
"name": "simple_types$subexpression$1",
"symbols": [lexer$1.has("INTEGER") ? {
type: "INTEGER"
} : INTEGER]
}, {
"name": "simple_types$subexpression$1",
"symbols": [lexer$1.has("LOGICAL") ? {
type: "LOGICAL"
} : LOGICAL]
}, {
"name": "simple_types$subexpression$1",
"symbols": [lexer$1.has("NUMBER") ? {
type: "NUMBER"
} : NUMBER]
}, {
// toSim/toReal/toStr/toBin/toEotRef are node-builder helpers defined
// elsewhere in this module; they receive the raw match arrays of the
// postprocess-less real_type/string_type/binary_type rules further down.
"name": "simple_types",
"symbols": ["simple_types$subexpression$1"],
"postprocess": d => ({
t: toSim(d)
})
}, {
"name": "simple_types",
"symbols": ["real_type"],
"postprocess": d => toReal(d[0])
}, {
"name": "simple_types",
"symbols": ["string_type"],
"postprocess": d => toStr(d[0])
}, {
"name": "simple_types",
"symbols": ["binary_type"],
"postprocess": d => toBin(d[0])
}, {
// named_types: a reference to a declared entity or defined type by id.
"name": "named_types",
"symbols": [lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID],
"postprocess": d => toEotRef(d[0])
}, {
// real_type: "REAL [(precision)]". No postprocess here: the raw
// [REAL token, precision_spec|null] array is consumed by toReal above.
"name": "real_type$ebnf$1",
"symbols": ["precision_spec"],
"postprocess": id$1
}, {
"name": "real_type$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "real_type",
"symbols": [lexer$1.has("REAL") ? {
type: "REAL"
} : REAL, "real_type$ebnf$1"]
}, {
// precision_spec: "( width-expr )" -> { width, p }.
"name": "precision_spec",
"symbols": [lexer$1.has("LP") ? {
type: "LP"
} : LP, "one_to_bounds_expr", lexer$1.has("RP") ? {
type: "RP"
} : RP],
"postprocess": d => ({
width: d[1],
p: pNfo$1(d[0])
})
}, {
// string_type: "STRING [(width) [FIXED]]". No postprocess: the raw
// [STRING token, width_spec|null] array is consumed by toStr above.
"name": "string_type$ebnf$1",
"symbols": ["width_spec"],
"postprocess": id$1
}, {
"name": "string_type$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "string_type",
"symbols": [lexer$1.has("STRING") ? {
type: "STRING"
} : STRING, "string_type$ebnf$1"]
}, {
// binary_type: "BINARY [(width) [FIXED]]"; raw array consumed by toBin.
"name": "binary_type$ebnf$1",
"symbols": ["width_spec"],
"postprocess": id$1
}, {
"name": "binary_type$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "binary_type",
"symbols": [lexer$1.has("BINARY") ? {
type: "BINARY"
} : BINARY, "binary_type$ebnf$1"]
}, {
// width_spec: "( width-expr ) [FIXED]" -> { width, fixed, p }.
"name": "width_spec$ebnf$1",
"symbols": [lexer$1.has("FIXED") ? {
type: "FIXED"
} : FIXED],
"postprocess": id$1
}, {
"name": "width_spec$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
"name": "width_spec",
"symbols": [lexer$1.has("LP") ? {
type: "LP"
} : LP, "one_to_bounds_expr", lexer$1.has("RP") ? {
type: "RP"
} : RP, "width_spec$ebnf$1"],
"postprocess": d => ({
width: d[1],
fixed: !!d[3],
p: pNfo$1(d[0])
})
}, {
// one_to_bounds_expr: either an INT literal (token passed through as-is)
// or an identifier, wrapped as a 'width_ref' node.
"name": "one_to_bounds_expr",
"symbols": [lexer$1.has("INT") ? {
type: "INT"
} : INT],
"postprocess": d => d[0]
}, {
"name": "one_to_bounds_expr",
"symbols": [lexer$1.has("SIMPLE_ID") ? {
type: "SIMPLE_ID"
} : SIMPLE_ID],
"postprocess": d => ({
t: 'width_ref',
id: d[0].value,
p: pNfo$1(d[0])
})
}, {
// explicit_attr: "attrs : [OPTIONAL] type ;" -> 'atr_dcl' node.
"name": "explicit_attr$ebnf$1",
"symbols": [lexer$1.has("OPTIONAL") ? {
type: "OPTIONAL"
} : OPTIONAL],
"postprocess": id$1
}, {
"name": "explicit_attr$ebnf$1",
"symbols": [],
"postprocess": function (d) {
return null;
}
}, {
// NOTE(review): p is pNfo$1(d[0]) where d[0] is the attr_list result rather
// than a token -- same pattern as local_variables; confirm pNfo$1 accepts it.
"name": "explicit_attr",
"symbols": ["attr_list", lexer$1.has("COLON") ? {
type: "COLON"
} : COLON, "explicit_attr$ebnf$1", "parameter_type", lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI],
"postprocess": d => ({
t: 'atr_dcl',
ids: toArr$1(d[0], 'a_atr_ids'),
opt: !!d[2],
typ: d[3],
p: pNfo$1(d[0])
})
}, {
// derived_attr: "attr : type := expression ;" -> 'drv' node; id keeps the
// whole "attr" result, xpr the derivation expression.
"name": "derived_attr",
"symbols": ["attr", lexer$1.has("COLON") ? {
type: "COLON"
} : COLON, "parameter_type", lexer$1.has("ASSIGN") ? {
type: "ASSIGN"
} : ASSIGN, "expression", lexer$1.has("SEMI") ? {
type: "SEMI"
} : SEMI],
"postprocess": d => ({
t: 'drv',
id: d[0],
typ: d[2],
xpr: d[4],
p: pNfo$1(d[0])
})
}, {
"name": "inverse_attr$ebnf$1$subexpression$1$subexpression$1",
"symbols": [lexer$1.has("SET") ? {
type: "SET"
} : SET]
}, {
"name": "inverse_attr$ebnf$1$subexpress