targaryen
Test Firebase security rules without connecting to Firebase.
// Generated automatically by nearley, version 2.13.0
// http://github.com/Hardmath123/nearley
(function () {
function id(x) { return x[0]; }
const moo = require('moo');
const token = require('./token');

// Two-state moo lexer: 'main' tokenizes the top level of the pattern and
// switches to 'set' inside '[...]' / '[^...]' character classes.
const lexer = moo.states({
  main: {
    bar: '|',
    caret: '^',
    comma: ',',
    dollar: '$',
    dot: '.',
    closingParenthesis: ')',
    openingParenthesis: '(',
    mark: '?',
    plus: '+',
    closingBrace: '}',
    openingBrace: '{',
    star: '*',
    negativeSetStart: {match: '[^', push: 'set'},
    positiveSetStart: {match: '[', push: 'set'},
    charset: ['\\s', '\\w', '\\d', '\\S', '\\W', '\\D'],
    literal: /\\./,
    number: /0|[1-9][0-9]*/,
    char: /./
  },
  set: {
    setEnd: {match: ']', pop: true},
    charset: ['\\s', '\\w', '\\d', '\\S', '\\W', '\\D'],
    range: /(?:\\.|[^\]])-(?:\\.|[^\]])/,
    literal: /\\./,
    char: /./
  }
});
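// Compiled nearley grammar. Each rule pairs a production with a postprocessor
// (mostly helpers from './token') that builds the corresponding node of the
// regexp AST.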
var grammar = {
Lexer: lexer,
ParserRules: [
{"name": "root$ebnf$1", "symbols": ["startRE"], "postprocess": id},
{"name": "root$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
{"name": "root$ebnf$2", "symbols": ["endRE"], "postprocess": id},
{"name": "root$ebnf$2", "symbols": [], "postprocess": function(d) {return null;}},
{"name": "root", "symbols": ["root$ebnf$1", "RE", "root$ebnf$2"], "postprocess": token.concatenation},
{"name": "startRE", "symbols": [(lexer.has("caret") ? {type: "caret"} : caret)], "postprocess": token.rename('startAnchor')},
{"name": "startRE", "symbols": ["positionalLiteral"], "postprocess": id},
{"name": "endRE", "symbols": [(lexer.has("dollar") ? {type: "dollar"} : dollar)], "postprocess": token.rename('endAnchor')},
{"name": "RE", "symbols": ["union"], "postprocess": id},
{"name": "RE", "symbols": ["simpleRE"], "postprocess": id},
{"name": "union", "symbols": ["RE", (lexer.has("bar") ? {type: "bar"} : bar), "simpleRE"], "postprocess": token.union},
{"name": "simpleRE", "symbols": ["concatenation"], "postprocess": id},
{"name": "simpleRE", "symbols": ["basicRE"], "postprocess": id},
{"name": "concatenation", "symbols": ["simpleRE", "basicRE"], "postprocess": token.concatenation},
{"name": "basicRE", "symbols": ["series"], "postprocess": id},
{"name": "basicRE", "symbols": ["elementaryRE"], "postprocess": id},
{"name": "series", "symbols": ["basicRE", "repeat"], "postprocess": token.series},
{"name": "repeat", "symbols": [(lexer.has("plus") ? {type: "plus"} : plus)], "postprocess": token.repeat(1)},
{"name": "repeat", "symbols": [(lexer.has("star") ? {type: "star"} : star)], "postprocess": token.repeat(0)},
{"name": "repeat", "symbols": [(lexer.has("mark") ? {type: "mark"} : mark)], "postprocess": token.repeat(0, 1)},
{"name": "repeat", "symbols": [(lexer.has("openingBrace") ? {type: "openingBrace"} : openingBrace), (lexer.has("number") ? {type: "number"} : number), (lexer.has("closingBrace") ? {type: "closingBrace"} : closingBrace)], "postprocess": data => token.repeat(data[1], data[1])(data)},
{"name": "repeat", "symbols": [(lexer.has("openingBrace") ? {type: "openingBrace"} : openingBrace), (lexer.has("number") ? {type: "number"} : number), (lexer.has("comma") ? {type: "comma"} : comma), (lexer.has("closingBrace") ? {type: "closingBrace"} : closingBrace)], "postprocess": data => token.repeat(data[1])(data)},
{"name": "repeat", "symbols": [(lexer.has("openingBrace") ? {type: "openingBrace"} : openingBrace), (lexer.has("number") ? {type: "number"} : number), (lexer.has("comma") ? {type: "comma"} : comma), (lexer.has("number") ? {type: "number"} : number), (lexer.has("closingBrace") ? {type: "closingBrace"} : closingBrace)], "postprocess": data => token.repeat(data[1], data[3])(data)},
{"name": "elementaryRE", "symbols": ["group"], "postprocess": id},
{"name": "elementaryRE", "symbols": ["char"], "postprocess": id},
{"name": "elementaryRE", "symbols": ["number"], "postprocess": id},
{"name": "elementaryRE", "symbols": ["set"], "postprocess": id},
{"name": "elementaryRE", "symbols": ["charset"], "postprocess": id},
{"name": "elementaryRE", "symbols": [(lexer.has("dot") ? {type: "dot"} : dot)], "postprocess": token.create},
{"name": "group", "symbols": [(lexer.has("openingParenthesis") ? {type: "openingParenthesis"} : openingParenthesis), "groupRE", (lexer.has("closingParenthesis") ? {type: "closingParenthesis"} : closingParenthesis)], "postprocess": token.group},
{"name": "groupRE$ebnf$1", "symbols": ["positionalLiteral"], "postprocess": id},
{"name": "groupRE$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
{"name": "groupRE", "symbols": ["groupRE$ebnf$1", "RE"], "postprocess": token.concatenation},
{"name": "char", "symbols": [(lexer.has("char") ? {type: "char"} : char)], "postprocess": token.create},
{"name": "char", "symbols": [(lexer.has("comma") ? {type: "comma"} : comma)], "postprocess": token.char},
{"name": "char", "symbols": [(lexer.has("literal") ? {type: "literal"} : literal)], "postprocess": token.char},
{"name": "char", "symbols": [(lexer.has("closingBrace") ? {type: "closingBrace"} : closingBrace)], "postprocess": token.char},
{"name": "number", "symbols": [(lexer.has("number") ? {type: "number"} : number)], "postprocess": token.create},
{"name": "charset", "symbols": [(lexer.has("charset") ? {type: "charset"} : charset)], "postprocess": token.create},
{"name": "set$ebnf$1", "symbols": ["setItem"]},
{"name": "set$ebnf$1", "symbols": ["set$ebnf$1", "setItem"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
{"name": "set", "symbols": [(lexer.has("positiveSetStart") ? {type: "positiveSetStart"} : positiveSetStart), "set$ebnf$1", (lexer.has("setEnd") ? {type: "setEnd"} : setEnd)], "postprocess": token.set(true)},
{"name": "set$ebnf$2", "symbols": ["setItem"]},
{"name": "set$ebnf$2", "symbols": ["set$ebnf$2", "setItem"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
{"name": "set", "symbols": [(lexer.has("negativeSetStart") ? {type: "negativeSetStart"} : negativeSetStart), "set$ebnf$2", (lexer.has("setEnd") ? {type: "setEnd"} : setEnd)], "postprocess": token.set(false)},
{"name": "setItem", "symbols": [(lexer.has("range") ? {type: "range"} : range)], "postprocess": token.range},
{"name": "setItem", "symbols": ["char"], "postprocess": id},
{"name": "setItem", "symbols": ["charset"], "postprocess": id},
{"name": "positionalLiteral", "symbols": ["meta"], "postprocess": token.char},
{"name": "meta", "symbols": [(lexer.has("bar") ? {type: "bar"} : bar)], "postprocess": id},
{"name": "meta", "symbols": [(lexer.has("closingParenthesis") ? {type: "closingParenthesis"} : closingParenthesis)], "postprocess": id},
{"name": "meta", "symbols": [(lexer.has("openingParenthesis") ? {type: "openingParenthesis"} : openingParenthesis)], "postprocess": id},
{"name": "meta", "symbols": [(lexer.has("mark") ? {type: "mark"} : mark)], "postprocess": id},
{"name": "meta", "symbols": [(lexer.has("plus") ? {type: "plus"} : plus)], "postprocess": id},
{"name": "meta", "symbols": [(lexer.has("closingBrace") ? {type: "closingBrace"} : closingBrace)], "postprocess": id},
{"name": "meta", "symbols": [(lexer.has("openingBrace") ? {type: "openingBrace"} : openingBrace)], "postprocess": id},
{"name": "meta", "symbols": [(lexer.has("star") ? {type: "star"} : star)], "postprocess": id}
]
, ParserStart: "root"
}
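// Export as a CommonJS module when possible, otherwise attach to the browser
// window.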
if (typeof module !== 'undefined' && typeof module.exports !== 'undefined') {
  module.exports = grammar;
} else {
  window.grammar = grammar;
}
})();
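// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the generated file). The compiled
// grammar above is meant to be consumed by nearley's runtime; the shape of the
// resulting AST depends entirely on the postprocessors in './token', so the
// require path and the input pattern below are only assumptions.
//
//   const nearley = require('nearley');
//   const grammar = require('./grammar');   // assuming this file is grammar.js
//
//   const parser = new nearley.Parser(nearley.Grammar.fromCompiled(grammar));
//   parser.feed('^a[b-d]{2,3}$');   // tokenized by the moo lexer defined above
//   const ast = parser.results[0];  // first parse; nodes are built by ./token
// ---------------------------------------------------------------------------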