antlr4ng
Alternative JavaScript/TypeScript runtime for ANTLR4
1,722 lines (1,696 loc) • 168 kB
JavaScript
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
var __export = (target, all) => {
for (var name in all)
__defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
if (from && typeof from === "object" || typeof from === "function") {
for (let key of __getOwnPropNames(from))
if (!__hasOwnProp.call(to, key) && key !== except)
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
}
return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// src/atn/LexerATNSimulator.ts
var LexerATNSimulator_exports = {};
__export(LexerATNSimulator_exports, {
LexerATNSimulator: () => LexerATNSimulator
});
module.exports = __toCommonJS(LexerATNSimulator_exports);
// src/IntStream.ts
var IntStream;
((IntStream2) => {
IntStream2.EOF = -1;
IntStream2.UNKNOWN_SOURCE_NAME = "<unknown>";
})(IntStream || (IntStream = {}));
// src/Token.ts
var Token;
((Token2) => {
Token2.INVALID_TYPE = 0;
Token2.EPSILON = -2;
Token2.MIN_USER_TOKEN_TYPE = 1;
Token2.EOF = IntStream.EOF;
Token2.DEFAULT_CHANNEL = 0;
Token2.HIDDEN_CHANNEL = 1;
Token2.MIN_USER_CHANNEL_VALUE = 2;
})(Token || (Token = {}));
var isToken = /* @__PURE__ */ __name((candidate) => {
const token = candidate;
return token.tokenSource !== void 0 && token.channel !== void 0;
}, "isToken");
// src/BaseErrorListener.ts
var BaseErrorListener = class {
static {
__name(this, "BaseErrorListener");
}
syntaxError(recognizer, offendingSymbol, line, column, msg, e) {
}
reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) {
}
reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) {
}
reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) {
}
};
// src/ConsoleErrorListener.ts
var ConsoleErrorListener = class _ConsoleErrorListener extends BaseErrorListener {
static {
__name(this, "ConsoleErrorListener");
}
/**
* Provides a default instance of {@link ConsoleErrorListener}.
*/
static instance = new _ConsoleErrorListener();
syntaxError(recognizer, offendingSymbol, line, charPositionInLine, msg, _e) {
console.error("line " + line + ":" + charPositionInLine + " " + msg);
}
};
// src/ProxyErrorListener.ts
var ProxyErrorListener = class extends BaseErrorListener {
constructor(delegates) {
super();
this.delegates = delegates;
return this;
}
static {
__name(this, "ProxyErrorListener");
}
syntaxError(recognizer, offendingSymbol, line, column, msg, e) {
this.delegates.forEach((d) => {
d.syntaxError(recognizer, offendingSymbol, line, column, msg, e);
});
}
reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) {
this.delegates.forEach((d) => {
d.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs);
});
}
reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) {
this.delegates.forEach((d) => {
d.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs);
});
}
reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) {
this.delegates.forEach((d) => {
d.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs);
});
}
};
// src/Recognizer.ts
var Recognizer = class _Recognizer {
static {
__name(this, "Recognizer");
}
static EOF = -1;
static tokenTypeMapCache = /* @__PURE__ */ new Map();
static ruleIndexMapCache = /* @__PURE__ */ new Map();
interpreter;
listeners = [ConsoleErrorListener.instance];
stateNumber = -1;
checkVersion(toolVersion) {
const runtimeVersion = "4.13.1";
if (runtimeVersion !== toolVersion) {
console.error("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion);
}
}
addErrorListener(listener) {
this.listeners.push(listener);
}
removeErrorListeners() {
this.listeners = [];
}
removeErrorListener(listener) {
for (let i = 0; i < this.listeners.length; i++) {
if (this.listeners[i] === listener) {
this.listeners.splice(i, 1);
return;
}
}
}
getErrorListeners() {
return this.listeners;
}
getTokenTypeMap() {
const vocabulary = this.vocabulary;
let result = _Recognizer.tokenTypeMapCache.get(vocabulary);
if (!result) {
result = /* @__PURE__ */ new Map();
for (let i = 0; i <= this.atn.maxTokenType; i++) {
const literalName = vocabulary.getLiteralName(i);
if (literalName) {
result.set(literalName, i);
}
const symbolicName = vocabulary.getSymbolicName(i);
if (symbolicName) {
result.set(symbolicName, i);
}
}
result.set("EOF", Token.EOF);
_Recognizer.tokenTypeMapCache.set(vocabulary, result);
}
return result;
}
/**
* Get a map from rule names to rule indexes.
* Used for XPath and tree pattern compilation.
*/
getRuleIndexMap() {
const ruleNames = this.ruleNames;
let result = _Recognizer.ruleIndexMapCache.get(ruleNames);
if (!result) {
result = /* @__PURE__ */ new Map();
ruleNames.forEach((ruleName, idx) => {
return result.set(ruleName, idx);
});
_Recognizer.ruleIndexMapCache.set(ruleNames, result);
}
return result;
}
getTokenType(tokenName) {
const ttype = this.getTokenTypeMap().get(tokenName);
if (ttype) {
return ttype;
}
return Token.INVALID_TYPE;
}
/** What is the error header, normally line/character position information? */
getErrorHeader(e) {
const line = e.offendingToken?.line;
const column = e.offendingToken?.column;
return "line " + line + ":" + column;
}
get errorListenerDispatch() {
return new ProxyErrorListener(this.listeners);
}
/**
* subclass needs to override these if there are semantic predicates or actions
* that the ATN interp needs to execute
*/
sempred(_localctx, _ruleIndex, _actionIndex) {
return true;
}
// TODO: make localCtx an optional parameter, not optional null.
precpred(_localctx, _precedence) {
return true;
}
action(_localctx, _ruleIndex, _actionIndex) {
}
get atn() {
return this.interpreter.atn;
}
get state() {
return this.stateNumber;
}
set state(state) {
this.stateNumber = state;
}
getParseInfo() {
return void 0;
}
};
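/*
 * Usage sketch (illustrative): error listeners are managed on any Recognizer
 * subclass (Lexer or Parser). Swapping the default ConsoleErrorListener for a
 * custom collector is a common pattern; `recognizer` is assumed to be an
 * existing lexer or parser instance.
 *
 *   class CollectingListener extends BaseErrorListener {
 *       errors = [];
 *       syntaxError(recognizer, offendingSymbol, line, column, msg, e) {
 *           this.errors.push(`${line}:${column} ${msg}`);
 *       }
 *   }
 *
 *   recognizer.removeErrorListeners();  // drop console output
 *   recognizer.addErrorListener(new CollectingListener());
 */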
// src/CommonToken.ts
var CommonToken = class _CommonToken {
static {
__name(this, "CommonToken");
}
/**
* An empty tuple which is used as the default value of
* {@link source} for tokens that do not have a source.
*/
// eslint-disable-next-line @typescript-eslint/naming-convention
static EMPTY_SOURCE = [null, null];
/**
* These properties share a field to reduce the memory footprint of
* {@link CommonToken}. Tokens created by a {@link CommonTokenFactory} from
* the same source and input stream share a reference to the same
* {@link Pair} containing these values.
*/
source;
tokenIndex;
start;
stop;
/**
* This is the backing field for {@link #getType} and {@link #setType}.
*/
type;
/**
* The (one-based) line number on which the first character of this token was matched.
*/
line;
/**
* The zero-based index of the first character position in its line.
*/
column;
/**
* The token's channel.
*/
channel;
/**
* This is the backing field for {@link getText} when the token text is
* explicitly set in the constructor or via {@link setText}.
*/
#text;
constructor(details) {
this.type = details.type;
this.source = details.source;
this.tokenIndex = details.tokenIndex ?? -1;
this.line = details.line ?? 0;
this.column = details.column ?? -1;
this.channel = details.channel ?? Token.DEFAULT_CHANNEL;
this.start = details.start ?? 0;
this.stop = details.stop ?? 0;
this.#text = details.text;
if (details.line === void 0 && details.source[0] !== null) {
this.line = details.source[0].line;
}
if (details.column === void 0 && details.source[0] !== null) {
this.column = details.source[0].column;
}
}
/**
* Constructs a new {@link CommonToken} as a copy of another {@link Token}.
*
* If `token` is also a {@link CommonToken} instance, the newly
* constructed token will share a reference to the {@link #text} field and
* the {@link Pair} stored in {@link source}. Otherwise, {@link text} will
* be assigned the result of calling {@link getText}, and {@link source}
* will be constructed from the result of {@link Token.getTokenSource} and
* {@link Token#getInputStream}.
*
* @param token The token to copy.
*/
static fromToken(token) {
const source = [token.tokenSource, token.inputStream];
return new _CommonToken({
type: token.type,
line: token.line,
tokenIndex: token.tokenIndex,
column: token.column,
channel: token.channel,
start: token.start,
stop: token.stop,
text: token.text,
source
});
}
/**
* Constructs a new {@link CommonToken} with the specified token type and text.
*
* @param type The token type.
* @param text The text of the token.
*/
static fromType(type, text) {
return new _CommonToken({ type, text, source: _CommonToken.EMPTY_SOURCE });
}
static fromSource(source, type, channel, start, stop) {
return new _CommonToken({ type, channel, start, stop, source });
}
get tokenSource() {
return this.source[0];
}
get inputStream() {
return this.source[1];
}
set inputStream(input) {
this.source[1] = input;
}
/**
* Constructs a new {@link CommonToken} as a copy of another {@link Token}.
*
* If `oldToken` is also a {@link CommonToken} instance, the newly
* constructed token will share a reference to the {@link text} field and
* the {@link Pair} stored in {@link source}. Otherwise, {@link text} will
* be assigned the result of calling {@link getText}, and {@link source}
* will be constructed from the result of {@link Token.getTokenSource} and
* {@link Token.getInputStream}.
*/
clone() {
const t = new _CommonToken({
source: this.source,
type: this.type,
channel: this.channel,
start: this.start,
stop: this.stop,
tokenIndex: this.tokenIndex,
line: this.line,
column: this.column,
text: this.#text
});
return t;
}
toString(recognizer) {
let channelStr = "";
if (this.channel > 0) {
channelStr = ",channel=" + this.channel;
}
let text = this.text;
if (text) {
text = text.replace(/\n/g, "\\n");
text = text.replace(/\r/g, "\\r");
text = text.replace(/\t/g, "\\t");
} else {
text = "<no text>";
}
let typeString = String(this.type);
if (recognizer) {
typeString = recognizer.vocabulary.getDisplayName(this.type) ?? "<unknown>";
}
return "[@" + this.tokenIndex + "," + this.start + ":" + this.stop + "='" + text + "',<" + typeString + ">" + channelStr + "," + this.line + ":" + this.column + "]";
}
get text() {
if (this.#text !== void 0) {
return this.#text;
}
const input = this.inputStream;
if (!input) {
return void 0;
}
const n2 = input.size;
if (this.start < n2 && this.stop < n2) {
return input.getTextFromRange(this.start, this.stop);
}
return "<EOF>";
}
set text(text) {
this.#text = text;
}
// WritableToken implementation
setText(text) {
this.#text = text;
}
setType(ttype) {
this.type = ttype;
}
setLine(line) {
this.line = line;
}
setCharPositionInLine(pos) {
this.column = pos;
}
setChannel(channel) {
this.channel = channel;
}
setTokenIndex(index) {
this.tokenIndex = index;
}
};
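/*
 * Usage sketch (illustrative): tokens are normally produced by the lexer, but
 * the static factories allow building them directly, e.g. for tree rewriting
 * or tests. Type 1 below is an arbitrary user token type.
 *
 *   const token = CommonToken.fromType(1, "hello");
 *   console.log(token.text);       // "hello"
 *   console.log(token.toString()); // [@-1,0:0='hello',<1>,0:-1]
 */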
// src/CommonTokenFactory.ts
var CommonTokenFactory = class _CommonTokenFactory {
static {
__name(this, "CommonTokenFactory");
}
/**
* The default {@link CommonTokenFactory} instance.
*
* This token factory does not explicitly copy token text when constructing
* tokens.
*/
static DEFAULT = new _CommonTokenFactory();
/**
* Indicates whether {@link CommonToken.setText} should be called after
* constructing tokens to explicitly set the text. This is useful for cases
* where the input stream might not be able to provide arbitrary substrings
* of text from the input after the lexer creates a token (e.g. the
* implementation of {@link CharStream.getText} in
* {@link UnbufferedCharStream} throws an
* {@link UnsupportedOperationException}). Explicitly setting the token text
* allows {@link Token.getText} to be called at any time regardless of the
* input stream implementation.
*
* The default value is `false` to avoid the performance and memory
* overhead of copying text for every token unless explicitly requested.
*/
copyText = false;
constructor(copyText) {
this.copyText = copyText ?? false;
}
create(source, type, text, channel, start, stop, line, column) {
const t = CommonToken.fromSource(source, type, channel, start, stop);
t.line = line;
t.column = column;
if (text) {
t.text = text;
} else if (this.copyText && source[1] !== null) {
t.text = source[1].getTextFromRange(start, stop);
}
return t;
}
};
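/*
 * Usage sketch (illustrative): a factory constructed with copyText enabled
 * eagerly copies token text out of the char stream, which matters for
 * unbuffered streams that cannot serve substrings later. `lexer` is assumed
 * to be an existing Lexer instance.
 *
 *   const factory = new CommonTokenFactory(true); // copyText = true
 *   lexer.tokenFactory = factory;
 */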
// src/RecognitionException.ts
var RecognitionException = class _RecognitionException extends Error {
static {
__name(this, "RecognitionException");
}
ctx;
/**
* The current {@link Token} when an error occurred. Since not all streams
* support accessing symbols by index, we have to track the {@link Token}
* instance itself
*/
offendingToken = null;
/**
* Get the ATN state number the parser was in at the time the error
* occurred. For {@link NoViableAltException} and
* {@link LexerNoViableAltException} exceptions, this is the
* {@link DecisionState} number. For others, it is the state whose outgoing
* edge we couldn't match. If the state number is not known, this value is -1.
*/
offendingState = -1;
recognizer;
input;
constructor(params) {
super(params.message);
if (Error.captureStackTrace) {
Error.captureStackTrace(this, _RecognitionException);
}
this.message = params.message;
this.recognizer = params.recognizer;
this.input = params.input;
this.ctx = params.ctx;
if (this.recognizer !== null) {
this.offendingState = this.recognizer.state;
}
}
/**
* Gets the set of input symbols which could potentially follow the
* previously matched symbol at the time this exception was thrown.
*
* If the set of expected tokens is not known and could not be computed,
* this method returns `null`.
*
* @returns The set of token types that could potentially follow the current
* state in the ATN, or `null` if the information is not available.
*/
getExpectedTokens() {
if (this.recognizer !== null && this.ctx !== null) {
return this.recognizer.atn.getExpectedTokens(this.offendingState, this.ctx);
} else {
return null;
}
}
toString() {
return this.message;
}
};
// src/LexerNoViableAltException.ts
var LexerNoViableAltException = class extends RecognitionException {
static {
__name(this, "LexerNoViableAltException");
}
startIndex;
deadEndConfigs;
constructor(lexer, input, startIndex, deadEndConfigs) {
super({ message: "", recognizer: lexer, input, ctx: null });
this.startIndex = startIndex;
this.deadEndConfigs = deadEndConfigs;
}
toString() {
let symbol = "";
if (this.input && this.startIndex >= 0 && this.startIndex < this.input.size) {
symbol = this.input.getTextFromRange(this.startIndex, this.startIndex);
}
return `LexerNoViableAltException(${symbol})`;
}
};
// src/Lexer.ts
var Lexer = class _Lexer extends Recognizer {
static {
__name(this, "Lexer");
}
static DEFAULT_MODE = 0;
static MORE = -2;
static SKIP = -3;
static DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL;
static HIDDEN = Token.HIDDEN_CHANNEL;
options = {
minDFAEdge: 0,
maxDFAEdge: 256,
minCodePoint: 0,
maxCodePoint: 1114111
};
/**
* What character index in the stream did the current token start at?
* Needed, for example, to get the text for current token. Set at
* the start of nextToken.
*/
tokenStartCharIndex = -1;
/** The channel number for the current token */
channel = 0;
/** The token type for the current token */
type = 0;
mode = _Lexer.DEFAULT_MODE;
/** The start column of the current token (the one that was last read by `nextToken`). */
currentTokenColumn = 0;
/**
* The line on which the first character of the current token (the one that was last read by `nextToken`) resides.
*/
currentTokenStartLine = 0;
input;
/**
* The goal of all lexer rules/methods is to create a token object.
* This is an instance variable as multiple rules may collaborate to
* create a single token. nextToken will return this object after
* matching lexer rule(s). If you subclass to allow multiple token
* emissions, then set this to the last token to be matched or
* something non-null so that the auto token emit mechanism will not
* emit another token.
*/
token = null;
/**
* Once we see EOF on char stream, next token will be EOF.
* If you have DONE : EOF ; then you see DONE EOF.
*/
hitEOF = false;
factory;
#modeStack = [];
/**
* The text to be used for the next token. If this is not null, then the text
* for the next token is fixed and is not subject to change in the normal
* workflow of the lexer.
*/
#text;
constructor(input, options) {
super();
this.options = { ...this.options, ...options };
this.input = input;
this.factory = CommonTokenFactory.DEFAULT;
}
reset(seekBack = true) {
if (seekBack) {
this.input.seek(0);
}
this.token = null;
this.type = Token.INVALID_TYPE;
this.channel = Token.DEFAULT_CHANNEL;
this.tokenStartCharIndex = -1;
this.currentTokenColumn = -1;
this.currentTokenStartLine = -1;
this.#text = void 0;
this.hitEOF = false;
this.mode = _Lexer.DEFAULT_MODE;
this.#modeStack = [];
this.interpreter.reset();
}
/** @returns a token from this source; i.e., match a token on the char stream. */
nextToken() {
if (this.input === null) {
throw new Error("nextToken requires a non-null input stream.");
}
const tokenStartMarker = this.input.mark();
try {
while (true) {
if (this.hitEOF) {
this.emitEOF();
return this.token;
}
this.token = null;
this.channel = Token.DEFAULT_CHANNEL;
this.tokenStartCharIndex = this.input.index;
this.currentTokenColumn = this.interpreter.column;
this.currentTokenStartLine = this.interpreter.line;
this.#text = void 0;
let continueOuter = false;
while (true) {
this.type = Token.INVALID_TYPE;
let ttype = _Lexer.SKIP;
try {
ttype = this.interpreter.match(this.input, this.mode);
} catch (e) {
if (e instanceof LexerNoViableAltException) {
this.notifyListeners(e);
this.recover(e);
} else {
throw e;
}
}
if (this.input.LA(1) === Token.EOF) {
this.hitEOF = true;
}
if (this.type === Token.INVALID_TYPE) {
this.type = ttype;
}
if (this.type === _Lexer.SKIP) {
continueOuter = true;
break;
}
if (this.type !== _Lexer.MORE) {
break;
}
}
if (continueOuter) {
continue;
}
if (this.token === null) {
this.emit();
}
return this.token;
}
} finally {
this.input.release(tokenStartMarker);
}
}
/**
* Instruct the lexer to skip creating a token for current lexer rule
* and look for another token. nextToken() knows to keep looking when
* a lexer rule finishes with token set to SKIP_TOKEN. Recall that
* if token==null at end of any token rule, it creates one for you
* and emits it.
*/
skip() {
this.type = _Lexer.SKIP;
}
more() {
this.type = _Lexer.MORE;
}
pushMode(m2) {
if (LexerATNSimulator.debug) {
console.log("pushMode " + m2);
}
this.#modeStack.push(this.mode);
this.mode = m2;
}
popMode() {
if (this.#modeStack.length === 0) {
throw new Error("Empty Stack");
}
if (LexerATNSimulator.debug) {
console.log("popMode back to " + this.#modeStack.slice(0, -1));
}
this.mode = this.#modeStack.pop();
return this.mode;
}
get modeStack() {
return this.#modeStack;
}
/**
* By default does not support multiple emits per nextToken invocation
* for efficiency reasons. Subclass and override this method, nextToken,
* and getToken (to push tokens into a list and pull from that list
* rather than a single variable as this implementation does).
*/
emitToken(token) {
this.token = token;
}
/**
* The standard method called to automatically emit a token at the
* outermost lexical rule. The token object should point into the
* char buffer start..stop. If there is a text override in 'text',
* use that to set the token's text. Override this method to emit
* custom Token objects or provide a new factory.
*/
emit() {
const t = this.factory.create(
[this, this.input],
this.type,
this.#text,
this.channel,
this.tokenStartCharIndex,
this.getCharIndex() - 1,
this.currentTokenStartLine,
this.currentTokenColumn
);
this.emitToken(t);
return t;
}
emitEOF() {
const eof = this.factory.create(
[this, this.input],
Token.EOF,
void 0,
Token.DEFAULT_CHANNEL,
this.input.index,
this.input.index - 1,
this.line,
this.column
);
this.emitToken(eof);
return eof;
}
/** What is the index of the current character of lookahead? */
getCharIndex() {
return this.input.index;
}
/**
* Return a list of all Token objects in input char stream.
* Forces load of all tokens. Does not include EOF token.
*/
getAllTokens() {
const tokens = [];
let t = this.nextToken();
while (t.type !== Token.EOF) {
tokens.push(t);
t = this.nextToken();
}
return tokens;
}
notifyListeners(e) {
const start = this.tokenStartCharIndex;
const stop = this.input.index;
const text = this.input.getTextFromRange(start, stop);
const msg = "token recognition error at: '" + this.getErrorDisplay(text) + "'";
this.errorListenerDispatch.syntaxError(this, null, this.currentTokenStartLine, this.currentTokenColumn, msg, e);
}
getErrorDisplay(s) {
return s;
}
getErrorDisplayForChar(c) {
if (c.charCodeAt(0) === Token.EOF) {
return "<EOF>";
}
if (c === "\n") {
return "\\n";
}
if (c === " ") {
return "\\t";
}
if (c === "\r") {
return "\\r";
}
return c;
}
getCharErrorDisplay(c) {
return "'" + this.getErrorDisplayForChar(c) + "'";
}
/**
* Lexers can normally match any char in its vocabulary after matching
* a token, so do the easy thing and just kill a character and hope
* it all works out. You can instead use the rule invocation stack
* to do sophisticated error recovery if you are in a fragment rule.
*/
recover(re) {
if (this.input.LA(1) !== Token.EOF) {
if (re instanceof LexerNoViableAltException) {
this.interpreter.consume(this.input);
} else {
this.input.consume();
}
}
}
get inputStream() {
return this.input;
}
set inputStream(input) {
this.reset(false);
this.input = input;
}
set tokenFactory(factory) {
this.factory = factory;
}
get tokenFactory() {
return this.factory;
}
get sourceName() {
return this.input.getSourceName();
}
get line() {
return this.interpreter.line;
}
set line(line) {
this.interpreter.line = line;
}
get column() {
return this.interpreter.column;
}
set column(column) {
this.interpreter.column = column;
}
get text() {
if (this.#text) {
return this.#text;
} else {
return this.interpreter.getText(this.input);
}
}
set text(text) {
this.#text = text;
}
};
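/*
 * Usage sketch (illustrative): driving a generated lexer directly. MyLexer is
 * a hypothetical grammar-generated subclass of Lexer, and CharStream.fromString
 * is assumed to be available from the same runtime.
 *
 *   const input = CharStream.fromString("1 + 2");
 *   const lexer = new MyLexer(input);
 *   for (const token of lexer.getAllTokens()) {
 *       console.log(token.toString(lexer));
 *   }
 */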
// src/dfa/DFASerializer.ts
var DFASerializer = class {
static {
__name(this, "DFASerializer");
}
dfa;
vocabulary;
constructor(dfa, vocabulary) {
this.dfa = dfa;
this.vocabulary = vocabulary;
}
toString() {
if (!this.dfa.s0) {
return "";
}
let buf = "";
const states = this.dfa.getStates();
for (const s of states) {
const n2 = s.edges.length;
for (let i = 0; i < n2; i++) {
const t = s.edges[i];
if (t && t.stateNumber !== 2147483647) {
buf += this.getStateString(s);
const label = this.getEdgeLabel(i);
buf += "-";
buf += label;
buf += "->";
buf += this.getStateString(t);
buf += "\n";
}
}
}
return buf;
}
getEdgeLabel(i) {
const name = this.vocabulary.getDisplayName(i - 1);
return `${name}`;
}
getStateString(s) {
const n2 = s.stateNumber;
const baseStateStr = (s.isAcceptState ? ":" : "") + "s" + n2 + (s.requiresFullContext ? "^" : "");
if (s.isAcceptState) {
if (s.predicates !== null) {
return `${baseStateStr}=>${s.predicates.toString()}`;
}
return `${baseStateStr}=>${s.prediction}`;
} else {
return `${baseStateStr}`;
}
}
};
// src/utils/helpers.ts
var valueToString = /* @__PURE__ */ __name((v) => {
return v === null ? "null" : v;
}, "valueToString");
var arrayToString = /* @__PURE__ */ __name((value) => {
return Array.isArray(value) ? "[" + value.map(valueToString).join(", ") + "]" : "null";
}, "arrayToString");
var equalArrays = /* @__PURE__ */ __name((a, b) => {
if (a === b) {
return true;
}
if (a.length !== b.length) {
return false;
}
for (let i = 0; i < a.length; i++) {
const left = a[i];
const right = b[i];
if (left === right) {
continue;
}
if (!left || !left.equals(right)) {
return false;
}
}
return true;
}, "equalArrays");
var equalNumberArrays = /* @__PURE__ */ __name((a, b) => {
if (a === b) {
return true;
}
if (a.length !== b.length) {
return false;
}
for (let i = 0; i < a.length; i++) {
if (a[i] !== b[i]) {
return false;
}
}
return true;
}, "equalNumberArrays");
var escapeWhitespace = /* @__PURE__ */ __name((s, escapeSpaces = false) => {
s = s.replace(/\t/g, "\\t").replace(/\n/g, "\\n").replace(/\r/g, "\\r");
if (escapeSpaces) {
s = s.replace(/ /g, "\xB7");
}
return s;
}, "escapeWhitespace");
// src/dfa/DFAState.ts
var DFAState = class _DFAState {
static {
__name(this, "DFAState");
}
stateNumber = -1;
configs;
/**
* `edges[symbol]` points to target of symbol. Shift up by 1 so (-1) {@link Token.EOF} maps to `edges[0]`.
*/
edges = [];
isAcceptState = false;
/**
* If accept state, what ttype do we match or alt do we predict? This is set to {@link ATN.INVALID_ALT_NUMBER}
* when {@link predicates} `!= null` or {@link requiresFullContext}.
*/
prediction = -1;
lexerActionExecutor = null;
/**
* Indicates that this state was created during SLL prediction that discovered a conflict between the configurations
* in the state. Future {@link ParserATNSimulator.execATN} invocations immediately jump to
* full context prediction if this field is true.
*/
requiresFullContext = false;
/**
* During SLL parsing, this is a list of predicates associated with the ATN configurations of the DFA state.
* When we have predicates, {@link requiresFullContext} is `false` since full context prediction evaluates
* predicates on-the-fly. If this is not null, then {@link prediction} is `ATN.INVALID_ALT_NUMBER`.
*
* We only use these for non-{@link #requiresFullContext} but conflicting states. That
* means we know from the context (it's $ or we don't dip into outer
* context) that it's an ambiguity not a conflict.
*
* This list is computed by {@link ParserATNSimulator#predicateDFAState}.
*/
predicates = null;
constructor(configs) {
if (configs) {
this.configs = configs;
}
}
static fromState(stateNumber) {
const result = new _DFAState();
result.stateNumber = stateNumber;
return result;
}
static fromConfigs(configs) {
return new _DFAState(configs);
}
static hashCode(state) {
return state.configs.hashCode();
}
/**
* Two {@link DFAState} instances are equal if their ATN configuration sets
* are the same. This method is used to see if a state already exists.
*
* Because the number of alternatives and number of ATN configurations are
* finite, there is a finite number of DFA states that can be processed.
* This is necessary to show that the algorithm terminates.
*
* Cannot test the DFA state numbers here because in
* {@link ParserATNSimulator#addDFAState} we need to know if any other state
* exists that has this exact set of ATN configurations. The
* {@link #stateNumber} is irrelevant.
*
* @param a The first {@link DFAState}.
* @param b The second {@link DFAState}.
*
* @returns `true` if the two states are equal, otherwise `false`.
*/
static equals(a, b) {
return a.configs.equals(b.configs);
}
/**
* @returns the set of all alts mentioned by all ATN configurations in this DFA state.
*/
getAltSet() {
const alts = /* @__PURE__ */ new Set();
for (const config of this.configs) {
alts.add(config.alt);
}
if (alts.size === 0) {
return null;
}
return alts;
}
toString() {
let buf = "";
buf += this.stateNumber;
buf += ":";
buf += this.configs ? this.configs.toString() : "";
if (this.isAcceptState) {
buf += "=>";
if (this.predicates) {
buf += arrayToString(this.predicates);
} else {
buf += this.prediction;
}
}
return buf.toString();
}
};
// src/Vocabulary.ts
var Vocabulary = class _Vocabulary {
static {
__name(this, "Vocabulary");
}
static EMPTY_NAMES = [];
/**
* Gets an empty {@link Vocabulary} instance.
*
* No literal or symbol names are assigned to token types, so
* {@link #getDisplayName(int)} returns the numeric value for all tokens
* except {@link Token#EOF}.
*/
static EMPTY_VOCABULARY = new _Vocabulary(_Vocabulary.EMPTY_NAMES, _Vocabulary.EMPTY_NAMES, _Vocabulary.EMPTY_NAMES);
maxTokenType;
literalNames;
symbolicNames;
displayNames;
/**
* Constructs a new instance of {@link Vocabulary} from the specified
* literal, symbolic, and display token names.
*
* @param literalNames The literal names assigned to tokens, or `null`
* if no literal names are assigned.
* @param symbolicNames The symbolic names assigned to tokens, or
* `null` if no symbolic names are assigned.
* @param displayNames The display names assigned to tokens, or `null`
* to use the values in `literalNames` and `symbolicNames` as
* the source of display names, as described in
* {@link #getDisplayName(int)}.
*/
constructor(literalNames, symbolicNames, displayNames) {
this.literalNames = literalNames ?? _Vocabulary.EMPTY_NAMES;
this.symbolicNames = symbolicNames ?? _Vocabulary.EMPTY_NAMES;
this.displayNames = displayNames ?? _Vocabulary.EMPTY_NAMES;
this.maxTokenType = Math.max(this.displayNames.length, Math.max(
this.literalNames.length,
this.symbolicNames.length
)) - 1;
}
/**
* Returns a {@link Vocabulary} instance from the specified set of token
* names. This method acts as a compatibility layer for the single
* `tokenNames` array generated by previous releases of ANTLR.
*
* The resulting vocabulary instance returns `null` for
* {@link getLiteralName getLiteralName(int)} and {@link getSymbolicName getSymbolicName(int)}, and the
* value from `tokenNames` for the display names.
*
* @param tokenNames The token names, or `null` if no token names are
* available.
* @returns A {@link Vocabulary} instance which uses `tokenNames` for
* the display names of tokens.
*/
static fromTokenNames(tokenNames) {
if (tokenNames == null || tokenNames.length === 0) {
return _Vocabulary.EMPTY_VOCABULARY;
}
const literalNames = [...tokenNames];
const symbolicNames = [...tokenNames];
for (let i = 0; i < tokenNames.length; i++) {
const tokenName = tokenNames[i];
if (tokenName == null) {
continue;
}
if (tokenName.length > 0) {
const firstChar = tokenName.codePointAt(0);
if (firstChar === 39) {
symbolicNames[i] = null;
continue;
} else if (firstChar >= 65 && firstChar <= 90) {
literalNames[i] = null;
continue;
}
}
literalNames[i] = null;
symbolicNames[i] = null;
}
return new _Vocabulary(literalNames, symbolicNames, tokenNames);
}
getMaxTokenType() {
return this.maxTokenType;
}
getLiteralName(tokenType) {
if (tokenType >= 0 && tokenType < this.literalNames.length) {
return this.literalNames[tokenType];
}
return null;
}
getSymbolicName(tokenType) {
if (tokenType >= 0 && tokenType < this.symbolicNames.length) {
return this.symbolicNames[tokenType];
}
if (tokenType === Token.EOF) {
return "EOF";
}
return null;
}
getDisplayName(tokenType) {
if (tokenType >= 0 && tokenType < this.displayNames.length) {
const displayName = this.displayNames[tokenType];
if (displayName != null) {
return displayName;
}
}
const literalName = this.getLiteralName(tokenType);
if (literalName != null) {
return literalName;
}
const symbolicName = this.getSymbolicName(tokenType);
if (symbolicName != null) {
return symbolicName;
}
return `${tokenType}`;
}
getLiteralNames() {
return this.literalNames;
}
getSymbolicNames() {
return this.symbolicNames;
}
getDisplayNames() {
return this.displayNames;
}
};
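/*
 * Usage sketch (illustrative): building a vocabulary from a legacy tokenNames
 * array. Quoted entries become literal names; entries starting with an
 * uppercase letter become symbolic names.
 *
 *   const vocabulary = Vocabulary.fromTokenNames([null, "'+'", "ID"]);
 *   vocabulary.getLiteralName(1);  // "'+'"
 *   vocabulary.getSymbolicName(2); // "ID"
 *   vocabulary.getDisplayName(1);  // "'+'"
 */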
// src/dfa/LexerDFASerializer.ts
var LexerDFASerializer = class extends DFASerializer {
static {
__name(this, "LexerDFASerializer");
}
constructor(dfa) {
super(dfa, Vocabulary.EMPTY_VOCABULARY);
}
getEdgeLabel = /* @__PURE__ */ __name((i) => {
return "'" + String.fromCharCode(i) + "'";
}, "getEdgeLabel");
};
// src/atn/ATNState.ts
var ATNState = class _ATNState {
static {
__name(this, "ATNState");
}
static INVALID_STATE_NUMBER = -1;
static INVALID_TYPE = 0;
static BASIC = 1;
static RULE_START = 2;
static BLOCK_START = 3;
static PLUS_BLOCK_START = 4;
static STAR_BLOCK_START = 5;
static TOKEN_START = 6;
static RULE_STOP = 7;
static BLOCK_END = 8;
static STAR_LOOP_BACK = 9;
static STAR_LOOP_ENTRY = 10;
static PLUS_LOOP_BACK = 11;
static LOOP_END = 12;
static stateType = _ATNState.INVALID_STATE_NUMBER;
stateNumber = 0;
ruleIndex = 0;
// at runtime, we don't have Rule objects
epsilonOnlyTransitions = false;
/** Used to cache lookahead during parsing, not used during construction */
nextTokenWithinRule;
/** Track the transitions emanating from this ATN state. */
transitions = [];
hashCode() {
return this.stateNumber;
}
equals(other) {
return this.stateNumber === other.stateNumber;
}
toString() {
return `${this.stateNumber}`;
}
addTransitionAtIndex(index, transition) {
if (this.transitions.length === 0) {
this.epsilonOnlyTransitions = transition.isEpsilon;
} else if (this.epsilonOnlyTransitions !== transition.isEpsilon) {
this.epsilonOnlyTransitions = false;
}
this.transitions.splice(index, 1, transition);
}
addTransition(transition) {
if (this.transitions.length === 0) {
this.epsilonOnlyTransitions = transition.isEpsilon;
} else if (this.epsilonOnlyTransitions !== transition.isEpsilon) {
this.epsilonOnlyTransitions = false;
}
this.transitions.push(transition);
}
setTransition(i, e) {
this.transitions.splice(i, 1, e);
}
removeTransition(index) {
const t = this.transitions.splice(index, 1);
return t[0];
}
};
// src/atn/DecisionState.ts
var DecisionState = class extends ATNState {
static {
__name(this, "DecisionState");
}
decision = -1;
nonGreedy = false;
};
// src/atn/StarLoopEntryState.ts
var StarLoopEntryState = class extends DecisionState {
static {
__name(this, "StarLoopEntryState");
}
static stateType = ATNState.STAR_LOOP_ENTRY;
// This is always set during ATN deserialization
loopBackState;
/**
* Indicates whether this state can benefit from a precedence DFA during SLL
* decision making.
*
* This is a computed property that is calculated during ATN deserialization
* and stored for use in {@link ParserATNSimulator} and
* {@link ParserInterpreter}.
*
* @see `DFA.isPrecedenceDfa`
*/
precedenceRuleDecision = false;
};
// src/dfa/DFA.ts
var DFA = class {
static {
__name(this, "DFA");
}
s0;
decision;
/** From which ATN state did we create this DFA? */
atnStartState;
/**
* Gets whether this DFA is a precedence DFA. Precedence DFAs use a special
* start state {@link #s0} which is not stored in {@link #states}. The
* {@link DFAState#edges} array for this start state contains outgoing edges
* supplying individual start states corresponding to specific precedence
* values.
*
* @returns `true` if this is a precedence DFA; otherwise, `false`.
*/
isPrecedenceDfa;
/**
* A mapping from an ATNConfigSet hash to a DFAState.
* Used to quick look up the DFA state for a particular configuration set.
*/
states = /* @__PURE__ */ new Map();
constructor(atnStartState, decision) {
this.atnStartState = atnStartState;
this.decision = decision ?? 0;
let precedenceDfa = false;
if (atnStartState instanceof StarLoopEntryState) {
if (atnStartState.precedenceRuleDecision) {
precedenceDfa = true;
this.s0 = DFAState.fromState(-1);
}
}
this.isPrecedenceDfa = precedenceDfa;
}
[Symbol.iterator] = () => {
return this.states.values()[Symbol.iterator]();
};
/**
* Get the start state for a specific precedence value.
*
* @param precedence The current precedence.
* @returns The start state corresponding to the specified precedence, or
* `null` if no start state exists for the specified precedence.
*
* @throws IllegalStateException if this is not a precedence DFA.
* @see #isPrecedenceDfa
*/
getPrecedenceStartState = /* @__PURE__ */ __name((precedence) => {
if (!this.isPrecedenceDfa) {
throw new Error(`Only precedence DFAs may contain a precedence start state.`);
}
if (!this.s0 || !this.s0.edges || precedence < 0 || precedence >= this.s0.edges.length) {
return void 0;
}
return this.s0.edges[precedence];
}, "getPrecedenceStartState");
/**
* Set the start state for a specific precedence value.
*
* @param precedence The current precedence.
* @param startState The start state corresponding to the specified precedence.
*/
setPrecedenceStartState = /* @__PURE__ */ __name((precedence, startState) => {
if (!this.isPrecedenceDfa) {
throw new Error(`Only precedence DFAs may contain a precedence start state.`);
}
if (precedence < 0 || !this.s0) {
return;
}
this.s0.edges[precedence] = startState;
}, "setPrecedenceStartState");
/**
* @returns a list of all states in this DFA, ordered by state number.
*/
getStates() {
const result = [...this.states.values()];
result.sort((o1, o2) => {
return o1.stateNumber - o2.stateNumber;
});
return result;
}
getState(state) {
return this.states.get(state.configs.hashCode()) ?? null;
}
getStateForConfigs(configs) {
return this.states.get(configs.hashCode()) ?? null;
}
addState(state) {
const hash = state.configs.hashCode();
if (this.states.has(hash)) {
return;
}
this.states.set(hash, state);
state.stateNumber = this.states.size - 1;
}
toString(vocabulary) {
if (!vocabulary) {
return this.toString(Vocabulary.EMPTY_VOCABULARY);
}
if (!this.s0) {
return "";
}
const serializer = new DFASerializer(this, vocabulary);
return serializer.toString() ?? "";
}
toLexerString() {
if (!this.s0) {
return "";
}
const serializer = new LexerDFASerializer(this);
return serializer.toString() ?? "";
}
get length() {
return this.states.size;
}
};
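/*
 * Usage sketch (illustrative): precedence start states are only available on
 * precedence DFAs; the accessors throw on any other DFA. `dfa` is assumed to
 * be a precedence DFA, i.e. one created from a StarLoopEntryState with
 * precedenceRuleDecision set.
 *
 *   if (dfa.isPrecedenceDfa) {
 *       dfa.getPrecedenceStartState(0);                // undefined until set
 *       dfa.setPrecedenceStartState(0, DFAState.fromState(-1));
 *   }
 */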
// src/misc/BitSet.ts
var BitSet = class {
static {
__name(this, "BitSet");
}
data;
/**
* Creates a new bit set. All bits are initially `false`.
*
* @param data Optional initial data.
*/
constructor(data) {
if (data) {
this.data = new Uint32Array(data.map((value) => {
return value >>> 0;
}));
} else {
this.data = new Uint32Array(1);
}
}
/**
* @returns an iterator over all set bits.
*/
[Symbol.iterator]() {
const length = this.data.length;
let currentIndex = 0;
let currentWord = this.data[currentIndex];
const words = this.data;
return {
[Symbol.iterator]() {
return this;
},
next: /* @__PURE__ */ __name(() => {
while (currentIndex < length) {
if (currentWord !== 0) {
const t = currentWord & -currentWord;
const value = (currentIndex << 5) + this.bitCount(t - 1);
currentWord ^= t;
return { done: false, value };
} else {
currentIndex++;
if (currentIndex < length) {
currentWord = words[currentIndex];
}
}
}
return { done: true, value: void 0 };
}, "next")
};
}
/**
* Sets a single bit or all of the bits in this `BitSet` to `false`.
*
* @param index the index of the bit to be cleared, or undefined to clear all bits.
*/
clear(index) {
if (index === void 0) {
this.data = new Uint32Array();
} else {
this.resize(index);
this.data[index >>> 5] &= ~(1 << index);
}
}
/**
* Performs a logical **OR** of this bit set with the bit set argument. This bit set is modified so that a bit in it
* has the value `true` if and only if it either already had the value `true` or the corresponding bit in the bit
* set argument has the value `true`.
*
* @param set the bit set to be ORed with.
*/
or(set) {
const minCount = Math.min(this.data.length, set.data.length);
for (let k = 0; k < minCount; ++k) {
this.data[k] |= set.data[k];
}
if (this.data.length < set.data.length) {
this.resize((set.data.length << 5) - 1);
const c = set.data.length;
for (let k = minCount; k < c; ++k) {
this.data[k] = set.data[k];
}
}
}
/**
* Returns the value of the bit with the specified index. The value is `true` if the bit with the index `bitIndex`
* is currently set in this `BitSet`; otherwise, the result is `false`.
*
* @param index the bit index
*
* @returns the value of the bit with the specified index.
*/
get(index) {
if (index < 0) {
throw new RangeError("index cannot be negative");
}
const slot = index >>> 5;
if (slot >= this.data.length) {
return false;
}
return (this.data[slot] & 1 << index % 32) !== 0;
}
/**
* @returns the number of set bits.
*/
get length() {
let result = 0;
const c = this.data.length;
const w = this.data;
for (let i = 0; i < c; i++) {
result += this.bitCount(w[i]);
}
return result;
}
/**
* @returns an array with indices of set bits.
*/
values() {
const result = new Array(this.length);
let pos = 0;
const length = this.data.length;
for (let k = 0; k < length; ++k) {
let w = this.data[k];
while (w !== 0) {
const t = w & -w;
result[pos++] = (k << 5) + this.bitCount(t - 1);
w ^= t;
}
}
return result;
}
/**
* @returns the index of the first bit that is set to `true` that occurs on or after the specified starting index.
* If no such bit exists then undefined is returned.
*
* @param fromIndex the index to start checking from (inclusive)
*/
nextSetBit(fromIndex) {
if (fromIndex < 0) {
throw new RangeError("index cannot be negative");
}
for (const index of this) {
if (index >= fromIndex) {
return index;
}
}
return void 0;
}
/**
* Sets the bit at the specified index to `true`.
*
* @param index a bit index
*/
set(index) {
if (index < 0) {
throw new RangeError("index cannot be negative");
}
this.resize(index);
this.data[index >>> 5] |= 1 << index % 32;
}
/**
* @returns a string representation of this bit set.
*/
toString() {
return "{" + this.values().join(", ") + "}";
}
resize(index) {
const count = index + 32 >>> 5;
if (count <= this.data.length) {
return;
}
const data = new Uint32Array(count);
data.set(this.data);
data.fill(0, this.data.length);
this.data = data;
}
bitCount(v) {
v = v - (v >> 1 & 1431655765);
v = (v & 858993459) + (v >> 2 & 858993459);
v = v + (v >> 4) & 252645135;
v = v + (v >> 8);
v = v + (v >> 16);
return v & 63;
}
};
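/*
 * Usage sketch (illustrative): BitSet backs alternative tracking during
 * prediction, but it is usable on its own.
 *
 *   const bits = new BitSet();
 *   bits.set(3);
 *   bits.set(64);                    // auto-resizes the backing Uint32Array
 *   console.log(bits.get(3));        // true
 *   console.log(bits.length);        // 2 (number of set bits)
 *   console.log(bits.toString());    // "{3, 64}"
 *   console.log(bits.nextSetBit(4)); // 64
 */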
// src/utils/MurmurHash.ts
var c1 = 3432918353;
var c2 = 461845907;
var r1 = 15;
var r2 = 13;
var m = 5;
var n = 3864292196;
var MurmurHash = class _MurmurHash {
static {
__name(this, "MurmurHash");
}
static defaultSeed = 701;
constructor() {
}
/**
* Initialize the hash using the specified {@code seed}.
*
* @param seed the seed
*
* @returns the intermediate hash value
*/
static initialize(seed = _MurmurHash.defaultSeed) {
return seed;
}
static updateFromComparable(hash, value) {
return this.update(hash, value?.hashCode() ?? 0);
}
/**
* Update the intermediate hash value for the next input {@code value}.
*
* @param hash The intermediate hash value.
* @param value the value to add to the current hash.
*
* @returns the updated intermediate hash value
*/
static update(hash, value) {
value = Math.imul(value, c1);
value = value << r1 | value >>> 32 - r1;
value = Math.imul(value, c2);
hash = hash ^ value;
hash = hash << r2 | hash >>> 32 - r2;
hash = Math.imul(hash, m) + n;
return hash;
}
/**
* Apply the final computation steps to the intermediate value {@code hash}
* to form the final result of the MurmurHash 3 hash function.
*
* @param hash The intermediate hash value.
* @param entryCount The number of values added to the hash.
*
* @returns the final hash result
*/
static finish(hash, entryCount) {
hash ^= entryCount * 4;
hash ^= hash >>> 16;
hash = Math.imul(hash, 2246822507);
hash ^= hash >>> 13;
hash = Math.imul(hash, 3266489909);
hash ^= hash >>> 16;
return hash;
}
/**
* An all-in-one convenience method to compute a hash for a single value.
*
* @param value The value to hash.
* @param seed The seed for the hash value.
*
* @returns The computed hash.
*/
static hashCode(value, seed) {
return _MurmurHash.finish(_MurmurHash.update(seed ?? _MurmurHash.defaultSeed, value), 1);
}
};
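/*
 * Usage sketch (illustrative): the incremental initialize/update/finish cycle
 * is how the runtime hashes composite objects; hashCode is the one-shot form.
 *
 *   let hash = MurmurHash.initialize(); // default seed 701
 *   hash = MurmurHash.update(hash, 42);
 *   hash = MurmurHash.update(hash, 7);
 *   hash = MurmurHash.finish(hash, 2);  // entryCount = 2
 *
 *   const single = MurmurHash.hashCode(42); // one-shot convenience
 */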
// src/misc/ObjectEqualityComparator.ts
var ObjectEqualityComparator = class _ObjectEqualityComparator {
static {
__name(this, "ObjectEqualityComparator");
}
static instance = new _ObjectEqualityComparator();
hashCode(obj) {
if (obj == null) {
return 0;
}
return obj.hashCode();
}
equals(a, b) {
if (a == null) {
return b == null;
}
return a.equals(b);
}
};
// src/misc/DefaultEqualityComparator.ts
var DefaultEqualityComparator = class _DefaultEqualityComparator {
static {
__name(this, "DefaultEqualityComparator");
}
static instance = new _DefaultEqualityComparator();
hashCode(obj) {
if (obj == null) {
return 0;
}
return ObjectEqualityComparator.instance.hashCode(obj);
}
equals(a, b) {
if (a == null) {
return b == null;
}
if (typeof a === "string" || typeof a === "number") {
return a === b;
}
return ObjectEqualityComparator.instance.equals(a, b);
}
};
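/*
 * Usage sketch (illustrative): DefaultEqualityComparator compares primitives
 * directly and delegates to an object's own equals/hashCode otherwise, which
 * is what HashSet relies on below.
 *
 *   const cmp = DefaultEqualityComparator.instance;
 *   cmp.equals("a", "a");   // true (primitive path)
 *   cmp.equals(null, null); // true
 */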
// src/misc/HashSet.ts
var HashSet = class _HashSet {
static {
__name(this, "HashSet");
}
static defaultLoadFactor = 0.75;
static initialCapacity = 16;
// must be power of 2
comparator;
buckets;
threshold;
/** How many elements in set */
itemCount = 0;
constructor(comparatorOrSet, initialCapacity = _HashSet.initialCapacity) {
if (comparatorOrSet instanceof _HashSet) {
this.comparator = comparatorOrSet.comparator;