simple-graph-query
Version:
TypeScript evaluator for Forge expressions with browser-compatible UMD bundle
1,251 lines (1,215 loc) • 2.33 MB
JavaScript
(function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory();
else if(typeof define === 'function' && define.amd)
define([], factory);
else if(typeof exports === 'object')
exports["SimpleGraphQuery"] = factory();
else
root["SimpleGraphQuery"] = factory();
})(this, () => {
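/*
* The wrapper above is webpack's standard UMD boilerplate: it assigns the factory result to
* `module.exports` under CommonJS, registers it via `define()` under AMD, and otherwise
* attaches it to the host object as the global `SimpleGraphQuery`. A minimal consumption
* sketch, assuming the package entry point resolves to this bundle (the import paths are
* assumptions, not taken from this file):
*
*   // Node / CommonJS
*   const SimpleGraphQuery = require("simple-graph-query");
*
*   // Browser, after loading the bundle with a plain script tag
*   const SimpleGraphQuery = window.SimpleGraphQuery;
*/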
return /******/ (() => { // webpackBootstrap
/******/ var __webpack_modules__ = ({
/***/ "./node_modules/antlr4ts/ANTLRErrorListener.js":
/*!*****************************************************!*\
!*** ./node_modules/antlr4ts/ANTLRErrorListener.js ***!
\*****************************************************/
/***/ ((__unused_webpack_module, exports) => {
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", ({ value: true }));
//# sourceMappingURL=ANTLRErrorListener.js.map
/***/ }),
/***/ "./node_modules/antlr4ts/ANTLRErrorStrategy.js":
/*!*****************************************************!*\
!*** ./node_modules/antlr4ts/ANTLRErrorStrategy.js ***!
\*****************************************************/
/***/ ((__unused_webpack_module, exports) => {
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", ({ value: true }));
//# sourceMappingURL=ANTLRErrorStrategy.js.map
/***/ }),
/***/ "./node_modules/antlr4ts/ANTLRInputStream.js":
/*!***************************************************!*\
!*** ./node_modules/antlr4ts/ANTLRInputStream.js ***!
\***************************************************/
/***/ (function(__unused_webpack_module, exports, __webpack_require__) {
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
// ConvertTo-TS run at 2016-10-04T11:26:49.0828748-07:00
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.ANTLRInputStream = void 0;
const assert = __webpack_require__(/*! assert */ "./node_modules/assert/build/assert.js");
const Decorators_1 = __webpack_require__(/*! ./Decorators */ "./node_modules/antlr4ts/Decorators.js");
const IntStream_1 = __webpack_require__(/*! ./IntStream */ "./node_modules/antlr4ts/IntStream.js");
const READ_BUFFER_SIZE = 1024;
const INITIAL_BUFFER_SIZE = 1024;
/**
* Vacuum all input from a {@link Reader}/{@link InputStream} and then treat it
* like a `char[]` buffer. Can also pass in a {@link String} or
* `char[]` to use.
*
* If you need encoding, pass in stream/reader with correct encoding.
*
* @deprecated as of 4.7, please use `CharStreams` interface.
*/
class ANTLRInputStream {
/** Copy data in string to a local char array */
constructor(input) {
/** 0..n-1 index into string of next char */
this.p = 0;
this.data = input;
this.n = input.length;
}
/** Reset the stream so that it's in the same state it was
* when the object was created *except* the data array is not
* touched.
*/
reset() {
this.p = 0;
}
consume() {
if (this.p >= this.n) {
assert(this.LA(1) === IntStream_1.IntStream.EOF);
throw new Error("cannot consume EOF");
}
//System.out.println("prev p="+p+", c="+(char)data[p]);
if (this.p < this.n) {
this.p++;
//System.out.println("p moves to "+p+" (c='"+(char)data[p]+"')");
}
}
LA(i) {
if (i === 0) {
return 0; // undefined
}
if (i < 0) {
i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
if ((this.p + i - 1) < 0) {
return IntStream_1.IntStream.EOF; // invalid; no char before first char
}
}
if ((this.p + i - 1) >= this.n) {
//System.out.println("char LA("+i+")=EOF; p="+p);
return IntStream_1.IntStream.EOF;
}
//System.out.println("char LA("+i+")="+(char)data[p+i-1]+"; p="+p);
//System.out.println("LA("+i+"); p="+p+" n="+n+" data.length="+data.length);
return this.data.charCodeAt(this.p + i - 1);
}
LT(i) {
return this.LA(i);
}
/** Return the current input symbol index 0..n where n indicates the
* last symbol has been read. The index is the index of char to
* be returned from LA(1).
*/
get index() {
return this.p;
}
get size() {
return this.n;
}
/** mark/release do nothing; we have entire buffer */
mark() {
return -1;
}
release(marker) {
// No default implementation since this stream buffers the entire input
}
/** consume() ahead until p==index; can't just set p=index as we must
* update line and charPositionInLine. If we seek backwards, just set p
*/
seek(index) {
if (index <= this.p) {
this.p = index; // just jump; don't update stream state (line, ...)
return;
}
// seek forward, consume until p hits index or n (whichever comes first)
index = Math.min(index, this.n);
while (this.p < index) {
this.consume();
}
}
getText(interval) {
let start = interval.a;
let stop = interval.b;
if (stop >= this.n) {
stop = this.n - 1;
}
let count = stop - start + 1;
if (start >= this.n) {
return "";
}
// System.err.println("data: "+Arrays.toString(data)+", n="+n+
// ", start="+start+
// ", stop="+stop);
return this.data.substr(start, count);
}
get sourceName() {
if (!this.name) {
return IntStream_1.IntStream.UNKNOWN_SOURCE_NAME;
}
return this.name;
}
toString() { return this.data; }
}
__decorate([
Decorators_1.Override
], ANTLRInputStream.prototype, "consume", null);
__decorate([
Decorators_1.Override
], ANTLRInputStream.prototype, "LA", null);
__decorate([
Decorators_1.Override
], ANTLRInputStream.prototype, "index", null);
__decorate([
Decorators_1.Override
], ANTLRInputStream.prototype, "size", null);
__decorate([
Decorators_1.Override
], ANTLRInputStream.prototype, "mark", null);
__decorate([
Decorators_1.Override
], ANTLRInputStream.prototype, "release", null);
__decorate([
Decorators_1.Override
], ANTLRInputStream.prototype, "seek", null);
__decorate([
Decorators_1.Override
], ANTLRInputStream.prototype, "getText", null);
__decorate([
Decorators_1.Override
], ANTLRInputStream.prototype, "sourceName", null);
__decorate([
Decorators_1.Override
], ANTLRInputStream.prototype, "toString", null);
exports.ANTLRInputStream = ANTLRInputStream;
//# sourceMappingURL=ANTLRInputStream.js.map
/***/ }),
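/*
* Usage sketch for the ANTLRInputStream module above, assuming the antlr4ts package API as
* bundled here; `MyLexer` is a placeholder for a generated lexer, not a name from this file.
* ANTLRInputStream wraps an in-memory string as a character stream, but it is deprecated in
* favor of CharStreams (see the CharStreams module further down).
*
*   const { ANTLRInputStream, CommonTokenStream } = require("antlr4ts");
*   const input = new ANTLRInputStream("some input text");
*   const lexer = new MyLexer(input);            // placeholder generated lexer
*   const tokens = new CommonTokenStream(lexer);
*   tokens.fill();                               // buffer every token up to EOF
*/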
/***/ "./node_modules/antlr4ts/BailErrorStrategy.js":
/*!****************************************************!*\
!*** ./node_modules/antlr4ts/BailErrorStrategy.js ***!
\****************************************************/
/***/ (function(__unused_webpack_module, exports, __webpack_require__) {
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.BailErrorStrategy = void 0;
// ConvertTo-TS run at 2016-10-04T11:26:49.2855056-07:00
const DefaultErrorStrategy_1 = __webpack_require__(/*! ./DefaultErrorStrategy */ "./node_modules/antlr4ts/DefaultErrorStrategy.js");
const InputMismatchException_1 = __webpack_require__(/*! ./InputMismatchException */ "./node_modules/antlr4ts/InputMismatchException.js");
const Decorators_1 = __webpack_require__(/*! ./Decorators */ "./node_modules/antlr4ts/Decorators.js");
const ParseCancellationException_1 = __webpack_require__(/*! ./misc/ParseCancellationException */ "./node_modules/antlr4ts/misc/ParseCancellationException.js");
/**
* This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
* by immediately canceling the parse operation with a
* {@link ParseCancellationException}. The implementation ensures that the
* {@link ParserRuleContext#exception} field is set for all parse tree nodes
* that were not completed prior to encountering the error.
*
* This error strategy is useful in the following scenarios.
*
* * **Two-stage parsing:** This error strategy allows the first
* stage of two-stage parsing to immediately terminate if an error is
* encountered, and immediately fall back to the second stage. In addition to
* avoiding wasted work by attempting to recover from errors here, the empty
* implementation of {@link BailErrorStrategy#sync} improves the performance of
* the first stage.
* * **Silent validation:** When syntax errors are not being
* reported or logged, and the parse result is simply ignored if errors occur,
* the {@link BailErrorStrategy} avoids wasting work on recovering from errors
* when the result will be ignored either way.
*
* ```
* myparser.errorHandler = new BailErrorStrategy();
* ```
*
* @see Parser.errorHandler
*/
class BailErrorStrategy extends DefaultErrorStrategy_1.DefaultErrorStrategy {
/** Instead of recovering from exception `e`, re-throw it wrapped
* in a {@link ParseCancellationException} so it is not caught by the
* rule function catches. Use {@link Exception#getCause()} to get the
* original {@link RecognitionException}.
*/
recover(recognizer, e) {
for (let context = recognizer.context; context; context = context.parent) {
context.exception = e;
}
throw new ParseCancellationException_1.ParseCancellationException(e);
}
/** Make sure we don't attempt to recover inline; if the parser
* successfully recovers, it won't throw an exception.
*/
recoverInline(recognizer) {
let e = new InputMismatchException_1.InputMismatchException(recognizer);
for (let context = recognizer.context; context; context = context.parent) {
context.exception = e;
}
throw new ParseCancellationException_1.ParseCancellationException(e);
}
/** Make sure we don't attempt to recover from problems in subrules. */
sync(recognizer) {
// intentionally empty
}
}
__decorate([
Decorators_1.Override
], BailErrorStrategy.prototype, "recover", null);
__decorate([
Decorators_1.Override
], BailErrorStrategy.prototype, "recoverInline", null);
__decorate([
Decorators_1.Override
], BailErrorStrategy.prototype, "sync", null);
exports.BailErrorStrategy = BailErrorStrategy;
//# sourceMappingURL=BailErrorStrategy.js.map
/***/ }),
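/*
* Usage sketch for BailErrorStrategy, assuming the antlr4ts API as bundled here; `MyParser`,
* its `query()` start rule, and the `tokens` stream are placeholders, not names from this
* file. The strategy makes the parser throw a ParseCancellationException on the first syntax
* error instead of recovering, which enables the two-stage "fail fast, then re-parse with
* full recovery" pattern described above.
*
*   const { BailErrorStrategy } = require("antlr4ts");
*   const { ParseCancellationException } = require("antlr4ts/misc/ParseCancellationException");
*
*   const parser = new MyParser(tokens);         // placeholder generated parser
*   parser.errorHandler = new BailErrorStrategy();
*   let tree;
*   try {
*     tree = parser.query();                     // placeholder start rule
*   } catch (e) {
*     if (e instanceof ParseCancellationException) {
*       // syntax error: optionally re-parse with the default strategy for full diagnostics
*     } else {
*       throw e;
*     }
*   }
*/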
/***/ "./node_modules/antlr4ts/BufferedTokenStream.js":
/*!******************************************************!*\
!*** ./node_modules/antlr4ts/BufferedTokenStream.js ***!
\******************************************************/
/***/ (function(__unused_webpack_module, exports, __webpack_require__) {
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __param = (this && this.__param) || function (paramIndex, decorator) {
return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.BufferedTokenStream = void 0;
// ConvertTo-TS run at 2016-10-04T11:26:49.6074365-07:00
const assert = __webpack_require__(/*! assert */ "./node_modules/assert/build/assert.js");
const CommonToken_1 = __webpack_require__(/*! ./CommonToken */ "./node_modules/antlr4ts/CommonToken.js");
const Interval_1 = __webpack_require__(/*! ./misc/Interval */ "./node_modules/antlr4ts/misc/Interval.js");
const Lexer_1 = __webpack_require__(/*! ./Lexer */ "./node_modules/antlr4ts/Lexer.js");
const Decorators_1 = __webpack_require__(/*! ./Decorators */ "./node_modules/antlr4ts/Decorators.js");
const Token_1 = __webpack_require__(/*! ./Token */ "./node_modules/antlr4ts/Token.js");
/**
* This implementation of {@link TokenStream} loads tokens from a
* {@link TokenSource} on-demand, and places the tokens in a buffer to provide
* access to any previous token by index.
*
* This token stream ignores the value of {@link Token#getChannel}. If your
* parser requires the token stream to filter tokens down to only those on a
* particular channel, such as {@link Token#DEFAULT_CHANNEL} or
* {@link Token#HIDDEN_CHANNEL}, use a filtering token stream such as
* {@link CommonTokenStream}.
*/
let BufferedTokenStream = class BufferedTokenStream {
constructor(tokenSource) {
/**
* A collection of all tokens fetched from the token source. The list is
* considered a complete view of the input once {@link #fetchedEOF} is set
* to `true`.
*/
this.tokens = [];
/**
* The index into {@link #tokens} of the current token (next token to
* {@link #consume}). {@link #tokens}`[`{@link #p}`]` should be
* {@link #LT LT(1)}.
*
* This field is set to -1 when the stream is first constructed or when
* {@link #setTokenSource} is called, indicating that the first token has
* not yet been fetched from the token source. For additional information,
* see the documentation of {@link IntStream} for a description of
* Initializing Methods.
*/
this.p = -1;
/**
* Indicates whether the {@link Token#EOF} token has been fetched from
* {@link #tokenSource} and added to {@link #tokens}. This field improves
* performance for the following cases:
*
* * {@link #consume}: The lookahead check in {@link #consume} to prevent
* consuming the EOF symbol is optimized by checking the values of
* {@link #fetchedEOF} and {@link #p} instead of calling {@link #LA}.
* * {@link #fetch}: The check to prevent adding multiple EOF symbols into
* {@link #tokens} is trivial with this field.
*/
this.fetchedEOF = false;
if (tokenSource == null) {
throw new Error("tokenSource cannot be null");
}
this._tokenSource = tokenSource;
}
get tokenSource() {
return this._tokenSource;
}
/** Reset this token stream by setting its token source. */
set tokenSource(tokenSource) {
this._tokenSource = tokenSource;
this.tokens.length = 0;
this.p = -1;
this.fetchedEOF = false;
}
get index() {
return this.p;
}
mark() {
return 0;
}
release(marker) {
// no resources to release
}
seek(index) {
this.lazyInit();
this.p = this.adjustSeekIndex(index);
}
get size() {
return this.tokens.length;
}
consume() {
let skipEofCheck;
if (this.p >= 0) {
if (this.fetchedEOF) {
// the last token in tokens is EOF. skip check if p indexes any
// fetched token except the last.
skipEofCheck = this.p < this.tokens.length - 1;
}
else {
// no EOF token in tokens. skip check if p indexes a fetched token.
skipEofCheck = this.p < this.tokens.length;
}
}
else {
// not yet initialized
skipEofCheck = false;
}
if (!skipEofCheck && this.LA(1) === Token_1.Token.EOF) {
throw new Error("cannot consume EOF");
}
if (this.sync(this.p + 1)) {
this.p = this.adjustSeekIndex(this.p + 1);
}
}
/** Make sure index `i` in tokens has a token.
*
* @returns `true` if a token is located at index `i`, otherwise
* `false`.
* @see #get(int i)
*/
sync(i) {
assert(i >= 0);
let n = i - this.tokens.length + 1; // how many more elements we need?
//System.out.println("sync("+i+") needs "+n);
if (n > 0) {
let fetched = this.fetch(n);
return fetched >= n;
}
return true;
}
/** Add `n` elements to buffer.
*
* @returns The actual number of elements added to the buffer.
*/
fetch(n) {
if (this.fetchedEOF) {
return 0;
}
for (let i = 0; i < n; i++) {
let t = this.tokenSource.nextToken();
if (this.isWritableToken(t)) {
t.tokenIndex = this.tokens.length;
}
this.tokens.push(t);
if (t.type === Token_1.Token.EOF) {
this.fetchedEOF = true;
return i + 1;
}
}
return n;
}
get(i) {
if (i < 0 || i >= this.tokens.length) {
throw new RangeError("token index " + i + " out of range 0.." + (this.tokens.length - 1));
}
return this.tokens[i];
}
/** Get all tokens from start..stop inclusively. */
getRange(start, stop) {
if (start < 0 || stop < 0) {
return [];
}
this.lazyInit();
let subset = new Array();
if (stop >= this.tokens.length) {
stop = this.tokens.length - 1;
}
for (let i = start; i <= stop; i++) {
let t = this.tokens[i];
if (t.type === Token_1.Token.EOF) {
break;
}
subset.push(t);
}
return subset;
}
LA(i) {
let token = this.LT(i);
if (!token) {
return Token_1.Token.INVALID_TYPE;
}
return token.type;
}
tryLB(k) {
if ((this.p - k) < 0) {
return undefined;
}
return this.tokens[this.p - k];
}
LT(k) {
let result = this.tryLT(k);
if (result === undefined) {
throw new RangeError("requested lookback index out of range");
}
return result;
}
tryLT(k) {
this.lazyInit();
if (k === 0) {
throw new RangeError("0 is not a valid lookahead index");
}
if (k < 0) {
return this.tryLB(-k);
}
let i = this.p + k - 1;
this.sync(i);
if (i >= this.tokens.length) {
// return EOF token
// EOF must be last token
return this.tokens[this.tokens.length - 1];
}
// if ( i>range ) range = i;
return this.tokens[i];
}
/**
* Allows derived classes to modify the behavior of operations which change
* the current stream position by adjusting the target token index of a seek
* operation. The default implementation simply returns `i`. If an
* exception is thrown in this method, the current stream index should not be
* changed.
*
* For example, {@link CommonTokenStream} overrides this method to ensure that
* the seek target is always an on-channel token.
*
* @param i The target token index.
* @returns The adjusted target token index.
*/
adjustSeekIndex(i) {
return i;
}
lazyInit() {
if (this.p === -1) {
this.setup();
}
}
setup() {
this.sync(0);
this.p = this.adjustSeekIndex(0);
}
/** Given a start and stop index, return a list of all tokens whose type is in
* the given `types` set. Return an empty array if no tokens were found. This
* method looks at both on- and off-channel tokens.
*/
getTokens(start, stop, types) {
this.lazyInit();
if (start === undefined) {
assert(stop === undefined && types === undefined);
return this.tokens;
}
else if (stop === undefined) {
stop = this.tokens.length - 1;
}
if (start < 0 || stop >= this.tokens.length || stop < 0 || start >= this.tokens.length) {
throw new RangeError("start " + start + " or stop " + stop + " not in 0.." + (this.tokens.length - 1));
}
if (start > stop) {
return [];
}
if (types === undefined) {
return this.tokens.slice(start, stop + 1);
}
else if (typeof types === "number") {
types = new Set().add(types);
}
let typesSet = types;
// list = tokens[start:stop]:{T t, t.type in types}
let filteredTokens = this.tokens.slice(start, stop + 1);
filteredTokens = filteredTokens.filter((value) => typesSet.has(value.type));
return filteredTokens;
}
/**
* Given a starting index, return the index of the next token on channel.
* Return `i` if `tokens[i]` is on channel. Return the index of
* the EOF token if there are no tokens on channel between `i` and
* EOF.
*/
nextTokenOnChannel(i, channel) {
this.sync(i);
if (i >= this.size) {
return this.size - 1;
}
let token = this.tokens[i];
while (token.channel !== channel) {
if (token.type === Token_1.Token.EOF) {
return i;
}
i++;
this.sync(i);
token = this.tokens[i];
}
return i;
}
/**
* Given a starting index, return the index of the previous token on
* channel. Return `i` if `tokens[i]` is on channel. Return -1
* if there are no tokens on channel between `i` and 0.
*
* If `i` specifies an index at or after the EOF token, the EOF token
* index is returned. This is due to the fact that the EOF token is treated
* as though it were on every channel.
*/
previousTokenOnChannel(i, channel) {
this.sync(i);
if (i >= this.size) {
// the EOF token is on every channel
return this.size - 1;
}
while (i >= 0) {
let token = this.tokens[i];
if (token.type === Token_1.Token.EOF || token.channel === channel) {
return i;
}
i--;
}
return i;
}
/** Collect all tokens on the specified channel to the right of
* the current token, up until we see a token on {@link Lexer#DEFAULT_TOKEN_CHANNEL} or
* EOF. If `channel` is `-1`, find any non-default channel token.
*/
getHiddenTokensToRight(tokenIndex, channel = -1) {
this.lazyInit();
if (tokenIndex < 0 || tokenIndex >= this.tokens.length) {
throw new RangeError(tokenIndex + " not in 0.." + (this.tokens.length - 1));
}
let nextOnChannel = this.nextTokenOnChannel(tokenIndex + 1, Lexer_1.Lexer.DEFAULT_TOKEN_CHANNEL);
let to;
let from = tokenIndex + 1;
// if none onchannel to right, nextOnChannel=-1 so set to = last token
if (nextOnChannel === -1) {
to = this.size - 1;
}
else {
to = nextOnChannel;
}
return this.filterForChannel(from, to, channel);
}
/** Collect all tokens on the specified channel to the left of
* the current token, up until we see a token on {@link Lexer#DEFAULT_TOKEN_CHANNEL}.
* If `channel` is `-1`, find any non-default channel token.
*/
getHiddenTokensToLeft(tokenIndex, channel = -1) {
this.lazyInit();
if (tokenIndex < 0 || tokenIndex >= this.tokens.length) {
throw new RangeError(tokenIndex + " not in 0.." + (this.tokens.length - 1));
}
if (tokenIndex === 0) {
// obviously no tokens can appear before the first token
return [];
}
let prevOnChannel = this.previousTokenOnChannel(tokenIndex - 1, Lexer_1.Lexer.DEFAULT_TOKEN_CHANNEL);
if (prevOnChannel === tokenIndex - 1) {
return [];
}
// if none onchannel to left, prevOnChannel=-1 then from=0
let from = prevOnChannel + 1;
let to = tokenIndex - 1;
return this.filterForChannel(from, to, channel);
}
filterForChannel(from, to, channel) {
let hidden = new Array();
for (let i = from; i <= to; i++) {
let t = this.tokens[i];
if (channel === -1) {
if (t.channel !== Lexer_1.Lexer.DEFAULT_TOKEN_CHANNEL) {
hidden.push(t);
}
}
else {
if (t.channel === channel) {
hidden.push(t);
}
}
}
return hidden;
}
get sourceName() {
return this.tokenSource.sourceName;
}
getText(interval) {
if (interval === undefined) {
interval = Interval_1.Interval.of(0, this.size - 1);
}
else if (!(interval instanceof Interval_1.Interval)) {
// Note: the more obvious check for 'instanceof RuleContext' results in a circular dependency problem
interval = interval.sourceInterval;
}
let start = interval.a;
let stop = interval.b;
if (start < 0 || stop < 0) {
return "";
}
this.fill();
if (stop >= this.tokens.length) {
stop = this.tokens.length - 1;
}
let buf = "";
for (let i = start; i <= stop; i++) {
let t = this.tokens[i];
if (t.type === Token_1.Token.EOF) {
break;
}
buf += t.text;
}
return buf.toString();
}
getTextFromRange(start, stop) {
if (this.isToken(start) && this.isToken(stop)) {
return this.getText(Interval_1.Interval.of(start.tokenIndex, stop.tokenIndex));
}
return "";
}
/** Get all tokens from lexer until EOF. */
fill() {
this.lazyInit();
const blockSize = 1000;
while (true) {
let fetched = this.fetch(blockSize);
if (fetched < blockSize) {
return;
}
}
}
// TODO: Figure out a way to make this more flexible?
isWritableToken(t) {
return t instanceof CommonToken_1.CommonToken;
}
// TODO: Figure out a way to make this more flexible?
isToken(t) {
return t instanceof CommonToken_1.CommonToken;
}
};
__decorate([
Decorators_1.NotNull
], BufferedTokenStream.prototype, "_tokenSource", void 0);
__decorate([
Decorators_1.Override
], BufferedTokenStream.prototype, "tokenSource", null);
__decorate([
Decorators_1.Override
], BufferedTokenStream.prototype, "index", null);
__decorate([
Decorators_1.Override
], BufferedTokenStream.prototype, "mark", null);
__decorate([
Decorators_1.Override
], BufferedTokenStream.prototype, "release", null);
__decorate([
Decorators_1.Override
], BufferedTokenStream.prototype, "seek", null);
__decorate([
Decorators_1.Override
], BufferedTokenStream.prototype, "size", null);
__decorate([
Decorators_1.Override
], BufferedTokenStream.prototype, "consume", null);
__decorate([
Decorators_1.Override
], BufferedTokenStream.prototype, "get", null);
__decorate([
Decorators_1.Override
], BufferedTokenStream.prototype, "LA", null);
__decorate([
Decorators_1.NotNull,
Decorators_1.Override
], BufferedTokenStream.prototype, "LT", null);
__decorate([
Decorators_1.Override
], BufferedTokenStream.prototype, "sourceName", null);
__decorate([
Decorators_1.NotNull,
Decorators_1.Override
], BufferedTokenStream.prototype, "getText", null);
__decorate([
Decorators_1.NotNull,
Decorators_1.Override
], BufferedTokenStream.prototype, "getTextFromRange", null);
BufferedTokenStream = __decorate([
__param(0, Decorators_1.NotNull)
], BufferedTokenStream);
exports.BufferedTokenStream = BufferedTokenStream;
//# sourceMappingURL=BufferedTokenStream.js.map
/***/ }),
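/*
* Usage sketch for BufferedTokenStream, assuming the antlr4ts API as bundled here; `MyLexer`
* is a placeholder for a generated lexer. Unlike CommonTokenStream, this stream does not
* filter by channel, so LT/LA also see hidden-channel tokens.
*
*   const { BufferedTokenStream, CharStreams } = require("antlr4ts");
*   const lexer = new MyLexer(CharStreams.fromString("a b c"));  // placeholder lexer
*   const tokens = new BufferedTokenStream(lexer);
*   tokens.fill();                               // pull every token from the lexer up to EOF
*   console.log(tokens.size, tokens.getTokens().map(t => t.text));
*/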
/***/ "./node_modules/antlr4ts/CharStream.js":
/*!*********************************************!*\
!*** ./node_modules/antlr4ts/CharStream.js ***!
\*********************************************/
/***/ ((__unused_webpack_module, exports) => {
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", ({ value: true }));
//# sourceMappingURL=CharStream.js.map
/***/ }),
/***/ "./node_modules/antlr4ts/CharStreams.js":
/*!**********************************************!*\
!*** ./node_modules/antlr4ts/CharStreams.js ***!
\**********************************************/
/***/ ((__unused_webpack_module, exports, __webpack_require__) => {
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.CharStreams = void 0;
const CodePointBuffer_1 = __webpack_require__(/*! ./CodePointBuffer */ "./node_modules/antlr4ts/CodePointBuffer.js");
const CodePointCharStream_1 = __webpack_require__(/*! ./CodePointCharStream */ "./node_modules/antlr4ts/CodePointCharStream.js");
const IntStream_1 = __webpack_require__(/*! ./IntStream */ "./node_modules/antlr4ts/IntStream.js");
// const DEFAULT_BUFFER_SIZE: number = 4096;
/** This class represents the primary interface for creating {@link CharStream}s
* from a variety of sources as of 4.7. The motivation was to support
* Unicode code points > U+FFFF. {@link ANTLRInputStream} and
* {@link ANTLRFileStream} are now deprecated in favor of the streams created
* by this interface.
*
* DEPRECATED: {@code new ANTLRFileStream("myinputfile")}
* NEW: {@code CharStreams.fromFileName("myinputfile")}
*
* WARNING: If you use both the deprecated and the new streams, you will see
* a nontrivial performance degradation. This speed hit is because the
* {@link Lexer}'s internal code goes from a monomorphic to megamorphic
* dynamic dispatch to get characters from the input stream. Java's
* on-the-fly compiler (JIT) is unable to perform the same optimizations
* so stick with either the old or the new streams, if performance is
* a primary concern. See the extreme debugging and spelunking
* needed to identify this issue in our timing rig:
*
* https://github.com/antlr/antlr4/pull/1781
*
* The ANTLR character streams still buffer all the input when you create
* the stream, as they have done for ~20 years. If you need unbuffered
* access, please note that it becomes challenging to create
* parse trees. The parse tree has to point to tokens which will either
* point into a stale location in an unbuffered stream or you have to copy
* the characters out of the buffer into the token. That defeats the purpose
* of unbuffered input. Per the ANTLR book, unbuffered streams are primarily
* useful for processing infinite streams *during the parse.*
*
* The new streams also use 8-bit buffers when possible so this new
* interface supports character streams that use half as much memory
* as the old {@link ANTLRFileStream}, which assumed 16-bit characters.
*
* A big shout out to Ben Hamilton (github bhamiltoncx) for his superhuman
* efforts across all targets to get true Unicode 3.1 support for U+10FFFF.
*
* @since 4.7
*/
var CharStreams;
(function (CharStreams) {
// /**
// * Creates a {@link CharStream} given a path to a UTF-8
// * encoded file on disk.
// *
// * Reads the entire contents of the file into the result before returning.
// */
// export function fromFile(file: File): CharStream;
// export function fromFile(file: File, charset: Charset): CharStream;
// export function fromFile(file: File, charset?: Charset): CharStream {
// if (charset === undefined) {
// charset = Charset.forName("UTF-8");
// }
function fromString(s, sourceName) {
if (sourceName === undefined || sourceName.length === 0) {
sourceName = IntStream_1.IntStream.UNKNOWN_SOURCE_NAME;
}
// Initial guess assumes no code points > U+FFFF: one code
// point for each code unit in the string
let codePointBufferBuilder = CodePointBuffer_1.CodePointBuffer.builder(s.length);
// TODO: CharBuffer.wrap(String) rightfully returns a read-only buffer
// which doesn't expose its array, so we make a copy.
let cb = new Uint16Array(s.length);
for (let i = 0; i < s.length; i++) {
cb[i] = s.charCodeAt(i);
}
codePointBufferBuilder.append(cb);
return CodePointCharStream_1.CodePointCharStream.fromBuffer(codePointBufferBuilder.build(), sourceName);
}
CharStreams.fromString = fromString;
// export function bufferFromChannel(
// channel: ReadableByteChannel,
// charset: Charset,
// bufferSize: number,
// decodingErrorAction: CodingErrorAction,
// inputSize: number): CodePointBuffer {
// try {
// let utf8BytesIn: Uint8Array = new Uint8Array(bufferSize);
// let utf16CodeUnitsOut: Uint16Array = new Uint16Array(bufferSize);
// if (inputSize === -1) {
// inputSize = bufferSize;
// } else if (inputSize > Integer.MAX_VALUE) {
// // ByteBuffer et al don't support long sizes
// throw new RangeError(`inputSize ${inputSize} larger than max ${Integer.MAX_VALUE}`);
// }
// let codePointBufferBuilder: CodePointBuffer.Builder = CodePointBuffer.builder(inputSize);
// let decoder: CharsetDecoder = charset
// .newDecoder()
// .onMalformedInput(decodingErrorAction)
// .onUnmappableCharacter(decodingErrorAction);
// let endOfInput: boolean = false;
// while (!endOfInput) {
// let bytesRead: number = channel.read(utf8BytesIn);
// endOfInput = (bytesRead === -1);
// utf8BytesIn.flip();
// let result: CoderResult = decoder.decode(
// utf8BytesIn,
// utf16CodeUnitsOut,
// endOfInput);
// if (result.isError() && decodingErrorAction === CodingErrorAction.REPORT) {
// result.throwException();
// }
// utf16CodeUnitsOut.flip();
// codePointBufferBuilder.append(utf16CodeUnitsOut);
// utf8BytesIn.compact();
// utf16CodeUnitsOut.compact();
// }
// // Handle any bytes at the end of the file which need to
// // be represented as errors or substitution characters.
// let flushResult: CoderResult = decoder.flush(utf16CodeUnitsOut);
// if (flushResult.isError() && decodingErrorAction === CodingErrorAction.REPORT) {
// flushResult.throwException();
// }
// utf16CodeUnitsOut.flip();
// codePointBufferBuilder.append(utf16CodeUnitsOut);
// return codePointBufferBuilder.build();
// }
// finally {
// channel.close();
// }
// }
})(CharStreams = exports.CharStreams || (exports.CharStreams = {}));
//# sourceMappingURL=CharStreams.js.map
/***/ }),
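/*
* Usage sketch for CharStreams.fromString, assuming the antlr4ts API as bundled here;
* `MyLexer` is a placeholder for a generated lexer. This is the recommended replacement for
* the deprecated ANTLRInputStream and returns a CodePointCharStream, so code points above
* U+FFFF are seen as single symbols by LA/LT.
*
*   const { CharStreams, CommonTokenStream } = require("antlr4ts");
*   const stream = CharStreams.fromString("some query text", "inline-source");
*   const lexer = new MyLexer(stream);           // placeholder generated lexer
*   const tokens = new CommonTokenStream(lexer);
*/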
/***/ "./node_modules/antlr4ts/CodePointBuffer.js":
/*!**************************************************!*\
!*** ./node_modules/antlr4ts/CodePointBuffer.js ***!
\**************************************************/
/***/ ((__unused_webpack_module, exports, __webpack_require__) => {
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.CodePointBuffer = void 0;
const assert = __webpack_require__(/*! assert */ "./node_modules/assert/build/assert.js");
const Character = __webpack_require__(/*! ./misc/Character */ "./node_modules/antlr4ts/misc/Character.js");
/**
* Wrapper for `Uint8Array` / `Uint16Array` / `Int32Array`.
*/
class CodePointBuffer {
constructor(buffer, size) {
this.buffer = buffer;
this._position = 0;
this._size = size;
}
static withArray(buffer) {
return new CodePointBuffer(buffer, buffer.length);
}
get position() {
return this._position;
}
set position(newPosition) {
if (newPosition < 0 || newPosition > this._size) {
throw new RangeError();
}
this._position = newPosition;
}
get remaining() {
return this._size - this.position;
}
get(offset) {
return this.buffer[offset];
}
array() {
return this.buffer.slice(0, this._size);
}
static builder(initialBufferSize) {
return new CodePointBuffer.Builder(initialBufferSize);
}
}
exports.CodePointBuffer = CodePointBuffer;
(function (CodePointBuffer) {
let Type;
(function (Type) {
Type[Type["BYTE"] = 0] = "BYTE";
Type[Type["CHAR"] = 1] = "CHAR";
Type[Type["INT"] = 2] = "INT";
})(Type || (Type = {}));
class Builder {
constructor(initialBufferSize) {
this.type = 0 /* BYTE */;
this.buffer = new Uint8Array(initialBufferSize);
this.prevHighSurrogate = -1;
this.position = 0;
}
build() {
return new CodePointBuffer(this.buffer, this.position);
}
static roundUpToNextPowerOfTwo(i) {
let nextPowerOfTwo = 32 - Math.clz32(i - 1);
return Math.pow(2, nextPowerOfTwo);
}
ensureRemaining(remainingNeeded) {
switch (this.type) {
case 0 /* BYTE */:
if (this.buffer.length - this.position < remainingNeeded) {
let newCapacity = Builder.roundUpToNextPowerOfTwo(this.buffer.length + remainingNeeded);
let newBuffer = new Uint8Array(newCapacity);
newBuffer.set(this.buffer.subarray(0, this.position), 0);
this.buffer = newBuffer;
}
break;
case 1 /* CHAR */:
if (this.buffer.length - this.position < remainingNeeded) {
let newCapacity = Builder.roundUpToNextPowerOfTwo(this.buffer.length + remainingNeeded);
let newBuffer = new Uint16Array(newCapacity);
newBuffer.set(this.buffer.subarray(0, this.position), 0);
this.buffer = newBuffer;
}
break;
case 2 /* INT */:
if (this.buffer.length - this.position < remainingNeeded) {
let newCapacity = Builder.roundUpToNextPowerOfTwo(this.buffer.length + remainingNeeded);
let newBuffer = new Int32Array(newCapacity);
newBuffer.set(this.buffer.subarray(0, this.position), 0);
this.buffer = newBuffer;
}
break;
}
}
append(utf16In) {
this.ensureRemaining(utf16In.length);
this.appendArray(utf16In);
}
appendArray(utf16In) {
switch (this.type) {
case 0 /* BYTE */:
this.appendArrayByte(utf16In);
break;
case 1 /* CHAR */:
this.appendArrayChar(utf16In);
break;
case 2 /* INT */:
this.appendArrayInt(utf16In);
break;
}
}
appendArrayByte(utf16In) {
assert(this.prevHighSurrogate === -1);
let input = utf16In;
let inOffset = 0;
let inLimit = utf16In.length;
let outByte = this.buffer;
let outOffset = this.position;
while (inOffset < inLimit) {
let c = input[inOffset];
if (c <= 0xFF) {
outByte[outOffset] = c;
}
else {
utf16In = utf16In.subarray(inOffset, inLimit);
this.position = outOffset;
if (!Character.isHighSurrogate(c)) {
this.byteToCharBuffer(utf16In.length);
this.appendArrayChar(utf16In);
return;
}
else {
this.byteToIntBuffer(utf16In.length);
this.appendArrayInt(utf16In);
return;
}
}
inOffset++;
outOffset++;
}
this.position = outOffset;
}
appendArrayChar(utf16In) {
assert(this.prevHighSurrogate === -1);
let input = utf16In;
let inOffset = 0;
let inLimit = utf16In.length;
let outChar = this.buffer;
let outOffset = this.position;
while (inOffset < inLimit) {
let c = input[inOffset];
if (!Character.isHighSurrogate(c)) {
outChar[outOffset] = c;
}
else {
utf16In = utf16In.subarray(inOffset, inLimit);
this.position = outOffset;
this.charToIntBuffer(utf16In.length);
this.appendArrayInt(utf16In);
return;
}
inOffset++;
outOffset++;
}
this.position = outOffset;
}
appendArrayInt(utf16In) {
let input = utf16In;
let inOffset = 0;
let inLimit = utf16In.length;
let outInt = this.buffer;
let outOffset = this.position;
while (inOffset < inLimit) {
let c = input[inOffset];
inOffset++;
if (this.prevHighSurrogate !== -1) {
if (Character.isLowSurrogate(c)) {
outInt[outOffset] = String.fromCharCode(this.prevHighSurrogate, c).codePointAt(0);
outOffset++;
this.prevHighSurrogate = -1;
}
else {
// Dangling high surrogate
outInt[outOffset] = this.prevHighSurrogate;
outOffset++;
if (Character.isHighSurrogate(c)) {
this.prevHighSurrogate = c;
}
else {
outInt[outOffset] = c;
outOffset++;
this.prevHighSurrogate = -1;
}
}
}
else if (Character.isHighSurrogate(c)) {
this.prevHighSurrogate = c;
}
else {
outInt[outOffset] = c;
outOffset++;
}
}
if (this.prevHighSurrogate !== -1) {
// Dangling high surrogate
outInt[outOffset] = this.prevHighSurrogate;
outOffset++;
}
this.position = outOffset;
}
byteToCharBuffer(toAppend) {
// CharBuffers hold twice as much per unit as ByteBuffers, so start with half the capacity.
let newBuffer = new Uint16Array(Math.max(this.position + toAppend, this.buffer.length >> 1));
newBuffer.set(this.buffer.subarray(0, this.position), 0);
this.type = 1 /* CHAR */;
this.buffer = newBuffer;
}
byteToIntBuffer(toAppend) {
// IntBuffers hold four times as much per unit as ByteBuffers, so start with one quarter the capacity.
let newBuffer = new Int32Array(Math.max(this.position + toAppend, this.buffer.length >> 2));
newBuffer.set(this.buffer.subarray(0, this.position), 0);
this.type = 2 /* INT */;
this.buffer = newBuffer;
}
charToIntBuffer(toAppend) {
// IntBuffers hold two times as much per unit as CharBuffers, so start with half the capacity.
let newBuffer = new Int32Array(Math.max(this.position + toAppend, this.buffer.length >> 1));
newBuffer.set(this.buffer.subarray(0, this.position), 0);
this.type = 2 /* INT */;
this.buffer = newBuffer;
}
}
CodePointBuffer.Builder = Builder;
})(CodePointBuffer = exports.CodePointBuffer || (exports.CodePointBuffer = {}));
//# sourceMappingURL=CodePointBuffer.js.map
/***/ }),
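/*
* Usage sketch for CodePointBuffer, assuming the antlr4ts API as bundled here (the require
* path mirrors the module path in this bundle). The builder starts as a Uint8Array and
* widens to Uint16Array / Int32Array when it meets characters above 0xFF or surrogate pairs;
* CharStreams.fromString above is the usual entry point rather than building one by hand.
*
*   const { CodePointBuffer } = require("antlr4ts/CodePointBuffer");
*   const text = "abc";
*   const units = new Uint16Array(text.length);
*   for (let i = 0; i < text.length; i++) { units[i] = text.charCodeAt(i); }
*   const builder = CodePointBuffer.builder(text.length);
*   builder.append(units);
*   const buffer = builder.build();              // buffer.get(0) === 0x61, buffer.remaining === 3
*/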
/***/ "./node_modules/antlr4ts/CodePointCharStream.js":
/*!******************************************************!*\
!*** ./node_modules/antlr4ts/CodePointCharStream.js ***!
\******************************************************/
/***/ (function(__unused_webpack_module, exports, __webpack_require__) {
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.CodePointCharStream = void 0;
const assert = __webpack_require__(/*! assert */ "./node_modules/assert/build/assert.js");
const IntStream_1 = __webpack_require__(/*! ./IntStream */ "./node_modules/antlr4ts/IntStream.js");
const Interval_1 = __webpack_require__(/*! ./misc/Interval */ "./node_modules/antlr4ts/misc/Interval.js");
const Decorators_1 = __webpack_require__(/*! ./Decorators */ "./node_modules/antlr4ts/Decorators.js");
/**
* Alternative to {@link ANTLRInputStream} which treats the input
* as a series of Unicode code points, instead of a series of UTF-16
* code units.
*
* Use this if you need to parse input which potentially contains
* Unicode values > U+FFFF.
*/
class CodePointCharStream {
// Use the factory method {@link #fromBuffer(CodePointBuffer)} to
// construct instances of this type.
constructor(array, position, remaining, name) {
// TODO