antlr4ts
ANTLR 4 runtime for JavaScript written in TypeScript
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ATNState } from "./ATNState";
import { ATNType } from "./ATNType";
import { DecisionState } from "./DecisionState";
import { DFA } from "../dfa/DFA";
import { IntervalSet } from "../misc/IntervalSet";
import { LexerAction } from "./LexerAction";
import { PredictionContext } from "./PredictionContext";
import { RuleContext } from "../RuleContext";
import { RuleStartState } from "./RuleStartState";
import { RuleStopState } from "./RuleStopState";
import { TokensStartState } from "./TokensStartState";
/** Runtime representation of an augmented transition network (ATN) built from a grammar. */
export declare class ATN {
readonly states: ATNState[];
/** Each subrule/rule is a decision point and we must track them so we
* can go back later and build DFA predictors for them. This includes
* all the rules, subrules, optional blocks, ()+, ()* etc...
*/
decisionToState: DecisionState[];
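/*
 * Usage sketch (illustrative, not part of this declaration): enumerate the
 * decision points of an existing ATN. Here `atn` is assumed to be an `ATN`
 * instance obtained from a generated recognizer; only members declared in
 * this file are used.
 *
 *     for (let d = 0; d < atn.numberOfDecisions; d++) {
 *         const decision = atn.getDecisionState(d); // same entry as atn.decisionToState[d]
 *     }
 */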
/**
* Maps from rule index to starting state number.
*/
ruleToStartState: RuleStartState[];
/**
* Maps from rule index to stop state number.
*/
ruleToStopState: RuleStopState[];
modeNameToStartState: Map<string, TokensStartState>;
/**
* The type of the ATN.
*/
grammarType: ATNType;
/**
* The maximum value for any symbol recognized by a transition in the ATN.
*/
maxTokenType: number;
/**
* For lexer ATNs, this maps the rule index to the resulting token type.
* For parser ATNs, this maps the rule index to the generated bypass token
* type if the
* {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions}
* deserialization option was specified; otherwise, this is `undefined`.
*/
ruleToTokenType: Int32Array;
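/*
 * Usage sketch (illustrative, not part of this declaration): in a lexer ATN,
 * look up the token type emitted by a given lexer rule. `lexerAtn` and
 * `ruleIndex` are assumed values rather than names defined in this file.
 *
 *     const tokenType: number = lexerAtn.ruleToTokenType[ruleIndex];
 */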
/**
* For lexer ATNs, this is an array of {@link LexerAction} objects which may
* be referenced by action transitions in the ATN.
*/
lexerActions: LexerAction[];
modeToStartState: TokensStartState[];
private contextCache;
decisionToDFA: DFA[];
modeToDFA: DFA[];
LL1Table: Map<number, number>;
/** Used for runtime deserialization of ATNs from strings */
constructor(grammarType: ATNType, maxTokenType: number);
clearDFA(): void;
get contextCacheSize(): number;
getCachedContext(context: PredictionContext): PredictionContext;
getDecisionToDFA(): DFA[];
/** Compute the set of valid tokens that can occur starting in state `s`.
* If `ctx` is {@link PredictionContext#EMPTY_LOCAL}, the set of tokens will not include what can follow
* the rule surrounding `s`. In other words, the set will be
* restricted to tokens reachable staying within `s`'s rule.
*/
nextTokens(s: ATNState, /*@NotNull*/ ctx: PredictionContext): IntervalSet;
/**
* Compute the set of valid tokens that can occur starting in `s` and
* staying in same rule. {@link Token#EPSILON} is in set if we reach end of
* rule.
*/
nextTokens(/*@NotNull*/ s: ATNState): IntervalSet;
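/*
 * Usage sketch (illustrative, not part of this declaration): compute the token
 * set that can start a rule. `atn`, `ruleIndex`, and `callerCtx` are assumed
 * values; `callerCtx` stands for a `PredictionContext` describing the call stack.
 *
 *     const start = atn.ruleToStartState[ruleIndex];
 *     const withinRule = atn.nextTokens(start);            // stays inside the rule; may contain Token.EPSILON
 *     const withFollow = atn.nextTokens(start, callerCtx); // also includes what can follow via the context
 */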
addState(state: ATNState): void;
removeState(state: ATNState): void;
defineMode(name: string, s: TokensStartState): void;
defineDecisionState(s: DecisionState): number;
getDecisionState(decision: number): DecisionState | undefined;
get numberOfDecisions(): number;
/**
* Computes the set of input symbols which could follow ATN state number
* `stateNumber` in the specified full `context`. This method
* considers the complete parser context, but does not evaluate semantic
* predicates (i.e. all predicates encountered during the calculation are
* assumed true). If a path in the ATN exists from the starting state to the
* {@link RuleStopState} of the outermost context without matching any
* symbols, {@link Token#EOF} is added to the returned set.
*
* If `context` is `undefined`, it is treated as
* {@link ParserRuleContext#EMPTY}.
*
* Note that this does NOT give you the set of all tokens that could
* appear at a given token position in the input phrase. In other words, it
* does not answer:
*
* > Given a specific partial input phrase, return the set of all
* > tokens that can follow the last token in the input phrase.
*
* The big difference is that with just the input, the parser could land
* right in the middle of a lookahead decision. Getting all
* *possible* tokens given a partial input stream is a separate
* computation. See https://github.com/antlr/antlr4/issues/1428
*
* For this function, we are specifying an ATN state and call stack to
* compute what token(s) can come next and specifically: outside of a
* lookahead decision. That is what you want for error reporting and
* recovery upon parse error.
*
* @param stateNumber the ATN state number
* @param context the full parse context
* @returns The set of potentially valid input symbols which could follow the
* specified state in the specified context.
* @throws if the ATN does not contain a state with
* number `stateNumber`
*/
getExpectedTokens(stateNumber: number, context: RuleContext | undefined): IntervalSet;
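/*
 * Usage sketch (illustrative, not part of this declaration): during error
 * reporting, ask which tokens would have been valid at a given ATN state in
 * the full parse context. `atn`, `stateNumber`, and `ctx` are assumed values,
 * and `Token.EOF` / `IntervalSet.contains` are assumed to behave as in the
 * other ANTLR runtimes.
 *
 *     const expected = atn.getExpectedTokens(stateNumber, ctx);
 *     if (expected.contains(Token.EOF)) {
 *         // the outermost context can end at this point
 *     }
 */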
}
export declare namespace ATN {
const INVALID_ALT_NUMBER: number;
}