/**
 * dt-sql-parser — SQL parsers for BigData, built with antlr4.
 * Declaration file for the Spark SQL dialect entry point.
 */
import { CandidatesCollection } from 'antlr4-c3';
import { CharStream, CommonTokenStream, Token } from 'antlr4ng';
import { SparkSqlLexer } from '../../lib/spark/SparkSqlLexer';
import { ProgramContext, SparkSqlParser } from '../../lib/spark/SparkSqlParser';
import { BasicSQL } from '../common/basicSQL';
import { Suggestions, CaretPosition, SemanticCollectOptions } from '../common/types';
import { ErrorListener } from '../common/parseErrorListener';
import { SparkEntityCollector } from './sparkEntityCollector';
import { SparkErrorListener } from './sparkErrorListener';
import { SparkSqlSplitListener } from './sparkSplitListener';
import { SparkSemanticContextCollector } from './sparkSemanticContextCollector';
export { SparkEntityCollector, SparkSqlSplitListener };
/**
 * Spark SQL dialect entry point.
 *
 * Specializes the generic `BasicSQL` engine with the ANTLR-generated
 * `SparkSqlLexer` / `SparkSqlParser` pair and the Spark-specific
 * listener/collector implementations imported above. This is a
 * declaration file — method bodies live in the implementation module.
 */
export declare class SparkSQL extends BasicSQL<SparkSqlLexer, ProgramContext, SparkSqlParser> {
/** Builds a `SparkSqlLexer` over the given character stream. */
protected createLexerFromCharStream(charStreams: CharStream): SparkSqlLexer;
/** Builds a `SparkSqlParser` over the given token stream. */
protected createParserFromTokenStream(tokenStream: CommonTokenStream): SparkSqlParser;
/** Parser rule indices preferred for code-completion candidates (antlr4-c3). */
protected preferredRules: Set<number>;
/** Listener used to split input into individual SQL statements. */
protected get splitListener(): SparkSqlSplitListener;
/** Wraps the generic `ErrorListener` in a Spark-specific error listener. */
protected createErrorListener(_errorListener: ErrorListener): SparkErrorListener;
/**
 * Creates the collector that extracts entities (presumably tables/columns
 * and similar — see `SparkEntityCollector`) from `input`, optionally
 * scoped by pre-lexed tokens and a caret token index.
 */
protected createEntityCollector(input: string, allTokens?: Token[], caretTokenIndex?: number): SparkEntityCollector;
/** Creates the collector that derives semantic context at `caretPosition`. */
protected createSemanticContextCollector(input: string, caretPosition: CaretPosition, allTokens: Token[], options?: SemanticCollectOptions): SparkSemanticContextCollector;
/** Converts antlr4-c3 completion candidates into `Suggestions` for the caret token. */
protected processCandidates(candidates: CandidatesCollection, allTokens: Token[], caretTokenIndex: number): Suggestions<Token>;
}