dt-sql-parser

SQL Parsers for BigData, built with antlr4

import { CandidatesCollection } from 'antlr4-c3';
import { CharStream, CommonTokenStream, Token } from 'antlr4ng';
import { SparkSqlLexer } from '../../lib/spark/SparkSqlLexer';
import { ProgramContext, SparkSqlParser } from '../../lib/spark/SparkSqlParser';
import { BasicSQL } from '../common/basicSQL';
import { ErrorListener } from '../common/parseErrorListener';
import { Suggestions } from '../common/types';
import { SparkEntityCollector } from './sparkEntityCollector';
import { SparkErrorListener } from './sparkErrorListener';
import { SparkSqlSplitListener } from './sparkSplitListener';

export { SparkEntityCollector, SparkSqlSplitListener };

export declare class SparkSQL extends BasicSQL<SparkSqlLexer, ProgramContext, SparkSqlParser> {
    /** Creates a SparkSqlLexer over the given character stream. */
    protected createLexerFromCharStream(charStreams: CharStream): SparkSqlLexer;
    /** Creates a SparkSqlParser over the given token stream. */
    protected createParserFromTokenStream(tokenStream: CommonTokenStream): SparkSqlParser;
    /** Parser rule indexes that antlr4-c3 should treat as preferred completion candidates. */
    protected preferredRules: Set<number>;
    /** Listener used to split multi-statement input into individual statements. */
    protected get splitListener(): SparkSqlSplitListener;
    /** Creates the Spark-specific syntax error listener wrapping the supplied one. */
    protected createErrorListener(_errorListener: ErrorListener): SparkErrorListener;
    /** Creates the collector that gathers entities (tables, columns, etc.) from the parsed input. */
    protected createEntityCollector(input: string, allTokens?: Token[], caretTokenIndex?: number): SparkEntityCollector;
    /** Converts antlr4-c3 candidates at the caret into keyword and syntax suggestions. */
    protected processCandidates(candidates: CandidatesCollection, allTokens: Token[], caretTokenIndex: number, tokenIndexOffset: number): Suggestions<Token>;
}
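
The protected members above are internal hooks; consumers normally work with the public methods inherited from BasicSQL. Below is a minimal usage sketch, assuming the package entry point re-exports SparkSQL and that BasicSQL exposes validate() and getSuggestionAtCaretPosition() as described in the dt-sql-parser README; verify both against the version you install.

// Hedged sketch: import path and method names are assumptions based on the
// dt-sql-parser documentation, not taken from this declaration file.
import { SparkSQL } from 'dt-sql-parser';

const spark = new SparkSQL();

// Syntax validation: expected to return a list of parse errors
// (empty when the SQL is valid).
const errors = spark.validate('SELECT id, name FROM users;');
console.log(errors);

// Code completion: suggestions at a caret position (1-based line/column).
const suggestions = spark.getSuggestionAtCaretPosition(
    'SELECT id FROM ',
    { lineNumber: 1, column: 16 }
);
console.log(suggestions?.keywords, suggestions?.syntax);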