dt-sql-parser
SQL Parsers for BigData, built with antlr4
import { processTokenCandidates } from '../common/tokenUtils';
import { SparkSqlLexer } from '../../lib/spark/SparkSqlLexer';
import { SparkSqlParser } from '../../lib/spark/SparkSqlParser';
import { BasicSQL } from '../common/basicSQL';
import { EntityContextType } from '../common/types';
import { SparkEntityCollector } from './sparkEntityCollector';
import { SparkErrorListener } from './sparkErrorListener';
import { SparkSqlSplitListener } from './sparkSplitListener';
import { SparkSemanticContextCollector } from './sparkSemanticContextCollector';
export { SparkEntityCollector, SparkSqlSplitListener };
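/**
 * Spark SQL implementation of BasicSQL: wires the ANTLR-generated
 * SparkSqlLexer/SparkSqlParser into the shared parsing pipeline and maps
 * completion candidates onto Spark entity contexts (database, table,
 * view, function, column).
 */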
export class SparkSQL extends BasicSQL {
constructor() {
super(...arguments);
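// Parser rules whose completion candidates are surfaced as entity
// suggestions (rather than plain keywords) in processCandidates below.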
this.preferredRules = new Set([
SparkSqlParser.RULE_namespaceName,
SparkSqlParser.RULE_namespaceNameCreate,
SparkSqlParser.RULE_tableName,
SparkSqlParser.RULE_tableNameCreate,
SparkSqlParser.RULE_viewName,
SparkSqlParser.RULE_viewNameCreate,
SparkSqlParser.RULE_functionName,
SparkSqlParser.RULE_functionNameCreate,
SparkSqlParser.RULE_columnName,
SparkSqlParser.RULE_columnNamePath,
SparkSqlParser.RULE_columnNameCreate,
]);
}
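// Factory hooks consumed by the BasicSQL base class to build the lexer,
// parser, listeners, and collectors.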
createLexerFromCharStream(charStreams) {
return new SparkSqlLexer(charStreams);
}
createParserFromTokenStream(tokenStream) {
return new SparkSqlParser(tokenStream);
}
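// Returns a fresh listener on each access; presumably used by BasicSQL
// to split multi-statement input into individual statements.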
get splitListener() {
return new SparkSqlSplitListener();
}
createErrorListener(_errorListener) {
const parserContext = this;
return new SparkErrorListener(_errorListener, parserContext, this.preferredRules);
}
createEntityCollector(input, allTokens, caretTokenIndex) {
return new SparkEntityCollector(input, allTokens, caretTokenIndex);
}
createSemanticContextCollector(input, caretPosition, allTokens, options) {
return new SparkSemanticContextCollector(input, caretPosition, allTokens, options);
}
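/**
 * Translate candidate parser rules from the code-completion engine into
 * entity-context suggestions, and candidate tokens into keyword
 * suggestions.
 */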
processCandidates(candidates, allTokens, caretTokenIndex) {
const originalSyntaxSuggestions = [];
for (const candidate of candidates.rules) {
const [ruleType, candidateRule] = candidate;
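// Tokens spanning from the start of this candidate rule through the caret token.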
const tokenRanges = allTokens.slice(candidateRule.startTokenIndex, caretTokenIndex + 1);
let syntaxContextType;
switch (ruleType) {
case SparkSqlParser.RULE_namespaceName: {
syntaxContextType = EntityContextType.DATABASE;
break;
}
case SparkSqlParser.RULE_namespaceNameCreate: {
syntaxContextType = EntityContextType.DATABASE_CREATE;
break;
}
case SparkSqlParser.RULE_tableName: {
syntaxContextType = EntityContextType.TABLE;
break;
}
case SparkSqlParser.RULE_tableNameCreate: {
syntaxContextType = EntityContextType.TABLE_CREATE;
break;
}
case SparkSqlParser.RULE_viewName: {
syntaxContextType = EntityContextType.VIEW;
break;
}
case SparkSqlParser.RULE_viewNameCreate: {
syntaxContextType = EntityContextType.VIEW_CREATE;
break;
}
case SparkSqlParser.RULE_functionName: {
syntaxContextType = EntityContextType.FUNCTION;
break;
}
case SparkSqlParser.RULE_functionNameCreate: {
syntaxContextType = EntityContextType.FUNCTION_CREATE;
break;
}
case SparkSqlParser.RULE_columnName: {
syntaxContextType = EntityContextType.COLUMN;
break;
}
case SparkSqlParser.RULE_columnNameCreate: {
syntaxContextType = EntityContextType.COLUMN_CREATE;
break;
}
case SparkSqlParser.RULE_columnNamePath: {
// A column-name path only maps to a COLUMN context when it appears
// inside a clause that references existing columns.
const columnReferencingClauses = [
SparkSqlParser.RULE_whenClause,
SparkSqlParser.RULE_whereClause,
SparkSqlParser.RULE_joinRelation,
SparkSqlParser.RULE_orderOrSortByClause,
SparkSqlParser.RULE_groupByClause,
SparkSqlParser.RULE_aggregationClause,
SparkSqlParser.RULE_havingClause,
SparkSqlParser.RULE_windowClause,
SparkSqlParser.RULE_selectClause,
SparkSqlParser.RULE_limitClause,
SparkSqlParser.RULE_clusterOrDistributeBy,
];
if (columnReferencingClauses.some((rule) => candidateRule.ruleList.includes(rule))) {
syntaxContextType = EntityContextType.COLUMN;
}
break;
}
default:
break;
}
if (syntaxContextType) {
originalSyntaxSuggestions.push({
syntaxContextType,
wordRanges: tokenRanges,
});
}
}
const keywords = processTokenCandidates(this._parser, candidates.tokens);
return {
syntax: originalSyntaxSuggestions,
keywords,
};
}
}
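// A minimal usage sketch, not part of this module. It assumes BasicSQL
// exposes `validate` and `getSuggestionAtCaretPosition` as in
// dt-sql-parser's documented public API; the method names are an
// assumption here, not confirmed by this file:
//
//   const spark = new SparkSQL();
//   const errors = spark.validate('SELECT id FROM users;');
//   const suggestions = spark.getSuggestionAtCaretPosition(
//       'SELECT  FROM users;',
//       { lineNumber: 1, column: 8 }
//   );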