dt-sql-parser

SQL Parsers for BigData, built with antlr4

import { processTokenCandidates } from '../common/tokenUtils';
import { PostgreSqlLexer } from '../../lib/postgresql/PostgreSqlLexer';
import { PostgreSqlParser } from '../../lib/postgresql/PostgreSqlParser';
import { EntityContextType } from '../common/types';
import { BasicSQL } from '../common/basicSQL';
import { PostgreSqlEntityCollector } from './postgreEntityCollector';
import { PostgreSqlErrorListener } from './postgreErrorListener';
import { PostgreSqlSplitListener } from './postgreSplitListener';
import { PostgreSemanticContextCollector } from './postgreSemanticContextCollector';
export { PostgreSqlEntityCollector, PostgreSqlSplitListener };
export class PostgreSQL extends BasicSQL {
    constructor() {
        super(...arguments);
        /**
         * The rules within which keywords should not be suggested.
         */
        this.excludeKeywordRules = new Set([
            PostgreSqlParser.RULE_nonReservedWord,
            PostgreSqlParser.RULE_identifier,
            PostgreSqlParser.RULE_reservedKeyword,
            PostgreSqlParser.RULE_typeFuncNameKeyword,
            PostgreSqlParser.RULE_colNameKeyword,
        ]);
        /**
         * The rules that should be collected as syntax suggestion contexts.
         */
        this.preferredRules = new Set([
            PostgreSqlParser.RULE_tableNameCreate, // table name that will be created
            PostgreSqlParser.RULE_tableName, // table name
            PostgreSqlParser.RULE_functionName, // function name
            PostgreSqlParser.RULE_functionNameCreate, // function name that will be created
            PostgreSqlParser.RULE_schemaNameCreate, // schema name that will be created
            PostgreSqlParser.RULE_schemaName, // schema name
            PostgreSqlParser.RULE_viewNameCreate, // view name that will be created
            PostgreSqlParser.RULE_viewName, // view name
            PostgreSqlParser.RULE_databaseNameCreate, // database name that will be created
            PostgreSqlParser.RULE_databaseName, // database name
            PostgreSqlParser.RULE_procedureNameCreate, // procedure name that will be created
            PostgreSqlParser.RULE_procedureName, // procedure name
            PostgreSqlParser.RULE_columnNameCreate, // column name that will be created
            PostgreSqlParser.RULE_columnName, // column name
            PostgreSqlParser.RULE_columnNamePath, // column name path
            ...this.excludeKeywordRules,
        ]);
    }
    createLexerFromCharStream(charStreams) {
        return new PostgreSqlLexer(charStreams);
    }
    createParserFromTokenStream(tokenStream) {
        return new PostgreSqlParser(tokenStream);
    }
    get splitListener() {
        return new PostgreSqlSplitListener();
    }
    createErrorListener(_errorListener) {
        const parserContext = this;
        return new PostgreSqlErrorListener(_errorListener, parserContext, this.preferredRules);
    }
    createEntityCollector(input, allTokens, caretTokenIndex) {
        return new PostgreSqlEntityCollector(input, allTokens, caretTokenIndex);
    }
    createSemanticContextCollector(input, caretPosition, allTokens, options) {
        return new PostgreSemanticContextCollector(input, caretPosition, allTokens, options);
    }
    /**
     * Map candidate rules to entity syntax suggestions and candidate tokens to keywords.
     */
    processCandidates(candidates, allTokens, caretTokenIndex) {
        const originalSyntaxSuggestions = [];
        const keywords = [];
        for (let candidate of candidates.rules) {
            const [ruleType, candidateRule] = candidate;
            const tokenRanges = allTokens.slice(candidateRule.startTokenIndex, caretTokenIndex + 1);
            let syntaxContextType = void 0;
            switch (ruleType) {
                case PostgreSqlParser.RULE_tableNameCreate: {
                    syntaxContextType = EntityContextType.TABLE_CREATE;
                    break;
                }
                case PostgreSqlParser.RULE_tableName: {
                    syntaxContextType = EntityContextType.TABLE;
                    break;
                }
                case PostgreSqlParser.RULE_functionNameCreate: {
                    syntaxContextType = EntityContextType.FUNCTION_CREATE;
                    break;
                }
                case PostgreSqlParser.RULE_functionName: {
                    syntaxContextType = EntityContextType.FUNCTION;
                    break;
                }
                // Schema names are surfaced as database contexts.
                case PostgreSqlParser.RULE_schemaNameCreate: {
                    syntaxContextType = EntityContextType.DATABASE_CREATE;
                    break;
                }
                case PostgreSqlParser.RULE_schemaName: {
                    syntaxContextType = EntityContextType.DATABASE;
                    break;
                }
                case PostgreSqlParser.RULE_viewNameCreate: {
                    syntaxContextType = EntityContextType.VIEW_CREATE;
                    break;
                }
                case PostgreSqlParser.RULE_viewName: {
                    syntaxContextType = EntityContextType.VIEW;
                    break;
                }
                case PostgreSqlParser.RULE_databaseNameCreate: {
                    syntaxContextType = EntityContextType.DATABASE_CREATE;
                    break;
                }
                case PostgreSqlParser.RULE_databaseName: {
                    syntaxContextType = EntityContextType.DATABASE;
                    break;
                }
                case PostgreSqlParser.RULE_procedureNameCreate: {
                    syntaxContextType = EntityContextType.PROCEDURE_CREATE;
                    break;
                }
                case PostgreSqlParser.RULE_procedureName: {
                    syntaxContextType = EntityContextType.PROCEDURE;
                    break;
                }
                case PostgreSqlParser.RULE_columnNameCreate: {
                    syntaxContextType = EntityContextType.COLUMN_CREATE;
                    break;
                }
                case PostgreSqlParser.RULE_columnName: {
                    syntaxContextType = EntityContextType.COLUMN;
                    break;
                }
                // Only suggest columns when the path occurs inside a clause
                // that expects a bare column reference.
                case PostgreSqlParser.RULE_columnNamePath: {
                    if (candidateRule.ruleList.includes(PostgreSqlParser.RULE_groupClause) ||
                        candidateRule.ruleList.includes(PostgreSqlParser.RULE_sortClause) ||
                        candidateRule.ruleList.includes(PostgreSqlParser.RULE_limitClause) ||
                        candidateRule.ruleList.includes(PostgreSqlParser.RULE_whereClause) ||
                        candidateRule.ruleList.includes(PostgreSqlParser.RULE_havingClause) ||
                        candidateRule.ruleList.includes(PostgreSqlParser.RULE_windowClause) ||
                        candidateRule.ruleList.includes(PostgreSqlParser.RULE_triggerWhen)) {
                        syntaxContextType = EntityContextType.COLUMN;
                    }
                    break;
                }
                default:
                    break;
            }
            // Skip duplicate suggestions that share the same context type and word text.
            if (syntaxContextType &&
                !originalSyntaxSuggestions.some((syn) => {
                    var _a, _b;
                    return syn.syntaxContextType === syntaxContextType &&
                        ((_a = syn.wordRanges.map((wordRange) => wordRange.text)) === null || _a === void 0 ? void 0 : _a.join(',')) ===
                            ((_b = tokenRanges.map((tokenRange) => tokenRange.text)) === null || _b === void 0 ? void 0 : _b.join(','));
                })) {
                originalSyntaxSuggestions.push({
                    syntaxContextType,
                    wordRanges: tokenRanges,
                });
            }
        }
        const processedKeywords = processTokenCandidates(this._parser, candidates.tokens);
        keywords.push(...processedKeywords);
        return {
            syntax: originalSyntaxSuggestions,
            keywords,
        };
    }
}
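
For context, here is a minimal usage sketch of the class defined above. It assumes the package's root entry re-exports PostgreSQL and that the BasicSQL base class provides validate and getSuggestionAtCaretPosition, as it does for the other dt-sql-parser dialects; treat the exact method names and the caret-position shape as assumptions to verify against the installed version.

// Usage sketch only, not part of the published file above.
// Assumption: the root entry of dt-sql-parser re-exports PostgreSQL.
import { PostgreSQL } from 'dt-sql-parser';

const pgsql = new PostgreSQL();

// Syntax validation: returns a list of parse errors (empty when the SQL parses cleanly).
const errors = pgsql.validate('SELECT id FROM users;');
console.log(errors); // => []

// Code completion: collect suggestions at a caret position (1-based lineNumber/column).
// The result shape matches the { syntax, keywords } object built by processCandidates above.
const sql = 'SELECT  FROM users;';
const suggestions = pgsql.getSuggestionAtCaretPosition(sql, { lineNumber: 1, column: 8 });
console.log(suggestions?.syntax);   // entity contexts, e.g. a COLUMN suggestion in the select list
console.log(suggestions?.keywords); // candidate keyword strings

The caret in the example sits in the empty select list, so the syntax suggestions are expected to include a column context, while the keyword list comes from processTokenCandidates over the token candidates.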