// sucrase: Super-fast alternative to Babel for when you can target modern JS runtimes
// TokenProcessor (compiled CommonJS output)
"use strict";Object.defineProperty(exports, "__esModule", {value: true});
var _types = require('./parser/tokenizer/types');
class TokenProcessor {
  constructor(code, tokens, isFlowEnabled) {
    this.code = code;
    this.tokens = tokens;
    this.isFlowEnabled = isFlowEnabled;
    // State that the compiled __init helpers initialized in the original
    // output: the emitted code so far and the index of the next token.
    this.resultCode = "";
    this.tokenIndex = 0;
  }
  /**
   * Snapshot the token state (current position and emitted code) so that it
   * can be restored later, e.g. for lookahead.
   */
snapshot() {
return {resultCode: this.resultCode, tokenIndex: this.tokenIndex};
}
restoreToSnapshot(snapshot) {
this.resultCode = snapshot.resultCode;
this.tokenIndex = snapshot.tokenIndex;
}
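
  // Illustrative lookahead sketch (not part of this file): speculatively
  // advance, then rewind if the expected token isn't there.
  //
  //   const snapshot = tokens.snapshot();
  //   tokens.nextToken();
  //   if (!tokens.matches1(_types.TokenType.parenL)) {
  //     tokens.restoreToSnapshot(snapshot);
  //   }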
getResultCodeIndex() {
return this.resultCode.length;
}
reset() {
this.resultCode = "";
this.tokenIndex = 0;
}
matchesContextualAtIndex(index, contextualKeyword) {
return (
this.matches1AtIndex(index, _types.TokenType.name) &&
this.tokens[index].contextualKeyword === contextualKeyword
);
}
identifierNameAtIndex(index) {
// TODO: We need to process escapes since technically you can have unicode escapes in variable
// names.
return this.identifierNameForToken(this.tokens[index]);
}
identifierName() {
return this.identifierNameForToken(this.currentToken());
}
identifierNameForToken(token) {
return this.code.slice(token.start, token.end);
}
rawCodeForToken(token) {
return this.code.slice(token.start, token.end);
}
stringValueAtIndex(index) {
return this.stringValueForToken(this.tokens[index]);
}
stringValue() {
return this.stringValueForToken(this.currentToken());
}
stringValueForToken(token) {
  // This is used to identify when two imports are the same and to resolve TypeScript enum keys.
  // Ideally we'd process escapes within the strings, but for now we just use the raw source text
  // between the quotes.
return this.code.slice(token.start + 1, token.end - 1);
}
matches1AtIndex(index, t1) {
return this.tokens[index].type === t1;
}
matches2AtIndex(index, t1, t2) {
return this.tokens[index].type === t1 && this.tokens[index + 1].type === t2;
}
matches3AtIndex(index, t1, t2, t3) {
return (
this.tokens[index].type === t1 &&
this.tokens[index + 1].type === t2 &&
this.tokens[index + 2].type === t3
);
}
matches1(t1) {
return this.tokens[this.tokenIndex].type === t1;
}
matches2(t1, t2) {
return this.tokens[this.tokenIndex].type === t1 && this.tokens[this.tokenIndex + 1].type === t2;
}
matches3(t1, t2, t3) {
return (
this.tokens[this.tokenIndex].type === t1 &&
this.tokens[this.tokenIndex + 1].type === t2 &&
this.tokens[this.tokenIndex + 2].type === t3
);
}
matches4(t1, t2, t3, t4) {
return (
this.tokens[this.tokenIndex].type === t1 &&
this.tokens[this.tokenIndex + 1].type === t2 &&
this.tokens[this.tokenIndex + 2].type === t3 &&
this.tokens[this.tokenIndex + 3].type === t4
);
}
matches5(t1, t2, t3, t4, t5) {
return (
this.tokens[this.tokenIndex].type === t1 &&
this.tokens[this.tokenIndex + 1].type === t2 &&
this.tokens[this.tokenIndex + 2].type === t3 &&
this.tokens[this.tokenIndex + 3].type === t4 &&
this.tokens[this.tokenIndex + 4].type === t5
);
}
matchesContextual(contextualKeyword) {
return this.matchesContextualAtIndex(this.tokenIndex, contextualKeyword);
}
matchesContextIdAndLabel(type, contextId) {
return this.matches1(type) && this.currentToken().contextId === contextId;
}
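
  // Illustrative only: the matchesN helpers test the types of the next N
  // tokens in order, so something like
  //   tokens.matches2(_types.TokenType._export, _types.TokenType._default)
  // would detect `export default` at the current position. The exact
  // TokenType member names are assumptions about ./parser/tokenizer/types.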
previousWhitespaceAndComments() {
let whitespaceAndComments = this.code.slice(
this.tokenIndex > 0 ? this.tokens[this.tokenIndex - 1].end : 0,
this.tokenIndex < this.tokens.length ? this.tokens[this.tokenIndex].start : this.code.length,
);
    if (this.isFlowEnabled) {
      // Strip the @flow pragma so the emitted code isn't treated as Flow.
      whitespaceAndComments = whitespaceAndComments.replace("@flow", "");
    }
return whitespaceAndComments;
}
replaceToken(newCode) {
this.resultCode += this.previousWhitespaceAndComments();
this.resultCode += newCode;
this.tokenIndex++;
}
  replaceTokenTrimmingLeftWhitespace(newCode) {
    // Blank out everything except line breaks in the preceding whitespace and
    // comments so that output line numbers still match the input.
    this.resultCode += this.previousWhitespaceAndComments().replace(/[^\r\n]/g, "");
    this.resultCode += newCode;
    this.tokenIndex++;
  }
removeInitialToken() {
this.replaceToken("");
}
removeToken() {
this.replaceTokenTrimmingLeftWhitespace("");
}
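
  // Hypothetical example of the difference: when removing `bar` from
  // `foo bar`, removeInitialToken() keeps the preceding space ("foo "),
  // while removeToken() strips it too ("foo"); only line breaks survive
  // the trim, preserving line numbers.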
copyExpectedToken(tokenType) {
if (this.tokens[this.tokenIndex].type !== tokenType) {
throw new Error(`Expected token ${tokenType}`);
}
this.copyToken();
}
copyToken() {
this.resultCode += this.previousWhitespaceAndComments();
this.resultCode += this.code.slice(
this.tokens[this.tokenIndex].start,
this.tokens[this.tokenIndex].end,
);
this.tokenIndex++;
}
copyTokenWithPrefix(prefix) {
this.resultCode += this.previousWhitespaceAndComments();
this.resultCode += prefix;
this.resultCode += this.code.slice(
this.tokens[this.tokenIndex].start,
this.tokens[this.tokenIndex].end,
);
this.tokenIndex++;
}
appendCode(code) {
this.resultCode += code;
}
currentToken() {
return this.tokens[this.tokenIndex];
}
currentTokenCode() {
const token = this.currentToken();
return this.code.slice(token.start, token.end);
}
tokenAtRelativeIndex(relativeIndex) {
return this.tokens[this.tokenIndex + relativeIndex];
}
currentIndex() {
return this.tokenIndex;
}
/**
* Move to the next token. Only suitable in preprocessing steps. When
* generating new code, you should use copyToken or removeToken.
*/
nextToken() {
if (this.tokenIndex === this.tokens.length) {
throw new Error("Unexpectedly reached end of input.");
}
this.tokenIndex++;
}
previousToken() {
this.tokenIndex--;
}
finish() {
if (this.tokenIndex !== this.tokens.length) {
throw new Error("Tried to finish processing tokens before reaching the end.");
}
this.resultCode += this.previousWhitespaceAndComments();
return this.resultCode;
}
isAtEnd() {
return this.tokenIndex === this.tokens.length;
}
}

exports.default = TokenProcessor;
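
// Minimal usage sketch (assumptions flagged inline): tokens normally come
// from sucrase's parser, which isn't shown here; `parse` below is a
// hypothetical stand-in, and each token only needs `type`, `start`, `end`.
// Copying every token through reproduces the input exactly:
//
//   const TokenProcessor = require('./TokenProcessor').default; // assumed path
//   const code = "let x = 1;";
//   const tokens = parse(code); // hypothetical: produce the token array
//   const processor = new TokenProcessor(code, tokens, false);
//   while (!processor.isAtEnd()) {
//     processor.copyToken();
//   }
//   console.log(processor.finish() === code); // true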