/*
 * @atlaskit/editor-wikimarkup-transformer
 * Wiki markup transformer for JIRA and Confluence
 * (compiled package output — 200 lines (198 loc), 8.43 kB, JavaScript)
 */
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.parseString = parseString;
var _toConsumableArray2 = _interopRequireDefault(require("@babel/runtime/helpers/toConsumableArray"));
var _text = require("./nodes/text");
var _keyword = require("./tokenize/keyword");
var _tokenize = require("./tokenize");
var _whitespace = require("./tokenize/whitespace");
var _escape = require("./utils/escape");
var _normalize = require("./utils/normalize");
// States of the character-by-character parse loop in parseString().
var processState = {
NEWLINE: 0, // at start of a line: skip leading whitespace, look for leading keywords
BUFFER: 1, // accumulating plain text until a keyword or escape is hit
TOKEN: 2, // a keyword matched: hand off to parseToken() to consume it
ESCAPE: 3 // a backslash was seen: consume the escape sequence
};
/**
 * Tokenizes a wiki-markup string into an array of ProseMirror nodes.
 *
 * Drives a character-level state machine (see `processState`):
 *   NEWLINE -> trim leading whitespace and probe for leading/macro/other/issue keywords
 *   BUFFER  -> accumulate plain text until a keyword or '\\' escape is found
 *   TOKEN   -> delegate to parseToken() to consume the matched keyword's full token
 *   ESCAPE  -> consume a backslash escape via escapeHandler()
 *
 * @param {Object} _ref destructured options
 * @param {string} _ref.input raw wiki markup to parse
 * @param {Object} _ref.schema ProseMirror schema used to build nodes
 * @param {Array}  [_ref.ignoreTokenTypes=[]] token types to treat as plain text
 * @param {Object} _ref.context parse context (provides `issueKeyRegex`, etc.)
 * @param {boolean} [_ref.includeLeadingSpace=false] keep leading whitespace in the text buffer
 * @returns {Array} normalized block nodes followed by any trailing inline nodes
 */
function parseString(_ref) {
var input = _ref.input,
schema = _ref.schema,
_ref$ignoreTokenTypes = _ref.ignoreTokenTypes,
ignoreTokenTypes = _ref$ignoreTokenTypes === void 0 ? [] : _ref$ignoreTokenTypes,
context = _ref.context,
_ref$includeLeadingSp = _ref.includeLeadingSpace,
includeLeadingSpace = _ref$includeLeadingSp === void 0 ? false : _ref$includeLeadingSp;
// Current scan position in `input`.
var index = 0;
var state = processState.NEWLINE;
// Pending plain-text characters not yet turned into a text node.
var buffer = [];
var tokenType = _tokenize.TokenType.STRING;
// Consecutive hardBreak nodes seen since the last non-break token (see ESS-2539 below).
var newLines = [];
// Finished (normalized) block nodes.
var output = [];
// Inline nodes accumulated for the paragraph currently being built.
var inlineNodes = [];
while (index < input.length) {
var char = input.charAt(index);
switch (state) {
case processState.NEWLINE:
{
/**
 * During this state, the parser will trim leading
 * spaces and look for any leading keywords.
 */
var substring = input.substring(index);
var length = (0, _whitespace.parseWhitespaceOnly)(substring);
if (length) {
index += length;
// NOTE(review): only the single current char is buffered even when
// `length` spans several whitespace chars — presumably a deliberate
// collapse to one leading space; confirm against callers.
if (includeLeadingSpace) {
buffer.push(char);
}
continue;
}
var match = (0, _keyword.parseLeadingKeyword)(substring) || (0, _keyword.parseMacroKeyword)(substring) || (0, _keyword.parseOtherKeyword)(substring) || (0, _keyword.parseIssueKeyword)(substring, context.issueKeyRegex);
if (match && ignoreTokenTypes.indexOf(match.type) === -1) {
tokenType = match.type;
state = processState.TOKEN;
continue;
} else {
state = processState.BUFFER;
continue;
}
}
case processState.BUFFER:
{
/**
 * During this state, the parser will start
 * saving plaintext into the buffer until it hits
 * a keyword
 */
var _substring = input.substring(index);
/**
 * If the previous char is not a alphanumeric, we will parse
 * format keywords.
 * If the previous char is '{', we need to skip parse macro
 * keyword
 */
var _match = null;
if (buffer.length > 0 && buffer[buffer.length - 1].endsWith('{')) {
_match = (0, _keyword.parseOtherKeyword)(_substring);
} else {
_match = (0, _keyword.parseMacroKeyword)(_substring) || (0, _keyword.parseOtherKeyword)(_substring) || (0, _keyword.parseIssueKeyword)(_substring, context.issueKeyRegex);
}
if (_match && ignoreTokenTypes.indexOf(_match.type) === -1) {
tokenType = _match.type;
state = processState.TOKEN;
continue;
}
if (char === '\\') {
state = processState.ESCAPE;
continue;
}
// Plain character: keep buffering (index advances at the bottom of the loop).
buffer.push(char);
break;
}
case processState.TOKEN:
{
var token = (0, _tokenize.parseToken)(input, tokenType, index, schema, context);
if (token.type === 'text') {
// Keyword turned out to be literal text — fold it back into the buffer.
buffer.push(token.text);
} else if (token.type === 'pmnode') {
var _inlineNodes2;
/* ESS-2539: we keep track of consecutive newLines in the newLines array.
 * Whenever two or more consecutive newLines are encountered, we start a
 * new paragraph by normalizing and flushing the accumulated inline nodes.
 */
if (newLines.length >= 2 && (tokenType !== _tokenize.TokenType.HARD_BREAK || buffer.length > 0)) {
output.push.apply(output, (0, _toConsumableArray2.default)((0, _normalize.normalizePMNodes)(inlineNodes, schema)));
// push newLines to the buffer as a separator between media nodes
inlineNodes = isConsecutiveMediaGroups(inlineNodes, token.nodes) ? (0, _toConsumableArray2.default)(newLines) : [];
newLines = [];
}
if (inlineNodes.length === 0) {
// No inline content pending — leading hard breaks are meaningless, drop them.
newLines = [];
}
if (newLines.length > 0 && isNewLineRequiredBetweenNodes(inlineNodes, buffer, token.nodes)) {
var _inlineNodes;
// Materialize the pending hard breaks between two inline runs.
(_inlineNodes = inlineNodes).push.apply(_inlineNodes, (0, _toConsumableArray2.default)(newLines));
newLines = [];
}
// Flush buffered plain text as text node(s) ahead of the token's nodes.
(_inlineNodes2 = inlineNodes).push.apply(_inlineNodes2, (0, _toConsumableArray2.default)((0, _text.createTextNode)(buffer.join(''), schema)));
if (tokenType === _tokenize.TokenType.HARD_BREAK) {
var _newLines;
// Defer hard breaks: they only become real nodes if more content follows.
(_newLines = newLines).push.apply(_newLines, (0, _toConsumableArray2.default)(token.nodes));
} else {
var _inlineNodes3;
(_inlineNodes3 = inlineNodes).push.apply(_inlineNodes3, (0, _toConsumableArray2.default)(token.nodes));
if (token.nodes.length > 0) {
newLines = [];
}
}
buffer = []; // clear the buffer
}
index += token.length;
// A hard break restarts line-leading keyword detection; anything else resumes buffering.
if (tokenType === _tokenize.TokenType.HARD_BREAK) {
state = processState.NEWLINE;
} else {
state = processState.BUFFER;
}
continue;
}
case processState.ESCAPE:
{
var _token = (0, _escape.escapeHandler)(input, index);
buffer.push(_token.text);
index += _token.length;
state = processState.BUFFER;
continue;
}
default:
}
index++;
}
// End of input: flush whatever plain text is still buffered.
var bufferedStr = buffer.join('');
if (bufferedStr.length > 0) {
var _inlineNodes5;
// Wrapping the rest of the buffer into a text node
if (newLines.length >= 2) {
// normalize the nodes already parsed if more than two consecutive newLines are encountered
output.push.apply(output, (0, _toConsumableArray2.default)((0, _normalize.normalizePMNodes)(inlineNodes, schema)));
inlineNodes = [];
newLines = [];
}
if (newLines.length > 0 && inlineNodes.length > 0 && !inlineNodes[inlineNodes.length - 1].isBlock) {
var _inlineNodes4;
(_inlineNodes4 = inlineNodes).push.apply(_inlineNodes4, (0, _toConsumableArray2.default)(newLines));
}
(_inlineNodes5 = inlineNodes).push.apply(_inlineNodes5, (0, _toConsumableArray2.default)((0, _text.createTextNode)(bufferedStr, schema)));
}
return [].concat(output, (0, _toConsumableArray2.default)(inlineNodes));
}
/**
 * Decides whether a pending hardBreak must be inserted between the inline
 * nodes already collected and the nodes about to be appended.
 * True only for inline-to-inline joins; block nodes carry their own breaks.
 *
 * @param {Array} currentNodes inline nodes collected so far
 * @param {Array} buffer pending plain-text characters
 * @param {Array} nextNodes nodes produced by the upcoming token
 * @returns {boolean} true when a newline node is required between them
 */
function isNewLineRequiredBetweenNodes(currentNodes, buffer, nextNodes) {
  // Nothing before the break — nothing to separate.
  if (currentNodes.length === 0) {
    return false;
  }
  const lastNode = currentNodes[currentNodes.length - 1];
  // Buffered text follows: a break is needed unless the previous node is a block.
  if (buffer.length > 0) {
    return !lastNode?.isBlock;
  }
  // No buffered text: inspect the upcoming nodes directly.
  if (nextNodes.length === 0) {
    return false;
  }
  const nextNode = nextNodes[0];
  // An explicit hardBreak already provides the separation.
  if (nextNode?.type.name === 'hardBreak') {
    return false;
  }
  // Block nodes on either side manage their own line breaks.
  if (nextNode?.isBlock || lastNode?.isBlock) {
    return false;
  }
  return true;
}
/**
 * Returns true when the last collected node and the first upcoming node are
 * both `mediaGroup` nodes, i.e. two media groups would end up adjacent.
 *
 * @param {Array} currentNodes inline nodes collected so far
 * @param {Array} nextNodes nodes produced by the upcoming token
 * @returns {boolean}
 */
function isConsecutiveMediaGroups(currentNodes, nextNodes) {
  if (currentNodes.length === 0) {
    return false;
  }
  const lastName = currentNodes[currentNodes.length - 1]?.type.name;
  const nextName = nextNodes[0]?.type.name;
  return lastName === 'mediaGroup' && nextName === 'mediaGroup';
}