// @atlaskit/editor-wikimarkup-transformer
// Wiki markup transformer for JIRA and Confluence
// (Babel-compiled distribution file)
import _toConsumableArray from "@babel/runtime/helpers/toConsumableArray";
import { createTextNode } from './nodes/text';
import { parseOtherKeyword, parseLeadingKeyword, parseMacroKeyword, parseIssueKeyword } from './tokenize/keyword';
import { parseToken, TokenType } from './tokenize';
import { parseWhitespaceOnly } from './tokenize/whitespace';
import { escapeHandler } from './utils/escape';
import { normalizePMNodes } from './utils/normalize';
// States of the character-scanning state machine driven by parseString.
var processState = {
  NEWLINE: 0, // at the start of a line: trim whitespace, look for leading keywords
  BUFFER: 1, // accumulating plain text until a keyword or a '\' escape appears
  TOKEN: 2, // a keyword matched: hand the input off to parseToken
  ESCAPE: 3 // a '\' was seen: resolve the escaped character via escapeHandler
};
/**
 * Parses a wiki markup string into an array of ProseMirror nodes.
 *
 * Runs a four-state scanner (see processState) over `input`:
 *  - NEWLINE: at line start, skips leading whitespace and checks for
 *    leading/macro/other/issue keywords;
 *  - BUFFER: collects plain text until a keyword or a '\' escape;
 *  - TOKEN: delegates the matched keyword to parseToken;
 *  - ESCAPE: resolves the escaped character via escapeHandler.
 *
 * @param {Object} _ref destructured options
 * @param {string} _ref.input raw wiki markup to parse
 * @param {Object} _ref.schema ProseMirror schema used to build nodes
 * @param {Array} [_ref.ignoreTokenTypes=[]] token types to treat as plain text
 * @param {Object} _ref.context parser context (provides issueKeyRegex, etc.)
 * @param {boolean} [_ref.includeLeadingSpace=false] keep line-leading
 *   whitespace in the text buffer instead of discarding it
 * @returns {Array} normalized block nodes from `output` followed by any
 *   trailing inline nodes not yet normalized
 */
export function parseString(_ref) {
  var input = _ref.input,
    schema = _ref.schema,
    _ref$ignoreTokenTypes = _ref.ignoreTokenTypes,
    ignoreTokenTypes = _ref$ignoreTokenTypes === void 0 ? [] : _ref$ignoreTokenTypes,
    context = _ref.context,
    _ref$includeLeadingSp = _ref.includeLeadingSpace,
    includeLeadingSpace = _ref$includeLeadingSp === void 0 ? false : _ref$includeLeadingSp;
  var index = 0; // current scan position in `input`
  var state = processState.NEWLINE; // scanner starts at the beginning of a line
  var buffer = []; // pending plain-text pieces not yet turned into a node
  var tokenType = TokenType.STRING; // type of the keyword match awaiting parseToken
  var newLines = []; // consecutive hardBreak nodes seen since the last content node
  var output = []; // finished (normalized) block nodes
  var inlineNodes = []; // inline nodes of the paragraph currently being built
  while (index < input.length) {
    var char = input.charAt(index);
    switch (state) {
      case processState.NEWLINE:
        {
          /**
           * During this state, the parser trims leading
           * spaces and looks for any leading keywords.
           */
          var substring = input.substring(index);
          var length = parseWhitespaceOnly(substring);
          if (length) {
            index += length;
            if (includeLeadingSpace) {
              // NOTE(review): only the first char of the whitespace run is
              // buffered even when `length` > 1 — presumably a deliberate
              // collapse of leading whitespace; confirm against callers.
              buffer.push(char);
            }
            continue;
          }
          // Line-start position: leading keywords (headings, lists, ...) are
          // only recognized here, ahead of the generic keyword parsers.
          var match = parseLeadingKeyword(substring) || parseMacroKeyword(substring) || parseOtherKeyword(substring) || parseIssueKeyword(substring, context.issueKeyRegex);
          if (match && ignoreTokenTypes.indexOf(match.type) === -1) {
            tokenType = match.type;
            state = processState.TOKEN;
            continue;
          } else {
            state = processState.BUFFER;
            continue;
          }
        }
      case processState.BUFFER:
        {
          /**
           * During this state, the parser will start
           * saving plaintext into the buffer until it hits
           * a keyword
           */
          var _substring = input.substring(index);
          /**
           * If the previous char is not alphanumeric, we will parse
           * format keywords.
           * If the previous char is '{', we need to skip parsing the macro
           * keyword (the '{' already opened one).
           */
          var _match = null;
          if (buffer.length > 0 && buffer[buffer.length - 1].endsWith('{')) {
            _match = parseOtherKeyword(_substring);
          } else {
            _match = parseMacroKeyword(_substring) || parseOtherKeyword(_substring) || parseIssueKeyword(_substring, context.issueKeyRegex);
          }
          if (_match && ignoreTokenTypes.indexOf(_match.type) === -1) {
            tokenType = _match.type;
            state = processState.TOKEN;
            continue;
          }
          if (char === '\\') {
            // Backslash starts an escape sequence; handled in the ESCAPE state.
            state = processState.ESCAPE;
            continue;
          }
          buffer.push(char);
          break; // fall through to index++ below
        }
      case processState.TOKEN:
        {
          var token = parseToken(input, tokenType, index, schema, context);
          if (token.type === 'text') {
            // The keyword turned out to be plain text; keep buffering.
            buffer.push(token.text);
          } else if (token.type === 'pmnode') {
            var _inlineNodes2;
            /* ESS-2539 We keep track of consecutive newLines in the newLines array.
            Whenever more than two consecutive newLines are encountered, we start a new paragraph
            */
            if (newLines.length >= 2 && (tokenType !== TokenType.HARD_BREAK || buffer.length > 0)) {
              output.push.apply(output, _toConsumableArray(normalizePMNodes(inlineNodes, schema)));
              // push newLines to the buffer as a separator between media nodes
              inlineNodes = isConsecutiveMediaGroups(inlineNodes, token.nodes) ? _toConsumableArray(newLines) : [];
              newLines = [];
            }
            if (inlineNodes.length === 0) {
              // Nothing to separate from: pending hard breaks are dropped.
              newLines = [];
            }
            if (newLines.length > 0 && isNewLineRequiredBetweenNodes(inlineNodes, buffer, token.nodes)) {
              var _inlineNodes;
              (_inlineNodes = inlineNodes).push.apply(_inlineNodes, _toConsumableArray(newLines));
              newLines = [];
            }
            // Flush buffered plaintext ahead of the parsed node(s).
            (_inlineNodes2 = inlineNodes).push.apply(_inlineNodes2, _toConsumableArray(createTextNode(buffer.join(''), schema)));
            if (tokenType === TokenType.HARD_BREAK) {
              var _newLines;
              // Hard breaks are held back until we know a separator is needed.
              (_newLines = newLines).push.apply(_newLines, _toConsumableArray(token.nodes));
            } else {
              var _inlineNodes3;
              (_inlineNodes3 = inlineNodes).push.apply(_inlineNodes3, _toConsumableArray(token.nodes));
              if (token.nodes.length > 0) {
                newLines = [];
              }
            }
            buffer = []; // clear the buffer
          }
          index += token.length;
          if (tokenType === TokenType.HARD_BREAK) {
            // A hard break starts a new line, so leading keywords apply again.
            state = processState.NEWLINE;
          } else {
            state = processState.BUFFER;
          }
          continue;
        }
      case processState.ESCAPE:
        {
          var _token = escapeHandler(input, index);
          buffer.push(_token.text);
          index += _token.length;
          state = processState.BUFFER;
          continue;
        }
      default:
    }
    index++;
  }
  // End of input: flush whatever plaintext remains in the buffer.
  var bufferedStr = buffer.join('');
  if (bufferedStr.length > 0) {
    var _inlineNodes5;
    // Wrapping the rest of the buffer into a text node
    if (newLines.length >= 2) {
      // normalize the nodes already parsed if more than two consecutive newLines are encountered
      output.push.apply(output, _toConsumableArray(normalizePMNodes(inlineNodes, schema)));
      inlineNodes = [];
      newLines = [];
    }
    if (newLines.length > 0 && inlineNodes.length > 0 && !inlineNodes[inlineNodes.length - 1].isBlock) {
      var _inlineNodes4;
      (_inlineNodes4 = inlineNodes).push.apply(_inlineNodes4, _toConsumableArray(newLines));
    }
    (_inlineNodes5 = inlineNodes).push.apply(_inlineNodes5, _toConsumableArray(createTextNode(bufferedStr, schema)));
  }
  return [].concat(output, _toConsumableArray(inlineNodes));
}
/**
 * Decides whether buffered hardBreak separators must be inserted between
 * the nodes collected so far and the next batch of parsed nodes.
 * Returns true only for inline-to-inline joins; a block node on either
 * side, or a leading hardBreak in the next batch, suppresses the newline.
 *
 * @param {Array} currentNodes nodes already collected for the current paragraph
 * @param {Array} buffer pending plaintext pieces not yet flushed
 * @param {Array} nextNodes nodes produced by the token just parsed
 * @returns {boolean} true when the separator newlines are required
 */
function isNewLineRequiredBetweenNodes(currentNodes, buffer, nextNodes) {
  // Nothing collected yet: there is nothing to separate from.
  if (currentNodes.length === 0) {
    return false;
  }
  var lastNode = currentNodes[currentNodes.length - 1];
  if (buffer.length > 0) {
    // Pending plaintext joins onto the last node; only a trailing block
    // node suppresses the separator.
    return !(lastNode !== null && lastNode !== void 0 && lastNode.isBlock);
  }
  // Empty buffer: inspect the incoming nodes directly.
  if (nextNodes.length === 0) {
    return false;
  }
  var nextNode = nextNodes[0];
  if ((nextNode === null || nextNode === void 0 ? void 0 : nextNode.type.name) === 'hardBreak') {
    return false;
  }
  if (nextNode !== null && nextNode !== void 0 && nextNode.isBlock || lastNode !== null && lastNode !== void 0 && lastNode.isBlock) {
    return false;
  }
  return true;
}
/**
 * True when the last already-collected node and the first incoming node
 * are both 'mediaGroup' nodes — the caller then keeps the hardBreak
 * separator between the two consecutive media groups.
 *
 * @param {Array} currentNodes nodes already collected
 * @param {Array} nextNodes nodes produced by the token just parsed
 * @returns {boolean}
 */
function isConsecutiveMediaGroups(currentNodes, nextNodes) {
  if (currentNodes.length === 0) {
    return false;
  }
  var tail = currentNodes[currentNodes.length - 1];
  var head = nextNodes[0];
  var tailName = tail === null || tail === void 0 ? void 0 : tail.type.name;
  var headName = head === null || head === void 0 ? void 0 : head.type.name;
  return tailName === 'mediaGroup' && headName === 'mediaGroup';
}