marked
A markdown parser built for speed
1 line • 188 kB
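For orientation, a minimal usage sketch of the published marked package (not part of the map below; the option names mirror the defaults embedded in src/defaults.ts, and the output comment is illustrative):

// Minimal usage sketch of the marked API.
import { marked } from 'marked';

// gfm and breaks correspond to the defaults in src/defaults.ts.
const html = marked.parse('# Hello *world*', { gfm: true, breaks: false });
console.log(html); // <h1>Hello <em>world</em></h1>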
Source Map (JSON)
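The raw map is reproduced below. As a hedged sketch of how its sources and sourcesContent arrays pair up (assuming the map has been saved beside marked.cjs under the conventional name marked.cjs.map, which this page does not show):

// Sketch only: list each original TypeScript source embedded in the map.
import { readFileSync } from 'node:fs';

const map = JSON.parse(readFileSync('marked.cjs.map', 'utf8'));
map.sources.forEach((source: string, i: number) => {
  const content = map.sourcesContent?.[i] ?? '';
  console.log(`${source}: ${content.split('\n').length} lines`);
});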
{"version":3,"file":"marked.cjs","sources":["../src/defaults.ts","../src/rules.ts","../src/helpers.ts","../src/Tokenizer.ts","../src/Lexer.ts","../src/Renderer.ts","../src/TextRenderer.ts","../src/Parser.ts","../src/Hooks.ts","../src/Instance.ts","../src/marked.ts"],"sourcesContent":["/**\n * Gets the original marked default options.\n */\nexport function _getDefaults() {\n return {\n async: false,\n breaks: false,\n extensions: null,\n gfm: true,\n hooks: null,\n pedantic: false,\n renderer: null,\n silent: false,\n tokenizer: null,\n walkTokens: null,\n };\n}\nexport let _defaults = _getDefaults();\nexport function changeDefaults(newDefaults) {\n _defaults = newDefaults;\n}\n","const noopTest = { exec: () => null };\nfunction edit(regex, opt = '') {\n let source = typeof regex === 'string' ? regex : regex.source;\n const obj = {\n replace: (name, val) => {\n let valSource = typeof val === 'string' ? val : val.source;\n valSource = valSource.replace(other.caret, '$1');\n source = source.replace(name, valSource);\n return obj;\n },\n getRegex: () => {\n return new RegExp(source, opt);\n },\n };\n return obj;\n}\nexport const other = {\n codeRemoveIndent: /^(?: {1,4}| {0,3}\\t)/gm,\n outputLinkReplace: /\\\\([\\[\\]])/g,\n indentCodeCompensation: /^(\\s+)(?:```)/,\n beginningSpace: /^\\s+/,\n endingHash: /#$/,\n startingSpaceChar: /^ /,\n endingSpaceChar: / $/,\n nonSpaceChar: /[^ ]/,\n newLineCharGlobal: /\\n/g,\n tabCharGlobal: /\\t/g,\n multipleSpaceGlobal: /\\s+/g,\n blankLine: /^[ \\t]*$/,\n doubleBlankLine: /\\n[ \\t]*\\n[ \\t]*$/,\n blockquoteStart: /^ {0,3}>/,\n blockquoteSetextReplace: /\\n {0,3}((?:=+|-+) *)(?=\\n|$)/g,\n blockquoteSetextReplace2: /^ {0,3}>[ \\t]?/gm,\n listReplaceTabs: /^\\t+/,\n listReplaceNesting: /^ {1,4}(?=( {4})*[^ ])/g,\n listIsTask: /^\\[[ xX]\\] /,\n listReplaceTask: /^\\[[ xX]\\] +/,\n anyLine: /\\n.*\\n/,\n hrefBrackets: /^<(.*)>$/,\n tableDelimiter: /[:|]/,\n tableAlignChars: /^\\||\\| *$/g,\n tableRowBlankLine: /\\n[ \\t]*$/,\n tableAlignRight: /^ *-+: *$/,\n tableAlignCenter: /^ *:-+: *$/,\n tableAlignLeft: /^ *:-+ *$/,\n startATag: /^<a /i,\n endATag: /^<\\/a>/i,\n startPreScriptTag: /^<(pre|code|kbd|script)(\\s|>)/i,\n endPreScriptTag: /^<\\/(pre|code|kbd|script)(\\s|>)/i,\n startAngleBracket: /^</,\n endAngleBracket: />$/,\n pedanticHrefTitle: /^([^'\"]*[^\\s])\\s+(['\"])(.*)\\2/,\n unicodeAlphaNumeric: /[\\p{L}\\p{N}]/u,\n escapeTest: /[&<>\"']/,\n escapeReplace: /[&<>\"']/g,\n escapeTestNoEncode: /[<>\"']|&(?!(#\\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\\w+);)/,\n escapeReplaceNoEncode: /[<>\"']|&(?!(#\\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\\w+);)/g,\n unescapeTest: /&(#(?:\\d+)|(?:#x[0-9A-Fa-f]+)|(?:\\w+));?/ig,\n caret: /(^|[^\\[])\\^/g,\n percentDecode: /%25/g,\n findPipe: /\\|/g,\n splitPipe: / \\|/,\n slashPipe: /\\\\\\|/g,\n carriageReturn: /\\r\\n|\\r/g,\n spaceLine: /^ +$/gm,\n notSpaceStart: /^\\S*/,\n endingNewline: /\\n$/,\n listItemRegex: (bull) => new RegExp(`^( {0,3}${bull})((?:[\\t ][^\\\\n]*)?(?:\\\\n|$))`),\n nextBulletRegex: (indent) => new RegExp(`^ {0,${Math.min(3, indent - 1)}}(?:[*+-]|\\\\d{1,9}[.)])((?:[ \\t][^\\\\n]*)?(?:\\\\n|$))`),\n hrRegex: (indent) => new RegExp(`^ {0,${Math.min(3, indent - 1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\\\* *){3,})(?:\\\\n+|$)`),\n fencesBeginRegex: (indent) => new RegExp(`^ {0,${Math.min(3, indent - 1)}}(?:\\`\\`\\`|~~~)`),\n headingBeginRegex: (indent) => new RegExp(`^ {0,${Math.min(3, indent - 1)}}#`),\n htmlBeginRegex: (indent) => new RegExp(`^ {0,${Math.min(3, indent - 1)}}<(?:[a-z].*>|!--)`, 'i'),\n};\n/**\n 
* Block-Level Grammar\n */\nconst newline = /^(?:[ \\t]*(?:\\n|$))+/;\nconst blockCode = /^((?: {4}| {0,3}\\t)[^\\n]+(?:\\n(?:[ \\t]*(?:\\n|$))*)?)+/;\nconst fences = /^ {0,3}(`{3,}(?=[^`\\n]*(?:\\n|$))|~{3,})([^\\n]*)(?:\\n|$)(?:|([\\s\\S]*?)(?:\\n|$))(?: {0,3}\\1[~`]* *(?=\\n|$)|$)/;\nconst hr = /^ {0,3}((?:-[\\t ]*){3,}|(?:_[ \\t]*){3,}|(?:\\*[ \\t]*){3,})(?:\\n+|$)/;\nconst heading = /^ {0,3}(#{1,6})(?=\\s|$)(.*)(?:\\n+|$)/;\nconst bullet = /(?:[*+-]|\\d{1,9}[.)])/;\nconst lheadingCore = /^(?!bull |blockCode|fences|blockquote|heading|html|table)((?:.|\\n(?!\\s*?\\n|bull |blockCode|fences|blockquote|heading|html|table))+?)\\n {0,3}(=+|-+) *(?:\\n+|$)/;\nconst lheading = edit(lheadingCore)\n .replace(/bull/g, bullet) // lists can interrupt\n .replace(/blockCode/g, /(?: {4}| {0,3}\\t)/) // indented code blocks can interrupt\n .replace(/fences/g, / {0,3}(?:`{3,}|~{3,})/) // fenced code blocks can interrupt\n .replace(/blockquote/g, / {0,3}>/) // blockquote can interrupt\n .replace(/heading/g, / {0,3}#{1,6}/) // ATX heading can interrupt\n .replace(/html/g, / {0,3}<[^\\n>]+>\\n/) // block html can interrupt\n .replace(/\\|table/g, '') // table not in commonmark\n .getRegex();\nconst lheadingGfm = edit(lheadingCore)\n .replace(/bull/g, bullet) // lists can interrupt\n .replace(/blockCode/g, /(?: {4}| {0,3}\\t)/) // indented code blocks can interrupt\n .replace(/fences/g, / {0,3}(?:`{3,}|~{3,})/) // fenced code blocks can interrupt\n .replace(/blockquote/g, / {0,3}>/) // blockquote can interrupt\n .replace(/heading/g, / {0,3}#{1,6}/) // ATX heading can interrupt\n .replace(/html/g, / {0,3}<[^\\n>]+>\\n/) // block html can interrupt\n .replace(/table/g, / {0,3}\\|?(?:[:\\- ]*\\|)+[\\:\\- ]*\\n/) // table can interrupt\n .getRegex();\nconst _paragraph = /^([^\\n]+(?:\\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\\n)[^\\n]+)*)/;\nconst blockText = /^[^\\n]+/;\nconst _blockLabel = /(?!\\s*\\])(?:\\\\.|[^\\[\\]\\\\])+/;\nconst def = edit(/^ {0,3}\\[(label)\\]: *(?:\\n[ \\t]*)?([^<\\s][^\\s]*|<.*?>)(?:(?: +(?:\\n[ \\t]*)?| *\\n[ \\t]*)(title))? *(?:\\n+|$)/)\n .replace('label', _blockLabel)\n .replace('title', /(?:\"(?:\\\\\"?|[^\"\\\\])*\"|'[^'\\n]*(?:\\n[^'\\n]+)*\\n?'|\\([^()]*\\))/)\n .getRegex();\nconst list = edit(/^( {0,3}bull)([ \\t][^\\n]+?)?(?:\\n|$)/)\n .replace(/bull/g, bullet)\n .getRegex();\nconst _tag = 'address|article|aside|base|basefont|blockquote|body|caption'\n + '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption'\n + '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe'\n + '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option'\n + '|p|param|search|section|summary|table|tbody|td|tfoot|th|thead|title'\n + '|tr|track|ul';\nconst _comment = /<!--(?:-?>|[\\s\\S]*?(?:-->|$))/;\nconst html = edit('^ {0,3}(?:' // optional indentation\n + '<(script|pre|style|textarea)[\\\\s>][\\\\s\\\\S]*?(?:</\\\\1>[^\\\\n]*\\\\n+|$)' // (1)\n + '|comment[^\\\\n]*(\\\\n+|$)' // (2)\n + '|<\\\\?[\\\\s\\\\S]*?(?:\\\\?>\\\\n*|$)' // (3)\n + '|<![A-Z][\\\\s\\\\S]*?(?:>\\\\n*|$)' // (4)\n + '|<!\\\\[CDATA\\\\[[\\\\s\\\\S]*?(?:\\\\]\\\\]>\\\\n*|$)' // (5)\n + '|</?(tag)(?: +|\\\\n|/?>)[\\\\s\\\\S]*?(?:(?:\\\\n[ \\t]*)+\\\\n|$)' // (6)\n + '|<(?!script|pre|style|textarea)([a-z][\\\\w-]*)(?:attribute)*? 
*/?>(?=[ \\\\t]*(?:\\\\n|$))[\\\\s\\\\S]*?(?:(?:\\\\n[ \\t]*)+\\\\n|$)' // (7) open tag\n + '|</(?!script|pre|style|textarea)[a-z][\\\\w-]*\\\\s*>(?=[ \\\\t]*(?:\\\\n|$))[\\\\s\\\\S]*?(?:(?:\\\\n[ \\t]*)+\\\\n|$)' // (7) closing tag\n + ')', 'i')\n .replace('comment', _comment)\n .replace('tag', _tag)\n .replace('attribute', / +[a-zA-Z:_][\\w.:-]*(?: *= *\"[^\"\\n]*\"| *= *'[^'\\n]*'| *= *[^\\s\"'=<>`]+)?/)\n .getRegex();\nconst paragraph = edit(_paragraph)\n .replace('hr', hr)\n .replace('heading', ' {0,3}#{1,6}(?:\\\\s|$)')\n .replace('|lheading', '') // setext headings don't interrupt commonmark paragraphs\n .replace('|table', '')\n .replace('blockquote', ' {0,3}>')\n .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\\\n]*\\\\n)|~{3,})[^\\\\n]*\\\\n')\n .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt\n .replace('html', '</?(?:tag)(?: +|\\\\n|/?>)|<(?:script|pre|style|textarea|!--)')\n .replace('tag', _tag) // pars can be interrupted by type (6) html blocks\n .getRegex();\nconst blockquote = edit(/^( {0,3}> ?(paragraph|[^\\n]*)(?:\\n|$))+/)\n .replace('paragraph', paragraph)\n .getRegex();\n/**\n * Normal Block Grammar\n */\nconst blockNormal = {\n blockquote,\n code: blockCode,\n def,\n fences,\n heading,\n hr,\n html,\n lheading,\n list,\n newline,\n paragraph,\n table: noopTest,\n text: blockText,\n};\n/**\n * GFM Block Grammar\n */\nconst gfmTable = edit('^ *([^\\\\n ].*)\\\\n' // Header\n + ' {0,3}((?:\\\\| *)?:?-+:? *(?:\\\\| *:?-+:? *)*(?:\\\\| *)?)' // Align\n + '(?:\\\\n((?:(?! *\\\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\\\n|$))*)\\\\n*|$)') // Cells\n .replace('hr', hr)\n .replace('heading', ' {0,3}#{1,6}(?:\\\\s|$)')\n .replace('blockquote', ' {0,3}>')\n .replace('code', '(?: {4}| {0,3}\\t)[^\\\\n]')\n .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\\\n]*\\\\n)|~{3,})[^\\\\n]*\\\\n')\n .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt\n .replace('html', '</?(?:tag)(?: +|\\\\n|/?>)|<(?:script|pre|style|textarea|!--)')\n .replace('tag', _tag) // tables can be interrupted by type (6) html blocks\n .getRegex();\nconst blockGfm = {\n ...blockNormal,\n lheading: lheadingGfm,\n table: gfmTable,\n paragraph: edit(_paragraph)\n .replace('hr', hr)\n .replace('heading', ' {0,3}#{1,6}(?:\\\\s|$)')\n .replace('|lheading', '') // setext headings don't interrupt commonmark paragraphs\n .replace('table', gfmTable) // interrupt paragraphs with table\n .replace('blockquote', ' {0,3}>')\n .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\\\n]*\\\\n)|~{3,})[^\\\\n]*\\\\n')\n .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt\n .replace('html', '</?(?:tag)(?: +|\\\\n|/?>)|<(?:script|pre|style|textarea|!--)')\n .replace('tag', _tag) // pars can be interrupted by type (6) html blocks\n .getRegex(),\n};\n/**\n * Pedantic grammar (original John Gruber's loose markdown specification)\n */\nconst blockPedantic = {\n ...blockNormal,\n html: edit('^ *(?:comment *(?:\\\\n|\\\\s*$)'\n + '|<(tag)[\\\\s\\\\S]+?</\\\\1> *(?:\\\\n{2,}|\\\\s*$)' // closed tag\n + '|<tag(?:\"[^\"]*\"|\\'[^\\']*\\'|\\\\s[^\\'\"/>\\\\s]*)*?/?> *(?:\\\\n{2,}|\\\\s*$))')\n .replace('comment', _comment)\n .replace(/tag/g, '(?!(?:'\n + 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub'\n + '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)'\n + '\\\\b)\\\\w+(?!:|[^\\\\w\\\\s@]*@)\\\\b')\n .getRegex(),\n def: /^ *\\[([^\\]]+)\\]: *<?([^\\s>]+)>?(?: +([\"(][^\\n]+[\")]))? 
*(?:\\n+|$)/,\n heading: /^(#{1,6})(.*)(?:\\n+|$)/,\n fences: noopTest, // fences not supported\n lheading: /^(.+?)\\n {0,3}(=+|-+) *(?:\\n+|$)/,\n paragraph: edit(_paragraph)\n .replace('hr', hr)\n .replace('heading', ' *#{1,6} *[^\\n]')\n .replace('lheading', lheading)\n .replace('|table', '')\n .replace('blockquote', ' {0,3}>')\n .replace('|fences', '')\n .replace('|list', '')\n .replace('|html', '')\n .replace('|tag', '')\n .getRegex(),\n};\n/**\n * Inline-Level Grammar\n */\nconst escape = /^\\\\([!\"#$%&'()*+,\\-./:;<=>?@\\[\\]\\\\^_`{|}~])/;\nconst inlineCode = /^(`+)([^`]|[^`][\\s\\S]*?[^`])\\1(?!`)/;\nconst br = /^( {2,}|\\\\)\\n(?!\\s*$)/;\nconst inlineText = /^(`+|[^`])(?:(?= {2,}\\n)|[\\s\\S]*?(?:(?=[\\\\<!\\[`*_]|\\b_|$)|[^ ](?= {2,}\\n)))/;\n// list of unicode punctuation marks, plus any missing characters from CommonMark spec\nconst _punctuation = /[\\p{P}\\p{S}]/u;\nconst _punctuationOrSpace = /[\\s\\p{P}\\p{S}]/u;\nconst _notPunctuationOrSpace = /[^\\s\\p{P}\\p{S}]/u;\nconst punctuation = edit(/^((?![*_])punctSpace)/, 'u')\n .replace(/punctSpace/g, _punctuationOrSpace).getRegex();\n// GFM allows ~ inside strong and em for strikethrough\nconst _punctuationGfmStrongEm = /(?!~)[\\p{P}\\p{S}]/u;\nconst _punctuationOrSpaceGfmStrongEm = /(?!~)[\\s\\p{P}\\p{S}]/u;\nconst _notPunctuationOrSpaceGfmStrongEm = /(?:[^\\s\\p{P}\\p{S}]|~)/u;\n// sequences em should skip over [title](link), `code`, <html>\nconst blockSkip = /\\[[^[\\]]*?\\]\\((?:\\\\.|[^\\\\\\(\\)]|\\((?:\\\\.|[^\\\\\\(\\)])*\\))*\\)|`[^`]*?`|<[^<>]*?>/g;\nconst emStrongLDelimCore = /^(?:\\*+(?:((?!\\*)punct)|[^\\s*]))|^_+(?:((?!_)punct)|([^\\s_]))/;\nconst emStrongLDelim = edit(emStrongLDelimCore, 'u')\n .replace(/punct/g, _punctuation)\n .getRegex();\nconst emStrongLDelimGfm = edit(emStrongLDelimCore, 'u')\n .replace(/punct/g, _punctuationGfmStrongEm)\n .getRegex();\nconst emStrongRDelimAstCore = '^[^_*]*?__[^_*]*?\\\\*[^_*]*?(?=__)' // Skip orphan inside strong\n + '|[^*]+(?=[^*])' // Consume to delim\n + '|(?!\\\\*)punct(\\\\*+)(?=[\\\\s]|$)' // (1) #*** can only be a Right Delimiter\n + '|notPunctSpace(\\\\*+)(?!\\\\*)(?=punctSpace|$)' // (2) a***#, a*** can only be a Right Delimiter\n + '|(?!\\\\*)punctSpace(\\\\*+)(?=notPunctSpace)' // (3) #***a, ***a can only be Left Delimiter\n + '|[\\\\s](\\\\*+)(?!\\\\*)(?=punct)' // (4) ***# can only be Left Delimiter\n + '|(?!\\\\*)punct(\\\\*+)(?!\\\\*)(?=punct)' // (5) #***# can be either Left or Right Delimiter\n + '|notPunctSpace(\\\\*+)(?=notPunctSpace)'; // (6) a***a can be either Left or Right Delimiter\nconst emStrongRDelimAst = edit(emStrongRDelimAstCore, 'gu')\n .replace(/notPunctSpace/g, _notPunctuationOrSpace)\n .replace(/punctSpace/g, _punctuationOrSpace)\n .replace(/punct/g, _punctuation)\n .getRegex();\nconst emStrongRDelimAstGfm = edit(emStrongRDelimAstCore, 'gu')\n .replace(/notPunctSpace/g, _notPunctuationOrSpaceGfmStrongEm)\n .replace(/punctSpace/g, _punctuationOrSpaceGfmStrongEm)\n .replace(/punct/g, _punctuationGfmStrongEm)\n .getRegex();\n// (6) Not allowed for _\nconst emStrongRDelimUnd = edit('^[^_*]*?\\\\*\\\\*[^_*]*?_[^_*]*?(?=\\\\*\\\\*)' // Skip orphan inside strong\n + '|[^_]+(?=[^_])' // Consume to delim\n + '|(?!_)punct(_+)(?=[\\\\s]|$)' // (1) #___ can only be a Right Delimiter\n + '|notPunctSpace(_+)(?!_)(?=punctSpace|$)' // (2) a___#, a___ can only be a Right Delimiter\n + '|(?!_)punctSpace(_+)(?=notPunctSpace)' // (3) #___a, ___a can only be Left Delimiter\n + '|[\\\\s](_+)(?!_)(?=punct)' // (4) ___# can only be Left Delimiter\n + 
'|(?!_)punct(_+)(?!_)(?=punct)', 'gu') // (5) #___# can be either Left or Right Delimiter\n .replace(/notPunctSpace/g, _notPunctuationOrSpace)\n .replace(/punctSpace/g, _punctuationOrSpace)\n .replace(/punct/g, _punctuation)\n .getRegex();\nconst anyPunctuation = edit(/\\\\(punct)/, 'gu')\n .replace(/punct/g, _punctuation)\n .getRegex();\nconst autolink = edit(/^<(scheme:[^\\s\\x00-\\x1f<>]*|email)>/)\n .replace('scheme', /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/)\n .replace('email', /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/)\n .getRegex();\nconst _inlineComment = edit(_comment).replace('(?:-->|$)', '-->').getRegex();\nconst tag = edit('^comment'\n + '|^</[a-zA-Z][\\\\w:-]*\\\\s*>' // self-closing tag\n + '|^<[a-zA-Z][\\\\w-]*(?:attribute)*?\\\\s*/?>' // open tag\n + '|^<\\\\?[\\\\s\\\\S]*?\\\\?>' // processing instruction, e.g. <?php ?>\n + '|^<![a-zA-Z]+\\\\s[\\\\s\\\\S]*?>' // declaration, e.g. <!DOCTYPE html>\n + '|^<!\\\\[CDATA\\\\[[\\\\s\\\\S]*?\\\\]\\\\]>') // CDATA section\n .replace('comment', _inlineComment)\n .replace('attribute', /\\s+[a-zA-Z:_][\\w.:-]*(?:\\s*=\\s*\"[^\"]*\"|\\s*=\\s*'[^']*'|\\s*=\\s*[^\\s\"'=<>`]+)?/)\n .getRegex();\nconst _inlineLabel = /(?:\\[(?:\\\\.|[^\\[\\]\\\\])*\\]|\\\\.|`[^`]*`|[^\\[\\]\\\\`])*?/;\nconst link = edit(/^!?\\[(label)\\]\\(\\s*(href)(?:(?:[ \\t]*(?:\\n[ \\t]*)?)(title))?\\s*\\)/)\n .replace('label', _inlineLabel)\n .replace('href', /<(?:\\\\.|[^\\n<>\\\\])+>|[^ \\t\\n\\x00-\\x1f]*/)\n .replace('title', /\"(?:\\\\\"?|[^\"\\\\])*\"|'(?:\\\\'?|[^'\\\\])*'|\\((?:\\\\\\)?|[^)\\\\])*\\)/)\n .getRegex();\nconst reflink = edit(/^!?\\[(label)\\]\\[(ref)\\]/)\n .replace('label', _inlineLabel)\n .replace('ref', _blockLabel)\n .getRegex();\nconst nolink = edit(/^!?\\[(ref)\\](?:\\[\\])?/)\n .replace('ref', _blockLabel)\n .getRegex();\nconst reflinkSearch = edit('reflink|nolink(?!\\\\()', 'g')\n .replace('reflink', reflink)\n .replace('nolink', nolink)\n .getRegex();\n/**\n * Normal Inline Grammar\n */\nconst inlineNormal = {\n _backpedal: noopTest, // only used for GFM url\n anyPunctuation,\n autolink,\n blockSkip,\n br,\n code: inlineCode,\n del: noopTest,\n emStrongLDelim,\n emStrongRDelimAst,\n emStrongRDelimUnd,\n escape,\n link,\n nolink,\n punctuation,\n reflink,\n reflinkSearch,\n tag,\n text: inlineText,\n url: noopTest,\n};\n/**\n * Pedantic Inline Grammar\n */\nconst inlinePedantic = {\n ...inlineNormal,\n link: edit(/^!?\\[(label)\\]\\((.*?)\\)/)\n .replace('label', _inlineLabel)\n .getRegex(),\n reflink: edit(/^!?\\[(label)\\]\\s*\\[([^\\]]*)\\]/)\n .replace('label', _inlineLabel)\n .getRegex(),\n};\n/**\n * GFM Inline Grammar\n */\nconst inlineGfm = {\n ...inlineNormal,\n emStrongRDelimAst: emStrongRDelimAstGfm,\n emStrongLDelim: emStrongLDelimGfm,\n url: edit(/^((?:ftp|https?):\\/\\/|www\\.)(?:[a-zA-Z0-9\\-]+\\.?)+[^\\s<]*|^email/, 'i')\n .replace('email', /[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/)\n .getRegex(),\n _backpedal: /(?:[^?!.,:;*_'\"~()&]+|\\([^)]*\\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_'\"~)]+(?!$))+/,\n del: /^(~~?)(?=[^\\s~])((?:\\\\.|[^\\\\])*?(?:\\\\.|[^\\s~\\\\]))\\1(?=[^~]|$)/,\n text: /^([`~]+|[^`~])(?:(?= {2,}\\n)|(?=[a-zA-Z0-9.!#$%&'*+\\/=?_`{\\|}~-]+@)|[\\s\\S]*?(?:(?=[\\\\<!\\[`*~_]|\\b_|https?:\\/\\/|ftp:\\/\\/|www\\.|$)|[^ ](?= {2,}\\n)|[^a-zA-Z0-9.!#$%&'*+\\/=?_`{\\|}~-](?=[a-zA-Z0-9.!#$%&'*+\\/=?_`{\\|}~-]+@)))/,\n};\n/**\n * GFM + Line Breaks Inline Grammar\n */\nconst inlineBreaks = {\n 
...inlineGfm,\n br: edit(br).replace('{2,}', '*').getRegex(),\n text: edit(inlineGfm.text)\n .replace('\\\\b_', '\\\\b_| {2,}\\\\n')\n .replace(/\\{2,\\}/g, '*')\n .getRegex(),\n};\n/**\n * exports\n */\nexport const block = {\n normal: blockNormal,\n gfm: blockGfm,\n pedantic: blockPedantic,\n};\nexport const inline = {\n normal: inlineNormal,\n gfm: inlineGfm,\n breaks: inlineBreaks,\n pedantic: inlinePedantic,\n};\n","import { other } from './rules.ts';\n/**\n * Helpers\n */\nconst escapeReplacements = {\n '&': '&',\n '<': '<',\n '>': '>',\n '\"': '"',\n \"'\": ''',\n};\nconst getEscapeReplacement = (ch) => escapeReplacements[ch];\nexport function escape(html, encode) {\n if (encode) {\n if (other.escapeTest.test(html)) {\n return html.replace(other.escapeReplace, getEscapeReplacement);\n }\n }\n else {\n if (other.escapeTestNoEncode.test(html)) {\n return html.replace(other.escapeReplaceNoEncode, getEscapeReplacement);\n }\n }\n return html;\n}\nexport function unescape(html) {\n // explicitly match decimal, hex, and named HTML entities\n return html.replace(other.unescapeTest, (_, n) => {\n n = n.toLowerCase();\n if (n === 'colon')\n return ':';\n if (n.charAt(0) === '#') {\n return n.charAt(1) === 'x'\n ? String.fromCharCode(parseInt(n.substring(2), 16))\n : String.fromCharCode(+n.substring(1));\n }\n return '';\n });\n}\nexport function cleanUrl(href) {\n try {\n href = encodeURI(href).replace(other.percentDecode, '%');\n }\n catch {\n return null;\n }\n return href;\n}\nexport function splitCells(tableRow, count) {\n // ensure that every cell-delimiting pipe has a space\n // before it to distinguish it from an escaped pipe\n const row = tableRow.replace(other.findPipe, (match, offset, str) => {\n let escaped = false;\n let curr = offset;\n while (--curr >= 0 && str[curr] === '\\\\')\n escaped = !escaped;\n if (escaped) {\n // odd number of slashes means | is escaped\n // so we leave it alone\n return '|';\n }\n else {\n // add space before unescaped |\n return ' |';\n }\n }), cells = row.split(other.splitPipe);\n let i = 0;\n // First/last cell in a row cannot be empty if it has no leading/trailing pipe\n if (!cells[0].trim()) {\n cells.shift();\n }\n if (cells.length > 0 && !cells.at(-1)?.trim()) {\n cells.pop();\n }\n if (count) {\n if (cells.length > count) {\n cells.splice(count);\n }\n else {\n while (cells.length < count)\n cells.push('');\n }\n }\n for (; i < cells.length; i++) {\n // leading or trailing whitespace is ignored per the gfm spec\n cells[i] = cells[i].trim().replace(other.slashPipe, '|');\n }\n return cells;\n}\n/**\n * Remove trailing 'c's. Equivalent to str.replace(/c*$/, '').\n * /c*$/ is vulnerable to REDOS.\n *\n * @param str\n * @param c\n * @param invert Remove suffix of non-c chars instead. 
Default falsey.\n */\nexport function rtrim(str, c, invert) {\n const l = str.length;\n if (l === 0) {\n return '';\n }\n // Length of suffix matching the invert condition.\n let suffLen = 0;\n // Step left until we fail to match the invert condition.\n while (suffLen < l) {\n const currChar = str.charAt(l - suffLen - 1);\n if (currChar === c && !invert) {\n suffLen++;\n }\n else if (currChar !== c && invert) {\n suffLen++;\n }\n else {\n break;\n }\n }\n return str.slice(0, l - suffLen);\n}\nexport function findClosingBracket(str, b) {\n if (str.indexOf(b[1]) === -1) {\n return -1;\n }\n let level = 0;\n for (let i = 0; i < str.length; i++) {\n if (str[i] === '\\\\') {\n i++;\n }\n else if (str[i] === b[0]) {\n level++;\n }\n else if (str[i] === b[1]) {\n level--;\n if (level < 0) {\n return i;\n }\n }\n }\n if (level > 0) {\n return -2;\n }\n return -1;\n}\n","import { _defaults } from './defaults.ts';\nimport { rtrim, splitCells, findClosingBracket, } from './helpers.ts';\nfunction outputLink(cap, link, raw, lexer, rules) {\n const href = link.href;\n const title = link.title || null;\n const text = cap[1].replace(rules.other.outputLinkReplace, '$1');\n lexer.state.inLink = true;\n const token = {\n type: cap[0].charAt(0) === '!' ? 'image' : 'link',\n raw,\n href,\n title,\n text,\n tokens: lexer.inlineTokens(text),\n };\n lexer.state.inLink = false;\n return token;\n}\nfunction indentCodeCompensation(raw, text, rules) {\n const matchIndentToCode = raw.match(rules.other.indentCodeCompensation);\n if (matchIndentToCode === null) {\n return text;\n }\n const indentToCode = matchIndentToCode[1];\n return text\n .split('\\n')\n .map(node => {\n const matchIndentInNode = node.match(rules.other.beginningSpace);\n if (matchIndentInNode === null) {\n return node;\n }\n const [indentInNode] = matchIndentInNode;\n if (indentInNode.length >= indentToCode.length) {\n return node.slice(indentToCode.length);\n }\n return node;\n })\n .join('\\n');\n}\n/**\n * Tokenizer\n */\nexport class _Tokenizer {\n options;\n rules; // set by the lexer\n lexer; // set by the lexer\n constructor(options) {\n this.options = options || _defaults;\n }\n space(src) {\n const cap = this.rules.block.newline.exec(src);\n if (cap && cap[0].length > 0) {\n return {\n type: 'space',\n raw: cap[0],\n };\n }\n }\n code(src) {\n const cap = this.rules.block.code.exec(src);\n if (cap) {\n const text = cap[0].replace(this.rules.other.codeRemoveIndent, '');\n return {\n type: 'code',\n raw: cap[0],\n codeBlockStyle: 'indented',\n text: !this.options.pedantic\n ? rtrim(text, '\\n')\n : text,\n };\n }\n }\n fences(src) {\n const cap = this.rules.block.fences.exec(src);\n if (cap) {\n const raw = cap[0];\n const text = indentCodeCompensation(raw, cap[3] || '', this.rules);\n return {\n type: 'code',\n raw,\n lang: cap[2] ? 
cap[2].trim().replace(this.rules.inline.anyPunctuation, '$1') : cap[2],\n text,\n };\n }\n }\n heading(src) {\n const cap = this.rules.block.heading.exec(src);\n if (cap) {\n let text = cap[2].trim();\n // remove trailing #s\n if (this.rules.other.endingHash.test(text)) {\n const trimmed = rtrim(text, '#');\n if (this.options.pedantic) {\n text = trimmed.trim();\n }\n else if (!trimmed || this.rules.other.endingSpaceChar.test(trimmed)) {\n // CommonMark requires space before trailing #s\n text = trimmed.trim();\n }\n }\n return {\n type: 'heading',\n raw: cap[0],\n depth: cap[1].length,\n text,\n tokens: this.lexer.inline(text),\n };\n }\n }\n hr(src) {\n const cap = this.rules.block.hr.exec(src);\n if (cap) {\n return {\n type: 'hr',\n raw: rtrim(cap[0], '\\n'),\n };\n }\n }\n blockquote(src) {\n const cap = this.rules.block.blockquote.exec(src);\n if (cap) {\n let lines = rtrim(cap[0], '\\n').split('\\n');\n let raw = '';\n let text = '';\n const tokens = [];\n while (lines.length > 0) {\n let inBlockquote = false;\n const currentLines = [];\n let i;\n for (i = 0; i < lines.length; i++) {\n // get lines up to a continuation\n if (this.rules.other.blockquoteStart.test(lines[i])) {\n currentLines.push(lines[i]);\n inBlockquote = true;\n }\n else if (!inBlockquote) {\n currentLines.push(lines[i]);\n }\n else {\n break;\n }\n }\n lines = lines.slice(i);\n const currentRaw = currentLines.join('\\n');\n const currentText = currentRaw\n // precede setext continuation with 4 spaces so it isn't a setext\n .replace(this.rules.other.blockquoteSetextReplace, '\\n $1')\n .replace(this.rules.other.blockquoteSetextReplace2, '');\n raw = raw ? `${raw}\\n${currentRaw}` : currentRaw;\n text = text ? `${text}\\n${currentText}` : currentText;\n // parse blockquote lines as top level tokens\n // merge paragraphs if this is a continuation\n const top = this.lexer.state.top;\n this.lexer.state.top = true;\n this.lexer.blockTokens(currentText, tokens, true);\n this.lexer.state.top = top;\n // if there is no continuation then we are done\n if (lines.length === 0) {\n break;\n }\n const lastToken = tokens.at(-1);\n if (lastToken?.type === 'code') {\n // blockquote continuation cannot be preceded by a code block\n break;\n }\n else if (lastToken?.type === 'blockquote') {\n // include continuation in nested blockquote\n const oldToken = lastToken;\n const newText = oldToken.raw + '\\n' + lines.join('\\n');\n const newToken = this.blockquote(newText);\n tokens[tokens.length - 1] = newToken;\n raw = raw.substring(0, raw.length - oldToken.raw.length) + newToken.raw;\n text = text.substring(0, text.length - oldToken.text.length) + newToken.text;\n break;\n }\n else if (lastToken?.type === 'list') {\n // include continuation in nested list\n const oldToken = lastToken;\n const newText = oldToken.raw + '\\n' + lines.join('\\n');\n const newToken = this.list(newText);\n tokens[tokens.length - 1] = newToken;\n raw = raw.substring(0, raw.length - lastToken.raw.length) + newToken.raw;\n text = text.substring(0, text.length - oldToken.raw.length) + newToken.raw;\n lines = newText.substring(tokens.at(-1).raw.length).split('\\n');\n continue;\n }\n }\n return {\n type: 'blockquote',\n raw,\n tokens,\n text,\n };\n }\n }\n list(src) {\n let cap = this.rules.block.list.exec(src);\n if (cap) {\n let bull = cap[1].trim();\n const isordered = bull.length > 1;\n const list = {\n type: 'list',\n raw: '',\n ordered: isordered,\n start: isordered ? +bull.slice(0, -1) : '',\n loose: false,\n items: [],\n };\n bull = isordered ? 
`\\\\d{1,9}\\\\${bull.slice(-1)}` : `\\\\${bull}`;\n if (this.options.pedantic) {\n bull = isordered ? bull : '[*+-]';\n }\n // Get next list item\n const itemRegex = this.rules.other.listItemRegex(bull);\n let endsWithBlankLine = false;\n // Check if current bullet point can start a new List Item\n while (src) {\n let endEarly = false;\n let raw = '';\n let itemContents = '';\n if (!(cap = itemRegex.exec(src))) {\n break;\n }\n if (this.rules.block.hr.test(src)) { // End list if bullet was actually HR (possibly move into itemRegex?)\n break;\n }\n raw = cap[0];\n src = src.substring(raw.length);\n let line = cap[2].split('\\n', 1)[0].replace(this.rules.other.listReplaceTabs, (t) => ' '.repeat(3 * t.length));\n let nextLine = src.split('\\n', 1)[0];\n let blankLine = !line.trim();\n let indent = 0;\n if (this.options.pedantic) {\n indent = 2;\n itemContents = line.trimStart();\n }\n else if (blankLine) {\n indent = cap[1].length + 1;\n }\n else {\n indent = cap[2].search(this.rules.other.nonSpaceChar); // Find first non-space char\n indent = indent > 4 ? 1 : indent; // Treat indented code blocks (> 4 spaces) as having only 1 indent\n itemContents = line.slice(indent);\n indent += cap[1].length;\n }\n if (blankLine && this.rules.other.blankLine.test(nextLine)) { // Items begin with at most one blank line\n raw += nextLine + '\\n';\n src = src.substring(nextLine.length + 1);\n endEarly = true;\n }\n if (!endEarly) {\n const nextBulletRegex = this.rules.other.nextBulletRegex(indent);\n const hrRegex = this.rules.other.hrRegex(indent);\n const fencesBeginRegex = this.rules.other.fencesBeginRegex(indent);\n const headingBeginRegex = this.rules.other.headingBeginRegex(indent);\n const htmlBeginRegex = this.rules.other.htmlBeginRegex(indent);\n // Check if following lines should be included in List Item\n while (src) {\n const rawLine = src.split('\\n', 1)[0];\n let nextLineWithoutTabs;\n nextLine = rawLine;\n // Re-align to follow commonmark nesting rules\n if (this.options.pedantic) {\n nextLine = nextLine.replace(this.rules.other.listReplaceNesting, ' ');\n nextLineWithoutTabs = nextLine;\n }\n else {\n nextLineWithoutTabs = nextLine.replace(this.rules.other.tabCharGlobal, ' ');\n }\n // End list item if found code fences\n if (fencesBeginRegex.test(nextLine)) {\n break;\n }\n // End list item if found start of new heading\n if (headingBeginRegex.test(nextLine)) {\n break;\n }\n // End list item if found start of html block\n if (htmlBeginRegex.test(nextLine)) {\n break;\n }\n // End list item if found start of new bullet\n if (nextBulletRegex.test(nextLine)) {\n break;\n }\n // Horizontal rule found\n if (hrRegex.test(nextLine)) {\n break;\n }\n if (nextLineWithoutTabs.search(this.rules.other.nonSpaceChar) >= indent || !nextLine.trim()) { // Dedent if possible\n itemContents += '\\n' + nextLineWithoutTabs.slice(indent);\n }\n else {\n // not enough indentation\n if (blankLine) {\n break;\n }\n // paragraph continuation unless last line was a different block level element\n if (line.replace(this.rules.other.tabCharGlobal, ' ').search(this.rules.other.nonSpaceChar) >= 4) { // indented code block\n break;\n }\n if (fencesBeginRegex.test(line)) {\n break;\n }\n if (headingBeginRegex.test(line)) {\n break;\n }\n if (hrRegex.test(line)) {\n break;\n }\n itemContents += '\\n' + nextLine;\n }\n if (!blankLine && !nextLine.trim()) { // Check if current line is blank\n blankLine = true;\n }\n raw += rawLine + '\\n';\n src = src.substring(rawLine.length + 1);\n line = 
nextLineWithoutTabs.slice(indent);\n }\n }\n if (!list.loose) {\n // If the previous item ended with a blank line, the list is loose\n if (endsWithBlankLine) {\n list.loose = true;\n }\n else if (this.rules.other.doubleBlankLine.test(raw)) {\n endsWithBlankLine = true;\n }\n }\n let istask = null;\n let ischecked;\n // Check for task list items\n if (this.options.gfm) {\n istask = this.rules.other.listIsTask.exec(itemContents);\n if (istask) {\n ischecked = istask[0] !== '[ ] ';\n itemContents = itemContents.replace(this.rules.other.listReplaceTask, '');\n }\n }\n list.items.push({\n type: 'list_item',\n raw,\n task: !!istask,\n checked: ischecked,\n loose: false,\n text: itemContents,\n tokens: [],\n });\n list.raw += raw;\n }\n // Do not consume newlines at end of final item. Alternatively, make itemRegex *start* with any newlines to simplify/speed up endsWithBlankLine logic\n const lastItem = list.items.at(-1);\n if (lastItem) {\n lastItem.raw = lastItem.raw.trimEnd();\n lastItem.text = lastItem.text.trimEnd();\n }\n else {\n // not a list since there were no items\n return;\n }\n list.raw = list.raw.trimEnd();\n // Item child tokens handled here at end because we needed to have the final item to trim it first\n for (let i = 0; i < list.items.length; i++) {\n this.lexer.state.top = false;\n list.items[i].tokens = this.lexer.blockTokens(list.items[i].text, []);\n if (!list.loose) {\n // Check if list should be loose\n const spacers = list.items[i].tokens.filter(t => t.type === 'space');\n const hasMultipleLineBreaks = spacers.length > 0 && spacers.some(t => this.rules.other.anyLine.test(t.raw));\n list.loose = hasMultipleLineBreaks;\n }\n }\n // Set all items to loose if list is loose\n if (list.loose) {\n for (let i = 0; i < list.items.length; i++) {\n list.items[i].loose = true;\n }\n }\n return list;\n }\n }\n html(src) {\n const cap = this.rules.block.html.exec(src);\n if (cap) {\n const token = {\n type: 'html',\n block: true,\n raw: cap[0],\n pre: cap[1] === 'pre' || cap[1] === 'script' || cap[1] === 'style',\n text: cap[0],\n };\n return token;\n }\n }\n def(src) {\n const cap = this.rules.block.def.exec(src);\n if (cap) {\n const tag = cap[1].toLowerCase().replace(this.rules.other.multipleSpaceGlobal, ' ');\n const href = cap[2] ? cap[2].replace(this.rules.other.hrefBrackets, '$1').replace(this.rules.inline.anyPunctuation, '$1') : '';\n const title = cap[3] ? cap[3].substring(1, cap[3].length - 1).replace(this.rules.inline.anyPunctuation, '$1') : cap[3];\n return {\n type: 'def',\n tag,\n raw: cap[0],\n href,\n title,\n };\n }\n }\n table(src) {\n const cap = this.rules.block.table.exec(src);\n if (!cap) {\n return;\n }\n if (!this.rules.other.tableDelimiter.test(cap[2])) {\n // delimiter row must have a pipe (|) or colon (:) otherwise it is a setext heading\n return;\n }\n const headers = splitCells(cap[1]);\n const aligns = cap[2].replace(this.rules.other.tableAlignChars, '').split('|');\n const rows = cap[3]?.trim() ? 
cap[3].replace(this.rules.other.tableRowBlankLine, '').split('\\n') : [];\n const item = {\n type: 'table',\n raw: cap[0],\n header: [],\n align: [],\n rows: [],\n };\n if (headers.length !== aligns.length) {\n // header and align columns must be equal, rows can be different.\n return;\n }\n for (const align of aligns) {\n if (this.rules.other.tableAlignRight.test(align)) {\n item.align.push('right');\n }\n else if (this.rules.other.tableAlignCenter.test(align)) {\n item.align.push('center');\n }\n else if (this.rules.other.tableAlignLeft.test(align)) {\n item.align.push('left');\n }\n else {\n item.align.push(null);\n }\n }\n for (let i = 0; i < headers.length; i++) {\n item.header.push({\n text: headers[i],\n tokens: this.lexer.inline(headers[i]),\n header: true,\n align: item.align[i],\n });\n }\n for (const row of rows) {\n item.rows.push(splitCells(row, item.header.length).map((cell, i) => {\n return {\n text: cell,\n tokens: this.lexer.inline(cell),\n header: false,\n align: item.align[i],\n };\n }));\n }\n return item;\n }\n lheading(src) {\n const cap = this.rules.block.lheading.exec(src);\n if (cap) {\n return {\n type: 'heading',\n raw: cap[0],\n depth: cap[2].charAt(0) === '=' ? 1 : 2,\n text: cap[1],\n tokens: this.lexer.inline(cap[1]),\n };\n }\n }\n paragraph(src) {\n const cap = this.rules.block.paragraph.exec(src);\n if (cap) {\n const text = cap[1].charAt(cap[1].length - 1) === '\\n'\n ? cap[1].slice(0, -1)\n : cap[1];\n return {\n type: 'paragraph',\n raw: cap[0],\n text,\n tokens: this.lexer.inline(text),\n };\n }\n }\n text(src) {\n const cap = this.rules.block.text.exec(src);\n if (cap) {\n return {\n type: 'text',\n raw: cap[0],\n text: cap[0],\n tokens: this.lexer.inline(cap[0]),\n };\n }\n }\n escape(src) {\n const cap = this.rules.inline.escape.exec(src);\n if (cap) {\n return {\n type: 'escape',\n raw: cap[0],\n text: cap[1],\n };\n }\n }\n tag(src) {\n const cap = this.rules.inline.tag.exec(src);\n if (cap) {\n if (!this.lexer.state.inLink && this.rules.other.startATag.test(cap[0])) {\n this.lexer.state.inLink = true;\n }\n else if (this.lexer.state.inLink && this.rules.other.endATag.test(cap[0])) {\n this.lexer.state.inLink = false;\n }\n if (!this.lexer.state.inRawBlock && this.rules.other.startPreScriptTag.test(cap[0])) {\n this.lexer.state.inRawBlock = true;\n }\n else if (this.lexer.state.inRawBlock && this.rules.other.endPreScriptTag.test(cap[0])) {\n this.lexer.state.inRawBlock = false;\n }\n return {\n type: 'html',\n raw: cap[0],\n inLink: this.lexer.state.inLink,\n inRawBlock: this.lexer.state.inRawBlock,\n block: false,\n text: cap[0],\n };\n }\n }\n link(src) {\n const cap = this.rules.inline.link.exec(src);\n if (cap) {\n const trimmedUrl = cap[2].trim();\n if (!this.options.pedantic && this.rules.other.startAngleBracket.test(trimmedUrl)) {\n // commonmark requires matching angle brackets\n if (!(this.rules.other.endAngleBracket.test(trimmedUrl))) {\n return;\n }\n // ending angle bracket cannot be escaped\n const rtrimSlash = rtrim(trimmedUrl.slice(0, -1), '\\\\');\n if ((trimmedUrl.length - rtrimSlash.length) % 2 === 0) {\n return;\n }\n }\n else {\n // find closing parenthesis\n const lastParenIndex = findClosingBracket(cap[2], '()');\n if (lastParenIndex === -2) {\n // more open parens than closed\n return;\n }\n if (lastParenIndex > -1) {\n const start = cap[0].indexOf('!') === 0 ? 
5 : 4;\n const linkLen = start + cap[1].length + lastParenIndex;\n cap[2] = cap[2].substring(0, lastParenIndex);\n cap[0] = cap[0].substring(0, linkLen).trim();\n cap[3] = '';\n }\n }\n let href = cap[2];\n let title = '';\n if (this.options.pedantic) {\n // split pedantic href and title\n const link = this.rules.other.pedanticHrefTitle.exec(href);\n if (link) {\n href = link[1];\n title = link[3];\n }\n }\n else {\n title = cap[3] ? cap[3].slice(1, -1) : '';\n }\n href = href.trim();\n if (this.rules.other.startAngleBracket.test(href)) {\n if (this.options.pedantic && !(this.rules.other.endAngleBracket.test(trimmedUrl))) {\n // pedantic allows starting angle bracket without ending angle bracket\n href = href.slice(1);\n }\n else {\n href = href.slice(1, -1);\n }\n }\n return outputLink(cap, {\n href: href ? href.replace(this.rules.inline.anyPunctuation, '$1') : href,\n title: title ? title.replace(this.rules.inline.anyPunctuation, '$1') : title,\n }, cap[0], this.lexer, this.rules);\n }\n }\n reflink(src, links) {\n let cap;\n if ((cap = this.rules.inline.reflink.exec(src))\n || (cap = this.rules.inline.nolink.exec(src))) {\n const linkString = (cap[2] || cap[1]).replace(this.rules.other.multipleSpaceGlobal, ' ');\n const link = links[linkString.toLowerCase()];\n if (!link) {\n const text = cap[0].charAt(0);\n return {\n type: 'text',\n raw: text,\n text,\n };\n }\n return outputLink(cap, link, cap[0], this.lexer, this.rules);\n }\n }\n emStrong(src, maskedSrc, prevChar = '') {\n let match = this.rules.inline.emStrongLDelim.exec(src);\n if (!match)\n return;\n // _ can't be between two alphanumerics. \\p{L}\\p{N} includes non-english alphabet/numbers as well\n if (match[3] && prevChar.match(this.rules.other.unicodeAlphaNumeric))\n return;\n const nextChar = match[1] || match[2] || '';\n if (!nextChar || !prevChar || this.rules.inline.punctuation.exec(prevChar)) {\n // unicode Regex counts emoji as 1 char; spread into array for proper count (used multiple times below)\n const lLength = [...match[0]].length - 1;\n let rDelim, rLength, delimTotal = lLength, midDelimTotal = 0;\n const endReg = match[0][0] === '*' ? this.rules.inline.emStrongRDelimAst : this.rules.inline.emStrongRDelimUnd;\n endReg.lastIndex = 0;\n // Clip maskedSrc to same section of string as src (move to lexer?)\n maskedSrc = maskedSrc.slice(-1 * src.length + lLength);\n while ((match = endReg.exec(maskedSrc)) != null) {\n rDelim = match[1] || match[2] || match[3] || match[4] || match[5] || match[6];\n if (!rDelim)\n continue; // skip single * in __abc*abc__\n rLength = [...rDelim].length;\n if (match[3] || match[4]) { // found another Left Delim\n delimTotal += rLength;\n continue;\n }\n else if (match[5] || match[6]) { // either Left or Right Delim\n if (lLength % 3 && !((lLength + rLength) % 3)) {\n midDelimTotal += rLength;\n continue; // CommonMark Emphasis Rules 9-10\n