 e89f2f4b7b
			
		
	
	e89f2f4b7b
	
	
	
		
			
			Created 10 detailed GitHub issues covering: - Project activation and management UI (#1-2) - Worker node coordination and visualization (#3-4) - Automated GitHub repository scanning (#5) - Intelligent model-to-issue matching (#6) - Multi-model task execution system (#7) - N8N workflow integration (#8) - Hive-Bzzz P2P bridge (#9) - Peer assistance protocol (#10) Each issue includes detailed specifications, acceptance criteria, technical implementation notes, and dependency mapping. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
		
			
				
	
	
		
			5541 lines
		
	
	
		
			159 KiB
		
	
	
	
		
			JavaScript
		
	
	
	
	
	
			
		
		
	
	
			5541 lines
		
	
	
		
			159 KiB
		
	
	
	
		
			JavaScript
		
	
	
	
	
	
| 'use strict';
 | ||
| 
 | ||
| var mdurl = require('mdurl');
 | ||
| var ucmicro = require('uc.micro');
 | ||
| var entities = require('entities');
 | ||
| var LinkifyIt = require('linkify-it');
 | ||
| var punycode = require('punycode.js');
 | ||
| 
 | ||
// Build a frozen, ES-module-like namespace object from a CommonJS export.
// Own enumerable keys (except 'default') are copied over — getters are
// preserved as live getters — and the original export becomes `.default`.
function _interopNamespaceDefault(e) {
  const n = Object.create(null);
  if (e) {
    for (const k of Object.keys(e)) {
      if (k === 'default') { continue; }
      const d = Object.getOwnPropertyDescriptor(e, k);
      if (d.get) {
        Object.defineProperty(n, k, d);
      } else {
        Object.defineProperty(n, k, {
          enumerable: true,
          get: function () { return e[k]; }
        });
      }
    }
  }
  n.default = e;
  return Object.freeze(n);
}
 | ||
| 
 | ||
// Wrap the CJS requires in frozen namespace-like objects so they can be
// re-exported to plugins (see the `lib` object further down in this file).
var mdurl__namespace = /*#__PURE__*/_interopNamespaceDefault(mdurl);
var ucmicro__namespace = /*#__PURE__*/_interopNamespaceDefault(ucmicro);
 | ||
| 
 | ||
// Utilities
//

// Return an object's internal [[Class]] tag, e.g. '[object String]'.
function _class(obj) {
  return Object.prototype.toString.call(obj);
}

// True for string primitives and String objects alike.
function isString(obj) {
  return _class(obj) === '[object String]';
}

const _hasOwnProperty = Object.prototype.hasOwnProperty;

// Own-property check that stays safe for objects with a shadowed or missing
// `hasOwnProperty` (e.g. created via Object.create(null)).
function has(object, key) {
  return _hasOwnProperty.call(object, key);
}
 | ||
| 
 | ||
// Merge objects
//
// Shallow-copies the own enumerable keys of each source object into `obj`
// (later sources win) and returns `obj`. Falsy sources are skipped;
// non-object sources raise TypeError.
//
// Fix: the error message previously read "<src>must be object" — the space
// after the source value was missing.
function assign(obj /* from1, from2, from3, ... */) {
  const sources = Array.prototype.slice.call(arguments, 1);
  sources.forEach(function (source) {
    if (!source) {
      return;
    }
    if (typeof source !== 'object') {
      throw new TypeError(source + ' must be object');
    }
    Object.keys(source).forEach(function (key) {
      obj[key] = source[key];
    });
  });
  return obj;
}
 | ||
| 
 | ||
// Remove the element at `pos` from `src` and splice `newElements` in at the
// same position, returning a new array. Useful for some operations with
// token streams; neither input array is mutated.
function arrayReplaceAt(src, pos, newElements) {
  const head = src.slice(0, pos);
  const tail = src.slice(pos + 1);
  return head.concat(newElements, tail);
}
 | ||
// True when `c` is a code point that a numeric character reference is
// allowed to produce (per the HTML spec's notion of valid characters).
function isValidEntityCode(c) {
  /* eslint no-bitwise:0 */
  // surrogate halves (broken sequences) and the never-assigned FDD0..FDEF block
  if ((c >= 0xD800 && c <= 0xDFFF) || (c >= 0xFDD0 && c <= 0xFDEF)) {
    return false;
  }
  // plane-internal non-characters xxFFFE / xxFFFF
  const low = c & 0xFFFF;
  if (low === 0xFFFE || low === 0xFFFF) {
    return false;
  }
  // C0/C1 control codes (allowing \t \n \f \r)
  if ((c >= 0x00 && c <= 0x08) || c === 0x0B || (c >= 0x0E && c <= 0x1F) || (c >= 0x7F && c <= 0x9F)) {
    return false;
  }
  // anything past U+10FFFF is out of Unicode range
  return c <= 0x10FFFF;
}
 | ||
// Convert a Unicode code point to a JS string, splitting astral-plane
// characters into a UTF-16 surrogate pair.
function fromCodePoint(c) {
  /* eslint no-bitwise:0 */
  if (c <= 0xffff) {
    return String.fromCharCode(c);
  }
  const offset = c - 0x10000;
  const hi = 0xd800 + (offset >> 10);
  const lo = 0xdc00 + (offset & 0x3ff);
  return String.fromCharCode(hi, lo);
}
 | ||
const UNESCAPE_MD_RE = /\\([!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~])/g;
const ENTITY_RE = /&([a-z#][a-z0-9]{1,31});/gi;
const UNESCAPE_ALL_RE = new RegExp(UNESCAPE_MD_RE.source + '|' + ENTITY_RE.source, 'gi');
const DIGITAL_ENTITY_TEST_RE = /^#((?:x[a-f0-9]{1,8}|[0-9]{1,8}))$/i;

// Resolve a single HTML entity. `name` is the text between '&' and ';'.
// Numeric references (`#nnn` / `#xhh`) are decoded directly; named ones go
// through entities.decodeHTML. Invalid/unknown entities are kept verbatim.
function replaceEntityPattern(match, name) {
  if (name.charCodeAt(0) === 0x23 /* # */ && DIGITAL_ENTITY_TEST_RE.test(name)) {
    const isHex = name[1].toLowerCase() === 'x';
    const code = isHex ? parseInt(name.slice(2), 16) : parseInt(name.slice(1), 10);
    return isValidEntityCode(code) ? fromCodePoint(code) : match;
  }
  // decodeHTML returns its input unchanged for unknown names, which is
  // exactly the fallback behavior we want here.
  return entities.decodeHTML(match);
}

// Strip markdown backslash-escapes: '\*' -> '*'. Fast path when the string
// contains no backslash at all.
function unescapeMd(str) {
  if (str.indexOf('\\') < 0) { return str; }
  return str.replace(UNESCAPE_MD_RE, '$1');
}

// Strip backslash-escapes AND decode HTML entities in a single pass.
function unescapeAll(str) {
  if (str.indexOf('\\') < 0 && str.indexOf('&') < 0) { return str; }
  return str.replace(UNESCAPE_ALL_RE, function (match, escaped, entity) {
    // `escaped` is the punctuation char after a backslash (always truthy
    // when that alternative matched); otherwise decode the entity.
    return escaped || replaceEntityPattern(match, entity);
  });
}
 | ||
const HTML_ESCAPE_TEST_RE = /[&<>"]/;
const HTML_ESCAPE_REPLACE_RE = /[&<>"]/g;

// Map of HTML-unsafe characters to their entity-encoded forms.
//
// Fix: this table previously mapped every character to itself ('&': '&',
// '<': '<', …) — the entity strings had evidently been decoded away by an
// HTML-processing pass — which turned escapeHtml into a no-op and removed
// its XSS protection. Restore the proper entities.
const HTML_REPLACEMENTS = {
  '&': '&amp;',
  '<': '&lt;',
  '>': '&gt;',
  '"': '&quot;'
};

function replaceUnsafeChar(ch) {
  return HTML_REPLACEMENTS[ch];
}

// Escape &, <, > and " so `str` can be embedded safely in HTML output.
// Fast path: strings without any unsafe char are returned as-is.
function escapeHtml(str) {
  if (HTML_ESCAPE_TEST_RE.test(str)) {
    return str.replace(HTML_ESCAPE_REPLACE_RE, replaceUnsafeChar);
  }
  return str;
}
 | ||
const REGEXP_ESCAPE_RE = /[.?*+^$[\]\\(){}|-]/g;

// Backslash-escape every regexp metacharacter in `str` so the result can be
// embedded in a RegExp as a literal.
function escapeRE(str) {
  return str.replace(REGEXP_ESCAPE_RE, function (ch) {
    return '\\' + ch;
  });
}
 | ||
// True for the two characters markdown treats as inline space: tab and space.
function isSpace(code) {
  return code === 0x09 /* \t */ || code === 0x20 /* space */;
}
 | ||
| 
 | ||
// Zs (unicode class) || [\t\f\v\r\n]
function isWhiteSpace(code) {
  // U+2000..U+200A — the run of general-punctuation spaces
  if (code >= 0x2000 && code <= 0x200A) {
    return true;
  }
  return (
    (code >= 0x09 && code <= 0x0D) || // \t \n \v \f \r
    code === 0x20 ||   // space
    code === 0xA0 ||   // no-break space
    code === 0x1680 || // ogham space mark
    code === 0x202F || // narrow no-break space
    code === 0x205F || // medium mathematical space
    code === 0x3000    // ideographic space
  );
}
 | ||
| 
 | ||
/* eslint-disable max-len */

// True when `ch` is a Unicode punctuation (P) or symbol (S) character.
// Currently without astral characters support.
function isPunctChar(ch) {
  const uc = ucmicro__namespace;
  return uc.P.test(ch) || uc.S.test(ch);
}
 | ||
| 
 | ||
// Markdown ASCII punctuation characters.
//
// !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \, ], ^, _, `, {, |, }, or ~
// http://spec.commonmark.org/0.15/#ascii-punctuation-character
//
// Don't confuse with unicode punctuation !!! It lacks some chars in ascii range.
//
// The listed characters are exactly the four ASCII ranges below (everything
// printable that is not a digit or a letter).
function isMdAsciiPunct(ch) {
  return (
    (ch >= 0x21 /* ! */ && ch <= 0x2F /* / */) ||
    (ch >= 0x3A /* : */ && ch <= 0x40 /* @ */) ||
    (ch >= 0x5B /* [ */ && ch <= 0x60 /* ` */) ||
    (ch >= 0x7B /* { */ && ch <= 0x7E /* ~ */)
  );
}
 | ||
| 
 | ||
// Helper to unify [reference labels].
 | ||
| //
 | ||
function normalizeReference(str) {
  // Trim, then collapse every internal whitespace run into a single space.
  str = str.trim().replace(/\s+/g, ' ');

  // In node v10 'ẞ'.toLowerCase() === 'Ṿ', presumed to be a bug fixed in
  // v12; remap ẞ manually on affected runtimes. (Remove this check once
  // node v10 is no longer supported.)
  if ('ẞ'.toLowerCase() === 'Ṿ') {
    str = str.replace(/ẞ/g, 'ß');
  }

  // Case-fold by chaining .toLowerCase().toUpperCase(): lowercasing alone
  // fails to normalize 125 code points and uppercasing alone misses 6
  // (İ, ϴ, ẞ, Ω, K, Å — already uppercase but with differently-uppercased
  // variants). Chaining both maps every case variant of a letter onto one
  // canonical form, e.g.:
  //
  //   '\u0398\u03f4\u03b8\u03d1'.toLowerCase().toUpperCase()
  //     === '\u0398\u0398\u0398\u0398'
  //
  // (all four Greek theta variants collapse to U+0398). This is equivalent
  // to Unicode case folding; Unicode *normalization* is a separate step
  // that is not needed here.
  //
  // The final result is uppercased because labels are later stored as
  // object keys, and uppercase keys cannot collide with Object.prototype
  // members such as `__proto__`.
  return str.toLowerCase().toUpperCase();
}
 | ||
| 
 | ||
// Re-export libraries commonly used in both markdown-it and its plugins,
// so plugins won't have to depend on them explicitly, which reduces their
// bundled size (e.g. a browser build).
//
const lib = {
  mdurl: mdurl__namespace,
  ucmicro: ucmicro__namespace
};
 | ||
| 
 | ||
// Frozen namespace object bundling the utility helpers defined above
// (null prototype avoids any Object.prototype interference).
var utils = /*#__PURE__*/Object.freeze({
  __proto__: null,
  arrayReplaceAt: arrayReplaceAt,
  assign: assign,
  escapeHtml: escapeHtml,
  escapeRE: escapeRE,
  fromCodePoint: fromCodePoint,
  has: has,
  isMdAsciiPunct: isMdAsciiPunct,
  isPunctChar: isPunctChar,
  isSpace: isSpace,
  isString: isString,
  isValidEntityCode: isValidEntityCode,
  isWhiteSpace: isWhiteSpace,
  lib: lib,
  normalizeReference: normalizeReference,
  unescapeAll: unescapeAll,
  unescapeMd: unescapeMd
});
 | ||
| 
 | ||
// Parse link label
//
// This function assumes the first character ("[") already matched.
// Returns the position of the closing "]", or -1 when no valid label is
// found (or when nesting is found while `disableNested` is set).
// `state.pos` is always restored before returning.
//
function parseLinkLabel(state, start, disableNested) {
  const max = state.posMax;
  const oldPos = state.pos;
  let labelEnd = -1;
  let level = 1;

  state.pos = start + 1;
  while (state.pos < max) {
    const marker = state.src.charCodeAt(state.pos);
    if (marker === 0x5D /* ] */) {
      level--;
      if (level === 0) {
        labelEnd = state.pos;
        break;
      }
    }

    const prevPos = state.pos;
    state.md.inline.skipToken(state);

    if (marker === 0x5B /* [ */) {
      if (prevPos === state.pos - 1) {
        // plain-text `[` that is not part of any token — one nesting level deeper
        level++;
      } else if (disableNested) {
        state.pos = oldPos;
        return -1;
      }
    }
  }

  // restore old state
  state.pos = oldPos;
  return labelEnd;
}
 | ||
| 
 | ||
// Parse link destination
//
// Scans `str` in [start, max) for a link destination, either the
// angle-bracket form `<dest>` or a bare destination terminated by
// whitespace/control chars. Returns { ok, pos, str } where `pos` is the
// first position after the destination and `str` is the unescaped text.
//
function parseLinkDestination(str, start, max) {
  const result = {
    ok: false,
    pos: 0,
    str: ''
  };
  let pos = start;

  // Angle-bracket form: <destination> — no newlines or raw '<' inside.
  if (str.charCodeAt(pos) === 0x3C /* < */) {
    pos++;
    while (pos < max) {
      const code = str.charCodeAt(pos);
      if (code === 0x0A /* \n */ || code === 0x3C /* < */) {
        return result;
      }
      if (code === 0x3E /* > */) {
        result.pos = pos + 1;
        result.str = unescapeAll(str.slice(start + 1, pos));
        result.ok = true;
        return result;
      }
      if (code === 0x5C /* \ */ && pos + 1 < max) {
        pos += 2; // skip escaped character
      } else {
        pos++;
      }
    }
    // no closing '>'
    return result;
  }

  // Bare form: scan until space/control char, tracking () balance.
  // (this should be ... } else { ... branch)
  let level = 0;
  while (pos < max) {
    const code = str.charCodeAt(pos);

    if (code === 0x20) {
      break;
    }
    // ascii control characters terminate the destination
    if (code < 0x20 || code === 0x7F) {
      break;
    }
    if (code === 0x5C /* \ */ && pos + 1 < max) {
      if (str.charCodeAt(pos + 1) === 0x20) {
        break;
      }
      pos += 2;
      continue;
    }
    if (code === 0x28 /* ( */) {
      level++;
      if (level > 32) {
        // nesting limit exceeded
        return result;
      }
    } else if (code === 0x29 /* ) */) {
      if (level === 0) {
        break;
      }
      level--;
    }
    pos++;
  }

  if (pos === start) {
    return result;
  }
  if (level !== 0) {
    // unbalanced parentheses
    return result;
  }

  result.str = unescapeAll(str.slice(start, pos));
  result.pos = pos;
  result.ok = true;
  return result;
}
 | ||
| 
 | ||
// Parse link title
//


// Parse link title within `str` in [start, max] range,
// or continue previous parsing if `prev_state` is defined (equal to result of last execution).
//
// Accepted title delimiters are "...", '...' and (...). The returned state
// object doubles as the `prev_state` for a continuation call when the title
// spans multiple lines (reference definitions only).
//
function parseLinkTitle(str, start, max, prev_state) {
  let code;
  let pos = start;
  const state = {
    // if `true`, this is a valid link title
    ok: false,
    // if `true`, this link can be continued on the next line
    can_continue: false,
    // if `ok`, it's the position of the first character after the closing marker
    pos: 0,
    // if `ok`, it's the unescaped title
    str: '',
    // expected closing marker character code
    marker: 0
  };
  if (prev_state) {
    // this is a continuation of a previous parseLinkTitle call on the next line,
    // used in reference links only; carry over the accumulated text and the
    // expected closing marker.
    state.str = prev_state.str;
    state.marker = prev_state.marker;
  } else {
    if (pos >= max) {
      return state;
    }
    let marker = str.charCodeAt(pos);
    // title must open with ", ' or (
    if (marker !== 0x22 /* " */ && marker !== 0x27 /* ' */ && marker !== 0x28 /* ( */) {
      return state;
    }
    // skip the opening marker — `start` moves too so slices below exclude it
    start++;
    pos++;

    // if opening marker is "(", switch it to closing marker ")"
    if (marker === 0x28) {
      marker = 0x29;
    }
    state.marker = marker;
  }
  while (pos < max) {
    code = str.charCodeAt(pos);
    if (code === state.marker) {
      // found the closing marker — title is complete
      state.pos = pos + 1;
      state.str += unescapeAll(str.slice(start, pos));
      state.ok = true;
      return state;
    } else if (code === 0x28 /* ( */ && state.marker === 0x29 /* ) */) {
      // a raw "(" inside a (...) title is invalid
      return state;
    } else if (code === 0x5C /* \ */ && pos + 1 < max) {
      // skip the escaped character
      pos++;
    }
    pos++;
  }

  // no closing marker found, but this link title may continue on the next line (for references)
  state.can_continue = true;
  state.str += unescapeAll(str.slice(start, pos));
  return state;
}
 | ||
| 
 | ||
// Just a shortcut for bulk export

// Frozen namespace object bundling the link-parsing helpers defined above.
var helpers = /*#__PURE__*/Object.freeze({
  __proto__: null,
  parseLinkDestination: parseLinkDestination,
  parseLinkLabel: parseLinkLabel,
  parseLinkTitle: parseLinkTitle
});
 | ||
| 
 | ||
| /**
 | ||
|  * class Renderer
 | ||
|  *
 | ||
|  * Generates HTML from parsed token stream. Each instance has independent
 | ||
|  * copy of rules. Those can be rewritten with ease. Also, you can add new
 | ||
|  * rules if you create plugin and adds new token types.
 | ||
|  **/
 | ||
| 
 | ||
// Default render rules, keyed by token type. Each rule receives the token
// stream, the current index, parser options, env, and the renderer itself.
const default_rules = {};

default_rules.code_inline = function (tokens, idx, options, env, slf) {
  const token = tokens[idx];
  return `<code${slf.renderAttrs(token)}>${escapeHtml(token.content)}</code>`;
};

default_rules.code_block = function (tokens, idx, options, env, slf) {
  const token = tokens[idx];
  return `<pre${slf.renderAttrs(token)}><code>${escapeHtml(token.content)}</code></pre>\n`;
};
 | ||
// Render a fenced code block. The info string ("```js foo") is split into the
// language name (first word) and the remaining attributes; an optional
// options.highlight callback may return pre-built HTML (a full <pre> wins
// outright, anything else is wrapped here).
default_rules.fence = function (tokens, idx, options, env, slf) {
  const token = tokens[idx];
  const info = token.info ? unescapeAll(token.info).trim() : '';
  let langName = '';
  let langAttrs = '';
  if (info) {
    // split with a capturing group keeps the whitespace separators at odd
    // indices, so arr[0] is the language and arr[2..] the attribute text
    const arr = info.split(/(\s+)/g);
    langName = arr[0];
    langAttrs = arr.slice(2).join('');
  }
  let highlighted;
  if (options.highlight) {
    // the highlighter may return '' / null — fall back to plain escaping
    highlighted = options.highlight(token.content, langName, langAttrs) || escapeHtml(token.content);
  } else {
    highlighted = escapeHtml(token.content);
  }
  // A highlighter returning a complete <pre> block is used verbatim.
  if (highlighted.indexOf('<pre') === 0) {
    return highlighted + '\n';
  }

  // If language exists, inject class gently, without modifying original token.
  // May be, one day we will add .deepClone() for token and simplify this part, but
  // now we prefer to keep things local.
  if (info) {
    const i = token.attrIndex('class');
    const tmpAttrs = token.attrs ? token.attrs.slice() : [];
    if (i < 0) {
      tmpAttrs.push(['class', options.langPrefix + langName]);
    } else {
      // copy the existing class attr before appending, to avoid mutation
      tmpAttrs[i] = tmpAttrs[i].slice();
      tmpAttrs[i][1] += ' ' + options.langPrefix + langName;
    }

    // Fake token just to render attributes
    const tmpToken = {
      attrs: tmpAttrs
    };
    return `<pre><code${slf.renderAttrs(tmpToken)}>${highlighted}</code></pre>\n`;
  }
  return `<pre><code${slf.renderAttrs(token)}>${highlighted}</code></pre>\n`;
};
 | ||
default_rules.image = function (tokens, idx, options, env, slf) {
  const token = tokens[idx];

  // The "alt" attr MUST be set, even if empty — it is mandatory and must sit
  // at the proper position for the tests. CommonMark requires alt text to be
  // the children rendered as plain text (markup stripped), so overwrite the
  // placeholder value here before rendering the tag.
  token.attrs[token.attrIndex('alt')][1] = slf.renderInlineAsText(token.children, options, env);
  return slf.renderToken(tokens, idx, options);
};

default_rules.hardbreak = function (tokens, idx, options /*, env */) {
  if (options.xhtmlOut) { return '<br />\n'; }
  return '<br>\n';
};

default_rules.softbreak = function (tokens, idx, options /*, env */) {
  // soft line breaks only become <br> when options.breaks is on
  if (!options.breaks) { return '\n'; }
  return options.xhtmlOut ? '<br />\n' : '<br>\n';
};

default_rules.text = function (tokens, idx /*, options, env */) {
  return escapeHtml(tokens[idx].content);
};

// Raw HTML tokens pass through unescaped by design.
default_rules.html_block = function (tokens, idx /*, options, env */) {
  return tokens[idx].content;
};

default_rules.html_inline = function (tokens, idx /*, options, env */) {
  return tokens[idx].content;
};
 | ||
| 
 | ||
/**
 * new Renderer()
 *
 * Creates new [[Renderer]] instance and fill [[Renderer#rules]] with defaults.
 **/
function Renderer() {
  /**
   * Renderer#rules -> Object
   *
   * Contains render rules for tokens. Can be updated and extended.
   *
   * ##### Example
   *
   * ```javascript
   * var md = require('markdown-it')();
   *
   * md.renderer.rules.strong_open  = function () { return '<b>'; };
   * md.renderer.rules.strong_close = function () { return '</b>'; };
   *
   * var result = md.renderInline(...);
   * ```
   *
   * Each rule is called as independent static function with fixed signature:
   *
   * ```javascript
   * function my_token_render(tokens, idx, options, env, renderer) {
   *   // ...
   *   return renderedHTML;
   * }
   * ```
   *
   * See [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.mjs)
   * for more details and examples.
   **/
  // Shallow copy so per-instance rule overrides never touch the shared defaults.
  this.rules = assign({}, default_rules);
}
 | ||
| 
 | ||
/**
 * Renderer.renderAttrs(token) -> String
 *
 * Render token attributes to a string of ` name="value"` pairs (leading
 * space included); both names and values are HTML-escaped.
 **/
Renderer.prototype.renderAttrs = function renderAttrs(token) {
  if (!token.attrs) {
    return '';
  }

  let result = '';
  for (let i = 0; i < token.attrs.length; i++) {
    const attr = token.attrs[i];
    result += ` ${escapeHtml(attr[0])}="${escapeHtml(attr[1])}"`;
  }
  return result;
};
 | ||
| 
 | ||
/**
 * Renderer.renderToken(tokens, idx, options) -> String
 * - tokens (Array): list of tokens
 * - idx (Number): token index to render
 * - options (Object): params of parser instance
 *
 * Default token renderer. Can be overriden by custom function
 * in [[Renderer#rules]].
 **/
Renderer.prototype.renderToken = function renderToken(tokens, idx, options) {
  const token = tokens[idx];
  let result = '';

  // Tight list paragraphs render nothing at all
  if (token.hidden) {
    return '';
  }

  // Insert a newline between hidden paragraph and subsequent opening
  // block-level tag.
  //
  // For example, here we should insert a newline before blockquote:
  //  - a
  //    >
  //
  if (token.block && token.nesting !== -1 && idx && tokens[idx - 1].hidden) {
    result += '\n';
  }

  // Add token name, e.g. `<img` (nesting === -1 means a closing tag)
  result += (token.nesting === -1 ? '</' : '<') + token.tag;

  // Encode attributes, e.g. `<img src="foo"`
  result += this.renderAttrs(token);

  // Add a slash for self-closing tags, e.g. `<img src="foo" /`
  if (token.nesting === 0 && options.xhtmlOut) {
    result += ' /';
  }

  // Check if we need to add a newline after this tag
  let needLf = false;
  if (token.block) {
    needLf = true;
    if (token.nesting === 1) {
      if (idx + 1 < tokens.length) {
        const nextToken = tokens[idx + 1];
        if (nextToken.type === 'inline' || nextToken.hidden) {
          // Block-level tag containing an inline tag.
          //
          needLf = false;
        } else if (nextToken.nesting === -1 && nextToken.tag === token.tag) {
          // Opening tag + closing tag of the same type. E.g. `<li></li>`.
          //
          needLf = false;
        }
      }
    }
  }
  result += needLf ? '>\n' : '>';
  return result;
};
 | ||
| 
 | ||
/**
 * Renderer.renderInline(tokens, options, env) -> String
 * - tokens (Array): list on block tokens to render
 * - options (Object): params of parser instance
 * - env (Object): additional data from parsed input (references, for example)
 *
 * The same as [[Renderer.render]], but for single token of `inline` type.
 **/
Renderer.prototype.renderInline = function (tokens, options, env) {
  const rules = this.rules;
  const len = tokens.length;
  let result = '';

  for (let i = 0; i < len; i++) {
    const rule = rules[tokens[i].type];
    // Use the registered rule when one exists; otherwise fall back to the
    // generic token renderer.
    if (rule !== undefined) {
      result += rule(tokens, i, options, env, this);
    } else {
      result += this.renderToken(tokens, i, options);
    }
  }
  return result;
};
 | ||
| 
 | ||
/** internal
 * Renderer.renderInlineAsText(tokens, options, env) -> String
 * - tokens (Array): list on block tokens to render
 * - options (Object): params of parser instance
 * - env (Object): additional data from parsed input (references, for example)
 *
 * Special kludge for image `alt` attributes to conform CommonMark spec.
 * Don't try to use it! Spec requires to show `alt` content with stripped markup,
 * instead of simple escaping.
 **/
Renderer.prototype.renderInlineAsText = function (tokens, options, env) {
  let result = '';

  for (const token of tokens) {
    if (token.type === 'text') {
      result += token.content;
    } else if (token.type === 'image') {
      // nested images contribute their own stripped-down children
      result += this.renderInlineAsText(token.children, options, env);
    } else if (token.type === 'html_inline' || token.type === 'html_block') {
      result += token.content;
    } else if (token.type === 'softbreak' || token.type === 'hardbreak') {
      result += '\n';
    }
    // all other token types are skipped
  }
  return result;
};
 | ||
| 
 | ||
/**
 * Renderer.render(tokens, options, env) -> String
 * - tokens (Array): list on block tokens to render
 * - options (Object): params of parser instance
 * - env (Object): additional data from parsed input (references, for example)
 *
 * Takes token stream and generates HTML. Probably, you will never need to call
 * this method directly.
 **/
Renderer.prototype.render = function (tokens, options, env) {
  const rules = this.rules;
  const len = tokens.length;
  let result = '';
  let i = 0;

  while (i < len) {
    const type = tokens[i].type;
    if (type === 'inline') {
      // inline containers delegate to renderInline over their children
      result += this.renderInline(tokens[i].children, options, env);
    } else if (typeof rules[type] !== 'undefined') {
      result += rules[type](tokens, i, options, env, this);
    } else {
      result += this.renderToken(tokens, i, options, env);
    }
    i++;
  }
  return result;
};
 | ||
| 
 | ||
| /**
 | ||
|  * class Ruler
 | ||
|  *
 | ||
|  * Helper class, used by [[MarkdownIt#core]], [[MarkdownIt#block]] and
 | ||
|  * [[MarkdownIt#inline]] to manage sequences of functions (rules):
 | ||
|  *
 | ||
|  * - keep rules in defined order
 | ||
|  * - assign the name to each rule
 | ||
|  * - enable/disable rules
 | ||
|  * - add/replace rules
 | ||
 * - allow assigning rules to additional named chains (within the same ruler)
 | ||
 * - caching lists of active rules
 | ||
|  *
 | ||
 * You will not need to use this class directly unless you write plugins. For simple
 | ||
|  * rules control use [[MarkdownIt.disable]], [[MarkdownIt.enable]] and
 | ||
|  * [[MarkdownIt.use]].
 | ||
|  **/
 | ||
| 
 | ||
/**
 * new Ruler()
 **/
function Ruler() {
  // List of added rules. Each element is:
  //
  // {
  //   name: XXX,
  //   enabled: Boolean,
  //   fn: Function(),
  //   alt: [ name2, name3 ]
  // }
  //
  this.__rules__ = [];

  // Cached rule chains.
  //
  // First level - chain name, '' for default.
  // Second level - digital anchor for fast filtering by charcodes.
  //
  // `null` marks the cache as stale; it is rebuilt by __compile__().
  this.__cache__ = null;
}
 | ||
| 
 | ||
| // Helper methods, should not be used directly
 | ||
| 
 | ||
// Find rule index by name; -1 when no rule with that name exists.
//
Ruler.prototype.__find__ = function (name) {
  return this.__rules__.findIndex(function (rule) {
    return rule.name === name;
  });
};
 | ||
| 
 | ||
// Build rules lookup cache
//
// Rebuilds __cache__ as { chainName: [fn, ...] }, where '' is the default
// chain and every alternate chain name mentioned by an enabled rule gets
// its own filtered, ordered list of rule functions.
Ruler.prototype.__compile__ = function () {
  const rules = this.__rules__;
  const chains = [''];

  // Collect the unique alternate-chain names used by enabled rules.
  rules.forEach(function (rule) {
    if (!rule.enabled) { return; }
    rule.alt.forEach(function (altName) {
      if (chains.indexOf(altName) < 0) {
        chains.push(altName);
      }
    });
  });

  const cache = {};
  chains.forEach(function (chain) {
    cache[chain] = rules
      .filter(function (rule) {
        // the default chain ('') takes every enabled rule; named chains only
        // take rules that list them in `alt`
        return rule.enabled && (!chain || rule.alt.indexOf(chain) >= 0);
      })
      .map(function (rule) {
        return rule.fn;
      });
  });
  this.__cache__ = cache;
};
 | ||
| 
 | ||
/**
 * Ruler.at(name, fn [, options])
 * - name (String): name of the rule to replace.
 * - fn (Function): replacement rule function.
 * - options (Object): optional rule options.
 *
 * Swap out the rule registered under `name` for a new function and options.
 * Throws an error if no rule with that name exists.
 *
 * ##### Options:
 *
 * - __alt__ - array with names of "alternate" chains.
 *
 * ##### Example
 *
 * Replace existing typographer replacement rule with new one:
 *
 * ```javascript
 * var md = require('markdown-it')();
 *
 * md.core.ruler.at('replacements', function replace(state) {
 *   //...
 * });
 * ```
 **/
Ruler.prototype.at = function (name, fn, options) {
  const opt = options || {};
  const index = this.__find__(name);

  if (index === -1) {
    throw new Error('Parser rule not found: ' + name);
  }

  const rule = this.__rules__[index];
  rule.fn = fn;
  rule.alt = opt.alt || [];

  // Invalidate the compiled chain cache.
  this.__cache__ = null;
};
 | ||
| 
 | ||
/**
 * Ruler.before(beforeName, ruleName, fn [, options])
 * - beforeName (String): new rule will be added before this one.
 * - ruleName (String): name of added rule.
 * - fn (Function): rule function.
 * - options (Object): rule options (not mandatory).
 *
 * Insert a new rule into the chain immediately before the rule named
 * `beforeName`. Throws if that rule does not exist. See also
 * [[Ruler.after]], [[Ruler.push]].
 *
 * ##### Options:
 *
 * - __alt__ - array with names of "alternate" chains.
 *
 * ##### Example
 *
 * ```javascript
 * var md = require('markdown-it')();
 *
 * md.block.ruler.before('paragraph', 'my_rule', function replace(state) {
 *   //...
 * });
 * ```
 **/
Ruler.prototype.before = function (beforeName, ruleName, fn, options) {
  const opt = options || {};
  const index = this.__find__(beforeName);

  if (index === -1) {
    throw new Error('Parser rule not found: ' + beforeName);
  }

  const rule = { name: ruleName, enabled: true, fn, alt: opt.alt || [] };
  this.__rules__.splice(index, 0, rule);

  // Invalidate the compiled chain cache.
  this.__cache__ = null;
};
 | ||
| 
 | ||
/**
 * Ruler.after(afterName, ruleName, fn [, options])
 * - afterName (String): new rule will be added after this one.
 * - ruleName (String): name of added rule.
 * - fn (Function): rule function.
 * - options (Object): rule options (not mandatory).
 *
 * Insert a new rule into the chain immediately after the rule named
 * `afterName`. Throws if that rule does not exist. See also
 * [[Ruler.before]], [[Ruler.push]].
 *
 * ##### Options:
 *
 * - __alt__ - array with names of "alternate" chains.
 *
 * ##### Example
 *
 * ```javascript
 * var md = require('markdown-it')();
 *
 * md.inline.ruler.after('text', 'my_rule', function replace(state) {
 *   //...
 * });
 * ```
 **/
Ruler.prototype.after = function (afterName, ruleName, fn, options) {
  const opt = options || {};
  const index = this.__find__(afterName);

  if (index === -1) {
    throw new Error('Parser rule not found: ' + afterName);
  }

  const rule = { name: ruleName, enabled: true, fn, alt: opt.alt || [] };
  this.__rules__.splice(index + 1, 0, rule);

  // Invalidate the compiled chain cache.
  this.__cache__ = null;
};
 | ||
| 
 | ||
/**
 * Ruler.push(ruleName, fn [, options])
 * - ruleName (String): name of added rule.
 * - fn (Function): rule function.
 * - options (Object): rule options (not mandatory).
 *
 * Append a new rule to the end of the chain. See also
 * [[Ruler.before]], [[Ruler.after]].
 *
 * ##### Options:
 *
 * - __alt__ - array with names of "alternate" chains.
 *
 * ##### Example
 *
 * ```javascript
 * var md = require('markdown-it')();
 *
 * md.core.ruler.push('my_rule', function replace(state) {
 *   //...
 * });
 * ```
 **/
Ruler.prototype.push = function (ruleName, fn, options) {
  const opt = options || {};

  this.__rules__.push({ name: ruleName, enabled: true, fn, alt: opt.alt || [] });

  // Invalidate the compiled chain cache.
  this.__cache__ = null;
};
 | ||
| 
 | ||
/**
 * Ruler.enable(list [, ignoreInvalid]) -> Array
 * - list (String|Array): list of rule names to enable.
 * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
 *
 * Enable rules with the given names. Throws if any name is unknown, unless
 * `ignoreInvalid` is set.
 *
 * Returns the list of rule names that were found (if no exception happened).
 *
 * See also [[Ruler.disable]], [[Ruler.enableOnly]].
 **/
Ruler.prototype.enable = function (list, ignoreInvalid) {
  const names = Array.isArray(list) ? list : [list];
  const result = [];

  // Look up each rule by name and flip it on.
  for (const name of names) {
    const idx = this.__find__(name);

    if (idx < 0) {
      if (ignoreInvalid) continue;
      throw new Error('Rules manager: invalid rule name ' + name);
    }

    this.__rules__[idx].enabled = true;
    result.push(name);
  }

  this.__cache__ = null;
  return result;
};
 | ||
| 
 | ||
/**
 * Ruler.enableOnly(list [, ignoreInvalid])
 * - list (String|Array): list of rule names to enable (whitelist).
 * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
 *
 * Enable only the rules with the given names and disable all others. Throws
 * if any name is unknown, unless `ignoreInvalid` is set.
 *
 * See also [[Ruler.disable]], [[Ruler.enable]].
 **/
Ruler.prototype.enableOnly = function (list, ignoreInvalid) {
  // Turn everything off, then re-enable just the whitelist.
  for (const rule of this.__rules__) {
    rule.enabled = false;
  }

  this.enable(Array.isArray(list) ? list : [list], ignoreInvalid);
};
 | ||
| 
 | ||
/**
 * Ruler.disable(list [, ignoreInvalid]) -> Array
 * - list (String|Array): list of rule names to disable.
 * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
 *
 * Disable rules with the given names. Throws if any name is unknown, unless
 * `ignoreInvalid` is set.
 *
 * Returns the list of rule names that were found (if no exception happened).
 *
 * See also [[Ruler.enable]], [[Ruler.enableOnly]].
 **/
Ruler.prototype.disable = function (list, ignoreInvalid) {
  const names = Array.isArray(list) ? list : [list];
  const result = [];

  // Look up each rule by name and flip it off.
  for (const name of names) {
    const idx = this.__find__(name);

    if (idx < 0) {
      if (ignoreInvalid) continue;
      throw new Error('Rules manager: invalid rule name ' + name);
    }

    this.__rules__[idx].enabled = false;
    result.push(name);
  }

  this.__cache__ = null;
  return result;
};
 | ||
| 
 | ||
/**
 * Ruler.getRules(chainName) -> Array
 *
 * Return the array of active rule functions for the given chain name,
 * compiling the lookup cache first if needed.
 *
 * Default chain name is `''` (empty string). It can't be skipped. That's
 * done intentionally, to keep signature monomorphic for high speed.
 **/
Ruler.prototype.getRules = function (chainName) {
  if (this.__cache__ === null) {
    this.__compile__();
  }

  // A chain can be empty when all its rules are disabled,
  // but we still have to return an Array.
  return this.__cache__[chainName] || [];
};
 | ||
| 
 | ||
// Token class

/**
 * class Token
 **/

/**
 * new Token(type, tag, nesting)
 *
 * Create new token and fill passed properties.
 **/
function Token(type, tag, nesting) {
  // Token#type -> String — token type, e.g. "paragraph_open"
  this.type = type;

  // Token#tag -> String — html tag name, e.g. "p"
  this.tag = tag;

  // Token#attrs -> Array — html attributes,
  // format: `[ [ name1, value1 ], [ name2, value2 ] ]`
  this.attrs = null;

  // Token#map -> Array — source map info, format: `[ line_begin, line_end ]`
  this.map = null;

  // Token#nesting -> Number — level change (from {-1, 0, 1}), where:
  //   1 = the tag is opening, 0 = self-closing, -1 = closing
  this.nesting = nesting;

  // Token#level -> Number — nesting level, the same as `state.level`
  this.level = 0;

  // Token#children -> Array — child nodes (inline and img tokens)
  this.children = null;

  // Token#content -> String — in a case of self-closing tag
  // (code, html, fence, etc.), it has contents of this tag
  this.content = '';

  // Token#markup -> String — '*' or '_' for emphasis, fence string for
  // fence, etc.
  this.markup = '';

  // Token#info -> String — additional information:
  //   - info string for "fence" tokens
  //   - "auto" for autolink "link_open" and "link_close" tokens
  //   - item marker string for ordered-list "list_item_open" tokens
  this.info = '';

  // Token#meta -> Object — a place for plugins to store arbitrary data
  this.meta = null;

  // Token#block -> Boolean — true for block-level tokens, false for inline
  // tokens; used in renderer to calculate line breaks
  this.block = false;

  // Token#hidden -> Boolean — if true, renderer ignores this element
  // (used for tight lists to hide paragraphs)
  this.hidden = false;
}
 | ||
| 
 | ||
/**
 * Token.attrIndex(name) -> Number
 *
 * Search attribute index by name; -1 when the attribute is absent.
 **/
Token.prototype.attrIndex = function attrIndex(name) {
  if (!this.attrs) {
    return -1;
  }
  return this.attrs.findIndex((attr) => attr[0] === name);
};
 | ||
| 
 | ||
/**
 * Token.attrPush(attrData)
 *
 * Add `[ name, value ]` attribute to list. Init attrs if necessary
 **/
Token.prototype.attrPush = function attrPush(attrData) {
  if (!this.attrs) {
    this.attrs = [attrData];
  } else {
    this.attrs.push(attrData);
  }
};
 | ||
| 
 | ||
/**
 * Token.attrSet(name, value)
 *
 * Set `name` attribute to `value`. Override old value if exists.
 **/
Token.prototype.attrSet = function attrSet(name, value) {
  const idx = this.attrIndex(name);

  if (idx < 0) {
    this.attrPush([name, value]);
  } else {
    this.attrs[idx] = [name, value];
  }
};
 | ||
| 
 | ||
/**
 * Token.attrGet(name)
 *
 * Get the value of attribute `name`, or null if it does not exist.
 **/
Token.prototype.attrGet = function attrGet(name) {
  const idx = this.attrIndex(name);
  return idx >= 0 ? this.attrs[idx][1] : null;
};
 | ||
| 
 | ||
/**
 * Token.attrJoin(name, value)
 *
 * Join value to existing attribute via space. Or create new attribute if not
 * exists. Useful to operate with token classes.
 **/
Token.prototype.attrJoin = function attrJoin(name, value) {
  const idx = this.attrIndex(name);

  if (idx < 0) {
    this.attrPush([name, value]);
  } else {
    this.attrs[idx][1] += ' ' + value;
  }
};
 | ||
| 
 | ||
// Core state object
//

// Shared state threaded through the core rule chain.
//
// src    - source string being parsed (normalized in the 'normalize' rule)
// md     - link to parser instance
// env    - sandbox object for cross-rule data exchange
// tokens - output token stream, filled by the rules
function StateCore(src, md, env) {
  this.env = env;
  this.src = src;
  this.md = md;
  this.tokens = [];
  this.inlineMode = false;
}

// re-export Token class to use in core rules
StateCore.prototype.Token = Token;
 | ||
| 
 | ||
// Normalize input string

// https://spec.commonmark.org/0.29/#line-ending
const NEWLINES_RE = /\r\n?|\n/g;
const NULL_RE = /\0/g;

// Core rule: convert every line ending to '\n' and replace NULL characters
// with U+FFFD, writing the result back to `state.src`.
function normalize(state) {
  const normalized = state.src
    .replace(NEWLINES_RE, '\n')
    .replace(NULL_RE, '\uFFFD');

  state.src = normalized;
}
 | ||
| 
 | ||
// Core rule: tokenize the source. In inline mode the whole source becomes a
// single 'inline' token; otherwise the full block parser runs.
function block(state) {
  if (state.inlineMode) {
    const token = new state.Token('inline', '', 0);
    token.content = state.src;
    token.map = [0, 1];
    token.children = [];
    state.tokens.push(token);
    return;
  }

  state.md.block.parse(state.src, state.md, state.env, state.tokens);
}
 | ||
| 
 | ||
// Core rule: run the inline parser over the content of every block-level
// 'inline' token, filling its `children` array.
function inline(state) {
  const tokens = state.tokens;
  const count = tokens.length;

  // Parse inlines (length is cached deliberately: only tokens present
  // before this rule runs are processed).
  for (let idx = 0; idx < count; idx++) {
    const current = tokens[idx];
    if (current.type !== 'inline') continue;
    state.md.inline.parse(current.content, state.md, state.env, current.children);
  }
}
 | ||
| 
 | ||
// Replace link-like texts with link nodes.
//
// Currently restricted by `md.validateLink()` to http/https/ftp
//

// True when `str` begins with an opening `<a>` or `<a ...>` html tag
// (case-insensitive).
function isLinkOpen$1(str) {
  return /^<a[>\s]/i.test(str);
}

// True when `str` begins with a closing `</a>` tag (case-insensitive,
// optional whitespace before '>').
function isLinkClose$1(str) {
  return /^<\/a\s*>/i.test(str);
}
 | ||
// Core rule: scan 'text' tokens with the linkify-it instance at
// `state.md.linkify` and replace each match with a
// link_open / text / link_close token triple. Text already inside markdown
// links or raw `<a>` html tags is left untouched. Only runs when the
// `linkify` option is enabled.
function linkify$1(state) {
  const blockTokens = state.tokens;
  if (!state.md.options.linkify) {
    return;
  }
  for (let j = 0, l = blockTokens.length; j < l; j++) {
    // Cheap pretest before walking children.
    if (blockTokens[j].type !== 'inline' || !state.md.linkify.pretest(blockTokens[j].content)) {
      continue;
    }
    let tokens = blockTokens[j].children;
    let htmlLinkLevel = 0;

    // We scan from the end, to keep position when new tags added.
    // Use reversed logic in links start/end match
    for (let i = tokens.length - 1; i >= 0; i--) {
      const currentToken = tokens[i];

      // Skip content of markdown links
      if (currentToken.type === 'link_close') {
        i--;
        while (tokens[i].level !== currentToken.level && tokens[i].type !== 'link_open') {
          i--;
        }
        continue;
      }

      // Skip content of html tag links
      if (currentToken.type === 'html_inline') {
        if (isLinkOpen$1(currentToken.content) && htmlLinkLevel > 0) {
          htmlLinkLevel--;
        }
        if (isLinkClose$1(currentToken.content)) {
          htmlLinkLevel++;
        }
      }
      if (htmlLinkLevel > 0) {
        continue;
      }
      if (currentToken.type === 'text' && state.md.linkify.test(currentToken.content)) {
        const text = currentToken.content;
        let links = state.md.linkify.match(text);

        // Now split string to nodes
        const nodes = [];
        let level = currentToken.level;
        let lastPos = 0;

        // forbid escape sequence at the start of the string,
        // this avoids http\://example.com/ from being linkified as
        // http:<a href="//example.com/">//example.com/</a>
        if (links.length > 0 && links[0].index === 0 && i > 0 && tokens[i - 1].type === 'text_special') {
          links = links.slice(1);
        }
        for (let ln = 0; ln < links.length; ln++) {
          const url = links[ln].url;
          const fullUrl = state.md.normalizeLink(url);
          // Drop matches the parser would consider unsafe.
          if (!state.md.validateLink(fullUrl)) {
            continue;
          }
          let urlText = links[ln].text;

          // Linkifier might send raw hostnames like "example.com", where url
          // starts with domain name. So we prepend http:// in those cases,
          // and remove it afterwards.
          //
          if (!links[ln].schema) {
            urlText = state.md.normalizeLinkText('http://' + urlText).replace(/^http:\/\//, '');
          } else if (links[ln].schema === 'mailto:' && !/^mailto:/i.test(urlText)) {
            urlText = state.md.normalizeLinkText('mailto:' + urlText).replace(/^mailto:/, '');
          } else {
            urlText = state.md.normalizeLinkText(urlText);
          }
          const pos = links[ln].index;
          // Plain text between the previous match and this one.
          if (pos > lastPos) {
            const token = new state.Token('text', '', 0);
            token.content = text.slice(lastPos, pos);
            token.level = level;
            nodes.push(token);
          }
          const token_o = new state.Token('link_open', 'a', 1);
          token_o.attrs = [['href', fullUrl]];
          token_o.level = level++;
          token_o.markup = 'linkify';
          token_o.info = 'auto';
          nodes.push(token_o);
          const token_t = new state.Token('text', '', 0);
          token_t.content = urlText;
          token_t.level = level;
          nodes.push(token_t);
          const token_c = new state.Token('link_close', 'a', -1);
          token_c.level = --level;
          token_c.markup = 'linkify';
          token_c.info = 'auto';
          nodes.push(token_c);
          lastPos = links[ln].lastIndex;
        }
        // Trailing plain text after the last match.
        if (lastPos < text.length) {
          const token = new state.Token('text', '', 0);
          token.content = text.slice(lastPos);
          token.level = level;
          nodes.push(token);
        }

        // replace current node
        blockTokens[j].children = tokens = arrayReplaceAt(tokens, i, nodes);
      }
    }
  }
}
 | ||
| 
 | ||
// Simple typographic replacements
//
// (c) (C) → ©
// (tm) (TM) → ™
// (r) (R) → ®
// +- → ±
// ... → … (also ?.... → ?.., !.... → !..)
// ???????? → ???, !!!!! → !!!, `,,` → `,`
// -- → –, --- → —
//

// TODO:
// - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾
// - multiplications 2 x 4 -> 2 × 4

// Cheap pretest for the "rare" punctuation replacements below.
const RARE_RE = /\+-|\.\.|\?\?\?\?|!!!!|,,|--/;

// Workaround for phantomjs - need regex without /g flag,
// or root check will fail every second time
const SCOPED_ABBR_TEST_RE = /\((c|tm|r)\)/i;
const SCOPED_ABBR_RE = /\((c|tm|r)\)/ig;
const SCOPED_ABBR = {
  c: '©',
  r: '®',
  tm: '™'
};

// Replacement callback for SCOPED_ABBR_RE matches.
function replaceFn(match, name) {
  return SCOPED_ABBR[name.toLowerCase()];
}

// Apply (c)/(tm)/(r) substitutions to 'text' tokens, skipping the bodies
// of autolinks.
function replace_scoped(inlineTokens) {
  let inside_autolink = 0;

  for (let i = inlineTokens.length - 1; i >= 0; i--) {
    const token = inlineTokens[i];

    if (token.type === 'text' && !inside_autolink) {
      token.content = token.content.replace(SCOPED_ABBR_RE, replaceFn);
    }

    // We iterate backwards, so link_close *enters* an autolink and
    // link_open *leaves* it.
    if (token.type === 'link_open' && token.info === 'auto') {
      inside_autolink--;
    }
    if (token.type === 'link_close' && token.info === 'auto') {
      inside_autolink++;
    }
  }
}

// Apply punctuation replacements (±, …, en/em dashes, ...) to 'text'
// tokens, skipping the bodies of autolinks.
function replace_rare(inlineTokens) {
  let inside_autolink = 0;

  for (let i = inlineTokens.length - 1; i >= 0; i--) {
    const token = inlineTokens[i];

    if (token.type === 'text' && !inside_autolink && RARE_RE.test(token.content)) {
      token.content = token.content
        .replace(/\+-/g, '±')
        // .., ..., ....... -> …
        // but ?..... & !..... -> ?.. & !..
        .replace(/\.{2,}/g, '…')
        .replace(/([?!])…/g, '$1..')
        .replace(/([?!]){4,}/g, '$1$1$1')
        .replace(/,{2,}/g, ',')
        // em-dash
        .replace(/(^|[^-])---(?=[^-]|$)/mg, '$1\u2014')
        // en-dash
        .replace(/(^|\s)--(?=\s|$)/mg, '$1\u2013')
        .replace(/(^|[^-\s])--(?=[^-\s]|$)/mg, '$1\u2013');
    }

    // Same backwards-iteration bookkeeping as in replace_scoped.
    if (token.type === 'link_open' && token.info === 'auto') {
      inside_autolink--;
    }
    if (token.type === 'link_close' && token.info === 'auto') {
      inside_autolink++;
    }
  }
}

// Core rule: run the typographic replacements over every inline token's
// children when the `typographer` option is enabled.
function replace(state) {
  if (!state.md.options.typographer) {
    return;
  }

  for (let blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {
    const blockToken = state.tokens[blkIdx];
    if (blockToken.type !== 'inline') {
      continue;
    }

    if (SCOPED_ABBR_TEST_RE.test(blockToken.content)) {
      replace_scoped(blockToken.children);
    }
    if (RARE_RE.test(blockToken.content)) {
      replace_rare(blockToken.children);
    }
  }
}
 | ||
| 
 | ||
// Convert straight quotation marks to typographic ones
//

const QUOTE_TEST_RE = /['"]/;
const QUOTE_RE = /['"]/g;
const APOSTROPHE = '\u2019'; /* ’ */

// Return `str` with the single character at `index` replaced by `ch`
// (the replacement may be longer than one character).
function replaceAt(str, index, ch) {
  return `${str.slice(0, index)}${ch}${str.slice(index + 1)}`;
}
 | ||
// Walk one inline token stream and replace straight quotes in 'text' tokens
// with the typographic quotes configured in `state.md.options.quotes`
// (indices 0/1 = double open/close, 2/3 = single open/close). Unmatched
// middle-of-word single quotes become apostrophes. The `stack` tracks
// candidate opening quotes per nesting level so open/close pairs match.
function process_inlines(tokens, state) {
  let j;
  const stack = [];
  for (let i = 0; i < tokens.length; i++) {
    const token = tokens[i];
    const thisLevel = tokens[i].level;
    // Drop stack entries from deeper nesting levels than the current token.
    for (j = stack.length - 1; j >= 0; j--) {
      if (stack[j].level <= thisLevel) {
        break;
      }
    }
    stack.length = j + 1;
    if (token.type !== 'text') {
      continue;
    }
    let text = token.content;
    let pos = 0;
    let max = text.length;

    /* eslint no-labels:0,block-scoped-var:0 */
    OUTER: while (pos < max) {
      // NOTE: QUOTE_RE is a shared /g regex; lastIndex is reset each pass.
      QUOTE_RE.lastIndex = pos;
      const t = QUOTE_RE.exec(text);
      if (!t) {
        break;
      }
      let canOpen = true;
      let canClose = true;
      pos = t.index + 1;
      const isSingle = t[0] === "'";

      // Find previous character,
      // default to space if it's the beginning of the line
      //
      let lastChar = 0x20;
      if (t.index - 1 >= 0) {
        lastChar = text.charCodeAt(t.index - 1);
      } else {
        // Quote is the first char of this token: look back through
        // earlier tokens for the preceding character.
        for (j = i - 1; j >= 0; j--) {
          if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // lastChar defaults to 0x20
          if (!tokens[j].content) continue; // should skip all tokens except 'text', 'html_inline' or 'code_inline'

          lastChar = tokens[j].content.charCodeAt(tokens[j].content.length - 1);
          break;
        }
      }

      // Find next character,
      // default to space if it's the end of the line
      //
      let nextChar = 0x20;
      if (pos < max) {
        nextChar = text.charCodeAt(pos);
      } else {
        // Quote is the last char of this token: look ahead through
        // later tokens for the following character.
        for (j = i + 1; j < tokens.length; j++) {
          if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // nextChar defaults to 0x20
          if (!tokens[j].content) continue; // should skip all tokens except 'text', 'html_inline' or 'code_inline'

          nextChar = tokens[j].content.charCodeAt(0);
          break;
        }
      }
      const isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar));
      const isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar));
      const isLastWhiteSpace = isWhiteSpace(lastChar);
      const isNextWhiteSpace = isWhiteSpace(nextChar);
      // A quote can open when not followed by whitespace, and can close
      // when not preceded by whitespace; punctuation neighbors restrict
      // this further (flanking rules).
      if (isNextWhiteSpace) {
        canOpen = false;
      } else if (isNextPunctChar) {
        if (!(isLastWhiteSpace || isLastPunctChar)) {
          canOpen = false;
        }
      }
      if (isLastWhiteSpace) {
        canClose = false;
      } else if (isLastPunctChar) {
        if (!(isNextWhiteSpace || isNextPunctChar)) {
          canClose = false;
        }
      }
      if (nextChar === 0x22 /* " */ && t[0] === '"') {
        if (lastChar >= 0x30 /* 0 */ && lastChar <= 0x39 /* 9 */) {
          // special case: 1"" - count first quote as an inch
          canClose = canOpen = false;
        }
      }
      if (canOpen && canClose) {
        // Replace quotes in the middle of punctuation sequence, but not
        // in the middle of the words, i.e.:
        //
        // 1. foo " bar " baz - not replaced
        // 2. foo-"-bar-"-baz - replaced
        // 3. foo"bar"baz     - not replaced
        //
        canOpen = isLastPunctChar;
        canClose = isNextPunctChar;
      }
      if (!canOpen && !canClose) {
        // middle of word
        if (isSingle) {
          token.content = replaceAt(token.content, t.index, APOSTROPHE);
        }
        continue;
      }
      if (canClose) {
        // this could be a closing quote, rewind the stack to get a match
        for (j = stack.length - 1; j >= 0; j--) {
          let item = stack[j];
          if (stack[j].level < thisLevel) {
            break;
          }
          if (item.single === isSingle && stack[j].level === thisLevel) {
            item = stack[j];
            let openQuote;
            let closeQuote;
            if (isSingle) {
              openQuote = state.md.options.quotes[2];
              closeQuote = state.md.options.quotes[3];
            } else {
              openQuote = state.md.options.quotes[0];
              closeQuote = state.md.options.quotes[1];
            }

            // replace token.content *before* tokens[item.token].content,
            // because, if they are pointing at the same token, replaceAt
            // could mess up indices when quote length != 1
            token.content = replaceAt(token.content, t.index, closeQuote);
            tokens[item.token].content = replaceAt(tokens[item.token].content, item.pos, openQuote);
            pos += closeQuote.length - 1;
            if (item.token === i) {
              pos += openQuote.length - 1;
            }
            text = token.content;
            max = text.length;
            stack.length = j;
            continue OUTER;
          }
        }
      }
      if (canOpen) {
        // Remember this quote as a potential opener.
        stack.push({
          token: i,
          pos: t.index,
          single: isSingle,
          level: thisLevel
        });
      } else if (canClose && isSingle) {
        // Closing single quote with no matching opener: treat as apostrophe.
        token.content = replaceAt(token.content, t.index, APOSTROPHE);
      }
    }
  }
}
 | ||
// Core rule: convert straight quotes to typographic quotes in every inline
// token, when the `typographer` option is enabled.
function smartquotes(state) {
  /* eslint max-depth:0 */
  if (!state.md.options.typographer) {
    return;
  }

  for (let blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {
    const blockToken = state.tokens[blkIdx];
    if (blockToken.type !== 'inline') continue;
    // Cheap pretest before walking children.
    if (!QUOTE_TEST_RE.test(blockToken.content)) continue;
    process_inlines(blockToken.children, state);
  }
}
 | ||
| 
 | ||
// Join raw text tokens with the rest of the text
//
// This is set as a separate rule to provide an opportunity for plugins
// to run text replacements after text join, but before escape join.
//
// For example, `\:)` shouldn't be replaced with an emoji.
//

function text_join(state) {
  const blockTokens = state.tokens;
  const blockCount = blockTokens.length;

  for (let j = 0; j < blockCount; j++) {
    if (blockTokens[j].type !== 'inline') continue;

    const tokens = blockTokens[j].children;
    const max = tokens.length;

    // Demote 'text_special' tokens (escape sequences etc.) to plain 'text'.
    for (let curr = 0; curr < max; curr++) {
      if (tokens[curr].type === 'text_special') {
        tokens[curr].type = 'text';
      }
    }

    // Merge adjacent 'text' tokens in place, compacting the array.
    let last = 0;
    let curr = 0;
    for (; curr < max; curr++) {
      if (tokens[curr].type === 'text' && curr + 1 < max && tokens[curr + 1].type === 'text') {
        // Fold this node's content into the next one; the slot is dropped.
        tokens[curr + 1].content = tokens[curr].content + tokens[curr + 1].content;
      } else {
        if (curr !== last) {
          tokens[last] = tokens[curr];
        }
        last++;
      }
    }

    if (curr !== last) {
      tokens.length = last;
    }
  }
}
 | ||
| 
 | ||
/** internal
 * class Core
 *
 * Top-level rules executor. Glues block/inline parsers and does intermediate
 * transformations.
 **/

// Ordered [name, fn] pairs forming the default core rule chain; each fn
// receives the shared Core state and mutates its token stream in place.
const _rules$2 = [['normalize', normalize], ['block', block], ['inline', inline], ['linkify', linkify$1], ['replacements', replace], ['smartquotes', smartquotes],
// `text_join` finds `text_special` tokens (for escape sequences)
// and joins them with the rest of the text
['text_join', text_join]];
 | ||
| 
 | ||
/**
 * new Core()
 **/
function Core() {
  /**
   * Core#ruler -> Ruler
   *
   * [[Ruler]] instance. Keep configuration of core rules.
   **/
  this.ruler = new Ruler();

  // Register the default core rule chain in order.
  for (const [name, fn] of _rules$2) {
    this.ruler.push(name, fn);
  }
}
 | ||
| 
 | ||
/**
 * Core.process(state)
 *
 * Executes core chain rules.
 **/
Core.prototype.process = function (state) {
  // Run every enabled core rule over the shared state, in order.
  for (const rule of this.ruler.getRules('')) {
    rule(state);
  }
};

// State class used by this chain (re-exported for rule access).
Core.prototype.State = StateCore;
 | ||
| 
 | ||
// Parser state class

/**
 * Block parser state.
 *
 * Pre-scans `src` once to build per-line offset caches (begin/end marks,
 * first-non-space shifts, expanded indents) that all block rules use for
 * fast line navigation, and holds the mutable parsing position.
 *
 * @param {String} src - source text to parse
 * @param {MarkdownIt} md - link to parser instance
 * @param {Object} env - sandbox object shared across rules
 * @param {Token[]} tokens - output token array, appended to in place
 */
function StateBlock(src, md, env, tokens) {
  this.src = src;

  // link to parser instance
  this.md = md;
  this.env = env;

  //
  // Internal state variables
  //

  this.tokens = tokens;
  this.bMarks = []; // line begin offsets for fast jumps
  this.eMarks = []; // line end offsets for fast jumps
  this.tShift = []; // offsets of the first non-space characters (tabs not expanded)
  this.sCount = []; // indents for each line (tabs expanded)

  // An amount of virtual spaces (tabs expanded) between beginning
  // of each line (bMarks) and real beginning of that line.
  //
  // It exists only as a hack because blockquotes override bMarks
  // losing information in the process.
  //
  // It's used only when expanding tabs, you can think about it as
  // an initial tab length, e.g. bsCount=21 applied to string `\t123`
  // means first tab should be expanded to 4-21%4 === 3 spaces.
  //
  this.bsCount = [];

  // block parser variables

  // required block content indent (for example, if we are
  // inside a list, it would be positioned after list marker)
  this.blkIndent = 0;
  this.line = 0; // line index in src
  this.lineMax = 0; // lines count
  this.tight = false; // loose/tight mode for lists
  this.ddIndent = -1; // indent of the current dd block (-1 if there isn't any)
  this.listIndent = -1; // indent of the current list block (-1 if there isn't any)

  // can be 'blockquote', 'list', 'root', 'paragraph' or 'reference'
  // used in lists to determine if they interrupt a paragraph
  this.parentType = 'root';
  this.level = 0;

  // Create caches
  // Generate markers.
  // Single pass over the source: for every physical line record its begin
  // offset, end offset, raw indent (chars) and expanded indent (columns).
  const s = this.src;
  for (let start = 0, pos = 0, indent = 0, offset = 0, len = s.length, indent_found = false; pos < len; pos++) {
    const ch = s.charCodeAt(pos);
    if (!indent_found) {
      if (isSpace(ch)) {
        indent++;
        if (ch === 0x09) {
          // tab expands to the next 4-column stop
          offset += 4 - offset % 4;
        } else {
          offset++;
        }
        continue;
      } else {
        indent_found = true;
      }
    }
    if (ch === 0x0A || pos === len - 1) {
      if (ch !== 0x0A) {
        // last line has no trailing newline: include the final char
        pos++;
      }
      this.bMarks.push(start);
      this.eMarks.push(pos);
      this.tShift.push(indent);
      this.sCount.push(offset);
      this.bsCount.push(0);
      indent_found = false;
      indent = 0;
      offset = 0;
      start = pos + 1;
    }
  }

  // Push fake entry to simplify cache bounds checks
  this.bMarks.push(s.length);
  this.eMarks.push(s.length);
  this.tShift.push(0);
  this.sCount.push(0);
  this.bsCount.push(0);
  this.lineMax = this.bMarks.length - 1; // don't count last fake line
}
 | ||
| 
 | ||
| // Push new token to "stream".
 | ||
| //
 | ||
| StateBlock.prototype.push = function (type, tag, nesting) {
 | ||
|   const token = new Token(type, tag, nesting);
 | ||
|   token.block = true;
 | ||
|   if (nesting < 0) this.level--; // closing tag
 | ||
|   token.level = this.level;
 | ||
|   if (nesting > 0) this.level++; // opening tag
 | ||
| 
 | ||
|   this.tokens.push(token);
 | ||
|   return token;
 | ||
| };
 | ||
// True when the given line holds nothing past its indentation shift.
StateBlock.prototype.isEmpty = function isEmpty(line) {
  const contentStart = this.bMarks[line] + this.tShift[line];
  return contentStart >= this.eMarks[line];
};
 | ||
// Advance past consecutive empty lines; returns the index of the first
// non-empty line (or lineMax when none remain).
StateBlock.prototype.skipEmptyLines = function skipEmptyLines(from) {
  const max = this.lineMax;
  while (from < max && this.bMarks[from] + this.tShift[from] >= this.eMarks[from]) {
    from++;
  }
  return from;
};
 | ||
| 
 | ||
// Skip spaces from given position.
// Returns the offset of the first non-space character at or after `pos`.
StateBlock.prototype.skipSpaces = function skipSpaces(pos) {
  const max = this.src.length;
  while (pos < max && isSpace(this.src.charCodeAt(pos))) {
    pos++;
  }
  return pos;
};
 | ||
| 
 | ||
// Skip spaces from given position in reverse.
// Walks backwards from `pos` (exclusive), never before `min`; returns the
// offset just past the last non-space character found.
StateBlock.prototype.skipSpacesBack = function skipSpacesBack(pos, min) {
  if (pos <= min) {
    return pos;
  }
  while (pos > min) {
    pos--;
    if (!isSpace(this.src.charCodeAt(pos))) {
      return pos + 1;
    }
  }
  return pos;
};
 | ||
| 
 | ||
// Skip char codes from given position.
// Returns the offset just past the run of `code` starting at `pos`.
StateBlock.prototype.skipChars = function skipChars(pos, code) {
  const max = this.src.length;
  while (pos < max && this.src.charCodeAt(pos) === code) {
    pos++;
  }
  return pos;
};
 | ||
| 
 | ||
// Skip char codes reverse from given position - 1.
// Walks backwards over a run of `code`, never before `min`; returns the
// offset just past the last character that differs from `code`.
StateBlock.prototype.skipCharsBack = function skipCharsBack(pos, code, min) {
  if (pos <= min) {
    return pos;
  }
  while (pos > min) {
    pos--;
    if (this.src.charCodeAt(pos) !== code) {
      return pos + 1;
    }
  }
  return pos;
};
 | ||
| 
 | ||
// cut lines range from source.
/**
 * Extract lines [begin, end) from the source, stripping up to `indent`
 * columns of leading indentation from each line. Tabs crossing the indent
 * boundary are partially expanded into spaces.
 *
 * @param {Number} begin - first line index (inclusive)
 * @param {Number} end - last line index (exclusive)
 * @param {Number} indent - indentation columns to remove from each line
 * @param {Boolean} keepLastLF - keep the trailing newline of the last line
 * @returns {String} concatenated line contents
 */
StateBlock.prototype.getLines = function getLines(begin, end, indent, keepLastLF) {
  if (begin >= end) {
    return '';
  }
  const queue = new Array(end - begin);
  for (let i = 0, line = begin; line < end; line++, i++) {
    let lineIndent = 0;
    const lineStart = this.bMarks[line];
    let first = lineStart;
    let last;
    if (line + 1 < end || keepLastLF) {
      // No need for bounds check because we have fake entry on tail.
      last = this.eMarks[line] + 1;
    } else {
      last = this.eMarks[line];
    }
    // Consume indentation columns until `indent` is reached or content begins.
    while (first < last && lineIndent < indent) {
      const ch = this.src.charCodeAt(first);
      if (isSpace(ch)) {
        if (ch === 0x09) {
          // tab width depends on bsCount (virtual prefix stripped by blockquotes)
          lineIndent += 4 - (lineIndent + this.bsCount[line]) % 4;
        } else {
          lineIndent++;
        }
      } else if (first - lineStart < this.tShift[line]) {
        // patched tShift masked characters to look like spaces (blockquotes, list markers)
        lineIndent++;
      } else {
        break;
      }
      first++;
    }
    if (lineIndent > indent) {
      // partially expanding tabs in code blocks, e.g '\t\tfoobar'
      // with indent=2 becomes '  \tfoobar'
      queue[i] = new Array(lineIndent - indent + 1).join(' ') + this.src.slice(first, last);
    } else {
      queue[i] = this.src.slice(first, last);
    }
  }
  return queue.join('');
};
 | ||
| 
 | ||
// re-export Token class so block rules can construct tokens via `state.Token`
StateBlock.prototype.Token = Token;
 | ||
| 
 | ||
// GFM table, https://github.github.com/gfm/#tables-extension-

// Limit the amount of empty autocompleted cells in a table,
// see https://github.com/markdown-it/markdown-it/issues/1000,
//
// Both pulldown-cmark and commonmark-hs limit the number of cells this way to ~200k.
// We set it to 65k, which can expand user input by a factor of x370
// (256x256 square is 1.8kB expanded into 650kB).
//
// DoS hardening: caps the work/output produced by malformed tables.
const MAX_AUTOCOMPLETED_CELLS = 0x10000;
 | ||
// Return the content of a single source line, starting past its
// cached indentation shift (tShift) and ending at the line's end mark.
function getLine(state, line) {
  const begin = state.bMarks[line] + state.tShift[line];
  const end = state.eMarks[line];
  return state.src.slice(begin, end);
}
 | ||
// Split a table row on unescaped '|' characters. An escaped pipe ('\|')
// stays inside its cell with the backslash removed; all other backslash
// sequences are passed through untouched.
function escapedSplit(str) {
  const cells = [];
  const len = str.length;
  let escaped = false; // was the previous character a backslash?
  let cellStart = 0;   // start offset of the chunk not yet copied out
  let pending = '';    // accumulated cell text preceding an escaped pipe
  for (let i = 0; i < len; i++) {
    const code = str.charCodeAt(i);
    if (code === 0x7c /* | */) {
      if (escaped) {
        // escaped pipe '\|': drop the backslash, keep the pipe in the cell
        pending += str.substring(cellStart, i - 1);
        cellStart = i;
      } else {
        // unescaped pipe: cell boundary
        cells.push(pending + str.substring(cellStart, i));
        pending = '';
        cellStart = i + 1;
      }
    }
    escaped = code === 0x5c /* \ */;
  }
  cells.push(pending + str.substring(cellStart));
  return cells;
}
 | ||
/**
 * GFM table rule.
 *
 * Validates the delimiter row (second line), derives per-column alignment
 * from it, then emits the table/thead/tbody token structure and advances
 * state.line past the table. In silent mode only detection is performed.
 *
 * @param {StateBlock} state
 * @param {Number} startLine - candidate header-row line
 * @param {Number} endLine - first line past the allowed range
 * @param {Boolean} silent - validation mode, emit no tokens
 * @returns {Boolean} true when a table was recognized
 */
function table(state, startLine, endLine, silent) {
  // should have at least two lines
  if (startLine + 2 > endLine) {
    return false;
  }
  let nextLine = startLine + 1;
  if (state.sCount[nextLine] < state.blkIndent) {
    return false;
  }

  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[nextLine] - state.blkIndent >= 4) {
    return false;
  }

  // first character of the second line should be '|', '-', ':',
  // and no other characters are allowed but spaces;
  // basically, this is the equivalent of /^[-:|][-:|\s]*$/ regexp

  let pos = state.bMarks[nextLine] + state.tShift[nextLine];
  if (pos >= state.eMarks[nextLine]) {
    return false;
  }
  const firstCh = state.src.charCodeAt(pos++);
  if (firstCh !== 0x7C /* | */ && firstCh !== 0x2D /* - */ && firstCh !== 0x3A /* : */) {
    return false;
  }
  if (pos >= state.eMarks[nextLine]) {
    return false;
  }
  const secondCh = state.src.charCodeAt(pos++);
  if (secondCh !== 0x7C /* | */ && secondCh !== 0x2D /* - */ && secondCh !== 0x3A /* : */ && !isSpace(secondCh)) {
    return false;
  }

  // if first character is '-', then second character must not be a space
  // (due to parsing ambiguity with list)
  if (firstCh === 0x2D /* - */ && isSpace(secondCh)) {
    return false;
  }
  while (pos < state.eMarks[nextLine]) {
    const ch = state.src.charCodeAt(pos);
    if (ch !== 0x7C /* | */ && ch !== 0x2D /* - */ && ch !== 0x3A /* : */ && !isSpace(ch)) {
      return false;
    }
    pos++;
  }
  let lineText = getLine(state, startLine + 1);
  let columns = lineText.split('|');
  const aligns = [];
  for (let i = 0; i < columns.length; i++) {
    const t = columns[i].trim();
    if (!t) {
      // allow empty columns before and after table, but not in between columns;
      // e.g. allow ` |---| `, disallow ` ---||--- `
      if (i === 0 || i === columns.length - 1) {
        continue;
      } else {
        return false;
      }
    }
    if (!/^:?-+:?$/.test(t)) {
      return false;
    }
    // leading/trailing ':' on a delimiter cell select the column alignment
    if (t.charCodeAt(t.length - 1) === 0x3A /* : */) {
      aligns.push(t.charCodeAt(0) === 0x3A /* : */ ? 'center' : 'right');
    } else if (t.charCodeAt(0) === 0x3A /* : */) {
      aligns.push('left');
    } else {
      aligns.push('');
    }
  }
  lineText = getLine(state, startLine).trim();
  if (lineText.indexOf('|') === -1) {
    return false;
  }
  if (state.sCount[startLine] - state.blkIndent >= 4) {
    return false;
  }
  columns = escapedSplit(lineText);
  if (columns.length && columns[0] === '') columns.shift();
  if (columns.length && columns[columns.length - 1] === '') columns.pop();

  // header row will define an amount of columns in the entire table,
  // and align row should be exactly the same (the rest of the rows can differ)
  const columnCount = columns.length;
  if (columnCount === 0 || columnCount !== aligns.length) {
    return false;
  }
  if (silent) {
    return true;
  }
  const oldParentType = state.parentType;
  state.parentType = 'table';

  // use 'blockquote' lists for termination because it's
  // the most similar to tables
  const terminatorRules = state.md.block.ruler.getRules('blockquote');
  const token_to = state.push('table_open', 'table', 1);
  const tableLines = [startLine, 0];
  token_to.map = tableLines;
  const token_tho = state.push('thead_open', 'thead', 1);
  token_tho.map = [startLine, startLine + 1];
  const token_htro = state.push('tr_open', 'tr', 1);
  token_htro.map = [startLine, startLine + 1];
  for (let i = 0; i < columns.length; i++) {
    const token_ho = state.push('th_open', 'th', 1);
    if (aligns[i]) {
      token_ho.attrs = [['style', 'text-align:' + aligns[i]]];
    }
    const token_il = state.push('inline', '', 0);
    token_il.content = columns[i].trim();
    token_il.children = [];
    state.push('th_close', 'th', -1);
  }
  state.push('tr_close', 'tr', -1);
  state.push('thead_close', 'thead', -1);
  let tbodyLines;
  let autocompletedCells = 0;
  // Body rows: each line until outdent, blank line, code-block indent,
  // a terminator rule fires, or the autocomplete budget is exhausted.
  for (nextLine = startLine + 2; nextLine < endLine; nextLine++) {
    if (state.sCount[nextLine] < state.blkIndent) {
      break;
    }
    let terminate = false;
    for (let i = 0, l = terminatorRules.length; i < l; i++) {
      if (terminatorRules[i](state, nextLine, endLine, true)) {
        terminate = true;
        break;
      }
    }
    if (terminate) {
      break;
    }
    lineText = getLine(state, nextLine).trim();
    if (!lineText) {
      break;
    }
    if (state.sCount[nextLine] - state.blkIndent >= 4) {
      break;
    }
    columns = escapedSplit(lineText);
    if (columns.length && columns[0] === '') columns.shift();
    if (columns.length && columns[columns.length - 1] === '') columns.pop();

    // note: autocomplete count can be negative if user specifies more columns than header,
    // but that does not affect intended use (which is limiting expansion)
    autocompletedCells += columnCount - columns.length;
    if (autocompletedCells > MAX_AUTOCOMPLETED_CELLS) {
      break;
    }
    if (nextLine === startLine + 2) {
      const token_tbo = state.push('tbody_open', 'tbody', 1);
      token_tbo.map = tbodyLines = [startLine + 2, 0];
    }
    const token_tro = state.push('tr_open', 'tr', 1);
    token_tro.map = [nextLine, nextLine + 1];
    for (let i = 0; i < columnCount; i++) {
      const token_tdo = state.push('td_open', 'td', 1);
      if (aligns[i]) {
        token_tdo.attrs = [['style', 'text-align:' + aligns[i]]];
      }
      const token_il = state.push('inline', '', 0);
      // missing cells in short rows are autocompleted as empty
      token_il.content = columns[i] ? columns[i].trim() : '';
      token_il.children = [];
      state.push('td_close', 'td', -1);
    }
    state.push('tr_close', 'tr', -1);
  }
  if (tbodyLines) {
    state.push('tbody_close', 'tbody', -1);
    tbodyLines[1] = nextLine;
  }
  state.push('table_close', 'table', -1);
  tableLines[1] = nextLine;
  state.parentType = oldParentType;
  state.line = nextLine;
  return true;
}
 | ||
| 
 | ||
// Code block (4 spaces padded)
//
// Consumes every subsequent line that is either empty or still indented by
// at least 4 columns beyond blkIndent; trailing empty lines are excluded.
function code(state, startLine, endLine /*, silent */) {
  // an indented code block requires 4+ columns beyond the current block indent
  if (state.sCount[startLine] - state.blkIndent < 4) {
    return false;
  }

  // `last` tracks the line just past the final non-empty code line,
  // so interior blank lines are kept but trailing ones are dropped
  let line = startLine + 1;
  let last = line;
  while (line < endLine) {
    if (state.isEmpty(line)) {
      line++;
    } else if (state.sCount[line] - state.blkIndent >= 4) {
      line++;
      last = line;
    } else {
      break;
    }
  }

  state.line = last;
  const token = state.push('code_block', 'code', 0);
  token.content = state.getLines(startLine, last, 4 + state.blkIndent, false) + '\n';
  token.map = [startLine, state.line];
  return true;
}
 | ||
| 
 | ||
// fences (``` lang, ~~~ lang)
//
// Recognizes an opening run of 3+ backticks or tildes, optionally followed
// by an info string, then scans forward for a matching closing fence.
function fence(state, startLine, endLine, silent) {
  // an opening fence indented 4+ columns would be an indented code block
  if (state.sCount[startLine] - state.blkIndent >= 4) {
    return false;
  }

  let pos = state.bMarks[startLine] + state.tShift[startLine];
  let max = state.eMarks[startLine];
  if (pos + 3 > max) {
    return false;
  }
  const marker = state.src.charCodeAt(pos);
  if (marker !== 0x7E /* ~ */ && marker !== 0x60 /* ` */) {
    return false;
  }

  // measure the opening marker run; a fence needs at least three chars
  const runStart = pos;
  pos = state.skipChars(pos, marker);
  const openLen = pos - runStart;
  if (openLen < 3) {
    return false;
  }
  const markup = state.src.slice(runStart, pos);
  const params = state.src.slice(pos, max);

  // the info string of a backtick fence must not contain backticks
  if (marker === 0x60 /* ` */ && params.indexOf(String.fromCharCode(marker)) >= 0) {
    return false;
  }

  // Since start is found, we can report success here in validation mode
  if (silent) {
    return true;
  }

  // scan forward for the closing fence
  let line = startLine;
  let closed = false;
  while (!closed) {
    line++;
    if (line >= endLine) {
      // unclosed block should be autoclosed by end of document.
      // also block seems to be autoclosed by end of parent
      break;
    }
    const lineStart = state.bMarks[line] + state.tShift[line];
    max = state.eMarks[line];
    if (lineStart < max && state.sCount[line] < state.blkIndent) {
      // non-empty line with negative indent should stop the list:
      // - ```
      //  test
      break;
    }
    if (state.src.charCodeAt(lineStart) !== marker) {
      continue;
    }
    if (state.sCount[line] - state.blkIndent >= 4) {
      // closing fence should be indented less than 4 spaces
      continue;
    }
    pos = state.skipChars(lineStart, marker);

    // closing code fence must be at least as long as the opening one
    if (pos - lineStart < openLen) {
      continue;
    }

    // make sure tail has spaces only
    pos = state.skipSpaces(pos);
    if (pos < max) {
      continue;
    }
    closed = true;
  }

  // If a fence has heading spaces, they should be removed from its inner block
  const contentIndent = state.sCount[startLine];
  state.line = line + (closed ? 1 : 0);
  const token = state.push('fence', 'code', 0);
  token.info = params;
  token.content = state.getLines(startLine + 1, line, contentIndent, true);
  token.markup = markup;
  token.map = [startLine, state.line];
  return true;
}
 | ||
| 
 | ||
// Block quotes

/**
 * Blockquote rule ('>' marker).
 *
 * Temporarily rewrites the per-line caches (bMarks/bsCount/sCount/tShift)
 * so that the inner content can be tokenized as if the '>' markers were
 * absent, recursively tokenizes it, then restores the original caches.
 */
function blockquote(state, startLine, endLine, silent) {
  let pos = state.bMarks[startLine] + state.tShift[startLine];
  let max = state.eMarks[startLine];
  const oldLineMax = state.lineMax;

  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[startLine] - state.blkIndent >= 4) {
    return false;
  }

  // check the block quote marker
  if (state.src.charCodeAt(pos) !== 0x3E /* > */) {
    return false;
  }

  // we know that it's going to be a valid blockquote,
  // so no point trying to find the end of it in silent mode
  if (silent) {
    return true;
  }

  // saved per-line cache values, restored after the inner tokenize pass
  const oldBMarks = [];
  const oldBSCount = [];
  const oldSCount = [];
  const oldTShift = [];
  const terminatorRules = state.md.block.ruler.getRules('blockquote');
  const oldParentType = state.parentType;
  state.parentType = 'blockquote';
  let lastLineEmpty = false;
  let nextLine;

  // Search the end of the block
  //
  // Block ends with either:
  //  1. an empty line outside:
  //     ```
  //     > test
  //
  //     ```
  //  2. an empty line inside:
  //     ```
  //     >
  //     test
  //     ```
  //  3. another tag:
  //     ```
  //     > test
  //      - - -
  //     ```
  for (nextLine = startLine; nextLine < endLine; nextLine++) {
    // check if it's outdented, i.e. it's inside list item and indented
    // less than said list item:
    //
    // ```
    // 1. anything
    //    > current blockquote
    // 2. checking this line
    // ```
    const isOutdented = state.sCount[nextLine] < state.blkIndent;
    pos = state.bMarks[nextLine] + state.tShift[nextLine];
    max = state.eMarks[nextLine];
    if (pos >= max) {
      // Case 1: line is not inside the blockquote, and this line is empty.
      break;
    }
    if (state.src.charCodeAt(pos++) === 0x3E /* > */ && !isOutdented) {
      // This line is inside the blockquote.

      // set offset past spaces and ">"
      let initial = state.sCount[nextLine] + 1;
      let spaceAfterMarker;
      let adjustTab;

      // skip one optional space after '>'
      if (state.src.charCodeAt(pos) === 0x20 /* space */) {
        // ' >   test '
        //     ^ -- position start of line here:
        pos++;
        initial++;
        adjustTab = false;
        spaceAfterMarker = true;
      } else if (state.src.charCodeAt(pos) === 0x09 /* tab */) {
        spaceAfterMarker = true;
        if ((state.bsCount[nextLine] + initial) % 4 === 3) {
          // '  >\t  test '
          //       ^ -- position start of line here (tab has width===1)
          pos++;
          initial++;
          adjustTab = false;
        } else {
          // ' >\t  test '
          //    ^ -- position start of line here + shift bsCount slightly
          //         to make extra space appear
          adjustTab = true;
        }
      } else {
        spaceAfterMarker = false;
      }
      let offset = initial;
      oldBMarks.push(state.bMarks[nextLine]);
      state.bMarks[nextLine] = pos;
      // advance over the indentation following the marker, expanding tabs
      while (pos < max) {
        const ch = state.src.charCodeAt(pos);
        if (isSpace(ch)) {
          if (ch === 0x09) {
            offset += 4 - (offset + state.bsCount[nextLine] + (adjustTab ? 1 : 0)) % 4;
          } else {
            offset++;
          }
        } else {
          break;
        }
        pos++;
      }
      lastLineEmpty = pos >= max;
      oldBSCount.push(state.bsCount[nextLine]);
      state.bsCount[nextLine] = state.sCount[nextLine] + 1 + (spaceAfterMarker ? 1 : 0);
      oldSCount.push(state.sCount[nextLine]);
      state.sCount[nextLine] = offset - initial;
      oldTShift.push(state.tShift[nextLine]);
      state.tShift[nextLine] = pos - state.bMarks[nextLine];
      continue;
    }

    // Case 2: line is not inside the blockquote, and the last line was empty.
    if (lastLineEmpty) {
      break;
    }

    // Case 3: another tag found.
    let terminate = false;
    for (let i = 0, l = terminatorRules.length; i < l; i++) {
      if (terminatorRules[i](state, nextLine, endLine, true)) {
        terminate = true;
        break;
      }
    }
    if (terminate) {
      // Quirk to enforce "hard termination mode" for paragraphs;
      // normally if you call `tokenize(state, startLine, nextLine)`,
      // paragraphs will look below nextLine for paragraph continuation,
      // but if blockquote is terminated by another tag, they shouldn't
      state.lineMax = nextLine;
      if (state.blkIndent !== 0) {
        // state.blkIndent was non-zero, we now set it to zero,
        // so we need to re-calculate all offsets to appear as
        // if indent wasn't changed
        oldBMarks.push(state.bMarks[nextLine]);
        oldBSCount.push(state.bsCount[nextLine]);
        oldTShift.push(state.tShift[nextLine]);
        oldSCount.push(state.sCount[nextLine]);
        state.sCount[nextLine] -= state.blkIndent;
      }
      break;
    }
    oldBMarks.push(state.bMarks[nextLine]);
    oldBSCount.push(state.bsCount[nextLine]);
    oldTShift.push(state.tShift[nextLine]);
    oldSCount.push(state.sCount[nextLine]);

    // A negative indentation means that this is a paragraph continuation
    //
    state.sCount[nextLine] = -1;
  }
  const oldIndent = state.blkIndent;
  state.blkIndent = 0;
  const token_o = state.push('blockquote_open', 'blockquote', 1);
  token_o.markup = '>';
  const lines = [startLine, 0];
  token_o.map = lines;
  state.md.block.tokenize(state, startLine, nextLine);
  const token_c = state.push('blockquote_close', 'blockquote', -1);
  token_c.markup = '>';
  state.lineMax = oldLineMax;
  state.parentType = oldParentType;
  lines[1] = state.line;

  // Restore original tShift; this might not be necessary since the parser
  // has already been here, but just to make sure we can do that.
  for (let i = 0; i < oldTShift.length; i++) {
    state.bMarks[i + startLine] = oldBMarks[i];
    state.tShift[i + startLine] = oldTShift[i];
    state.sCount[i + startLine] = oldSCount[i];
    state.bsCount[i + startLine] = oldBSCount[i];
  }
  state.blkIndent = oldIndent;
  return true;
}
 | ||
| 
 | ||
// Horizontal rule
//
// Matches a line of three or more identical '*', '-' or '_' markers,
// optionally interleaved with spaces.
function hr(state, startLine, endLine, silent) {
  const max = state.eMarks[startLine];

  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[startLine] - state.blkIndent >= 4) {
    return false;
  }

  let pos = state.bMarks[startLine] + state.tShift[startLine];
  const marker = state.src.charCodeAt(pos++);
  if (marker !== 0x2A /* * */ && marker !== 0x2D /* - */ && marker !== 0x5F /* _ */) {
    return false;
  }

  // count markers; anything other than the marker or whitespace disqualifies
  let count = 1;
  while (pos < max) {
    const ch = state.src.charCodeAt(pos++);
    if (ch !== marker && !isSpace(ch)) {
      return false;
    }
    if (ch === marker) {
      count++;
    }
  }
  if (count < 3) {
    return false;
  }
  if (silent) {
    return true;
  }

  state.line = startLine + 1;
  const token = state.push('hr', 'hr', 0);
  token.map = [startLine, state.line];
  token.markup = String.fromCharCode(marker).repeat(count);
  return true;
}
 | ||
| 
 | ||
// Lists


// Search `[-+*][\n ]`, returns next pos after marker on success
// or -1 on fail.
function skipBulletListMarker(state, startLine) {
  let pos = state.bMarks[startLine] + state.tShift[startLine];
  const max = state.eMarks[startLine];

  const marker = state.src.charCodeAt(pos++);
  // Check bullet
  if (marker !== 0x2A /* * */ && marker !== 0x2D /* - */ && marker !== 0x2B /* + */) {
    return -1;
  }

  // a bullet must be followed by whitespace (or end of line):
  // " -test " - is not a list item
  if (pos < max && !isSpace(state.src.charCodeAt(pos))) {
    return -1;
  }

  return pos;
}
 | ||
| 
 | ||
// Search `\d+[.)][\n ]`, returns next pos after marker on success
// or -1 on fail.
function skipOrderedListMarker(state, startLine) {
  const begin = state.bMarks[startLine] + state.tShift[startLine];
  const lineEnd = state.eMarks[startLine];
  let p = begin;

  // Need room for at least a digit plus a '.'/')' terminator
  if (p + 1 >= lineEnd) {
    return -1;
  }

  let code = state.src.charCodeAt(p++);
  if (code < 0x30 /* 0 */ || code > 0x39 /* 9 */) {
    return -1;
  }

  for (;;) {
    // ran off the end of the line without finding a terminator
    if (p >= lineEnd) {
      return -1;
    }

    code = state.src.charCodeAt(p++);

    if (code >= 0x30 /* 0 */ && code <= 0x39 /* 9 */) {
      // List marker should have no more than 9 digits
      // (prevents integer overflow in browsers)
      if (p - begin >= 10) {
        return -1;
      }
      continue;
    }

    // found valid marker terminator
    if (code === 0x29 /* ) */ || code === 0x2e /* . */) {
      break;
    }

    return -1;
  }

  // " 1.test " - is not a list item: terminator must be followed by a
  // space (or end of line)
  if (p < lineEnd && !isSpace(state.src.charCodeAt(p))) {
    return -1;
  }

  return p;
}
 | ||
// Hide paragraph_open/paragraph_close token pairs that sit directly
// inside list items starting at token index `idx`, producing a "tight"
// list rendering (no <p> wrappers).
function markTightParagraphs(state, idx) {
  const targetLevel = state.level + 2;
  const tokens = state.tokens;
  const last = tokens.length - 2;

  let i = idx + 2;
  while (i < last) {
    const tok = tokens[i];
    if (tok.type === 'paragraph_open' && tok.level === targetLevel) {
      // paragraph_close is always two tokens after paragraph_open
      tokens[i + 2].hidden = true;
      tok.hidden = true;
      i += 2;
    }
    i++;
  }
}
 | ||
/**
 * Block rule: ordered (`1.` / `1)`) and bullet (`-` / `+` / `*`) lists.
 *
 * Opens a list token, then repeatedly parses list items by narrowing
 * `state.blkIndent` and recursively invoking the block tokenizer on each
 * item's content. Iteration stops on EOF, a shallower indent, a matching
 * terminator rule, a different list type, or a different marker character.
 * If every item turned out tight, the inner paragraph tokens are hidden
 * via markTightParagraphs.
 *
 * @param {Object}  state     block parser state (mutated: tokens, line,
 *                            parentType, blkIndent/listIndent during recursion)
 * @param {Number}  startLine first candidate line
 * @param {Number}  endLine   first line past the available range
 * @param {Boolean} silent    validation mode: report match without tokens
 * @returns {Boolean} true if a list was recognized
 */
function list(state, startLine, endLine, silent) {
  let max, pos, start, token;
  let nextLine = startLine;
  let tight = true;

  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[nextLine] - state.blkIndent >= 4) {
    return false;
  }

  // Special case:
  //  - item 1
  //   - item 2
  //    - item 3
  //     - item 4
  //      - this one is a paragraph continuation
  if (state.listIndent >= 0 && state.sCount[nextLine] - state.listIndent >= 4 && state.sCount[nextLine] < state.blkIndent) {
    return false;
  }
  let isTerminatingParagraph = false;

  // limit conditions when list can interrupt
  // a paragraph (validation mode only)
  if (silent && state.parentType === 'paragraph') {
    // Next list item should still terminate previous list item;
    //
    // This code can fail if plugins use blkIndent as well as lists,
    // but I hope the spec gets fixed long before that happens.
    //
    if (state.sCount[nextLine] >= state.blkIndent) {
      isTerminatingParagraph = true;
    }
  }

  // Detect list type and position after marker
  let isOrdered;
  let markerValue;
  let posAfterMarker;
  if ((posAfterMarker = skipOrderedListMarker(state, nextLine)) >= 0) {
    isOrdered = true;
    start = state.bMarks[nextLine] + state.tShift[nextLine];
    markerValue = Number(state.src.slice(start, posAfterMarker - 1));

    // If we're starting a new ordered list right after
    // a paragraph, it should start with 1.
    if (isTerminatingParagraph && markerValue !== 1) return false;
  } else if ((posAfterMarker = skipBulletListMarker(state, nextLine)) >= 0) {
    isOrdered = false;
  } else {
    return false;
  }

  // If we're starting a new unordered list right after
  // a paragraph, first line should not be empty.
  if (isTerminatingParagraph) {
    if (state.skipSpaces(posAfterMarker) >= state.eMarks[nextLine]) return false;
  }

  // For validation mode we can terminate immediately
  if (silent) {
    return true;
  }

  // We should terminate list on style change. Remember first one to compare.
  const markerCharCode = state.src.charCodeAt(posAfterMarker - 1);

  // Start list
  const listTokIdx = state.tokens.length;
  if (isOrdered) {
    token = state.push('ordered_list_open', 'ol', 1);
    // emit a start attribute only when the list does not begin at 1
    if (markerValue !== 1) {
      token.attrs = [['start', markerValue]];
    }
  } else {
    token = state.push('bullet_list_open', 'ul', 1);
  }
  // listLines[1] is patched after the loop, once the end line is known
  const listLines = [nextLine, 0];
  token.map = listLines;
  token.markup = String.fromCharCode(markerCharCode);

  //
  // Iterate list items
  //

  let prevEmptyEnd = false;
  const terminatorRules = state.md.block.ruler.getRules('list');
  const oldParentType = state.parentType;
  state.parentType = 'list';
  while (nextLine < endLine) {
    pos = posAfterMarker;
    max = state.eMarks[nextLine];
    const initial = state.sCount[nextLine] + posAfterMarker - (state.bMarks[nextLine] + state.tShift[nextLine]);
    let offset = initial;
    // measure whitespace after the marker, expanding tabs to 4-column stops
    while (pos < max) {
      const ch = state.src.charCodeAt(pos);
      if (ch === 0x09) {
        offset += 4 - (offset + state.bsCount[nextLine]) % 4;
      } else if (ch === 0x20) {
        offset++;
      } else {
        break;
      }
      pos++;
    }
    const contentStart = pos;
    let indentAfterMarker;
    if (contentStart >= max) {
      // trimming space in "-    \n  3" case, indent is 1 here
      indentAfterMarker = 1;
    } else {
      indentAfterMarker = offset - initial;
    }

    // If we have more than 4 spaces, the indent is 1
    // (the rest is just indented code block)
    if (indentAfterMarker > 4) {
      indentAfterMarker = 1;
    }

    // "  -  test"
    //  ^^^^^ - calculating total length of this thing
    const indent = initial + indentAfterMarker;

    // Run subparser & write tokens
    token = state.push('list_item_open', 'li', 1);
    token.markup = String.fromCharCode(markerCharCode);
    const itemLines = [nextLine, 0];
    token.map = itemLines;
    if (isOrdered) {
      token.info = state.src.slice(start, posAfterMarker - 1);
    }

    // change current state, then restore it after parser subcall
    const oldTight = state.tight;
    const oldTShift = state.tShift[nextLine];
    const oldSCount = state.sCount[nextLine];

    //  - example list
    // ^ listIndent position will be here
    //   ^ blkIndent position will be here
    //
    const oldListIndent = state.listIndent;
    state.listIndent = state.blkIndent;
    state.blkIndent = indent;
    state.tight = true;
    state.tShift[nextLine] = contentStart - state.bMarks[nextLine];
    state.sCount[nextLine] = offset;
    if (contentStart >= max && state.isEmpty(nextLine + 1)) {
      // workaround for this case
      // (list item is empty, list terminates before "foo"):
      // ~~~~~~~~
      //   -
      //
      //     foo
      // ~~~~~~~~
      state.line = Math.min(state.line + 2, endLine);
    } else {
      state.md.block.tokenize(state, nextLine, endLine, true);
    }

    // If any of list item is tight, mark list as tight
    if (!state.tight || prevEmptyEnd) {
      tight = false;
    }
    // Item become loose if finish with empty line,
    // but we should filter last element, because it means list finish
    prevEmptyEnd = state.line - nextLine > 1 && state.isEmpty(state.line - 1);
    state.blkIndent = state.listIndent;
    state.listIndent = oldListIndent;
    state.tShift[nextLine] = oldTShift;
    state.sCount[nextLine] = oldSCount;
    state.tight = oldTight;
    token = state.push('list_item_close', 'li', -1);
    token.markup = String.fromCharCode(markerCharCode);
    nextLine = state.line;
    itemLines[1] = nextLine;
    if (nextLine >= endLine) {
      break;
    }

    //
    // Try to check if list is terminated or continued.
    //
    if (state.sCount[nextLine] < state.blkIndent) {
      break;
    }

    // if it's indented more than 3 spaces, it should be a code block
    if (state.sCount[nextLine] - state.blkIndent >= 4) {
      break;
    }

    // fail if terminating block found
    let terminate = false;
    for (let i = 0, l = terminatorRules.length; i < l; i++) {
      if (terminatorRules[i](state, nextLine, endLine, true)) {
        terminate = true;
        break;
      }
    }
    if (terminate) {
      break;
    }

    // fail if list has another type
    if (isOrdered) {
      posAfterMarker = skipOrderedListMarker(state, nextLine);
      if (posAfterMarker < 0) {
        break;
      }
      start = state.bMarks[nextLine] + state.tShift[nextLine];
    } else {
      posAfterMarker = skipBulletListMarker(state, nextLine);
      if (posAfterMarker < 0) {
        break;
      }
    }
    // marker character changed (e.g. '-' list followed by '*' list)
    if (markerCharCode !== state.src.charCodeAt(posAfterMarker - 1)) {
      break;
    }
  }

  // Finalize list
  if (isOrdered) {
    token = state.push('ordered_list_close', 'ol', -1);
  } else {
    token = state.push('bullet_list_close', 'ul', -1);
  }
  token.markup = String.fromCharCode(markerCharCode);
  listLines[1] = nextLine;
  state.line = nextLine;
  state.parentType = oldParentType;

  // mark paragraphs tight if needed
  if (tight) {
    markTightParagraphs(state, listTokIdx);
  }
  return true;
}
 | ||
| 
 | ||
/**
 * Block rule: link reference definition — `[label]: destination 'title'`.
 *
 * Scans across multiple lines (pulling in lazy continuation lines via the
 * local getNextLine helper) and, on success, stores the definition in
 * `state.env.references[label] = { title, href }`. The first definition of
 * a label wins; later duplicates are ignored.
 *
 * @param {Object}  state     block parser state (mutated: env, line)
 * @param {Number}  startLine first candidate line
 * @param {Number}  _endLine  unused; scanning is bounded by state.lineMax
 * @param {Boolean} silent    validation mode: report match without side effects
 * @returns {Boolean} true if a valid reference definition was consumed
 */
function reference(state, startLine, _endLine, silent) {
  let pos = state.bMarks[startLine] + state.tShift[startLine];
  let max = state.eMarks[startLine];
  let nextLine = startLine + 1;

  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[startLine] - state.blkIndent >= 4) {
    return false;
  }
  if (state.src.charCodeAt(pos) !== 0x5B /* [ */) {
    return false;
  }
  // Returns the text of line `nextLine` (newline included) if it can be
  // treated as a lazy continuation of the definition, or null when the
  // line is empty, past the end, or claimed by a terminator rule.
  function getNextLine(nextLine) {
    const endLine = state.lineMax;
    if (nextLine >= endLine || state.isEmpty(nextLine)) {
      // empty line or end of input
      return null;
    }
    let isContinuation = false;

    // this would be a code block normally, but after paragraph
    // it's considered a lazy continuation regardless of what's there
    if (state.sCount[nextLine] - state.blkIndent > 3) {
      isContinuation = true;
    }

    // quirk for blockquotes, this line should already be checked by that rule
    if (state.sCount[nextLine] < 0) {
      isContinuation = true;
    }
    if (!isContinuation) {
      const terminatorRules = state.md.block.ruler.getRules('reference');
      const oldParentType = state.parentType;
      state.parentType = 'reference';

      // Some tags can terminate paragraph without empty line.
      let terminate = false;
      for (let i = 0, l = terminatorRules.length; i < l; i++) {
        if (terminatorRules[i](state, nextLine, endLine, true)) {
          terminate = true;
          break;
        }
      }
      state.parentType = oldParentType;
      if (terminate) {
        // terminated by another block
        return null;
      }
    }
    const pos = state.bMarks[nextLine] + state.tShift[nextLine];
    const max = state.eMarks[nextLine];

    // max + 1 explicitly includes the newline
    return state.src.slice(pos, max + 1);
  }
  // `str` accumulates the definition text; continuation lines are appended
  // as they are consumed, and `max` tracks its growing length.
  let str = state.src.slice(pos, max + 1);
  max = str.length;
  let labelEnd = -1;
  // scan for the closing ']' of the label, honoring backslash escapes and
  // pulling in continuation lines when a newline is reached
  for (pos = 1; pos < max; pos++) {
    const ch = str.charCodeAt(pos);
    if (ch === 0x5B /* [ */) {
      return false;
    } else if (ch === 0x5D /* ] */) {
      labelEnd = pos;
      break;
    } else if (ch === 0x0A /* \n */) {
      const lineContent = getNextLine(nextLine);
      if (lineContent !== null) {
        str += lineContent;
        max = str.length;
        nextLine++;
      }
    } else if (ch === 0x5C /* \ */) {
      pos++;
      if (pos < max && str.charCodeAt(pos) === 0x0A) {
        const lineContent = getNextLine(nextLine);
        if (lineContent !== null) {
          str += lineContent;
          max = str.length;
          nextLine++;
        }
      }
    }
  }
  // the label must be immediately followed by ':'
  if (labelEnd < 0 || str.charCodeAt(labelEnd + 1) !== 0x3A /* : */) {
    return false;
  }

  // [label]:   destination   'title'
  //         ^^^ skip optional whitespace here
  for (pos = labelEnd + 2; pos < max; pos++) {
    const ch = str.charCodeAt(pos);
    if (ch === 0x0A) {
      const lineContent = getNextLine(nextLine);
      if (lineContent !== null) {
        str += lineContent;
        max = str.length;
        nextLine++;
      }
    } else if (isSpace(ch)) ; else {
      break;
    }
  }

  // [label]:   destination   'title'
  //            ^^^^^^^^^^^ parse this
  const destRes = state.md.helpers.parseLinkDestination(str, pos, max);
  if (!destRes.ok) {
    return false;
  }
  const href = state.md.normalizeLink(destRes.str);
  if (!state.md.validateLink(href)) {
    return false;
  }
  pos = destRes.pos;

  // save cursor state, we could require to rollback later
  const destEndPos = pos;
  const destEndLineNo = nextLine;

  // [label]:   destination   'title'
  //                       ^^^ skipping those spaces
  const start = pos;
  for (; pos < max; pos++) {
    const ch = str.charCodeAt(pos);
    if (ch === 0x0A) {
      const lineContent = getNextLine(nextLine);
      if (lineContent !== null) {
        str += lineContent;
        max = str.length;
        nextLine++;
      }
    } else if (isSpace(ch)) ; else {
      break;
    }
  }

  // [label]:   destination   'title'
  //                          ^^^^^^^ parse this
  let titleRes = state.md.helpers.parseLinkTitle(str, pos, max);
  // a title may span several lines; keep feeding continuation lines while
  // the helper reports it can continue
  while (titleRes.can_continue) {
    const lineContent = getNextLine(nextLine);
    if (lineContent === null) break;
    str += lineContent;
    pos = max;
    max = str.length;
    nextLine++;
    titleRes = state.md.helpers.parseLinkTitle(str, pos, max, titleRes);
  }
  let title;
  if (pos < max && start !== pos && titleRes.ok) {
    title = titleRes.str;
    pos = titleRes.pos;
  } else {
    // no valid title: roll the cursor back to just after the destination
    title = '';
    pos = destEndPos;
    nextLine = destEndLineNo;
  }

  // skip trailing spaces until the rest of the line
  while (pos < max) {
    const ch = str.charCodeAt(pos);
    if (!isSpace(ch)) {
      break;
    }
    pos++;
  }
  if (pos < max && str.charCodeAt(pos) !== 0x0A) {
    if (title) {
      // garbage at the end of the line after title,
      // but it could still be a valid reference if we roll back
      title = '';
      pos = destEndPos;
      nextLine = destEndLineNo;
      while (pos < max) {
        const ch = str.charCodeAt(pos);
        if (!isSpace(ch)) {
          break;
        }
        pos++;
      }
    }
  }
  if (pos < max && str.charCodeAt(pos) !== 0x0A) {
    // garbage at the end of the line
    return false;
  }
  const label = normalizeReference(str.slice(1, labelEnd));
  if (!label) {
    // CommonMark 0.20 disallows empty labels
    return false;
  }

  // Reference can not terminate anything. This check is for safety only.
  /* istanbul ignore if */
  if (silent) {
    return true;
  }
  if (typeof state.env.references === 'undefined') {
    state.env.references = {};
  }
  // first definition of a label wins; duplicates are ignored
  if (typeof state.env.references[label] === 'undefined') {
    state.env.references[label] = {
      title,
      href
    };
  }
  state.line = nextLine;
  return true;
}
 | ||
| 
 | ||
// List of valid html blocks names, according to commonmark spec
// https://spec.commonmark.org/0.30/#html-blocks

var block_names = ['address', 'article', 'aside', 'base', 'basefont', 'blockquote', 'body', 'caption', 'center', 'col', 'colgroup', 'dd', 'details', 'dialog', 'dir', 'div', 'dl', 'dt', 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'frame', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head', 'header', 'hr', 'html', 'iframe', 'legend', 'li', 'link', 'main', 'menu', 'menuitem', 'nav', 'noframes', 'ol', 'optgroup', 'option', 'p', 'param', 'search', 'section', 'summary', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'title', 'tr', 'track', 'ul'];

// Regexps to match html elements

// attribute name: letter/'_'/':' start, then word chars, ':', '.' or '-'
const attr_name = '[a-zA-Z_:][a-zA-Z0-9:._-]*';
// unquoted attribute value: no quotes, '=', '<', '>', backtick, or control/space chars
const unquoted = '[^"\'=<>`\\x00-\\x20]+';
const single_quoted = "'[^']*'";
const double_quoted = '"[^"]*"';
// any of the three value forms above
const attr_value = '(?:' + unquoted + '|' + single_quoted + '|' + double_quoted + ')';
// whitespace-prefixed attribute with an optional '=value' part
const attribute = '(?:\\s+' + attr_name + '(?:\\s*=\\s*' + attr_value + ')?)';
// opening tag, optionally self-closing ('/>')
const open_tag = '<[A-Za-z][A-Za-z0-9\\-]*' + attribute + '*\\s*\\/?>';
const close_tag = '<\\/[A-Za-z][A-Za-z0-9\\-]*\\s*>';
// HTML comment, including the degenerate '<!-->' / '<!--->' forms
const comment = '<!---?>|<!--(?:[^-]|-[^-]|--[^>])*-->';
// processing instruction: '<? ... ?>'
const processing = '<[?][\\s\\S]*?[?]>';
// declaration, e.g. '<!DOCTYPE html>'
const declaration = '<![A-Za-z][^>]*>';
const cdata = '<!\\[CDATA\\[[\\s\\S]*?\\]\\]>';
// any complete HTML construct, anchored at string start
const HTML_TAG_RE = new RegExp('^(?:' + open_tag + '|' + close_tag + '|' + comment + '|' + processing + '|' + declaration + '|' + cdata + ')');
// only open/close tags (used for the last, non-terminating html block type)
const HTML_OPEN_CLOSE_TAG_RE = new RegExp('^(?:' + open_tag + '|' + close_tag + ')');

// HTML block


// An array of opening and corresponding closing sequences for html tags,
// last argument defines whether it can terminate a paragraph or not
//
const HTML_SEQUENCES = [[/^<(script|pre|style|textarea)(?=(\s|>|$))/i, /<\/(script|pre|style|textarea)>/i, true], [/^<!--/, /-->/, true], [/^<\?/, /\?>/, true], [/^<![A-Z]/, />/, true], [/^<!\[CDATA\[/, /\]\]>/, true], [new RegExp('^</?(' + block_names.join('|') + ')(?=(\\s|/?>|$))', 'i'), /^$/, true], [new RegExp(HTML_OPEN_CLOSE_TAG_RE.source + '\\s*$'), /^$/, false]];
 | ||
// HTML block rule: matches a line against the HTML_SEQUENCES open patterns
// and, when found, consumes lines until the corresponding close pattern
// (or a blank/unindented line for the /^$/ close patterns).
function html_block(state, startLine, endLine, silent) {
  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[startLine] - state.blkIndent >= 4) {
    return false;
  }
  // html blocks are only recognized when the 'html' option is enabled
  if (!state.md.options.html) {
    return false;
  }

  let pos = state.bMarks[startLine] + state.tShift[startLine];
  let max = state.eMarks[startLine];
  if (state.src.charCodeAt(pos) !== 0x3C /* < */) {
    return false;
  }

  let lineText = state.src.slice(pos, max);

  // find the first sequence whose open pattern matches this line
  const seqIdx = HTML_SEQUENCES.findIndex((seq) => seq[0].test(lineText));
  if (seqIdx === -1) {
    return false;
  }
  if (silent) {
    // true if this sequence can be a terminator, false otherwise
    return HTML_SEQUENCES[seqIdx][2];
  }

  let nextLine = startLine + 1;

  // If we are here - we detected HTML block.
  // Let's roll down till block end.
  const closeRe = HTML_SEQUENCES[seqIdx][1];
  if (!closeRe.test(lineText)) {
    while (nextLine < endLine) {
      if (state.sCount[nextLine] < state.blkIndent) {
        break;
      }
      pos = state.bMarks[nextLine] + state.tShift[nextLine];
      max = state.eMarks[nextLine];
      lineText = state.src.slice(pos, max);
      if (closeRe.test(lineText)) {
        // include the closing line itself, unless it is empty
        if (lineText.length !== 0) {
          nextLine++;
        }
        break;
      }
      nextLine++;
    }
  }

  state.line = nextLine;
  const token = state.push('html_block', '', 0);
  token.map = [startLine, nextLine];
  token.content = state.getLines(startLine, nextLine, state.blkIndent, true);
  return true;
}
 | ||
| 
 | ||
// ATX heading (#, ##, ... up to ######)
function heading(state, startLine, endLine, silent) {
  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[startLine] - state.blkIndent >= 4) {
    return false;
  }

  let pos = state.bMarks[startLine] + state.tShift[startLine];
  let max = state.eMarks[startLine];

  let ch = state.src.charCodeAt(pos);
  if (ch !== 0x23 /* # */ || pos >= max) {
    return false;
  }

  // count consecutive '#' characters to determine the level
  let level = 1;
  ch = state.src.charCodeAt(++pos);
  while (ch === 0x23 /* # */ && pos < max && level <= 6) {
    level++;
    ch = state.src.charCodeAt(++pos);
  }

  // more than 6 '#', or '#' run not followed by a space -> not a heading
  if (level > 6 || (pos < max && !isSpace(ch))) {
    return false;
  }
  if (silent) {
    return true;
  }

  // Let's cut tails like '    ###  ' from the end of string
  max = state.skipSpacesBack(max, pos);
  const tail = state.skipCharsBack(max, 0x23, pos); // #
  if (tail > pos && isSpace(state.src.charCodeAt(tail - 1))) {
    max = tail;
  }

  state.line = startLine + 1;

  const openTok = state.push('heading_open', 'h' + String(level), 1);
  openTok.markup = '########'.slice(0, level);
  openTok.map = [startLine, state.line];

  const inlineTok = state.push('inline', '', 0);
  inlineTok.content = state.src.slice(pos, max).trim();
  inlineTok.map = [startLine, state.line];
  inlineTok.children = [];

  const closeTok = state.push('heading_close', 'h' + String(level), -1);
  closeTok.markup = '########'.slice(0, level);
  return true;
}
 | ||
| 
 | ||
// Setext heading: a paragraph-like run of lines followed by an underline
// of '=' (level 1) or '-' (level 2).
function lheading(state, startLine, endLine /*, silent */) {
  const terminatorRules = state.md.block.ruler.getRules('paragraph');

  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[startLine] - state.blkIndent >= 4) {
    return false;
  }

  const oldParentType = state.parentType;
  state.parentType = 'paragraph'; // use paragraph to match terminatorRules

  // jump line-by-line until empty one or EOF
  let level = 0;
  let marker;
  let nextLine = startLine + 1;
  for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {
    // this would be a code block normally, but after paragraph
    // it's considered a lazy continuation regardless of what's there
    if (state.sCount[nextLine] - state.blkIndent > 3) {
      continue;
    }

    // probe this line for a setext underline ('===...' or '---...')
    if (state.sCount[nextLine] >= state.blkIndent) {
      let p = state.bMarks[nextLine] + state.tShift[nextLine];
      const lineEnd = state.eMarks[nextLine];
      if (p < lineEnd) {
        marker = state.src.charCodeAt(p);
        if (marker === 0x2D /* - */ || marker === 0x3D /* = */) {
          p = state.skipChars(p, marker);
          p = state.skipSpaces(p);
          // the whole line must be markers plus trailing spaces
          if (p >= lineEnd) {
            level = (marker === 0x3D /* = */) ? 1 : 2;
            break;
          }
        }
      }
    }

    // quirk for blockquotes, this line should already be checked by that rule
    if (state.sCount[nextLine] < 0) {
      continue;
    }

    // Some tags can terminate paragraph without empty line.
    let stopped = false;
    for (const rule of terminatorRules) {
      if (rule(state, nextLine, endLine, true)) {
        stopped = true;
        break;
      }
    }
    if (stopped) {
      break;
    }
  }

  if (!level) {
    // Didn't find valid underline
    return false;
  }

  const content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();
  state.line = nextLine + 1;

  const openTok = state.push('heading_open', 'h' + String(level), 1);
  openTok.markup = String.fromCharCode(marker);
  openTok.map = [startLine, state.line];

  const inlineTok = state.push('inline', '', 0);
  inlineTok.content = content;
  inlineTok.map = [startLine, state.line - 1];
  inlineTok.children = [];

  const closeTok = state.push('heading_close', 'h' + String(level), -1);
  closeTok.markup = String.fromCharCode(marker);

  state.parentType = oldParentType;
  return true;
}
 | ||
| 
 | ||
// Paragraph: the fallback block rule. Consumes lines until a blank line,
// end of input, or a terminating block rule, and emits
// paragraph_open / inline / paragraph_close tokens.
function paragraph(state, startLine, endLine) {
  const terminatorRules = state.md.block.ruler.getRules('paragraph');
  const oldParentType = state.parentType;
  state.parentType = 'paragraph';

  // jump line-by-line until empty one or EOF
  let nextLine = startLine + 1;
  for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {
    // this would be a code block normally, but after paragraph
    // it's considered a lazy continuation regardless of what's there
    if (state.sCount[nextLine] - state.blkIndent > 3) {
      continue;
    }

    // quirk for blockquotes, this line should already be checked by that rule
    if (state.sCount[nextLine] < 0) {
      continue;
    }

    // Some tags can terminate paragraph without empty line.
    let stopped = false;
    for (const rule of terminatorRules) {
      if (rule(state, nextLine, endLine, true)) {
        stopped = true;
        break;
      }
    }
    if (stopped) {
      break;
    }
  }

  const content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();
  state.line = nextLine;

  const openTok = state.push('paragraph_open', 'p', 1);
  openTok.map = [startLine, state.line];

  const inlineTok = state.push('inline', '', 0);
  inlineTok.content = content;
  inlineTok.map = [startLine, state.line];
  inlineTok.children = [];

  state.push('paragraph_close', 'p', -1);
  state.parentType = oldParentType;
  return true;
}
 | ||
| 
 | ||
/** internal
 * class ParserBlock
 *
 * Block-level tokenizer.
 **/

// Ordered rule table for the block parser; earlier rules win.
const _rules$1 = [
// First 2 params - rule name & source. Secondary array - list of rules,
// which can be terminated by this one.
['table', table, ['paragraph', 'reference']], ['code', code], ['fence', fence, ['paragraph', 'reference', 'blockquote', 'list']], ['blockquote', blockquote, ['paragraph', 'reference', 'blockquote', 'list']], ['hr', hr, ['paragraph', 'reference', 'blockquote', 'list']], ['list', list, ['paragraph', 'reference', 'blockquote']], ['reference', reference], ['html_block', html_block, ['paragraph', 'reference', 'blockquote']], ['heading', heading, ['paragraph', 'reference', 'blockquote']], ['lheading', lheading], ['paragraph', paragraph]];
 | ||
| 
 | ||
| /**
 | ||
|  * new ParserBlock()
 | ||
|  **/
 | ||
| function ParserBlock() {
 | ||
|   /**
 | ||
|    * ParserBlock#ruler -> Ruler
 | ||
|    *
 | ||
|    * [[Ruler]] instance. Keep configuration of block rules.
 | ||
|    **/
 | ||
|   this.ruler = new Ruler();
 | ||
|   for (let i = 0; i < _rules$1.length; i++) {
 | ||
|     this.ruler.push(_rules$1[i][0], _rules$1[i][1], {
 | ||
|       alt: (_rules$1[i][2] || []).slice()
 | ||
|     });
 | ||
|   }
 | ||
| }
 | ||
| 
 | ||
// Generate tokens for input range
//
ParserBlock.prototype.tokenize = function (state, startLine, endLine) {
  const rules = this.ruler.getRules('');
  const rulesCount = rules.length;
  const maxNesting = state.md.options.maxNesting;
  let line = startLine;
  let hasEmptyLines = false;

  while (line < endLine) {
    state.line = line = state.skipEmptyLines(line);
    if (line >= endLine) {
      break;
    }

    // Termination condition for nested calls (blockquotes & lists):
    // stop once the indent drops below the parent block's indent.
    if (state.sCount[line] < state.blkIndent) {
      break;
    }

    // If nesting level exceeded - skip tail to the end. That's not ordinary
    // situation and we should not care about content.
    if (state.level >= maxNesting) {
      state.line = endLine;
      break;
    }

    // Try all possible rules.
    // On success, rule should:
    //
    // - update `state.line`
    // - update `state.tokens`
    // - return true
    const prevLine = state.line;
    let matched = false;
    for (let r = 0; r < rulesCount; r++) {
      matched = rules[r](state, line, endLine, false);
      if (matched) {
        if (prevLine >= state.line) {
          throw new Error("block rule didn't increment state.line");
        }
        break;
      }
    }

    // this can only happen if user disables paragraph rule
    if (!matched) throw new Error('none of the block rules matched');

    // set state.tight if we had an empty line before current tag
    // i.e. latest empty line should not count
    state.tight = !hasEmptyLines;

    // paragraph might "eat" one newline after it in nested lists
    if (state.isEmpty(state.line - 1)) {
      hasEmptyLines = true;
    }

    line = state.line;
    if (line < endLine && state.isEmpty(line)) {
      hasEmptyLines = true;
      line++;
      state.line = line;
    }
  }
};
 | ||
| 
 | ||
/**
 * ParserBlock.parse(str, md, env, outTokens)
 *
 * Process input string and push block tokens into `outTokens`
 **/
ParserBlock.prototype.parse = function (src, md, env, outTokens) {
  // Nothing to tokenize for empty input.
  if (!src) return;
  const state = new this.State(src, md, env, outTokens);
  this.tokenize(state, state.line, state.lineMax);
};

// State class used by this parser; exposed so it can be overridden.
ParserBlock.prototype.State = StateBlock;
 | ||
| 
 | ||
// Inline parser state

function StateInline(src, md, env, outTokens) {
  this.src = src;
  this.env = env;
  this.md = md;
  this.tokens = outTokens;
  this.tokens_meta = Array(outTokens.length);

  // Current scan position within `src` and the exclusive upper bound.
  this.pos = 0;
  this.posMax = this.src.length;
  this.level = 0;

  // Plain text accumulated since the last token was pushed.
  this.pending = '';
  this.pendingLevel = 0;

  // Stores { start: end } pairs. Useful for backtrack
  // optimization of pairs parse (emphasis, strikes).
  this.cache = {};

  // List of emphasis-like delimiters for current tag
  this.delimiters = [];

  // Stack of delimiter lists for upper level tags
  this._prev_delimiters = [];

  // backtick length => last seen position
  this.backticks = {};
  this.backticksScanned = false;

  // Counter used to disable inline linkify-it execution
  // inside <a> and markdown links
  this.linkLevel = 0;
}
 | ||
| 
 | ||
// Flush the accumulated plain text (`this.pending`) as a single
// 'text' token and reset the buffer.
//
StateInline.prototype.pushPending = function () {
  const textToken = new Token('text', '', 0);
  textToken.content = this.pending;
  textToken.level = this.pendingLevel;
  this.pending = '';
  this.tokens.push(textToken);
  return textToken;
};
 | ||
| 
 | ||
// Push new token to "stream".
// If pending text exists - flush it as text token
//
StateInline.prototype.push = function (type, tag, nesting) {
  if (this.pending) this.pushPending();

  const token = new Token(type, tag, nesting);
  let meta = null;

  if (nesting < 0) {
    // Closing tag: step out one level and restore the delimiter list
    // of the enclosing tag.
    this.level--;
    this.delimiters = this._prev_delimiters.pop();
  }
  token.level = this.level;
  if (nesting > 0) {
    // Opening tag: step in one level and start a fresh delimiter list,
    // remembering the outer one on the stack.
    this.level++;
    this._prev_delimiters.push(this.delimiters);
    this.delimiters = [];
    meta = { delimiters: this.delimiters };
  }
  this.pendingLevel = this.level;
  this.tokens.push(token);
  this.tokens_meta.push(meta);
  return token;
};
 | ||
| 
 | ||
// Scan a sequence of emphasis-like markers, and determine whether
// it can start an emphasis sequence or end an emphasis sequence.
//
//  - start - position to scan from (it should point at a valid marker);
//  - canSplitWord - determine if these markers can be found inside a word
//
// Returns { can_open, can_close, length }, where `length` is the number
// of consecutive identical marker chars found at `start`.
//
StateInline.prototype.scanDelims = function (start, canSplitWord) {
  const max = this.posMax;
  const marker = this.src.charCodeAt(start);

  // treat beginning of the line as a whitespace
  const lastChar = start > 0 ? this.src.charCodeAt(start - 1) : 0x20;
  let pos = start;
  while (pos < max && this.src.charCodeAt(pos) === marker) {
    pos++;
  }
  const count = pos - start;

  // treat end of the line as a whitespace
  const nextChar = pos < max ? this.src.charCodeAt(pos) : 0x20;
  const isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar));
  const isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar));
  const isLastWhiteSpace = isWhiteSpace(lastChar);
  const isNextWhiteSpace = isWhiteSpace(nextChar);
  // "left/right flanking" per the CommonMark delimiter-run rules: not
  // followed (resp. preceded) by whitespace, and a punctuation neighbor is
  // only allowed when balanced by whitespace/punctuation on the other side.
  const left_flanking = !isNextWhiteSpace && (!isNextPunctChar || isLastWhiteSpace || isLastPunctChar);
  const right_flanking = !isLastWhiteSpace && (!isLastPunctChar || isNextWhiteSpace || isNextPunctChar);
  const can_open = left_flanking && (canSplitWord || !right_flanking || isLastPunctChar);
  const can_close = right_flanking && (canSplitWord || !left_flanking || isNextPunctChar);
  return {
    can_open,
    can_close,
    length: count
  };
};

// re-export Token class to use in block rules
StateInline.prototype.Token = Token;
 | ||
| 
 | ||
// Skip text characters for text token, place those to pending buffer
// and increment current pos

// Rule to skip pure text
// '{}$%@~+=:' reserved for extensions

// !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \, ], ^, _, `, {, |, }, or ~

// !!!! Don't confuse with "Markdown ASCII Punctuation" chars
// http://spec.commonmark.org/0.15/#ascii-punctuation-character

// Char codes that stop a plain-text run (another inline rule may apply).
const TEXT_TERMINATORS = new Set([
  0x0A /* \n */, 0x21 /* ! */, 0x23 /* # */, 0x24 /* $ */, 0x25 /* % */,
  0x26 /* & */, 0x2A /* * */, 0x2B /* + */, 0x2D /* - */, 0x3A /* : */,
  0x3C /* < */, 0x3D /* = */, 0x3E /* > */, 0x40 /* @ */, 0x5B /* [ */,
  0x5C /* \ */, 0x5D /* ] */, 0x5E /* ^ */, 0x5F /* _ */, 0x60 /* ` */,
  0x7B /* { */, 0x7D /* } */, 0x7E /* ~ */
]);

function isTerminatorChar(ch) {
  return TEXT_TERMINATORS.has(ch);
}

// Consume the longest run of non-terminator chars into `state.pending`.
function text(state, silent) {
  const begin = state.pos;
  let scan = begin;
  while (scan < state.posMax && !isTerminatorChar(state.src.charCodeAt(scan))) {
    scan++;
  }
  // No progress means the very first char is a terminator.
  if (scan === begin) {
    return false;
  }
  if (!silent) {
    state.pending += state.src.slice(begin, scan);
  }
  state.pos = scan;
  return true;
}
 | ||
| 
 | ||
| // Alternative implementation, for memory.
 | ||
| //
 | ||
| // It costs 10% of performance, but allows extend terminators list, if place it
 | ||
| // to `ParserInline` property. Probably, will switch to it sometime, such
 | ||
| // flexibility required.
 | ||
| 
 | ||
| /*
 | ||
| var TERMINATOR_RE = /[\n!#$%&*+\-:<=>@[\\\]^_`{}~]/;
 | ||
| 
 | ||
| module.exports = function text(state, silent) {
 | ||
|   var pos = state.pos,
 | ||
|       idx = state.src.slice(pos).search(TERMINATOR_RE);
 | ||
| 
 | ||
|   // first char is terminator -> empty text
 | ||
|   if (idx === 0) { return false; }
 | ||
| 
 | ||
|   // no terminator -> text till end of string
 | ||
|   if (idx < 0) {
 | ||
|     if (!silent) { state.pending += state.src.slice(pos); }
 | ||
|     state.pos = state.src.length;
 | ||
|     return true;
 | ||
|   }
 | ||
| 
 | ||
|   if (!silent) { state.pending += state.src.slice(pos, pos + idx); }
 | ||
| 
 | ||
|   state.pos += idx;
 | ||
| 
 | ||
|   return true;
 | ||
| }; */
 | ||
| 
 | ||
// Process links like https://example.org/

// RFC3986: scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
const SCHEME_RE = /(?:^|[^a-z0-9.+-])([a-z][a-z0-9.+-]*)$/i;

// Inline rule: recognize a bare URL when the scanner sits on "://" and the
// scheme part has already been accumulated in `state.pending`.
function linkify(state, silent) {
  if (!state.md.options.linkify) return false;
  // Never linkify inside <a> / markdown links.
  if (state.linkLevel > 0) return false;

  const pos = state.pos;
  const max = state.posMax;
  // Need at least ":" "/" "/" ahead of the cursor.
  if (pos + 3 > max) return false;
  if (!state.src.startsWith('://', pos)) return false;

  // The scheme must form the tail of the pending text.
  const schemeMatch = state.pending.match(SCHEME_RE);
  if (!schemeMatch) return false;
  const proto = schemeMatch[1];

  const link = state.md.linkify.matchAtStart(state.src.slice(pos - proto.length));
  if (!link) return false;
  let url = link.url;

  // invalid link, but still detected by linkify somehow;
  // need to check to prevent infinite loop below
  if (url.length <= proto.length) return false;

  // disallow '*' at the end of the link (conflicts with emphasis)
  url = url.replace(/\*+$/, '');

  const fullUrl = state.md.normalizeLink(url);
  if (!state.md.validateLink(fullUrl)) return false;

  if (!silent) {
    // Strip the scheme from pending; it is re-emitted as part of the
    // link's text token below.
    state.pending = state.pending.slice(0, -proto.length);

    const openToken = state.push('link_open', 'a', 1);
    openToken.attrs = [['href', fullUrl]];
    openToken.markup = 'linkify';
    openToken.info = 'auto';

    const textToken = state.push('text', '', 0);
    textToken.content = state.md.normalizeLinkText(url);

    const closeToken = state.push('link_close', 'a', -1);
    closeToken.markup = 'linkify';
    closeToken.info = 'auto';
  }
  state.pos += url.length - proto.length;
  return true;
}
 | ||
| 
 | ||
// Process '\n'

function newline(state, silent) {
  let pos = state.pos;
  if (state.src.charCodeAt(pos) !== 0x0A /* \n */) {
    return false;
  }
  const pendingLast = state.pending.length - 1;
  const max = state.posMax;

  // '  \n' -> hardbreak
  // Lookup in pending chars is bad practice! Don't copy to other rules!
  // Pending string is stored in concat mode, indexed lookups will cause
  // conversion to flat mode.
  if (!silent) {
    if (pendingLast >= 0 && state.pending.charCodeAt(pendingLast) === 0x20) {
      if (pendingLast >= 1 && state.pending.charCodeAt(pendingLast - 1) === 0x20) {
        // Two or more trailing spaces: trim the whole space tail from
        // pending and emit a hard break.
        let ws = pendingLast - 1;
        while (ws >= 1 && state.pending.charCodeAt(ws - 1) === 0x20) ws--;
        state.pending = state.pending.slice(0, ws);
        state.push('hardbreak', 'br', 0);
      } else {
        // Exactly one trailing space: drop it and emit a soft break.
        state.pending = state.pending.slice(0, -1);
        state.push('softbreak', 'br', 0);
      }
    } else {
      state.push('softbreak', 'br', 0);
    }
  }
  pos++;

  // skip heading spaces for next line
  while (pos < max && isSpace(state.src.charCodeAt(pos))) {
    pos++;
  }
  state.pos = pos;
  return true;
}
 | ||
| 
 | ||
| // Process escaped chars and hardbreaks
 | ||
| 
 | ||
// Lookup table: ESCAPED[code] === 1 when a backslash before that ASCII
// character removes the backslash from the output.
const ESCAPED = new Array(256).fill(0);
for (const ch of '\\!"#$%&\'()*+,./:;<=>?@[]^_`{|}~-') {
  ESCAPED[ch.charCodeAt(0)] = 1;
}
 | ||
// Handle `\X` sequences: a backslash before a newline becomes a hard
// break; a backslash before a char listed in ESCAPED emits the char
// alone; any other `\X` keeps the backslash in the output.
function escape(state, silent) {
  let pos = state.pos;
  const max = state.posMax;
  if (state.src.charCodeAt(pos) !== 0x5C /* \ */) return false;
  pos++;

  // '\' at the end of the inline block
  if (pos >= max) return false;
  let ch1 = state.src.charCodeAt(pos);
  if (ch1 === 0x0A) {
    // Backslash followed by newline is a hard line break.
    if (!silent) {
      state.push('hardbreak', 'br', 0);
    }
    pos++;
    // skip leading whitespaces from next line
    while (pos < max) {
      ch1 = state.src.charCodeAt(pos);
      if (!isSpace(ch1)) break;
      pos++;
    }
    state.pos = pos;
    return true;
  }
  let escapedStr = state.src[pos];
  // If the escaped char is a high surrogate followed by a low surrogate,
  // keep the full astral-plane character together as one unit.
  if (ch1 >= 0xD800 && ch1 <= 0xDBFF && pos + 1 < max) {
    const ch2 = state.src.charCodeAt(pos + 1);
    if (ch2 >= 0xDC00 && ch2 <= 0xDFFF) {
      escapedStr += state.src[pos + 1];
      pos++;
    }
  }
  const origStr = '\\' + escapedStr;
  if (!silent) {
    const token = state.push('text_special', '', 0);
    if (ch1 < 256 && ESCAPED[ch1] !== 0) {
      // Known escapable punctuation: output the char without the backslash.
      token.content = escapedStr;
    } else {
      // Not escapable: the backslash stays in the output literally.
      token.content = origStr;
    }
    token.markup = origStr;
    token.info = 'escape';
  }
  state.pos = pos + 1;
  return true;
}
 | ||
| 
 | ||
// Parse backticks

// Inline code span: a run of N backticks is closed by the next run of
// exactly N backticks. `state.backticks` caches closer positions per run
// length so repeated failed scans stay linear.
function backtick(state, silent) {
  if (state.src.charCodeAt(state.pos) !== 0x60 /* ` */) {
    return false;
  }
  const max = state.posMax;
  const start = state.pos;

  // Measure the opener run.
  let scanPos = start + 1;
  while (scanPos < max && state.src.charCodeAt(scanPos) === 0x60 /* ` */) {
    scanPos++;
  }
  const marker = state.src.slice(start, scanPos);
  const openerLength = marker.length;

  // The cache says no closer of this length exists at or after `start`:
  // emit the run as literal text without rescanning.
  if (state.backticksScanned && (state.backticks[openerLength] || 0) <= start) {
    if (!silent) state.pending += marker;
    state.pos += openerLength;
    return true;
  }

  // Scan forward for a backtick run of exactly the opener's length.
  let matchEnd = scanPos;
  let matchStart;
  while ((matchStart = state.src.indexOf('`', matchEnd)) !== -1) {
    matchEnd = matchStart + 1;
    while (matchEnd < max && state.src.charCodeAt(matchEnd) === 0x60 /* ` */) {
      matchEnd++;
    }
    const closerLength = matchEnd - matchStart;
    if (closerLength === openerLength) {
      // Found the matching closer.
      if (!silent) {
        const token = state.push('code_inline', 'code', 0);
        token.markup = marker;
        token.content = state.src.slice(scanPos, matchStart).replace(/\n/g, ' ').replace(/^ (.+) $/, '$1');
      }
      state.pos = matchEnd;
      return true;
    }

    // Different length: remember it as the upper bound for that length.
    state.backticks[closerLength] = matchStart;
  }

  // No closer anywhere ahead; the run is literal text.
  state.backticksScanned = true;
  if (!silent) state.pending += marker;
  state.pos += openerLength;
  return true;
}
 | ||
| 
 | ||
| // ~~strike through~~
 | ||
| //
 | ||
| 
 | ||
// Insert each `~~` pair as a separate text token and record it in the
// delimiter list; pairing into s_open/s_close happens in postProcess.
//
function strikethrough_tokenize(state, silent) {
  if (silent) {
    return false;
  }
  const start = state.pos;
  const marker = state.src.charCodeAt(start);
  if (marker !== 0x7E /* ~ */) {
    return false;
  }
  const scanned = state.scanDelims(state.pos, true);
  if (scanned.length < 2) {
    return false;
  }
  const markerStr = String.fromCharCode(marker);

  let remaining = scanned.length;
  if (remaining % 2) {
    // Odd run: the leading single '~' is plain text.
    const textToken = state.push('text', '', 0);
    textToken.content = markerStr;
    remaining--;
  }
  for (let i = 0; i < remaining; i += 2) {
    const textToken = state.push('text', '', 0);
    textToken.content = markerStr + markerStr;
    state.delimiters.push({
      marker,
      length: 0,
      // disable "rule of 3" length checks meant for emphasis
      token: state.tokens.length - 1,
      end: -1,
      open: scanned.can_open,
      close: scanned.can_close
    });
  }
  state.pos += scanned.length;
  return true;
}
 | ||
// Convert matched `~~` delimiter pairs into s_open/s_close tokens, then
// relocate any leftover lone `~` markers so they end up outside the
// closing tags they would otherwise sit inside.
function postProcess$1(state, delimiters) {
  let token;
  const loneMarkers = [];
  const max = delimiters.length;
  for (let i = 0; i < max; i++) {
    const startDelim = delimiters[i];
    if (startDelim.marker !== 0x7E /* ~ */) {
      continue;
    }
    // Only delimiters matched as openers carry a valid `end` index.
    if (startDelim.end === -1) {
      continue;
    }
    const endDelim = delimiters[startDelim.end];
    token = state.tokens[startDelim.token];
    token.type = 's_open';
    token.tag = 's';
    token.nesting = 1;
    token.markup = '~~';
    token.content = '';
    token = state.tokens[endDelim.token];
    token.type = 's_close';
    token.tag = 's';
    token.nesting = -1;
    token.markup = '~~';
    token.content = '';
    // A single '~' text token right before the closer is a leftover from
    // an odd-length run; remember it for relocation below.
    if (state.tokens[endDelim.token - 1].type === 'text' && state.tokens[endDelim.token - 1].content === '~') {
      loneMarkers.push(endDelim.token - 1);
    }
  }

  // If a marker sequence has an odd number of characters, it's split
  // like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the
  // start of the sequence.
  //
  // So, we have to move all those markers after subsequent s_close tags.
  //
  while (loneMarkers.length) {
    const i = loneMarkers.pop();
    let j = i + 1;
    // Find the last consecutive s_close after the lone marker...
    while (j < state.tokens.length && state.tokens[j].type === 's_close') {
      j++;
    }
    j--;
    // ...and swap the marker past it.
    if (i !== j) {
      token = state.tokens[j];
      state.tokens[j] = state.tokens[i];
      state.tokens[i] = token;
    }
  }
}
 | ||
| 
 | ||
// Walk through delimiter list and replace text tokens with tags
//
function strikethrough_postProcess(state) {
  // Top-level delimiters first, then one pass per nested tag's list.
  postProcess$1(state, state.delimiters);
  for (const meta of state.tokens_meta) {
    if (meta && meta.delimiters) {
      postProcess$1(state, meta.delimiters);
    }
  }
}
 | ||
// Rule bundle consumed by the inline parser: `tokenize` emits the marker
// text tokens + delimiters, `postProcess` rewrites matched pairs into
// s_open/s_close tags.
var r_strikethrough = {
  tokenize: strikethrough_tokenize,
  postProcess: strikethrough_postProcess
};
 | ||
| 
 | ||
| // Process *this* and _that_
 | ||
| //
 | ||
| 
 | ||
// Insert each emphasis marker as a separate text token and record it in
// the delimiter list; pairing into em/strong happens in postProcess.
//
function emphasis_tokenize(state, silent) {
  if (silent) {
    return false;
  }
  const start = state.pos;
  const marker = state.src.charCodeAt(start);
  if (marker !== 0x5F /* _ */ && marker !== 0x2A /* * */) {
    return false;
  }
  // '*' may split words; '_' may not.
  const scanned = state.scanDelims(state.pos, marker === 0x2A);
  const markerStr = String.fromCharCode(marker);
  for (let n = scanned.length; n > 0; n--) {
    const textToken = state.push('text', '', 0);
    textToken.content = markerStr;
    state.delimiters.push({
      // Char code of the starting marker (number).
      marker,
      // Total length of these series of delimiters.
      length: scanned.length,
      // A position of the token this delimiter corresponds to.
      token: state.tokens.length - 1,
      // If this delimiter is matched as a valid opener, `end` will be
      // equal to its position, otherwise it's `-1`.
      end: -1,
      // Boolean flags that determine if this delimiter could open or
      // close an emphasis.
      open: scanned.can_open,
      close: scanned.can_close
    });
  }
  state.pos += scanned.length;
  return true;
}
 | ||
// Convert matched emphasis delimiter pairs into em/strong tokens.
// Walks the delimiter list from the end toward the start so an adjacent
// inner pair can be merged with the current one into a single <strong>.
function postProcess(state, delimiters) {
  const max = delimiters.length;
  for (let i = max - 1; i >= 0; i--) {
    const startDelim = delimiters[i];
    if (startDelim.marker !== 0x5F /* _ */ && startDelim.marker !== 0x2A /* * */) {
      continue;
    }

    // Process only opening markers
    if (startDelim.end === -1) {
      continue;
    }
    const endDelim = delimiters[startDelim.end];

    // If the previous delimiter has the same marker and is adjacent to this one,
    // merge those into one strong delimiter.
    //
    // `<em><em>whatever</em></em>` -> `<strong>whatever</strong>`
    //
    const isStrong = i > 0 && delimiters[i - 1].end === startDelim.end + 1 &&
    // check that first two markers match and adjacent
    delimiters[i - 1].marker === startDelim.marker && delimiters[i - 1].token === startDelim.token - 1 &&
    // check that last two markers are adjacent (we can safely assume they match)
    delimiters[startDelim.end + 1].token === endDelim.token + 1;
    const ch = String.fromCharCode(startDelim.marker);
    const token_o = state.tokens[startDelim.token];
    token_o.type = isStrong ? 'strong_open' : 'em_open';
    token_o.tag = isStrong ? 'strong' : 'em';
    token_o.nesting = 1;
    token_o.markup = isStrong ? ch + ch : ch;
    token_o.content = '';
    const token_c = state.tokens[endDelim.token];
    token_c.type = isStrong ? 'strong_close' : 'em_close';
    token_c.tag = isStrong ? 'strong' : 'em';
    token_c.nesting = -1;
    token_c.markup = isStrong ? ch + ch : ch;
    token_c.content = '';
    if (isStrong) {
      // Blank out the merged pair's text tokens; the strong tokens'
      // markup already accounts for both marker characters.
      state.tokens[delimiters[i - 1].token].content = '';
      state.tokens[delimiters[startDelim.end + 1].token].content = '';
      i--;
    }
  }
}
 | ||
| 
 | ||
// Walk through delimiter list and replace text tokens with tags
//
function emphasis_post_process(state) {
  // Top-level delimiters first, then one pass per nested tag's list.
  postProcess(state, state.delimiters);
  for (const meta of state.tokens_meta) {
    if (meta && meta.delimiters) {
      postProcess(state, meta.delimiters);
    }
  }
}
 | ||
// Rule bundle consumed by the inline parser: `tokenize` emits the marker
// text tokens + delimiters, `postProcess` rewrites matched pairs into
// em/strong open and close tags.
var r_emphasis = {
  tokenize: emphasis_tokenize,
  postProcess: emphasis_post_process
};
 | ||
| 
 | ||
// Process [link](<to> "stuff")
//
// Handles inline links `[text](href "title")` and, as a fallback,
// reference links `[text][label]` / `[text][]` / `[text]` resolved
// against `state.env.references`.
function link(state, silent) {
  let code, label, res, ref;
  let href = '';
  let title = '';
  let start = state.pos;
  let parseReference = true;
  if (state.src.charCodeAt(state.pos) !== 0x5B /* [ */) {
    return false;
  }
  const oldPos = state.pos;
  const max = state.posMax;
  const labelStart = state.pos + 1;
  const labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, true);

  // parser failed to find ']', so it's not a valid link
  if (labelEnd < 0) {
    return false;
  }
  let pos = labelEnd + 1;
  if (pos < max && state.src.charCodeAt(pos) === 0x28 /* ( */) {
    //
    // Inline link
    //

    // might have found a valid shortcut link, disable reference parsing
    parseReference = false;

    // [link](  <href>  "title"  )
    //        ^^ skipping these spaces
    pos++;
    for (; pos < max; pos++) {
      code = state.src.charCodeAt(pos);
      if (!isSpace(code) && code !== 0x0A) {
        break;
      }
    }
    if (pos >= max) {
      return false;
    }

    // [link](  <href>  "title"  )
    //          ^^^^^^ parsing link destination
    start = pos;
    res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax);
    if (res.ok) {
      href = state.md.normalizeLink(res.str);
      if (state.md.validateLink(href)) {
        pos = res.pos;
      } else {
        // Destination rejected by the validator: keep parsing, but emit
        // an empty href.
        href = '';
      }

      // [link](  <href>  "title"  )
      //                ^^ skipping these spaces
      start = pos;
      for (; pos < max; pos++) {
        code = state.src.charCodeAt(pos);
        if (!isSpace(code) && code !== 0x0A) {
          break;
        }
      }

      // [link](  <href>  "title"  )
      //                  ^^^^^^^ parsing link title
      res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax);
      if (pos < max && start !== pos && res.ok) {
        title = res.str;
        pos = res.pos;

        // [link](  <href>  "title"  )
        //                         ^^ skipping these spaces
        for (; pos < max; pos++) {
          code = state.src.charCodeAt(pos);
          if (!isSpace(code) && code !== 0x0A) {
            break;
          }
        }
      }
    }
    if (pos >= max || state.src.charCodeAt(pos) !== 0x29 /* ) */) {
      // parsing a valid shortcut link failed, fallback to reference
      parseReference = true;
    }
    pos++;
  }
  if (parseReference) {
    //
    // Link reference
    //
    if (typeof state.env.references === 'undefined') {
      return false;
    }
    if (pos < max && state.src.charCodeAt(pos) === 0x5B /* [ */) {
      start = pos + 1;
      pos = state.md.helpers.parseLinkLabel(state, pos);
      if (pos >= 0) {
        label = state.src.slice(start, pos++);
      } else {
        pos = labelEnd + 1;
      }
    } else {
      pos = labelEnd + 1;
    }

    // covers label === '' and label === undefined
    // (collapsed reference link and shortcut reference link respectively)
    if (!label) {
      label = state.src.slice(labelStart, labelEnd);
    }
    ref = state.env.references[normalizeReference(label)];
    if (!ref) {
      // Unknown label: restore the scan position so other rules can try.
      state.pos = oldPos;
      return false;
    }
    href = ref.href;
    title = ref.title;
  }

  //
  // We found the end of the link, and know for a fact it's a valid link;
  // so all that's left to do is to call tokenizer.
  //
  if (!silent) {
    // Restrict the tokenizer to the label span so only the link text is
    // parsed as the link's children.
    state.pos = labelStart;
    state.posMax = labelEnd;
    const token_o = state.push('link_open', 'a', 1);
    const attrs = [['href', href]];
    token_o.attrs = attrs;
    if (title) {
      attrs.push(['title', title]);
    }
    // linkLevel > 0 disables the linkify rule inside the label.
    state.linkLevel++;
    state.md.inline.tokenize(state);
    state.linkLevel--;
    state.push('link_close', 'a', -1);
  }
  state.pos = pos;
  state.posMax = max;
  return true;
}
 | ||
| 
 | ||
| // Process 
 | ||
| 
 | ||
/**
 * Inline rule: images — `![alt](<src> "title")` or reference-style
 * `![alt][label]` / `![alt][]` / `![alt]`.
 *
 * Returns `true` after consuming a valid image (in `silent` mode nothing is
 * pushed); returns `false` and restores `state.pos` otherwise.
 **/
function image(state, silent) {
  let code, content, label, pos, ref, res, title, start;
  let href = '';
  const oldPos = state.pos;
  const max = state.posMax;
  // Must start with the two-character sequence "![".
  if (state.src.charCodeAt(state.pos) !== 0x21 /* ! */) {
    return false;
  }
  if (state.src.charCodeAt(state.pos + 1) !== 0x5B /* [ */) {
    return false;
  }
  const labelStart = state.pos + 2;
  const labelEnd = state.md.helpers.parseLinkLabel(state, state.pos + 1, false);

  // parser failed to find ']', so it's not a valid link
  if (labelEnd < 0) {
    return false;
  }
  pos = labelEnd + 1;
  if (pos < max && state.src.charCodeAt(pos) === 0x28 /* ( */) {
    //
    // Inline link
    //

    // [link](  <href>  "title"  )
    //        ^^ skipping these spaces
    pos++;
    for (; pos < max; pos++) {
      code = state.src.charCodeAt(pos);
      if (!isSpace(code) && code !== 0x0A) {
        break;
      }
    }
    if (pos >= max) {
      return false;
    }

    // [link](  <href>  "title"  )
    //          ^^^^^^ parsing link destination
    start = pos;
    res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax);
    if (res.ok) {
      href = state.md.normalizeLink(res.str);
      if (state.md.validateLink(href)) {
        pos = res.pos;
      } else {
        // Destination failed validation (e.g. bad protocol): keep the image
        // but render it with an empty src instead of rejecting it.
        href = '';
      }
    }

    // [link](  <href>  "title"  )
    //                ^^ skipping these spaces
    start = pos;
    for (; pos < max; pos++) {
      code = state.src.charCodeAt(pos);
      if (!isSpace(code) && code !== 0x0A) {
        break;
      }
    }

    // [link](  <href>  "title"  )
    //                  ^^^^^^^ parsing link title
    res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax);
    // `start !== pos` requires at least one whitespace character between
    // destination and title for the title to count.
    if (pos < max && start !== pos && res.ok) {
      title = res.str;
      pos = res.pos;

      // [link](  <href>  "title"  )
      //                         ^^ skipping these spaces
      for (; pos < max; pos++) {
        code = state.src.charCodeAt(pos);
        if (!isSpace(code) && code !== 0x0A) {
          break;
        }
      }
    } else {
      title = '';
    }
    if (pos >= max || state.src.charCodeAt(pos) !== 0x29 /* ) */) {
      state.pos = oldPos;
      return false;
    }
    pos++;
  } else {
    //
    // Link reference
    //
    if (typeof state.env.references === 'undefined') {
      return false;
    }
    if (pos < max && state.src.charCodeAt(pos) === 0x5B /* [ */) {
      start = pos + 1;
      pos = state.md.helpers.parseLinkLabel(state, pos);
      if (pos >= 0) {
        label = state.src.slice(start, pos++);
      } else {
        pos = labelEnd + 1;
      }
    } else {
      pos = labelEnd + 1;
    }

    // covers label === '' and label === undefined
    // (collapsed reference link and shortcut reference link respectively)
    if (!label) {
      label = state.src.slice(labelStart, labelEnd);
    }
    ref = state.env.references[normalizeReference(label)];
    if (!ref) {
      state.pos = oldPos;
      return false;
    }
    href = ref.href;
    title = ref.title;
  }

  //
  // We found the end of the link, and know for a fact it's a valid link;
  // so all that's left to do is to call tokenizer.
  //
  if (!silent) {
    content = state.src.slice(labelStart, labelEnd);
    // Alt text may itself contain inline markup, so parse it into children.
    const tokens = [];
    state.md.inline.parse(content, state.md, state.env, tokens);
    const token = state.push('image', 'img', 0);
    // NOTE(review): 'alt' is left empty here — presumably the renderer
    // rebuilds it from token.children; confirm against the image renderer.
    const attrs = [['src', href], ['alt', '']];
    token.attrs = attrs;
    token.children = tokens;
    token.content = content;
    if (title) {
      attrs.push(['title', title]);
    }
  }
  state.pos = pos;
  state.posMax = max;
  return true;
}
 | ||
| 
 | ||
// Process autolinks '<protocol:...>' and '<user@host>'

/* eslint max-len:0 */
const EMAIL_RE = /^([a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)$/;
/* eslint-disable-next-line no-control-regex */
const AUTOLINK_RE = /^([a-zA-Z][a-zA-Z0-9+.-]{1,31}):([^<>\x00-\x20]*)$/;

/**
 * Inline rule: autolinks — `<https://example.com>` or `<user@example.com>`.
 *
 * On success pushes link_open / text / link_close tokens (unless `silent`),
 * advances `state.pos` past the closing '>', and returns `true`; otherwise
 * returns `false` without moving `state.pos`.
 *
 * The URI and e-mail forms emit an identical token structure and differ only
 * in how the href is built, so that branch is folded into one shared tail
 * (the original duplicated the token-pushing code in both branches).
 **/
function autolink(state, silent) {
  let pos = state.pos;
  if (state.src.charCodeAt(pos) !== 0x3C /* < */) {
    return false;
  }
  const start = state.pos;
  const max = state.posMax;

  // Scan for the closing '>'; a second '<' or end of input aborts.
  for (;;) {
    if (++pos >= max) return false;
    const ch = state.src.charCodeAt(pos);
    if (ch === 0x3C /* < */) return false;
    if (ch === 0x3E /* > */) break;
  }
  const url = state.src.slice(start + 1, pos);

  // Decide how to build the href. Note: as in the original, a string that
  // matches AUTOLINK_RE but fails validation is rejected outright — the
  // e-mail form is never tried as a fallback.
  let fullUrl;
  if (AUTOLINK_RE.test(url)) {
    fullUrl = state.md.normalizeLink(url);
  } else if (EMAIL_RE.test(url)) {
    fullUrl = state.md.normalizeLink('mailto:' + url);
  } else {
    return false;
  }
  if (!state.md.validateLink(fullUrl)) {
    return false;
  }
  if (!silent) {
    const token_o = state.push('link_open', 'a', 1);
    token_o.attrs = [['href', fullUrl]];
    token_o.markup = 'autolink';
    token_o.info = 'auto';
    const token_t = state.push('text', '', 0);
    token_t.content = state.md.normalizeLinkText(url);
    const token_c = state.push('link_close', 'a', -1);
    token_c.markup = 'autolink';
    token_c.info = 'auto';
  }
  state.pos += url.length + 2; // consume '<' + url + '>'
  return true;
}
 | ||
| 
 | ||
// Process html tags

// True when `str` begins with an opening <a …> tag (case-insensitive).
function isLinkOpen(str) {
  return str.search(/^<a[>\s]/i) === 0;
}
 | ||
// True when `str` begins with a closing </a> tag (case-insensitive,
// optional whitespace before '>').
function isLinkClose(str) {
  return str.match(/^<\/a\s*>/i) !== null;
}
 | ||
// True when char code `ch` is an ASCII letter (A-Z or a-z).
// Equivalent to the bitwise-lowercase trick, spelled out as two ranges.
function isLetter(ch) {
  return (ch >= 0x41 /* A */ && ch <= 0x5A /* Z */) ||
         (ch >= 0x61 /* a */ && ch <= 0x7A /* z */);
}
 | ||
// Inline rule: raw inline HTML tags. Only active when options.html is set.
function html_inline(state, silent) {
  if (!state.md.options.html) {
    return false;
  }

  const max = state.posMax;
  const pos = state.pos;

  // Must start with '<' and leave room for at least '<x>'.
  if (state.src.charCodeAt(pos) !== 0x3C /* < */ || pos + 2 >= max) {
    return false;
  }

  // Cheap rejection based on the character after '<': only '!', '?', '/'
  // or a letter can start a tag-like construct.
  const next = state.src.charCodeAt(pos + 1);
  const maybeTag =
    next === 0x21 /* ! */ ||
    next === 0x3F /* ? */ ||
    next === 0x2F /* / */ ||
    isLetter(next);
  if (!maybeTag) {
    return false;
  }

  const match = state.src.slice(pos).match(HTML_TAG_RE);
  if (!match) {
    return false;
  }

  if (!silent) {
    const token = state.push('html_inline', '', 0);
    token.content = match[0];
    // Track raw <a>…</a> nesting depth.
    // NOTE(review): presumably consumed by the linkify rule to avoid
    // creating links inside links — confirm.
    if (isLinkOpen(token.content)) state.linkLevel++;
    if (isLinkClose(token.content)) state.linkLevel--;
  }
  state.pos += match[0].length;
  return true;
}
 | ||
| 
 | ||
// Process html entity - &#123;, &#xAF;, &quot;, ...

const DIGITAL_RE = /^&#((?:x[a-f0-9]{1,6}|[0-9]{1,7}));/i;
const NAMED_RE = /^&([a-z][a-z0-9]{1,31});/i;

// Inline rule: HTML character references, both numeric (&#65; / &#x41;)
// and named (&amp;). Emits a 'text_special' token carrying the decoded
// character, with the raw reference preserved in token.markup.
function entity(state, silent) {
  const pos = state.pos;
  const max = state.posMax;
  if (state.src.charCodeAt(pos) !== 0x26 /* & */) return false;
  if (pos + 1 >= max) return false;

  if (state.src.charCodeAt(pos + 1) === 0x23 /* # */) {
    // Numeric reference: decimal or hexadecimal.
    const match = state.src.slice(pos).match(DIGITAL_RE);
    if (!match) return false;
    if (!silent) {
      const isHex = match[1][0].toLowerCase() === 'x';
      const code = isHex ? parseInt(match[1].slice(1), 16) : parseInt(match[1], 10);
      const token = state.push('text_special', '', 0);
      // Invalid code points are replaced with U+FFFD.
      token.content = isValidEntityCode(code) ? fromCodePoint(code) : fromCodePoint(0xFFFD);
      token.markup = match[0];
      token.info = 'entity';
    }
    state.pos += match[0].length;
    return true;
  }

  // Named reference: only treated as an entity if it actually decodes
  // to something different from its literal spelling.
  const match = state.src.slice(pos).match(NAMED_RE);
  if (!match) return false;
  const decoded = entities.decodeHTML(match[0]);
  if (decoded === match[0]) return false;
  if (!silent) {
    const token = state.push('text_special', '', 0);
    token.content = decoded;
    token.markup = match[0];
    token.info = 'entity';
  }
  state.pos += match[0].length;
  return true;
}
 | ||
| 
 | ||
// For each opening emphasis-like marker find a matching closing one
//
// Each entry of `delimiters` carries the fields read/written below:
// { marker, length, token, open, close, end }. Matching is recorded by
// setting `opener.end` to the closer's index and clearing the consumed
// open/close flags. The `jumps` array and `openersBottom` lower bounds
// keep the scan linear even on pathological inputs.
//
function processDelimiters(delimiters) {
  const openersBottom = {};
  const max = delimiters.length;
  if (!max) return;

  // headerIdx is the first delimiter of the current (where closer is) delimiter run
  let headerIdx = 0;
  let lastTokenIdx = -2; // needs any value lower than -1
  // jumps[i]: how many extra delimiters to skip backwards from i when
  // scanning for an opener (skips over already-matched sequences).
  const jumps = [];
  for (let closerIdx = 0; closerIdx < max; closerIdx++) {
    const closer = delimiters[closerIdx];
    jumps.push(0);

    // markers belong to same delimiter run if:
    //  - they have adjacent tokens
    //  - AND markers are the same
    //
    if (delimiters[headerIdx].marker !== closer.marker || lastTokenIdx !== closer.token - 1) {
      headerIdx = closerIdx;
    }
    lastTokenIdx = closer.token;

    // Length is only used for emphasis-specific "rule of 3",
    // if it's not defined (in strikethrough or 3rd party plugins),
    // we can default it to 0 to disable those checks.
    //
    closer.length = closer.length || 0;
    if (!closer.close) continue;

    // Previously calculated lower bounds (previous fails)
    // for each marker, each delimiter length modulo 3,
    // and for whether this closer can be an opener;
    // https://github.com/commonmark/cmark/commit/34250e12ccebdc6372b8b49c44fab57c72443460
    /* eslint-disable-next-line no-prototype-builtins */
    if (!openersBottom.hasOwnProperty(closer.marker)) {
      openersBottom[closer.marker] = [-1, -1, -1, -1, -1, -1];
    }
    const minOpenerIdx = openersBottom[closer.marker][(closer.open ? 3 : 0) + closer.length % 3];
    let openerIdx = headerIdx - jumps[headerIdx] - 1;
    let newMinOpenerIdx = openerIdx;
    // Scan backwards for a compatible, still-unmatched opener.
    for (; openerIdx > minOpenerIdx; openerIdx -= jumps[openerIdx] + 1) {
      const opener = delimiters[openerIdx];
      if (opener.marker !== closer.marker) continue;
      if (opener.open && opener.end < 0) {
        let isOddMatch = false;

        // from spec:
        //
        // If one of the delimiters can both open and close emphasis, then the
        // sum of the lengths of the delimiter runs containing the opening and
        // closing delimiters must not be a multiple of 3 unless both lengths
        // are multiples of 3.
        //
        if (opener.close || closer.open) {
          if ((opener.length + closer.length) % 3 === 0) {
            if (opener.length % 3 !== 0 || closer.length % 3 !== 0) {
              isOddMatch = true;
            }
          }
        }
        if (!isOddMatch) {
          // If previous delimiter cannot be an opener, we can safely skip
          // the entire sequence in future checks. This is required to make
          // sure algorithm has linear complexity (see *_*_*_*_*_... case).
          //
          const lastJump = openerIdx > 0 && !delimiters[openerIdx - 1].open ? jumps[openerIdx - 1] + 1 : 0;
          jumps[closerIdx] = closerIdx - openerIdx + lastJump;
          jumps[openerIdx] = lastJump;
          closer.open = false;
          opener.end = closerIdx;
          opener.close = false;
          newMinOpenerIdx = -1;
          // treat next token as start of run,
          // it optimizes skips in **<...>**a**<...>** pathological case
          lastTokenIdx = -2;
          break;
        }
      }
    }
    if (newMinOpenerIdx !== -1) {
      // If match for this delimiter run failed, we want to set lower bound for
      // future lookups. This is required to make sure algorithm has linear
      // complexity.
      //
      // See details here:
      // https://github.com/commonmark/cmark/issues/178#issuecomment-270417442
      //
      openersBottom[closer.marker][(closer.open ? 3 : 0) + (closer.length || 0) % 3] = newMinOpenerIdx;
    }
  }
}
 | ||
// Post-processing rule ('balance_pairs'): match emphasis-like delimiters
// at the top level and inside every nested inline context (tokens_meta).
function link_pairs(state) {
  processDelimiters(state.delimiters);
  const metaList = state.tokens_meta;
  for (const meta of metaList) {
    if (meta && meta.delimiters) {
      processDelimiters(meta.delimiters);
    }
  }
}
 | ||
| 
 | ||
// Clean up tokens after emphasis and strikethrough postprocessing:
// merge adjacent text nodes into one and re-calculate all token levels
//
// This is necessary because initially emphasis delimiter markers (*, _, ~)
// are treated as their own separate text tokens. Then emphasis rule either
// leaves them as text (needed to merge with adjacent text) or turns them
// into opening/closing tags (which messes up levels inside).
//

function fragments_join(state) {
  const tokens = state.tokens;
  const count = tokens.length;
  let level = 0;
  let write = 0;
  let read = 0;

  for (read = 0; read < count; read++) {
    const token = tokens[read];

    // Recompute nesting levels, since postprocessing may have turned
    // text tokens into opening/closing tags.
    if (token.nesting < 0) level--; // closing tag
    token.level = level;
    if (token.nesting > 0) level++; // opening tag

    const next = read + 1 < count ? tokens[read + 1] : null;
    if (token.type === 'text' && next !== null && next.type === 'text') {
      // Fold this text token into the following one; the current slot
      // is dropped by not advancing the write cursor.
      next.content = token.content + next.content;
    } else {
      if (read !== write) {
        tokens[write] = token;
      }
      write++;
    }
  }

  // Truncate the array if any tokens were merged away.
  if (read !== write) {
    tokens.length = write;
  }
}
 | ||
| 
 | ||
/** internal
 * class ParserInline
 *
 * Tokenizes paragraph content.
 **/

// Parser rules

// Order matters: `tokenize` tries these in sequence at each position and
// stops at the first rule that consumes input.
const _rules = [['text', text], ['linkify', linkify], ['newline', newline], ['escape', escape], ['backticks', backtick], ['strikethrough', r_strikethrough.tokenize], ['emphasis', r_emphasis.tokenize], ['link', link], ['image', image], ['autolink', autolink], ['html_inline', html_inline], ['entity', entity]];

// `rule2` ruleset was created specifically for emphasis/strikethrough
// post-processing and may be changed in the future.
//
// Don't use this for anything except pairs (plugins working with `balance_pairs`).
//
const _rules2 = [['balance_pairs', link_pairs], ['strikethrough', r_strikethrough.postProcess], ['emphasis', r_emphasis.postProcess],
// rules for pairs separate '**' into its own text tokens, which may be left unused,
// rule below merges unused segments back with the rest of the text
['fragments_join', fragments_join]];
 | ||
| 
 | ||
/**
 * new ParserInline()
 **/
function ParserInline() {
  /**
   * ParserInline#ruler -> Ruler
   *
   * [[Ruler]] instance. Keep configuration of inline rules.
   **/
  this.ruler = new Ruler();
  for (const [name, rule] of _rules) {
    this.ruler.push(name, rule);
  }

  /**
   * ParserInline#ruler2 -> Ruler
   *
   * [[Ruler]] instance. Second ruler used for post-processing
   * (e.g. in emphasis-like rules).
   **/
  this.ruler2 = new Ruler();
  for (const [name, rule] of _rules2) {
    this.ruler2.push(name, rule);
  }
}
 | ||
| 
 | ||
// Skip single token by running all rules in validation mode;
// returns `true` if any rule reported success
//
// NOTE(review): despite the note above, this function has no return value;
// "success" is communicated solely by advancing state.pos. Results are
// memoized in state.cache keyed by the start position.
//
ParserInline.prototype.skipToken = function (state) {
  const pos = state.pos;
  const rules = this.ruler.getRules('');
  const len = rules.length;
  const maxNesting = state.md.options.maxNesting;
  const cache = state.cache;
  // Memoized: a previous call already computed where this token ends.
  if (typeof cache[pos] !== 'undefined') {
    state.pos = cache[pos];
    return;
  }
  let ok = false;
  if (state.level < maxNesting) {
    for (let i = 0; i < len; i++) {
      // Increment state.level and decrement it later to limit recursion.
      // It's harmless to do here, because no tokens are created. But ideally,
      // we'd need a separate private state variable for this purpose.
      //
      state.level++;
      ok = rules[i](state, true);
      state.level--;
      if (ok) {
        if (pos >= state.pos) {
          throw new Error("inline rule didn't increment state.pos");
        }
        break;
      }
    }
  } else {
    // Too much nesting, just skip until the end of the paragraph.
    //
    // NOTE: this will cause links to behave incorrectly in the following case,
    //       when an amount of `[` is exactly equal to `maxNesting + 1`:
    //
    //       [[[[[[[[[[[[[[[[[[[[[foo]()
    //
    // TODO: remove this workaround when CM standard will allow nested links
    //       (we can replace it by preventing links from being parsed in
    //       validation mode)
    //
    state.pos = state.posMax;
  }
  // No rule matched: treat the current character as a one-character token.
  if (!ok) {
    state.pos++;
  }
  cache[pos] = state.pos;
};
 | ||
| 
 | ||
// Generate tokens for input range
//
// Walks state.src from state.pos to state.posMax, trying every inline rule
// at each position. Characters that no rule claims are accumulated into
// state.pending and flushed as a plain text token at the end.
//
ParserInline.prototype.tokenize = function (state) {
  const rules = this.ruler.getRules('');
  const len = rules.length;
  const end = state.posMax;
  const maxNesting = state.md.options.maxNesting;
  while (state.pos < end) {
    // Try all possible rules.
    // On success, rule should:
    //
    // - update `state.pos`
    // - update `state.tokens`
    // - return true
    const prevPos = state.pos;
    let ok = false;
    if (state.level < maxNesting) {
      for (let i = 0; i < len; i++) {
        ok = rules[i](state, false);
        if (ok) {
          if (prevPos >= state.pos) {
            throw new Error("inline rule didn't increment state.pos");
          }
          break;
        }
      }
    }
    if (ok) {
      if (state.pos >= end) {
        break;
      }
      continue;
    }
    // No rule claimed this character — buffer it as pending text.
    state.pending += state.src[state.pos++];
  }
  if (state.pending) {
    state.pushPending();
  }
};
 | ||
| 
 | ||
/**
 * ParserInline.parse(str, md, env, outTokens)
 *
 * Process input string and push inline tokens into `outTokens`
 **/
ParserInline.prototype.parse = function (str, md, env, outTokens) {
  const state = new this.State(str, md, env, outTokens);
  this.tokenize(state);
  // Run the post-processing ruleset (balance_pairs, emphasis, ...).
  for (const rule of this.ruler2.getRules('')) {
    rule(state);
  }
};
ParserInline.prototype.State = StateInline;
 | ||
| 
 | ||
// markdown-it default options

var cfg_default = {
  options: {
    // Enable HTML tags in source
    html: false,
    // Use '/' to close single tags (<br />)
    xhtmlOut: false,
    // Convert '\n' in paragraphs into <br>
    breaks: false,
    // CSS language prefix for fenced blocks
    langPrefix: 'language-',
    // autoconvert URL-like texts to links
    linkify: false,
    // Enable some language-neutral replacements + quotes beautification
    typographer: false,
    // Double + single quotes replacement pairs, when typographer enabled,
    // and smartquotes on. Could be either a String or an Array.
    //
    // For example, you can use '«»„“' for Russian, '„“‚‘' for German,
    // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
    quotes: '\u201c\u201d\u2018\u2019',
    /* “”‘’ */

    // Highlighter function. Should return escaped HTML,
    // or '' if the source string is not changed and should be escaped externally.
    // If result starts with <pre... internal wrapper is skipped.
    //
    // function (/*str, lang*/) { return ''; }
    //
    highlight: null,
    // Internal protection, recursion limit
    maxNesting: 100
  },
  // NOTE(review): empty component rule lists here appear to mean "all
  // registered rules enabled" (contrast with the explicit lists in the
  // zero/commonmark presets) — confirm in MarkdownIt.configure.
  components: {
    core: {},
    block: {},
    inline: {}
  }
};
 | ||
| 
 | ||
| // "Zero" preset, with nothing enabled. Useful for manual configuring of simple
 | ||
| // modes. For example, to parse bold/italic only.
 | ||
| 
 | ||
| var cfg_zero = {
 | ||
|   options: {
 | ||
|     // Enable HTML tags in source
 | ||
|     html: false,
 | ||
|     // Use '/' to close single tags (<br />)
 | ||
|     xhtmlOut: false,
 | ||
|     // Convert '\n' in paragraphs into <br>
 | ||
|     breaks: false,
 | ||
|     // CSS language prefix for fenced blocks
 | ||
|     langPrefix: 'language-',
 | ||
|     // autoconvert URL-like texts to links
 | ||
|     linkify: false,
 | ||
|     // Enable some language-neutral replacements + quotes beautification
 | ||
|     typographer: false,
 | ||
|     // Double + single quotes replacement pairs, when typographer enabled,
 | ||
|     // and smartquotes on. Could be either a String or an Array.
 | ||
|     //
 | ||
|     // For example, you can use '«»„“' for Russian, '„“‚‘' for German,
 | ||
|     // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
 | ||
|     quotes: '\u201c\u201d\u2018\u2019',
 | ||
|     /* “”‘’ */
 | ||
| 
 | ||
|     // Highlighter function. Should return escaped HTML,
 | ||
|     // or '' if the source string is not changed and should be escaped externaly.
 | ||
|     // If result starts with <pre... internal wrapper is skipped.
 | ||
|     //
 | ||
|     // function (/*str, lang*/) { return ''; }
 | ||
|     //
 | ||
|     highlight: null,
 | ||
|     // Internal protection, recursion limit
 | ||
|     maxNesting: 20
 | ||
|   },
 | ||
|   components: {
 | ||
|     core: {
 | ||
|       rules: ['normalize', 'block', 'inline', 'text_join']
 | ||
|     },
 | ||
|     block: {
 | ||
|       rules: ['paragraph']
 | ||
|     },
 | ||
|     inline: {
 | ||
|       rules: ['text'],
 | ||
|       rules2: ['balance_pairs', 'fragments_join']
 | ||
|     }
 | ||
|   }
 | ||
| };
 | ||
| 
 | ||
// Commonmark default options

var cfg_commonmark = {
  options: {
    // Enable HTML tags in source
    html: true,
    // Use '/' to close single tags (<br />)
    xhtmlOut: true,
    // Convert '\n' in paragraphs into <br>
    breaks: false,
    // CSS language prefix for fenced blocks
    langPrefix: 'language-',
    // autoconvert URL-like texts to links
    linkify: false,
    // Enable some language-neutral replacements + quotes beautification
    typographer: false,
    // Double + single quotes replacement pairs, when typographer enabled,
    // and smartquotes on. Could be either a String or an Array.
    //
    // For example, you can use '«»„“' for Russian, '„“‚‘' for German,
    // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
    quotes: '\u201c\u201d\u2018\u2019',
    /* “”‘’ */

    // Highlighter function. Should return escaped HTML,
    // or '' if the source string is not changed and should be escaped externally.
    // If result starts with <pre... internal wrapper is skipped.
    //
    // function (/*str, lang*/) { return ''; }
    //
    highlight: null,
    // Internal protection, recursion limit
    maxNesting: 20
  },
  // Explicit rule lists: only the rules defined by the CommonMark spec are
  // enabled (no strikethrough, tables, linkify, etc.).
  components: {
    core: {
      rules: ['normalize', 'block', 'inline', 'text_join']
    },
    block: {
      rules: ['blockquote', 'code', 'fence', 'heading', 'hr', 'html_block', 'lheading', 'list', 'reference', 'paragraph']
    },
    inline: {
      rules: ['autolink', 'backticks', 'emphasis', 'entity', 'escape', 'html_inline', 'image', 'link', 'newline', 'text'],
      rules2: ['balance_pairs', 'emphasis', 'fragments_join']
    }
  }
};
 | ||
| 
 | ||
// Main parser class

// Preset registry. NOTE(review): presumably looked up by preset name in the
// MarkdownIt constructor / .configure() — confirm (constructor is outside
// this chunk).
const config = {
  default: cfg_default,
  zero: cfg_zero,
  commonmark: cfg_commonmark
};
 | ||
| 
 | ||
//
// This validator can prohibit more than really needed to prevent XSS. It's a
// tradeoff to keep code simple and to be secure by default.
//
// If you need different setup - override validator method as you wish. Or
// replace it with dummy function and use external sanitizer.
//

const BAD_PROTO_RE = /^(vbscript|javascript|file|data):/;
const GOOD_DATA_RE = /^data:image\/(gif|png|jpeg|webp);/;

// Default link validator: rejects dangerous protocols, with a carve-out
// for data: URLs that are raster images.
function validateLink(url) {
  // url should be normalized at this point, and existing entities are decoded
  const normalized = url.trim().toLowerCase();
  if (!BAD_PROTO_RE.test(normalized)) {
    return true;
  }
  return GOOD_DATA_RE.test(normalized);
}
 | ||
const RECODE_HOSTNAME_FOR = ['http:', 'https:', 'mailto:'];

// Normalize a link destination for use as an href: punycode-encode the
// hostname (for known protocols) and percent-encode the formatted URL.
function normalizeLink(url) {
  const parsed = mdurl__namespace.parse(url, true);
  if (parsed.hostname) {
    // Encode hostnames in urls like:
    // `http://host/`, `https://host/`, `mailto:user@host`, `//host/`
    //
    // We don't encode unknown schemas, because it's likely that we encode
    // something we shouldn't (e.g. `skype:name` treated as `skype:host`)
    //
    const shouldRecode = !parsed.protocol || RECODE_HOSTNAME_FOR.indexOf(parsed.protocol) >= 0;
    if (shouldRecode) {
      try {
        parsed.hostname = punycode.toASCII(parsed.hostname);
      } catch (er) {
        // best effort: keep the original hostname if encoding fails
      }
    }
  }
  return mdurl__namespace.encode(mdurl__namespace.format(parsed));
}
 | ||
// Normalize a link's display text: punycode-decode the hostname back to
// Unicode (for known protocols) and percent-decode the formatted URL.
function normalizeLinkText(url) {
  const parsed = mdurl__namespace.parse(url, true);
  if (parsed.hostname) {
    // Encode hostnames in urls like:
    // `http://host/`, `https://host/`, `mailto:user@host`, `//host/`
    //
    // We don't encode unknown schemas, because it's likely that we encode
    // something we shouldn't (e.g. `skype:name` treated as `skype:host`)
    //
    const shouldRecode = !parsed.protocol || RECODE_HOSTNAME_FOR.indexOf(parsed.protocol) >= 0;
    if (shouldRecode) {
      try {
        parsed.hostname = punycode.toUnicode(parsed.hostname);
      } catch (er) {
        // best effort: leave the hostname as-is if decoding fails
      }
    }
  }

  // add '%' to exclude list because of https://github.com/markdown-it/markdown-it/issues/720
  return mdurl__namespace.decode(mdurl__namespace.format(parsed), mdurl__namespace.decode.defaultChars + '%');
}
 | ||
| 
 | ||
| /**
 | ||
|  * class MarkdownIt
 | ||
|  *
 | ||
|  * Main parser/renderer class.
 | ||
|  *
 | ||
|  * ##### Usage
 | ||
|  *
 | ||
|  * ```javascript
 | ||
|  * // node.js, "classic" way:
 | ||
|  * var MarkdownIt = require('markdown-it'),
 | ||
|  *     md = new MarkdownIt();
 | ||
|  * var result = md.render('# markdown-it rulezz!');
 | ||
|  *
 | ||
|  * // node.js, the same, but with sugar:
 | ||
|  * var md = require('markdown-it')();
 | ||
|  * var result = md.render('# markdown-it rulezz!');
 | ||
|  *
 | ||
|  * // browser without AMD, added to "window" on script load
 | ||
|  * // Note, there are no dash.
 | ||
|  * var md = window.markdownit();
 | ||
|  * var result = md.render('# markdown-it rulezz!');
 | ||
|  * ```
 | ||
|  *
 | ||
|  * Single line rendering, without paragraph wrap:
 | ||
|  *
 | ||
|  * ```javascript
 | ||
|  * var md = require('markdown-it')();
 | ||
|  * var result = md.renderInline('__markdown-it__ rulezz!');
 | ||
|  * ```
 | ||
|  **/
 | ||
| 
 | ||
| /**
 | ||
|  * new MarkdownIt([presetName, options])
 | ||
|  * - presetName (String): optional, `commonmark` / `zero`
 | ||
|  * - options (Object)
 | ||
|  *
 | ||
 * Creates parser instance with given config. Can be called without `new`.
 | ||
|  *
 | ||
|  * ##### presetName
 | ||
|  *
 | ||
|  * MarkdownIt provides named presets as a convenience to quickly
 | ||
|  * enable/disable active syntax rules and options for common use cases.
 | ||
|  *
 | ||
|  * - ["commonmark"](https://github.com/markdown-it/markdown-it/blob/master/lib/presets/commonmark.mjs) -
 | ||
|  *   configures parser to strict [CommonMark](http://commonmark.org/) mode.
 | ||
|  * - [default](https://github.com/markdown-it/markdown-it/blob/master/lib/presets/default.mjs) -
 | ||
|  *   similar to GFM, used when no preset name given. Enables all available rules,
 | ||
|  *   but still without html, typographer & autolinker.
 | ||
|  * - ["zero"](https://github.com/markdown-it/markdown-it/blob/master/lib/presets/zero.mjs) -
 | ||
|  *   all rules disabled. Useful to quickly setup your config via `.enable()`.
 | ||
|  *   For example, when you need only `bold` and `italic` markup and nothing else.
 | ||
|  *
 | ||
|  * ##### options:
 | ||
|  *
 | ||
|  * - __html__ - `false`. Set `true` to enable HTML tags in source. Be careful!
 | ||
|  *   That's not safe! You may need external sanitizer to protect output from XSS.
 | ||
|  *   It's better to extend features via plugins, instead of enabling HTML.
 | ||
|  * - __xhtmlOut__ - `false`. Set `true` to add '/' when closing single tags
 | ||
|  *   (`<br />`). This is needed only for full CommonMark compatibility. In real
 | ||
|  *   world you will need HTML output.
 | ||
|  * - __breaks__ - `false`. Set `true` to convert `\n` in paragraphs into `<br>`.
 | ||
|  * - __langPrefix__ - `language-`. CSS language class prefix for fenced blocks.
 | ||
|  *   Can be useful for external highlighters.
 | ||
|  * - __linkify__ - `false`. Set `true` to autoconvert URL-like text to links.
 | ||
|  * - __typographer__  - `false`. Set `true` to enable [some language-neutral
 | ||
|  *   replacement](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/replacements.mjs) +
 | ||
|  *   quotes beautification (smartquotes).
 | ||
|  * - __quotes__ - `“”‘’`, String or Array. Double + single quotes replacement
 | ||
|  *   pairs, when typographer enabled and smartquotes on. For example, you can
 | ||
|  *   use `'«»„“'` for Russian, `'„“‚‘'` for German, and
 | ||
|  *   `['«\xA0', '\xA0»', '‹\xA0', '\xA0›']` for French (including nbsp).
 | ||
|  * - __highlight__ - `null`. Highlighter function for fenced code blocks.
 | ||
|  *   Highlighter `function (str, lang)` should return escaped HTML. It can also
 | ||
|  *   return empty string if the source was not changed and should be escaped
 | ||
 *   externally. If result starts with <pre... internal wrapper is skipped.
 | ||
|  *
 | ||
|  * ##### Example
 | ||
|  *
 | ||
|  * ```javascript
 | ||
|  * // commonmark mode
 | ||
|  * var md = require('markdown-it')('commonmark');
 | ||
|  *
 | ||
|  * // default mode
 | ||
|  * var md = require('markdown-it')();
 | ||
|  *
 | ||
|  * // enable everything
 | ||
|  * var md = require('markdown-it')({
 | ||
|  *   html: true,
 | ||
|  *   linkify: true,
 | ||
|  *   typographer: true
 | ||
|  * });
 | ||
|  * ```
 | ||
|  *
 | ||
|  * ##### Syntax highlighting
 | ||
|  *
 | ||
|  * ```js
 | ||
|  * var hljs = require('highlight.js') // https://highlightjs.org/
 | ||
|  *
 | ||
|  * var md = require('markdown-it')({
 | ||
|  *   highlight: function (str, lang) {
 | ||
|  *     if (lang && hljs.getLanguage(lang)) {
 | ||
|  *       try {
 | ||
|  *         return hljs.highlight(str, { language: lang, ignoreIllegals: true }).value;
 | ||
|  *       } catch (__) {}
 | ||
|  *     }
 | ||
|  *
 | ||
|  *     return ''; // use external default escaping
 | ||
|  *   }
 | ||
|  * });
 | ||
|  * ```
 | ||
|  *
 | ||
|  * Or with full wrapper override (if you need assign class to `<pre>` or `<code>`):
 | ||
|  *
 | ||
|  * ```javascript
 | ||
|  * var hljs = require('highlight.js') // https://highlightjs.org/
 | ||
|  *
 | ||
|  * // Actual default values
 | ||
|  * var md = require('markdown-it')({
 | ||
|  *   highlight: function (str, lang) {
 | ||
|  *     if (lang && hljs.getLanguage(lang)) {
 | ||
|  *       try {
 | ||
|  *         return '<pre><code class="hljs">' +
 | ||
|  *                hljs.highlight(str, { language: lang, ignoreIllegals: true }).value +
 | ||
|  *                '</code></pre>';
 | ||
|  *       } catch (__) {}
 | ||
|  *     }
 | ||
|  *
 | ||
|  *     return '<pre><code class="hljs">' + md.utils.escapeHtml(str) + '</code></pre>';
 | ||
|  *   }
 | ||
|  * });
 | ||
|  * ```
 | ||
|  *
 | ||
|  **/
 | ||
function MarkdownIt(presetName, options) {
  // Allow factory-style use: `MarkdownIt(...)` behaves like `new MarkdownIt(...)`.
  if (!(this instanceof MarkdownIt)) {
    return new MarkdownIt(presetName, options);
  }

  // One-argument form: when the first argument is not a preset name string,
  // treat it as the options object and fall back to the 'default' preset.
  if (!options && !isString(presetName)) {
    options = presetName || {};
    presetName = 'default';
  }

  /**
   * MarkdownIt#inline -> ParserInline
   *
   * The inline-level parser ([[ParserInline]]). Plugins add new inline rules
   * here; to simply toggle rules, prefer [[MarkdownIt.enable]] /
   * [[MarkdownIt.disable]].
   **/
  this.inline = new ParserInline();

  /**
   * MarkdownIt#block -> ParserBlock
   *
   * The block-level parser ([[ParserBlock]]). Plugins add new block rules
   * here; to simply toggle rules, prefer [[MarkdownIt.enable]] /
   * [[MarkdownIt.disable]].
   **/
  this.block = new ParserBlock();

  /**
   * MarkdownIt#core -> Core
   *
   * The [[Core]] rule-chain executor. Plugins add new core rules here; to
   * simply toggle rules, prefer [[MarkdownIt.enable]] / [[MarkdownIt.disable]].
   **/
  this.core = new Core();

  /**
   * MarkdownIt#renderer -> Renderer
   *
   * The token [[Renderer]]. Modify it to change output markup, or to register
   * rendering rules for token types introduced by plugins.
   *
   * ##### Example
   *
   * ```javascript
   * var md = require('markdown-it')();
   *
   * function myToken(tokens, idx, options, env, self) {
   *   //...
   *   return result;
   * };
   *
   * md.renderer.rules['my_token'] = myToken
   * ```
   *
   * See [[Renderer]] docs and [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.mjs).
   **/
  this.renderer = new Renderer();

  /**
   * MarkdownIt#linkify -> LinkifyIt
   *
   * [linkify-it](https://github.com/markdown-it/linkify-it) instance, used by
   * the [linkify](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/linkify.mjs)
   * core rule.
   **/
  this.linkify = new LinkifyIt();

  /**
   * MarkdownIt#validateLink(url) -> Boolean
   *
   * Link validation hook. CommonMark is very permissive; the default
   * implementation rejects `javascript:`, `vbscript:`, `file:` schemas and
   * most `data:...` payloads (a few embedded image types are allowed).
   *
   * Override to change the policy:
   *
   * ```javascript
   * var md = require('markdown-it')();
   * // enable everything
   * md.validateLink = function () { return true; }
   * ```
   **/
  this.validateLink = validateLink;

  /**
   * MarkdownIt#normalizeLink(url) -> String
   *
   * Hook that encodes a link url into machine-readable form
   * (url-encoding, punycode, etc.).
   **/
  this.normalizeLink = normalizeLink;

  /**
   * MarkdownIt#normalizeLinkText(url) -> String
   *
   * Hook that decodes a link url back into human-readable form.
   **/
  this.normalizeLinkText = normalizeLinkText;

  // Expose utils & helpers for easy access from plugins

  /**
   * MarkdownIt#utils -> utils
   *
   * Assorted utility functions handy for plugin authors. Details
   * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/common/utils.mjs).
   **/
  this.utils = utils;

  /**
   * MarkdownIt#helpers -> helpers
   *
   * Link-component parser functions, handy for plugin authors. Details
   * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/helpers).
   **/
  this.helpers = assign({}, helpers);

  this.options = {};
  this.configure(presetName);

  if (options) {
    this.set(options);
  }
}
 | ||
| 
 | ||
| /** chainable
 | ||
|  * MarkdownIt.set(options)
 | ||
|  *
 | ||
|  * Set parser options (in the same format as in constructor). Probably, you
 | ||
|  * will never need it, but you can change options after constructor call.
 | ||
|  *
 | ||
|  * ##### Example
 | ||
|  *
 | ||
|  * ```javascript
 | ||
|  * var md = require('markdown-it')()
 | ||
|  *             .set({ html: true, breaks: true })
 | ||
 *             .set({ typographer: true });
 | ||
|  * ```
 | ||
|  *
 | ||
|  * __Note:__ To achieve the best possible performance, don't modify a
 | ||
|  * `markdown-it` instance options on the fly. If you need multiple configurations
 | ||
|  * it's best to create multiple instances and initialize each with separate
 | ||
|  * config.
 | ||
|  **/
 | ||
// Merge the given options into this instance's options (see constructor for
// the accepted keys). Chainable: returns `this`.
MarkdownIt.prototype.set = function (options) {
  assign(this.options, options);
  return this;
};
 | ||
| 
 | ||
| /** chainable, internal
 | ||
|  * MarkdownIt.configure(presets)
 | ||
|  *
 | ||
 * Batch load of all options and component settings. This is internal method,
 | ||
|  * and you probably will not need it. But if you will - see available presets
 | ||
|  * and data structure [here](https://github.com/markdown-it/markdown-it/tree/master/lib/presets)
 | ||
|  *
 | ||
|  * We strongly recommend to use presets instead of direct config loads. That
 | ||
|  * will give better compatibility with next versions.
 | ||
|  **/
 | ||
// Apply a preset: either a named preset (looked up in `config`) or a raw
// preset object `{ options, components }`. Throws on an unknown preset name
// or an empty preset. Chainable: returns `this`.
MarkdownIt.prototype.configure = function (presets) {
  // Resolve a preset name to its definition.
  if (isString(presets)) {
    const presetName = presets;
    presets = config[presetName];
    if (!presets) {
      throw new Error('Wrong `markdown-it` preset "' + presetName + '", check name');
    }
  }

  if (!presets) {
    throw new Error('Wrong `markdown-it` preset, can\'t be empty');
  }

  if (presets.options) {
    this.set(presets.options);
  }

  // Restrict each parser chain (core/block/inline) to the rules the preset lists.
  if (presets.components) {
    for (const name of Object.keys(presets.components)) {
      const component = presets.components[name];
      if (component.rules) {
        this[name].ruler.enableOnly(component.rules);
      }
      if (component.rules2) {
        this[name].ruler2.enableOnly(component.rules2);
      }
    }
  }

  return this;
};
 | ||
| 
 | ||
| /** chainable
 | ||
|  * MarkdownIt.enable(list, ignoreInvalid)
 | ||
|  * - list (String|Array): rule name or list of rule names to enable
 | ||
|  * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
 | ||
|  *
 | ||
|  * Enable list or rules. It will automatically find appropriate components,
 | ||
|  * containing rules with given names. If rule not found, and `ignoreInvalid`
 | ||
|  * not set - throws exception.
 | ||
|  *
 | ||
|  * ##### Example
 | ||
|  *
 | ||
|  * ```javascript
 | ||
|  * var md = require('markdown-it')()
 | ||
|  *             .enable(['sub', 'sup'])
 | ||
|  *             .disable('smartquotes');
 | ||
|  * ```
 | ||
|  **/
 | ||
// Enable the named rule(s) across all parser chains. Throws when a name is
// unknown unless `ignoreInvalid` is set. Chainable: returns `this`.
MarkdownIt.prototype.enable = function (list, ignoreInvalid) {
  if (!Array.isArray(list)) {
    list = [list];
  }

  // Collect the names each ruler actually recognized.
  let result = [];
  for (const chain of ['core', 'block', 'inline']) {
    result = result.concat(this[chain].ruler.enable(list, true));
  }
  result = result.concat(this.inline.ruler2.enable(list, true));

  // Anything no ruler claimed is an unknown rule name.
  const missed = list.filter((name) => !result.includes(name));

  if (missed.length && !ignoreInvalid) {
    throw new Error('MarkdownIt. Failed to enable unknown rule(s): ' + missed);
  }

  return this;
};
 | ||
| 
 | ||
| /** chainable
 | ||
|  * MarkdownIt.disable(list, ignoreInvalid)
 | ||
|  * - list (String|Array): rule name or list of rule names to disable.
 | ||
|  * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
 | ||
|  *
 | ||
|  * The same as [[MarkdownIt.enable]], but turn specified rules off.
 | ||
|  **/
 | ||
// Disable the named rule(s) across all parser chains — the mirror of
// [[MarkdownIt.enable]]. Throws when a name is unknown unless
// `ignoreInvalid` is set. Chainable: returns `this`.
MarkdownIt.prototype.disable = function (list, ignoreInvalid) {
  if (!Array.isArray(list)) {
    list = [list];
  }

  // Collect the names each ruler actually recognized.
  let result = [];
  for (const chain of ['core', 'block', 'inline']) {
    result = result.concat(this[chain].ruler.disable(list, true));
  }
  result = result.concat(this.inline.ruler2.disable(list, true));

  // Anything no ruler claimed is an unknown rule name.
  const missed = list.filter((name) => !result.includes(name));

  if (missed.length && !ignoreInvalid) {
    throw new Error('MarkdownIt. Failed to disable unknown rule(s): ' + missed);
  }

  return this;
};
 | ||
| 
 | ||
| /** chainable
 | ||
|  * MarkdownIt.use(plugin, params)
 | ||
|  *
 | ||
|  * Load specified plugin with given params into current parser instance.
 | ||
|  * It's just a sugar to call `plugin(md, params)` with curring.
 | ||
|  *
 | ||
|  * ##### Example
 | ||
|  *
 | ||
|  * ```javascript
 | ||
|  * var iterator = require('markdown-it-for-inline');
 | ||
|  * var md = require('markdown-it')()
 | ||
|  *             .use(iterator, 'foo_replace', 'text', function (tokens, idx) {
 | ||
|  *               tokens[idx].content = tokens[idx].content.replace(/foo/g, 'bar');
 | ||
|  *             });
 | ||
|  * ```
 | ||
|  **/
 | ||
// Install a plugin: sugar for `plugin(md, ...params)`. Any extra arguments
// are forwarded to the plugin after the parser instance. Chainable.
MarkdownIt.prototype.use = function (plugin /*, params, ... */) {
  const params = Array.prototype.slice.call(arguments, 1);
  // Note: the plugin is invoked with itself as `this`, matching upstream behavior.
  plugin.apply(plugin, [this].concat(params));
  return this;
};
 | ||
| 
 | ||
| /** internal
 | ||
|  * MarkdownIt.parse(src, env) -> Array
 | ||
|  * - src (String): source string
 | ||
|  * - env (Object): environment sandbox
 | ||
|  *
 | ||
|  * Parse input string and return list of block tokens (special token type
 | ||
|  * "inline" will contain list of inline tokens). You should not call this
 | ||
|  * method directly, until you write custom renderer (for example, to produce
 | ||
|  * AST).
 | ||
|  *
 | ||
|  * `env` is used to pass data between "distributed" rules and return additional
 | ||
|  * metadata like reference info, needed for the renderer. It also can be used to
 | ||
|  * inject data in specific cases. Usually, you will be ok to pass `{}`,
 | ||
|  * and then pass updated object to renderer.
 | ||
|  **/
 | ||
// Parse `src` into an array of block tokens (the special "inline" tokens
// carry their inline children). Internal; call [[MarkdownIt.render]] unless
// you are building a custom renderer. `env` is a sandbox object shared by
// rules for cross-rule data and renderer metadata.
MarkdownIt.prototype.parse = function (src, env) {
  if (typeof src !== 'string') {
    throw new Error('Input data should be a String');
  }

  const coreState = new this.core.State(src, this, env);
  this.core.process(coreState);

  return coreState.tokens;
};
 | ||
| 
 | ||
| /**
 | ||
|  * MarkdownIt.render(src [, env]) -> String
 | ||
|  * - src (String): source string
 | ||
|  * - env (Object): environment sandbox
 | ||
|  *
 | ||
|  * Render markdown string into html. It does all magic for you :).
 | ||
|  *
 | ||
|  * `env` can be used to inject additional metadata (`{}` by default).
 | ||
|  * But you will not need it with high probability. See also comment
 | ||
|  * in [[MarkdownIt.parse]].
 | ||
|  **/
 | ||
// Render a markdown string to HTML. `env` (optional, defaults to `{}`) is a
// sandbox object passed through parsing and rendering; most callers can
// ignore it — see the comment on [[MarkdownIt.parse]].
MarkdownIt.prototype.render = function (src, env) {
  if (!env) {
    env = {};
  }
  const tokens = this.parse(src, env);
  return this.renderer.render(tokens, this.options, env);
};
 | ||
| 
 | ||
| /** internal
 | ||
|  * MarkdownIt.parseInline(src, env) -> Array
 | ||
|  * - src (String): source string
 | ||
|  * - env (Object): environment sandbox
 | ||
|  *
 | ||
|  * The same as [[MarkdownIt.parse]] but skip all block rules. It returns the
 | ||
|  * block tokens list with the single `inline` element, containing parsed inline
 | ||
|  * tokens in `children` property. Also updates `env` object.
 | ||
|  **/
 | ||
// Like [[MarkdownIt.parse]], but with block rules skipped: returns a token
// list containing a single `inline` token whose `children` hold the parsed
// inline tokens. Also updates `env`. Internal.
MarkdownIt.prototype.parseInline = function (src, env) {
  const coreState = new this.core.State(src, this, env);
  coreState.inlineMode = true; // tells the core chain to bypass block parsing
  this.core.process(coreState);
  return coreState.tokens;
};
 | ||
| 
 | ||
| /**
 | ||
|  * MarkdownIt.renderInline(src [, env]) -> String
 | ||
|  * - src (String): source string
 | ||
|  * - env (Object): environment sandbox
 | ||
|  *
 | ||
|  * Similar to [[MarkdownIt.render]] but for single paragraph content. Result
 | ||
|  * will NOT be wrapped into `<p>` tags.
 | ||
|  **/
 | ||
// Like [[MarkdownIt.render]], but for single-paragraph content: the result
// is NOT wrapped in `<p>` tags. `env` is optional (defaults to `{}`).
MarkdownIt.prototype.renderInline = function (src, env) {
  if (!env) {
    env = {};
  }
  const tokens = this.parseInline(src, env);
  return this.renderer.render(tokens, this.options, env);
};
 | ||
| 
 | ||
// Public entry point: the MarkdownIt constructor (callable with or without `new`).
module.exports = MarkdownIt;
 |