install marked

Mylloon 2022-06-23 02:00:30 +02:00
parent 01b17fad61
commit d140e0262f
Signed by: Anri
GPG key ID: A82D63DFF8D1317F
24 changed files with 12401 additions and 2 deletions

1
node_modules/.bin/marked generated vendored Symbolic link

@@ -0,0 +1 @@
../marked/bin/marked.js

11
node_modules/.package-lock.json generated vendored

@@ -435,6 +435,17 @@
"node": ">=10"
}
},
"node_modules/marked": {
"version": "4.0.17",
"resolved": "https://registry.npmjs.org/marked/-/marked-4.0.17.tgz",
"integrity": "sha512-Wfk0ATOK5iPxM4ptrORkFemqroz0ZDxp5MWfYA7H/F+wO17NRWV5Ypxi6p3g2Xmw2bKeiYOl6oVnLHKxBA0VhA==",
"bin": {
"marked": "bin/marked.js"
},
"engines": {
"node": ">= 12"
}
},
"node_modules/media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",

44
node_modules/marked/LICENSE.md generated vendored Normal file

@@ -0,0 +1,44 @@
# License information
## Contribution License Agreement
If you contribute code to this project, you are implicitly allowing your code
to be distributed under the MIT license. You are also implicitly verifying that
all code is your original work. `</legalese>`
## Marked
Copyright (c) 2018+, MarkedJS (https://github.com/markedjs/)
Copyright (c) 2011-2018, Christopher Jeffrey (https://github.com/chjj/)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
## Markdown
Copyright © 2004, John Gruber
http://daringfireball.net/
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name “Markdown” nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
This software is provided by the copyright holders and contributors “as is” and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the copyright owner or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.

98
node_modules/marked/README.md generated vendored Normal file

@@ -0,0 +1,98 @@
<a href="https://marked.js.org">
<img width="60px" height="60px" src="https://marked.js.org/img/logo-black.svg" align="right" />
</a>
# Marked
[![npm](https://badgen.net/npm/v/marked)](https://www.npmjs.com/package/marked)
[![gzip size](https://badgen.net/badgesize/gzip/https://cdn.jsdelivr.net/npm/marked/marked.min.js)](https://cdn.jsdelivr.net/npm/marked/marked.min.js)
[![install size](https://badgen.net/packagephobia/install/marked)](https://packagephobia.now.sh/result?p=marked)
[![downloads](https://badgen.net/npm/dt/marked)](https://www.npmjs.com/package/marked)
[![github actions](https://github.com/markedjs/marked/workflows/Tests/badge.svg)](https://github.com/markedjs/marked/actions)
[![snyk](https://snyk.io/test/npm/marked/badge.svg)](https://snyk.io/test/npm/marked)
- ⚡ built for speed
- ⬇️ low-level compiler for parsing markdown without caching or blocking for long periods of time
- ⚖️ light-weight while implementing all markdown features from the supported flavors & specifications
- 🌐 works in a browser, on a server, or from a command line interface (CLI)
## Demo
Check out the [demo page](https://marked.js.org/demo/) to see marked in action ⛹️
## Docs
Our [documentation pages](https://marked.js.org) are also rendered using marked 💯
Also read about:
* [Options](https://marked.js.org/#/USING_ADVANCED.md)
* [Extensibility](https://marked.js.org/#/USING_PRO.md)
## Compatibility
**Node.js:** Only [current and LTS](https://nodejs.org/en/about/releases/) Node.js versions are supported. End of life Node.js versions may become incompatible with Marked at any point in time.
**Browser:** Not IE11 :)
## Installation
**CLI:**
```sh
npm install -g marked
```
**In-browser:**
```sh
npm install marked
```
## Usage
### Warning: 🚨 Marked does not [sanitize](https://marked.js.org/#/USING_ADVANCED.md#options) the output HTML. Please use a sanitize library, like [DOMPurify](https://github.com/cure53/DOMPurify) (recommended), [sanitize-html](https://github.com/apostrophecms/sanitize-html) or [insane](https://github.com/bevacqua/insane) on the *output* HTML! 🚨
```js
DOMPurify.sanitize(marked.parse(`<img src="x" onerror="alert('not happening')">`));
```
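**Node** (a minimal sketch, assuming ESM; `marked.parse` is the same entry point the CLI and browser examples below use)
```js
import { marked } from 'marked';

// Parse Markdown to an HTML string. Remember to sanitize the *output*
// (see the warning above) before inserting it into a page.
const html = marked.parse('# Marked in Node\n\nRendered by **marked**.');
console.log(html);
```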
**CLI**
```bash
# Example with stdin input
$ marked -o hello.html
hello world
^D
$ cat hello.html
<p>hello world</p>
```
```bash
# Print all options
$ marked --help
```
**Browser**
```html
<!doctype html>
<html>
<head>
<meta charset="utf-8"/>
<title>Marked in the browser</title>
</head>
<body>
<div id="content"></div>
<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
<script>
document.getElementById('content').innerHTML =
marked.parse('# Marked in the browser\n\nRendered by **marked**.');
</script>
</body>
</html>
```
## License
Copyright (c) 2011-2022, Christopher Jeffrey. (MIT License)

217
node_modules/marked/bin/marked.js generated vendored Executable file

@@ -0,0 +1,217 @@
#!/usr/bin/env node
/**
* Marked CLI
* Copyright (c) 2011-2013, Christopher Jeffrey (MIT License)
*/
import { promises } from 'fs';
import { marked } from '../lib/marked.esm.js';
const { readFile, writeFile } = promises;
/**
* Man Page
*/
async function help() {
const { spawn } = await import('child_process');
const options = {
cwd: process.cwd(),
env: process.env,
setsid: false,
stdio: 'inherit'
};
const { dirname, resolve } = await import('path');
const { fileURLToPath } = await import('url');
const __dirname = dirname(fileURLToPath(import.meta.url));
const helpText = await readFile(resolve(__dirname, '../man/marked.1.txt'), 'utf8');
// eslint-disable-next-line promise/param-names
await new Promise(res => {
spawn('man', [resolve(__dirname, '../man/marked.1')], options)
.on('error', () => {
console.log(helpText);
})
.on('close', res);
});
}
async function version() {
const { createRequire } = await import('module');
const require = createRequire(import.meta.url);
const pkg = require('../package.json');
console.log(pkg.version);
}
/**
* Main
*/
async function main(argv) {
const files = [];
const options = {};
let input;
let output;
let string;
let arg;
let tokens;
let opt;
function getarg() {
let arg = argv.shift();
if (arg.indexOf('--') === 0) {
// e.g. --opt
arg = arg.split('=');
if (arg.length > 1) {
// e.g. --opt=val
argv.unshift(arg.slice(1).join('='));
}
arg = arg[0];
} else if (arg[0] === '-') {
if (arg.length > 2) {
// e.g. -abc
argv = arg.substring(1).split('').map(function(ch) {
return '-' + ch;
}).concat(argv);
arg = argv.shift();
} else {
// e.g. -a
}
} else {
// e.g. foo
}
return arg;
}
while (argv.length) {
arg = getarg();
switch (arg) {
case '-o':
case '--output':
output = argv.shift();
break;
case '-i':
case '--input':
input = argv.shift();
break;
case '-s':
case '--string':
string = argv.shift();
break;
case '-t':
case '--tokens':
tokens = true;
break;
case '-h':
case '--help':
return await help();
case '-v':
case '--version':
return await version();
default:
if (arg.indexOf('--') === 0) {
opt = camelize(arg.replace(/^--(no-)?/, ''));
if (!marked.defaults.hasOwnProperty(opt)) {
continue;
}
if (arg.indexOf('--no-') === 0) {
options[opt] = typeof marked.defaults[opt] !== 'boolean'
? null
: false;
} else {
options[opt] = typeof marked.defaults[opt] !== 'boolean'
? argv.shift()
: true;
}
} else {
files.push(arg);
}
break;
}
}
async function getData() {
if (!input) {
if (files.length <= 2) {
if (string) {
return string;
}
return await getStdin();
}
input = files.pop();
}
return await readFile(input, 'utf8');
}
const data = await getData();
const html = tokens
? JSON.stringify(marked.lexer(data, options), null, 2)
: marked(data, options);
if (output) {
return await writeFile(output, html);
}
process.stdout.write(html + '\n');
}
/**
* Helpers
*/
function getStdin() {
return new Promise((resolve, reject) => {
const stdin = process.stdin;
let buff = '';
stdin.setEncoding('utf8');
stdin.on('data', function(data) {
buff += data;
});
stdin.on('error', function(err) {
reject(err);
});
stdin.on('end', function() {
resolve(buff);
});
stdin.resume();
});
}
/**
* @param {string} text
*/
function camelize(text) {
return text.replace(/(\w)-(\w)/g, function(_, a, b) {
return a + b.toUpperCase();
});
}
function handleError(err) {
if (err.code === 'ENOENT') {
console.error('marked: output to ' + err.path + ': No such directory');
return process.exit(1);
}
throw err;
}
/**
* Expose / Entry Point
*/
process.title = 'marked';
main(process.argv.slice()).then(code => {
process.exit(code || 0);
}).catch(err => {
handleError(err);
});

3050
node_modules/marked/lib/marked.cjs generated vendored Normal file

File diff suppressed because one or more lines are too long

2794
node_modules/marked/lib/marked.esm.js generated vendored Normal file

File diff suppressed because it is too large

3056
node_modules/marked/lib/marked.umd.js generated vendored Normal file

File diff suppressed because one or more lines are too long

92
node_modules/marked/man/marked.1 generated vendored Normal file

@@ -0,0 +1,92 @@
.ds q \N'34'
.TH marked 1
.SH NAME
marked \- a javascript markdown parser
.SH SYNOPSIS
.B marked
[\-o \fI<output>\fP] [\-i \fI<input>\fP] [\-s \fI<string>\fP] [\-\-help]
[\-\-tokens] [\-\-pedantic] [\-\-gfm]
[\-\-breaks] [\-\-sanitize]
[\-\-smart\-lists] [\-\-lang\-prefix \fI<prefix>\fP]
[\-\-no\-etc...] [\-\-silent] [\fIfilename\fP]
.SH DESCRIPTION
.B marked
is a full-featured javascript markdown parser, built for speed.
It also includes multiple GFM features.
.SH EXAMPLES
.TP
cat in.md | marked > out.html
.TP
echo "hello *world*" | marked
.TP
marked \-o out.html \-i in.md \-\-gfm
.TP
marked \-\-output="hello world.html" \-i in.md \-\-no-breaks
.SH OPTIONS
.TP
.BI \-o,\ \-\-output\ [\fIoutput\fP]
Specify file output. If none is specified, write to stdout.
.TP
.BI \-i,\ \-\-input\ [\fIinput\fP]
Specify file input, otherwise use last argument as input file.
If no input file is specified, read from stdin.
.TP
.BI \-s,\ \-\-string\ [\fIstring\fP]
Specify string input instead of a file.
.TP
.BI \-t,\ \-\-tokens
Output a token stream instead of html.
.TP
.BI \-\-pedantic
Conform to obscure parts of markdown.pl as much as possible.
Don't fix original markdown bugs.
.TP
.BI \-\-gfm
Enable github flavored markdown.
.TP
.BI \-\-breaks
Enable GFM line breaks. Only works with the gfm option.
.TP
.BI \-\-sanitize
Sanitize output. Ignore any HTML input.
.TP
.BI \-\-smart\-lists
Use smarter list behavior than the original markdown.
.TP
.BI \-\-lang\-prefix\ [\fIprefix\fP]
Set the prefix for code block classes.
.TP
.BI \-\-mangle
Mangle email addresses.
.TP
.BI \-\-no\-sanitize,\ \-no-etc...
The inverse of any of the marked options above.
.TP
.BI \-\-silent
Silence error output.
.TP
.BI \-h,\ \-\-help
Display help information.
.SH CONFIGURATION
For configuring and running programmatically.
.B Example
import { marked } from 'marked';
marked('*foo*', { gfm: true });
.SH BUGS
Please report any bugs to https://github.com/markedjs/marked.
.SH LICENSE
Copyright (c) 2011-2014, Christopher Jeffrey (MIT License).
.SH "SEE ALSO"
.BR markdown(1),
.BR node.js(1)

86
node_modules/marked/man/marked.1.txt generated vendored Normal file

@@ -0,0 +1,86 @@
marked(1) General Commands Manual marked(1)
NAME
marked - a javascript markdown parser
SYNOPSIS
marked [-o <output>] [-i <input>] [-s <string>] [--help] [--tokens]
[--pedantic] [--gfm] [--breaks] [--sanitize] [--smart-lists]
[--lang-prefix <prefix>] [--no-etc...] [--silent] [filename]
DESCRIPTION
marked is a full-featured javascript markdown parser, built for speed.
It also includes multiple GFM features.
EXAMPLES
cat in.md | marked > out.html
echo "hello *world*" | marked
marked -o out.html -i in.md --gfm
marked --output="hello world.html" -i in.md --no-breaks
OPTIONS
-o, --output [output]
Specify file output. If none is specified, write to stdout.
-i, --input [input]
Specify file input, otherwise use last argument as input file.
If no input file is specified, read from stdin.
-s, --string [string]
Specify string input instead of a file.
-t, --tokens
Output a token stream instead of html.
--pedantic
Conform to obscure parts of markdown.pl as much as possible.
Don't fix original markdown bugs.
--gfm Enable github flavored markdown.
--breaks
Enable GFM line breaks. Only works with the gfm option.
--sanitize
Sanitize output. Ignore any HTML input.
--smart-lists
Use smarter list behavior than the original markdown.
--lang-prefix [prefix]
Set the prefix for code block classes.
--mangle
Mangle email addresses.
--no-sanitize, -no-etc...
The inverse of any of the marked options above.
--silent
Silence error output.
-h, --help
Display help information.
CONFIGURATION
For configuring and running programmatically.
Example
import { marked } from 'marked';
marked('*foo*', { gfm: true });
BUGS
Please report any bugs to https://github.com/markedjs/marked.
LICENSE
Copyright (c) 2011-2014, Christopher Jeffrey (MIT License).
SEE ALSO
markdown(1), node.js(1)
marked(1)

6
node_modules/marked/marked.min.js generated vendored Normal file

File diff suppressed because one or more lines are too long

97
node_modules/marked/package.json generated vendored Normal file

@@ -0,0 +1,97 @@
{
"name": "marked",
"description": "A markdown parser built for speed",
"author": "Christopher Jeffrey",
"version": "4.0.17",
"type": "module",
"main": "./lib/marked.cjs",
"module": "./lib/marked.esm.js",
"browser": "./lib/marked.umd.js",
"bin": {
"marked": "bin/marked.js"
},
"man": "./man/marked.1",
"files": [
"bin/",
"lib/",
"src/",
"man/",
"marked.min.js"
],
"exports": {
".": {
"import": "./lib/marked.esm.js",
"default": "./lib/marked.cjs"
},
"./package.json": "./package.json"
},
"repository": "git://github.com/markedjs/marked.git",
"homepage": "https://marked.js.org",
"bugs": {
"url": "http://github.com/markedjs/marked/issues"
},
"license": "MIT",
"keywords": [
"markdown",
"markup",
"html"
],
"tags": [
"markdown",
"markup",
"html"
],
"devDependencies": {
"@babel/core": "^7.18.2",
"@babel/preset-env": "^7.18.2",
"@markedjs/html-differ": "^4.0.2",
"@rollup/plugin-babel": "^5.3.1",
"@rollup/plugin-commonjs": "^22.0.0",
"@semantic-release/commit-analyzer": "^9.0.2",
"@semantic-release/git": "^10.0.1",
"@semantic-release/github": "^8.0.4",
"@semantic-release/npm": "^9.0.1",
"@semantic-release/release-notes-generator": "^10.0.3",
"cheerio": "^1.0.0-rc.11",
"commonmark": "0.30.0",
"eslint": "^8.17.0",
"eslint-config-standard": "^17.0.0",
"eslint-plugin-import": "^2.26.0",
"eslint-plugin-n": "^15.2.1",
"eslint-plugin-promise": "^6.0.0",
"front-matter": "^4.0.2",
"highlight.js": "^11.5.1",
"jasmine": "^4.1.0",
"markdown-it": "13.0.1",
"node-fetch": "^3.2.5",
"rollup": "^2.75.5",
"rollup-plugin-license": "^2.8.0",
"semantic-release": "^19.0.3",
"titleize": "^3.0.0",
"uglify-js": "^3.16.0",
"vuln-regex-detector": "^1.3.0"
},
"scripts": {
"test": "jasmine --config=jasmine.json",
"test:all": "npm test && npm run test:lint",
"test:unit": "npm test -- test/unit/**/*-spec.js",
"test:specs": "npm test -- test/specs/**/*-spec.js",
"test:lint": "eslint .",
"test:redos": "node test/vuln-regex.js",
"test:update": "node test/update-specs.js",
"rules": "node test/rules.js",
"bench": "npm run rollup && node test/bench.js",
"lint": "eslint --fix .",
"build:reset": "git checkout upstream/master lib/marked.cjs lib/marked.umd.js lib/marked.esm.js marked.min.js",
"build": "npm run rollup && npm run minify",
"build:docs": "node build-docs.js",
"rollup": "npm run rollup:umd && npm run rollup:esm",
"rollup:umd": "rollup -c rollup.config.js",
"rollup:esm": "rollup -c rollup.config.esm.js",
"minify": "uglifyjs lib/marked.umd.js -cm --comments /Copyright/ -o marked.min.js",
"preversion": "npm run build && (git diff --quiet || git commit -am build)"
},
"engines": {
"node": ">= 12"
}
}
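
The `exports` map above is what gives the package dual entry points; a minimal consumer-side sketch (assumed behavior of Node's conditional exports, not text from the package itself):
```js
// ESM: the "import" condition resolves to ./lib/marked.esm.js
import { marked } from 'marked';
console.log(marked.parse('*hello from ESM*'));

// CommonJS would hit the "default" condition (./lib/marked.cjs) instead:
//   const { marked } = require('marked');
```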

501
node_modules/marked/src/Lexer.js generated vendored Normal file

@@ -0,0 +1,501 @@
import { Tokenizer } from './Tokenizer.js';
import { defaults } from './defaults.js';
import { block, inline } from './rules.js';
import { repeatString } from './helpers.js';
/**
* smartypants text replacement
* @param {string} text
*/
function smartypants(text) {
return text
// em-dashes
.replace(/---/g, '\u2014')
// en-dashes
.replace(/--/g, '\u2013')
// opening singles
.replace(/(^|[-\u2014/(\[{"\s])'/g, '$1\u2018')
// closing singles & apostrophes
.replace(/'/g, '\u2019')
// opening doubles
.replace(/(^|[-\u2014/(\[{\u2018\s])"/g, '$1\u201c')
// closing doubles
.replace(/"/g, '\u201d')
// ellipses
.replace(/\.{3}/g, '\u2026');
}
/**
* mangle email addresses
* @param {string} text
*/
function mangle(text) {
let out = '',
i,
ch;
const l = text.length;
for (i = 0; i < l; i++) {
ch = text.charCodeAt(i);
if (Math.random() > 0.5) {
ch = 'x' + ch.toString(16);
}
out += '&#' + ch + ';';
}
return out;
}
/**
* Block Lexer
*/
export class Lexer {
constructor(options) {
this.tokens = [];
this.tokens.links = Object.create(null);
this.options = options || defaults;
this.options.tokenizer = this.options.tokenizer || new Tokenizer();
this.tokenizer = this.options.tokenizer;
this.tokenizer.options = this.options;
this.tokenizer.lexer = this;
this.inlineQueue = [];
this.state = {
inLink: false,
inRawBlock: false,
top: true
};
const rules = {
block: block.normal,
inline: inline.normal
};
if (this.options.pedantic) {
rules.block = block.pedantic;
rules.inline = inline.pedantic;
} else if (this.options.gfm) {
rules.block = block.gfm;
if (this.options.breaks) {
rules.inline = inline.breaks;
} else {
rules.inline = inline.gfm;
}
}
this.tokenizer.rules = rules;
}
/**
* Expose Rules
*/
static get rules() {
return {
block,
inline
};
}
/**
* Static Lex Method
*/
static lex(src, options) {
const lexer = new Lexer(options);
return lexer.lex(src);
}
/**
* Static Lex Inline Method
*/
static lexInline(src, options) {
const lexer = new Lexer(options);
return lexer.inlineTokens(src);
}
/**
* Preprocessing
*/
lex(src) {
src = src
.replace(/\r\n|\r/g, '\n');
this.blockTokens(src, this.tokens);
let next;
while (next = this.inlineQueue.shift()) {
this.inlineTokens(next.src, next.tokens);
}
return this.tokens;
}
/**
* Lexing
*/
blockTokens(src, tokens = []) {
if (this.options.pedantic) {
src = src.replace(/\t/g, ' ').replace(/^ +$/gm, '');
} else {
src = src.replace(/^( *)(\t+)/gm, (_, leading, tabs) => {
return leading + ' '.repeat(tabs.length);
});
}
let token, lastToken, cutSrc, lastParagraphClipped;
while (src) {
if (this.options.extensions
&& this.options.extensions.block
&& this.options.extensions.block.some((extTokenizer) => {
if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
src = src.substring(token.raw.length);
tokens.push(token);
return true;
}
return false;
})) {
continue;
}
// newline
if (token = this.tokenizer.space(src)) {
src = src.substring(token.raw.length);
if (token.raw.length === 1 && tokens.length > 0) {
// if there's a single \n as a spacer, it's terminating the last line,
// so move it there so that we don't get unnecessary paragraph tags
tokens[tokens.length - 1].raw += '\n';
} else {
tokens.push(token);
}
continue;
}
// code
if (token = this.tokenizer.code(src)) {
src = src.substring(token.raw.length);
lastToken = tokens[tokens.length - 1];
// An indented code block cannot interrupt a paragraph.
if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) {
lastToken.raw += '\n' + token.raw;
lastToken.text += '\n' + token.text;
this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
} else {
tokens.push(token);
}
continue;
}
// fences
if (token = this.tokenizer.fences(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// heading
if (token = this.tokenizer.heading(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// hr
if (token = this.tokenizer.hr(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// blockquote
if (token = this.tokenizer.blockquote(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// list
if (token = this.tokenizer.list(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// html
if (token = this.tokenizer.html(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// def
if (token = this.tokenizer.def(src)) {
src = src.substring(token.raw.length);
lastToken = tokens[tokens.length - 1];
if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) {
lastToken.raw += '\n' + token.raw;
lastToken.text += '\n' + token.raw;
this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
} else if (!this.tokens.links[token.tag]) {
this.tokens.links[token.tag] = {
href: token.href,
title: token.title
};
}
continue;
}
// table (gfm)
if (token = this.tokenizer.table(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// lheading
if (token = this.tokenizer.lheading(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// top-level paragraph
// prevent paragraph consuming extensions by clipping 'src' to extension start
cutSrc = src;
if (this.options.extensions && this.options.extensions.startBlock) {
let startIndex = Infinity;
const tempSrc = src.slice(1);
let tempStart;
this.options.extensions.startBlock.forEach(function(getStartIndex) {
tempStart = getStartIndex.call({ lexer: this }, tempSrc);
if (typeof tempStart === 'number' && tempStart >= 0) { startIndex = Math.min(startIndex, tempStart); }
});
if (startIndex < Infinity && startIndex >= 0) {
cutSrc = src.substring(0, startIndex + 1);
}
}
if (this.state.top && (token = this.tokenizer.paragraph(cutSrc))) {
lastToken = tokens[tokens.length - 1];
if (lastParagraphClipped && lastToken.type === 'paragraph') {
lastToken.raw += '\n' + token.raw;
lastToken.text += '\n' + token.text;
this.inlineQueue.pop();
this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
} else {
tokens.push(token);
}
lastParagraphClipped = (cutSrc.length !== src.length);
src = src.substring(token.raw.length);
continue;
}
// text
if (token = this.tokenizer.text(src)) {
src = src.substring(token.raw.length);
lastToken = tokens[tokens.length - 1];
if (lastToken && lastToken.type === 'text') {
lastToken.raw += '\n' + token.raw;
lastToken.text += '\n' + token.text;
this.inlineQueue.pop();
this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
} else {
tokens.push(token);
}
continue;
}
if (src) {
const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
if (this.options.silent) {
console.error(errMsg);
break;
} else {
throw new Error(errMsg);
}
}
}
this.state.top = true;
return tokens;
}
inline(src, tokens) {
this.inlineQueue.push({ src, tokens });
}
/**
* Lexing/Compiling
*/
inlineTokens(src, tokens = []) {
let token, lastToken, cutSrc;
// String with links masked to avoid interference with em and strong
let maskedSrc = src;
let match;
let keepPrevChar, prevChar;
// Mask out reflinks
if (this.tokens.links) {
const links = Object.keys(this.tokens.links);
if (links.length > 0) {
while ((match = this.tokenizer.rules.inline.reflinkSearch.exec(maskedSrc)) != null) {
if (links.includes(match[0].slice(match[0].lastIndexOf('[') + 1, -1))) {
maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex);
}
}
}
}
// Mask out other blocks
while ((match = this.tokenizer.rules.inline.blockSkip.exec(maskedSrc)) != null) {
maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);
}
// Mask out escaped em & strong delimiters
while ((match = this.tokenizer.rules.inline.escapedEmSt.exec(maskedSrc)) != null) {
maskedSrc = maskedSrc.slice(0, match.index) + '++' + maskedSrc.slice(this.tokenizer.rules.inline.escapedEmSt.lastIndex);
}
while (src) {
if (!keepPrevChar) {
prevChar = '';
}
keepPrevChar = false;
// extensions
if (this.options.extensions
&& this.options.extensions.inline
&& this.options.extensions.inline.some((extTokenizer) => {
if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
src = src.substring(token.raw.length);
tokens.push(token);
return true;
}
return false;
})) {
continue;
}
// escape
if (token = this.tokenizer.escape(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// tag
if (token = this.tokenizer.tag(src)) {
src = src.substring(token.raw.length);
lastToken = tokens[tokens.length - 1];
if (lastToken && token.type === 'text' && lastToken.type === 'text') {
lastToken.raw += token.raw;
lastToken.text += token.text;
} else {
tokens.push(token);
}
continue;
}
// link
if (token = this.tokenizer.link(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// reflink, nolink
if (token = this.tokenizer.reflink(src, this.tokens.links)) {
src = src.substring(token.raw.length);
lastToken = tokens[tokens.length - 1];
if (lastToken && token.type === 'text' && lastToken.type === 'text') {
lastToken.raw += token.raw;
lastToken.text += token.text;
} else {
tokens.push(token);
}
continue;
}
// em & strong
if (token = this.tokenizer.emStrong(src, maskedSrc, prevChar)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// code
if (token = this.tokenizer.codespan(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// br
if (token = this.tokenizer.br(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// del (gfm)
if (token = this.tokenizer.del(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// autolink
if (token = this.tokenizer.autolink(src, mangle)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// url (gfm)
if (!this.state.inLink && (token = this.tokenizer.url(src, mangle))) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// text
// prevent inlineText consuming extensions by clipping 'src' to extension start
cutSrc = src;
if (this.options.extensions && this.options.extensions.startInline) {
let startIndex = Infinity;
const tempSrc = src.slice(1);
let tempStart;
this.options.extensions.startInline.forEach(function(getStartIndex) {
tempStart = getStartIndex.call({ lexer: this }, tempSrc);
if (typeof tempStart === 'number' && tempStart >= 0) { startIndex = Math.min(startIndex, tempStart); }
});
if (startIndex < Infinity && startIndex >= 0) {
cutSrc = src.substring(0, startIndex + 1);
}
}
if (token = this.tokenizer.inlineText(cutSrc, smartypants)) {
src = src.substring(token.raw.length);
if (token.raw.slice(-1) !== '_') { // Track prevChar before string of ____ started
prevChar = token.raw.slice(-1);
}
keepPrevChar = true;
lastToken = tokens[tokens.length - 1];
if (lastToken && lastToken.type === 'text') {
lastToken.raw += token.raw;
lastToken.text += token.text;
} else {
tokens.push(token);
}
continue;
}
if (src) {
const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
if (this.options.silent) {
console.error(errMsg);
break;
} else {
throw new Error(errMsg);
}
}
}
return tokens;
}
}
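
A short sketch of the lexer on its own; `marked.lexer` is the public wrapper around `Lexer.lex` above (the same call the CLI's `--tokens` flag uses):
```js
import { marked } from 'marked';

// Tokenize without rendering. Each block token has `type` and `raw`;
// tokens with inline content also carry a nested `tokens` array.
const tokens = marked.lexer('# Title\n\nSome *emphasis* here.');
console.log(tokens.map(t => t.type)); // e.g. [ 'heading', 'paragraph' ]
```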

286
node_modules/marked/src/Parser.js generated vendored Normal file

@@ -0,0 +1,286 @@
import { Renderer } from './Renderer.js';
import { TextRenderer } from './TextRenderer.js';
import { Slugger } from './Slugger.js';
import { defaults } from './defaults.js';
import {
unescape
} from './helpers.js';
/**
* Parsing & Compiling
*/
export class Parser {
constructor(options) {
this.options = options || defaults;
this.options.renderer = this.options.renderer || new Renderer();
this.renderer = this.options.renderer;
this.renderer.options = this.options;
this.textRenderer = new TextRenderer();
this.slugger = new Slugger();
}
/**
* Static Parse Method
*/
static parse(tokens, options) {
const parser = new Parser(options);
return parser.parse(tokens);
}
/**
* Static Parse Inline Method
*/
static parseInline(tokens, options) {
const parser = new Parser(options);
return parser.parseInline(tokens);
}
/**
* Parse Loop
*/
parse(tokens, top = true) {
let out = '',
i,
j,
k,
l2,
l3,
row,
cell,
header,
body,
token,
ordered,
start,
loose,
itemBody,
item,
checked,
task,
checkbox,
ret;
const l = tokens.length;
for (i = 0; i < l; i++) {
token = tokens[i];
// Run any renderer extensions
if (this.options.extensions && this.options.extensions.renderers && this.options.extensions.renderers[token.type]) {
ret = this.options.extensions.renderers[token.type].call({ parser: this }, token);
if (ret !== false || !['space', 'hr', 'heading', 'code', 'table', 'blockquote', 'list', 'html', 'paragraph', 'text'].includes(token.type)) {
out += ret || '';
continue;
}
}
switch (token.type) {
case 'space': {
continue;
}
case 'hr': {
out += this.renderer.hr();
continue;
}
case 'heading': {
out += this.renderer.heading(
this.parseInline(token.tokens),
token.depth,
unescape(this.parseInline(token.tokens, this.textRenderer)),
this.slugger);
continue;
}
case 'code': {
out += this.renderer.code(token.text,
token.lang,
token.escaped);
continue;
}
case 'table': {
header = '';
// header
cell = '';
l2 = token.header.length;
for (j = 0; j < l2; j++) {
cell += this.renderer.tablecell(
this.parseInline(token.header[j].tokens),
{ header: true, align: token.align[j] }
);
}
header += this.renderer.tablerow(cell);
body = '';
l2 = token.rows.length;
for (j = 0; j < l2; j++) {
row = token.rows[j];
cell = '';
l3 = row.length;
for (k = 0; k < l3; k++) {
cell += this.renderer.tablecell(
this.parseInline(row[k].tokens),
{ header: false, align: token.align[k] }
);
}
body += this.renderer.tablerow(cell);
}
out += this.renderer.table(header, body);
continue;
}
case 'blockquote': {
body = this.parse(token.tokens);
out += this.renderer.blockquote(body);
continue;
}
case 'list': {
ordered = token.ordered;
start = token.start;
loose = token.loose;
l2 = token.items.length;
body = '';
for (j = 0; j < l2; j++) {
item = token.items[j];
checked = item.checked;
task = item.task;
itemBody = '';
if (item.task) {
checkbox = this.renderer.checkbox(checked);
if (loose) {
if (item.tokens.length > 0 && item.tokens[0].type === 'paragraph') {
item.tokens[0].text = checkbox + ' ' + item.tokens[0].text;
if (item.tokens[0].tokens && item.tokens[0].tokens.length > 0 && item.tokens[0].tokens[0].type === 'text') {
item.tokens[0].tokens[0].text = checkbox + ' ' + item.tokens[0].tokens[0].text;
}
} else {
item.tokens.unshift({
type: 'text',
text: checkbox
});
}
} else {
itemBody += checkbox;
}
}
itemBody += this.parse(item.tokens, loose);
body += this.renderer.listitem(itemBody, task, checked);
}
out += this.renderer.list(body, ordered, start);
continue;
}
case 'html': {
// TODO parse inline content if parameter markdown=1
out += this.renderer.html(token.text);
continue;
}
case 'paragraph': {
out += this.renderer.paragraph(this.parseInline(token.tokens));
continue;
}
case 'text': {
body = token.tokens ? this.parseInline(token.tokens) : token.text;
while (i + 1 < l && tokens[i + 1].type === 'text') {
token = tokens[++i];
body += '\n' + (token.tokens ? this.parseInline(token.tokens) : token.text);
}
out += top ? this.renderer.paragraph(body) : body;
continue;
}
default: {
const errMsg = 'Token with "' + token.type + '" type was not found.';
if (this.options.silent) {
console.error(errMsg);
return;
} else {
throw new Error(errMsg);
}
}
}
}
return out;
}
/**
* Parse Inline Tokens
*/
parseInline(tokens, renderer) {
renderer = renderer || this.renderer;
let out = '',
i,
token,
ret;
const l = tokens.length;
for (i = 0; i < l; i++) {
token = tokens[i];
// Run any renderer extensions
if (this.options.extensions && this.options.extensions.renderers && this.options.extensions.renderers[token.type]) {
ret = this.options.extensions.renderers[token.type].call({ parser: this }, token);
if (ret !== false || !['escape', 'html', 'link', 'image', 'strong', 'em', 'codespan', 'br', 'del', 'text'].includes(token.type)) {
out += ret || '';
continue;
}
}
switch (token.type) {
case 'escape': {
out += renderer.text(token.text);
break;
}
case 'html': {
out += renderer.html(token.text);
break;
}
case 'link': {
out += renderer.link(token.href, token.title, this.parseInline(token.tokens, renderer));
break;
}
case 'image': {
out += renderer.image(token.href, token.title, token.text);
break;
}
case 'strong': {
out += renderer.strong(this.parseInline(token.tokens, renderer));
break;
}
case 'em': {
out += renderer.em(this.parseInline(token.tokens, renderer));
break;
}
case 'codespan': {
out += renderer.codespan(token.text);
break;
}
case 'br': {
out += renderer.br();
break;
}
case 'del': {
out += renderer.del(this.parseInline(token.tokens, renderer));
break;
}
case 'text': {
out += renderer.text(token.text);
break;
}
default: {
const errMsg = 'Token with "' + token.type + '" type was not found.';
if (this.options.silent) {
console.error(errMsg);
return;
} else {
throw new Error(errMsg);
}
}
}
}
return out;
}
}
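
`marked.parser` wraps `Parser.parse` in the same way, so lexing and parsing can be run as two explicit steps (a sketch using the public API; equivalent to `marked.parse(src)` when the tokens are not modified in between):
```js
import { marked } from 'marked';

const tokens = marked.lexer('- one\n- two');
// ...token post-processing could happen here...
const html = marked.parser(tokens);
console.log(html); // <ul> markup for the two-item list
```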

203
node_modules/marked/src/Renderer.js generated vendored Normal file

@@ -0,0 +1,203 @@
import { defaults } from './defaults.js';
import {
cleanUrl,
escape
} from './helpers.js';
/**
* Renderer
*/
export class Renderer {
constructor(options) {
this.options = options || defaults;
}
code(code, infostring, escaped) {
const lang = (infostring || '').match(/\S*/)[0];
if (this.options.highlight) {
const out = this.options.highlight(code, lang);
if (out != null && out !== code) {
escaped = true;
code = out;
}
}
code = code.replace(/\n$/, '') + '\n';
if (!lang) {
return '<pre><code>'
+ (escaped ? code : escape(code, true))
+ '</code></pre>\n';
}
return '<pre><code class="'
+ this.options.langPrefix
+ escape(lang, true)
+ '">'
+ (escaped ? code : escape(code, true))
+ '</code></pre>\n';
}
/**
* @param {string} quote
*/
blockquote(quote) {
return `<blockquote>\n${quote}</blockquote>\n`;
}
html(html) {
return html;
}
/**
* @param {string} text
* @param {string} level
* @param {string} raw
* @param {any} slugger
*/
heading(text, level, raw, slugger) {
if (this.options.headerIds) {
const id = this.options.headerPrefix + slugger.slug(raw);
return `<h${level} id="${id}">${text}</h${level}>\n`;
}
// ignore IDs
return `<h${level}>${text}</h${level}>\n`;
}
hr() {
return this.options.xhtml ? '<hr/>\n' : '<hr>\n';
}
list(body, ordered, start) {
const type = ordered ? 'ol' : 'ul',
startatt = (ordered && start !== 1) ? (' start="' + start + '"') : '';
return '<' + type + startatt + '>\n' + body + '</' + type + '>\n';
}
/**
* @param {string} text
*/
listitem(text) {
return `<li>${text}</li>\n`;
}
checkbox(checked) {
return '<input '
+ (checked ? 'checked="" ' : '')
+ 'disabled="" type="checkbox"'
+ (this.options.xhtml ? ' /' : '')
+ '> ';
}
/**
* @param {string} text
*/
paragraph(text) {
return `<p>${text}</p>\n`;
}
/**
* @param {string} header
* @param {string} body
*/
table(header, body) {
if (body) body = `<tbody>${body}</tbody>`;
return '<table>\n'
+ '<thead>\n'
+ header
+ '</thead>\n'
+ body
+ '</table>\n';
}
/**
* @param {string} content
*/
tablerow(content) {
return `<tr>\n${content}</tr>\n`;
}
tablecell(content, flags) {
const type = flags.header ? 'th' : 'td';
const tag = flags.align
? `<${type} align="${flags.align}">`
: `<${type}>`;
return tag + content + `</${type}>\n`;
}
/**
* span level renderer
* @param {string} text
*/
strong(text) {
return `<strong>${text}</strong>`;
}
/**
* @param {string} text
*/
em(text) {
return `<em>${text}</em>`;
}
/**
* @param {string} text
*/
codespan(text) {
return `<code>${text}</code>`;
}
br() {
return this.options.xhtml ? '<br/>' : '<br>';
}
/**
* @param {string} text
*/
del(text) {
return `<del>${text}</del>`;
}
/**
* @param {string} href
* @param {string} title
* @param {string} text
*/
link(href, title, text) {
href = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
if (href === null) {
return text;
}
let out = '<a href="' + escape(href) + '"';
if (title) {
out += ' title="' + title + '"';
}
out += '>' + text + '</a>';
return out;
}
/**
* @param {string} href
* @param {string} title
* @param {string} text
*/
image(href, title, text) {
href = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
if (href === null) {
return text;
}
let out = `<img src="${href}" alt="${text}"`;
if (title) {
out += ` title="${title}"`;
}
out += this.options.xhtml ? '/>' : '>';
return out;
}
text(text) {
return text;
}
}
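
Renderer methods are the usual customization point; a hedged sketch using `marked.use`, which merges a partial renderer over the defaults (method signature taken from `heading()` above):
```js
import { marked } from 'marked';

marked.use({
  renderer: {
    // Override one method; every method not listed keeps its default.
    heading(text, level) {
      // Skip the id attribute the default heading() would emit.
      return `<h${level}>${text}</h${level}>\n`;
    }
  }
});

console.log(marked.parse('# Plain heading'));
```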

55
node_modules/marked/src/Slugger.js generated vendored Normal file

@@ -0,0 +1,55 @@
/**
* Slugger generates header id
*/
export class Slugger {
constructor() {
this.seen = {};
}
/**
* @param {string} value
*/
serialize(value) {
return value
.toLowerCase()
.trim()
// remove html tags
.replace(/<[!\/a-z].*?>/ig, '')
// remove unwanted chars
.replace(/[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&()*+,./:;<=>?@[\]^`{|}~]/g, '')
.replace(/\s/g, '-');
}
/**
* Finds the next safe (unique) slug to use
* @param {string} originalSlug
* @param {boolean} isDryRun
*/
getNextSafeSlug(originalSlug, isDryRun) {
let slug = originalSlug;
let occurenceAccumulator = 0;
if (this.seen.hasOwnProperty(slug)) {
occurenceAccumulator = this.seen[originalSlug];
do {
occurenceAccumulator++;
slug = originalSlug + '-' + occurenceAccumulator;
} while (this.seen.hasOwnProperty(slug));
}
if (!isDryRun) {
this.seen[originalSlug] = occurenceAccumulator;
this.seen[slug] = 0;
}
return slug;
}
/**
* Convert string to unique id
* @param {object} [options]
* @param {boolean} [options.dryrun] Generates the next unique slug without
* updating the internal accumulator.
*/
slug(value, options = {}) {
const slug = this.serialize(value);
return this.getNextSafeSlug(slug, options.dryrun);
}
}
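
The slugger's deduplication is easiest to see directly (a sketch; the class is also exposed from the package root as `marked.Slugger`):
```js
import { marked } from 'marked';

const slugger = new marked.Slugger();
console.log(slugger.slug('My Heading'));                   // "my-heading"
console.log(slugger.slug('My Heading'));                   // "my-heading-1"
console.log(slugger.slug('My Heading', { dryrun: true })); // "my-heading-2", accumulator unchanged
```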

42
node_modules/marked/src/TextRenderer.js generated vendored Normal file

@@ -0,0 +1,42 @@
/**
* TextRenderer
* returns only the textual part of the token
*/
export class TextRenderer {
// no need for block level renderers
strong(text) {
return text;
}
em(text) {
return text;
}
codespan(text) {
return text;
}
del(text) {
return text;
}
html(text) {
return text;
}
text(text) {
return text;
}
link(href, title, text) {
return '' + text;
}
image(href, title, text) {
return '' + text;
}
br() {
return '';
}
}

784
node_modules/marked/src/Tokenizer.js generated vendored Normal file

@@ -0,0 +1,784 @@
import { defaults } from './defaults.js';
import {
rtrim,
splitCells,
escape,
findClosingBracket
} from './helpers.js';
function outputLink(cap, link, raw, lexer) {
const href = link.href;
const title = link.title ? escape(link.title) : null;
const text = cap[1].replace(/\\([\[\]])/g, '$1');
if (cap[0].charAt(0) !== '!') {
lexer.state.inLink = true;
const token = {
type: 'link',
raw,
href,
title,
text,
tokens: lexer.inlineTokens(text, [])
};
lexer.state.inLink = false;
return token;
}
return {
type: 'image',
raw,
href,
title,
text: escape(text)
};
}
function indentCodeCompensation(raw, text) {
const matchIndentToCode = raw.match(/^(\s+)(?:```)/);
if (matchIndentToCode === null) {
return text;
}
const indentToCode = matchIndentToCode[1];
return text
.split('\n')
.map(node => {
const matchIndentInNode = node.match(/^\s+/);
if (matchIndentInNode === null) {
return node;
}
const [indentInNode] = matchIndentInNode;
if (indentInNode.length >= indentToCode.length) {
return node.slice(indentToCode.length);
}
return node;
})
.join('\n');
}
/**
* Tokenizer
*/
export class Tokenizer {
constructor(options) {
this.options = options || defaults;
}
space(src) {
const cap = this.rules.block.newline.exec(src);
if (cap && cap[0].length > 0) {
return {
type: 'space',
raw: cap[0]
};
}
}
code(src) {
const cap = this.rules.block.code.exec(src);
if (cap) {
const text = cap[0].replace(/^ {1,4}/gm, '');
return {
type: 'code',
raw: cap[0],
codeBlockStyle: 'indented',
text: !this.options.pedantic
? rtrim(text, '\n')
: text
};
}
}
fences(src) {
const cap = this.rules.block.fences.exec(src);
if (cap) {
const raw = cap[0];
const text = indentCodeCompensation(raw, cap[3] || '');
return {
type: 'code',
raw,
lang: cap[2] ? cap[2].trim() : cap[2],
text
};
}
}
heading(src) {
const cap = this.rules.block.heading.exec(src);
if (cap) {
let text = cap[2].trim();
// remove trailing #s
if (/#$/.test(text)) {
const trimmed = rtrim(text, '#');
if (this.options.pedantic) {
text = trimmed.trim();
} else if (!trimmed || / $/.test(trimmed)) {
// CommonMark requires space before trailing #s
text = trimmed.trim();
}
}
const token = {
type: 'heading',
raw: cap[0],
depth: cap[1].length,
text,
tokens: []
};
this.lexer.inline(token.text, token.tokens);
return token;
}
}
hr(src) {
const cap = this.rules.block.hr.exec(src);
if (cap) {
return {
type: 'hr',
raw: cap[0]
};
}
}
blockquote(src) {
const cap = this.rules.block.blockquote.exec(src);
if (cap) {
const text = cap[0].replace(/^ *>[ \t]?/gm, '');
return {
type: 'blockquote',
raw: cap[0],
tokens: this.lexer.blockTokens(text, []),
text
};
}
}
list(src) {
let cap = this.rules.block.list.exec(src);
if (cap) {
let raw, istask, ischecked, indent, i, blankLine, endsWithBlankLine,
line, nextLine, rawLine, itemContents, endEarly;
let bull = cap[1].trim();
const isordered = bull.length > 1;
const list = {
type: 'list',
raw: '',
ordered: isordered,
start: isordered ? +bull.slice(0, -1) : '',
loose: false,
items: []
};
bull = isordered ? `\\d{1,9}\\${bull.slice(-1)}` : `\\${bull}`;
if (this.options.pedantic) {
bull = isordered ? bull : '[*+-]';
}
// Get next list item
const itemRegex = new RegExp(`^( {0,3}${bull})((?:[\t ][^\\n]*)?(?:\\n|$))`);
// Check if current bullet point can start a new List Item
while (src) {
endEarly = false;
if (!(cap = itemRegex.exec(src))) {
break;
}
if (this.rules.block.hr.test(src)) { // End list if bullet was actually HR (possibly move into itemRegex?)
break;
}
raw = cap[0];
src = src.substring(raw.length);
line = cap[2].split('\n', 1)[0];
nextLine = src.split('\n', 1)[0];
if (this.options.pedantic) {
indent = 2;
itemContents = line.trimLeft();
} else {
indent = cap[2].search(/[^ ]/); // Find first non-space char
indent = indent > 4 ? 1 : indent; // Treat indented code blocks (> 4 spaces) as having only 1 indent
itemContents = line.slice(indent);
indent += cap[1].length;
}
blankLine = false;
if (!line && /^ *$/.test(nextLine)) { // Items begin with at most one blank line
raw += nextLine + '\n';
src = src.substring(nextLine.length + 1);
endEarly = true;
}
if (!endEarly) {
const nextBulletRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}(?:[*+-]|\\d{1,9}[.)])((?: [^\\n]*)?(?:\\n|$))`);
const hrRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`);
const fencesBeginRegex = new RegExp(`^( {0,${Math.min(3, indent - 1)}})(\`\`\`|~~~)`);
// Check if following lines should be included in List Item
while (src) {
rawLine = src.split('\n', 1)[0];
line = rawLine;
// Re-align to follow commonmark nesting rules
if (this.options.pedantic) {
line = line.replace(/^ {1,4}(?=( {4})*[^ ])/g, ' ');
}
// End list item if found code fences
if (fencesBeginRegex.test(line)) {
break;
}
// End list item if found start of new heading
if (this.rules.block.heading.test(line)) {
break;
}
// End list item if found start of new bullet
if (nextBulletRegex.test(line)) {
break;
}
// Horizontal rule found
if (hrRegex.test(src)) {
break;
}
if (line.search(/[^ ]/) >= indent || !line.trim()) { // Dedent if possible
itemContents += '\n' + line.slice(indent);
} else if (!blankLine) { // Until blank line, item doesn't need indentation
itemContents += '\n' + line;
} else { // Otherwise, improper indentation ends this item
break;
}
if (!blankLine && !line.trim()) { // Check if current line is blank
blankLine = true;
}
raw += rawLine + '\n';
src = src.substring(rawLine.length + 1);
}
}
if (!list.loose) {
// If the previous item ended with a blank line, the list is loose
if (endsWithBlankLine) {
list.loose = true;
} else if (/\n *\n *$/.test(raw)) {
endsWithBlankLine = true;
}
}
// Check for task list items
if (this.options.gfm) {
istask = /^\[[ xX]\] /.exec(itemContents);
if (istask) {
ischecked = istask[0] !== '[ ] ';
itemContents = itemContents.replace(/^\[[ xX]\] +/, '');
}
}
list.items.push({
type: 'list_item',
raw,
task: !!istask,
checked: ischecked,
loose: false,
text: itemContents
});
list.raw += raw;
}
// Do not consume newlines at end of final item. Alternatively, make itemRegex *start* with any newlines to simplify/speed up endsWithBlankLine logic
list.items[list.items.length - 1].raw = raw.trimRight();
list.items[list.items.length - 1].text = itemContents.trimRight();
list.raw = list.raw.trimRight();
const l = list.items.length;
// Item child tokens handled here at end because we needed to have the final item to trim it first
for (i = 0; i < l; i++) {
this.lexer.state.top = false;
list.items[i].tokens = this.lexer.blockTokens(list.items[i].text, []);
const spacers = list.items[i].tokens.filter(t => t.type === 'space');
const hasMultipleLineBreaks = spacers.every(t => {
const chars = t.raw.split('');
let lineBreaks = 0;
for (const char of chars) {
if (char === '\n') {
lineBreaks += 1;
}
if (lineBreaks > 1) {
return true;
}
}
return false;
});
if (!list.loose && spacers.length && hasMultipleLineBreaks) {
// Having a single line break doesn't mean a list is loose. A single line break is terminating the last list item
list.loose = true;
list.items[i].loose = true;
}
}
return list;
}
}
html(src) {
const cap = this.rules.block.html.exec(src);
if (cap) {
const token = {
type: 'html',
raw: cap[0],
pre: !this.options.sanitizer
&& (cap[1] === 'pre' || cap[1] === 'script' || cap[1] === 'style'),
text: cap[0]
};
if (this.options.sanitize) {
token.type = 'paragraph';
token.text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0]);
token.tokens = [];
this.lexer.inline(token.text, token.tokens);
}
return token;
}
}
def(src) {
const cap = this.rules.block.def.exec(src);
if (cap) {
if (cap[3]) cap[3] = cap[3].substring(1, cap[3].length - 1);
const tag = cap[1].toLowerCase().replace(/\s+/g, ' ');
return {
type: 'def',
tag,
raw: cap[0],
href: cap[2],
title: cap[3]
};
}
}
table(src) {
const cap = this.rules.block.table.exec(src);
if (cap) {
const item = {
type: 'table',
header: splitCells(cap[1]).map(c => { return { text: c }; }),
align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */),
rows: cap[3] && cap[3].trim() ? cap[3].replace(/\n[ \t]*$/, '').split('\n') : []
};
if (item.header.length === item.align.length) {
item.raw = cap[0];
let l = item.align.length;
let i, j, k, row;
for (i = 0; i < l; i++) {
if (/^ *-+: *$/.test(item.align[i])) {
item.align[i] = 'right';
} else if (/^ *:-+: *$/.test(item.align[i])) {
item.align[i] = 'center';
} else if (/^ *:-+ *$/.test(item.align[i])) {
item.align[i] = 'left';
} else {
item.align[i] = null;
}
}
l = item.rows.length;
for (i = 0; i < l; i++) {
item.rows[i] = splitCells(item.rows[i], item.header.length).map(c => { return { text: c }; });
}
// parse child tokens inside headers and cells
// header child tokens
l = item.header.length;
for (j = 0; j < l; j++) {
item.header[j].tokens = [];
this.lexer.inline(item.header[j].text, item.header[j].tokens);
}
// cell child tokens
l = item.rows.length;
for (j = 0; j < l; j++) {
row = item.rows[j];
for (k = 0; k < row.length; k++) {
row[k].tokens = [];
this.lexer.inline(row[k].text, row[k].tokens);
}
}
return item;
}
}
}
lheading(src) {
const cap = this.rules.block.lheading.exec(src);
if (cap) {
const token = {
type: 'heading',
raw: cap[0],
depth: cap[2].charAt(0) === '=' ? 1 : 2,
text: cap[1],
tokens: []
};
this.lexer.inline(token.text, token.tokens);
return token;
}
}
paragraph(src) {
const cap = this.rules.block.paragraph.exec(src);
if (cap) {
const token = {
type: 'paragraph',
raw: cap[0],
text: cap[1].charAt(cap[1].length - 1) === '\n'
? cap[1].slice(0, -1)
: cap[1],
tokens: []
};
this.lexer.inline(token.text, token.tokens);
return token;
}
}
text(src) {
const cap = this.rules.block.text.exec(src);
if (cap) {
const token = {
type: 'text',
raw: cap[0],
text: cap[0],
tokens: []
};
this.lexer.inline(token.text, token.tokens);
return token;
}
}
escape(src) {
const cap = this.rules.inline.escape.exec(src);
if (cap) {
return {
type: 'escape',
raw: cap[0],
text: escape(cap[1])
};
}
}
tag(src) {
const cap = this.rules.inline.tag.exec(src);
if (cap) {
if (!this.lexer.state.inLink && /^<a /i.test(cap[0])) {
this.lexer.state.inLink = true;
} else if (this.lexer.state.inLink && /^<\/a>/i.test(cap[0])) {
this.lexer.state.inLink = false;
}
if (!this.lexer.state.inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
this.lexer.state.inRawBlock = true;
} else if (this.lexer.state.inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
this.lexer.state.inRawBlock = false;
}
return {
type: this.options.sanitize
? 'text'
: 'html',
raw: cap[0],
inLink: this.lexer.state.inLink,
inRawBlock: this.lexer.state.inRawBlock,
text: this.options.sanitize
? (this.options.sanitizer
? this.options.sanitizer(cap[0])
: escape(cap[0]))
: cap[0]
};
}
}
link(src) {
const cap = this.rules.inline.link.exec(src);
if (cap) {
const trimmedUrl = cap[2].trim();
if (!this.options.pedantic && /^</.test(trimmedUrl)) {
// commonmark requires matching angle brackets
if (!(/>$/.test(trimmedUrl))) {
return;
}
// ending angle bracket cannot be escaped
const rtrimSlash = rtrim(trimmedUrl.slice(0, -1), '\\');
if ((trimmedUrl.length - rtrimSlash.length) % 2 === 0) {
return;
}
} else {
// find closing parenthesis
const lastParenIndex = findClosingBracket(cap[2], '()');
if (lastParenIndex > -1) {
const start = cap[0].indexOf('!') === 0 ? 5 : 4;
const linkLen = start + cap[1].length + lastParenIndex;
cap[2] = cap[2].substring(0, lastParenIndex);
cap[0] = cap[0].substring(0, linkLen).trim();
cap[3] = '';
}
}
let href = cap[2];
let title = '';
if (this.options.pedantic) {
// split pedantic href and title
const link = /^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(href);
if (link) {
href = link[1];
title = link[3];
}
} else {
title = cap[3] ? cap[3].slice(1, -1) : '';
}
href = href.trim();
if (/^</.test(href)) {
if (this.options.pedantic && !(/>$/.test(trimmedUrl))) {
// pedantic allows starting angle bracket without ending angle bracket
href = href.slice(1);
} else {
href = href.slice(1, -1);
}
}
return outputLink(cap, {
href: href ? href.replace(this.rules.inline._escapes, '$1') : href,
title: title ? title.replace(this.rules.inline._escapes, '$1') : title
}, cap[0], this.lexer);
}
}
reflink(src, links) {
let cap;
if ((cap = this.rules.inline.reflink.exec(src))
|| (cap = this.rules.inline.nolink.exec(src))) {
let link = (cap[2] || cap[1]).replace(/\s+/g, ' ');
link = links[link.toLowerCase()];
if (!link || !link.href) {
const text = cap[0].charAt(0);
return {
type: 'text',
raw: text,
text
};
}
return outputLink(cap, link, cap[0], this.lexer);
}
}
emStrong(src, maskedSrc, prevChar = '') {
let match = this.rules.inline.emStrong.lDelim.exec(src);
if (!match) return;
// _ can't be between two alphanumerics. \p{L}\p{N} includes non-english alphabet/numbers as well
if (match[3] && prevChar.match(/[\p{L}\p{N}]/u)) return;
const nextChar = match[1] || match[2] || '';
if (!nextChar || (nextChar && (prevChar === '' || this.rules.inline.punctuation.exec(prevChar)))) {
const lLength = match[0].length - 1;
let rDelim, rLength, delimTotal = lLength, midDelimTotal = 0;
const endReg = match[0][0] === '*' ? this.rules.inline.emStrong.rDelimAst : this.rules.inline.emStrong.rDelimUnd;
endReg.lastIndex = 0;
// Clip maskedSrc to same section of string as src (move to lexer?)
maskedSrc = maskedSrc.slice(-1 * src.length + lLength);
while ((match = endReg.exec(maskedSrc)) != null) {
rDelim = match[1] || match[2] || match[3] || match[4] || match[5] || match[6];
if (!rDelim) continue; // skip single * in __abc*abc__
rLength = rDelim.length;
if (match[3] || match[4]) { // found another Left Delim
delimTotal += rLength;
continue;
} else if (match[5] || match[6]) { // either Left or Right Delim
if (lLength % 3 && !((lLength + rLength) % 3)) {
midDelimTotal += rLength;
continue; // CommonMark Emphasis Rules 9-10
}
}
delimTotal -= rLength;
if (delimTotal > 0) continue; // Haven't found enough closing delimiters
// Remove extra characters. *a*** -> *a*
rLength = Math.min(rLength, rLength + delimTotal + midDelimTotal);
// Create `em` if smallest delimiter has odd char count. *a***
if (Math.min(lLength, rLength) % 2) {
const text = src.slice(1, lLength + match.index + rLength);
return {
type: 'em',
raw: src.slice(0, lLength + match.index + rLength + 1),
text,
tokens: this.lexer.inlineTokens(text, [])
};
}
// Create 'strong' if smallest delimiter has even char count. **a***
const text = src.slice(2, lLength + match.index + rLength - 1);
return {
type: 'strong',
raw: src.slice(0, lLength + match.index + rLength + 1),
text,
tokens: this.lexer.inlineTokens(text, [])
};
}
}
}
codespan(src) {
const cap = this.rules.inline.code.exec(src);
if (cap) {
let text = cap[2].replace(/\n/g, ' ');
const hasNonSpaceChars = /[^ ]/.test(text);
const hasSpaceCharsOnBothEnds = /^ /.test(text) && / $/.test(text);
if (hasNonSpaceChars && hasSpaceCharsOnBothEnds) {
text = text.substring(1, text.length - 1);
}
text = escape(text, true);
return {
type: 'codespan',
raw: cap[0],
text
};
}
}
br(src) {
const cap = this.rules.inline.br.exec(src);
if (cap) {
return {
type: 'br',
raw: cap[0]
};
}
}
del(src) {
const cap = this.rules.inline.del.exec(src);
if (cap) {
return {
type: 'del',
raw: cap[0],
text: cap[2],
tokens: this.lexer.inlineTokens(cap[2], [])
};
}
}
autolink(src, mangle) {
const cap = this.rules.inline.autolink.exec(src);
if (cap) {
let text, href;
if (cap[2] === '@') {
text = escape(this.options.mangle ? mangle(cap[1]) : cap[1]);
href = 'mailto:' + text;
} else {
text = escape(cap[1]);
href = text;
}
return {
type: 'link',
raw: cap[0],
text,
href,
tokens: [
{
type: 'text',
raw: text,
text
}
]
};
}
}
url(src, mangle) {
let cap;
if (cap = this.rules.inline.url.exec(src)) {
let text, href;
if (cap[2] === '@') {
text = escape(this.options.mangle ? mangle(cap[0]) : cap[0]);
href = 'mailto:' + text;
} else {
// do extended autolink path validation
let prevCapZero;
do {
prevCapZero = cap[0];
cap[0] = this.rules.inline._backpedal.exec(cap[0])[0];
} while (prevCapZero !== cap[0]);
text = escape(cap[0]);
if (cap[1] === 'www.') {
href = 'http://' + text;
} else {
href = text;
}
}
return {
type: 'link',
raw: cap[0],
text,
href,
tokens: [
{
type: 'text',
raw: text,
text
}
]
};
}
}
inlineText(src, smartypants) {
const cap = this.rules.inline.text.exec(src);
if (cap) {
let text;
if (this.lexer.state.inRawBlock) {
text = this.options.sanitize ? (this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0])) : cap[0];
} else {
text = escape(this.options.smartypants ? smartypants(cap[0]) : cap[0]);
}
return {
type: 'text',
raw: cap[0],
text
};
}
}
}
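
Individual tokenizer methods can be overridden through `marked.use` as well; a sketch assuming the marked 4 `tokenizer` override API, where returning `false` falls through to the built-in rule:
```js
import { marked } from 'marked';

marked.use({
  tokenizer: {
    codespan(src) {
      // Also treat $...$ as inline code, alongside the usual backticks.
      const match = src.match(/^\$+([^$\n]+?)\$+/);
      if (match) {
        return { type: 'codespan', raw: match[0], text: match[1].trim() };
      }
      return false; // defer to the default codespan tokenizer
    }
  }
});

console.log(marked.parse('Euler: $e^{i\\pi} + 1 = 0$'));
```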

29
node_modules/marked/src/defaults.js generated vendored Normal file

@@ -0,0 +1,29 @@
export function getDefaults() {
return {
baseUrl: null,
breaks: false,
extensions: null,
gfm: true,
headerIds: true,
headerPrefix: '',
highlight: null,
langPrefix: 'language-',
mangle: true,
pedantic: false,
renderer: null,
sanitize: false,
sanitizer: null,
silent: false,
smartLists: false,
smartypants: false,
tokenizer: null,
walkTokens: null,
xhtml: false
};
}
export let defaults = getDefaults();
export function changeDefaults(newDefaults) {
defaults = newDefaults;
}
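These defaults are merged with per-call options by marked() (see marked.js further down) and can be changed globally through marked.setOptions, which feeds changeDefaults. A minimal sketch, assuming the standard marked@4 API:
import { marked } from 'marked';
// Flip two defaults for every subsequent call.
marked.setOptions({ breaks: true, headerIds: false });
console.log(marked.parse('line one\nline two'));
// -> <p>line one<br>line two</p>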

276
node_modules/marked/src/helpers.js generated vendored Normal file
View file

@ -0,0 +1,276 @@
/**
* Helpers
*/
const escapeTest = /[&<>"']/;
const escapeReplace = /[&<>"']/g;
const escapeTestNoEncode = /[<>"']|&(?!#?\w+;)/;
const escapeReplaceNoEncode = /[<>"']|&(?!#?\w+;)/g;
const escapeReplacements = {
'&': '&amp;',
'<': '&lt;',
'>': '&gt;',
'"': '&quot;',
"'": '&#39;'
};
const getEscapeReplacement = (ch) => escapeReplacements[ch];
export function escape(html, encode) {
if (encode) {
if (escapeTest.test(html)) {
return html.replace(escapeReplace, getEscapeReplacement);
}
} else {
if (escapeTestNoEncode.test(html)) {
return html.replace(escapeReplaceNoEncode, getEscapeReplacement);
}
}
return html;
}
const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig;
/**
* @param {string} html
*/
export function unescape(html) {
// explicitly match decimal, hex, and named HTML entities
return html.replace(unescapeTest, (_, n) => {
n = n.toLowerCase();
if (n === 'colon') return ':';
if (n.charAt(0) === '#') {
return n.charAt(1) === 'x'
? String.fromCharCode(parseInt(n.substring(2), 16))
: String.fromCharCode(+n.substring(1));
}
return '';
});
}
const caret = /(^|[^\[])\^/g;
/**
* @param {string | RegExp} regex
* @param {string} opt
*/
export function edit(regex, opt) {
regex = typeof regex === 'string' ? regex : regex.source;
opt = opt || '';
const obj = {
replace: (name, val) => {
val = val.source || val;
val = val.replace(caret, '$1');
regex = regex.replace(name, val);
return obj;
},
getRegex: () => {
return new RegExp(regex, opt);
}
};
return obj;
}
const nonWordAndColonTest = /[^\w:]/g;
const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;
/**
* @param {boolean} sanitize
* @param {string} base
* @param {string} href
*/
export function cleanUrl(sanitize, base, href) {
if (sanitize) {
let prot;
try {
prot = decodeURIComponent(unescape(href))
.replace(nonWordAndColonTest, '')
.toLowerCase();
} catch (e) {
return null;
}
if (prot.indexOf('javascript:') === 0 || prot.indexOf('vbscript:') === 0 || prot.indexOf('data:') === 0) {
return null;
}
}
if (base && !originIndependentUrl.test(href)) {
href = resolveUrl(base, href);
}
try {
href = encodeURI(href).replace(/%25/g, '%');
} catch (e) {
return null;
}
return href;
}
const baseUrls = {};
const justDomain = /^[^:]+:\/*[^/]*$/;
const protocol = /^([^:]+:)[\s\S]*$/;
const domain = /^([^:]+:\/*[^/]*)[\s\S]*$/;
/**
* @param {string} base
* @param {string} href
*/
export function resolveUrl(base, href) {
if (!baseUrls[' ' + base]) {
// we can ignore everything in base after the last slash of its path component,
// but we might need to add _that_
// https://tools.ietf.org/html/rfc3986#section-3
if (justDomain.test(base)) {
baseUrls[' ' + base] = base + '/';
} else {
baseUrls[' ' + base] = rtrim(base, '/', true);
}
}
base = baseUrls[' ' + base];
const relativeBase = base.indexOf(':') === -1;
if (href.substring(0, 2) === '//') {
if (relativeBase) {
return href;
}
return base.replace(protocol, '$1') + href;
} else if (href.charAt(0) === '/') {
if (relativeBase) {
return href;
}
return base.replace(domain, '$1') + href;
} else {
return base + href;
}
}
export const noopTest = { exec: function noopTest() {} };
export function merge(obj) {
let i = 1,
target,
key;
for (; i < arguments.length; i++) {
target = arguments[i];
for (key in target) {
if (Object.prototype.hasOwnProperty.call(target, key)) {
obj[key] = target[key];
}
}
}
return obj;
}
export function splitCells(tableRow, count) {
// ensure that every cell-delimiting pipe has a space
// before it to distinguish it from an escaped pipe
const row = tableRow.replace(/\|/g, (match, offset, str) => {
let escaped = false,
curr = offset;
while (--curr >= 0 && str[curr] === '\\') escaped = !escaped;
if (escaped) {
// odd number of slashes means | is escaped
// so we leave it alone
return '|';
} else {
// add space before unescaped |
return ' |';
}
}),
cells = row.split(/ \|/);
let i = 0;
// First/last cell in a row cannot be empty if it has no leading/trailing pipe
if (!cells[0].trim()) { cells.shift(); }
if (cells.length > 0 && !cells[cells.length - 1].trim()) { cells.pop(); }
if (cells.length > count) {
cells.splice(count);
} else {
while (cells.length < count) cells.push('');
}
for (; i < cells.length; i++) {
// leading or trailing whitespace is ignored per the gfm spec
cells[i] = cells[i].trim().replace(/\\\|/g, '|');
}
return cells;
}
/**
* Remove trailing 'c's. Equivalent to str.replace(/c*$/, '').
* /c*$/ is vulnerable to REDOS.
*
* @param {string} str
* @param {string} c
* @param {boolean} invert Remove suffix of non-c chars instead. Default falsey.
*/
export function rtrim(str, c, invert) {
const l = str.length;
if (l === 0) {
return '';
}
// Length of suffix matching the invert condition.
let suffLen = 0;
// Step left until we fail to match the invert condition.
while (suffLen < l) {
const currChar = str.charAt(l - suffLen - 1);
if (currChar === c && !invert) {
suffLen++;
} else if (currChar !== c && invert) {
suffLen++;
} else {
break;
}
}
return str.slice(0, l - suffLen);
}
export function findClosingBracket(str, b) {
if (str.indexOf(b[1]) === -1) {
return -1;
}
const l = str.length;
let level = 0,
i = 0;
for (; i < l; i++) {
if (str[i] === '\\') {
i++;
} else if (str[i] === b[0]) {
level++;
} else if (str[i] === b[1]) {
level--;
if (level < 0) {
return i;
}
}
}
return -1;
}
export function checkSanitizeDeprecation(opt) {
if (opt && opt.sanitize && !opt.silent) {
console.warn('marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/#/USING_ADVANCED.md#options');
}
}
// copied from https://stackoverflow.com/a/5450113/806777
/**
* @param {string} pattern
* @param {number} count
*/
export function repeatString(pattern, count) {
if (count < 1) {
return '';
}
let result = '';
while (count > 1) {
if (count & 1) {
result += pattern;
}
count >>= 1;
pattern += pattern;
}
return result + pattern;
}
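A minimal sketch of the helpers above in isolation. These are internal utilities, not part of the public marked API, so the import path into the vendored source is illustrative only:
import { escape, rtrim, splitCells, repeatString } from './node_modules/marked/src/helpers.js';
console.log(escape('<b> & "x"'));              // &lt;b&gt; &amp; &quot;x&quot;
console.log(rtrim('rule----', '-'));           // rule
console.log(splitCells('| a | b \\| c |', 2)); // [ 'a', 'b | c' ]
console.log(repeatString('ab', 3));            // ababab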

351
node_modules/marked/src/marked.js generated vendored Normal file
View file

@ -0,0 +1,351 @@
import { Lexer } from './Lexer.js';
import { Parser } from './Parser.js';
import { Tokenizer } from './Tokenizer.js';
import { Renderer } from './Renderer.js';
import { TextRenderer } from './TextRenderer.js';
import { Slugger } from './Slugger.js';
import {
merge,
checkSanitizeDeprecation,
escape
} from './helpers.js';
import {
getDefaults,
changeDefaults,
defaults
} from './defaults.js';
/**
* Marked
*/
export function marked(src, opt, callback) {
// throw error in case of non string input
if (typeof src === 'undefined' || src === null) {
throw new Error('marked(): input parameter is undefined or null');
}
if (typeof src !== 'string') {
throw new Error('marked(): input parameter is of type '
+ Object.prototype.toString.call(src) + ', string expected');
}
if (typeof opt === 'function') {
callback = opt;
opt = null;
}
opt = merge({}, marked.defaults, opt || {});
checkSanitizeDeprecation(opt);
if (callback) {
const highlight = opt.highlight;
let tokens;
try {
tokens = Lexer.lex(src, opt);
} catch (e) {
return callback(e);
}
const done = function(err) {
let out;
if (!err) {
try {
if (opt.walkTokens) {
marked.walkTokens(tokens, opt.walkTokens);
}
out = Parser.parse(tokens, opt);
} catch (e) {
err = e;
}
}
opt.highlight = highlight;
return err
? callback(err)
: callback(null, out);
};
if (!highlight || highlight.length < 3) {
return done();
}
delete opt.highlight;
if (!tokens.length) return done();
let pending = 0;
marked.walkTokens(tokens, function(token) {
if (token.type === 'code') {
pending++;
setTimeout(() => {
highlight(token.text, token.lang, function(err, code) {
if (err) {
return done(err);
}
if (code != null && code !== token.text) {
token.text = code;
token.escaped = true;
}
pending--;
if (pending === 0) {
done();
}
});
}, 0);
}
});
if (pending === 0) {
done();
}
return;
}
try {
const tokens = Lexer.lex(src, opt);
if (opt.walkTokens) {
marked.walkTokens(tokens, opt.walkTokens);
}
return Parser.parse(tokens, opt);
} catch (e) {
e.message += '\nPlease report this to https://github.com/markedjs/marked.';
if (opt.silent) {
return '<p>An error occurred:</p><pre>'
+ escape(e.message + '', true)
+ '</pre>';
}
throw e;
}
}
/**
* Options
*/
marked.options =
marked.setOptions = function(opt) {
merge(marked.defaults, opt);
changeDefaults(marked.defaults);
return marked;
};
marked.getDefaults = getDefaults;
marked.defaults = defaults;
/**
* Use Extension
*/
marked.use = function(...args) {
const opts = merge({}, ...args);
const extensions = marked.defaults.extensions || { renderers: {}, childTokens: {} };
let hasExtensions;
args.forEach((pack) => {
// ==-- Parse "addon" extensions --== //
if (pack.extensions) {
hasExtensions = true;
pack.extensions.forEach((ext) => {
if (!ext.name) {
throw new Error('extension name required');
}
if (ext.renderer) { // Renderer extensions
const prevRenderer = extensions.renderers ? extensions.renderers[ext.name] : null;
if (prevRenderer) {
// Replace extension with func to run new extension but fall back if false
extensions.renderers[ext.name] = function(...args) {
let ret = ext.renderer.apply(this, args);
if (ret === false) {
ret = prevRenderer.apply(this, args);
}
return ret;
};
} else {
extensions.renderers[ext.name] = ext.renderer;
}
}
if (ext.tokenizer) { // Tokenizer Extensions
if (!ext.level || (ext.level !== 'block' && ext.level !== 'inline')) {
throw new Error("extension level must be 'block' or 'inline'");
}
if (extensions[ext.level]) {
extensions[ext.level].unshift(ext.tokenizer);
} else {
extensions[ext.level] = [ext.tokenizer];
}
if (ext.start) { // Function to check for start of token
if (ext.level === 'block') {
if (extensions.startBlock) {
extensions.startBlock.push(ext.start);
} else {
extensions.startBlock = [ext.start];
}
} else if (ext.level === 'inline') {
if (extensions.startInline) {
extensions.startInline.push(ext.start);
} else {
extensions.startInline = [ext.start];
}
}
}
}
if (ext.childTokens) { // Child tokens to be visited by walkTokens
extensions.childTokens[ext.name] = ext.childTokens;
}
});
}
// ==-- Parse "overwrite" extensions --== //
if (pack.renderer) {
const renderer = marked.defaults.renderer || new Renderer();
for (const prop in pack.renderer) {
const prevRenderer = renderer[prop];
// Replace renderer with func to run extension, but fall back if false
renderer[prop] = (...args) => {
let ret = pack.renderer[prop].apply(renderer, args);
if (ret === false) {
ret = prevRenderer.apply(renderer, args);
}
return ret;
};
}
opts.renderer = renderer;
}
if (pack.tokenizer) {
const tokenizer = marked.defaults.tokenizer || new Tokenizer();
for (const prop in pack.tokenizer) {
const prevTokenizer = tokenizer[prop];
// Replace tokenizer with func to run extension, but fall back if false
tokenizer[prop] = (...args) => {
let ret = pack.tokenizer[prop].apply(tokenizer, args);
if (ret === false) {
ret = prevTokenizer.apply(tokenizer, args);
}
return ret;
};
}
opts.tokenizer = tokenizer;
}
// ==-- Parse WalkTokens extensions --== //
if (pack.walkTokens) {
const walkTokens = marked.defaults.walkTokens;
opts.walkTokens = function(token) {
pack.walkTokens.call(this, token);
if (walkTokens) {
walkTokens.call(this, token);
}
};
}
if (hasExtensions) {
opts.extensions = extensions;
}
marked.setOptions(opts);
});
};
/**
* Run callback for every token
*/
marked.walkTokens = function(tokens, callback) {
for (const token of tokens) {
callback.call(marked, token);
switch (token.type) {
case 'table': {
for (const cell of token.header) {
marked.walkTokens(cell.tokens, callback);
}
for (const row of token.rows) {
for (const cell of row) {
marked.walkTokens(cell.tokens, callback);
}
}
break;
}
case 'list': {
marked.walkTokens(token.items, callback);
break;
}
default: {
if (marked.defaults.extensions && marked.defaults.extensions.childTokens && marked.defaults.extensions.childTokens[token.type]) { // Walk any extensions
marked.defaults.extensions.childTokens[token.type].forEach(function(childTokens) {
marked.walkTokens(token[childTokens], callback);
});
} else if (token.tokens) {
marked.walkTokens(token.tokens, callback);
}
}
}
}
};
/**
* Parse Inline
* @param {string} src
*/
marked.parseInline = function(src, opt) {
// throw error in case of non string input
if (typeof src === 'undefined' || src === null) {
throw new Error('marked.parseInline(): input parameter is undefined or null');
}
if (typeof src !== 'string') {
throw new Error('marked.parseInline(): input parameter is of type '
+ Object.prototype.toString.call(src) + ', string expected');
}
opt = merge({}, marked.defaults, opt || {});
checkSanitizeDeprecation(opt);
try {
const tokens = Lexer.lexInline(src, opt);
if (opt.walkTokens) {
marked.walkTokens(tokens, opt.walkTokens);
}
return Parser.parseInline(tokens, opt);
} catch (e) {
e.message += '\nPlease report this to https://github.com/markedjs/marked.';
if (opt.silent) {
return '<p>An error occurred:</p><pre>'
+ escape(e.message + '', true)
+ '</pre>';
}
throw e;
}
};
/**
* Expose
*/
marked.Parser = Parser;
marked.parser = Parser.parse;
marked.Renderer = Renderer;
marked.TextRenderer = TextRenderer;
marked.Lexer = Lexer;
marked.lexer = Lexer.lex;
marked.Tokenizer = Tokenizer;
marked.Slugger = Slugger;
marked.parse = marked;
export const options = marked.options;
export const setOptions = marked.setOptions;
export const use = marked.use;
export const walkTokens = marked.walkTokens;
export const parseInline = marked.parseInline;
export const parse = marked;
export const parser = Parser.parse;
export const lexer = Lexer.lex;
export { defaults, getDefaults } from './defaults.js';
export { Lexer } from './Lexer.js';
export { Parser } from './Parser.js';
export { Tokenizer } from './Tokenizer.js';
export { Renderer } from './Renderer.js';
export { TextRenderer } from './TextRenderer.js';
export { Slugger } from './Slugger.js';
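A minimal sketch of the extension mechanism implemented above: an "overwrite" renderer plus a walkTokens hook layered onto the defaults via marked.use(). The link policy and heading tweak are illustrative only:
import { marked } from 'marked';
marked.use({
  renderer: {
    // Returning false falls back to the built-in renderer for other cases.
    link(href, title, text) {
      return href.startsWith('http')
        ? `<a href="${href}" rel="noopener" target="_blank">${text}</a>`
        : false;
    }
  },
  // Runs for every token before rendering; here it demotes headings by one level.
  walkTokens(token) {
    if (token.type === 'heading') token.depth = Math.min(token.depth + 1, 6);
  }
});
console.log(marked.parse('# Title\n[site](https://example.com)'));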

302
node_modules/marked/src/rules.js generated vendored Normal file
View file

@ -0,0 +1,302 @@
import {
noopTest,
edit,
merge
} from './helpers.js';
/**
* Block-Level Grammar
*/
export const block = {
newline: /^(?: *(?:\n|$))+/,
code: /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,
fences: /^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: {0,3}\1[~`]* *(?=\n|$)|$)/,
hr: /^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,
heading: /^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,
blockquote: /^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/,
list: /^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/,
html: '^ {0,3}(?:' // optional indentation
+ '<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:</\\1>[^\\n]*\\n+|$)' // (1)
+ '|comment[^\\n]*(\\n+|$)' // (2)
+ '|<\\?[\\s\\S]*?(?:\\?>\\n*|$)' // (3)
+ '|<![A-Z][\\s\\S]*?(?:>\\n*|$)' // (4)
+ '|<!\\[CDATA\\[[\\s\\S]*?(?:\\]\\]>\\n*|$)' // (5)
+ '|</?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (6)
+ '|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) open tag
+ '|</(?!script|pre|style|textarea)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) closing tag
+ ')',
def: /^ {0,3}\[(label)\]: *(?:\n *)?<?([^\s>]+)>?(?:(?: +(?:\n *)?| *\n *)(title))? *(?:\n+|$)/,
table: noopTest,
lheading: /^([^\n]+)\n {0,3}(=+|-+) *(?:\n+|$)/,
// regex template, placeholders will be replaced according to different paragraph
// interruption rules of commonmark and the original markdown spec:
_paragraph: /^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/,
text: /^[^\n]+/
};
block._label = /(?!\s*\])(?:\\.|[^\[\]\\])+/;
block._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/;
block.def = edit(block.def)
.replace('label', block._label)
.replace('title', block._title)
.getRegex();
block.bullet = /(?:[*+-]|\d{1,9}[.)])/;
block.listItemStart = edit(/^( *)(bull) */)
.replace('bull', block.bullet)
.getRegex();
block.list = edit(block.list)
.replace(/bull/g, block.bullet)
.replace('hr', '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))')
.replace('def', '\\n+(?=' + block.def.source + ')')
.getRegex();
block._tag = 'address|article|aside|base|basefont|blockquote|body|caption'
+ '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption'
+ '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe'
+ '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option'
+ '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr'
+ '|track|ul';
block._comment = /<!--(?!-?>)[\s\S]*?(?:-->|$)/;
block.html = edit(block.html, 'i')
.replace('comment', block._comment)
.replace('tag', block._tag)
.replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/)
.getRegex();
block.paragraph = edit(block._paragraph)
.replace('hr', block.hr)
.replace('heading', ' {0,3}#{1,6} ')
.replace('|lheading', '') // setext headings don't interrupt commonmark paragraphs
.replace('|table', '')
.replace('blockquote', ' {0,3}>')
.replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
.replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
.replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
.replace('tag', block._tag) // pars can be interrupted by type (6) html blocks
.getRegex();
block.blockquote = edit(block.blockquote)
.replace('paragraph', block.paragraph)
.getRegex();
/**
* Normal Block Grammar
*/
block.normal = merge({}, block);
/**
* GFM Block Grammar
*/
block.gfm = merge({}, block.normal, {
table: '^ *([^\\n ].*\\|.*)\\n' // Header
+ ' {0,3}(?:\\| *)?(:?-+:? *(?:\\| *:?-+:? *)*)(?:\\| *)?' // Align
+ '(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' // Cells
});
block.gfm.table = edit(block.gfm.table)
.replace('hr', block.hr)
.replace('heading', ' {0,3}#{1,6} ')
.replace('blockquote', ' {0,3}>')
.replace('code', ' {4}[^\\n]')
.replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
.replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
.replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
.replace('tag', block._tag) // tables can be interrupted by type (6) html blocks
.getRegex();
block.gfm.paragraph = edit(block._paragraph)
.replace('hr', block.hr)
.replace('heading', ' {0,3}#{1,6} ')
.replace('|lheading', '') // setext headings don't interrupt commonmark paragraphs
.replace('table', block.gfm.table) // interrupt paragraphs with table
.replace('blockquote', ' {0,3}>')
.replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
.replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
.replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
.replace('tag', block._tag) // pars can be interrupted by type (6) html blocks
.getRegex();
/**
* Pedantic grammar (original John Gruber's loose markdown specification)
*/
block.pedantic = merge({}, block.normal, {
html: edit(
'^ *(?:comment *(?:\\n|\\s*$)'
+ '|<(tag)[\\s\\S]+?</\\1> *(?:\\n{2,}|\\s*$)' // closed tag
+ '|<tag(?:"[^"]*"|\'[^\']*\'|\\s[^\'"/>\\s]*)*?/?> *(?:\\n{2,}|\\s*$))')
.replace('comment', block._comment)
.replace(/tag/g, '(?!(?:'
+ 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub'
+ '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)'
+ '\\b)\\w+(?!:|[^\\w\\s@]*@)\\b')
.getRegex(),
def: /^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/,
heading: /^(#{1,6})(.*)(?:\n+|$)/,
fences: noopTest, // fences not supported
paragraph: edit(block.normal._paragraph)
.replace('hr', block.hr)
.replace('heading', ' *#{1,6} *[^\n]')
.replace('lheading', block.lheading)
.replace('blockquote', ' {0,3}>')
.replace('|fences', '')
.replace('|list', '')
.replace('|html', '')
.getRegex()
});
/**
* Inline-Level Grammar
*/
export const inline = {
escape: /^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,
autolink: /^<(scheme:[^\s\x00-\x1f<>]*|email)>/,
url: noopTest,
tag: '^comment'
+ '|^</[a-zA-Z][\\w:-]*\\s*>' // closing tag
+ '|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>' // open tag
+ '|^<\\?[\\s\\S]*?\\?>' // processing instruction, e.g. <?php ?>
+ '|^<![a-zA-Z]+\\s[\\s\\S]*?>' // declaration, e.g. <!DOCTYPE html>
+ '|^<!\\[CDATA\\[[\\s\\S]*?\\]\\]>', // CDATA section
link: /^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/,
reflink: /^!?\[(label)\]\[(ref)\]/,
nolink: /^!?\[(ref)\](?:\[\])?/,
reflinkSearch: 'reflink|nolink(?!\\()',
emStrong: {
lDelim: /^(?:\*+(?:([punct_])|[^\s*]))|^_+(?:([punct*])|([^\s_]))/,
// (1) and (2) can only be a Right Delimiter. (3) and (4) can only be Left. (5) and (6) can be either Left or Right.
// () Skip orphan inside strong () Consume to delim (1) #*** (2) a***#, a*** (3) #***a, ***a (4) ***# (5) #***# (6) a***a
rDelimAst: /^[^_*]*?\_\_[^_*]*?\*[^_*]*?(?=\_\_)|[^*]+(?=[^*])|[punct_](\*+)(?=[\s]|$)|[^punct*_\s](\*+)(?=[punct_\s]|$)|[punct_\s](\*+)(?=[^punct*_\s])|[\s](\*+)(?=[punct_])|[punct_](\*+)(?=[punct_])|[^punct*_\s](\*+)(?=[^punct*_\s])/,
rDelimUnd: /^[^_*]*?\*\*[^_*]*?\_[^_*]*?(?=\*\*)|[^_]+(?=[^_])|[punct*](\_+)(?=[\s]|$)|[^punct*_\s](\_+)(?=[punct*\s]|$)|[punct*\s](\_+)(?=[^punct*_\s])|[\s](\_+)(?=[punct*])|[punct*](\_+)(?=[punct*])/ // ^- Not allowed for _
},
code: /^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,
br: /^( {2,}|\\)\n(?!\s*$)/,
del: noopTest,
text: /^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\<!\[`*_]|\b_|$)|[^ ](?= {2,}\n)))/,
punctuation: /^([\spunctuation])/
};
// list of punctuation marks from CommonMark spec
// without * and _ to handle the different emphasis markers * and _
inline._punctuation = '!"#$%&\'()+\\-.,/:;<=>?@\\[\\]`^{|}~';
inline.punctuation = edit(inline.punctuation).replace(/punctuation/g, inline._punctuation).getRegex();
// sequences em should skip over [title](link), `code`, <html>
inline.blockSkip = /\[[^\]]*?\]\([^\)]*?\)|`[^`]*?`|<[^>]*?>/g;
inline.escapedEmSt = /\\\*|\\_/g;
inline._comment = edit(block._comment).replace('(?:-->|$)', '-->').getRegex();
inline.emStrong.lDelim = edit(inline.emStrong.lDelim)
.replace(/punct/g, inline._punctuation)
.getRegex();
inline.emStrong.rDelimAst = edit(inline.emStrong.rDelimAst, 'g')
.replace(/punct/g, inline._punctuation)
.getRegex();
inline.emStrong.rDelimUnd = edit(inline.emStrong.rDelimUnd, 'g')
.replace(/punct/g, inline._punctuation)
.getRegex();
inline._escapes = /\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/g;
inline._scheme = /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/;
inline._email = /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/;
inline.autolink = edit(inline.autolink)
.replace('scheme', inline._scheme)
.replace('email', inline._email)
.getRegex();
inline._attribute = /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/;
inline.tag = edit(inline.tag)
.replace('comment', inline._comment)
.replace('attribute', inline._attribute)
.getRegex();
inline._label = /(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/;
inline._href = /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/;
inline._title = /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/;
inline.link = edit(inline.link)
.replace('label', inline._label)
.replace('href', inline._href)
.replace('title', inline._title)
.getRegex();
inline.reflink = edit(inline.reflink)
.replace('label', inline._label)
.replace('ref', block._label)
.getRegex();
inline.nolink = edit(inline.nolink)
.replace('ref', block._label)
.getRegex();
inline.reflinkSearch = edit(inline.reflinkSearch, 'g')
.replace('reflink', inline.reflink)
.replace('nolink', inline.nolink)
.getRegex();
/**
* Normal Inline Grammar
*/
inline.normal = merge({}, inline);
/**
* Pedantic Inline Grammar
*/
inline.pedantic = merge({}, inline.normal, {
strong: {
start: /^__|\*\*/,
middle: /^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/,
endAst: /\*\*(?!\*)/g,
endUnd: /__(?!_)/g
},
em: {
start: /^_|\*/,
middle: /^()\*(?=\S)([\s\S]*?\S)\*(?!\*)|^_(?=\S)([\s\S]*?\S)_(?!_)/,
endAst: /\*(?!\*)/g,
endUnd: /_(?!_)/g
},
link: edit(/^!?\[(label)\]\((.*?)\)/)
.replace('label', inline._label)
.getRegex(),
reflink: edit(/^!?\[(label)\]\s*\[([^\]]*)\]/)
.replace('label', inline._label)
.getRegex()
});
/**
* GFM Inline Grammar
*/
inline.gfm = merge({}, inline.normal, {
escape: edit(inline.escape).replace('])', '~|])').getRegex(),
_extended_email: /[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/,
url: /^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/,
_backpedal: /(?:[^?!.,:;*_~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_~)]+(?!$))+/,
del: /^(~~?)(?=[^\s~])([\s\S]*?[^\s~])\1(?=[^~]|$)/,
text: /^([`~]+|[^`~])(?:(?= {2,}\n)|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)|[\s\S]*?(?:(?=[\\<!\[`*~_]|\b_|https?:\/\/|ftp:\/\/|www\.|$)|[^ ](?= {2,}\n)|[^a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-](?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)))/
});
inline.gfm.url = edit(inline.gfm.url, 'i')
.replace('email', inline.gfm._extended_email)
.getRegex();
/**
* GFM + Line Breaks Inline Grammar
*/
inline.breaks = merge({}, inline.gfm, {
br: edit(inline.br).replace('{2,}', '*').getRegex(),
text: edit(inline.gfm.text)
.replace('\\b_', '\\b_| {2,}\\n')
.replace(/\{2,\}/g, '*')
.getRegex()
});
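The grammars above are assembled with the internal edit() helper from helpers.js, which substitutes named placeholders into a regex source before compiling it. A small sketch of that pattern in isolation (the import path and the simplified def pattern are illustrative only):
import { edit } from './node_modules/marked/src/helpers.js';
const def = edit(/^ {0,3}\[(label)\]:/)
  .replace('label', /(?!\s*\])(?:\\.|[^\[\]\\])+/) // same _label pattern as above
  .getRegex();
console.log(def.test('[marked]: https://marked.js.org')); // true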

19
package-lock.json generated
View file

@ -10,7 +10,8 @@
"license": "GPL-3.0-only",
"dependencies": {
"ejs": "^3.1.8",
"express": "^4.18.1"
"express": "^4.18.1",
"marked": "^4.0.17"
}
},
"node_modules/accepts": {
@ -444,6 +445,17 @@
"node": ">=10"
}
},
"node_modules/marked": {
"version": "4.0.17",
"resolved": "https://registry.npmjs.org/marked/-/marked-4.0.17.tgz",
"integrity": "sha512-Wfk0ATOK5iPxM4ptrORkFemqroz0ZDxp5MWfYA7H/F+wO17NRWV5Ypxi6p3g2Xmw2bKeiYOl6oVnLHKxBA0VhA==",
"bin": {
"marked": "bin/marked.js"
},
"engines": {
"node": ">= 12"
}
},
"node_modules/media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
@ -1080,6 +1092,11 @@
"minimatch": "^3.0.4"
}
},
"marked": {
"version": "4.0.17",
"resolved": "https://registry.npmjs.org/marked/-/marked-4.0.17.tgz",
"integrity": "sha512-Wfk0ATOK5iPxM4ptrORkFemqroz0ZDxp5MWfYA7H/F+wO17NRWV5Ypxi6p3g2Xmw2bKeiYOl6oVnLHKxBA0VhA=="
},
"media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",

package.json
View file

@ -19,6 +19,7 @@
"license": "GPL-3.0-only",
"dependencies": {
"ejs": "^3.1.8",
"express": "^4.18.1"
"express": "^4.18.1",
"marked": "^4.0.17"
}
}
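With the dependency declared in package.json and locked above, application code (not part of this commit) can render Markdown with it. A minimal sketch:
const { marked } = require('marked'); // or: import { marked } from 'marked';
const html = marked.parse('# Hello\n\nRendered with **marked** 4.0.17');
console.log(html);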