From 8164e1de65d0727b323d036432036eb5dc1a7489 Mon Sep 17 00:00:00 2001
From: Filip Pajic <42151917+pajicf@users.noreply.github.com>
Date: Fri, 19 Apr 2024 14:24:43 +0200
Subject: [PATCH] chore: Markdown fetching script refactor (#1)
* Added .env.example
* Refactored env config loading
* Added utility file system functions for reading and writing
* Extracted vac-to-Docusaurus converter functions
* Extracted the GitHub API service
* Refactored the scraping script
* Removed unused imports
* Updated the directories to sync
---
.env.example | 1 +
README.md | 7 +-
fetch-content.js | 270 --------------------------------
package.json | 6 +-
scrapper/config.mjs | 13 ++
scrapper/fetch-content.mjs | 57 +++++++
scrapper/file.mjs | 62 ++++++++
scrapper/github.mjs | 13 ++
scrapper/main.mjs | 19 +++
scrapper/markdown-convertor.mjs | 148 +++++++++++++++++
yarn.lock | 10 ++
11 files changed, 330 insertions(+), 276 deletions(-)
create mode 100644 .env.example
delete mode 100644 fetch-content.js
create mode 100644 scrapper/config.mjs
create mode 100644 scrapper/fetch-content.mjs
create mode 100644 scrapper/file.mjs
create mode 100644 scrapper/github.mjs
create mode 100644 scrapper/main.mjs
create mode 100644 scrapper/markdown-convertor.mjs
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..0c6a33d
--- /dev/null
+++ b/.env.example
@@ -0,0 +1 @@
+GITHUB_TOKEN=
\ No newline at end of file
diff --git a/README.md b/README.md
index 16603bf..b9b99fb 100644
--- a/README.md
+++ b/README.md
@@ -19,13 +19,12 @@ $ git clone https://github.com/acid-info/logos-docs-template.git
$ yarn install
```
-3. Create .env and run fetch-content.js
+3. Create .env (see .env.example) and run the scraping script:
```bash
-GITHUB_TOKEN=
+$ yarn scrape
```
-and `node fetch-content.js`
-
4. Start the website:
```bash
diff --git a/fetch-content.js b/fetch-content.js
deleted file mode 100644
index 553cc01..0000000
--- a/fetch-content.js
+++ /dev/null
@@ -1,270 +0,0 @@
-const https = require('https')
-const fs = require('fs')
-const path = require('path')
-
-function loadEnv() {
- const envPath = path.resolve(process.cwd(), '.env')
-
- try {
- const data = fs.readFileSync(envPath, 'utf8')
-
- data.split('\n').forEach(line => {
- line = line.trim()
-
- if (line && !line.startsWith('#')) {
- const [key, value] = line.split('=')
- process.env[key.trim()] = value.trim()
- }
- })
- } catch (err) {
- console.error('Error loading .env file', err)
- }
-}
-
-loadEnv()
-
-async function fetchFromGitHub(url, callback) {
- https
- .get(
- url,
- {
- headers: {
- 'User-Agent': 'Node.js',
- // NOTE: Create .env file and add GITHUB_TOKEN=your_token
- Authorization: `token ${process.env.GITHUB_TOKEN}`,
- },
- },
- res => {
- let data = ''
-
- res.on('data', chunk => {
- data += chunk
- })
-
- res.on('end', () => {
- const parsedData = JSON.parse(data)
- // console.log('parsedData:', parsedData)
- callback(null, parsedData)
- })
- },
- )
- .on('error', err => {
- callback(err, null)
- })
-}
-
-async function fetchDirectoryContents(dirUrl, basePath, prefixToRemove) {
- fetchFromGitHub(dirUrl, async (err, files) => {
- if (err) {
- console.error('Error fetching files:', err.message)
- return
- }
-
- if (!files) {
- console.log('No files found', files)
- return
- }
-
- for (const file of files) {
- const relativePath = file.path.replace(
- new RegExp(`^${prefixToRemove}`),
- '',
- )
-
- function adjustPathForMarkdown(filePath) {
- const parts = filePath.split('/')
-
- if (parts?.length === 1) return filePath
- if (filePath.includes('README.md')) return filePath
-
- if (parts[parts.length - 1].endsWith('.md')) {
- parts.splice(parts.length - 2, 1)
- }
-
- return parts.join('/')
- }
-
- const filePath = path.join(basePath, adjustPathForMarkdown(relativePath))
-
- if (file.type === 'file') {
- await downloadAndSaveFile(file.download_url, filePath)
- } else if (file.type === 'dir') {
- await fetchDirectoryContents(file.url, basePath, prefixToRemove)
- }
- }
- })
-}
-
-function enhanceMarkdownWithBulletPointsCorrected(input) {
- // Split the input text into lines
- const lines = input.split('\n')
- // Initialize an array to hold the extracted fields
- let extractedFields = []
- // Initialize variables to track the frontmatter and contributors section
- let inFrontMatter = false
- let inContributors = false
- let contributorsLines = [] // Holds contributors lines
-
- // Process each line
- const outputLines = lines.map(line => {
- if (line.trim() === '---') {
- inFrontMatter = !inFrontMatter
- if (!inFrontMatter && contributorsLines.length) {
- // We're exiting frontmatter; time to add contributors
- extractedFields.push(`contributors:\n${contributorsLines.join('\n')}`)
- contributorsLines = [] // Reset for safety
- }
- return line // Keep the frontmatter delimiters
- }
-
- if (inFrontMatter) {
- if (line.startsWith('contributors:')) {
- inContributors = true // Entering contributors section
- } else if (inContributors) {
- if (line.startsWith(' -')) {
- contributorsLines.push(line.trim()) // Add contributors line
- } else {
- // Exiting contributors section
- inContributors = false
- extractedFields.push(`contributors:\n${contributorsLines.join('\n')}`)
- contributorsLines = [] // Reset
- }
- } else {
- const match = line.match(/(status|category|editor):(.*)/)
- if (match) {
- extractedFields.push(line.trim())
- }
- }
- }
-
- return line // Return the line unmodified
- })
-
- // Find the index of the second frontmatter delimiter
- const endOfFrontMatterIndex = outputLines.findIndex(
- (line, index) => line.trim() === '---' && index > 0,
- )
-
- // Insert the extracted fields as capitalized bullet points after the frontmatter
- const bulletPoints = extractedFields
- .map(field => {
- // Capitalize the first letter of the label and ensure proper formatting for multi-line fields
- if (field.includes('\n')) {
- const [label, ...values] = field.split('\n')
- return `- ${label.charAt(0).toUpperCase() +
- label.slice(1)}:\n ${values.join('\n ')}`
- } else {
- return `- ${field.charAt(0).toUpperCase() + field.slice(1)}`
- }
- })
- .join('\n')
- outputLines.splice(endOfFrontMatterIndex + 1, 0, bulletPoints)
-
- // Join the lines back into a single string and return
- return outputLines.join('\n')
-}
-
-function parseSlugFromFrontmatter(content) {
- const frontmatterMatch = content.match(/---\s*\n([\s\S]*?)\n---/)
- if (frontmatterMatch) {
- const frontmatterContent = frontmatterMatch[1]
-
- function extractNumberFromTitle(content) {
- const parts = content.split('/')
- return parseInt(parts[0].split(' ')[1], 10)
- }
-
- const number = extractNumberFromTitle(frontmatterContent)
- return number
- }
- return 1 // Return null if not found
-}
-
-function unescapeHtmlComments(htmlString) {
- return htmlString.replace(/\\<\!--/g, '\n\n')
-}
-
-async function downloadAndSaveFile(url, filePath) {
- https
- .get(url, res => {
- let content = ''
- res.on('data', chunk => {
- content += chunk
- })
-
- res.on('end', () => {
- const fullFilePath = path.join(__dirname, filePath)
- const directory = path.dirname(fullFilePath)
- fs.mkdirSync(directory, { recursive: true })
-
- const fileExtension = path.extname(filePath)
-
- function updateMarkdownImagePath(content, number) {
- const regex = /(!\[.*?\]\(\.\/)images/g
-
- return content.replace(regex, `$1${number}/images`)
- }
-
- if (fileExtension === '.md' || fileExtension === '.mdx') {
- // Remove 'tags' line from frontmatter because the format is wrong
- content = content.replace(/tags:.*\n?/, '')
-
- // Replace <br> with <br/>
- content = content.replace(/<br>/g, '<br/>')
-
- // Escape < and > with \< and \>, respectively
- // Be cautious with this replacement; adjust as needed based on your context
- content = content.replace(/</g, '\\<')
- content = content.replace(/>/g, '\\>')
-
- // NEW: Remove 'slug' line from frontmatter
- content = content.replace(/^slug:.*\n?/m, '')
-
- // Replace empty Markdown links with placeholder URL
- content = content.replace(/\[([^\]]+)\]\(\)/g, '[$1](#)')
-
- content = unescapeHtmlComments(content)
-
- // // parse sidebarPosition from the slug in the frontmatter
- const sidebarPosition = parseSlugFromFrontmatter(content) || 1
-
- content = enhanceMarkdownWithBulletPointsCorrected(content)
-
- content = updateMarkdownImagePath(content, sidebarPosition)
-
- // Insert sidebar_position at the end of frontmatter if it doesn't exist
- if (
- /^---\s*[\s\S]+?---/.test(content) &&
- !/sidebar_position: \d+/.test(content)
- ) {
- content = content.replace(
- /^---\s*([\s\S]+?)---/,
- `---\n$1sidebar_position: ${sidebarPosition}\n---`,
- )
- }
- }
-
- fs.writeFile(fullFilePath, content, err => {
- if (err) {
- // console.error('Error saving file:', err.message)
- return
- }
- // console.log('Downloaded and saved:', filePath)
- })
- })
- })
- .on('error', err => {
- console.error('Error downloading file:', err.message)
- })
-}
-
-const directoriesToSync = ['codex', 'nomos', 'status', 'vac', 'waku']
-
-directoriesToSync.forEach(dirName => {
- const baseUrl = `https://api.github.com/repos/vacp2p/rfc-index/contents/${dirName}`
- const baseSavePath = `./${dirName}/`
- const prefixToRemove = dirName + '/'
-
- fetchDirectoryContents(baseUrl, baseSavePath, prefixToRemove).then(() => {
- // console.log(`Synced ${dirName}`)
- })
-})
diff --git a/package.json b/package.json
index 6498a9f..fbebaf4 100644
--- a/package.json
+++ b/package.json
@@ -12,7 +12,8 @@
"serve": "docusaurus serve",
"write-translations": "docusaurus write-translations",
"write-heading-ids": "docusaurus write-heading-ids",
- "typecheck": "tsc"
+ "typecheck": "tsc",
+ "scrape": "node ./scrapper/main.mjs"
},
"dependencies": {
"@acid-info/logos-docusaurus-preset": "^1.0.0-alpha.14",
@@ -23,7 +24,8 @@
"@emotion/styled": "^11.11.0",
"@mdx-js/react": "^1.6.22",
"clsx": "^1.2.1",
- "dotenv": "^16.0.3",
+ "dotenv": "^16.4.5",
+ "mkdirp": "^3.0.1",
"prism-react-renderer": "^1.3.5",
"react": "^17.0.2",
"react-dom": "^17.0.2",
diff --git a/scrapper/config.mjs b/scrapper/config.mjs
new file mode 100644
index 0000000..4631064
--- /dev/null
+++ b/scrapper/config.mjs
@@ -0,0 +1,13 @@
+import 'dotenv/config';
+
+const {
+ GITHUB_TOKEN
+} = process.env;
+
+if (!GITHUB_TOKEN) {
+ throw new Error("Please provide the GITHUB_TOKEN")
+}
+
+export {
+ GITHUB_TOKEN
+}
\ No newline at end of file
diff --git a/scrapper/fetch-content.mjs b/scrapper/fetch-content.mjs
new file mode 100644
index 0000000..985b142
--- /dev/null
+++ b/scrapper/fetch-content.mjs
@@ -0,0 +1,57 @@
+import path from "path";
+
+import { fetchFromGitHub } from "./github.mjs";
+import axios from 'axios'
+
+import { createDirectory, readFile, writeFile, writeLargeFile } from './file.mjs'
+import { adjustPathForMarkdown, vacMarkdownToDocusaurusMarkdown } from './markdown-convertor.mjs'
+
+async function downloadFile(url, fullFilePath) {
+ const request = await axios.get(url, {
+ responseType: "stream"
+ });
+
+ const directory = path.dirname(fullFilePath)
+ await createDirectory(directory)
+ await writeLargeFile(fullFilePath, request.data)
+}
+
+async function downloadAndModifyFile(url, filePath) {
+ const fullFilePath = path.join(process.cwd(), filePath)
+
+ await downloadFile(url, fullFilePath);
+
+ const fileExtension = path.extname(filePath)
+ if (fileExtension === '.md' || fileExtension === '.mdx') {
+ const fileBuffer = await readFile(fullFilePath);
+ const fileContent = fileBuffer.toString();
+ const convertedFileContent = vacMarkdownToDocusaurusMarkdown(fileContent, filePath);
+
+ await writeFile(fullFilePath, convertedFileContent);
+ }
+}
+
+export async function fetchDirectoryContents(dirUrl, basePath, prefixToRemove) {
+ try {
+ const files = await fetchFromGitHub(dirUrl);
+
+ if (!files) {
+ console.log('No files found at', dirUrl)
+ return
+ }
+
+ for (const file of files) {
+ const prefixRemovalRegex = new RegExp(`^${prefixToRemove}`)
+ const relativePath = file.path.replace(prefixRemovalRegex, '')
+ const filePath = path.join(basePath, adjustPathForMarkdown(relativePath))
+
+ if (file.type === 'file') {
+ await downloadAndModifyFile(file.download_url, filePath)
+ } else if (file.type === 'dir') {
+ await fetchDirectoryContents(file.url, basePath, prefixToRemove)
+ }
+ }
+ } catch (e) {
+ console.error('Error fetching files:', e)
+ }
+}
\ No newline at end of file
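
For reference, the recursion above only relies on a few fields of each entry
returned by the GitHub contents API. A rough sketch of one entry, with
illustrative values:

```js
// One item from GET /repos/{owner}/{repo}/contents/{path},
// reduced to the fields the scraper actually reads:
const exampleEntry = {
  path: 'vac/1/coss.md', // used to compute the relative save path
  type: 'file', // 'file' => download and convert, 'dir' => recurse
  download_url: 'https://raw.githubusercontent.com/vacp2p/rfc-index/main/vac/1/coss.md',
  url: 'https://api.github.com/repos/vacp2p/rfc-index/contents/vac/1', // listing URL for directories
}
```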
diff --git a/scrapper/file.mjs b/scrapper/file.mjs
new file mode 100644
index 0000000..3f11b64
--- /dev/null
+++ b/scrapper/file.mjs
@@ -0,0 +1,62 @@
+import { mkdirp } from 'mkdirp'
+import fs from 'fs'
+import util from 'util'
+import stream from 'stream'
+
+export function readFile(path) {
+ return new Promise((resolve, reject) => {
+ fs.readFile(path, (err, data) => {
+ if (err) {
+ return reject(err);
+ }
+
+ resolve(data);
+ })
+ });
+}
+
+export function writeFile(path, content) {
+ return new Promise((resolve, reject) => {
+ fs.writeFile(path, content, err => {
+ if (err) {
+ return reject(err);
+ }
+
+ resolve();
+ })
+ })
+}
+
+export async function writeLargeFile(path, data) {
+ const pipeline = util.promisify(stream.pipeline)
+
+ // We need to handle backpressuring to not corrupt larger files, https://nodejs.org/en/learn/modules/backpressuring-in-streams
+ return pipeline(data, fs.createWriteStream(path))
+}
+
+export function removeDirectory(path) {
+ return new Promise((resolve, reject) => {
+ fs.rm(path, { recursive: true, force: true }, err => {
+ if (err) {
+ return reject(err)
+ }
+
+ resolve();
+ })
+ })
+}
+
+export function createDirectory(path) {
+ /*
+ On Windows, attempting to create a root directory fails (EPERM if it
+ exists, ENOENT if it does not), while on posix file systems it succeeds
+ silently in recursive mode. mkdirp preserves this system-specific
+ behavior, which is why we use it here instead of fs.mkdir.
+ */
+ return mkdirp(path)
+}
\ No newline at end of file
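
Aside: writeLargeFile promisifies stream.pipeline by hand; on Node 15+ the
promise-based version ships in 'stream/promises', so an equivalent sketch
would be:

```js
import fs from 'fs'
import { pipeline } from 'stream/promises'

// pipeline propagates backpressure and destroys both streams on error,
// so large responses are never fully buffered in memory.
export function writeLargeFile(path, data) {
  return pipeline(data, fs.createWriteStream(path))
}
```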
diff --git a/scrapper/github.mjs b/scrapper/github.mjs
new file mode 100644
index 0000000..84be9ce
--- /dev/null
+++ b/scrapper/github.mjs
@@ -0,0 +1,13 @@
+import { GITHUB_TOKEN } from './config.mjs'
+import axios from "axios";
+
+export async function fetchFromGitHub(url, callback) {
+ const response = await axios.get(url, {
+ headers: {
+ 'User-Agent': 'Node.js',
+ 'Authorization': `token ${GITHUB_TOKEN}`
+ }
+ });
+
+ return response.data;
+}
\ No newline at end of file
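
A minimal usage sketch (the URL mirrors the one built in main.mjs below):

```js
import { fetchFromGitHub } from './github.mjs'

// For a directory path the contents API returns an array of entries;
// for a single file it returns one object instead.
const files = await fetchFromGitHub(
  'https://api.github.com/repos/vacp2p/rfc-index/contents/vac',
)
console.log(files.map(file => `${file.type}: ${file.path}`))
```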
diff --git a/scrapper/main.mjs b/scrapper/main.mjs
new file mode 100644
index 0000000..580e00a
--- /dev/null
+++ b/scrapper/main.mjs
@@ -0,0 +1,19 @@
+import { fetchDirectoryContents } from './fetch-content.mjs'
+
+const directoriesToSync = ['codex', 'nomos', 'status', 'vac', 'waku']
+
+async function main() {
+ for (let i = 0; i < directoriesToSync.length; i++) {
+ const dirName = directoriesToSync[i];
+
+ const baseUrl = `https://api.github.com/repos/vacp2p/rfc-index/contents/${dirName}`
+
+ const baseSavePath = `./${dirName}/`
+ const prefixToRemove = dirName + '/'
+
+ await fetchDirectoryContents(baseUrl, baseSavePath, prefixToRemove)
+ console.log(`Synced ${dirName}`)
+ }
+}
+
+main();
\ No newline at end of file
diff --git a/scrapper/markdown-convertor.mjs b/scrapper/markdown-convertor.mjs
new file mode 100644
index 0000000..31b8c98
--- /dev/null
+++ b/scrapper/markdown-convertor.mjs
@@ -0,0 +1,148 @@
+function enhanceMarkdownWithBulletPointsCorrected(input) {
+ // Split the input text into lines
+ const lines = input.split('\n')
+ // Initialize an array to hold the extracted fields
+ let extractedFields = []
+ // Initialize variables to track the frontmatter and contributors section
+ let inFrontMatter = false
+ let inContributors = false
+ let contributorsLines = [] // Holds contributors lines
+
+ // Process each line
+ const outputLines = lines.map(line => {
+ if (line.trim() === '---') {
+ inFrontMatter = !inFrontMatter
+ if (!inFrontMatter && contributorsLines.length) {
+ // We're exiting frontmatter; time to add contributors
+ extractedFields.push(`contributors:\n${contributorsLines.join('\n')}`)
+ contributorsLines = [] // Reset for safety
+ }
+ return line // Keep the frontmatter delimiters
+ }
+
+ if (inFrontMatter) {
+ if (line.startsWith('contributors:')) {
+ inContributors = true // Entering contributors section
+ } else if (inContributors) {
+ if (line.startsWith(' -')) {
+ contributorsLines.push(line.trim()) // Add contributors line
+ } else {
+ // Exiting contributors section
+ inContributors = false
+ extractedFields.push(`contributors:\n${contributorsLines.join('\n')}`)
+ contributorsLines = [] // Reset
+ }
+ } else {
+ const match = line.match(/(status|category|editor):(.*)/)
+ if (match) {
+ extractedFields.push(line.trim())
+ }
+ }
+ }
+
+ return line // Return the line unmodified
+ })
+
+ // Find the index of the second frontmatter delimiter
+ const endOfFrontMatterIndex = outputLines.findIndex(
+ (line, index) => line.trim() === '---' && index > 0,
+ )
+
+ // Insert the extracted fields as capitalized bullet points after the frontmatter
+ const bulletPoints = extractedFields
+ .map(field => {
+ // Capitalize the first letter of the label and ensure proper formatting
+ // for multi-line fields (the label already ends with a colon)
+ if (field.includes('\n')) {
+ const [label, ...values] = field.split('\n')
+ return `- ${label.charAt(0).toUpperCase() +
+ label.slice(1)}\n ${values.join('\n ')}`
+ } else {
+ return `- ${field.charAt(0).toUpperCase() + field.slice(1)}`
+ }
+ })
+ .join('\n')
+ outputLines.splice(endOfFrontMatterIndex + 1, 0, bulletPoints)
+
+ // Join the lines back into a single string and return
+ return outputLines.join('\n')
+}
+
+function parseSlugFromFrontmatter(content) {
+ const frontmatterMatch = content.match(/---\s*\n([\s\S]*?)\n---/)
+ if (frontmatterMatch) {
+ const frontmatterContent = frontmatterMatch[1]
+
+ function extractNumberFromTitle(content) {
+ const parts = content.split('/')
+ return parseInt(parts[0].split(' ')[1], 10)
+ }
+
+ return extractNumberFromTitle(frontmatterContent)
+ }
+ return 1 // Default to 1 if the RFC number can't be parsed
+}
+
+function unescapeHtmlComments(htmlString) {
+ return htmlString.replace(/\\<\!--/g, '\n\n')
+}
+
+function updateMarkdownImagePath(content, number) {
+ const regex = /(!\[.*?\]\(\.\/)images/g
+
+ return content.replace(regex, `$1${number}/images`)
+}
+
+export function vacMarkdownToDocusaurusMarkdown(fileContent) {
+ let convertedContent = fileContent;
+
+ // Remove 'tags' line from frontmatter because the format is wrong
+ convertedContent = convertedContent.replace(/tags:.*\n?/, '')
+
+ // Replace <br> with <br/>
+ convertedContent = convertedContent.replace(/<br>/g, '<br/>')
+
+ // Escape < and > with \< and \>, respectively
+ // Be cautious with this replacement; adjust as needed based on your context
+ convertedContent = convertedContent.replace(/</g, '\\<')
+ convertedContent = convertedContent.replace(/>/g, '\\>')
+
+ // NEW: Remove 'slug' line from frontmatter
+ convertedContent = convertedContent.replace(/^slug:.*\n?/m, '')
+
+ // Replace empty Markdown links with placeholder URL
+ convertedContent = convertedContent.replace(/\[([^\]]+)\]\(\)/g, '[$1](#)')
+
+ convertedContent = unescapeHtmlComments(convertedContent)
+
+ // Parse sidebarPosition from the RFC number in the frontmatter
+ const sidebarPosition = parseSlugFromFrontmatter(convertedContent) || 1
+
+ convertedContent = enhanceMarkdownWithBulletPointsCorrected(convertedContent)
+
+ convertedContent = updateMarkdownImagePath(convertedContent, sidebarPosition)
+
+ // Insert sidebar_position at the end of frontmatter if it doesn't exist
+ if (
+ /^---\s*[\s\S]+?---/.test(convertedContent) &&
+ !/sidebar_position: \d+/.test(convertedContent)
+ ) {
+ convertedContent = convertedContent.replace(
+ /^---\s*([\s\S]+?)---/,
+ `---\n$1sidebar_position: ${sidebarPosition}\n---`,
+ )
+ }
+
+ return convertedContent;
+}
+
+export function adjustPathForMarkdown(filePath) {
+ const parts = filePath.split('/')
+
+ if (parts?.length === 1) return filePath
+ if (filePath.includes('README.md')) return filePath
+
+ if (parts[parts.length - 1].endsWith('.md')) {
+ parts.splice(parts.length - 2, 1)
+ }
+
+ return parts.join('/')
+}
\ No newline at end of file
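
To make the conversion concrete, here is a sketch of what
vacMarkdownToDocusaurusMarkdown does to a hypothetical vac-style page (field
values are illustrative):

```js
import { vacMarkdownToDocusaurusMarkdown } from './markdown-convertor.mjs'

// Hypothetical input: frontmatter in the vac RFC style plus one image.
const input = `---
title: 1/COSS
status: draft
editor: Jane Doe
contributors:
 - John Roe
---

![diagram](./images/diagram.png)
`

console.log(vacMarkdownToDocusaurusMarkdown(input))
// Roughly:
// ---
// title: 1/COSS
// status: draft
// editor: Jane Doe
// contributors:
//  - John Roe
// sidebar_position: 1
// ---
// - Status: draft
// - Editor: Jane Doe
// - Contributors:
//  - John Roe
//
// ![diagram](./1/images/diagram.png)
```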
diff --git a/yarn.lock b/yarn.lock
index d6aae1a..ca8ca3e 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -6765,6 +6765,11 @@ dotenv@^16.0.3:
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.0.3.tgz#115aec42bac5053db3c456db30cc243a5a836a07"
integrity sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==
+dotenv@^16.4.5:
+ version "16.4.5"
+ resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.4.5.tgz#cdd3b3b604cb327e286b4762e13502f717cb099f"
+ integrity sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==
+
draco3d@^1.4.1:
version "1.5.6"
resolved "https://registry.yarnpkg.com/draco3d/-/draco3d-1.5.6.tgz#0d570a9792e3a3a9fafbfea065b692940441c626"
@@ -10354,6 +10359,11 @@ mkdirp@0.x, mkdirp@^0.5.1:
dependencies:
minimist "^1.2.6"
+mkdirp@^3.0.1:
+ version "3.0.1"
+ resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-3.0.1.tgz#e44e4c5607fb279c168241713cc6e0fea9adcb50"
+ integrity sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==
+
mmd-parser@^1.0.4:
version "1.0.4"
resolved "https://registry.yarnpkg.com/mmd-parser/-/mmd-parser-1.0.4.tgz#87cc05782cb5974ca854f0303fc5147bc9d690e7"