From af599402d0155c2c34334035b191ed3edbb21517 Mon Sep 17 00:00:00 2001 From: Andrea Maria Piana Date: Thu, 7 Nov 2019 09:56:45 +0100 Subject: [PATCH] Stripped down version of markdown --- ast/node.go | 182 ++++- ast/print.go | 17 +- block_test.go | 223 ------- cmd/printast/response.txt | 310 +++++++++ esc_test.go | 50 -- go.mod | 8 +- go.sum | 21 + helpers_test.go | 79 +-- html/callouts.go | 42 -- html/callouts_test.go | 27 - html/doc.go | 43 -- html/esc.go | 50 -- html/renderer.go | 1318 ------------------------------------- html/smartypants.go | 444 ------------- html_renderer_test.go | 56 -- inline_test.go | 334 ++-------- main/main.go | 21 + markdown.go | 19 - markdown_test.go | 8 +- mmark_test.go | 17 - parser/block.go | 581 +--------------- parser/inline.go | 140 ++-- parser/parser.go | 23 +- ref_test.go | 168 ----- s/fuzz.sh | 2 +- testdata/emphasis.test | 34 +- testdata/status_tag.test | 44 ++ 27 files changed, 746 insertions(+), 3515 deletions(-) delete mode 100644 block_test.go create mode 100644 cmd/printast/response.txt delete mode 100644 esc_test.go create mode 100644 go.sum delete mode 100644 html/callouts.go delete mode 100644 html/callouts_test.go delete mode 100644 html/doc.go delete mode 100644 html/esc.go delete mode 100644 html/renderer.go delete mode 100644 html/smartypants.go delete mode 100644 html_renderer_test.go create mode 100644 main/main.go delete mode 100644 ref_test.go create mode 100644 testdata/status_tag.test diff --git a/ast/node.go b/ast/node.go index e6fcba9..a6f0327 100644 --- a/ast/node.go +++ b/ast/node.go @@ -1,5 +1,7 @@ package ast +import "encoding/json" + // ListType contains bitwise or'ed flags for list and list item objects. type ListType int @@ -74,7 +76,7 @@ type Node interface { // Container is a type of node that can contain children type Container struct { - Parent Node + Parent Node `json:"-"` Children []Node Literal []byte // Text contents of the leaf nodes @@ -83,6 +85,21 @@ type Container struct { *Attribute // Block level attribute } +func (c *Container) MarshalJSON() ([]byte, error) { + type ContainerJSON struct { + Children []Node `json:"children"` + Literal string `json:"literal"` + *Attribute + } + var c1 ContainerJSON + c1.Children = c.Children + c1.Literal = string(c.Literal) + c1.Attribute = c.Attribute + + return json.Marshal(&c1) + +} + // AsContainer returns itself as *Container func (c *Container) AsContainer() *Container { return c @@ -115,7 +132,7 @@ func (c *Container) SetChildren(newChildren []Node) { // Leaf is a type of node that cannot have children type Leaf struct { - Parent Node + Parent Node `json:"-"` Literal []byte // Text contents of the leaf nodes Content []byte // Markdown content of the block nodes @@ -123,6 +140,16 @@ type Leaf struct { *Attribute // Block level attribute } +func (c *Leaf) MarshalJSON() ([]byte, error) { + type LeafJSON struct { + Literal string `json:"literal"` + } + var c1 LeafJSON + c1.Literal = string(c.Literal) + + return json.Marshal(&c1) +} + // AsContainer returns nil func (l *Leaf) AsContainer() *Container { return nil @@ -158,6 +185,14 @@ type Document struct { Container } +func (doc *Document) MarshalJSON() ([]byte, error) { + children := doc.GetChildren() + if len(children) != 0 { + return json.Marshal(children) + } + return []byte("[]"), nil +} + // DocumentMatter represents markdown node that signals a document // division: frontmatter, mainmatter or backmatter. 
type DocumentMatter struct { @@ -171,6 +206,21 @@ type BlockQuote struct { Container } +func (c *BlockQuote) MarshalJSON() ([]byte, error) { + type BlockQuoteJSON struct { + Type string `json:"type"` + Children []Node `json:"children"` + Literal string `json:"literal"` + *Attribute + } + var c1 BlockQuoteJSON + c1.Children = c.Children + c1.Literal = string(c.Literal) + c1.Type = "blockquote" + + return json.Marshal(&c1) +} + // Aside represents an markdown aside node. type Aside struct { Container @@ -206,6 +256,17 @@ type Paragraph struct { Container } +func (c *Paragraph) MarshalJSON() ([]byte, error) { + type ParagraphJSON struct { + Type string `json:"type"` + Children []Node `json:"children"` + } + var c1 ParagraphJSON + c1.Children = c.Children + c1.Type = "paragraph" + return json.Marshal(&c1) +} + // Math represents markdown MathAjax inline node type Math struct { Leaf @@ -226,6 +287,28 @@ type Heading struct { IsSpecial bool // We are a special heading (starts with .#) } +func (c *Heading) MarshalJSON() ([]byte, error) { + type HeadingJSON struct { + Type string `json:"type"` + Children []Node `json:"children"` + Literal string `json:"literal"` + Level int `json:"level"` + IsTitleblock bool `json:"isTitleBlock"` + + *Attribute + } + var c1 HeadingJSON + c1.Children = c.Children + c1.Literal = string(c.Literal) + c1.Attribute = c.Attribute + c1.Type = "heading" + c1.Level = c.Level + c1.IsTitleblock = c.IsTitleblock + + return json.Marshal(&c1) + +} + // HorizontalRule represents markdown horizontal rule node type HorizontalRule struct { Leaf @@ -233,17 +316,60 @@ type HorizontalRule struct { // Emph represents markdown emphasis node type Emph struct { - Container + Leaf +} + +func (c *Emph) MarshalJSON() ([]byte, error) { + type EmphJSON struct { + Type string `json:"type"` + Literal string `json:"literal"` + *Attribute + } + var c1 EmphJSON + c1.Literal = string(c.Literal) + c1.Attribute = c.Attribute + c1.Type = "emph" + + return json.Marshal(&c1) +} + +type StatusTag struct { + Leaf +} + +func (c *StatusTag) MarshalJSON() ([]byte, error) { + type StatusTagJSON struct { + Type string `json:"type"` + Literal string `json:"literal"` + } + var c1 StatusTagJSON + c1.Literal = string(c.Literal) + c1.Type = "status-tag" + return json.Marshal(&c1) } // Strong represents markdown strong node type Strong struct { - Container + Leaf +} + +func (c *Strong) MarshalJSON() ([]byte, error) { + type StrongJSON struct { + Type string `json:"type"` + Literal string `json:"literal"` + *Attribute + } + var c1 StrongJSON + c1.Literal = string(c.Literal) + c1.Attribute = c.Attribute + c1.Type = "strong" + + return json.Marshal(&c1) } // Del represents markdown del node type Del struct { - Container + Leaf } // Link represents markdown link node @@ -257,6 +383,26 @@ type Link struct { DeferredID []byte // If a deferred link this holds the original ID. } +func (c *Link) MarshalJSON() ([]byte, error) { + type LinkJSON struct { + Type string `json:"type"` + Children []Node `json:"children"` + Literal string `json:"literal"` + Title string `json:"title"` + Destination string `json:"destination"` + *Attribute + } + var c1 LinkJSON + c1.Children = c.Children + c1.Literal = string(c.Literal) + c1.Attribute = c.Attribute + c1.Title = string(c.Title) + c1.Destination = string(c.Destination) + c1.Type = "link" + + return json.Marshal(&c1) +} + // CrossReference is a reference node. 
type CrossReference struct { Container @@ -302,6 +448,20 @@ type CodeBlock struct { FenceOffset int } +func (c *CodeBlock) MarshalJSON() ([]byte, error) { + type CodeBlockJSON struct { + Type string `json:"type"` + Literal string `json:"literal"` + *Attribute + } + var c1 CodeBlockJSON + c1.Literal = string(c.Literal) + c1.Type = "codeblock" + c1.Attribute = c.Attribute + + return json.Marshal(&c1) +} + // Softbreak represents markdown softbreak node // Note: not used currently type Softbreak struct { @@ -323,6 +483,18 @@ type Code struct { Leaf } +func (c *Code) MarshalJSON() ([]byte, error) { + type CodeJSON struct { + Type string `json:"type"` + Literal string `json:"literal"` + } + var c1 CodeJSON + c1.Literal = string(c.Literal) + c1.Type = "code" + + return json.Marshal(&c1) +} + // HTMLSpan represents markdown html span node type HTMLSpan struct { Leaf diff --git a/ast/print.go b/ast/print.go index 75daf91..35ea0c7 100644 --- a/ast/print.go +++ b/ast/print.go @@ -36,22 +36,12 @@ func ToString(doc Node) string { return buf.String() } -func contentToString(d1 []byte, d2 []byte) string { - if d1 != nil { - return string(d1) - } - if d2 != nil { - return string(d2) - } - return "" -} - func getContent(node Node) string { if c := node.AsContainer(); c != nil { - return contentToString(c.Literal, c.Content) + return string(c.Literal) } leaf := node.AsLeaf() - return contentToString(leaf.Literal, leaf.Content) + return string(leaf.Literal) } func shortenString(s string, maxLen int) string { @@ -126,6 +116,9 @@ func printRecur(w io.Writer, node Node, prefix string, depth int) { case *Link: content := "url=" + string(v.Destination) printDefault(w, indent, typeName, content) + case *StatusTag: + content := "tag=" + string(v.Literal) + printDefault(w, indent, typeName, content) case *Image: content := "url=" + string(v.Destination) printDefault(w, indent, typeName, content) diff --git a/block_test.go b/block_test.go deleted file mode 100644 index d5ad924..0000000 --- a/block_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package markdown - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/gomarkdown/markdown/html" - "github.com/gomarkdown/markdown/parser" -) - -func must(err error) { - if err != nil { - panic(err.Error()) - } -} - -func writeTest(file string, tests []string) { - path := filepath.Join("testdata", file) - f, err := os.Create(path) - must(err) - defer f.Close() - lastIdx := len(tests) - 1 - for i, s := range tests { - if !strings.HasSuffix(s, "\n") { - s += "\n" - } - fmt.Fprint(f, s) - if i != lastIdx { - fmt.Fprint(f, "+++\n") - } - } -} - -func TestPrefixHeaderNoExtensions(t *testing.T) { - tests := readTestFile2(t, "PrefixHeaderNoExtensions.tests") - doTestsBlock(t, tests, 0) -} - -func TestPrefixHeaderSpaceExtension(t *testing.T) { - tests := readTestFile2(t, "PrefixHeaderSpaceExtension.tests") - doTestsBlock(t, tests, parser.SpaceHeadings) -} - -func TestPrefixHeaderIdExtension(t *testing.T) { - tests := readTestFile2(t, "PrefixHeaderIdExtension.tests") - doTestsBlock(t, tests, parser.HeadingIDs) -} - -func TestPrefixHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) { - tests := readTestFile2(t, "PrefixHeaderIdExtensionWithPrefixAndSuffix.tests") - - parameters := html.RendererOptions{ - HeadingIDPrefix: "PRE:", - HeadingIDSuffix: ":POST", - } - - doTestsParam(t, tests, TestParams{ - extensions: parser.HeadingIDs, - Flags: html.UseXHTML, - RendererOptions: parameters, - }) -} - -func TestPrefixAutoHeaderIdExtension(t *testing.T) { - tests := 
readTestFile2(t, "PrefixAutoHeaderIdExtension.tests") - doTestsBlock(t, tests, parser.AutoHeadingIDs) -} - -func TestPrefixAutoHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) { - tests := readTestFile2(t, "PrefixAutoHeaderIdExtensionWithPrefixAndSuffix.tests") - parameters := html.RendererOptions{ - HeadingIDPrefix: "PRE:", - HeadingIDSuffix: ":POST", - } - - doTestsParam(t, tests, TestParams{ - extensions: parser.AutoHeadingIDs, - Flags: html.UseXHTML, - RendererOptions: parameters, - }) -} - -func TestPrefixMultipleHeaderExtensions(t *testing.T) { - tests := readTestFile2(t, "PrefixMultipleHeaderExtensions.tests") - doTestsBlock(t, tests, parser.AutoHeadingIDs|parser.HeadingIDs) -} - -func TestPrefixHeaderMmarkExtension(t *testing.T) { - tests := readTestFile2(t, "PrefixHeaderMmarkExtension.tests") - doTestsBlock(t, tests, parser.Mmark) -} - -func TestUnderlineHeaders(t *testing.T) { - tests := readTestFile2(t, "UnderlineHeaders.tests") - doTestsBlock(t, tests, 0) -} - -func TestUnderlineHeadersAutoIDs(t *testing.T) { - tests := readTestFile2(t, "UnderlineHeadersAutoIDs.tests") - doTestsBlock(t, tests, parser.AutoHeadingIDs) -} - -func TestHorizontalRule(t *testing.T) { - tests := readTestFile2(t, "HorizontalRule.tests") - doTestsBlock(t, tests, 0) -} - -func TestUnorderedList(t *testing.T) { - tests := readTestFile2(t, "UnorderedList.tests") - doTestsBlock(t, tests, 0) -} - -func TestOrderedList(t *testing.T) { - tests := readTestFile2(t, "OrderedList.tests") - doTestsBlock(t, tests, 0) -} - -func TestDefinitionList(t *testing.T) { - tests := readTestFile2(t, "DefinitionList.tests") - doTestsBlock(t, tests, parser.DefinitionLists) -} - -func TestNestedDefinitionList(t *testing.T) { - tests := readTestFile2(t, "NestedDefinitionList.tests") - doTestsBlock(t, tests, parser.DefinitionLists) -} - -func TestPreformattedHtml(t *testing.T) { - tests := readTestFile2(t, "PreformattedHtml.tests") - doTestsBlock(t, tests, 0) -} - -func TestPreformattedHtmlLax(t *testing.T) { - tests := readTestFile2(t, "PreformattedHtmlLax.tests") - doTestsBlock(t, tests, parser.LaxHTMLBlocks) -} - -func TestFencedCodeBlock(t *testing.T) { - tests := readTestFile2(t, "FencedCodeBlock.tests") - doTestsBlock(t, tests, parser.FencedCode) -} - -func TestFencedCodeInsideBlockquotes(t *testing.T) { - tests := readTestFile2(t, "FencedCodeInsideBlockquotes.tests") - doTestsBlock(t, tests, parser.FencedCode) -} - -func TestTable(t *testing.T) { - tests := readTestFile2(t, "Table.tests") - doTestsBlock(t, tests, parser.Tables) -} - -func TestUnorderedListWith_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) { - tests := readTestFile2(t, "UnorderedListWith_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK.tests") - doTestsBlock(t, tests, parser.NoEmptyLineBeforeBlock) -} - -func TestOrderedList_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) { - tests := readTestFile2(t, "OrderedList_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK.tests") - doTestsBlock(t, tests, parser.NoEmptyLineBeforeBlock) -} - -func TestFencedCodeBlock_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) { - tests := readTestFile2(t, "FencedCodeBlock_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK.tests") - doTestsBlock(t, tests, parser.FencedCode|parser.NoEmptyLineBeforeBlock) -} - -func TestMathBlock(t *testing.T) { - tests := readTestFile2(t, "MathBlock.tests") - doTestsBlock(t, tests, parser.CommonExtensions) -} - -func TestDefinitionListWithFencedCodeBlock(t *testing.T) { - tests := readTestFile2(t, "DefinitionListWithFencedCodeBlock.tests") - doTestsBlock(t, tests, 
parser.FencedCode|parser.DefinitionLists) -} - -func TestListWithFencedCodeBlockAndHeader(t *testing.T) { - tests := readTestFile2(t, "ListWithFencedCodeBlockAndHeader.tests") - doTestsBlock(t, tests, parser.FencedCode) -} - -func TestTitleBlock_EXTENSION_TITLEBLOCK(t *testing.T) { - tests := readTestFile2(t, "TitleBlock_EXTENSION_TITLEBLOCK.tests") - doTestsBlock(t, tests, parser.Titleblock) -} - -func TestBlockComments(t *testing.T) { - tests := readTestFile2(t, "BlockComments.tests") - doTestsBlock(t, tests, 0) -} - -func TestTOC(t *testing.T) { - tests := readTestFile2(t, "TOC.tests") - doTestsParam(t, tests, TestParams{ - Flags: html.UseXHTML | html.TOC, - }) -} - -func TestCompletePage(t *testing.T) { - tests := readTestFile2(t, "CompletePage.tests") - doTestsParam(t, tests, TestParams{Flags: html.UseXHTML | html.CompletePage}) -} - -func TestSpaceHeadings(t *testing.T) { - tests := readTestFile2(t, "SpaceHeadings.tests") - doTestsParam(t, tests, TestParams{extensions: parser.SpaceHeadings}) -} - -func TestCodeInList(t *testing.T) { - tests := readTestFile2(t, "code_in_list.test") - exts := parser.CommonExtensions - doTestsParam(t, tests, TestParams{extensions: exts}) -} - -func TestLists(t *testing.T) { - tests := readTestFile2(t, "Lists.tests") - exts := parser.CommonExtensions - doTestsParam(t, tests, TestParams{extensions: exts}) -} diff --git a/cmd/printast/response.txt b/cmd/printast/response.txt new file mode 100644 index 0000000..4f2b301 --- /dev/null +++ b/cmd/printast/response.txt @@ -0,0 +1,310 @@ +Ast of file 'TEST.md': +HorizontalRule +Paragraph + Text + Strong + Text 'Advertisement :)' +Paragraph + Text '-' + Strong + Text '[pica](https://nodeca.github.io/pica/...' + Text '- high quality and fast image\n res...' + Strong + Text '[babelfish](https://github.com/nodeca...' + Text '- developer friendly\n i18n with pl...' +Paragraph + Text 'You will like those projects!' +HorizontalRule +Heading + Text 'h1 Heading 8-)' +Heading + Text 'h2 Heading' +Heading + Text 'h3 Heading' +Heading + Text 'h4 Heading' +Heading + Text 'h5 Heading' +Heading + Text 'h6 Heading' +Heading + Text 'Horizontal Rules' +HorizontalRule +HorizontalRule +HorizontalRule +Heading + Text 'Typographic replacements' +Paragraph + Text 'Enable typographer option to see resu...' +Paragraph + Text '(c) (C) (r) (R) (tm) (TM) (p) (P) +-' +Paragraph + Text 'test.. test... test..... test?..... t...' +Paragraph + Text '!!!!!! ???? ,, -- ---' +Paragraph + Text '"Smartypants, double quotes" and 'sin...' +Heading + Text 'Emphasis' +Paragraph + Text + Strong + Text 'This is bold text' +Paragraph + Text + Strong + Text 'This is bold text' +Paragraph + Text + Emph + Text 'This is italic text' +Paragraph + Text + Emph + Text 'This is italic text' +Paragraph + Text + Del + Text 'Strikethrough' +Heading + Text 'Blockquotes' +BlockQuote + Paragraph + Text 'Blockquotes can also be nested...\n> ...' +Heading + Text 'Lists' +Paragraph + Text 'Unordered' +Paragraph + Text '+ Create a list by starting a line wi...' + Code '+ "' + Text '" +' + Code '+' + Text '+ "' + Code '" +' + Text ',' + Code '+ "' + Text '" +' + Code '-' + Text '+ "' + Code '" +' + Text ', or' + Code '+ "' + Text '" +' + Code '*' + Text '+ "' + Code '" +' + Text '\n+ Sub-lists are made by indenting 2...' +Paragraph + Text 'Ordered' +Paragraph + Text '1. Lorem ipsum dolor sit amet\n2. Con...' +Paragraph + Text '1. You can use sequential numbers...\...' + Code '+ "' + Text '" +' + Code '1.' 
+ Text '+ "' + Code '" +' +Paragraph + Text 'Start numbering with offset:' +Paragraph + Text '57. foo\n1. bar' +Heading + Text 'Code' +Paragraph + Text 'Inline' + Code '+ "' + Text '" +' + Code 'code' + Text '+ "' + Code '" +' +Paragraph + Text 'Indented code' +Paragraph + Text '// Some comments\n line 1 of code\...' +Paragraph + Text 'Block code "fences"' +Paragraph + Text + Code '+ "' + Text '" +' + Code '+ "`" +' + Text '+ "' + Code '" +' + Text '\nSample text here...\n' + Code '+ "' + Text '" +' + Code '+ "`" +' + Text '+ "' + Code '" +' +Paragraph + Text 'Syntax highlighting' +Paragraph + Text + Code '+ "' + Text '" +' + Code '+ "`" +' + Text '+ "' + Code '" +' + Text 'js\nvar foo = function (bar) {\n re...' +Paragraph + Text 'console.log(foo(5));\n' + Code '+ "' + Text '" +' + Code '+ "`" +' + Text '+ "' + Code '" +' +Heading + Text 'Tables' +Paragraph + Text '| Option | Description |\n| ------ | ...' +Paragraph + Text 'Right aligned columns' +Paragraph + Text '| Option | Description |\n| ------:| ...' +Heading + Text 'Links' +Paragraph + Text + Link 'url=http://dev.nodeca.com' + Text 'link text' +Paragraph + Text + Link 'url=http://nodeca.github.io/pica/demo/' + Text 'link with title' +Paragraph + Text 'Autoconverted link' + Link 'url=https://github.com/nodeca/pica' + Text 'https://github.com/nodeca/pica' + Text '(enable linkify to see)' +Heading + Text 'Images' +Paragraph + Text + Image 'url=https://octodex.github.com/images/minion.png' + Text 'Minion' + Text '\n' + Image 'url=https://octodex.github.com/images/stormtroopocat.jpg' + Text 'Stormtroopocat' +Paragraph + Text 'Like links, Images also have a footno...' +Paragraph + Text + Image 'url=https://octodex.github.com/images/dojocat.jpg' + Text 'Alt text' +Paragraph + Text 'With a reference later in the documen...' +Heading + Text 'Plugins' +Paragraph + Text 'The killer feature of' + Code '+ "' + Text '" +' + Code 'markdown-it' + Text '+ "' + Code '" +' + Text 'is very effective support of\n' + Link 'url=https://www.npmjs.org/browse/keyword/markdown-it-plugin' + Text 'syntax plugins' + Text '.' +Heading + Text + Link 'url=https://github.com/markdown-it/markdown-it-emoji' + Text 'Emojies' +BlockQuote + Paragraph + Text 'Classic markup: :wink: :crush: :cry: ...' +Paragraph + Text 'see' + Link 'url=https://github.com/markdown-it/markdown-it-emoji#change-output' + Text 'how to change output' + Text 'with twemoji.' +Heading + Text + Link 'url=https://github.com/markdown-it/markdown-it-sub' + Text 'Subscript' + Text '/' + Link 'url=https://github.com/markdown-it/markdown-it-sup' + Text 'Superscript' +Paragraph + Text '- 19^th^\n- H~2~O' +Heading + Text + Link 'url=https://github.com/markdown-it/markdown-it-ins' + Text '\' +Paragraph + Text '++Inserted text++' +Heading + Text + Link 'url=https://github.com/markdown-it/markdown-it-mark' + Text '\' +Paragraph + Text '==Marked text==' +Heading + Text + Link 'url=https://github.com/markdown-it/markdown-it-footnote' + Text 'Footnotes' +Paragraph + Text 'Footnote 1 link[^first].' +Paragraph + Text 'Footnote 2 link[^second].' +Paragraph + Text 'Inline footnote^[Text of inline footn...' +Paragraph + Text 'Duplicated footnote reference[^second].' +Paragraph + Text '[^first]: Footnote' + Strong + Text 'can have markup' +Paragraph + Text 'and multiple paragraphs.' +Paragraph + Text '[^second]: Footnote text.' 
+Heading + Text + Link 'url=https://github.com/markdown-it/markdown-it-deflist' + Text 'Definition lists' +List 'flags=definition start' + ListItem 'flags=definition term has_block start' + Paragraph + Text 'Term 1' + ListItem 'flags=definition has_block' + Paragraph + Text 'Definition 1\nwith lazy continuation.' + ListItem 'flags=definition term has_block' + Paragraph + Text 'Term 2 with' + Emph + Text 'inline markup' + Text + ListItem 'flags=definition has_block end' + Paragraph + Text 'Definition 2\n\n\n { some code, pa...' +Paragraph + Text + Emph + Text 'Compact style:' +Paragraph + Text 'Term 1\n ~ Definition 1' +Paragraph + Text 'Term 2\n ~ Definition 2a\n ~ Defini...' +Heading + Text + Link 'url=https://github.com/markdown-it/markdown-it-abbr' + Text 'Abbreviations' +Paragraph + Text 'This is HTML abbreviation example.' +Paragraph + Text 'It converts "HTML", but keep intact p...' +Paragraph + Text '*[HTML]: Hyper Text Markup Language' +Heading + Text + Link 'url=https://github.com/markdown-it/markdown-it-container' + Text 'Custom containers' +Paragraph + Text '::: warning\n' + Emph + Text 'here be dragons' + diff --git a/esc_test.go b/esc_test.go deleted file mode 100644 index adf7339..0000000 --- a/esc_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package markdown - -import ( - "bytes" - "testing" - - "github.com/gomarkdown/markdown/html" -) - -func TestEsc(t *testing.T) { - tests := []string{ - "abc", "abc", - "a&c", "a&c", - "<", "<", - "[]:<", "[]:<", - "Hello |" - processingInstruction = "[<][?].*?[?][>]" - singleQuotedValue = "'[^']*'" - tagName = "[A-Za-z][A-Za-z0-9-]*" - unquotedValue = "[^\"'=<>`\\x00-\\x20]+" -) - -// RenderNodeFunc allows reusing most of Renderer logic and replacing -// rendering of some nodes. If it returns false, Renderer.RenderNode -// will execute its logic. If it returns true, Renderer.RenderNode will -// skip rendering this node and will return WalkStatus -type RenderNodeFunc func(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) - -// RendererOptions is a collection of supplementary parameters tweaking -// the behavior of various parts of HTML renderer. -type RendererOptions struct { - // Prepend this text to each relative URL. - AbsolutePrefix string - // Add this text to each footnote anchor, to ensure uniqueness. - FootnoteAnchorPrefix string - // Show this text inside the tag for a footnote return link, if the - // FootnoteReturnLinks flag is enabled. If blank, the string - // [return] is used. - FootnoteReturnLinkContents string - // CitationFormatString defines how a citation is rendered. If blnck, the string - // [%s] is used. Where %s will be substituted with the citation target. - CitationFormatString string - // If set, add this text to the front of each Heading ID, to ensure uniqueness. - HeadingIDPrefix string - // If set, add this text to the back of each Heading ID, to ensure uniqueness. - HeadingIDSuffix string - - Title string // Document title (used if CompletePage is set) - CSS string // Optional CSS file URL (used if CompletePage is set) - Icon string // Optional icon file URL (used if CompletePage is set) - Head []byte // Optional head data injected in the section (used if CompletePage is set) - - Flags Flags // Flags allow customizing this renderer's behavior - - // if set, called at the start of RenderNode(). Allows replacing - // rendering of some nodes - RenderNodeHook RenderNodeFunc - - // Comments is a list of comments the renderer should detect when - // parsing code blocks and detecting callouts. 
- Comments [][]byte - - // Generator is a meta tag that is inserted in the generated HTML so show what rendered it. It should not include the closing tag. - // Defaults (note content quote is not closed) to ` " or ">" - - // Track heading IDs to prevent ID collision in a single generation. - headingIDs map[string]int - - lastOutputLen int - disableTags int - - sr *SPRenderer - - documentMatter ast.DocumentMatters // keep track of front/main/back matter. -} - -// NewRenderer creates and configures an Renderer object, which -// satisfies the Renderer interface. -func NewRenderer(opts RendererOptions) *Renderer { - // configure the rendering engine - closeTag := ">" - if opts.Flags&UseXHTML != 0 { - closeTag = " />" - } - - if opts.FootnoteReturnLinkContents == "" { - opts.FootnoteReturnLinkContents = `[return]` - } - if opts.CitationFormatString == "" { - opts.CitationFormatString = `[%s]` - } - if opts.Generator == "" { - opts.Generator = ` = len(tagname) { - break - } - - if strings.ToLower(string(tag[i]))[0] != tagname[j] { - return false, -1 - } - } - - if i == len(tag) { - return false, -1 - } - - rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') - if rightAngle >= i { - return true, rightAngle - } - - return false, -1 -} - -func isRelativeLink(link []byte) (yes bool) { - // a tag begin with '#' - if link[0] == '#' { - return true - } - - // link begin with '/' but not '//', the second maybe a protocol relative link - if len(link) >= 2 && link[0] == '/' && link[1] != '/' { - return true - } - - // only the root '/' - if len(link) == 1 && link[0] == '/' { - return true - } - - // current directory : begin with "./" - if bytes.HasPrefix(link, []byte("./")) { - return true - } - - // parent directory : begin with "../" - if bytes.HasPrefix(link, []byte("../")) { - return true - } - - return false -} - -func (r *Renderer) ensureUniqueHeadingID(id string) string { - for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] { - tmp := fmt.Sprintf("%s-%d", id, count+1) - - if _, tmpFound := r.headingIDs[tmp]; !tmpFound { - r.headingIDs[id] = count + 1 - id = tmp - } else { - id = id + "-1" - } - } - - if _, found := r.headingIDs[id]; !found { - r.headingIDs[id] = 0 - } - - return id -} - -func (r *Renderer) addAbsPrefix(link []byte) []byte { - if r.opts.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' 
{ - newDest := r.opts.AbsolutePrefix - if link[0] != '/' { - newDest += "/" - } - newDest += string(link) - return []byte(newDest) - } - return link -} - -func appendLinkAttrs(attrs []string, flags Flags, link []byte) []string { - if isRelativeLink(link) { - return attrs - } - var val []string - if flags&NofollowLinks != 0 { - val = append(val, "nofollow") - } - if flags&NoreferrerLinks != 0 { - val = append(val, "noreferrer") - } - if flags&HrefTargetBlank != 0 { - attrs = append(attrs, `target="_blank"`) - } - if len(val) == 0 { - return attrs - } - attr := fmt.Sprintf("rel=%q", strings.Join(val, " ")) - return append(attrs, attr) -} - -func isMailto(link []byte) bool { - return bytes.HasPrefix(link, []byte("mailto:")) -} - -func needSkipLink(flags Flags, dest []byte) bool { - if flags&SkipLinks != 0 { - return true - } - return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest) -} - -func isSmartypantable(node ast.Node) bool { - switch node.GetParent().(type) { - case *ast.Link, *ast.CodeBlock, *ast.Code: - return false - } - return true -} - -func appendLanguageAttr(attrs []string, info []byte) []string { - if len(info) == 0 { - return attrs - } - endOfLang := bytes.IndexAny(info, "\t ") - if endOfLang < 0 { - endOfLang = len(info) - } - s := `class="language-` + string(info[:endOfLang]) + `"` - return append(attrs, s) -} - -func (r *Renderer) outTag(w io.Writer, name string, attrs []string) { - s := name - if len(attrs) > 0 { - s += " " + strings.Join(attrs, " ") - } - io.WriteString(w, s+">") - r.lastOutputLen = 1 -} - -func footnoteRef(prefix string, node *ast.Link) string { - urlFrag := prefix + string(slugify(node.Destination)) - nStr := strconv.Itoa(node.NoteID) - anchor := `` + nStr + `` - return `` + anchor + `` -} - -func footnoteItem(prefix string, slug []byte) string { - return `
  • ` -} - -func footnoteReturnLink(prefix, returnLink string, slug []byte) string { - return ` ` + returnLink + `` -} - -func listItemOpenCR(listItem *ast.ListItem) bool { - if ast.GetPrevNode(listItem) == nil { - return false - } - ld := listItem.Parent.(*ast.List) - return !ld.Tight && ld.ListFlags&ast.ListTypeDefinition == 0 -} - -func skipParagraphTags(para *ast.Paragraph) bool { - parent := para.Parent - grandparent := parent.GetParent() - if grandparent == nil || !isList(grandparent) { - return false - } - isParentTerm := isListItemTerm(parent) - grandparentListData := grandparent.(*ast.List) - tightOrTerm := grandparentListData.Tight || isParentTerm - return tightOrTerm -} - -func (r *Renderer) out(w io.Writer, d []byte) { - r.lastOutputLen = len(d) - if r.disableTags > 0 { - d = htmlTagRe.ReplaceAll(d, []byte{}) - } - w.Write(d) -} - -func (r *Renderer) outs(w io.Writer, s string) { - r.lastOutputLen = len(s) - if r.disableTags > 0 { - s = htmlTagRe.ReplaceAllString(s, "") - } - io.WriteString(w, s) -} - -func (r *Renderer) cr(w io.Writer) { - if r.lastOutputLen > 0 { - r.outs(w, "\n") - } -} - -var ( - openHTags = []string{"", "", "", "", ""} -) - -func headingOpenTagFromLevel(level int) string { - if level < 1 || level > 5 { - return " 5 { - return "" - } - return closeHTags[level-1] -} - -func (r *Renderer) outHRTag(w io.Writer, attrs []string) { - hr := tagWithAttributes("") -} - -func (r *Renderer) text(w io.Writer, text *ast.Text) { - if r.opts.Flags&Smartypants != 0 { - var tmp bytes.Buffer - EscapeHTML(&tmp, text.Literal) - r.sr.Process(w, tmp.Bytes()) - } else { - _, parentIsLink := text.Parent.(*ast.Link) - if parentIsLink { - escLink(w, text.Literal) - } else { - EscapeHTML(w, text.Literal) - } - } -} - -func (r *Renderer) hardBreak(w io.Writer, node *ast.Hardbreak) { - r.outOneOf(w, r.opts.Flags&UseXHTML == 0, "
    ", "
    ") - r.cr(w) -} - -func (r *Renderer) nonBlockingSpace(w io.Writer, node *ast.NonBlockingSpace) { - r.outs(w, " ") -} - -func (r *Renderer) outOneOf(w io.Writer, outFirst bool, first string, second string) { - if outFirst { - r.outs(w, first) - } else { - r.outs(w, second) - } -} - -func (r *Renderer) outOneOfCr(w io.Writer, outFirst bool, first string, second string) { - if outFirst { - r.cr(w) - r.outs(w, first) - } else { - r.outs(w, second) - r.cr(w) - } -} - -func (r *Renderer) htmlSpan(w io.Writer, span *ast.HTMLSpan) { - if r.opts.Flags&SkipHTML == 0 { - r.out(w, span.Literal) - } -} - -func (r *Renderer) linkEnter(w io.Writer, link *ast.Link) { - var attrs []string - dest := link.Destination - dest = r.addAbsPrefix(dest) - var hrefBuf bytes.Buffer - hrefBuf.WriteString("href=\"") - escLink(&hrefBuf, dest) - hrefBuf.WriteByte('"') - attrs = append(attrs, hrefBuf.String()) - if link.NoteID != 0 { - r.outs(w, footnoteRef(r.opts.FootnoteAnchorPrefix, link)) - return - } - - attrs = appendLinkAttrs(attrs, r.opts.Flags, dest) - if len(link.Title) > 0 { - var titleBuff bytes.Buffer - titleBuff.WriteString("title=\"") - EscapeHTML(&titleBuff, link.Title) - titleBuff.WriteByte('"') - attrs = append(attrs, titleBuff.String()) - } - r.outTag(w, "") - } -} - -func (r *Renderer) link(w io.Writer, link *ast.Link, entering bool) { - // mark it but don't link it if it is not a safe link: no smartypants - if needSkipLink(r.opts.Flags, link.Destination) { - r.outOneOf(w, entering, "", "") - return - } - - if entering { - r.linkEnter(w, link) - } else { - r.linkExit(w, link) - } -} - -func (r *Renderer) imageEnter(w io.Writer, image *ast.Image) { - dest := image.Destination - dest = r.addAbsPrefix(dest) - if r.disableTags == 0 { - //if options.safe && potentiallyUnsafe(dest) { - //out(w, ``)
-		//} else {
-		r.outs(w, `<img src=`) - } -} - -func (r *Renderer) paragraphEnter(w io.Writer, para *ast.Paragraph) { - // TODO: untangle this clusterfuck about when the newlines need - // to be added and when not. - prev := ast.GetPrevNode(para) - if prev != nil { - switch prev.(type) { - case *ast.HTMLBlock, *ast.List, *ast.Paragraph, *ast.Heading, *ast.CaptionFigure, *ast.CodeBlock, *ast.BlockQuote, *ast.Aside, *ast.HorizontalRule: - r.cr(w) - } - } - - if prev == nil { - _, isParentBlockQuote := para.Parent.(*ast.BlockQuote) - if isParentBlockQuote { - r.cr(w) - } - _, isParentAside := para.Parent.(*ast.Aside) - if isParentAside { - r.cr(w) - } - } - - tag := tagWithAttributes("") - if !(isListItem(para.Parent) && ast.GetNextNode(para) == nil) { - r.cr(w) - } -} - -func (r *Renderer) paragraph(w io.Writer, para *ast.Paragraph, entering bool) { - if skipParagraphTags(para) { - return - } - if entering { - r.paragraphEnter(w, para) - } else { - r.paragraphExit(w, para) - } -} -func (r *Renderer) image(w io.Writer, node *ast.Image, entering bool) { - if entering { - r.imageEnter(w, node) - } else { - r.imageExit(w, node) - } -} - -func (r *Renderer) code(w io.Writer, node *ast.Code) { - r.outs(w, "") - EscapeHTML(w, node.Literal) - r.outs(w, "") -} - -func (r *Renderer) htmlBlock(w io.Writer, node *ast.HTMLBlock) { - if r.opts.Flags&SkipHTML != 0 { - return - } - r.cr(w) - r.out(w, node.Literal) - r.cr(w) -} - -func (r *Renderer) headingEnter(w io.Writer, nodeData *ast.Heading) { - var attrs []string - var class string - // TODO(miek): add helper functions for coalescing these classes. - if nodeData.IsTitleblock { - class = "title" - } - if nodeData.IsSpecial { - if class != "" { - class += " special" - } else { - class = "special" - } - } - if class != "" { - attrs = []string{`class="` + class + `"`} - } - if nodeData.HeadingID != "" { - id := r.ensureUniqueHeadingID(nodeData.HeadingID) - if r.opts.HeadingIDPrefix != "" { - id = r.opts.HeadingIDPrefix + id - } - if r.opts.HeadingIDSuffix != "" { - id = id + r.opts.HeadingIDSuffix - } - attrID := `id="` + id + `"` - attrs = append(attrs, attrID) - } - attrs = append(attrs, BlockAttrs(nodeData)...) - r.cr(w) - r.outTag(w, headingOpenTagFromLevel(nodeData.Level), attrs) -} - -func (r *Renderer) headingExit(w io.Writer, heading *ast.Heading) { - r.outs(w, headingCloseTagFromLevel(heading.Level)) - if !(isListItem(heading.Parent) && ast.GetNextNode(heading) == nil) { - r.cr(w) - } -} - -func (r *Renderer) heading(w io.Writer, node *ast.Heading, entering bool) { - if entering { - r.headingEnter(w, node) - } else { - r.headingExit(w, node) - } -} - -func (r *Renderer) horizontalRule(w io.Writer, node *ast.HorizontalRule) { - r.cr(w) - r.outHRTag(w, BlockAttrs(node)) - r.cr(w) -} - -func (r *Renderer) listEnter(w io.Writer, nodeData *ast.List) { - // TODO: attrs don't seem to be set - var attrs []string - - if nodeData.IsFootnotesList { - r.outs(w, "\n
    \n\n") - if r.opts.Flags&FootnoteNoHRTag == 0 { - r.outHRTag(w, nil) - r.cr(w) - } - } - r.cr(w) - if isListItem(nodeData.Parent) { - grand := nodeData.Parent.GetParent() - if isListTight(grand) { - r.cr(w) - } - } - - openTag := " 0 { - attrs = append(attrs, fmt.Sprintf(`start="%d"`, nodeData.Start)) - } - openTag = "\n") - } -} - -func (r *Renderer) list(w io.Writer, list *ast.List, entering bool) { - if entering { - r.listEnter(w, list) - } else { - r.listExit(w, list) - } -} - -func (r *Renderer) listItemEnter(w io.Writer, listItem *ast.ListItem) { - if listItemOpenCR(listItem) { - r.cr(w) - } - if listItem.RefLink != nil { - slug := slugify(listItem.RefLink) - r.outs(w, footnoteItem(r.opts.FootnoteAnchorPrefix, slug)) - return - } - - openTag := "
<li>" - if listItem.ListFlags&ast.ListTypeDefinition != 0 { - openTag = "<dd>" - if listItem.ListFlags&ast.ListTypeTerm != 0 { - openTag = "<dt>
    " - } - r.outs(w, openTag) -} - -func (r *Renderer) listItemExit(w io.Writer, listItem *ast.ListItem) { - if listItem.RefLink != nil && r.opts.Flags&FootnoteReturnLinks != 0 { - slug := slugify(listItem.RefLink) - prefix := r.opts.FootnoteAnchorPrefix - link := r.opts.FootnoteReturnLinkContents - s := footnoteReturnLink(prefix, link, slug) - r.outs(w, s) - } - - closeTag := "
  • " - if listItem.ListFlags&ast.ListTypeDefinition != 0 { - closeTag = "" - } - if listItem.ListFlags&ast.ListTypeTerm != 0 { - closeTag = "" - } - r.outs(w, closeTag) - r.cr(w) -} - -func (r *Renderer) listItem(w io.Writer, listItem *ast.ListItem, entering bool) { - if entering { - r.listItemEnter(w, listItem) - } else { - r.listItemExit(w, listItem) - } -} - -func (r *Renderer) codeBlock(w io.Writer, codeBlock *ast.CodeBlock) { - var attrs []string - // TODO(miek): this can add multiple class= attribute, they should be coalesced into one. - // This is probably true for some other elements as well - attrs = appendLanguageAttr(attrs, codeBlock.Info) - attrs = append(attrs, BlockAttrs(codeBlock)...) - r.cr(w) - - r.outs(w, "
    ")
    -	code := tagWithAttributes("")
    -	r.outs(w, "
    ") - if !isListItem(codeBlock.Parent) { - r.cr(w) - } -} - -func (r *Renderer) caption(w io.Writer, caption *ast.Caption, entering bool) { - if entering { - r.outs(w, "
    ") - return - } - r.outs(w, "
    ") -} - -func (r *Renderer) captionFigure(w io.Writer, figure *ast.CaptionFigure, entering bool) { - // TODO(miek): copy more generic ways of mmark over to here. - fig := "` - } else { - fig += ">" - } - r.outOneOf(w, entering, fig, "\n\n") -} - -func (r *Renderer) tableCell(w io.Writer, tableCell *ast.TableCell, entering bool) { - if !entering { - r.outOneOf(w, tableCell.IsHeader, "", "") - r.cr(w) - return - } - - // entering - var attrs []string - openTag := "") - // XXX: this is to adhere to a rather silly test. Should fix test. - if ast.GetFirstChild(node) == nil { - r.cr(w) - } - } else { - r.outs(w, "") - r.cr(w) - } -} - -func (r *Renderer) matter(w io.Writer, node *ast.DocumentMatter, entering bool) { - if !entering { - return - } - if r.documentMatter != ast.DocumentMatterNone { - r.outs(w, "\n") - } - switch node.Matter { - case ast.DocumentMatterFront: - r.outs(w, `
    `) - case ast.DocumentMatterMain: - r.outs(w, `
    `) - case ast.DocumentMatterBack: - r.outs(w, `
    `) - } - r.documentMatter = node.Matter -} - -func (r *Renderer) citation(w io.Writer, node *ast.Citation) { - for i, c := range node.Destination { - attr := []string{`class="none"`} - switch node.Type[i] { - case ast.CitationTypeNormative: - attr[0] = `class="normative"` - case ast.CitationTypeInformative: - attr[0] = `class="informative"` - case ast.CitationTypeSuppressed: - attr[0] = `class="suppressed"` - } - r.outTag(w, "`+r.opts.CitationFormatString+``, c, c)) - r.outs(w, "") - } -} - -func (r *Renderer) callout(w io.Writer, node *ast.Callout) { - attr := []string{`class="callout"`} - r.outTag(w, "") -} - -func (r *Renderer) index(w io.Writer, node *ast.Index) { - // there is no in-text representation. - attr := []string{`class="index"`, fmt.Sprintf(`id="%s"`, node.ID)} - r.outTag(w, "") -} - -// RenderNode renders a markdown node to HTML -func (r *Renderer) RenderNode(w io.Writer, node ast.Node, entering bool) ast.WalkStatus { - if r.opts.RenderNodeHook != nil { - status, didHandle := r.opts.RenderNodeHook(w, node, entering) - if didHandle { - return status - } - } - switch node := node.(type) { - case *ast.Text: - r.text(w, node) - case *ast.Softbreak: - r.cr(w) - // TODO: make it configurable via out(renderer.softbreak) - case *ast.Hardbreak: - r.hardBreak(w, node) - case *ast.NonBlockingSpace: - r.nonBlockingSpace(w, node) - case *ast.Emph: - r.outOneOf(w, entering, "", "") - case *ast.Strong: - r.outOneOf(w, entering, "", "") - case *ast.Del: - r.outOneOf(w, entering, "", "") - case *ast.BlockQuote: - tag := tagWithAttributes("") - case *ast.Aside: - tag := tagWithAttributes("") - case *ast.Link: - r.link(w, node, entering) - case *ast.CrossReference: - link := &ast.Link{Destination: append([]byte("#"), node.Destination...)} - r.link(w, link, entering) - case *ast.Citation: - r.citation(w, node) - case *ast.Image: - if r.opts.Flags&SkipImages != 0 { - return ast.SkipChildren - } - r.image(w, node, entering) - case *ast.Code: - r.code(w, node) - case *ast.CodeBlock: - r.codeBlock(w, node) - case *ast.Caption: - r.caption(w, node, entering) - case *ast.CaptionFigure: - r.captionFigure(w, node, entering) - case *ast.Document: - // do nothing - case *ast.Paragraph: - r.paragraph(w, node, entering) - case *ast.HTMLSpan: - r.htmlSpan(w, node) - case *ast.HTMLBlock: - r.htmlBlock(w, node) - case *ast.Heading: - r.heading(w, node, entering) - case *ast.HorizontalRule: - r.horizontalRule(w, node) - case *ast.List: - r.list(w, node, entering) - case *ast.ListItem: - r.listItem(w, node, entering) - case *ast.Table: - tag := tagWithAttributes("") - case *ast.TableCell: - r.tableCell(w, node, entering) - case *ast.TableHeader: - r.outOneOfCr(w, entering, "", "") - case *ast.TableBody: - r.tableBody(w, node, entering) - case *ast.TableRow: - r.outOneOfCr(w, entering, "", "") - case *ast.TableFooter: - r.outOneOfCr(w, entering, "", "") - case *ast.Math: - r.outOneOf(w, true, `\(`, `\)`) - EscapeHTML(w, node.Literal) - r.outOneOf(w, false, `\(`, `\)`) - case *ast.MathBlock: - r.outOneOf(w, entering, `

    \[`, `\]

    `) - if entering { - EscapeHTML(w, node.Literal) - } - case *ast.DocumentMatter: - r.matter(w, node, entering) - case *ast.Callout: - r.callout(w, node) - case *ast.Index: - r.index(w, node) - case *ast.Subscript: - r.outOneOf(w, true, "", "") - if entering { - Escape(w, node.Literal) - } - r.outOneOf(w, false, "", "") - case *ast.Superscript: - r.outOneOf(w, true, "", "") - if entering { - Escape(w, node.Literal) - } - r.outOneOf(w, false, "", "") - case *ast.Footnotes: - // nothing by default; just output the list. - default: - panic(fmt.Sprintf("Unknown node %T", node)) - } - return ast.GoToNext -} - -// RenderHeader writes HTML document preamble and TOC if requested. -func (r *Renderer) RenderHeader(w io.Writer, ast ast.Node) { - r.writeDocumentHeader(w) - if r.opts.Flags&TOC != 0 { - r.writeTOC(w, ast) - } -} - -// RenderFooter writes HTML document footer. -func (r *Renderer) RenderFooter(w io.Writer, _ ast.Node) { - if r.documentMatter != ast.DocumentMatterNone { - r.outs(w, "
    \n") - } - - if r.opts.Flags&CompletePage == 0 { - return - } - io.WriteString(w, "\n\n\n") -} - -func (r *Renderer) writeDocumentHeader(w io.Writer) { - if r.opts.Flags&CompletePage == 0 { - return - } - ending := "" - if r.opts.Flags&UseXHTML != 0 { - io.WriteString(w, "\n") - io.WriteString(w, "\n") - ending = " /" - } else { - io.WriteString(w, "\n") - io.WriteString(w, "\n") - } - io.WriteString(w, "\n") - io.WriteString(w, " ") - if r.opts.Flags&Smartypants != 0 { - r.sr.Process(w, []byte(r.opts.Title)) - } else { - EscapeHTML(w, []byte(r.opts.Title)) - } - io.WriteString(w, "\n") - io.WriteString(w, r.opts.Generator) - io.WriteString(w, "\"") - io.WriteString(w, ending) - io.WriteString(w, ">\n") - io.WriteString(w, " \n") - if r.opts.CSS != "" { - io.WriteString(w, " \n") - } - if r.opts.Icon != "" { - io.WriteString(w, " \n") - } - if r.opts.Head != nil { - w.Write(r.opts.Head) - } - io.WriteString(w, "\n") - io.WriteString(w, "\n\n") -} - -func (r *Renderer) writeTOC(w io.Writer, doc ast.Node) { - buf := bytes.Buffer{} - - inHeading := false - tocLevel := 0 - headingCount := 0 - - ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus { - if nodeData, ok := node.(*ast.Heading); ok && !nodeData.IsTitleblock { - inHeading = entering - if !entering { - buf.WriteString("") - return ast.GoToNext - } - nodeData.HeadingID = fmt.Sprintf("toc_%d", headingCount) - if nodeData.Level == tocLevel { - buf.WriteString("\n\n
  • ") - } else if nodeData.Level < tocLevel { - for nodeData.Level < tocLevel { - tocLevel-- - buf.WriteString("
  • \n") - } - buf.WriteString("\n\n
  • ") - } else { - for nodeData.Level > tocLevel { - tocLevel++ - buf.WriteString("\n") - } - - if buf.Len() > 0 { - io.WriteString(w, "\n") - } - r.lastOutputLen = buf.Len() -} - -func isList(node ast.Node) bool { - _, ok := node.(*ast.List) - return ok -} - -func isListTight(node ast.Node) bool { - if list, ok := node.(*ast.List); ok { - return list.Tight - } - return false -} - -func isListItem(node ast.Node) bool { - _, ok := node.(*ast.ListItem) - return ok -} - -func isListItemTerm(node ast.Node) bool { - data, ok := node.(*ast.ListItem) - return ok && data.ListFlags&ast.ListTypeTerm != 0 -} - -// TODO: move to internal package -func skipSpace(data []byte, i int) int { - n := len(data) - for i < n && isSpace(data[i]) { - i++ - } - return i -} - -// TODO: move to internal package -var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} -var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} - -func isSafeLink(link []byte) bool { - for _, path := range validPaths { - if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { - if len(link) == len(path) { - return true - } else if isAlnum(link[len(path)]) { - return true - } - } - } - - for _, prefix := range validUris { - // TODO: handle unicode here - // case-insensitive prefix test - if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isAlnum(link[len(prefix)]) { - return true - } - } - - return false -} - -// TODO: move to internal package -// Create a url-safe slug for fragments -func slugify(in []byte) []byte { - if len(in) == 0 { - return in - } - out := make([]byte, 0, len(in)) - sym := false - - for _, ch := range in { - if isAlnum(ch) { - sym = false - out = append(out, ch) - } else if sym { - continue - } else { - out = append(out, '-') - sym = true - } - } - var a, b int - var ch byte - for a, ch = range out { - if ch != '-' { - break - } - } - for b = len(out) - 1; b > 0; b-- { - if out[b] != '-' { - break - } - } - return out[a : b+1] -} - -// TODO: move to internal package -// isAlnum returns true if c is a digit or letter -// TODO: check when this is looking for ASCII alnum and when it should use unicode -func isAlnum(c byte) bool { - return (c >= '0' && c <= '9') || isLetter(c) -} - -// isSpace returns true if c is a white-space charactr -func isSpace(c byte) bool { - return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v' -} - -// isLetter returns true if c is ascii letter -func isLetter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// isPunctuation returns true if c is a punctuation symbol. -func isPunctuation(c byte) bool { - for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { - if c == r { - return true - } - } - return false -} - -// BlockAttrs takes a node and checks if it has block level attributes set. If so it -// will return a slice each containing a "key=value(s)" string. -func BlockAttrs(node ast.Node) []string { - var attr *ast.Attribute - if c := node.AsContainer(); c != nil && c.Attribute != nil { - attr = c.Attribute - } - if l := node.AsLeaf(); l != nil && l.Attribute != nil { - attr = l.Attribute - } - if attr == nil { - return nil - } - - var s []string - if attr.ID != nil { - s = append(s, fmt.Sprintf(`%s="%s"`, IDTag, attr.ID)) - } - - classes := "" - for _, c := range attr.Classes { - classes += " " + string(c) - } - if classes != "" { - s = append(s, fmt.Sprintf(`class="%s"`, classes[1:])) // skip space we added. 
- } - - // sort the attributes so it remain stable between runs - var keys = []string{} - for k, _ := range attr.Attrs { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - s = append(s, fmt.Sprintf(`%s="%s"`, k, attr.Attrs[k])) - } - - return s -} - -func tagWithAttributes(name string, attrs []string) string { - s := name - if len(attrs) > 0 { - s += " " + strings.Join(attrs, " ") - } - return s + ">" -} diff --git a/html/smartypants.go b/html/smartypants.go deleted file mode 100644 index a09866b..0000000 --- a/html/smartypants.go +++ /dev/null @@ -1,444 +0,0 @@ -package html - -import ( - "bytes" - "io" -) - -// SmartyPants rendering - -// SPRenderer is a struct containing state of a Smartypants renderer. -type SPRenderer struct { - inSingleQuote bool - inDoubleQuote bool - callbacks [256]smartCallback -} - -func wordBoundary(c byte) bool { - return c == 0 || isSpace(c) || isPunctuation(c) -} - -func tolower(c byte) byte { - if c >= 'A' && c <= 'Z' { - return c - 'A' + 'a' - } - return c -} - -func isdigit(c byte) bool { - return c >= '0' && c <= '9' -} - -func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { - // edge of the buffer is likely to be a tag that we don't get to see, - // so we treat it like text sometimes - - // enumerate all sixteen possibilities for (previousChar, nextChar) - // each can be one of {0, space, punct, other} - switch { - case previousChar == 0 && nextChar == 0: - // context is not any help here, so toggle - *isOpen = !*isOpen - case isSpace(previousChar) && nextChar == 0: - // [ "] might be [ "foo...] - *isOpen = true - case isPunctuation(previousChar) && nextChar == 0: - // [!"] hmm... could be [Run!"] or [("...] - *isOpen = false - case /* isnormal(previousChar) && */ nextChar == 0: - // [a"] is probably a close - *isOpen = false - case previousChar == 0 && isSpace(nextChar): - // [" ] might be [...foo" ] - *isOpen = false - case isSpace(previousChar) && isSpace(nextChar): - // [ " ] context is not any help here, so toggle - *isOpen = !*isOpen - case isPunctuation(previousChar) && isSpace(nextChar): - // [!" ] is probably a close - *isOpen = false - case /* isnormal(previousChar) && */ isSpace(nextChar): - // [a" ] this is one of the easy cases - *isOpen = false - case previousChar == 0 && isPunctuation(nextChar): - // ["!] hmm... could be ["$1.95] or ["!...] - *isOpen = false - case isSpace(previousChar) && isPunctuation(nextChar): - // [ "!] looks more like [ "$1.95] - *isOpen = true - case isPunctuation(previousChar) && isPunctuation(nextChar): - // [!"!] context is not any help here, so toggle - *isOpen = !*isOpen - case /* isnormal(previousChar) && */ isPunctuation(nextChar): - // [a"!] is probably a close - *isOpen = false - case previousChar == 0 /* && isnormal(nextChar) */ : - // ["a] is probably an open - *isOpen = true - case isSpace(previousChar) /* && isnormal(nextChar) */ : - // [ "a] this is one of the easy cases - *isOpen = true - case isPunctuation(previousChar) /* && isnormal(nextChar) */ : - // [!"a] is probably an open - *isOpen = true - default: - // [a'b] maybe a contraction? - *isOpen = false - } - - // Note that with the limited lookahead, this non-breaking - // space will also be appended to single double quotes. 
- if addNBSP && !*isOpen { - out.WriteString(" ") - } - - out.WriteByte('&') - if *isOpen { - out.WriteByte('l') - } else { - out.WriteByte('r') - } - out.WriteByte(quote) - out.WriteString("quo;") - - if addNBSP && *isOpen { - out.WriteString(" ") - } - - return true -} - -func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - t1 := tolower(text[1]) - - if t1 == '\'' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { - out.WriteString("’") - return 0 - } - - if len(text) >= 3 { - t2 := tolower(text[2]) - - if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && - (len(text) < 4 || wordBoundary(text[3])) { - out.WriteString("’") - return 0 - } - } - } - - nextChar := byte(0) - if len(text) > 1 { - nextChar = text[1] - } - if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { - return 0 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 { - t1 := tolower(text[1]) - t2 := tolower(text[2]) - - if t1 == 'c' && t2 == ')' { - out.WriteString("©") - return 2 - } - - if t1 == 'r' && t2 == ')' { - out.WriteString("®") - return 2 - } - - if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { - out.WriteString("™") - return 3 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - if text[1] == '-' { - out.WriteString("—") - return 1 - } - - if wordBoundary(previousChar) && wordBoundary(text[1]) { - out.WriteString("–") - return 0 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '-' && text[2] == '-' { - out.WriteString("—") - return 2 - } - if len(text) >= 2 && text[1] == '-' { - out.WriteString("–") - return 1 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { - if bytes.HasPrefix(text, []byte(""")) { - nextChar := byte(0) - if len(text) >= 7 { - nextChar = text[6] - } - if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { - return 5 - } - } - - if bytes.HasPrefix(text, []byte("�")) { - return 3 - } - - out.WriteByte('&') - return 0 -} - -func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { - var quote byte = 'd' - if angledQuotes { - quote = 'a' - } - - return func(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) - } -} - -func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '.' && text[2] == '.' { - out.WriteString("…") - return 2 - } - - if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' 
{ - out.WriteString("…") - return 4 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 && text[1] == '`' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b - // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) - // and avoid changing dates like 1/23/2005 into fractions. - numEnd := 0 - for len(text) > numEnd && isdigit(text[numEnd]) { - numEnd++ - } - if numEnd == 0 { - out.WriteByte(text[0]) - return 0 - } - denStart := numEnd + 1 - if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { - denStart = numEnd + 3 - } else if len(text) < numEnd+2 || text[numEnd] != '/' { - out.WriteByte(text[0]) - return 0 - } - denEnd := denStart - for len(text) > denEnd && isdigit(text[denEnd]) { - denEnd++ - } - if denEnd == denStart { - out.WriteByte(text[0]) - return 0 - } - if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { - out.WriteString("") - out.Write(text[:numEnd]) - out.WriteString("") - out.Write(text[denStart:denEnd]) - out.WriteString("") - return denEnd - 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - if text[0] == '1' && text[1] == '/' && text[2] == '2' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { - out.WriteString("½") - return 2 - } - } - - if text[0] == '1' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { - out.WriteString("¼") - return 2 - } - } - - if text[0] == '3' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { - out.WriteString("¾") - return 2 - } - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { - nextChar := byte(0) - if len(text) > 1 { - nextChar = text[1] - } - if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { - out.WriteString(""") - } - - return 0 -} - -func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') -} - -func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') -} - -func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { - i := 0 - - for i < len(text) && text[i] != '>' { - i++ - } - - out.Write(text[:i+1]) - return i -} - -type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int - -// NewSmartypantsRenderer constructs a Smartypants renderer object. 
-func NewSmartypantsRenderer(flags Flags) *SPRenderer { - var ( - r SPRenderer - - smartAmpAngled = r.smartAmp(true, false) - smartAmpAngledNBSP = r.smartAmp(true, true) - smartAmpRegular = r.smartAmp(false, false) - smartAmpRegularNBSP = r.smartAmp(false, true) - - addNBSP = flags&SmartypantsQuotesNBSP != 0 - ) - - if flags&SmartypantsAngledQuotes == 0 { - r.callbacks['"'] = r.smartDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpRegular - } else { - r.callbacks['&'] = smartAmpRegularNBSP - } - } else { - r.callbacks['"'] = r.smartAngledDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpAngled - } else { - r.callbacks['&'] = smartAmpAngledNBSP - } - } - r.callbacks['\''] = r.smartSingleQuote - r.callbacks['('] = r.smartParens - if flags&SmartypantsDashes != 0 { - if flags&SmartypantsLatexDashes == 0 { - r.callbacks['-'] = r.smartDash - } else { - r.callbacks['-'] = r.smartDashLatex - } - } - r.callbacks['.'] = r.smartPeriod - if flags&SmartypantsFractions == 0 { - r.callbacks['1'] = r.smartNumber - r.callbacks['3'] = r.smartNumber - } else { - for ch := '1'; ch <= '9'; ch++ { - r.callbacks[ch] = r.smartNumberGeneric - } - } - r.callbacks['<'] = r.smartLeftAngle - r.callbacks['`'] = r.smartBacktick - return &r -} - -// Process is the entry point of the Smartypants renderer. -func (r *SPRenderer) Process(w io.Writer, text []byte) { - mark := 0 - for i := 0; i < len(text); i++ { - if action := r.callbacks[text[i]]; action != nil { - if i > mark { - w.Write(text[mark:i]) - } - previousChar := byte(0) - if i > 0 { - previousChar = text[i-1] - } - var tmp bytes.Buffer - i += action(&tmp, previousChar, text[i:]) - w.Write(tmp.Bytes()) - mark = i + 1 - } - } - if mark < len(text) { - w.Write(text[mark:]) - } -} diff --git a/html_renderer_test.go b/html_renderer_test.go deleted file mode 100644 index 888a320..0000000 --- a/html_renderer_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package markdown - -import ( - "io" - "testing" - - "github.com/gomarkdown/markdown/ast" - "github.com/gomarkdown/markdown/html" - "github.com/gomarkdown/markdown/parser" -) - -func renderHookEmpty(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) { - return ast.GoToNext, true -} - -func TestRenderNodeHookEmpty(t *testing.T) { - tests := []string{ - "[foo](gopher://foo.bar)", - "", - - "[foo](mailto://bar/)\n", - "", - } - - htmlParams := html.RendererOptions{ - RenderNodeHook: renderHookEmpty, - } - params := TestParams{ - RendererOptions: htmlParams, - } - doTestsParam(t, tests, params) -} - -func renderHookCodeBlock(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) { - _, ok := node.(*ast.CodeBlock) - if !ok { - return ast.GoToNext, false - } - io.WriteString(w, "code_replacement") - return ast.GoToNext, true -} - -func TestRenderNodeHookCode(t *testing.T) { - tests := []string{ - "a\n```go\ncode\n```\nb", - "

    a

    \ncode_replacement\n

    b

    \n", - } - opts := html.RendererOptions{ - RenderNodeHook: renderHookCodeBlock, - } - params := TestParams{ - RendererOptions: opts, - extensions: parser.CommonExtensions, - } - doTestsParam(t, tests, params) -} diff --git a/inline_test.go b/inline_test.go index 9d27e54..c754e6f 100644 --- a/inline_test.go +++ b/inline_test.go @@ -1,12 +1,8 @@ package markdown import ( - "regexp" "testing" - "strings" - - "github.com/gomarkdown/markdown/html" "github.com/gomarkdown/markdown/parser" ) @@ -15,7 +11,12 @@ func TestEmphasis(t *testing.T) { doTestsInlineParam(t, tests, TestParams{}) } -func TestReferenceOverride(t *testing.T) { +func TestStatusTag(t *testing.T) { + tests := readTestFile2(t, "status_tag.test") + doTestsInlineParam(t, tests, TestParams{}) +} + +func testReferenceOverride(t *testing.T) { var tests = []string{ "test [ref1][]\n", "

    test ref1

    \n", @@ -76,58 +77,61 @@ func TestReferenceOverride(t *testing.T) { func TestStrong(t *testing.T) { var tests = []string{ "nothing inline\n", - "

    nothing inline

    \n", + "[{\"literal\":\"nothing inline\"}]", "simple **inline** test\n", - "

    simple inline test

    \n", + "[{\"literal\":\"simple \"},{\"type\":\"strong\",\"literal\":\"inline\"},{\"literal\":\" test\"}]", + + "simple ***triple*** test\n", + "[{\"literal\":\"simple \"},{\"type\":\"strong\",\"literal\":\"triple\"},{\"literal\":\" test\"}]", "**at the** beginning\n", - "

    at the beginning

    \n", + "[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"at the\"},{\"literal\":\" beginning\"}]", "at the **end**\n", - "

    at the end

    \n", + "[{\"literal\":\"at the \"},{\"type\":\"strong\",\"literal\":\"end\"}]", "**try two** in **one line**\n", - "

    try two in one line

    \n", + "[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"try two\"},{\"literal\":\" in \"},{\"type\":\"strong\",\"literal\":\"one line\"}]", "over **two\nlines** test\n", - "

    over two\nlines test

    \n", + "[{\"literal\":\"over \"},{\"type\":\"strong\",\"literal\":\"two\\nlines\"},{\"literal\":\" test\"}]", "odd **number of** markers** here\n", - "

    odd number of markers** here

    \n", + "[{\"literal\":\"odd \"},{\"type\":\"strong\",\"literal\":\"number of\"},{\"literal\":\" markers** here\"}]", "odd **number\nof** markers** here\n", - "

    odd number\nof markers** here

    \n", + "[{\"literal\":\"odd \"},{\"type\":\"strong\",\"literal\":\"number\\nof\"},{\"literal\":\" markers** here\"}]", "simple __inline__ test\n", - "

    simple inline test

    \n", + "[{\"literal\":\"simple \"},{\"type\":\"strong\",\"literal\":\"inline\"},{\"literal\":\" test\"}]", "__at the__ beginning\n", - "

    at the beginning

    \n", + "[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"at the\"},{\"literal\":\" beginning\"}]", "at the __end__\n", - "

    at the end

    \n", + "[{\"literal\":\"at the \"},{\"type\":\"strong\",\"literal\":\"end\"}]", "__try two__ in __one line__\n", - "

    try two in one line

    \n", + "[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"try two\"},{\"literal\":\" in \"},{\"type\":\"strong\",\"literal\":\"one line\"}]", "over __two\nlines__ test\n", - "

    over two\nlines test

    \n", + "[{\"literal\":\"over \"},{\"type\":\"strong\",\"literal\":\"two\\nlines\"},{\"literal\":\" test\"}]", "odd __number of__ markers__ here\n", - "

    odd number of markers__ here

    \n", + "[{\"literal\":\"odd \"},{\"type\":\"strong\",\"literal\":\"number of\"},{\"literal\":\" markers__ here\"}]", "odd __number\nof__ markers__ here\n", - "

    odd number\nof markers__ here

    \n", + "[{\"literal\":\"odd \"},{\"type\":\"strong\",\"literal\":\"number\\nof\"},{\"literal\":\" markers__ here\"}]", "mix of **markers__\n", - "

    mix of **markers__

    \n", + "[{\"literal\":\"mix of **markers__\"}]", "**`/usr`** : this folder is named `usr`\n", - "

    /usr : this folder is named usr

    \n", + "[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"`/usr`\"},{\"literal\":\" : this folder is named \"},{\"type\":\"code\",\"literal\":\"usr\"}]", "**`/usr`** :\n\n this folder is named `usr`\n", - "

    /usr :

    \n\n

    this folder is named usr

    \n", + "[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"`/usr`\"},{\"literal\":\" :\\n\\n this folder is named \"},{\"type\":\"code\",\"literal\":\"usr\"}]", } doTestsInline(t, tests) } @@ -135,12 +139,12 @@ func TestStrong(t *testing.T) { func TestStrongShort(t *testing.T) { var tests = []string{ "**`/usr`** :\n\n this folder is named `usr`\n", - "

    /usr :

    \n\n

    this folder is named usr

    \n", + "[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"`/usr`\"},{\"literal\":\" :\\n\\n this folder is named \"},{\"type\":\"code\",\"literal\":\"usr\"}]", } doTestsInline(t, tests) } -func TestEmphasisMix(t *testing.T) { +func testEmphasisMix(t *testing.T) { var tests = []string{ "***triple emphasis***\n", "

    triple emphasis

    \n", @@ -169,7 +173,7 @@ func TestEmphasisMix(t *testing.T) { doTestsInline(t, tests) } -func TestEmphasisLink(t *testing.T) { +func testEmphasisLink(t *testing.T) { var tests = []string{ "[first](before) *text[second] (inside)text* [third](after)\n", "

    first textsecondtext third

    \n", @@ -186,7 +190,7 @@ func TestEmphasisLink(t *testing.T) { doTestsInline(t, tests) } -func TestStrikeThrough(t *testing.T) { +func testStrikeThrough(t *testing.T) { var tests = []string{ "nothing inline\n", "

    nothing inline

    \n", @@ -215,7 +219,7 @@ func TestStrikeThrough(t *testing.T) { doTestsInline(t, tests) } -func TestCodeSpan(t *testing.T) { +func testCodeSpan(t *testing.T) { var tests = []string{ "`source code`\n", "

    source code

    \n", @@ -253,7 +257,7 @@ func TestCodeSpan(t *testing.T) { doTestsInline(t, tests) } -func TestLineBreak(t *testing.T) { +func testLineBreak(t *testing.T) { var tests = []string{ "this line \nhas a break\n", "

    this line
    \nhas a break

    \n", @@ -292,7 +296,7 @@ func TestLineBreak(t *testing.T) { extensions: parser.BackslashLineBreak}) } -func TestInlineLink(t *testing.T) { +func testInlineLink(t *testing.T) { var tests = []string{ "[foo](/bar/)\n", "

    foo

    \n", @@ -409,54 +413,7 @@ func TestInlineLink(t *testing.T) { } -func TestRelAttrLink(t *testing.T) { - var nofollowTests = []string{ - "[foo](http://bar.com/foo/)\n", - "

    foo

    \n", - - "[foo](/bar/)\n", - "

    foo

    \n", - - "[foo](/)\n", - "

    foo

    \n", - - "[foo](./)\n", - "

    foo

    \n", - - "[foo](../)\n", - "

    foo

    \n", - - "[foo](../bar)\n", - "

    foo

    \n", - } - doTestsInlineParam(t, nofollowTests, TestParams{ - Flags: html.Safelink | html.NofollowLinks, - }) - - var noreferrerTests = []string{ - "[foo](http://bar.com/foo/)\n", - "

    foo

    \n", - - "[foo](/bar/)\n", - "

    foo

    \n", - } - doTestsInlineParam(t, noreferrerTests, TestParams{ - Flags: html.Safelink | html.NoreferrerLinks, - }) - - var nofollownoreferrerTests = []string{ - "[foo](http://bar.com/foo/)\n", - "

    foo

    \n", - - "[foo](/bar/)\n", - "

    foo

    \n", - } - doTestsInlineParam(t, nofollownoreferrerTests, TestParams{ - Flags: html.Safelink | html.NofollowLinks | html.NoreferrerLinks, - }) -} - -func TestHrefTargetBlank(t *testing.T) { +func testHrefTargetBlank(t *testing.T) { var tests = []string{ // internal link "[foo](/bar/)\n", @@ -480,12 +437,10 @@ func TestHrefTargetBlank(t *testing.T) { "[foo](http://example.com)\n", "

    foo

    \n", } - doTestsInlineParam(t, tests, TestParams{ - Flags: html.Safelink | html.HrefTargetBlank, - }) + doTestsInlineParam(t, tests, TestParams{}) } -func TestSafeInlineLink(t *testing.T) { +func testSafeInlineLink(t *testing.T) { var tests = []string{ "[foo](/bar/)\n", "

    foo

    \n", @@ -518,7 +473,7 @@ func TestSafeInlineLink(t *testing.T) { doSafeTestsInline(t, tests) } -func TestReferenceLink(t *testing.T) { +func testReferenceLink(t *testing.T) { var tests = []string{ "[link][ref]\n", "

    [link][ref]

    \n", @@ -556,7 +511,7 @@ func TestReferenceLink(t *testing.T) { doLinkTestsInline(t, tests) } -func TestTags(t *testing.T) { +func testTags(t *testing.T) { var tests = []string{ "a tag\n", "

    a tag

    \n", @@ -573,7 +528,7 @@ func TestTags(t *testing.T) { doTestsInline(t, tests) } -func TestAutoLink(t *testing.T) { +func testAutoLink(t *testing.T) { var tests = []string{ "http://foo.com/\n", "

    http://foo.com/

    \n", @@ -899,128 +854,7 @@ what happens here `, } -func TestFootnotes(t *testing.T) { - doTestsInlineParam(t, footnoteTests, TestParams{ - extensions: parser.Footnotes, - }) -} - -func TestFootnotesWithParameters(t *testing.T) { - tests := make([]string, len(footnoteTests)) - - prefix := "testPrefix" - returnText := "ret" - re := regexp.MustCompile(`(?ms)
  • (.*?)
  • `) - - // Transform the test expectations to match the parameters we're using. - for i, test := range footnoteTests { - if i%2 == 1 { - test = strings.Replace(test, "fn:", "fn:"+prefix, -1) - test = strings.Replace(test, "fnref:", "fnref:"+prefix, -1) - test = re.ReplaceAllString(test, `
  • $2 ret
  • `) - } - tests[i] = test - } - - params := html.RendererOptions{ - FootnoteAnchorPrefix: prefix, - FootnoteReturnLinkContents: returnText, - } - - doTestsInlineParam(t, tests, TestParams{ - extensions: parser.Footnotes, - Flags: html.FootnoteReturnLinks, - RendererOptions: params, - }) -} - -func TestNestedFootnotes(t *testing.T) { - var tests = []string{ - `Paragraph.[^fn1] - -[^fn1]: - Asterisk[^fn2] - -[^fn2]: - Obelisk`, - `

    Paragraph.1

    - -
    - -
    - -
      -
    1. Asterisk2
    2. - -
    3. Obelisk
    4. -
    - -
    -`, - `This uses footnote A.[^A] - -This uses footnote C.[^C] - -[^A]: - A note. use itself.[^A] -[^B]: - B note, uses A to test duplicate.[^A] -[^C]: - C note, uses B.[^B] -`, - - `

    This uses footnote A.1

    - -

    This uses footnote C.2

    - -
    - -
    - -
      -
    1. A note. use itself.1
    2. - -
    3. C note, uses B.3
    4. - -
    5. B note, uses A to test duplicate.1
    6. -
    - -
    -`, - } - doTestsInlineParam(t, tests, TestParams{extensions: parser.Footnotes}) -} - -func TestInlineComments(t *testing.T) { - var tests = []string{ - "Hello \n", - "

    Hello

    \n", - - "Hello ", - "

    Hello

    \n", - - "Hello \n", - "

    Hello

    \n", - - "Hello \na", - "

    Hello \na

    \n", - - "* list \n", - "
      \n
    • list
    • \n
    \n", - - " comment\n", - "

    comment

    \n", - - "blahblah\n\nrhubarb\n", - "

    blahblah\n\nrhubarb

    \n", - } - doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants | html.SmartypantsDashes}) -} - -func TestSmartDoubleQuotes(t *testing.T) { +func testSmartDoubleQuotes(t *testing.T) { var tests = []string{ "this should be normal \"quoted\" text.\n", "

    this should be normal “quoted” text.

    \n", @@ -1029,10 +863,10 @@ func TestSmartDoubleQuotes(t *testing.T) { "two pair of \"some\" quoted \"text\".\n", "

    two pair of “some” quoted “text”.

    \n"} - doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants}) + doTestsInlineParam(t, tests, TestParams{}) } -func TestSmartDoubleQuotesNBSP(t *testing.T) { +func testSmartDoubleQuotesNBSP(t *testing.T) { var tests = []string{ "this should be normal \"quoted\" text.\n", "

    this should be normal “ quoted ” text.

    \n", @@ -1041,10 +875,10 @@ func TestSmartDoubleQuotesNBSP(t *testing.T) { "two pair of \"some\" quoted \"text\".\n", "

    two pair of “ some ” quoted “ text ”.

    \n"} - doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants | html.SmartypantsQuotesNBSP}) + doTestsInlineParam(t, tests, TestParams{}) } -func TestSmartAngledDoubleQuotes(t *testing.T) { +func testSmartAngledDoubleQuotes(t *testing.T) { var tests = []string{ "this should be angled \"quoted\" text.\n", "

    this should be angled «quoted» text.

    \n", @@ -1053,40 +887,10 @@ func TestSmartAngledDoubleQuotes(t *testing.T) { "two pair of \"some\" quoted \"text\".\n", "

    two pair of «some» quoted «text».

    \n"} - doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants | html.SmartypantsAngledQuotes}) + doTestsInlineParam(t, tests, TestParams{}) } -func TestSmartAngledDoubleQuotesNBSP(t *testing.T) { - var tests = []string{ - "this should be angled \"quoted\" text.\n", - "

    this should be angled « quoted » text.

    \n", - "this \" single double\n", - "

    this «  single double

    \n", - "two pair of \"some\" quoted \"text\".\n", - "

    two pair of « some » quoted « text ».

    \n"} - - doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants | html.SmartypantsAngledQuotes | html.SmartypantsQuotesNBSP}) -} - -func TestSmartFractions(t *testing.T) { - var tests = []string{ - "1/2, 1/4 and 3/4; 1/4th and 3/4ths\n", - "

    ½, ¼ and ¾; ¼th and ¾ths

    \n", - "1/2/2015, 1/4/2015, 3/4/2015; 2015/1/2, 2015/1/4, 2015/3/4.\n", - "

    1/2/2015, 1/4/2015, 3/4/2015; 2015/1/2, 2015/1/4, 2015/3/4.

    \n"} - - doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants}) - - tests = []string{ - "1/2, 2/3, 81/100 and 1000000/1048576.\n", - "

    12, 23, 81100 and 10000001048576.

    \n", - "1/2/2015, 1/4/2015, 3/4/2015; 2015/1/2, 2015/1/4, 2015/3/4.\n", - "

    1/2/2015, 1/4/2015, 3/4/2015; 2015/1/2, 2015/1/4, 2015/3/4.

    \n"} - - doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants | html.SmartypantsFractions}) -} - -func TestDisableSmartDashes(t *testing.T) { +func testDisableSmartDashes(t *testing.T) { doTestsInlineParam(t, []string{ "foo - bar\n", "

    foo - bar

    \n", @@ -1102,7 +906,7 @@ func TestDisableSmartDashes(t *testing.T) { "

    foo — bar

    \n", "foo --- bar\n", "

    foo —– bar

    \n", - }, TestParams{Flags: html.Smartypants | html.SmartypantsDashes}) + }, TestParams{}) doTestsInlineParam(t, []string{ "foo - bar\n", "

    foo - bar

    \n", @@ -1110,7 +914,7 @@ func TestDisableSmartDashes(t *testing.T) { "

    foo – bar

    \n", "foo --- bar\n", "

    foo — bar

    \n", - }, TestParams{Flags: html.Smartypants | html.SmartypantsLatexDashes | html.SmartypantsDashes}) + }, TestParams{}) doTestsInlineParam(t, []string{ "foo - bar\n", "

    foo - bar

    \n", @@ -1118,31 +922,27 @@ func TestDisableSmartDashes(t *testing.T) { "

    foo -- bar

    \n", "foo --- bar\n", "

    foo --- bar

    \n", - }, TestParams{Flags: html.Smartypants | html.SmartypantsLatexDashes}) + }, TestParams{}) } -func TestSkipLinks(t *testing.T) { +func testSkipLinks(t *testing.T) { doTestsInlineParam(t, []string{ "[foo](gopher://foo.bar)", "

    foo

    \n", "[foo](mailto://bar/)\n", "

    foo

    \n", - }, TestParams{ - Flags: html.SkipLinks, - }) + }, TestParams{}) } -func TestSkipImages(t *testing.T) { +func testSkipImages(t *testing.T) { doTestsInlineParam(t, []string{ "![foo](/bar/)\n", "

    \n", - }, TestParams{ - Flags: html.SkipImages, - }) + }, TestParams{}) } -func TestUseXHTML(t *testing.T) { +func testUseXHTML(t *testing.T) { doTestsParam(t, []string{ "---", "
    \n", @@ -1150,33 +950,15 @@ func TestUseXHTML(t *testing.T) { doTestsParam(t, []string{ "---", "
    \n", - }, TestParams{Flags: html.UseXHTML}) + }, TestParams{}) } -func TestSkipHTML(t *testing.T) { +func testSkipHTML(t *testing.T) { doTestsParam(t, []string{ "
    \n\ntext\n\n
    the form
    ", "

    text

    \n\n

    the form

    \n", "text inline html more text", "

    text inline html more text

    \n", - }, TestParams{Flags: html.SkipHTML}) -} - -func TestInlineMath(t *testing.T) { - doTestsParam(t, []string{ - "$a_b$", - `

    \(a_b\)

    -`, - }, TestParams{Flags: html.SkipHTML, extensions: parser.CommonExtensions}) -} - -func BenchmarkSmartDoubleQuotes(b *testing.B) { - params := TestParams{Flags: html.Smartypants} - params.extensions |= parser.Autolink | parser.Strikethrough - params.Flags |= html.UseXHTML - - for i := 0; i < b.N; i++ { - runMarkdown("this should be normal \"quoted\" text.\n", params) - } + }, TestParams{}) } diff --git a/main/main.go b/main/main.go new file mode 100644 index 0000000..d009570 --- /dev/null +++ b/main/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "encoding/json" + "fmt" + "github.com/gomarkdown/markdown" +) + +func main() { + md := []byte("## markdown document") + output := markdown.Parse(md, nil) + fmt.Printf("## markdown document\n") + fmt.Printf("%+v\n", output) + fmt.Printf("%+v\n", output.GetChildren()[0]) + fmt.Printf("%+v\n", output.GetChildren()[0].GetChildren()[0]) + j, err := json.Marshal(output) + if err != nil { + fmt.Println(err) + } + fmt.Println(j) +} diff --git a/markdown.go b/markdown.go index fd5c1cf..29c23a4 100644 --- a/markdown.go +++ b/markdown.go @@ -5,7 +5,6 @@ import ( "io" "github.com/gomarkdown/markdown/ast" - "github.com/gomarkdown/markdown/html" "github.com/gomarkdown/markdown/parser" ) @@ -65,21 +64,3 @@ func Render(doc ast.Node, renderer Renderer) []byte { renderer.RenderFooter(&buf, doc) return buf.Bytes() } - -// ToHTML converts markdownDoc to HTML. -// -// You can optionally pass a parser and renderer. This allows to customize -// a parser, use a customized html render or use completely custom renderer. -// -// If you pass nil for both, we use parser configured with parser.CommonExtensions -// and html.Renderer configured with html.CommonFlags. -func ToHTML(markdown []byte, p *parser.Parser, renderer Renderer) []byte { - doc := Parse(markdown, p) - if renderer == nil { - opts := html.RendererOptions{ - Flags: html.CommonFlags, - } - renderer = html.NewRenderer(opts) - } - return Render(doc, renderer) -} diff --git a/markdown_test.go b/markdown_test.go index 59dc4c0..945c2a4 100644 --- a/markdown_test.go +++ b/markdown_test.go @@ -6,20 +6,20 @@ func TestDocument(t *testing.T) { var tests = []string{ // Empty document. "", - "", + "[]", " ", - "", + "[]", // This shouldn't panic. // https://github.com/russross/blackfriday/issues/172 "[]:<", - "

    []:<

    \n", + "[{\"literal\":\"[]:\\u003c\"}]", // This shouldn't panic. // https://github.com/russross/blackfriday/issues/173 " [", - "

    [

    \n", + "[{\"literal\":\"[\"}]", } doTests(t, tests) } diff --git a/mmark_test.go b/mmark_test.go index 0315ce4..c024609 100644 --- a/mmark_test.go +++ b/mmark_test.go @@ -5,8 +5,6 @@ import ( "io/ioutil" "path/filepath" "testing" - - "github.com/gomarkdown/markdown/parser" ) type testData struct { @@ -66,18 +64,3 @@ func readTestFile(t *testing.T, fileName string) []*testData { } return res } - -func TestMmark(t *testing.T) { - testData := readTestFile(t, "mmark.test") - ext := parser.CommonExtensions | parser.Attributes | parser.OrderedListStart | parser.SuperSubscript | parser.Mmark - for _, td := range testData { - p := parser.NewWithExtensions(ext) - - got := ToHTML(td.md, p, nil) - want := td.html - - if bytes.Compare(got, want) != 0 { - t.Errorf("want (%d bytes) %s, got (%d bytes) %s, for input %q", len(want), want, len(got), got, td.md) - } - } -} diff --git a/parser/block.go b/parser/block.go index 18a2dd8..86c43c3 100644 --- a/parser/block.go +++ b/parser/block.go @@ -4,7 +4,6 @@ import ( "bytes" "html" "regexp" - "strconv" "unicode" "github.com/gomarkdown/markdown/ast" @@ -92,263 +91,23 @@ func sanitizeAnchorName(text string) string { // the input buffer ends with a newline. func (p *Parser) block(data []byte) { // this is called recursively: enforce a maximum depth - if p.nesting >= p.maxNesting { - return - } - p.nesting++ - - // parse out one block-level construct at a time for len(data) > 0 { - // attributes that can be specific before a block element: - // - // {#id .class1 .class2 key="value"} - if p.extensions&Attributes != 0 { - data = p.attribute(data) - } - - if p.extensions&Includes != 0 { - f := p.readInclude - path, address, consumed := p.isInclude(data) - if consumed == 0 { - path, address, consumed = p.isCodeInclude(data) - f = p.readCodeInclude - } - if consumed > 0 { - included := f(p.includeStack.Last(), path, address) - p.includeStack.Push(path) - p.block(included) - p.includeStack.Pop() - data = data[consumed:] - continue - } - } - - // user supplied parser function - if p.Opts.ParserHook != nil { - node, blockdata, consumed := p.Opts.ParserHook(data) - if consumed > 0 { - data = data[consumed:] - - if node != nil { - p.addBlock(node) - if blockdata != nil { - p.block(blockdata) - p.finalize(node) - } - } - continue - } - } - - // prefixed heading: - // - // # Heading 1 - // ## Heading 2 - // ... - // ###### Heading 6 - if p.isPrefixHeading(data) { - data = data[p.prefixHeading(data):] - continue - } - - // prefixed special heading: - // (there are no levels.) - // - // .# Abstract - if p.isPrefixSpecialHeading(data) { - data = data[p.prefixSpecialHeading(data):] - continue - } - - // block of preformatted HTML: - // - //
<div>
-	//     ...
-	// </div>
    - if data[0] == '<' { - if i := p.html(data, true); i > 0 { - data = data[i:] - continue - } - } - - // title block - // - // % stuff - // % more stuff - // % even more stuff - if p.extensions&Titleblock != 0 { - if data[0] == '%' { - if i := p.titleBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - } - - // blank lines. note: returns the # of bytes to skip - if i := p.isEmpty(data); i > 0 { + if i := p.fencedCodeBlock(data, true); i > 0 { data = data[i:] continue } - // indented code block: - // - // func max(a, b int) int { - // if a > b { - // return a - // } - // return b - // } - if p.codePrefix(data) > 0 { - data = data[p.code(data):] - continue - } - - // fenced code block: - // - // ``` go - // func fact(n int) int { - // if n <= 1 { - // return n - // } - // return n * fact(n-1) - // } - // ``` - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - - // horizontal rule: - // - // ------ - // or - // ****** - // or - // ______ - if p.isHRule(data) { - p.addBlock(&ast.HorizontalRule{}) - i := skipUntilChar(data, 0, '\n') - data = data[i:] - continue - } - - // block quote: - // - // > A big quote I found somewhere - // > on the web if p.quotePrefix(data) > 0 { data = data[p.quote(data):] continue } - // aside: - // - // A> The proof is too large to fit - // A> in the margin. - if p.extensions&Mmark != 0 { - if p.asidePrefix(data) > 0 { - data = data[p.aside(data):] - continue - } - } - - // figure block: - // - // !--- - // ![Alt Text](img.jpg "This is an image") - // ![Alt Text](img2.jpg "This is a second image") - // !--- - if p.extensions&Mmark != 0 { - if i := p.figureBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - - // table: - // - // Name | Age | Phone - // ------|-----|--------- - // Bob | 31 | 555-1234 - // Alice | 27 | 555-4321 - if p.extensions&Tables != 0 { - if i := p.table(data); i > 0 { - data = data[i:] - continue - } - } - - // an itemized/unordered list: - // - // * Item 1 - // * Item 2 - // - // also works with + or - - if p.uliPrefix(data) > 0 { - data = data[p.list(data, 0, 0):] - continue - } - - // a numbered/ordered list: - // - // 1. Item 1 - // 2. 
Item 2 - if i := p.oliPrefix(data); i > 0 { - start := 0 - if i > 2 && p.extensions&OrderedListStart != 0 { - s := string(data[:i-2]) - start, _ = strconv.Atoi(s) - if start == 1 { - start = 0 - } - } - data = data[p.list(data, ast.ListTypeOrdered, start):] - continue - } - - // definition lists: - // - // Term 1 - // : Definition a - // : Definition b - // - // Term 2 - // : Definition c - if p.extensions&DefinitionLists != 0 { - if p.dliPrefix(data) > 0 { - data = data[p.list(data, ast.ListTypeDefinition, 0):] - continue - } - } - - if p.extensions&MathJax != 0 { - if i := p.blockMath(data); i > 0 { - data = data[i:] - continue - } - } - - // document matters: - // - // {frontmatter}/{mainmatter}/{backmatter} - if p.extensions&Mmark != 0 { - if i := p.documentMatter(data); i > 0 { - data = data[i:] - continue - } - } - - // anything else must look like a normal paragraph - // note: this finds underlined headings, too idx := p.paragraph(data) data = data[idx:] } + //p.renderParagraph(data) + return - p.nesting-- } func (p *Parser) addBlock(n ast.Node) ast.Node { @@ -367,17 +126,7 @@ func (p *Parser) addBlock(n ast.Node) ast.Node { } func (p *Parser) isPrefixHeading(data []byte) bool { - if data[0] != '#' { - return false - } - - if p.extensions&SpaceHeadings != 0 { - level := skipCharN(data, 0, '#', 6) - if level == len(data) || data[level] != ' ' { - return false - } - } - return true + return len(data) > 1 && data[0] == '#' && isSpace(data[1]) } func (p *Parser) prefixHeading(data []byte) int { @@ -385,24 +134,6 @@ func (p *Parser) prefixHeading(data []byte) int { i := skipChar(data, level, ' ') end := skipUntilChar(data, i, '\n') skip := end - id := "" - if p.extensions&HeadingIDs != 0 { - j, k := 0, 0 - // find start/end of heading id - for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { - } - for k = j + 1; k < end && data[k] != '}'; k++ { - } - // extract heading id iff found - if j < end && k < end { - id = string(data[j+2 : k]) - end = j - skip = k + 1 - for end > 0 && data[end-1] == ' ' { - end-- - } - } - } for end > 0 && data[end-1] == '#' { if isBackslashEscaped(data, end-1) { break @@ -413,23 +144,16 @@ func (p *Parser) prefixHeading(data []byte) int { end-- } if end > i { - if id == "" && p.extensions&AutoHeadingIDs != 0 { - id = sanitizeAnchorName(string(data[i:end])) - } block := &ast.Heading{ - HeadingID: id, - Level: level, + Level: level, } - block.Content = data[i:end] + block.Literal = data[i:end] p.addBlock(block) } return skip } func (p *Parser) isPrefixSpecialHeading(data []byte) bool { - if p.extensions|Mmark == 0 { - return false - } if len(data) < 4 { return false } @@ -549,203 +273,6 @@ func (p *Parser) titleBlock(data []byte, doRender bool) int { return consumed } -func (p *Parser) html(data []byte, doRender bool) int { - var i, j int - - // identify the opening tag - if data[0] != '<' { - return 0 - } - curtag, tagfound := p.htmlFindTag(data[1:]) - - // handle special cases - if !tagfound { - // check for an HTML comment - if size := p.htmlComment(data, doRender); size > 0 { - return size - } - - // check for an
    tag - if size := p.htmlHr(data, doRender); size > 0 { - return size - } - - // no special case recognized - return 0 - } - - // look for an unindented matching closing tag - // followed by a blank line - found := false - /* - closetag := []byte("\n") - j = len(curtag) + 1 - for !found { - // scan for a closing tag at the beginning of a line - if skip := bytes.Index(data[j:], closetag); skip >= 0 { - j += skip + len(closetag) - } else { - break - } - - // see if it is the only thing on the line - if skip := p.isEmpty(data[j:]); skip > 0 { - // see if it is followed by a blank line/eof - j += skip - if j >= len(data) { - found = true - i = j - } else { - if skip := p.isEmpty(data[j:]); skip > 0 { - j += skip - found = true - i = j - } - } - } - } - */ - - // if not found, try a second pass looking for indented match - // but not if tag is "ins" or "del" (following original Markdown.pl) - if !found && curtag != "ins" && curtag != "del" { - i = 1 - for i < len(data) { - i++ - for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { - i++ - } - - if i+2+len(curtag) >= len(data) { - break - } - - j = p.htmlFindEnd(curtag, data[i-1:]) - - if j > 0 { - i += j - 1 - found = true - break - } - } - } - - if !found { - return 0 - } - - // the end of the block has been found - if doRender { - // trim newlines - end := backChar(data, i, '\n') - htmlBLock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}} - p.addBlock(htmlBLock) - finalizeHTMLBlock(htmlBLock) - } - - return i -} - -func finalizeHTMLBlock(block *ast.HTMLBlock) { - block.Literal = block.Content - block.Content = nil -} - -// HTML comment, lax form -func (p *Parser) htmlComment(data []byte, doRender bool) int { - i := p.inlineHTMLComment(data) - // needs to end with a blank line - if j := p.isEmpty(data[i:]); j > 0 { - size := i + j - if doRender { - // trim trailing newlines - end := backChar(data, size, '\n') - htmlBLock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}} - p.addBlock(htmlBLock) - finalizeHTMLBlock(htmlBLock) - } - return size - } - return 0 -} - -// HR, which is the only self-closing block tag considered -func (p *Parser) htmlHr(data []byte, doRender bool) int { - if len(data) < 4 { - return 0 - } - if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { - return 0 - } - if data[3] != ' ' && data[3] != '/' && data[3] != '>' { - // not an
    tag after all; at least not a valid one - return 0 - } - i := 3 - for i < len(data) && data[i] != '>' && data[i] != '\n' { - i++ - } - if i < len(data) && data[i] == '>' { - i++ - if j := p.isEmpty(data[i:]); j > 0 { - size := i + j - if doRender { - // trim newlines - end := backChar(data, size, '\n') - htmlBlock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}} - p.addBlock(htmlBlock) - finalizeHTMLBlock(htmlBlock) - } - return size - } - } - return 0 -} - -func (p *Parser) htmlFindTag(data []byte) (string, bool) { - i := skipAlnum(data, 0) - key := string(data[:i]) - if _, ok := blockTags[key]; ok { - return key, true - } - return "", false -} - -func (p *Parser) htmlFindEnd(tag string, data []byte) int { - // assume data[0] == '<' && data[1] == '/' already tested - if tag == "hr" { - return 2 - } - // check if tag is a match - closetag := []byte("") - if !bytes.HasPrefix(data, closetag) { - return 0 - } - i := len(closetag) - - // check that the rest of the line is blank - skip := 0 - if skip = p.isEmpty(data[i:]); skip == 0 { - return 0 - } - i += skip - skip = 0 - - if i >= len(data) { - return i - } - - if p.extensions&LaxHTMLBlocks != 0 { - return i - } - if skip = p.isEmpty(data[i:]); skip == 0 { - // following line must be blank - return 0 - } - - return i + skip -} - func (*Parser) isEmpty(data []byte) int { // it is okay to call isEmpty on an empty buffer if len(data) == 0 { @@ -935,12 +462,6 @@ func (p *Parser) fencedCodeBlock(data []byte, doRender bool) int { } codeBlock.Content = work.Bytes() // TODO: get rid of temp buffer - if p.extensions&Mmark == 0 { - p.addBlock(codeBlock) - finalizeCodeBlock(codeBlock) - return beg - } - // Check for caption and if found make it a figure. if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 { figure := &ast.CaptionFigure{} @@ -1283,13 +804,6 @@ func (p *Parser) quote(data []byte) int { // fenced code and if one's found, incorporate it altogether, // irregardless of any contents inside it for end < len(data) && data[end] != '\n' { - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data[end:], false); i > 0 { - // -1 to compensate for the extra end++ after the loop: - end += i - 1 - break - } - } end++ } end = skipCharN(data, end, '\n', 1) @@ -1304,36 +818,9 @@ func (p *Parser) quote(data []byte) int { beg = end } - if p.extensions&Mmark == 0 { - block := p.addBlock(&ast.BlockQuote{}) - p.block(raw.Bytes()) - p.finalize(block) - return end - } - - if captionContent, id, consumed := p.caption(data[end:], []byte("Quote: ")); consumed > 0 { - figure := &ast.CaptionFigure{} - caption := &ast.Caption{} - figure.HeadingID = id - p.Inline(caption, captionContent) - - p.addBlock(figure) // this discard any attributes - block := &ast.BlockQuote{} - block.AsContainer().Attribute = figure.AsContainer().Attribute - p.addChild(block) - p.block(raw.Bytes()) - p.finalize(block) - - p.addChild(caption) - p.finalize(figure) - - end += consumed - - return end - } - - block := p.addBlock(&ast.BlockQuote{}) - p.block(raw.Bytes()) + quote := &ast.BlockQuote{} + quote.Literal = raw.Bytes() + block := p.addBlock(quote) p.finalize(block) return end @@ -1812,14 +1299,6 @@ func (p *Parser) paragraph(data []byte) int { // did we find a blank line marking the end of the paragraph? if n := p.isEmpty(current); n > 0 { - // did this blank line followed by a definition list item? 
- if p.extensions&DefinitionLists != 0 { - if i < len(data)-1 && data[i+1] == ':' { - listLen := p.list(data[prev:], ast.ListTypeDefinition, 0) - return prev + listLen - } - } - p.renderParagraph(data[:i]) return i + n } @@ -1856,21 +1335,6 @@ func (p *Parser) paragraph(data []byte) int { } } - // if the next line starts a block of HTML, then the paragraph ends here - if p.extensions&LaxHTMLBlocks != 0 { - if data[i] == '<' && p.html(current, false) > 0 { - // rewind to before the HTML block - p.renderParagraph(data[:i]) - return i - } - } - - // if there's a prefixed heading or a horizontal rule after this, paragraph is over - if p.isPrefixHeading(current) || p.isPrefixSpecialHeading(current) || p.isHRule(current) { - p.renderParagraph(data[:i]) - return i - } - // if there's a fenced code block, paragraph is over if p.extensions&FencedCode != 0 { if p.fencedCodeBlock(current, false) > 0 { @@ -1879,33 +1343,6 @@ func (p *Parser) paragraph(data []byte) int { } } - // if there's a figure block, paragraph is over - if p.extensions&Mmark != 0 { - if p.figureBlock(current, false) > 0 { - p.renderParagraph(data[:i]) - return i - } - } - - // if there's a definition list item, prev line is a definition term - if p.extensions&DefinitionLists != 0 { - if p.dliPrefix(current) != 0 { - ret := p.list(data[prev:], ast.ListTypeDefinition, 0) - return ret + prev - } - } - - // if there's a list after this, paragraph is over - if p.extensions&NoEmptyLineBeforeBlock != 0 { - if p.uliPrefix(current) != 0 || - p.oliPrefix(current) != 0 || - p.quotePrefix(current) != 0 || - p.codePrefix(current) != 0 { - p.renderParagraph(data[:i]) - return i - } - } - // otherwise, scan to the beginning of the next line nl := bytes.IndexByte(data[i:], '\n') if nl >= 0 { diff --git a/parser/inline.go b/parser/inline.go index 81766b8..1c41661 100644 --- a/parser/inline.go +++ b/parser/inline.go @@ -22,7 +22,11 @@ var ( // Each function returns the number of consumed chars. func (p *Parser) Inline(currBlock ast.Node, data []byte) { // handlers might call us recursively: enforce a maximum depth - if p.nesting >= p.maxNesting || len(data) == 0 { + if len(data) == 0 { + return + } + if p.nesting >= p.maxNesting { + ast.AppendChild(currBlock, newTextNode(data)) return } p.nesting++ @@ -59,6 +63,40 @@ func (p *Parser) Inline(currBlock ast.Node, data []byte) { p.nesting-- } +func statusTag(p *Parser, data []byte, offset int) (int, ast.Node) { + data = data[offset:] + n := len(data) + + if n == 1 { + return 0, nil + } + + // Space cannot follow tag + if isSpace(data[1]) { + return 0, nil + } + + i := 1 + for i < n { + if isSpace(data[i]) { + break + } + if !isValidStatusTagChar(data[i]) { + return 0, nil + } + i++ + } + + if i == 1 { + return 0, nil + } + + statusTag := &ast.StatusTag{} + statusTag.Literal = data[1:i] + + return i, statusTag +} + // single and double emphasis parsing func emphasis(p *Parser, data []byte, offset int) (int, ast.Node) { data = data[offset:] @@ -604,25 +642,6 @@ func link(p *Parser, data []byte, offset int) (int, ast.Node) { } } -func (p *Parser) inlineHTMLComment(data []byte) int { - if len(data) < 5 { - return 0 - } - if data[0] != '<' || data[1] != '!' 
|| data[2] != '-' || data[3] != '-' { - return 0 - } - i := 5 - // scan for an end-of-comment marker, across lines if necessary - for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { - i++ - } - // no end-of-comment marker - if i >= len(data) { - return 0 - } - return i + 1 -} - func stripMailto(link []byte) []byte { if bytes.HasPrefix(link, []byte("mailto://")) { return link[9:] @@ -643,48 +662,6 @@ const ( emailAutolink ) -// '<' when tags or autolinks are allowed -func leftAngle(p *Parser, data []byte, offset int) (int, ast.Node) { - data = data[offset:] - - if p.extensions&Mmark != 0 { - id, consumed := IsCallout(data) - if consumed > 0 { - node := &ast.Callout{} - node.ID = id - return consumed, node - } - } - - altype, end := tagLength(data) - if size := p.inlineHTMLComment(data); size > 0 { - end = size - } - if end <= 2 { - return end, nil - } - if altype == notAutolink { - htmlTag := &ast.HTMLSpan{} - htmlTag.Literal = data[:end] - return end, htmlTag - } - - var uLink bytes.Buffer - unescapeText(&uLink, data[1:end+1-2]) - if uLink.Len() <= 0 { - return end, nil - } - link := uLink.Bytes() - node := &ast.Link{ - Destination: link, - } - if altype == emailAutolink { - node.Destination = append([]byte("mailto:"), link...) - } - ast.AppendChild(node, newTextNode(stripMailto(link))) - return end, node -} - // '\\' backslash escape var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") @@ -731,30 +708,6 @@ func unescapeText(ob *bytes.Buffer, src []byte) { } } -// '&' escaped when it doesn't belong to an entity -// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; -func entity(p *Parser, data []byte, offset int) (int, ast.Node) { - data = data[offset:] - - end := skipCharN(data, 1, '#', 1) - end = skipAlnum(data, end) - - if end < len(data) && data[end] == ';' { - end++ // real entity - } else { - return 0, nil // lone '&' - } - - ent := data[:end] - // undo & escaping or it will be converted to &amp; by another - // escaper in the renderer - if bytes.Equal(ent, []byte("&")) { - ent = []byte{'&'} - } - - return end, newTextNode(ent) -} - func linkEndsWithEntity(data []byte, linkEnd int) bool { entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd @@ -1175,7 +1128,7 @@ func helperEmphasis(p *Parser, data []byte, c byte) (int, ast.Node) { } emph := &ast.Emph{} - p.Inline(emph, data[:i]) + emph.Literal = data[:i] return i + 1, emph } } @@ -1194,11 +1147,14 @@ func helperDoubleEmphasis(p *Parser, data []byte, c byte) (int, ast.Node) { i += length if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isSpace(data[i-1]) { - var node ast.Node = &ast.Strong{} + strong := &ast.Strong{} + strong.Literal = data[:i] + var node ast.Node = strong if c == '~' { - node = &ast.Del{} + del := &ast.Del{} + del.Literal = data[:i] + node = del } - p.Inline(node, data[:i]) return i + 2, node } i++ @@ -1227,9 +1183,7 @@ func helperTripleEmphasis(p *Parser, data []byte, offset int, c byte) (int, ast. 
case i+2 < len(data) && data[i+1] == c && data[i+2] == c: // triple symbol found strong := &ast.Strong{} - em := &ast.Emph{} - ast.AppendChild(strong, em) - p.Inline(em, data[:i]) + strong.Literal = data[:i] return i + 3, strong case i+1 < len(data) && data[i+1] == c: // double symbol found, hand over to emph1 diff --git a/parser/parser.go b/parser/parser.go index c7302df..b87d5b6 100644 --- a/parser/parser.go +++ b/parser/parser.go @@ -129,7 +129,7 @@ func NewWithExtensions(extension Extensions) *Parser { p := Parser{ refs: make(map[string]*reference), refsRecord: make(map[string]struct{}), - maxNesting: 16, + maxNesting: 1, insideLink: false, Doc: &ast.Document{}, extensions: extension, @@ -142,21 +142,14 @@ func NewWithExtensions(extension Extensions) *Parser { p.inlineCallback[' '] = maybeLineBreak p.inlineCallback['*'] = emphasis + p.inlineCallback['#'] = statusTag p.inlineCallback['_'] = emphasis if p.extensions&Strikethrough != 0 { p.inlineCallback['~'] = emphasis } p.inlineCallback['`'] = codeSpan p.inlineCallback['\n'] = lineBreak - p.inlineCallback['['] = link - p.inlineCallback['<'] = leftAngle p.inlineCallback['\\'] = escape - p.inlineCallback['&'] = entity - p.inlineCallback['!'] = maybeImage - if p.extensions&Mmark != 0 { - p.inlineCallback['('] = maybeShortRefOrIndex - } - p.inlineCallback['^'] = maybeInlineFootnoteOrSuper if p.extensions&Autolink != 0 { p.inlineCallback['h'] = maybeAutoLink p.inlineCallback['m'] = maybeAutoLink @@ -165,9 +158,6 @@ func NewWithExtensions(extension Extensions) *Parser { p.inlineCallback['M'] = maybeAutoLink p.inlineCallback['F'] = maybeAutoLink } - if p.extensions&MathJax != 0 { - p.inlineCallback['$'] = math - } return &p } @@ -272,16 +262,13 @@ func (p *Parser) Parse(input []byte) ast.Node { // Walk the tree again and process inline markdown in each block ast.WalkFunc(p.Doc, func(node ast.Node, entering bool) ast.WalkStatus { switch node.(type) { - case *ast.Paragraph, *ast.Heading, *ast.TableCell: + case *ast.Paragraph: p.Inline(node, node.AsContainer().Content) node.AsContainer().Content = nil } return ast.GoToNext }) - if p.Opts.Flags&SkipFootnoteList == 0 { - p.parseRefsToAST() - } return p.Doc } @@ -693,6 +680,10 @@ func isAlnum(c byte) bool { return (c >= '0' && c <= '9') || isLetter(c) } +func isValidStatusTagChar(c byte) bool { + return isAlnum(c) || c == '-' +} + // TODO: this is not used // Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
// always ends output with a newline diff --git a/ref_test.go b/ref_test.go deleted file mode 100644 index 7a32298..0000000 --- a/ref_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package markdown - -import ( - "io/ioutil" - "path/filepath" - "testing" - - "github.com/gomarkdown/markdown/parser" -) - -// Markdown 1.0.3 reference tests - -var ( - refFiles = []string{ - "Amps and angle encoding", - "Auto links", - "Backslash escapes", - "Blockquotes with code blocks", - "Code Blocks", - "Code Spans", - "Horizontal rules", - "Inline HTML (Advanced)", - "Inline HTML (Simple)", - "Inline HTML comments", - "Links, inline style", - "Links, reference style", - "Links, shortcut references", - "Literal quotes in titles", - "Markdown Documentation - Basics", - "Markdown Documentation - Syntax", - "Nested blockquotes", - "Ordered and unordered lists", - "Strong and em together", - "Tabs", - "Tidyness", - } -) - -func TestReference(t *testing.T) { - files := append(refFiles, "Hard-wrapped paragraphs with list-like lines") - doTestsReference(t, files, 0) -} - -func TestReference_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) { - files := append(refFiles, "Hard-wrapped paragraphs with list-like lines no empty line before block") - doTestsReference(t, files, parser.NoEmptyLineBeforeBlock) -} - -// benchResultAnchor is an anchor variable to store the result of a benchmarked -// code so that compiler could never optimize away the call to runMarkdown() -var benchResultAnchor string - -func benchFile(b *testing.B, basename string) { - params := TestParams{extensions: parser.CommonExtensions} - filename := filepath.Join("testdata", basename+".text") - inputBytes, err := ioutil.ReadFile(filename) - if err != nil { - b.Errorf("Couldn't open '%s', error: %v\n", filename, err) - return - } - - test := string(inputBytes) - b.ResetTimer() - for n := 0; n < b.N; n++ { - benchResultAnchor = runMarkdown(test, params) - } -} - -func BenchmarkReferenceAmps(b *testing.B) { - benchFile(b, "Amps and angle encoding") -} - -func BenchmarkReferenceAutoLinks(b *testing.B) { - benchFile(b, "Auto links") -} - -func BenchmarkReferenceBackslashEscapes(b *testing.B) { - benchFile(b, "Backslash escapes") -} - -func BenchmarkReferenceBlockquotesWithCodeBlocks(b *testing.B) { - benchFile(b, "Blockquotes with code blocks") -} - -func BenchmarkReferenceCodeBlocks(b *testing.B) { - benchFile(b, "Code Blocks") -} - -func BenchmarkReferenceCodeSpans(b *testing.B) { - benchFile(b, "Code Spans") -} - -func BenchmarkReferenceHardWrappedPara(b *testing.B) { - benchFile(b, "Hard-wrapped paragraphs with list-like lines") -} -func BenchmarkReferenceHorizontalRules(b *testing.B) { - benchFile(b, "Horizontal rules") -} -func BenchmarkReferenceInlineHTMLAdvances(b *testing.B) { - benchFile(b, "Inline HTML (Advanced)") -} -func BenchmarkReferenceInlineHTMLSimple(b *testing.B) { - benchFile(b, "Inline HTML (Simple)") -} -func BenchmarkReferenceInlineHTMLComments(b *testing.B) { - benchFile(b, "Inline HTML comments") -} -func BenchmarkReferenceLinksInline(b *testing.B) { - benchFile(b, "Links, inline style") -} -func BenchmarkReferenceLinksReference(b *testing.B) { - benchFile(b, "Links, reference style") -} -func BenchmarkReferenceLinksShortcut(b *testing.B) { - benchFile(b, "Links, shortcut references") -} -func BenchmarkReferenceLiterQuotesInTitles(b *testing.B) { - benchFile(b, "Literal quotes in titles") -} - -func BenchmarkReferenceMarkdownBasics(b *testing.B) { - benchFile(b, "Markdown Documentation - Basics") -} - -func 
BenchmarkReferenceMarkdownSyntax(b *testing.B) { - benchFile(b, "Markdown Documentation - Syntax") -} - -func BenchmarkReferenceNestedBlockquotes(b *testing.B) { - benchFile(b, "Nested blockquotes") -} - -func BenchmarkReferenceOrderedAndUnorderedLists(b *testing.B) { - benchFile(b, "Ordered and unordered lists") -} - -func BenchmarkReferenceStrongAndEm(b *testing.B) { - benchFile(b, "Strong and em together") -} - -func BenchmarkReferenceTabs(b *testing.B) { - benchFile(b, "Tabs") -} - -func BenchmarkReferenceTidyness(b *testing.B) { - benchFile(b, "Tidyness") -} - -func BenchmarkReference(b *testing.B) { - params := TestParams{extensions: parser.CommonExtensions} - files := append(refFiles, "Hard-wrapped paragraphs with list-like lines") - var tests []string - for _, basename := range files { - filename := filepath.Join("testdata", basename+".text") - inputBytes, err := ioutil.ReadFile(filename) - if err != nil { - b.Errorf("Couldn't open '%s', error: %v\n", filename, err) - continue - } - tests = append(tests, string(inputBytes)) - } - b.ResetTimer() - for n := 0; n < b.N; n++ { - for _, test := range tests { - benchResultAnchor = runMarkdown(test, params) - } - } -} diff --git a/s/fuzz.sh b/s/fuzz.sh index 6c4eb78..6796302 100755 --- a/s/fuzz.sh +++ b/s/fuzz.sh @@ -11,7 +11,7 @@ go get github.com/dvyukov/go-fuzz/go-fuzz-build # this step is expensive, so re-use previous runs if possible if [ ! -f ./markdown-fuzz.zip ]; then mkdir -p fuzz-workdir/corpus - cp testdata/*.text fuzz-workdir/corpus + cp testdata/*.md fuzz-workdir/corpus echo "running go-fuzz-build, might take a while..." go-fuzz-build github.com/gomarkdown/markdown fi diff --git a/testdata/emphasis.test b/testdata/emphasis.test index a12f010..57fa3f1 100644 --- a/testdata/emphasis.test +++ b/testdata/emphasis.test @@ -1,67 +1,67 @@ nothing inline +++ -

<p>nothing inline</p>
+[{"literal":"nothing inline"}]
 +++
 simple *inline* test
 +++
-<p>simple <em>inline</em> test</p>
+[{"literal":"simple "},{"type":"emph","literal":"inline"},{"literal":" test"}]
 +++
 *at the* beginning
 +++
-<p><em>at the</em> beginning</p>
+[{"literal":""},{"type":"emph","literal":"at the"},{"literal":" beginning"}]
 +++
 at the *end*
 +++
-<p>at the <em>end</em></p>
+[{"literal":"at the "},{"type":"emph","literal":"end"}]
 +++
 *try two* in *one line*
 +++
-<p><em>try two</em> in <em>one line</em></p>
+[{"literal":""},{"type":"emph","literal":"try two"},{"literal":" in "},{"type":"emph","literal":"one line"}]
 +++
 over *two\nlines* test
 +++
-<p>over <em>two\nlines</em> test</p>
+[{"literal":"over "},{"type":"emph","literal":"two\\nlines"},{"literal":" test"}]
 +++
 odd *number of* markers* here
 +++
-<p>odd <em>number of</em> markers* here</p>
+[{"literal":"odd "},{"type":"emph","literal":"number of"},{"literal":" markers* here"}]
 +++
 odd *number\nof* markers* here
 +++
-<p>odd <em>number\nof</em> markers* here</p>
+[{"literal":"odd "},{"type":"emph","literal":"number\\nof"},{"literal":" markers* here"}]
 +++
 simple _inline_ test
 +++
-<p>simple <em>inline</em> test</p>
+[{"literal":"simple "},{"type":"emph","literal":"inline"},{"literal":" test"}]
 +++
 _at the_ beginning
 +++
-<p><em>at the</em> beginning</p>
+[{"literal":""},{"type":"emph","literal":"at the"},{"literal":" beginning"}]
 +++
 at the _end_
 +++
-<p>at the <em>end</em></p>
+[{"literal":"at the "},{"type":"emph","literal":"end"}]
 +++
 _try two_ in _one line_
 +++
-<p><em>try two</em> in <em>one line</em></p>
+[{"literal":""},{"type":"emph","literal":"try two"},{"literal":" in "},{"type":"emph","literal":"one line"}]
 +++
 over _two\nlines_ test
 +++
-<p>over <em>two\nlines</em> test</p>
+[{"literal":"over "},{"type":"emph","literal":"two\\nlines"},{"literal":" test"}]
 +++
 odd _number of_ markers_ here
 +++
-<p>odd <em>number of</em> markers_ here</p>
+[{"literal":"odd "},{"type":"emph","literal":"number of"},{"literal":" markers_ here"}]
 +++
 odd _number\nof_ markers_ here
 +++
-<p>odd <em>number\nof</em> markers_ here</p>
+[{"literal":"odd "},{"type":"emph","literal":"number\\nof"},{"literal":" markers_ here"}]
 +++
 mix of *markers_
 +++
-<p>mix of *markers_</p>
+[{"literal":"mix of *markers_"}]
 +++
 *What is A\* algorithm?*
 +++
-<p><em>What is A* algorithm?</em></p>
+[{"literal":""},{"type":"emph","literal":"What is A\\* algorithm?"}]
diff --git a/testdata/status_tag.test b/testdata/status_tag.test
new file mode 100644
index 0000000..43b09a6
--- /dev/null
+++ b/testdata/status_tag.test
@@ -0,0 +1,44 @@
+nothing inline
++++
+[{"literal":"nothing inline"}]
++++
+simple #tag test
++++
+[{"literal":"simple "},{"type":"status-tag","literal":"tag"},{"literal":" test"}]
++++
+#at-the beginning
++++
+[{"literal":""},{"type":"status-tag","literal":"at-the"},{"literal":" beginning"}]
++++
+at the #end
++++
+[{"literal":"at the "},{"type":"status-tag","literal":"end"}]
++++
+#try-two in #one-line
++++
+[{"literal":""},{"type":"status-tag","literal":"try-two"},{"literal":" in "},{"type":"status-tag","literal":"one-line"}]
++++
+over #two
+lines test
++++
+[{"literal":"over "},{"type":"status-tag","literal":"two"},{"literal":"\\nlines test"}]
++++
+over two\n#lines test
++++
+[{"literal":"over two\\n"},{"type":"status-tag","literal":"lines"},{"literal":" test"}]
++++
+not valid #status_tag
++++
+[{"literal":"not valid #status_tag"}]
++++
+another not valid #status?tag
++++
+[{"literal":"another not valid #status?tag"}]
++++
+empty # status-tag
++++
+[{"literal":"empty # status-tag"}]
++++
+status-tag with #number9
++++
+[{"literal":"status-tag with "},{"type":"status-tag","literal":"number9"}]
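
Note on the new inline status-tag syntax: the patch registers statusTag on the '#' inline callback, and a tag is accepted only when the '#' is followed by a non-empty run of isValidStatusTagChar (alphanumerics and '-') terminated by whitespace or end of input, as exercised by testdata/status_tag.test. The sketch below shows how the stripped-down parser and the JSON marshalling added here might be driven together, mirroring main/main.go; the input string and the JSON shape in the comment are illustrative, not taken verbatim from the patch.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/gomarkdown/markdown"
)

func main() {
	// "#in-progress" contains only alphanumerics and '-' and is followed
	// by a space, so it should parse into an ast.StatusTag leaf; the rest
	// of the line stays as plain text nodes.
	doc := markdown.Parse([]byte("release is #in-progress today"), nil)

	j, err := json.Marshal(doc)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Roughly, following the shapes in testdata/status_tag.test but wrapped
	// in a paragraph node because the whole document is marshalled:
	// [{"type":"paragraph","children":[{"literal":"release is "},
	//   {"type":"status-tag","literal":"in-progress"},{"literal":" today"}]}]
	fmt.Println(string(j)) // convert to string so the JSON prints as text, not a byte slice
}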
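If a caller wants the tags themselves rather than the serialized tree, ast.WalkFunc (the same helper parser.Parse uses to run the inline pass) can pick the ast.StatusTag leaves out of the parsed document. collectStatusTags below is a hypothetical helper written for this note, not something added by the patch.

package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
	"github.com/gomarkdown/markdown/ast"
)

// collectStatusTags walks the parsed tree and returns the literal of every
// status-tag node it finds, in document order.
func collectStatusTags(doc ast.Node) []string {
	var tags []string
	ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
		if tag, ok := node.(*ast.StatusTag); ok && entering {
			tags = append(tags, string(tag.Literal))
		}
		return ast.GoToNext
	})
	return tags
}

func main() {
	doc := markdown.Parse([]byte("shipping is #done and docs are #in-progress"), nil)
	fmt.Println(collectStatusTags(doc)) // e.g. [done in-progress]
}

Because helperEmphasis, helperDoubleEmphasis and helperTripleEmphasis now store the delimited span as a raw literal instead of re-parsing it, and maxNesting is set to 1, the walk stays over an essentially flat tree: paragraphs whose children are text, emph, strong, code and status-tag leaves, matching the flat JSON arrays in the test data.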