Stripped down version of markdown

Commit af599402d0, parent 4d42851d4d

ast/node.go (182 changed lines)

@@ -1,5 +1,7 @@
|
|||
package ast
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
// ListType contains bitwise or'ed flags for list and list item objects.
|
||||
type ListType int
|
||||
|
||||
|
@@ -74,7 +76,7 @@ type Node interface {
|
|||
|
||||
// Container is a type of node that can contain children
|
||||
type Container struct {
|
||||
Parent Node
|
||||
Parent Node `json:"-"`
|
||||
Children []Node
|
||||
|
||||
Literal []byte // Text contents of the leaf nodes
|
||||
|
@@ -83,6 +85,21 @@ type Container struct {
|
|||
*Attribute // Block level attribute
|
||||
}
|
||||
|
||||
func (c *Container) MarshalJSON() ([]byte, error) {
|
||||
type ContainerJSON struct {
|
||||
Children []Node `json:"children"`
|
||||
Literal string `json:"literal"`
|
||||
*Attribute
|
||||
}
|
||||
var c1 ContainerJSON
|
||||
c1.Children = c.Children
|
||||
c1.Literal = string(c.Literal)
|
||||
c1.Attribute = c.Attribute
|
||||
|
||||
return json.Marshal(&c1)
|
||||
|
||||
}
|
||||
|
||||
// AsContainer returns itself as *Container
|
||||
func (c *Container) AsContainer() *Container {
|
||||
return c
|
||||
|
@@ -115,7 +132,7 @@ func (c *Container) SetChildren(newChildren []Node) {
|
|||
|
||||
// Leaf is a type of node that cannot have children
|
||||
type Leaf struct {
|
||||
Parent Node
|
||||
Parent Node `json:"-"`
|
||||
|
||||
Literal []byte // Text contents of the leaf nodes
|
||||
Content []byte // Markdown content of the block nodes
|
||||
|
@@ -123,6 +140,16 @@ type Leaf struct {
|
|||
*Attribute // Block level attribute
|
||||
}
|
||||
|
||||
func (c *Leaf) MarshalJSON() ([]byte, error) {
|
||||
type LeafJSON struct {
|
||||
Literal string `json:"literal"`
|
||||
}
|
||||
var c1 LeafJSON
|
||||
c1.Literal = string(c.Literal)
|
||||
|
||||
return json.Marshal(&c1)
|
||||
}
|
||||
|
||||
// AsContainer returns nil
|
||||
func (l *Leaf) AsContainer() *Container {
|
||||
return nil
|
||||
|
@@ -158,6 +185,14 @@ type Document struct {
|
|||
Container
|
||||
}
|
||||
|
||||
func (doc *Document) MarshalJSON() ([]byte, error) {
|
||||
children := doc.GetChildren()
|
||||
if len(children) != 0 {
|
||||
return json.Marshal(children)
|
||||
}
|
||||
return []byte("[]"), nil
|
||||
}
|
||||
|
||||
// DocumentMatter represents markdown node that signals a document
|
||||
// division: frontmatter, mainmatter or backmatter.
|
||||
type DocumentMatter struct {
|
||||
|
@@ -171,6 +206,21 @@ type BlockQuote struct {
|
|||
Container
|
||||
}
|
||||
|
||||
func (c *BlockQuote) MarshalJSON() ([]byte, error) {
|
||||
type BlockQuoteJSON struct {
|
||||
Type string `json:"type"`
|
||||
Children []Node `json:"children"`
|
||||
Literal string `json:"literal"`
|
||||
*Attribute
|
||||
}
|
||||
var c1 BlockQuoteJSON
|
||||
c1.Children = c.Children
|
||||
c1.Literal = string(c.Literal)
|
||||
c1.Type = "blockquote"
|
||||
|
||||
return json.Marshal(&c1)
|
||||
}
|
||||
|
||||
// Aside represents an markdown aside node.
|
||||
type Aside struct {
|
||||
Container
|
||||
|
@@ -206,6 +256,17 @@ type Paragraph struct {
|
|||
Container
|
||||
}
|
||||
|
||||
func (c *Paragraph) MarshalJSON() ([]byte, error) {
|
||||
type ParagraphJSON struct {
|
||||
Type string `json:"type"`
|
||||
Children []Node `json:"children"`
|
||||
}
|
||||
var c1 ParagraphJSON
|
||||
c1.Children = c.Children
|
||||
c1.Type = "paragraph"
|
||||
return json.Marshal(&c1)
|
||||
}
|
||||
|
||||
// Math represents markdown MathAjax inline node
|
||||
type Math struct {
|
||||
Leaf
|
||||
|
@@ -226,6 +287,28 @@ type Heading struct {
|
|||
IsSpecial bool // We are a special heading (starts with .#)
|
||||
}
|
||||
|
||||
func (c *Heading) MarshalJSON() ([]byte, error) {
|
||||
type HeadingJSON struct {
|
||||
Type string `json:"type"`
|
||||
Children []Node `json:"children"`
|
||||
Literal string `json:"literal"`
|
||||
Level int `json:"level"`
|
||||
IsTitleblock bool `json:"isTitleBlock"`
|
||||
|
||||
*Attribute
|
||||
}
|
||||
var c1 HeadingJSON
|
||||
c1.Children = c.Children
|
||||
c1.Literal = string(c.Literal)
|
||||
c1.Attribute = c.Attribute
|
||||
c1.Type = "heading"
|
||||
c1.Level = c.Level
|
||||
c1.IsTitleblock = c.IsTitleblock
|
||||
|
||||
return json.Marshal(&c1)
|
||||
|
||||
}
|
||||
|
||||
// HorizontalRule represents markdown horizontal rule node
|
||||
type HorizontalRule struct {
|
||||
Leaf
|
||||
|
@@ -233,17 +316,60 @@ type HorizontalRule struct {
|
|||
|
||||
// Emph represents markdown emphasis node
|
||||
type Emph struct {
|
||||
Container
|
||||
Leaf
|
||||
}
|
||||
|
||||
func (c *Emph) MarshalJSON() ([]byte, error) {
|
||||
type EmphJSON struct {
|
||||
Type string `json:"type"`
|
||||
Literal string `json:"literal"`
|
||||
*Attribute
|
||||
}
|
||||
var c1 EmphJSON
|
||||
c1.Literal = string(c.Literal)
|
||||
c1.Attribute = c.Attribute
|
||||
c1.Type = "emph"
|
||||
|
||||
return json.Marshal(&c1)
|
||||
}
|
||||
|
||||
type StatusTag struct {
|
||||
Leaf
|
||||
}
|
||||
|
||||
func (c *StatusTag) MarshalJSON() ([]byte, error) {
|
||||
type StatusTagJSON struct {
|
||||
Type string `json:"type"`
|
||||
Literal string `json:"literal"`
|
||||
}
|
||||
var c1 StatusTagJSON
|
||||
c1.Literal = string(c.Literal)
|
||||
c1.Type = "status-tag"
|
||||
return json.Marshal(&c1)
|
||||
}
|
||||
|
||||
// Strong represents markdown strong node
|
||||
type Strong struct {
|
||||
Container
|
||||
Leaf
|
||||
}
|
||||
|
||||
func (c *Strong) MarshalJSON() ([]byte, error) {
|
||||
type StrongJSON struct {
|
||||
Type string `json:"type"`
|
||||
Literal string `json:"literal"`
|
||||
*Attribute
|
||||
}
|
||||
var c1 StrongJSON
|
||||
c1.Literal = string(c.Literal)
|
||||
c1.Attribute = c.Attribute
|
||||
c1.Type = "strong"
|
||||
|
||||
return json.Marshal(&c1)
|
||||
}
|
||||
|
||||
// Del represents markdown del node
|
||||
type Del struct {
|
||||
Container
|
||||
Leaf
|
||||
}
|
||||
|
||||
// Link represents markdown link node
|
||||
|
@@ -257,6 +383,26 @@ type Link struct {
|
|||
DeferredID []byte // If a deferred link this holds the original ID.
|
||||
}
|
||||
|
||||
func (c *Link) MarshalJSON() ([]byte, error) {
|
||||
type LinkJSON struct {
|
||||
Type string `json:"type"`
|
||||
Children []Node `json:"children"`
|
||||
Literal string `json:"literal"`
|
||||
Title string `json:"title"`
|
||||
Destination string `json:"destination"`
|
||||
*Attribute
|
||||
}
|
||||
var c1 LinkJSON
|
||||
c1.Children = c.Children
|
||||
c1.Literal = string(c.Literal)
|
||||
c1.Attribute = c.Attribute
|
||||
c1.Title = string(c.Title)
|
||||
c1.Destination = string(c.Destination)
|
||||
c1.Type = "link"
|
||||
|
||||
return json.Marshal(&c1)
|
||||
}
|
||||
|
||||
// CrossReference is a reference node.
|
||||
type CrossReference struct {
|
||||
Container
|
||||
|
@@ -302,6 +448,20 @@ type CodeBlock struct {
|
|||
FenceOffset int
|
||||
}
|
||||
|
||||
func (c *CodeBlock) MarshalJSON() ([]byte, error) {
|
||||
type CodeBlockJSON struct {
|
||||
Type string `json:"type"`
|
||||
Literal string `json:"literal"`
|
||||
*Attribute
|
||||
}
|
||||
var c1 CodeBlockJSON
|
||||
c1.Literal = string(c.Literal)
|
||||
c1.Type = "codeblock"
|
||||
c1.Attribute = c.Attribute
|
||||
|
||||
return json.Marshal(&c1)
|
||||
}
|
||||
|
||||
// Softbreak represents markdown softbreak node
|
||||
// Note: not used currently
|
||||
type Softbreak struct {
|
||||
|
@@ -323,6 +483,18 @@ type Code struct {
|
|||
Leaf
|
||||
}
|
||||
|
||||
func (c *Code) MarshalJSON() ([]byte, error) {
|
||||
type CodeJSON struct {
|
||||
Type string `json:"type"`
|
||||
Literal string `json:"literal"`
|
||||
}
|
||||
var c1 CodeJSON
|
||||
c1.Literal = string(c.Literal)
|
||||
c1.Type = "code"
|
||||
|
||||
return json.Marshal(&c1)
|
||||
}
|
||||
|
||||
// HTMLSpan represents markdown html span node
|
||||
type HTMLSpan struct {
|
||||
Leaf
|
||||
|
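The MarshalJSON methods added above give each block node a compact JSON form: a "type" tag plus "children" and "literal" fields, with the Parent pointer excluded via the json:"-" tag. A minimal usage sketch of the new serialization; it follows the same parse-then-marshal pattern as the reworked test helper later in this diff, and the input string is only an example:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/gomarkdown/markdown/parser"
	)

	func main() {
		p := parser.NewWithExtensions(parser.CommonExtensions)
		doc := p.Parse([]byte("simple **inline** test\n"))
		// Document.MarshalJSON (added above) emits the document's children as a JSON array.
		out, err := json.Marshal(doc)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(out))
	}

The updated expectations in inline_test.go at the end of this diff show what that JSON looks like for small inputs.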
|
ast/print.go (17 changed lines)
|
@@ -36,22 +36,12 @@ func ToString(doc Node) string {
|
|||
return buf.String()
|
||||
}
|
||||
|
||||
func contentToString(d1 []byte, d2 []byte) string {
|
||||
if d1 != nil {
|
||||
return string(d1)
|
||||
}
|
||||
if d2 != nil {
|
||||
return string(d2)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getContent(node Node) string {
|
||||
if c := node.AsContainer(); c != nil {
|
||||
return contentToString(c.Literal, c.Content)
|
||||
return string(c.Literal)
|
||||
}
|
||||
leaf := node.AsLeaf()
|
||||
return contentToString(leaf.Literal, leaf.Content)
|
||||
return string(leaf.Literal)
|
||||
}
|
||||
|
||||
func shortenString(s string, maxLen int) string {
|
||||
|
@@ -126,6 +116,9 @@ func printRecur(w io.Writer, node Node, prefix string, depth int) {
|
|||
case *Link:
|
||||
content := "url=" + string(v.Destination)
|
||||
printDefault(w, indent, typeName, content)
|
||||
case *StatusTag:
|
||||
content := "tag=" + string(v.Literal)
|
||||
printDefault(w, indent, typeName, content)
|
||||
case *Image:
|
||||
content := "url=" + string(v.Destination)
|
||||
printDefault(w, indent, typeName, content)
|
||||
|
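The printer above now dumps only each node's Literal text. The large testdata dump added in the next section has the shape this printer produces; a rough sketch of generating such a dump, where the input file name, the "Ast of file" header line, and the exact formatting of the committed file are assumptions:

	package main

	import (
		"fmt"
		"io/ioutil"

		"github.com/gomarkdown/markdown/ast"
		"github.com/gomarkdown/markdown/parser"
	)

	func main() {
		md, err := ioutil.ReadFile("TEST.md") // assumed input file
		if err != nil {
			panic(err)
		}
		doc := parser.NewWithExtensions(parser.CommonExtensions).Parse(md)
		fmt.Println("Ast of file 'TEST.md':")
		// ToString (shown above) walks the tree and prints one indented line per node.
		fmt.Print(ast.ToString(doc))
	}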
|
block_test.go (223 changed lines)
|
@@ -1,223 +0,0 @@
|
|||
package markdown
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gomarkdown/markdown/html"
|
||||
"github.com/gomarkdown/markdown/parser"
|
||||
)
|
||||
|
||||
func must(err error) {
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func writeTest(file string, tests []string) {
|
||||
path := filepath.Join("testdata", file)
|
||||
f, err := os.Create(path)
|
||||
must(err)
|
||||
defer f.Close()
|
||||
lastIdx := len(tests) - 1
|
||||
for i, s := range tests {
|
||||
if !strings.HasSuffix(s, "\n") {
|
||||
s += "\n"
|
||||
}
|
||||
fmt.Fprint(f, s)
|
||||
if i != lastIdx {
|
||||
fmt.Fprint(f, "+++\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrefixHeaderNoExtensions(t *testing.T) {
|
||||
tests := readTestFile2(t, "PrefixHeaderNoExtensions.tests")
|
||||
doTestsBlock(t, tests, 0)
|
||||
}
|
||||
|
||||
func TestPrefixHeaderSpaceExtension(t *testing.T) {
|
||||
tests := readTestFile2(t, "PrefixHeaderSpaceExtension.tests")
|
||||
doTestsBlock(t, tests, parser.SpaceHeadings)
|
||||
}
|
||||
|
||||
func TestPrefixHeaderIdExtension(t *testing.T) {
|
||||
tests := readTestFile2(t, "PrefixHeaderIdExtension.tests")
|
||||
doTestsBlock(t, tests, parser.HeadingIDs)
|
||||
}
|
||||
|
||||
func TestPrefixHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
|
||||
tests := readTestFile2(t, "PrefixHeaderIdExtensionWithPrefixAndSuffix.tests")
|
||||
|
||||
parameters := html.RendererOptions{
|
||||
HeadingIDPrefix: "PRE:",
|
||||
HeadingIDSuffix: ":POST",
|
||||
}
|
||||
|
||||
doTestsParam(t, tests, TestParams{
|
||||
extensions: parser.HeadingIDs,
|
||||
Flags: html.UseXHTML,
|
||||
RendererOptions: parameters,
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrefixAutoHeaderIdExtension(t *testing.T) {
|
||||
tests := readTestFile2(t, "PrefixAutoHeaderIdExtension.tests")
|
||||
doTestsBlock(t, tests, parser.AutoHeadingIDs)
|
||||
}
|
||||
|
||||
func TestPrefixAutoHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
|
||||
tests := readTestFile2(t, "PrefixAutoHeaderIdExtensionWithPrefixAndSuffix.tests")
|
||||
parameters := html.RendererOptions{
|
||||
HeadingIDPrefix: "PRE:",
|
||||
HeadingIDSuffix: ":POST",
|
||||
}
|
||||
|
||||
doTestsParam(t, tests, TestParams{
|
||||
extensions: parser.AutoHeadingIDs,
|
||||
Flags: html.UseXHTML,
|
||||
RendererOptions: parameters,
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrefixMultipleHeaderExtensions(t *testing.T) {
|
||||
tests := readTestFile2(t, "PrefixMultipleHeaderExtensions.tests")
|
||||
doTestsBlock(t, tests, parser.AutoHeadingIDs|parser.HeadingIDs)
|
||||
}
|
||||
|
||||
func TestPrefixHeaderMmarkExtension(t *testing.T) {
|
||||
tests := readTestFile2(t, "PrefixHeaderMmarkExtension.tests")
|
||||
doTestsBlock(t, tests, parser.Mmark)
|
||||
}
|
||||
|
||||
func TestUnderlineHeaders(t *testing.T) {
|
||||
tests := readTestFile2(t, "UnderlineHeaders.tests")
|
||||
doTestsBlock(t, tests, 0)
|
||||
}
|
||||
|
||||
func TestUnderlineHeadersAutoIDs(t *testing.T) {
|
||||
tests := readTestFile2(t, "UnderlineHeadersAutoIDs.tests")
|
||||
doTestsBlock(t, tests, parser.AutoHeadingIDs)
|
||||
}
|
||||
|
||||
func TestHorizontalRule(t *testing.T) {
|
||||
tests := readTestFile2(t, "HorizontalRule.tests")
|
||||
doTestsBlock(t, tests, 0)
|
||||
}
|
||||
|
||||
func TestUnorderedList(t *testing.T) {
|
||||
tests := readTestFile2(t, "UnorderedList.tests")
|
||||
doTestsBlock(t, tests, 0)
|
||||
}
|
||||
|
||||
func TestOrderedList(t *testing.T) {
|
||||
tests := readTestFile2(t, "OrderedList.tests")
|
||||
doTestsBlock(t, tests, 0)
|
||||
}
|
||||
|
||||
func TestDefinitionList(t *testing.T) {
|
||||
tests := readTestFile2(t, "DefinitionList.tests")
|
||||
doTestsBlock(t, tests, parser.DefinitionLists)
|
||||
}
|
||||
|
||||
func TestNestedDefinitionList(t *testing.T) {
|
||||
tests := readTestFile2(t, "NestedDefinitionList.tests")
|
||||
doTestsBlock(t, tests, parser.DefinitionLists)
|
||||
}
|
||||
|
||||
func TestPreformattedHtml(t *testing.T) {
|
||||
tests := readTestFile2(t, "PreformattedHtml.tests")
|
||||
doTestsBlock(t, tests, 0)
|
||||
}
|
||||
|
||||
func TestPreformattedHtmlLax(t *testing.T) {
|
||||
tests := readTestFile2(t, "PreformattedHtmlLax.tests")
|
||||
doTestsBlock(t, tests, parser.LaxHTMLBlocks)
|
||||
}
|
||||
|
||||
func TestFencedCodeBlock(t *testing.T) {
|
||||
tests := readTestFile2(t, "FencedCodeBlock.tests")
|
||||
doTestsBlock(t, tests, parser.FencedCode)
|
||||
}
|
||||
|
||||
func TestFencedCodeInsideBlockquotes(t *testing.T) {
|
||||
tests := readTestFile2(t, "FencedCodeInsideBlockquotes.tests")
|
||||
doTestsBlock(t, tests, parser.FencedCode)
|
||||
}
|
||||
|
||||
func TestTable(t *testing.T) {
|
||||
tests := readTestFile2(t, "Table.tests")
|
||||
doTestsBlock(t, tests, parser.Tables)
|
||||
}
|
||||
|
||||
func TestUnorderedListWith_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) {
|
||||
tests := readTestFile2(t, "UnorderedListWith_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK.tests")
|
||||
doTestsBlock(t, tests, parser.NoEmptyLineBeforeBlock)
|
||||
}
|
||||
|
||||
func TestOrderedList_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) {
|
||||
tests := readTestFile2(t, "OrderedList_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK.tests")
|
||||
doTestsBlock(t, tests, parser.NoEmptyLineBeforeBlock)
|
||||
}
|
||||
|
||||
func TestFencedCodeBlock_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) {
|
||||
tests := readTestFile2(t, "FencedCodeBlock_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK.tests")
|
||||
doTestsBlock(t, tests, parser.FencedCode|parser.NoEmptyLineBeforeBlock)
|
||||
}
|
||||
|
||||
func TestMathBlock(t *testing.T) {
|
||||
tests := readTestFile2(t, "MathBlock.tests")
|
||||
doTestsBlock(t, tests, parser.CommonExtensions)
|
||||
}
|
||||
|
||||
func TestDefinitionListWithFencedCodeBlock(t *testing.T) {
|
||||
tests := readTestFile2(t, "DefinitionListWithFencedCodeBlock.tests")
|
||||
doTestsBlock(t, tests, parser.FencedCode|parser.DefinitionLists)
|
||||
}
|
||||
|
||||
func TestListWithFencedCodeBlockAndHeader(t *testing.T) {
|
||||
tests := readTestFile2(t, "ListWithFencedCodeBlockAndHeader.tests")
|
||||
doTestsBlock(t, tests, parser.FencedCode)
|
||||
}
|
||||
|
||||
func TestTitleBlock_EXTENSION_TITLEBLOCK(t *testing.T) {
|
||||
tests := readTestFile2(t, "TitleBlock_EXTENSION_TITLEBLOCK.tests")
|
||||
doTestsBlock(t, tests, parser.Titleblock)
|
||||
}
|
||||
|
||||
func TestBlockComments(t *testing.T) {
|
||||
tests := readTestFile2(t, "BlockComments.tests")
|
||||
doTestsBlock(t, tests, 0)
|
||||
}
|
||||
|
||||
func TestTOC(t *testing.T) {
|
||||
tests := readTestFile2(t, "TOC.tests")
|
||||
doTestsParam(t, tests, TestParams{
|
||||
Flags: html.UseXHTML | html.TOC,
|
||||
})
|
||||
}
|
||||
|
||||
func TestCompletePage(t *testing.T) {
|
||||
tests := readTestFile2(t, "CompletePage.tests")
|
||||
doTestsParam(t, tests, TestParams{Flags: html.UseXHTML | html.CompletePage})
|
||||
}
|
||||
|
||||
func TestSpaceHeadings(t *testing.T) {
|
||||
tests := readTestFile2(t, "SpaceHeadings.tests")
|
||||
doTestsParam(t, tests, TestParams{extensions: parser.SpaceHeadings})
|
||||
}
|
||||
|
||||
func TestCodeInList(t *testing.T) {
|
||||
tests := readTestFile2(t, "code_in_list.test")
|
||||
exts := parser.CommonExtensions
|
||||
doTestsParam(t, tests, TestParams{extensions: exts})
|
||||
}
|
||||
|
||||
func TestLists(t *testing.T) {
|
||||
tests := readTestFile2(t, "Lists.tests")
|
||||
exts := parser.CommonExtensions
|
||||
doTestsParam(t, tests, TestParams{extensions: exts})
|
||||
}
|
|
@@ -0,0 +1,310 @@
|
|||
Ast of file 'TEST.md':
|
||||
HorizontalRule
|
||||
Paragraph
|
||||
Text
|
||||
Strong
|
||||
Text 'Advertisement :)'
|
||||
Paragraph
|
||||
Text '-'
|
||||
Strong
|
||||
Text '[pica](https://nodeca.github.io/pica/...'
|
||||
Text '- high quality and fast image\n res...'
|
||||
Strong
|
||||
Text '[babelfish](https://github.com/nodeca...'
|
||||
Text '- developer friendly\n i18n with pl...'
|
||||
Paragraph
|
||||
Text 'You will like those projects!'
|
||||
HorizontalRule
|
||||
Heading
|
||||
Text 'h1 Heading 8-)'
|
||||
Heading
|
||||
Text 'h2 Heading'
|
||||
Heading
|
||||
Text 'h3 Heading'
|
||||
Heading
|
||||
Text 'h4 Heading'
|
||||
Heading
|
||||
Text 'h5 Heading'
|
||||
Heading
|
||||
Text 'h6 Heading'
|
||||
Heading
|
||||
Text 'Horizontal Rules'
|
||||
HorizontalRule
|
||||
HorizontalRule
|
||||
HorizontalRule
|
||||
Heading
|
||||
Text 'Typographic replacements'
|
||||
Paragraph
|
||||
Text 'Enable typographer option to see resu...'
|
||||
Paragraph
|
||||
Text '(c) (C) (r) (R) (tm) (TM) (p) (P) +-'
|
||||
Paragraph
|
||||
Text 'test.. test... test..... test?..... t...'
|
||||
Paragraph
|
||||
Text '!!!!!! ???? ,, -- ---'
|
||||
Paragraph
|
||||
Text '"Smartypants, double quotes" and 'sin...'
|
||||
Heading
|
||||
Text 'Emphasis'
|
||||
Paragraph
|
||||
Text
|
||||
Strong
|
||||
Text 'This is bold text'
|
||||
Paragraph
|
||||
Text
|
||||
Strong
|
||||
Text 'This is bold text'
|
||||
Paragraph
|
||||
Text
|
||||
Emph
|
||||
Text 'This is italic text'
|
||||
Paragraph
|
||||
Text
|
||||
Emph
|
||||
Text 'This is italic text'
|
||||
Paragraph
|
||||
Text
|
||||
Del
|
||||
Text 'Strikethrough'
|
||||
Heading
|
||||
Text 'Blockquotes'
|
||||
BlockQuote
|
||||
Paragraph
|
||||
Text 'Blockquotes can also be nested...\n> ...'
|
||||
Heading
|
||||
Text 'Lists'
|
||||
Paragraph
|
||||
Text 'Unordered'
|
||||
Paragraph
|
||||
Text '+ Create a list by starting a line wi...'
|
||||
Code '+ "'
|
||||
Text '" +'
|
||||
Code '+'
|
||||
Text '+ "'
|
||||
Code '" +'
|
||||
Text ','
|
||||
Code '+ "'
|
||||
Text '" +'
|
||||
Code '-'
|
||||
Text '+ "'
|
||||
Code '" +'
|
||||
Text ', or'
|
||||
Code '+ "'
|
||||
Text '" +'
|
||||
Code '*'
|
||||
Text '+ "'
|
||||
Code '" +'
|
||||
Text '\n+ Sub-lists are made by indenting 2...'
|
||||
Paragraph
|
||||
Text 'Ordered'
|
||||
Paragraph
|
||||
Text '1. Lorem ipsum dolor sit amet\n2. Con...'
|
||||
Paragraph
|
||||
Text '1. You can use sequential numbers...\...'
|
||||
Code '+ "'
|
||||
Text '" +'
|
||||
Code '1.'
|
||||
Text '+ "'
|
||||
Code '" +'
|
||||
Paragraph
|
||||
Text 'Start numbering with offset:'
|
||||
Paragraph
|
||||
Text '57. foo\n1. bar'
|
||||
Heading
|
||||
Text 'Code'
|
||||
Paragraph
|
||||
Text 'Inline'
|
||||
Code '+ "'
|
||||
Text '" +'
|
||||
Code 'code'
|
||||
Text '+ "'
|
||||
Code '" +'
|
||||
Paragraph
|
||||
Text 'Indented code'
|
||||
Paragraph
|
||||
Text '// Some comments\n line 1 of code\...'
|
||||
Paragraph
|
||||
Text 'Block code "fences"'
|
||||
Paragraph
|
||||
Text
|
||||
Code '+ "'
|
||||
Text '" +'
|
||||
Code '+ "`" +'
|
||||
Text '+ "'
|
||||
Code '" +'
|
||||
Text '\nSample text here...\n'
|
||||
Code '+ "'
|
||||
Text '" +'
|
||||
Code '+ "`" +'
|
||||
Text '+ "'
|
||||
Code '" +'
|
||||
Paragraph
|
||||
Text 'Syntax highlighting'
|
||||
Paragraph
|
||||
Text
|
||||
Code '+ "'
|
||||
Text '" +'
|
||||
Code '+ "`" +'
|
||||
Text '+ "'
|
||||
Code '" +'
|
||||
Text 'js\nvar foo = function (bar) {\n re...'
|
||||
Paragraph
|
||||
Text 'console.log(foo(5));\n'
|
||||
Code '+ "'
|
||||
Text '" +'
|
||||
Code '+ "`" +'
|
||||
Text '+ "'
|
||||
Code '" +'
|
||||
Heading
|
||||
Text 'Tables'
|
||||
Paragraph
|
||||
Text '| Option | Description |\n| ------ | ...'
|
||||
Paragraph
|
||||
Text 'Right aligned columns'
|
||||
Paragraph
|
||||
Text '| Option | Description |\n| ------:| ...'
|
||||
Heading
|
||||
Text 'Links'
|
||||
Paragraph
|
||||
Text
|
||||
Link 'url=http://dev.nodeca.com'
|
||||
Text 'link text'
|
||||
Paragraph
|
||||
Text
|
||||
Link 'url=http://nodeca.github.io/pica/demo/'
|
||||
Text 'link with title'
|
||||
Paragraph
|
||||
Text 'Autoconverted link'
|
||||
Link 'url=https://github.com/nodeca/pica'
|
||||
Text 'https://github.com/nodeca/pica'
|
||||
Text '(enable linkify to see)'
|
||||
Heading
|
||||
Text 'Images'
|
||||
Paragraph
|
||||
Text
|
||||
Image 'url=https://octodex.github.com/images/minion.png'
|
||||
Text 'Minion'
|
||||
Text '\n'
|
||||
Image 'url=https://octodex.github.com/images/stormtroopocat.jpg'
|
||||
Text 'Stormtroopocat'
|
||||
Paragraph
|
||||
Text 'Like links, Images also have a footno...'
|
||||
Paragraph
|
||||
Text
|
||||
Image 'url=https://octodex.github.com/images/dojocat.jpg'
|
||||
Text 'Alt text'
|
||||
Paragraph
|
||||
Text 'With a reference later in the documen...'
|
||||
Heading
|
||||
Text 'Plugins'
|
||||
Paragraph
|
||||
Text 'The killer feature of'
|
||||
Code '+ "'
|
||||
Text '" +'
|
||||
Code 'markdown-it'
|
||||
Text '+ "'
|
||||
Code '" +'
|
||||
Text 'is very effective support of\n'
|
||||
Link 'url=https://www.npmjs.org/browse/keyword/markdown-it-plugin'
|
||||
Text 'syntax plugins'
|
||||
Text '.'
|
||||
Heading
|
||||
Text
|
||||
Link 'url=https://github.com/markdown-it/markdown-it-emoji'
|
||||
Text 'Emojies'
|
||||
BlockQuote
|
||||
Paragraph
|
||||
Text 'Classic markup: :wink: :crush: :cry: ...'
|
||||
Paragraph
|
||||
Text 'see'
|
||||
Link 'url=https://github.com/markdown-it/markdown-it-emoji#change-output'
|
||||
Text 'how to change output'
|
||||
Text 'with twemoji.'
|
||||
Heading
|
||||
Text
|
||||
Link 'url=https://github.com/markdown-it/markdown-it-sub'
|
||||
Text 'Subscript'
|
||||
Text '/'
|
||||
Link 'url=https://github.com/markdown-it/markdown-it-sup'
|
||||
Text 'Superscript'
|
||||
Paragraph
|
||||
Text '- 19^th^\n- H~2~O'
|
||||
Heading
|
||||
Text
|
||||
Link 'url=https://github.com/markdown-it/markdown-it-ins'
|
||||
Text '\<ins>'
|
||||
Paragraph
|
||||
Text '++Inserted text++'
|
||||
Heading
|
||||
Text
|
||||
Link 'url=https://github.com/markdown-it/markdown-it-mark'
|
||||
Text '\<mark>'
|
||||
Paragraph
|
||||
Text '==Marked text=='
|
||||
Heading
|
||||
Text
|
||||
Link 'url=https://github.com/markdown-it/markdown-it-footnote'
|
||||
Text 'Footnotes'
|
||||
Paragraph
|
||||
Text 'Footnote 1 link[^first].'
|
||||
Paragraph
|
||||
Text 'Footnote 2 link[^second].'
|
||||
Paragraph
|
||||
Text 'Inline footnote^[Text of inline footn...'
|
||||
Paragraph
|
||||
Text 'Duplicated footnote reference[^second].'
|
||||
Paragraph
|
||||
Text '[^first]: Footnote'
|
||||
Strong
|
||||
Text 'can have markup'
|
||||
Paragraph
|
||||
Text 'and multiple paragraphs.'
|
||||
Paragraph
|
||||
Text '[^second]: Footnote text.'
|
||||
Heading
|
||||
Text
|
||||
Link 'url=https://github.com/markdown-it/markdown-it-deflist'
|
||||
Text 'Definition lists'
|
||||
List 'flags=definition start'
|
||||
ListItem 'flags=definition term has_block start'
|
||||
Paragraph
|
||||
Text 'Term 1'
|
||||
ListItem 'flags=definition has_block'
|
||||
Paragraph
|
||||
Text 'Definition 1\nwith lazy continuation.'
|
||||
ListItem 'flags=definition term has_block'
|
||||
Paragraph
|
||||
Text 'Term 2 with'
|
||||
Emph
|
||||
Text 'inline markup'
|
||||
Text
|
||||
ListItem 'flags=definition has_block end'
|
||||
Paragraph
|
||||
Text 'Definition 2\n\n\n { some code, pa...'
|
||||
Paragraph
|
||||
Text
|
||||
Emph
|
||||
Text 'Compact style:'
|
||||
Paragraph
|
||||
Text 'Term 1\n ~ Definition 1'
|
||||
Paragraph
|
||||
Text 'Term 2\n ~ Definition 2a\n ~ Defini...'
|
||||
Heading
|
||||
Text
|
||||
Link 'url=https://github.com/markdown-it/markdown-it-abbr'
|
||||
Text 'Abbreviations'
|
||||
Paragraph
|
||||
Text 'This is HTML abbreviation example.'
|
||||
Paragraph
|
||||
Text 'It converts "HTML", but keep intact p...'
|
||||
Paragraph
|
||||
Text '*[HTML]: Hyper Text Markup Language'
|
||||
Heading
|
||||
Text
|
||||
Link 'url=https://github.com/markdown-it/markdown-it-container'
|
||||
Text 'Custom containers'
|
||||
Paragraph
|
||||
Text '::: warning\n'
|
||||
Emph
|
||||
Text 'here be dragons'
|
||||
|
esc_test.go (50 changed lines)
|
@@ -1,50 +0,0 @@
|
|||
package markdown
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/gomarkdown/markdown/html"
|
||||
)
|
||||
|
||||
func TestEsc(t *testing.T) {
|
||||
tests := []string{
|
||||
"abc", "abc",
|
||||
"a&c", "a&c",
|
||||
"<", "<",
|
||||
"[]:<", "[]:<",
|
||||
"Hello <!--", "Hello <!--",
|
||||
}
|
||||
for i := 0; i < len(tests); i += 2 {
|
||||
var b bytes.Buffer
|
||||
html.EscapeHTML(&b, []byte(tests[i]))
|
||||
if !bytes.Equal(b.Bytes(), []byte(tests[i+1])) {
|
||||
t.Errorf("\nInput [%#v]\nExpected[%#v]\nActual [%#v]",
|
||||
tests[i], tests[i+1], b.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEscapeHTML(b *testing.B) {
|
||||
tests := [][]byte{
|
||||
[]byte(""),
|
||||
[]byte("AT&T has an ampersand in their name."),
|
||||
[]byte("AT&T is another way to write it."),
|
||||
[]byte("This & that."),
|
||||
[]byte("4 < 5."),
|
||||
[]byte("6 > 5."),
|
||||
[]byte("Here's a [link] [1] with an ampersand in the URL."),
|
||||
[]byte("Here's a link with an ampersand in the link text: [AT&T] [2]."),
|
||||
[]byte("Here's an inline [link](/script?foo=1&bar=2)."),
|
||||
[]byte("Here's an inline [link](</script?foo=1&bar=2>)."),
|
||||
[]byte("[1]: http://example.com/?foo=1&bar=2"),
|
||||
[]byte("[2]: http://att.com/ \"AT&T\""),
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
for n := 0; n < b.N; n++ {
|
||||
for _, t := range tests {
|
||||
html.EscapeHTML(&buf, t)
|
||||
buf.Reset()
|
||||
}
|
||||
}
|
||||
}
|
go.mod (8 changed lines)
|
@@ -2,4 +2,10 @@ module github.com/gomarkdown/markdown
|
|||
|
||||
go 1.12
|
||||
|
||||
require golang.org/dl v0.0.0-20190829154251-82a15e2f2ead // indirect
|
||||
require (
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20191022152526-8cb203812681 // indirect
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0 // indirect
|
||||
github.com/stephens2424/writerset v1.0.2 // indirect
|
||||
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead // indirect
|
||||
golang.org/x/tools v0.0.0-20191109212701-97ad0ed33101 // indirect
|
||||
)
|
||||
|
|
|
@@ -0,0 +1,21 @@
|
|||
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20191022152526-8cb203812681 h1:3WV5aRRj1ELP3RcLlBp/v0WJTuy47OQMkL9GIQq8QEE=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20191022152526-8cb203812681/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
|
||||
github.com/stephens2424/writerset v1.0.2 h1:znRLgU6g8RS5euYRcy004XeE4W+Tu44kALzy7ghPif8=
|
||||
github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20191109212701-97ad0ed33101 h1:LCmXVkvpQCDj724eX6irUTPCJP5GelFHxqGSWL2D1R0=
|
||||
golang.org/x/tools v0.0.0-20191109212701-97ad0ed33101/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
@@ -2,54 +2,51 @@ package markdown
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"encoding/json"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gomarkdown/markdown/html"
|
||||
"github.com/gomarkdown/markdown/parser"
|
||||
)
|
||||
|
||||
type TestParams struct {
|
||||
extensions parser.Extensions
|
||||
referenceOverride parser.ReferenceOverrideFunc
|
||||
html.Flags
|
||||
html.RendererOptions
|
||||
}
|
||||
|
||||
func runMarkdown(input string, params TestParams) string {
|
||||
params.RendererOptions.Flags = params.Flags
|
||||
func runMarkdown(input string, params TestParams) (string, error) {
|
||||
parser := parser.NewWithExtensions(params.extensions)
|
||||
parser.ReferenceOverride = params.referenceOverride
|
||||
renderer := html.NewRenderer(params.RendererOptions)
|
||||
|
||||
d := ToHTML([]byte(input), parser, renderer)
|
||||
return string(d)
|
||||
result := parser.Parse([]byte(input))
|
||||
if result == nil {
|
||||
return "", nil
|
||||
}
|
||||
output, err := json.Marshal(result)
|
||||
return string(output), err
|
||||
}
|
||||
|
||||
// doTests runs full document tests using MarkdownCommon configuration.
|
||||
func doTests(t *testing.T, tests []string) {
|
||||
doTestsParam(t, tests, TestParams{
|
||||
extensions: parser.CommonExtensions,
|
||||
RendererOptions: html.RendererOptions{
|
||||
Flags: html.CommonFlags,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func doTestsBlock(t *testing.T, tests []string, extensions parser.Extensions) {
|
||||
doTestsParam(t, tests, TestParams{
|
||||
extensions: extensions,
|
||||
Flags: html.UseXHTML,
|
||||
})
|
||||
}
|
||||
|
||||
func doTestsParam(t *testing.T, tests []string, params TestParams) {
|
||||
for i := 0; i+1 < len(tests); i += 2 {
|
||||
input := tests[i]
|
||||
expected := tests[i+1]
|
||||
got := runMarkdown(input, params)
|
||||
expected := strings.TrimRight(tests[i+1], "\n")
|
||||
got, err := runMarkdown(input, params)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to marshal json: %+v\n", err)
|
||||
}
|
||||
if got != expected {
|
||||
t.Errorf("\nInput [%#v]\nExpected[%#v]\nGot [%#v]\nInput:\n%s\nExpected:\n%s\nGot:\n%s\n",
|
||||
input, expected, got, input, expected, got)
|
||||
|
@@ -65,34 +62,23 @@ func doLinkTestsInline(t *testing.T, tests []string) {
|
|||
doTestsInline(t, tests)
|
||||
|
||||
prefix := "http://localhost"
|
||||
params := html.RendererOptions{AbsolutePrefix: prefix}
|
||||
transformTests := transformLinks(tests, prefix)
|
||||
doTestsInlineParam(t, transformTests, TestParams{
|
||||
RendererOptions: params,
|
||||
})
|
||||
doTestsInlineParam(t, transformTests, TestParams{
|
||||
Flags: html.UseXHTML,
|
||||
RendererOptions: params,
|
||||
})
|
||||
doTestsInlineParam(t, transformTests, TestParams{})
|
||||
doTestsInlineParam(t, transformTests, TestParams{})
|
||||
}
|
||||
|
||||
func doSafeTestsInline(t *testing.T, tests []string) {
|
||||
doTestsInlineParam(t, tests, TestParams{Flags: html.Safelink})
|
||||
doTestsInlineParam(t, tests, TestParams{})
|
||||
|
||||
// All the links in this test should not have the prefix appended, so
|
||||
// just rerun it with different parameters and the same expectations.
|
||||
prefix := "http://localhost"
|
||||
params := html.RendererOptions{AbsolutePrefix: prefix}
|
||||
transformTests := transformLinks(tests, prefix)
|
||||
doTestsInlineParam(t, transformTests, TestParams{
|
||||
Flags: html.Safelink,
|
||||
RendererOptions: params,
|
||||
})
|
||||
doTestsInlineParam(t, transformTests, TestParams{})
|
||||
}
|
||||
|
||||
func doTestsInlineParam(t *testing.T, tests []string, params TestParams) {
|
||||
params.extensions |= parser.Autolink | parser.Strikethrough
|
||||
params.Flags |= html.UseXHTML
|
||||
doTestsParam(t, tests, params)
|
||||
}
|
||||
|
||||
|
@@ -110,35 +96,6 @@ func transformLinks(tests []string, prefix string) []string {
|
|||
return newTests
|
||||
}
|
||||
|
||||
func doTestsReference(t *testing.T, files []string, flag parser.Extensions) {
|
||||
params := TestParams{extensions: flag}
|
||||
for _, basename := range files {
|
||||
filename := filepath.Join("testdata", basename+".text")
|
||||
inputBytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
t.Errorf("Couldn't open '%s', error: %v\n", filename, err)
|
||||
continue
|
||||
}
|
||||
inputBytes = normalizeNewlines(inputBytes)
|
||||
input := string(inputBytes)
|
||||
|
||||
filename = filepath.Join("testdata", basename+".html")
|
||||
expectedBytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
t.Errorf("Couldn't open '%s', error: %v\n", filename, err)
|
||||
continue
|
||||
}
|
||||
expectedBytes = normalizeNewlines(expectedBytes)
|
||||
expected := string(expectedBytes)
|
||||
|
||||
actual := string(runMarkdown(input, params))
|
||||
if actual != expected {
|
||||
t.Errorf("\n [%#v]\nExpected[%#v]\nActual [%#v]",
|
||||
basename+".text", expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeNewlines(d []byte) []byte {
|
||||
// replace CR LF (windows) with LF (unix)
|
||||
d = bytes.Replace(d, []byte{13, 10}, []byte{10}, -1)
|
||||
|
|
|
@@ -1,42 +0,0 @@
|
|||
package html
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/gomarkdown/markdown/ast"
|
||||
"github.com/gomarkdown/markdown/parser"
|
||||
)
|
||||
|
||||
// EscapeHTMLCallouts writes html-escaped d to w. It escapes &, <, > and " characters, *but*
|
||||
// expands callouts <<N>> with the callout HTML, i.e. by calling r.callout() with a newly created
|
||||
// ast.Callout node.
|
||||
func (r *Renderer) EscapeHTMLCallouts(w io.Writer, d []byte) {
|
||||
ld := len(d)
|
||||
Parse:
|
||||
for i := 0; i < ld; i++ {
|
||||
for _, comment := range r.opts.Comments {
|
||||
if !bytes.HasPrefix(d[i:], comment) {
|
||||
break
|
||||
}
|
||||
|
||||
lc := len(comment)
|
||||
if i+lc < ld {
|
||||
if id, consumed := parser.IsCallout(d[i+lc:]); consumed > 0 {
|
||||
// We have seen a callout
|
||||
callout := &ast.Callout{ID: id}
|
||||
r.callout(w, callout)
|
||||
i += consumed + lc - 1
|
||||
continue Parse
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
escSeq := Escaper[d[i]]
|
||||
if escSeq != nil {
|
||||
w.Write(escSeq)
|
||||
} else {
|
||||
w.Write([]byte{d[i]})
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,27 +0,0 @@
|
|||
package html
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEscapeHTMLCallouts(t *testing.T) {
|
||||
buf := &bytes.Buffer{}
|
||||
code := []byte(`println("hello")
|
||||
more code //<<4>>
|
||||
bliep bliep
|
||||
`)
|
||||
out := `println("hello")
|
||||
more code <span class="callout">4</span>
|
||||
bliep bliep
|
||||
`
|
||||
opts := RendererOptions{}
|
||||
opts.Comments = [][]byte{[]byte("//")}
|
||||
|
||||
r := NewRenderer(opts)
|
||||
r.EscapeHTMLCallouts(buf, code)
|
||||
|
||||
if buf.String() != out {
|
||||
t.Error("callout code block not correctly parsed")
|
||||
}
|
||||
}
|
html/doc.go (43 changed lines)
|
@@ -1,43 +0,0 @@
|
|||
/*
|
||||
Package html implements HTML renderer of parsed markdown document.
|
||||
|
||||
Configuring and customizing a renderer
|
||||
|
||||
A renderer can be configured with multiple options:
|
||||
|
||||
import "github.com/gomarkdown/markdown/html"
|
||||
|
||||
flags := html.CommonFlags | html.CompletePage | html.HrefTargetBlank
|
||||
opts := html.RenderOptions{
|
||||
TItle: "A custom title",
|
||||
Flags: flags,
|
||||
}
|
||||
renderer := html.NewRenderer(opts)
|
||||
|
||||
You can also re-use most of the logic and customize rendering of selected nodes
|
||||
by providing node render hook.
|
||||
This is most useful for rendering nodes that allow for design choices, like
|
||||
links or code blocks.
|
||||
|
||||
import (
|
||||
"github.com/gomarkdown/markdown/html"
|
||||
"github.com/gomarkdown/markdown/ast"
|
||||
)
|
||||
|
||||
// a very dummy render hook that will output "code_replacements" instead of
|
||||
// <code>${content}</code> emitted by html.Renderer
|
||||
func renderHookCodeBlock(w io.Writer, node *ast.Node, entering bool) (ast.WalkStatus, bool) {
|
||||
_, ok := node.Data.(*ast.CodeBlockData)
|
||||
if !ok {
|
||||
return ast.GoToNext, false
|
||||
}
|
||||
io.WriteString(w, "code_replacement")
|
||||
return ast.GoToNext, true
|
||||
}
|
||||
|
||||
opts := html.RendererOptions{
|
||||
RenderNodeHook: renderHookCodeBlock,
|
||||
}
|
||||
renderer := html.NewRenderer(opts)
|
||||
*/
|
||||
package html
|
html/esc.go (50 changed lines)
|
@@ -1,50 +0,0 @@
|
|||
package html
|
||||
|
||||
import (
|
||||
"html"
|
||||
"io"
|
||||
)
|
||||
|
||||
var Escaper = [256][]byte{
|
||||
'&': []byte("&amp;"),
|
||||
'<': []byte("&lt;"),
|
||||
'>': []byte("&gt;"),
|
||||
'"': []byte("""),
|
||||
}
|
||||
|
||||
// EscapeHTML writes html-escaped d to w. It escapes &, <, > and " characters.
|
||||
func EscapeHTML(w io.Writer, d []byte) {
|
||||
var start, end int
|
||||
n := len(d)
|
||||
for end < n {
|
||||
escSeq := Escaper[d[end]]
|
||||
if escSeq != nil {
|
||||
w.Write(d[start:end])
|
||||
w.Write(escSeq)
|
||||
start = end + 1
|
||||
}
|
||||
end++
|
||||
}
|
||||
if start < n && end <= n {
|
||||
w.Write(d[start:end])
|
||||
}
|
||||
}
|
||||
|
||||
func escLink(w io.Writer, text []byte) {
|
||||
unesc := html.UnescapeString(string(text))
|
||||
EscapeHTML(w, []byte(unesc))
|
||||
}
|
||||
|
||||
// Escape writes the text to w, but skips the escape character.
|
||||
func Escape(w io.Writer, text []byte) {
|
||||
esc := false
|
||||
for i := 0; i < len(text); i++ {
|
||||
if text[i] == '\\' {
|
||||
esc = !esc
|
||||
}
|
||||
if esc && text[i] == '\\' {
|
||||
continue
|
||||
}
|
||||
w.Write([]byte{text[i]})
|
||||
}
|
||||
}
|
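For reference, the escaping helper deleted here is the one exercised by the removed TestEsc and BenchmarkEscapeHTML earlier in this diff; a minimal sketch of what it did against the pre-commit html package (the input string is only an example):

	package main

	import (
		"bytes"
		"fmt"

		"github.com/gomarkdown/markdown/html"
	)

	func main() {
		var buf bytes.Buffer
		html.EscapeHTML(&buf, []byte(`a < b & "c"`)) // escapes &, <, > and "
		fmt.Println(buf.String())                    // a &lt; b &amp; &quot;c&quot;
	}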
html/renderer.go (1318 changed lines)
File diff suppressed because it is too large
|
@@ -1,444 +0,0 @@
|
|||
package html
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// SmartyPants rendering
|
||||
|
||||
// SPRenderer is a struct containing state of a Smartypants renderer.
|
||||
type SPRenderer struct {
|
||||
inSingleQuote bool
|
||||
inDoubleQuote bool
|
||||
callbacks [256]smartCallback
|
||||
}
|
||||
|
||||
func wordBoundary(c byte) bool {
|
||||
return c == 0 || isSpace(c) || isPunctuation(c)
|
||||
}
|
||||
|
||||
func tolower(c byte) byte {
|
||||
if c >= 'A' && c <= 'Z' {
|
||||
return c - 'A' + 'a'
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func isdigit(c byte) bool {
|
||||
return c >= '0' && c <= '9'
|
||||
}
|
||||
|
||||
func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool {
|
||||
// edge of the buffer is likely to be a tag that we don't get to see,
|
||||
// so we treat it like text sometimes
|
||||
|
||||
// enumerate all sixteen possibilities for (previousChar, nextChar)
|
||||
// each can be one of {0, space, punct, other}
|
||||
switch {
|
||||
case previousChar == 0 && nextChar == 0:
|
||||
// context is not any help here, so toggle
|
||||
*isOpen = !*isOpen
|
||||
case isSpace(previousChar) && nextChar == 0:
|
||||
// [ "] might be [ "<code>foo...]
|
||||
*isOpen = true
|
||||
case isPunctuation(previousChar) && nextChar == 0:
|
||||
// [!"] hmm... could be [Run!"] or [("<code>...]
|
||||
*isOpen = false
|
||||
case /* isnormal(previousChar) && */ nextChar == 0:
|
||||
// [a"] is probably a close
|
||||
*isOpen = false
|
||||
case previousChar == 0 && isSpace(nextChar):
|
||||
// [" ] might be [...foo</code>" ]
|
||||
*isOpen = false
|
||||
case isSpace(previousChar) && isSpace(nextChar):
|
||||
// [ " ] context is not any help here, so toggle
|
||||
*isOpen = !*isOpen
|
||||
case isPunctuation(previousChar) && isSpace(nextChar):
|
||||
// [!" ] is probably a close
|
||||
*isOpen = false
|
||||
case /* isnormal(previousChar) && */ isSpace(nextChar):
|
||||
// [a" ] this is one of the easy cases
|
||||
*isOpen = false
|
||||
case previousChar == 0 && isPunctuation(nextChar):
|
||||
// ["!] hmm... could be ["$1.95] or [</code>"!...]
|
||||
*isOpen = false
|
||||
case isSpace(previousChar) && isPunctuation(nextChar):
|
||||
// [ "!] looks more like [ "$1.95]
|
||||
*isOpen = true
|
||||
case isPunctuation(previousChar) && isPunctuation(nextChar):
|
||||
// [!"!] context is not any help here, so toggle
|
||||
*isOpen = !*isOpen
|
||||
case /* isnormal(previousChar) && */ isPunctuation(nextChar):
|
||||
// [a"!] is probably a close
|
||||
*isOpen = false
|
||||
case previousChar == 0 /* && isnormal(nextChar) */ :
|
||||
// ["a] is probably an open
|
||||
*isOpen = true
|
||||
case isSpace(previousChar) /* && isnormal(nextChar) */ :
|
||||
// [ "a] this is one of the easy cases
|
||||
*isOpen = true
|
||||
case isPunctuation(previousChar) /* && isnormal(nextChar) */ :
|
||||
// [!"a] is probably an open
|
||||
*isOpen = true
|
||||
default:
|
||||
// [a'b] maybe a contraction?
|
||||
*isOpen = false
|
||||
}
|
||||
|
||||
// Note that with the limited lookahead, this non-breaking
|
||||
// space will also be appended to single double quotes.
|
||||
if addNBSP && !*isOpen {
|
||||
out.WriteString(" ")
|
||||
}
|
||||
|
||||
out.WriteByte('&')
|
||||
if *isOpen {
|
||||
out.WriteByte('l')
|
||||
} else {
|
||||
out.WriteByte('r')
|
||||
}
|
||||
out.WriteByte(quote)
|
||||
out.WriteString("quo;")
|
||||
|
||||
if addNBSP && *isOpen {
|
||||
out.WriteString(" ")
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
if len(text) >= 2 {
|
||||
t1 := tolower(text[1])
|
||||
|
||||
if t1 == '\'' {
|
||||
nextChar := byte(0)
|
||||
if len(text) >= 3 {
|
||||
nextChar = text[2]
|
||||
}
|
||||
if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
|
||||
out.WriteString("’")
|
||||
return 0
|
||||
}
|
||||
|
||||
if len(text) >= 3 {
|
||||
t2 := tolower(text[2])
|
||||
|
||||
if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
|
||||
(len(text) < 4 || wordBoundary(text[3])) {
|
||||
out.WriteString("’")
|
||||
return 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nextChar := byte(0)
|
||||
if len(text) > 1 {
|
||||
nextChar = text[1]
|
||||
}
|
||||
if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
|
||||
return 0
|
||||
}
|
||||
|
||||
out.WriteByte(text[0])
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
if len(text) >= 3 {
|
||||
t1 := tolower(text[1])
|
||||
t2 := tolower(text[2])
|
||||
|
||||
if t1 == 'c' && t2 == ')' {
|
||||
out.WriteString("©")
|
||||
return 2
|
||||
}
|
||||
|
||||
if t1 == 'r' && t2 == ')' {
|
||||
out.WriteString("®")
|
||||
return 2
|
||||
}
|
||||
|
||||
if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
|
||||
out.WriteString("™")
|
||||
return 3
|
||||
}
|
||||
}
|
||||
|
||||
out.WriteByte(text[0])
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
if len(text) >= 2 {
|
||||
if text[1] == '-' {
|
||||
out.WriteString("—")
|
||||
return 1
|
||||
}
|
||||
|
||||
if wordBoundary(previousChar) && wordBoundary(text[1]) {
|
||||
out.WriteString("–")
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
out.WriteByte(text[0])
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
|
||||
out.WriteString("—")
|
||||
return 2
|
||||
}
|
||||
if len(text) >= 2 && text[1] == '-' {
|
||||
out.WriteString("–")
|
||||
return 1
|
||||
}
|
||||
|
||||
out.WriteByte(text[0])
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
|
||||
if bytes.HasPrefix(text, []byte(""")) {
|
||||
nextChar := byte(0)
|
||||
if len(text) >= 7 {
|
||||
nextChar = text[6]
|
||||
}
|
||||
if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
|
||||
return 5
|
||||
}
|
||||
}
|
||||
|
||||
if bytes.HasPrefix(text, []byte("�")) {
|
||||
return 3
|
||||
}
|
||||
|
||||
out.WriteByte('&')
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
|
||||
var quote byte = 'd'
|
||||
if angledQuotes {
|
||||
quote = 'a'
|
||||
}
|
||||
|
||||
return func(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
|
||||
out.WriteString("…")
|
||||
return 2
|
||||
}
|
||||
|
||||
if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
|
||||
out.WriteString("…")
|
||||
return 4
|
||||
}
|
||||
|
||||
out.WriteByte(text[0])
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
if len(text) >= 2 && text[1] == '`' {
|
||||
nextChar := byte(0)
|
||||
if len(text) >= 3 {
|
||||
nextChar = text[2]
|
||||
}
|
||||
if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
out.WriteByte(text[0])
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
|
||||
// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
|
||||
// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
|
||||
// and avoid changing dates like 1/23/2005 into fractions.
|
||||
numEnd := 0
|
||||
for len(text) > numEnd && isdigit(text[numEnd]) {
|
||||
numEnd++
|
||||
}
|
||||
if numEnd == 0 {
|
||||
out.WriteByte(text[0])
|
||||
return 0
|
||||
}
|
||||
denStart := numEnd + 1
|
||||
if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
|
||||
denStart = numEnd + 3
|
||||
} else if len(text) < numEnd+2 || text[numEnd] != '/' {
|
||||
out.WriteByte(text[0])
|
||||
return 0
|
||||
}
|
||||
denEnd := denStart
|
||||
for len(text) > denEnd && isdigit(text[denEnd]) {
|
||||
denEnd++
|
||||
}
|
||||
if denEnd == denStart {
|
||||
out.WriteByte(text[0])
|
||||
return 0
|
||||
}
|
||||
if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
|
||||
out.WriteString("<sup>")
|
||||
out.Write(text[:numEnd])
|
||||
out.WriteString("</sup>⁄<sub>")
|
||||
out.Write(text[denStart:denEnd])
|
||||
out.WriteString("</sub>")
|
||||
return denEnd - 1
|
||||
}
|
||||
}
|
||||
|
||||
out.WriteByte(text[0])
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
|
||||
if text[0] == '1' && text[1] == '/' && text[2] == '2' {
|
||||
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
|
||||
out.WriteString("½")
|
||||
return 2
|
||||
}
|
||||
}
|
||||
|
||||
if text[0] == '1' && text[1] == '/' && text[2] == '4' {
|
||||
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
|
||||
out.WriteString("¼")
|
||||
return 2
|
||||
}
|
||||
}
|
||||
|
||||
if text[0] == '3' && text[1] == '/' && text[2] == '4' {
|
||||
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
|
||||
out.WriteString("¾")
|
||||
return 2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
out.WriteByte(text[0])
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
|
||||
nextChar := byte(0)
|
||||
if len(text) > 1 {
|
||||
nextChar = text[1]
|
||||
}
|
||||
if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
|
||||
out.WriteString(""")
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
|
||||
}
|
||||
|
||||
func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||||
i := 0
|
||||
|
||||
for i < len(text) && text[i] != '>' {
|
||||
i++
|
||||
}
|
||||
|
||||
out.Write(text[:i+1])
|
||||
return i
|
||||
}
|
||||
|
||||
type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int
|
||||
|
||||
// NewSmartypantsRenderer constructs a Smartypants renderer object.
|
||||
func NewSmartypantsRenderer(flags Flags) *SPRenderer {
|
||||
var (
|
||||
r SPRenderer
|
||||
|
||||
smartAmpAngled = r.smartAmp(true, false)
|
||||
smartAmpAngledNBSP = r.smartAmp(true, true)
|
||||
smartAmpRegular = r.smartAmp(false, false)
|
||||
smartAmpRegularNBSP = r.smartAmp(false, true)
|
||||
|
||||
addNBSP = flags&SmartypantsQuotesNBSP != 0
|
||||
)
|
||||
|
||||
if flags&SmartypantsAngledQuotes == 0 {
|
||||
r.callbacks['"'] = r.smartDoubleQuote
|
||||
if !addNBSP {
|
||||
r.callbacks['&'] = smartAmpRegular
|
||||
} else {
|
||||
r.callbacks['&'] = smartAmpRegularNBSP
|
||||
}
|
||||
} else {
|
||||
r.callbacks['"'] = r.smartAngledDoubleQuote
|
||||
if !addNBSP {
|
||||
r.callbacks['&'] = smartAmpAngled
|
||||
} else {
|
||||
r.callbacks['&'] = smartAmpAngledNBSP
|
||||
}
|
||||
}
|
||||
r.callbacks['\''] = r.smartSingleQuote
|
||||
r.callbacks['('] = r.smartParens
|
||||
if flags&SmartypantsDashes != 0 {
|
||||
if flags&SmartypantsLatexDashes == 0 {
|
||||
r.callbacks['-'] = r.smartDash
|
||||
} else {
|
||||
r.callbacks['-'] = r.smartDashLatex
|
||||
}
|
||||
}
|
||||
r.callbacks['.'] = r.smartPeriod
|
||||
if flags&SmartypantsFractions == 0 {
|
||||
r.callbacks['1'] = r.smartNumber
|
||||
r.callbacks['3'] = r.smartNumber
|
||||
} else {
|
||||
for ch := '1'; ch <= '9'; ch++ {
|
||||
r.callbacks[ch] = r.smartNumberGeneric
|
||||
}
|
||||
}
|
||||
r.callbacks['<'] = r.smartLeftAngle
|
||||
r.callbacks['`'] = r.smartBacktick
|
||||
return &r
|
||||
}
|
||||
|
||||
// Process is the entry point of the Smartypants renderer.
|
||||
func (r *SPRenderer) Process(w io.Writer, text []byte) {
|
||||
mark := 0
|
||||
for i := 0; i < len(text); i++ {
|
||||
if action := r.callbacks[text[i]]; action != nil {
|
||||
if i > mark {
|
||||
w.Write(text[mark:i])
|
||||
}
|
||||
previousChar := byte(0)
|
||||
if i > 0 {
|
||||
previousChar = text[i-1]
|
||||
}
|
||||
var tmp bytes.Buffer
|
||||
i += action(&tmp, previousChar, text[i:])
|
||||
w.Write(tmp.Bytes())
|
||||
mark = i + 1
|
||||
}
|
||||
}
|
||||
if mark < len(text) {
|
||||
w.Write(text[mark:])
|
||||
}
|
||||
}
|
|
@@ -1,56 +0,0 @@
|
|||
package markdown
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/gomarkdown/markdown/ast"
|
||||
"github.com/gomarkdown/markdown/html"
|
||||
"github.com/gomarkdown/markdown/parser"
|
||||
)
|
||||
|
||||
func renderHookEmpty(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) {
|
||||
return ast.GoToNext, true
|
||||
}
|
||||
|
||||
func TestRenderNodeHookEmpty(t *testing.T) {
|
||||
tests := []string{
|
||||
"[foo](gopher://foo.bar)",
|
||||
"",
|
||||
|
||||
"[foo](mailto://bar/)\n",
|
||||
"",
|
||||
}
|
||||
|
||||
htmlParams := html.RendererOptions{
|
||||
RenderNodeHook: renderHookEmpty,
|
||||
}
|
||||
params := TestParams{
|
||||
RendererOptions: htmlParams,
|
||||
}
|
||||
doTestsParam(t, tests, params)
|
||||
}
|
||||
|
||||
func renderHookCodeBlock(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) {
|
||||
_, ok := node.(*ast.CodeBlock)
|
||||
if !ok {
|
||||
return ast.GoToNext, false
|
||||
}
|
||||
io.WriteString(w, "code_replacement")
|
||||
return ast.GoToNext, true
|
||||
}
|
||||
|
||||
func TestRenderNodeHookCode(t *testing.T) {
|
||||
tests := []string{
|
||||
"a\n```go\ncode\n```\nb",
|
||||
"<p>a</p>\ncode_replacement\n<p>b</p>\n",
|
||||
}
|
||||
opts := html.RendererOptions{
|
||||
RenderNodeHook: renderHookCodeBlock,
|
||||
}
|
||||
params := TestParams{
|
||||
RendererOptions: opts,
|
||||
extensions: parser.CommonExtensions,
|
||||
}
|
||||
doTestsParam(t, tests, params)
|
||||
}
|
inline_test.go (334 changed lines)
|
@@ -1,12 +1,8 @@
|
|||
package markdown
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/gomarkdown/markdown/html"
|
||||
"github.com/gomarkdown/markdown/parser"
|
||||
)
|
||||
|
||||
|
@@ -15,7 +11,12 @@ func TestEmphasis(t *testing.T) {
|
|||
doTestsInlineParam(t, tests, TestParams{})
|
||||
}
|
||||
|
||||
func TestReferenceOverride(t *testing.T) {
|
||||
func TestStatusTag(t *testing.T) {
|
||||
tests := readTestFile2(t, "status_tag.test")
|
||||
doTestsInlineParam(t, tests, TestParams{})
|
||||
}
|
||||
|
||||
func testReferenceOverride(t *testing.T) {
|
||||
var tests = []string{
|
||||
"test [ref1][]\n",
|
||||
"<p>test <a href=\"http://www.ref1.com/\" title=\"Reference 1\">ref1</a></p>\n",
|
||||
|
@@ -76,58 +77,61 @@ func TestReferenceOverride(t *testing.T) {
|
|||
func TestStrong(t *testing.T) {
|
||||
var tests = []string{
|
||||
"nothing inline\n",
|
||||
"<p>nothing inline</p>\n",
|
||||
"[{\"literal\":\"nothing inline\"}]",
|
||||
|
||||
"simple **inline** test\n",
|
||||
"<p>simple <strong>inline</strong> test</p>\n",
|
||||
"[{\"literal\":\"simple \"},{\"type\":\"strong\",\"literal\":\"inline\"},{\"literal\":\" test\"}]",
|
||||
|
||||
"simple ***triple*** test\n",
|
||||
"[{\"literal\":\"simple \"},{\"type\":\"strong\",\"literal\":\"triple\"},{\"literal\":\" test\"}]",
|
||||
|
||||
"**at the** beginning\n",
|
||||
"<p><strong>at the</strong> beginning</p>\n",
|
||||
"[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"at the\"},{\"literal\":\" beginning\"}]",
|
||||
|
||||
"at the **end**\n",
|
||||
"<p>at the <strong>end</strong></p>\n",
|
||||
"[{\"literal\":\"at the \"},{\"type\":\"strong\",\"literal\":\"end\"}]",
|
||||
|
||||
"**try two** in **one line**\n",
|
||||
"<p><strong>try two</strong> in <strong>one line</strong></p>\n",
|
||||
"[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"try two\"},{\"literal\":\" in \"},{\"type\":\"strong\",\"literal\":\"one line\"}]",
|
||||
|
||||
"over **two\nlines** test\n",
|
||||
"<p>over <strong>two\nlines</strong> test</p>\n",
|
||||
"[{\"literal\":\"over \"},{\"type\":\"strong\",\"literal\":\"two\\nlines\"},{\"literal\":\" test\"}]",
|
||||
|
||||
"odd **number of** markers** here\n",
|
||||
"<p>odd <strong>number of</strong> markers** here</p>\n",
|
||||
"[{\"literal\":\"odd \"},{\"type\":\"strong\",\"literal\":\"number of\"},{\"literal\":\" markers** here\"}]",
|
||||
|
||||
"odd **number\nof** markers** here\n",
|
||||
"<p>odd <strong>number\nof</strong> markers** here</p>\n",
|
||||
"[{\"literal\":\"odd \"},{\"type\":\"strong\",\"literal\":\"number\\nof\"},{\"literal\":\" markers** here\"}]",
|
||||
|
||||
"simple __inline__ test\n",
|
||||
"<p>simple <strong>inline</strong> test</p>\n",
|
||||
"[{\"literal\":\"simple \"},{\"type\":\"strong\",\"literal\":\"inline\"},{\"literal\":\" test\"}]",
|
||||
|
||||
"__at the__ beginning\n",
|
||||
"<p><strong>at the</strong> beginning</p>\n",
|
||||
"[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"at the\"},{\"literal\":\" beginning\"}]",
|
||||
|
||||
"at the __end__\n",
|
||||
"<p>at the <strong>end</strong></p>\n",
|
||||
"[{\"literal\":\"at the \"},{\"type\":\"strong\",\"literal\":\"end\"}]",
|
||||
|
||||
"__try two__ in __one line__\n",
|
||||
"<p><strong>try two</strong> in <strong>one line</strong></p>\n",
|
||||
"[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"try two\"},{\"literal\":\" in \"},{\"type\":\"strong\",\"literal\":\"one line\"}]",
|
||||
|
||||
"over __two\nlines__ test\n",
|
||||
"<p>over <strong>two\nlines</strong> test</p>\n",
|
||||
"[{\"literal\":\"over \"},{\"type\":\"strong\",\"literal\":\"two\\nlines\"},{\"literal\":\" test\"}]",
|
||||
|
||||
"odd __number of__ markers__ here\n",
|
||||
"<p>odd <strong>number of</strong> markers__ here</p>\n",
|
||||
"[{\"literal\":\"odd \"},{\"type\":\"strong\",\"literal\":\"number of\"},{\"literal\":\" markers__ here\"}]",
|
||||
|
||||
"odd __number\nof__ markers__ here\n",
|
||||
"<p>odd <strong>number\nof</strong> markers__ here</p>\n",
|
||||
"[{\"literal\":\"odd \"},{\"type\":\"strong\",\"literal\":\"number\\nof\"},{\"literal\":\" markers__ here\"}]",
|
||||
|
||||
"mix of **markers__\n",
|
||||
"<p>mix of **markers__</p>\n",
|
||||
"[{\"literal\":\"mix of **markers__\"}]",
|
||||
|
||||
"**`/usr`** : this folder is named `usr`\n",
|
||||
"<p><strong><code>/usr</code></strong> : this folder is named <code>usr</code></p>\n",
|
||||
"[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"`/usr`\"},{\"literal\":\" : this folder is named \"},{\"type\":\"code\",\"literal\":\"usr\"}]",
|
||||
|
||||
"**`/usr`** :\n\n this folder is named `usr`\n",
|
||||
"<p><strong><code>/usr</code></strong> :</p>\n\n<p>this folder is named <code>usr</code></p>\n",
|
||||
"[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"`/usr`\"},{\"literal\":\" :\\n\\n this folder is named \"},{\"type\":\"code\",\"literal\":\"usr\"}]",
|
||||
}
|
||||
doTestsInline(t, tests)
|
||||
}
|
||||
|
@ -135,12 +139,12 @@ func TestStrong(t *testing.T) {
|
|||
func TestStrongShort(t *testing.T) {
|
||||
var tests = []string{
|
||||
"**`/usr`** :\n\n this folder is named `usr`\n",
|
||||
"<p><strong><code>/usr</code></strong> :</p>\n\n<p>this folder is named <code>usr</code></p>\n",
|
||||
"[{\"literal\":\"\"},{\"type\":\"strong\",\"literal\":\"`/usr`\"},{\"literal\":\" :\\n\\n this folder is named \"},{\"type\":\"code\",\"literal\":\"usr\"}]",
|
||||
}
|
||||
doTestsInline(t, tests)
|
||||
|
||||
}
|
||||
func TestEmphasisMix(t *testing.T) {
|
||||
func testEmphasisMix(t *testing.T) {
|
||||
var tests = []string{
|
||||
"***triple emphasis***\n",
|
||||
"<p><strong><em>triple emphasis</em></strong></p>\n",
|
||||
|
@ -169,7 +173,7 @@ func TestEmphasisMix(t *testing.T) {
|
|||
doTestsInline(t, tests)
|
||||
}
|
||||
|
||||
func TestEmphasisLink(t *testing.T) {
|
||||
func testEmphasisLink(t *testing.T) {
|
||||
var tests = []string{
|
||||
"[first](before) *text[second] (inside)text* [third](after)\n",
|
||||
"<p><a href=\"before\">first</a> <em>text<a href=\"inside\">second</a>text</em> <a href=\"after\">third</a></p>\n",
|
||||
|
@ -186,7 +190,7 @@ func TestEmphasisLink(t *testing.T) {
|
|||
doTestsInline(t, tests)
|
||||
}
|
||||
|
||||
func TestStrikeThrough(t *testing.T) {
|
||||
func testStrikeThrough(t *testing.T) {
|
||||
var tests = []string{
|
||||
"nothing inline\n",
|
||||
"<p>nothing inline</p>\n",
|
||||
|
@ -215,7 +219,7 @@ func TestStrikeThrough(t *testing.T) {
|
|||
doTestsInline(t, tests)
|
||||
}
|
||||
|
||||
func TestCodeSpan(t *testing.T) {
|
||||
func testCodeSpan(t *testing.T) {
|
||||
var tests = []string{
|
||||
"`source code`\n",
|
||||
"<p><code>source code</code></p>\n",
|
||||
|
@ -253,7 +257,7 @@ func TestCodeSpan(t *testing.T) {
|
|||
doTestsInline(t, tests)
|
||||
}
|
||||
|
||||
func TestLineBreak(t *testing.T) {
|
||||
func testLineBreak(t *testing.T) {
|
||||
var tests = []string{
|
||||
"this line \nhas a break\n",
|
||||
"<p>this line<br />\nhas a break</p>\n",
|
||||
|
@ -292,7 +296,7 @@ func TestLineBreak(t *testing.T) {
|
|||
extensions: parser.BackslashLineBreak})
|
||||
}
|
||||
|
||||
func TestInlineLink(t *testing.T) {
|
||||
func testInlineLink(t *testing.T) {
|
||||
var tests = []string{
|
||||
"[foo](/bar/)\n",
|
||||
"<p><a href=\"/bar/\">foo</a></p>\n",
|
||||
|
@ -409,54 +413,7 @@ func TestInlineLink(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
func TestRelAttrLink(t *testing.T) {
|
||||
var nofollowTests = []string{
|
||||
"[foo](http://bar.com/foo/)\n",
|
||||
"<p><a href=\"http://bar.com/foo/\" rel=\"nofollow\">foo</a></p>\n",
|
||||
|
||||
"[foo](/bar/)\n",
|
||||
"<p><a href=\"/bar/\">foo</a></p>\n",
|
||||
|
||||
"[foo](/)\n",
|
||||
"<p><a href=\"/\">foo</a></p>\n",
|
||||
|
||||
"[foo](./)\n",
|
||||
"<p><a href=\"./\">foo</a></p>\n",
|
||||
|
||||
"[foo](../)\n",
|
||||
"<p><a href=\"../\">foo</a></p>\n",
|
||||
|
||||
"[foo](../bar)\n",
|
||||
"<p><a href=\"../bar\">foo</a></p>\n",
|
||||
}
|
||||
doTestsInlineParam(t, nofollowTests, TestParams{
|
||||
Flags: html.Safelink | html.NofollowLinks,
|
||||
})
|
||||
|
||||
var noreferrerTests = []string{
|
||||
"[foo](http://bar.com/foo/)\n",
|
||||
"<p><a href=\"http://bar.com/foo/\" rel=\"noreferrer\">foo</a></p>\n",
|
||||
|
||||
"[foo](/bar/)\n",
|
||||
"<p><a href=\"/bar/\">foo</a></p>\n",
|
||||
}
|
||||
doTestsInlineParam(t, noreferrerTests, TestParams{
|
||||
Flags: html.Safelink | html.NoreferrerLinks,
|
||||
})
|
||||
|
||||
var nofollownoreferrerTests = []string{
|
||||
"[foo](http://bar.com/foo/)\n",
|
||||
"<p><a href=\"http://bar.com/foo/\" rel=\"nofollow noreferrer\">foo</a></p>\n",
|
||||
|
||||
"[foo](/bar/)\n",
|
||||
"<p><a href=\"/bar/\">foo</a></p>\n",
|
||||
}
|
||||
doTestsInlineParam(t, nofollownoreferrerTests, TestParams{
|
||||
Flags: html.Safelink | html.NofollowLinks | html.NoreferrerLinks,
|
||||
})
|
||||
}
|
||||
|
||||
func TestHrefTargetBlank(t *testing.T) {
|
||||
func testHrefTargetBlank(t *testing.T) {
|
||||
var tests = []string{
|
||||
// internal link
|
||||
"[foo](/bar/)\n",
|
||||
|
@ -480,12 +437,10 @@ func TestHrefTargetBlank(t *testing.T) {
|
|||
"[foo](http://example.com)\n",
|
||||
"<p><a href=\"http://example.com\" target=\"_blank\">foo</a></p>\n",
|
||||
}
|
||||
doTestsInlineParam(t, tests, TestParams{
|
||||
Flags: html.Safelink | html.HrefTargetBlank,
|
||||
})
|
||||
doTestsInlineParam(t, tests, TestParams{})
|
||||
}
|
||||
|
||||
func TestSafeInlineLink(t *testing.T) {
|
||||
func testSafeInlineLink(t *testing.T) {
|
||||
var tests = []string{
|
||||
"[foo](/bar/)\n",
|
||||
"<p><a href=\"/bar/\">foo</a></p>\n",
|
||||
|
@ -518,7 +473,7 @@ func TestSafeInlineLink(t *testing.T) {
|
|||
doSafeTestsInline(t, tests)
|
||||
}
|
||||
|
||||
func TestReferenceLink(t *testing.T) {
|
||||
func testReferenceLink(t *testing.T) {
|
||||
var tests = []string{
|
||||
"[link][ref]\n",
|
||||
"<p>[link][ref]</p>\n",
|
||||
|
@ -556,7 +511,7 @@ func TestReferenceLink(t *testing.T) {
|
|||
doLinkTestsInline(t, tests)
|
||||
}
|
||||
|
||||
func TestTags(t *testing.T) {
|
||||
func testTags(t *testing.T) {
|
||||
var tests = []string{
|
||||
"a <span>tag</span>\n",
|
||||
"<p>a <span>tag</span></p>\n",
|
||||
|
@ -573,7 +528,7 @@ func TestTags(t *testing.T) {
|
|||
doTestsInline(t, tests)
|
||||
}
|
||||
|
||||
func TestAutoLink(t *testing.T) {
|
||||
func testAutoLink(t *testing.T) {
|
||||
var tests = []string{
|
||||
"http://foo.com/\n",
|
||||
"<p><a href=\"http://foo.com/\">http://foo.com/</a></p>\n",
|
||||
|
@ -899,128 +854,7 @@ what happens here
|
|||
`,
|
||||
}
|
||||
|
||||
func TestFootnotes(t *testing.T) {
|
||||
doTestsInlineParam(t, footnoteTests, TestParams{
|
||||
extensions: parser.Footnotes,
|
||||
})
|
||||
}
|
||||
|
||||
func TestFootnotesWithParameters(t *testing.T) {
|
||||
tests := make([]string, len(footnoteTests))
|
||||
|
||||
prefix := "testPrefix"
|
||||
returnText := "ret"
|
||||
re := regexp.MustCompile(`(?ms)<li id="fn:(\S+?)">(.*?)</li>`)
|
||||
|
||||
// Transform the test expectations to match the parameters we're using.
|
||||
for i, test := range footnoteTests {
|
||||
if i%2 == 1 {
|
||||
test = strings.Replace(test, "fn:", "fn:"+prefix, -1)
|
||||
test = strings.Replace(test, "fnref:", "fnref:"+prefix, -1)
|
||||
test = re.ReplaceAllString(test, `<li id="fn:$1">$2 <a class="footnote-return" href="#fnref:$1">ret</a></li>`)
|
||||
}
|
||||
tests[i] = test
|
||||
}
|
||||
|
||||
params := html.RendererOptions{
|
||||
FootnoteAnchorPrefix: prefix,
|
||||
FootnoteReturnLinkContents: returnText,
|
||||
}
|
||||
|
||||
doTestsInlineParam(t, tests, TestParams{
|
||||
extensions: parser.Footnotes,
|
||||
Flags: html.FootnoteReturnLinks,
|
||||
RendererOptions: params,
|
||||
})
|
||||
}
|
||||
|
||||
func TestNestedFootnotes(t *testing.T) {
|
||||
var tests = []string{
|
||||
`Paragraph.[^fn1]
|
||||
|
||||
[^fn1]:
|
||||
Asterisk[^fn2]
|
||||
|
||||
[^fn2]:
|
||||
Obelisk`,
|
||||
`<p>Paragraph.<sup class="footnote-ref" id="fnref:fn1"><a href="#fn:fn1">1</a></sup></p>
|
||||
|
||||
<div class="footnotes">
|
||||
|
||||
<hr />
|
||||
|
||||
<ol>
|
||||
<li id="fn:fn1">Asterisk<sup class="footnote-ref" id="fnref:fn2"><a href="#fn:fn2">2</a></sup></li>
|
||||
|
||||
<li id="fn:fn2">Obelisk</li>
|
||||
</ol>
|
||||
|
||||
</div>
|
||||
`,
|
||||
`This uses footnote A.[^A]
|
||||
|
||||
This uses footnote C.[^C]
|
||||
|
||||
[^A]:
|
||||
A note. use itself.[^A]
|
||||
[^B]:
|
||||
B note, uses A to test duplicate.[^A]
|
||||
[^C]:
|
||||
C note, uses B.[^B]
|
||||
`,
|
||||
|
||||
`<p>This uses footnote A.<sup class="footnote-ref" id="fnref:A"><a href="#fn:A">1</a></sup></p>
|
||||
|
||||
<p>This uses footnote C.<sup class="footnote-ref" id="fnref:C"><a href="#fn:C">2</a></sup></p>
|
||||
|
||||
<div class="footnotes">
|
||||
|
||||
<hr />
|
||||
|
||||
<ol>
|
||||
<li id="fn:A">A note. use itself.<sup class="footnote-ref" id="fnref:A"><a href="#fn:A">1</a></sup></li>
|
||||
|
||||
<li id="fn:C">C note, uses B.<sup class="footnote-ref" id="fnref:B"><a href="#fn:B">3</a></sup></li>
|
||||
|
||||
<li id="fn:B">B note, uses A to test duplicate.<sup class="footnote-ref" id="fnref:A"><a href="#fn:A">1</a></sup></li>
|
||||
</ol>
|
||||
|
||||
</div>
|
||||
`,
|
||||
}
|
||||
doTestsInlineParam(t, tests, TestParams{extensions: parser.Footnotes})
|
||||
}
|
||||
|
||||
func TestInlineComments(t *testing.T) {
|
||||
var tests = []string{
|
||||
"Hello <!-- there ->\n",
|
||||
"<p>Hello <!— there –></p>\n",
|
||||
|
||||
"Hello <!-- there -->\n",
|
||||
"<p>Hello <!-- there --></p>\n",
|
||||
|
||||
"Hello <!-- there -->",
|
||||
"<p>Hello <!-- there --></p>\n",
|
||||
|
||||
"Hello <!---->\n",
|
||||
"<p>Hello <!----></p>\n",
|
||||
|
||||
"Hello <!-- there -->\na",
|
||||
"<p>Hello <!-- there -->\na</p>\n",
|
||||
|
||||
"* list <!-- item -->\n",
|
||||
"<ul>\n<li>list <!-- item --></li>\n</ul>\n",
|
||||
|
||||
"<!-- Front --> comment\n",
|
||||
"<p><!-- Front --> comment</p>\n",
|
||||
|
||||
"blahblah\n<!--- foo -->\nrhubarb\n",
|
||||
"<p>blahblah\n<!--- foo -->\nrhubarb</p>\n",
|
||||
}
|
||||
doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants | html.SmartypantsDashes})
|
||||
}
|
||||
|
||||
func TestSmartDoubleQuotes(t *testing.T) {
|
||||
func testSmartDoubleQuotes(t *testing.T) {
|
||||
var tests = []string{
|
||||
"this should be normal \"quoted\" text.\n",
|
||||
"<p>this should be normal “quoted” text.</p>\n",
|
||||
|
@ -1029,10 +863,10 @@ func TestSmartDoubleQuotes(t *testing.T) {
|
|||
"two pair of \"some\" quoted \"text\".\n",
|
||||
"<p>two pair of “some” quoted “text”.</p>\n"}
|
||||
|
||||
doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants})
|
||||
doTestsInlineParam(t, tests, TestParams{})
|
||||
}
|
||||
|
||||
func TestSmartDoubleQuotesNBSP(t *testing.T) {
|
||||
func testSmartDoubleQuotesNBSP(t *testing.T) {
|
||||
var tests = []string{
|
||||
"this should be normal \"quoted\" text.\n",
|
||||
"<p>this should be normal “ quoted ” text.</p>\n",
|
||||
|
@ -1041,10 +875,10 @@ func TestSmartDoubleQuotesNBSP(t *testing.T) {
|
|||
"two pair of \"some\" quoted \"text\".\n",
|
||||
"<p>two pair of “ some ” quoted “ text ”.</p>\n"}
|
||||
|
||||
doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants | html.SmartypantsQuotesNBSP})
|
||||
doTestsInlineParam(t, tests, TestParams{})
|
||||
}
|
||||
|
||||
func TestSmartAngledDoubleQuotes(t *testing.T) {
|
||||
func testSmartAngledDoubleQuotes(t *testing.T) {
|
||||
var tests = []string{
|
||||
"this should be angled \"quoted\" text.\n",
|
||||
"<p>this should be angled «quoted» text.</p>\n",
|
||||
|
@ -1053,40 +887,10 @@ func TestSmartAngledDoubleQuotes(t *testing.T) {
|
|||
"two pair of \"some\" quoted \"text\".\n",
|
||||
"<p>two pair of «some» quoted «text».</p>\n"}
|
||||
|
||||
doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants | html.SmartypantsAngledQuotes})
|
||||
doTestsInlineParam(t, tests, TestParams{})
|
||||
}
|
||||
|
||||
func TestSmartAngledDoubleQuotesNBSP(t *testing.T) {
|
||||
var tests = []string{
|
||||
"this should be angled \"quoted\" text.\n",
|
||||
"<p>this should be angled « quoted » text.</p>\n",
|
||||
"this \" single double\n",
|
||||
"<p>this « single double</p>\n",
|
||||
"two pair of \"some\" quoted \"text\".\n",
|
||||
"<p>two pair of « some » quoted « text ».</p>\n"}
|
||||
|
||||
doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants | html.SmartypantsAngledQuotes | html.SmartypantsQuotesNBSP})
|
||||
}
|
||||
|
||||
func TestSmartFractions(t *testing.T) {
|
||||
var tests = []string{
|
||||
"1/2, 1/4 and 3/4; 1/4th and 3/4ths\n",
|
||||
"<p>½, ¼ and ¾; ¼th and ¾ths</p>\n",
|
||||
"1/2/2015, 1/4/2015, 3/4/2015; 2015/1/2, 2015/1/4, 2015/3/4.\n",
|
||||
"<p>1/2/2015, 1/4/2015, 3/4/2015; 2015/1/2, 2015/1/4, 2015/3/4.</p>\n"}
|
||||
|
||||
doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants})
|
||||
|
||||
tests = []string{
|
||||
"1/2, 2/3, 81/100 and 1000000/1048576.\n",
|
||||
"<p><sup>1</sup>⁄<sub>2</sub>, <sup>2</sup>⁄<sub>3</sub>, <sup>81</sup>⁄<sub>100</sub> and <sup>1000000</sup>⁄<sub>1048576</sub>.</p>\n",
|
||||
"1/2/2015, 1/4/2015, 3/4/2015; 2015/1/2, 2015/1/4, 2015/3/4.\n",
|
||||
"<p>1/2/2015, 1/4/2015, 3/4/2015; 2015/1/2, 2015/1/4, 2015/3/4.</p>\n"}
|
||||
|
||||
doTestsInlineParam(t, tests, TestParams{Flags: html.Smartypants | html.SmartypantsFractions})
|
||||
}
|
||||
|
||||
func TestDisableSmartDashes(t *testing.T) {
|
||||
func testDisableSmartDashes(t *testing.T) {
|
||||
doTestsInlineParam(t, []string{
|
||||
"foo - bar\n",
|
||||
"<p>foo - bar</p>\n",
|
||||
|
@ -1102,7 +906,7 @@ func TestDisableSmartDashes(t *testing.T) {
|
|||
"<p>foo — bar</p>\n",
|
||||
"foo --- bar\n",
|
||||
"<p>foo —– bar</p>\n",
|
||||
}, TestParams{Flags: html.Smartypants | html.SmartypantsDashes})
|
||||
}, TestParams{})
|
||||
doTestsInlineParam(t, []string{
|
||||
"foo - bar\n",
|
||||
"<p>foo - bar</p>\n",
|
||||
|
@ -1110,7 +914,7 @@ func TestDisableSmartDashes(t *testing.T) {
|
|||
"<p>foo – bar</p>\n",
|
||||
"foo --- bar\n",
|
||||
"<p>foo — bar</p>\n",
|
||||
}, TestParams{Flags: html.Smartypants | html.SmartypantsLatexDashes | html.SmartypantsDashes})
|
||||
}, TestParams{})
|
||||
doTestsInlineParam(t, []string{
|
||||
"foo - bar\n",
|
||||
"<p>foo - bar</p>\n",
|
||||
|
@ -1118,31 +922,27 @@ func TestDisableSmartDashes(t *testing.T) {
|
|||
"<p>foo -- bar</p>\n",
|
||||
"foo --- bar\n",
|
||||
"<p>foo --- bar</p>\n",
|
||||
}, TestParams{Flags: html.Smartypants | html.SmartypantsLatexDashes})
|
||||
}, TestParams{})
|
||||
}
|
||||
|
||||
func TestSkipLinks(t *testing.T) {
|
||||
func testSkipLinks(t *testing.T) {
|
||||
doTestsInlineParam(t, []string{
|
||||
"[foo](gopher://foo.bar)",
|
||||
"<p><tt>foo</tt></p>\n",
|
||||
|
||||
"[foo](mailto://bar/)\n",
|
||||
"<p><tt>foo</tt></p>\n",
|
||||
}, TestParams{
|
||||
Flags: html.SkipLinks,
|
||||
})
|
||||
}, TestParams{})
|
||||
}
|
||||
|
||||
func TestSkipImages(t *testing.T) {
|
||||
func testSkipImages(t *testing.T) {
|
||||
doTestsInlineParam(t, []string{
|
||||
"![foo](/bar/)\n",
|
||||
"<p></p>\n",
|
||||
}, TestParams{
|
||||
Flags: html.SkipImages,
|
||||
})
|
||||
}, TestParams{})
|
||||
}
|
||||
|
||||
func TestUseXHTML(t *testing.T) {
|
||||
func testUseXHTML(t *testing.T) {
|
||||
doTestsParam(t, []string{
|
||||
"---",
|
||||
"<hr>\n",
|
||||
|
@ -1150,33 +950,15 @@ func TestUseXHTML(t *testing.T) {
|
|||
doTestsParam(t, []string{
|
||||
"---",
|
||||
"<hr />\n",
|
||||
}, TestParams{Flags: html.UseXHTML})
|
||||
}, TestParams{})
|
||||
}
|
||||
|
||||
func TestSkipHTML(t *testing.T) {
|
||||
func testSkipHTML(t *testing.T) {
|
||||
doTestsParam(t, []string{
|
||||
"<div class=\"foo\"></div>\n\ntext\n\n<form>the form</form>",
|
||||
"<p>text</p>\n\n<p>the form</p>\n",
|
||||
|
||||
"text <em>inline html</em> more text",
|
||||
"<p>text inline html more text</p>\n",
|
||||
}, TestParams{Flags: html.SkipHTML})
|
||||
}
|
||||
|
||||
func TestInlineMath(t *testing.T) {
|
||||
doTestsParam(t, []string{
|
||||
"$a_b$",
|
||||
`<p><span class="math inline">\(a_b\)</span></p>
|
||||
`,
|
||||
}, TestParams{Flags: html.SkipHTML, extensions: parser.CommonExtensions})
|
||||
}
|
||||
|
||||
func BenchmarkSmartDoubleQuotes(b *testing.B) {
|
||||
params := TestParams{Flags: html.Smartypants}
|
||||
params.extensions |= parser.Autolink | parser.Strikethrough
|
||||
params.Flags |= html.UseXHTML
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
runMarkdown("this should be normal \"quoted\" text.\n", params)
|
||||
}
|
||||
}, TestParams{})
|
||||
}
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/gomarkdown/markdown"
|
||||
)
|
||||
|
||||
func main() {
|
||||
md := []byte("## markdown document")
|
||||
output := markdown.Parse(md, nil)
|
||||
fmt.Printf("## markdown document\n")
|
||||
fmt.Printf("%+v\n", output)
|
||||
fmt.Printf("%+v\n", output.GetChildren()[0])
|
||||
fmt.Printf("%+v\n", output.GetChildren()[0].GetChildren()[0])
|
||||
j, err := json.Marshal(output)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
fmt.Println(j)
|
||||
}
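
One note on the example above: json.Marshal returns a []byte, so fmt.Println(j) prints the raw byte values. Converting to a string prints the JSON text instead; an illustrative tweak, not part of the commit:

	fmt.Println(string(j)) // prints the marshaled document as JSON text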
|
19 markdown.go
|
@ -5,7 +5,6 @@ import (
|
|||
"io"
|
||||
|
||||
"github.com/gomarkdown/markdown/ast"
|
||||
"github.com/gomarkdown/markdown/html"
|
||||
"github.com/gomarkdown/markdown/parser"
|
||||
)
|
||||
|
||||
|
@ -65,21 +64,3 @@ func Render(doc ast.Node, renderer Renderer) []byte {
|
|||
renderer.RenderFooter(&buf, doc)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// ToHTML converts markdownDoc to HTML.
|
||||
//
|
||||
// You can optionally pass a parser and renderer. This allows to customize
|
||||
// a parser, use a customized html render or use completely custom renderer.
|
||||
//
|
||||
// If you pass nil for both, we use parser configured with parser.CommonExtensions
|
||||
// and html.Renderer configured with html.CommonFlags.
|
||||
func ToHTML(markdown []byte, p *parser.Parser, renderer Renderer) []byte {
|
||||
doc := Parse(markdown, p)
|
||||
if renderer == nil {
|
||||
opts := html.RendererOptions{
|
||||
Flags: html.CommonFlags,
|
||||
}
|
||||
renderer = html.NewRenderer(opts)
|
||||
}
|
||||
return Render(doc, renderer)
|
||||
}
|
||||
|
|
|
@ -6,20 +6,20 @@ func TestDocument(t *testing.T) {
|
|||
var tests = []string{
|
||||
// Empty document.
|
||||
"",
|
||||
"",
|
||||
"[]",
|
||||
|
||||
" ",
|
||||
"",
|
||||
"[]",
|
||||
|
||||
// This shouldn't panic.
|
||||
// https://github.com/russross/blackfriday/issues/172
|
||||
"[]:<",
|
||||
"<p>[]:<</p>\n",
|
||||
"[{\"literal\":\"[]:\\u003c\"}]",
|
||||
|
||||
// This shouldn't panic.
|
||||
// https://github.com/russross/blackfriday/issues/173
|
||||
" [",
|
||||
"<p>[</p>\n",
|
||||
"[{\"literal\":\"[\"}]",
|
||||
}
|
||||
doTests(t, tests)
|
||||
}
|
||||
|
|
|
@ -5,8 +5,6 @@ import (
|
|||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/gomarkdown/markdown/parser"
|
||||
)
|
||||
|
||||
type testData struct {
|
||||
|
@ -66,18 +64,3 @@ func readTestFile(t *testing.T, fileName string) []*testData {
|
|||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func TestMmark(t *testing.T) {
|
||||
testData := readTestFile(t, "mmark.test")
|
||||
ext := parser.CommonExtensions | parser.Attributes | parser.OrderedListStart | parser.SuperSubscript | parser.Mmark
|
||||
for _, td := range testData {
|
||||
p := parser.NewWithExtensions(ext)
|
||||
|
||||
got := ToHTML(td.md, p, nil)
|
||||
want := td.html
|
||||
|
||||
if bytes.Compare(got, want) != 0 {
|
||||
t.Errorf("want (%d bytes) %s, got (%d bytes) %s, for input %q", len(want), want, len(got), got, td.md)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
581 parser/block.go
|
@ -4,7 +4,6 @@ import (
|
|||
"bytes"
|
||||
"html"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"unicode"
|
||||
|
||||
"github.com/gomarkdown/markdown/ast"
|
||||
|
@ -92,263 +91,23 @@ func sanitizeAnchorName(text string) string {
|
|||
// the input buffer ends with a newline.
|
||||
func (p *Parser) block(data []byte) {
|
||||
// this is called recursively: enforce a maximum depth
|
||||
if p.nesting >= p.maxNesting {
|
||||
return
|
||||
}
|
||||
p.nesting++
|
||||
|
||||
// parse out one block-level construct at a time
|
||||
for len(data) > 0 {
|
||||
// attributes that can be specific before a block element:
|
||||
//
|
||||
// {#id .class1 .class2 key="value"}
|
||||
if p.extensions&Attributes != 0 {
|
||||
data = p.attribute(data)
|
||||
}
|
||||
|
||||
if p.extensions&Includes != 0 {
|
||||
f := p.readInclude
|
||||
path, address, consumed := p.isInclude(data)
|
||||
if consumed == 0 {
|
||||
path, address, consumed = p.isCodeInclude(data)
|
||||
f = p.readCodeInclude
|
||||
}
|
||||
if consumed > 0 {
|
||||
included := f(p.includeStack.Last(), path, address)
|
||||
p.includeStack.Push(path)
|
||||
p.block(included)
|
||||
p.includeStack.Pop()
|
||||
data = data[consumed:]
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// user supplied parser function
|
||||
if p.Opts.ParserHook != nil {
|
||||
node, blockdata, consumed := p.Opts.ParserHook(data)
|
||||
if consumed > 0 {
|
||||
data = data[consumed:]
|
||||
|
||||
if node != nil {
|
||||
p.addBlock(node)
|
||||
if blockdata != nil {
|
||||
p.block(blockdata)
|
||||
p.finalize(node)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// prefixed heading:
|
||||
//
|
||||
// # Heading 1
|
||||
// ## Heading 2
|
||||
// ...
|
||||
// ###### Heading 6
|
||||
if p.isPrefixHeading(data) {
|
||||
data = data[p.prefixHeading(data):]
|
||||
continue
|
||||
}
|
||||
|
||||
// prefixed special heading:
|
||||
// (there are no levels.)
|
||||
//
|
||||
// .# Abstract
|
||||
if p.isPrefixSpecialHeading(data) {
|
||||
data = data[p.prefixSpecialHeading(data):]
|
||||
continue
|
||||
}
|
||||
|
||||
// block of preformatted HTML:
|
||||
//
|
||||
// <div>
|
||||
// ...
|
||||
// </div>
|
||||
if data[0] == '<' {
|
||||
if i := p.html(data, true); i > 0 {
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// title block
|
||||
//
|
||||
// % stuff
|
||||
// % more stuff
|
||||
// % even more stuff
|
||||
if p.extensions&Titleblock != 0 {
|
||||
if data[0] == '%' {
|
||||
if i := p.titleBlock(data, true); i > 0 {
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// blank lines. note: returns the # of bytes to skip
|
||||
if i := p.isEmpty(data); i > 0 {
|
||||
if i := p.fencedCodeBlock(data, true); i > 0 {
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
|
||||
// indented code block:
|
||||
//
|
||||
// func max(a, b int) int {
|
||||
// if a > b {
|
||||
// return a
|
||||
// }
|
||||
// return b
|
||||
// }
|
||||
if p.codePrefix(data) > 0 {
|
||||
data = data[p.code(data):]
|
||||
continue
|
||||
}
|
||||
|
||||
// fenced code block:
|
||||
//
|
||||
// ``` go
|
||||
// func fact(n int) int {
|
||||
// if n <= 1 {
|
||||
// return n
|
||||
// }
|
||||
// return n * fact(n-1)
|
||||
// }
|
||||
// ```
|
||||
if p.extensions&FencedCode != 0 {
|
||||
if i := p.fencedCodeBlock(data, true); i > 0 {
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// horizontal rule:
|
||||
//
|
||||
// ------
|
||||
// or
|
||||
// ******
|
||||
// or
|
||||
// ______
|
||||
if p.isHRule(data) {
|
||||
p.addBlock(&ast.HorizontalRule{})
|
||||
i := skipUntilChar(data, 0, '\n')
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
|
||||
// block quote:
|
||||
//
|
||||
// > A big quote I found somewhere
|
||||
// > on the web
|
||||
if p.quotePrefix(data) > 0 {
|
||||
data = data[p.quote(data):]
|
||||
continue
|
||||
}
|
||||
|
||||
// aside:
|
||||
//
|
||||
// A> The proof is too large to fit
|
||||
// A> in the margin.
|
||||
if p.extensions&Mmark != 0 {
|
||||
if p.asidePrefix(data) > 0 {
|
||||
data = data[p.aside(data):]
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// figure block:
|
||||
//
|
||||
// !---
|
||||
// ![Alt Text](img.jpg "This is an image")
|
||||
// ![Alt Text](img2.jpg "This is a second image")
|
||||
// !---
|
||||
if p.extensions&Mmark != 0 {
|
||||
if i := p.figureBlock(data, true); i > 0 {
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// table:
|
||||
//
|
||||
// Name | Age | Phone
|
||||
// ------|-----|---------
|
||||
// Bob | 31 | 555-1234
|
||||
// Alice | 27 | 555-4321
|
||||
if p.extensions&Tables != 0 {
|
||||
if i := p.table(data); i > 0 {
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// an itemized/unordered list:
|
||||
//
|
||||
// * Item 1
|
||||
// * Item 2
|
||||
//
|
||||
// also works with + or -
|
||||
if p.uliPrefix(data) > 0 {
|
||||
data = data[p.list(data, 0, 0):]
|
||||
continue
|
||||
}
|
||||
|
||||
// a numbered/ordered list:
|
||||
//
|
||||
// 1. Item 1
|
||||
// 2. Item 2
|
||||
if i := p.oliPrefix(data); i > 0 {
|
||||
start := 0
|
||||
if i > 2 && p.extensions&OrderedListStart != 0 {
|
||||
s := string(data[:i-2])
|
||||
start, _ = strconv.Atoi(s)
|
||||
if start == 1 {
|
||||
start = 0
|
||||
}
|
||||
}
|
||||
data = data[p.list(data, ast.ListTypeOrdered, start):]
|
||||
continue
|
||||
}
|
||||
|
||||
// definition lists:
|
||||
//
|
||||
// Term 1
|
||||
// : Definition a
|
||||
// : Definition b
|
||||
//
|
||||
// Term 2
|
||||
// : Definition c
|
||||
if p.extensions&DefinitionLists != 0 {
|
||||
if p.dliPrefix(data) > 0 {
|
||||
data = data[p.list(data, ast.ListTypeDefinition, 0):]
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if p.extensions&MathJax != 0 {
|
||||
if i := p.blockMath(data); i > 0 {
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// document matters:
|
||||
//
|
||||
// {frontmatter}/{mainmatter}/{backmatter}
|
||||
if p.extensions&Mmark != 0 {
|
||||
if i := p.documentMatter(data); i > 0 {
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// anything else must look like a normal paragraph
|
||||
// note: this finds underlined headings, too
|
||||
idx := p.paragraph(data)
|
||||
data = data[idx:]
|
||||
}
|
||||
//p.renderParagraph(data)
|
||||
return
|
||||
|
||||
p.nesting--
|
||||
}
|
||||
|
||||
func (p *Parser) addBlock(n ast.Node) ast.Node {
|
||||
|
@ -367,17 +126,7 @@ func (p *Parser) addBlock(n ast.Node) ast.Node {
|
|||
}
|
||||
|
||||
func (p *Parser) isPrefixHeading(data []byte) bool {
|
||||
if data[0] != '#' {
|
||||
return false
|
||||
}
|
||||
|
||||
if p.extensions&SpaceHeadings != 0 {
|
||||
level := skipCharN(data, 0, '#', 6)
|
||||
if level == len(data) || data[level] != ' ' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
return len(data) > 1 && data[0] == '#' && isSpace(data[1])
|
||||
}
|
||||
|
||||
func (p *Parser) prefixHeading(data []byte) int {
|
||||
|
@ -385,24 +134,6 @@ func (p *Parser) prefixHeading(data []byte) int {
|
|||
i := skipChar(data, level, ' ')
|
||||
end := skipUntilChar(data, i, '\n')
|
||||
skip := end
|
||||
id := ""
|
||||
if p.extensions&HeadingIDs != 0 {
|
||||
j, k := 0, 0
|
||||
// find start/end of heading id
|
||||
for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
|
||||
}
|
||||
for k = j + 1; k < end && data[k] != '}'; k++ {
|
||||
}
|
||||
// extract heading id iff found
|
||||
if j < end && k < end {
|
||||
id = string(data[j+2 : k])
|
||||
end = j
|
||||
skip = k + 1
|
||||
for end > 0 && data[end-1] == ' ' {
|
||||
end--
|
||||
}
|
||||
}
|
||||
}
|
||||
for end > 0 && data[end-1] == '#' {
|
||||
if isBackslashEscaped(data, end-1) {
|
||||
break
|
||||
|
@ -413,23 +144,16 @@ func (p *Parser) prefixHeading(data []byte) int {
|
|||
end--
|
||||
}
|
||||
if end > i {
|
||||
if id == "" && p.extensions&AutoHeadingIDs != 0 {
|
||||
id = sanitizeAnchorName(string(data[i:end]))
|
||||
}
|
||||
block := &ast.Heading{
|
||||
HeadingID: id,
|
||||
Level: level,
|
||||
Level: level,
|
||||
}
|
||||
block.Content = data[i:end]
|
||||
block.Literal = data[i:end]
|
||||
p.addBlock(block)
|
||||
}
|
||||
return skip
|
||||
}
|
||||
|
||||
func (p *Parser) isPrefixSpecialHeading(data []byte) bool {
|
||||
if p.extensions|Mmark == 0 {
|
||||
return false
|
||||
}
|
||||
if len(data) < 4 {
|
||||
return false
|
||||
}
|
||||
|
@ -549,203 +273,6 @@ func (p *Parser) titleBlock(data []byte, doRender bool) int {
|
|||
return consumed
|
||||
}
|
||||
|
||||
func (p *Parser) html(data []byte, doRender bool) int {
|
||||
var i, j int
|
||||
|
||||
// identify the opening tag
|
||||
if data[0] != '<' {
|
||||
return 0
|
||||
}
|
||||
curtag, tagfound := p.htmlFindTag(data[1:])
|
||||
|
||||
// handle special cases
|
||||
if !tagfound {
|
||||
// check for an HTML comment
|
||||
if size := p.htmlComment(data, doRender); size > 0 {
|
||||
return size
|
||||
}
|
||||
|
||||
// check for an <hr> tag
|
||||
if size := p.htmlHr(data, doRender); size > 0 {
|
||||
return size
|
||||
}
|
||||
|
||||
// no special case recognized
|
||||
return 0
|
||||
}
|
||||
|
||||
// look for an unindented matching closing tag
|
||||
// followed by a blank line
|
||||
found := false
|
||||
/*
|
||||
closetag := []byte("\n</" + curtag + ">")
|
||||
j = len(curtag) + 1
|
||||
for !found {
|
||||
// scan for a closing tag at the beginning of a line
|
||||
if skip := bytes.Index(data[j:], closetag); skip >= 0 {
|
||||
j += skip + len(closetag)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
|
||||
// see if it is the only thing on the line
|
||||
if skip := p.isEmpty(data[j:]); skip > 0 {
|
||||
// see if it is followed by a blank line/eof
|
||||
j += skip
|
||||
if j >= len(data) {
|
||||
found = true
|
||||
i = j
|
||||
} else {
|
||||
if skip := p.isEmpty(data[j:]); skip > 0 {
|
||||
j += skip
|
||||
found = true
|
||||
i = j
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
// if not found, try a second pass looking for indented match
|
||||
// but not if tag is "ins" or "del" (following original Markdown.pl)
|
||||
if !found && curtag != "ins" && curtag != "del" {
|
||||
i = 1
|
||||
for i < len(data) {
|
||||
i++
|
||||
for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
|
||||
i++
|
||||
}
|
||||
|
||||
if i+2+len(curtag) >= len(data) {
|
||||
break
|
||||
}
|
||||
|
||||
j = p.htmlFindEnd(curtag, data[i-1:])
|
||||
|
||||
if j > 0 {
|
||||
i += j - 1
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return 0
|
||||
}
|
||||
|
||||
// the end of the block has been found
|
||||
if doRender {
|
||||
// trim newlines
|
||||
end := backChar(data, i, '\n')
|
||||
htmlBLock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}}
|
||||
p.addBlock(htmlBLock)
|
||||
finalizeHTMLBlock(htmlBLock)
|
||||
}
|
||||
|
||||
return i
|
||||
}
|
||||
|
||||
func finalizeHTMLBlock(block *ast.HTMLBlock) {
|
||||
block.Literal = block.Content
|
||||
block.Content = nil
|
||||
}
|
||||
|
||||
// HTML comment, lax form
|
||||
func (p *Parser) htmlComment(data []byte, doRender bool) int {
|
||||
i := p.inlineHTMLComment(data)
|
||||
// needs to end with a blank line
|
||||
if j := p.isEmpty(data[i:]); j > 0 {
|
||||
size := i + j
|
||||
if doRender {
|
||||
// trim trailing newlines
|
||||
end := backChar(data, size, '\n')
|
||||
htmlBLock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}}
|
||||
p.addBlock(htmlBLock)
|
||||
finalizeHTMLBlock(htmlBLock)
|
||||
}
|
||||
return size
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// HR, which is the only self-closing block tag considered
|
||||
func (p *Parser) htmlHr(data []byte, doRender bool) int {
|
||||
if len(data) < 4 {
|
||||
return 0
|
||||
}
|
||||
if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
|
||||
return 0
|
||||
}
|
||||
if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
|
||||
// not an <hr> tag after all; at least not a valid one
|
||||
return 0
|
||||
}
|
||||
i := 3
|
||||
for i < len(data) && data[i] != '>' && data[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i < len(data) && data[i] == '>' {
|
||||
i++
|
||||
if j := p.isEmpty(data[i:]); j > 0 {
|
||||
size := i + j
|
||||
if doRender {
|
||||
// trim newlines
|
||||
end := backChar(data, size, '\n')
|
||||
htmlBlock := &ast.HTMLBlock{ast.Leaf{Content: data[:end]}}
|
||||
p.addBlock(htmlBlock)
|
||||
finalizeHTMLBlock(htmlBlock)
|
||||
}
|
||||
return size
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (p *Parser) htmlFindTag(data []byte) (string, bool) {
|
||||
i := skipAlnum(data, 0)
|
||||
key := string(data[:i])
|
||||
if _, ok := blockTags[key]; ok {
|
||||
return key, true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (p *Parser) htmlFindEnd(tag string, data []byte) int {
|
||||
// assume data[0] == '<' && data[1] == '/' already tested
|
||||
if tag == "hr" {
|
||||
return 2
|
||||
}
|
||||
// check if tag is a match
|
||||
closetag := []byte("</" + tag + ">")
|
||||
if !bytes.HasPrefix(data, closetag) {
|
||||
return 0
|
||||
}
|
||||
i := len(closetag)
|
||||
|
||||
// check that the rest of the line is blank
|
||||
skip := 0
|
||||
if skip = p.isEmpty(data[i:]); skip == 0 {
|
||||
return 0
|
||||
}
|
||||
i += skip
|
||||
skip = 0
|
||||
|
||||
if i >= len(data) {
|
||||
return i
|
||||
}
|
||||
|
||||
if p.extensions&LaxHTMLBlocks != 0 {
|
||||
return i
|
||||
}
|
||||
if skip = p.isEmpty(data[i:]); skip == 0 {
|
||||
// following line must be blank
|
||||
return 0
|
||||
}
|
||||
|
||||
return i + skip
|
||||
}
|
||||
|
||||
func (*Parser) isEmpty(data []byte) int {
|
||||
// it is okay to call isEmpty on an empty buffer
|
||||
if len(data) == 0 {
|
||||
|
@ -935,12 +462,6 @@ func (p *Parser) fencedCodeBlock(data []byte, doRender bool) int {
|
|||
}
|
||||
codeBlock.Content = work.Bytes() // TODO: get rid of temp buffer
|
||||
|
||||
if p.extensions&Mmark == 0 {
|
||||
p.addBlock(codeBlock)
|
||||
finalizeCodeBlock(codeBlock)
|
||||
return beg
|
||||
}
|
||||
|
||||
// Check for caption and if found make it a figure.
|
||||
if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 {
|
||||
figure := &ast.CaptionFigure{}
|
||||
|
@ -1283,13 +804,6 @@ func (p *Parser) quote(data []byte) int {
|
|||
// fenced code and if one's found, incorporate it altogether,
|
||||
// regardless of any contents inside it
|
||||
for end < len(data) && data[end] != '\n' {
|
||||
if p.extensions&FencedCode != 0 {
|
||||
if i := p.fencedCodeBlock(data[end:], false); i > 0 {
|
||||
// -1 to compensate for the extra end++ after the loop:
|
||||
end += i - 1
|
||||
break
|
||||
}
|
||||
}
|
||||
end++
|
||||
}
|
||||
end = skipCharN(data, end, '\n', 1)
|
||||
|
@ -1304,36 +818,9 @@ func (p *Parser) quote(data []byte) int {
|
|||
beg = end
|
||||
}
|
||||
|
||||
if p.extensions&Mmark == 0 {
|
||||
block := p.addBlock(&ast.BlockQuote{})
|
||||
p.block(raw.Bytes())
|
||||
p.finalize(block)
|
||||
return end
|
||||
}
|
||||
|
||||
if captionContent, id, consumed := p.caption(data[end:], []byte("Quote: ")); consumed > 0 {
|
||||
figure := &ast.CaptionFigure{}
|
||||
caption := &ast.Caption{}
|
||||
figure.HeadingID = id
|
||||
p.Inline(caption, captionContent)
|
||||
|
||||
p.addBlock(figure) // this discard any attributes
|
||||
block := &ast.BlockQuote{}
|
||||
block.AsContainer().Attribute = figure.AsContainer().Attribute
|
||||
p.addChild(block)
|
||||
p.block(raw.Bytes())
|
||||
p.finalize(block)
|
||||
|
||||
p.addChild(caption)
|
||||
p.finalize(figure)
|
||||
|
||||
end += consumed
|
||||
|
||||
return end
|
||||
}
|
||||
|
||||
block := p.addBlock(&ast.BlockQuote{})
|
||||
p.block(raw.Bytes())
|
||||
quote := &ast.BlockQuote{}
|
||||
quote.Literal = raw.Bytes()
|
||||
block := p.addBlock(quote)
|
||||
p.finalize(block)
|
||||
|
||||
return end
|
||||
|
@ -1812,14 +1299,6 @@ func (p *Parser) paragraph(data []byte) int {
|
|||
|
||||
// did we find a blank line marking the end of the paragraph?
|
||||
if n := p.isEmpty(current); n > 0 {
|
||||
// was this blank line followed by a definition list item?
|
||||
if p.extensions&DefinitionLists != 0 {
|
||||
if i < len(data)-1 && data[i+1] == ':' {
|
||||
listLen := p.list(data[prev:], ast.ListTypeDefinition, 0)
|
||||
return prev + listLen
|
||||
}
|
||||
}
|
||||
|
||||
p.renderParagraph(data[:i])
|
||||
return i + n
|
||||
}
|
||||
|
@ -1856,21 +1335,6 @@ func (p *Parser) paragraph(data []byte) int {
|
|||
}
|
||||
}
|
||||
|
||||
// if the next line starts a block of HTML, then the paragraph ends here
|
||||
if p.extensions&LaxHTMLBlocks != 0 {
|
||||
if data[i] == '<' && p.html(current, false) > 0 {
|
||||
// rewind to before the HTML block
|
||||
p.renderParagraph(data[:i])
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
// if there's a prefixed heading or a horizontal rule after this, paragraph is over
|
||||
if p.isPrefixHeading(current) || p.isPrefixSpecialHeading(current) || p.isHRule(current) {
|
||||
p.renderParagraph(data[:i])
|
||||
return i
|
||||
}
|
||||
|
||||
// if there's a fenced code block, paragraph is over
|
||||
if p.extensions&FencedCode != 0 {
|
||||
if p.fencedCodeBlock(current, false) > 0 {
|
||||
|
@ -1879,33 +1343,6 @@ func (p *Parser) paragraph(data []byte) int {
|
|||
}
|
||||
}
|
||||
|
||||
// if there's a figure block, paragraph is over
|
||||
if p.extensions&Mmark != 0 {
|
||||
if p.figureBlock(current, false) > 0 {
|
||||
p.renderParagraph(data[:i])
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
// if there's a definition list item, prev line is a definition term
|
||||
if p.extensions&DefinitionLists != 0 {
|
||||
if p.dliPrefix(current) != 0 {
|
||||
ret := p.list(data[prev:], ast.ListTypeDefinition, 0)
|
||||
return ret + prev
|
||||
}
|
||||
}
|
||||
|
||||
// if there's a list after this, paragraph is over
|
||||
if p.extensions&NoEmptyLineBeforeBlock != 0 {
|
||||
if p.uliPrefix(current) != 0 ||
|
||||
p.oliPrefix(current) != 0 ||
|
||||
p.quotePrefix(current) != 0 ||
|
||||
p.codePrefix(current) != 0 {
|
||||
p.renderParagraph(data[:i])
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
// otherwise, scan to the beginning of the next line
|
||||
nl := bytes.IndexByte(data[i:], '\n')
|
||||
if nl >= 0 {
|
||||
|
|
140 parser/inline.go
|
@ -22,7 +22,11 @@ var (
|
|||
// Each function returns the number of consumed chars.
|
||||
func (p *Parser) Inline(currBlock ast.Node, data []byte) {
|
||||
// handlers might call us recursively: enforce a maximum depth
|
||||
if p.nesting >= p.maxNesting || len(data) == 0 {
|
||||
if len(data) == 0 {
|
||||
return
|
||||
}
|
||||
if p.nesting >= p.maxNesting {
|
||||
ast.AppendChild(currBlock, newTextNode(data))
|
||||
return
|
||||
}
|
||||
p.nesting++
|
||||
|
@ -59,6 +63,40 @@ func (p *Parser) Inline(currBlock ast.Node, data []byte) {
|
|||
p.nesting--
|
||||
}
|
||||
|
||||
func statusTag(p *Parser, data []byte, offset int) (int, ast.Node) {
|
||||
data = data[offset:]
|
||||
n := len(data)
|
||||
|
||||
if n == 1 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Space cannot follow tag
|
||||
if isSpace(data[1]) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
i := 1
|
||||
for i < n {
|
||||
if isSpace(data[i]) {
|
||||
break
|
||||
}
|
||||
if !isValidStatusTagChar(data[i]) {
|
||||
return 0, nil
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
if i == 1 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
statusTag := &ast.StatusTag{}
|
||||
statusTag.Literal = data[1:i]
|
||||
|
||||
return i, statusTag
|
||||
}
|
||||
|
||||
// single and double emphasis parsing
|
||||
func emphasis(p *Parser, data []byte, offset int) (int, ast.Node) {
|
||||
data = data[offset:]
|
||||
|
@ -604,25 +642,6 @@ func link(p *Parser, data []byte, offset int) (int, ast.Node) {
|
|||
}
|
||||
}
|
||||
|
||||
func (p *Parser) inlineHTMLComment(data []byte) int {
|
||||
if len(data) < 5 {
|
||||
return 0
|
||||
}
|
||||
if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' {
|
||||
return 0
|
||||
}
|
||||
i := 5
|
||||
// scan for an end-of-comment marker, across lines if necessary
|
||||
for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') {
|
||||
i++
|
||||
}
|
||||
// no end-of-comment marker
|
||||
if i >= len(data) {
|
||||
return 0
|
||||
}
|
||||
return i + 1
|
||||
}
|
||||
|
||||
func stripMailto(link []byte) []byte {
|
||||
if bytes.HasPrefix(link, []byte("mailto://")) {
|
||||
return link[9:]
|
||||
|
@ -643,48 +662,6 @@ const (
|
|||
emailAutolink
|
||||
)
|
||||
|
||||
// '<' when tags or autolinks are allowed
|
||||
func leftAngle(p *Parser, data []byte, offset int) (int, ast.Node) {
|
||||
data = data[offset:]
|
||||
|
||||
if p.extensions&Mmark != 0 {
|
||||
id, consumed := IsCallout(data)
|
||||
if consumed > 0 {
|
||||
node := &ast.Callout{}
|
||||
node.ID = id
|
||||
return consumed, node
|
||||
}
|
||||
}
|
||||
|
||||
altype, end := tagLength(data)
|
||||
if size := p.inlineHTMLComment(data); size > 0 {
|
||||
end = size
|
||||
}
|
||||
if end <= 2 {
|
||||
return end, nil
|
||||
}
|
||||
if altype == notAutolink {
|
||||
htmlTag := &ast.HTMLSpan{}
|
||||
htmlTag.Literal = data[:end]
|
||||
return end, htmlTag
|
||||
}
|
||||
|
||||
var uLink bytes.Buffer
|
||||
unescapeText(&uLink, data[1:end+1-2])
|
||||
if uLink.Len() <= 0 {
|
||||
return end, nil
|
||||
}
|
||||
link := uLink.Bytes()
|
||||
node := &ast.Link{
|
||||
Destination: link,
|
||||
}
|
||||
if altype == emailAutolink {
|
||||
node.Destination = append([]byte("mailto:"), link...)
|
||||
}
|
||||
ast.AppendChild(node, newTextNode(stripMailto(link)))
|
||||
return end, node
|
||||
}
|
||||
|
||||
// '\\' backslash escape
|
||||
var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")
|
||||
|
||||
|
@ -731,30 +708,6 @@ func unescapeText(ob *bytes.Buffer, src []byte) {
|
|||
}
|
||||
}
|
||||
|
||||
// '&' escaped when it doesn't belong to an entity
|
||||
// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
|
||||
func entity(p *Parser, data []byte, offset int) (int, ast.Node) {
|
||||
data = data[offset:]
|
||||
|
||||
end := skipCharN(data, 1, '#', 1)
|
||||
end = skipAlnum(data, end)
|
||||
|
||||
if end < len(data) && data[end] == ';' {
|
||||
end++ // real entity
|
||||
} else {
|
||||
return 0, nil // lone '&'
|
||||
}
|
||||
|
||||
ent := data[:end]
|
||||
// undo &amp; escaping or it will be converted to &amp;amp; by another
|
||||
// escaper in the renderer
|
||||
if bytes.Equal(ent, []byte("&amp;")) {
|
||||
ent = []byte{'&'}
|
||||
}
|
||||
|
||||
return end, newTextNode(ent)
|
||||
}
|
||||
|
||||
func linkEndsWithEntity(data []byte, linkEnd int) bool {
|
||||
entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1)
|
||||
return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
|
||||
|
@ -1175,7 +1128,7 @@ func helperEmphasis(p *Parser, data []byte, c byte) (int, ast.Node) {
|
|||
}
|
||||
|
||||
emph := &ast.Emph{}
|
||||
p.Inline(emph, data[:i])
|
||||
emph.Literal = data[:i]
|
||||
return i + 1, emph
|
||||
}
|
||||
}
|
||||
|
@ -1194,11 +1147,14 @@ func helperDoubleEmphasis(p *Parser, data []byte, c byte) (int, ast.Node) {
|
|||
i += length
|
||||
|
||||
if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isSpace(data[i-1]) {
|
||||
var node ast.Node = &ast.Strong{}
|
||||
strong := &ast.Strong{}
|
||||
strong.Literal = data[:i]
|
||||
var node ast.Node = strong
|
||||
if c == '~' {
|
||||
node = &ast.Del{}
|
||||
del := &ast.Del{}
|
||||
del.Literal = data[:i]
|
||||
node = del
|
||||
}
|
||||
p.Inline(node, data[:i])
|
||||
return i + 2, node
|
||||
}
|
||||
i++
|
||||
|
@ -1227,9 +1183,7 @@ func helperTripleEmphasis(p *Parser, data []byte, offset int, c byte) (int, ast.
|
|||
case i+2 < len(data) && data[i+1] == c && data[i+2] == c:
|
||||
// triple symbol found
|
||||
strong := &ast.Strong{}
|
||||
em := &ast.Emph{}
|
||||
ast.AppendChild(strong, em)
|
||||
p.Inline(em, data[:i])
|
||||
strong.Literal = data[:i]
|
||||
return i + 3, strong
|
||||
case i+1 < len(data) && data[i+1] == c:
|
||||
// double symbol found, hand over to emph1
|
||||
|
|
|
@ -129,7 +129,7 @@ func NewWithExtensions(extension Extensions) *Parser {
|
|||
p := Parser{
|
||||
refs: make(map[string]*reference),
|
||||
refsRecord: make(map[string]struct{}),
|
||||
maxNesting: 16,
|
||||
maxNesting: 1,
|
||||
insideLink: false,
|
||||
Doc: &ast.Document{},
|
||||
extensions: extension,
|
||||
|
@ -142,21 +142,14 @@ func NewWithExtensions(extension Extensions) *Parser {
|
|||
|
||||
p.inlineCallback[' '] = maybeLineBreak
|
||||
p.inlineCallback['*'] = emphasis
|
||||
p.inlineCallback['#'] = statusTag
|
||||
p.inlineCallback['_'] = emphasis
|
||||
if p.extensions&Strikethrough != 0 {
|
||||
p.inlineCallback['~'] = emphasis
|
||||
}
|
||||
p.inlineCallback['`'] = codeSpan
|
||||
p.inlineCallback['\n'] = lineBreak
|
||||
p.inlineCallback['['] = link
|
||||
p.inlineCallback['<'] = leftAngle
|
||||
p.inlineCallback['\\'] = escape
|
||||
p.inlineCallback['&'] = entity
|
||||
p.inlineCallback['!'] = maybeImage
|
||||
if p.extensions&Mmark != 0 {
|
||||
p.inlineCallback['('] = maybeShortRefOrIndex
|
||||
}
|
||||
p.inlineCallback['^'] = maybeInlineFootnoteOrSuper
|
||||
if p.extensions&Autolink != 0 {
|
||||
p.inlineCallback['h'] = maybeAutoLink
|
||||
p.inlineCallback['m'] = maybeAutoLink
|
||||
|
@ -165,9 +158,6 @@ func NewWithExtensions(extension Extensions) *Parser {
|
|||
p.inlineCallback['M'] = maybeAutoLink
|
||||
p.inlineCallback['F'] = maybeAutoLink
|
||||
}
|
||||
if p.extensions&MathJax != 0 {
|
||||
p.inlineCallback['$'] = math
|
||||
}
|
||||
|
||||
return &p
|
||||
}
|
||||
|
@ -272,16 +262,13 @@ func (p *Parser) Parse(input []byte) ast.Node {
|
|||
// Walk the tree again and process inline markdown in each block
|
||||
ast.WalkFunc(p.Doc, func(node ast.Node, entering bool) ast.WalkStatus {
|
||||
switch node.(type) {
|
||||
case *ast.Paragraph, *ast.Heading, *ast.TableCell:
|
||||
case *ast.Paragraph:
|
||||
p.Inline(node, node.AsContainer().Content)
|
||||
node.AsContainer().Content = nil
|
||||
}
|
||||
return ast.GoToNext
|
||||
})
|
||||
|
||||
if p.Opts.Flags&SkipFootnoteList == 0 {
|
||||
p.parseRefsToAST()
|
||||
}
|
||||
return p.Doc
|
||||
}
|
||||
|
||||
|
@ -693,6 +680,10 @@ func isAlnum(c byte) bool {
|
|||
return (c >= '0' && c <= '9') || isLetter(c)
|
||||
}
|
||||
|
||||
func isValidStatusTagChar(c byte) bool {
|
||||
return isAlnum(c) || c == '-'
|
||||
}
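
The statusTag handler earlier in this diff returns an *ast.StatusTag, and isValidStatusTagChar above restricts tag names to alphanumerics and '-'. The node type itself is not shown in this commit; a minimal sketch of what it could look like, assuming it follows the other inline leaf nodes and that the "status-tag" type string expected in status_tag.test comes from its JSON marshaling (the whole shape is an assumption, not the commit's actual ast change):

package ast

import "encoding/json"

// StatusTag is an assumed sketch of the inline #tag node; not part of this diff.
type StatusTag struct {
	Leaf
}

// MarshalJSON emits {"type":"status-tag","literal":"..."} to match the
// expectations in status_tag.test (assumed shape).
func (st *StatusTag) MarshalJSON() ([]byte, error) {
	type statusTagJSON struct {
		Type    string `json:"type"`
		Literal string `json:"literal"`
	}
	return json.Marshal(&statusTagJSON{Type: "status-tag", Literal: string(st.Literal)})
}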
|
||||
|
||||
// TODO: this is not used
|
||||
// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
|
||||
// always ends output with a newline
|
||||
|
|
168 ref_test.go
|
@ -1,168 +0,0 @@
|
|||
package markdown
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/gomarkdown/markdown/parser"
|
||||
)
|
||||
|
||||
// Markdown 1.0.3 reference tests
|
||||
|
||||
var (
|
||||
refFiles = []string{
|
||||
"Amps and angle encoding",
|
||||
"Auto links",
|
||||
"Backslash escapes",
|
||||
"Blockquotes with code blocks",
|
||||
"Code Blocks",
|
||||
"Code Spans",
|
||||
"Horizontal rules",
|
||||
"Inline HTML (Advanced)",
|
||||
"Inline HTML (Simple)",
|
||||
"Inline HTML comments",
|
||||
"Links, inline style",
|
||||
"Links, reference style",
|
||||
"Links, shortcut references",
|
||||
"Literal quotes in titles",
|
||||
"Markdown Documentation - Basics",
|
||||
"Markdown Documentation - Syntax",
|
||||
"Nested blockquotes",
|
||||
"Ordered and unordered lists",
|
||||
"Strong and em together",
|
||||
"Tabs",
|
||||
"Tidyness",
|
||||
}
|
||||
)
|
||||
|
||||
func TestReference(t *testing.T) {
|
||||
files := append(refFiles, "Hard-wrapped paragraphs with list-like lines")
|
||||
doTestsReference(t, files, 0)
|
||||
}
|
||||
|
||||
func TestReference_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) {
|
||||
files := append(refFiles, "Hard-wrapped paragraphs with list-like lines no empty line before block")
|
||||
doTestsReference(t, files, parser.NoEmptyLineBeforeBlock)
|
||||
}
|
||||
|
||||
// benchResultAnchor is an anchor variable to store the result of a benchmarked
|
||||
// code so that compiler could never optimize away the call to runMarkdown()
|
||||
var benchResultAnchor string
|
||||
|
||||
func benchFile(b *testing.B, basename string) {
|
||||
params := TestParams{extensions: parser.CommonExtensions}
|
||||
filename := filepath.Join("testdata", basename+".text")
|
||||
inputBytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
b.Errorf("Couldn't open '%s', error: %v\n", filename, err)
|
||||
return
|
||||
}
|
||||
|
||||
test := string(inputBytes)
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
benchResultAnchor = runMarkdown(test, params)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReferenceAmps(b *testing.B) {
|
||||
benchFile(b, "Amps and angle encoding")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceAutoLinks(b *testing.B) {
|
||||
benchFile(b, "Auto links")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceBackslashEscapes(b *testing.B) {
|
||||
benchFile(b, "Backslash escapes")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceBlockquotesWithCodeBlocks(b *testing.B) {
|
||||
benchFile(b, "Blockquotes with code blocks")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceCodeBlocks(b *testing.B) {
|
||||
benchFile(b, "Code Blocks")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceCodeSpans(b *testing.B) {
|
||||
benchFile(b, "Code Spans")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceHardWrappedPara(b *testing.B) {
|
||||
benchFile(b, "Hard-wrapped paragraphs with list-like lines")
|
||||
}
|
||||
func BenchmarkReferenceHorizontalRules(b *testing.B) {
|
||||
benchFile(b, "Horizontal rules")
|
||||
}
|
||||
func BenchmarkReferenceInlineHTMLAdvances(b *testing.B) {
|
||||
benchFile(b, "Inline HTML (Advanced)")
|
||||
}
|
||||
func BenchmarkReferenceInlineHTMLSimple(b *testing.B) {
|
||||
benchFile(b, "Inline HTML (Simple)")
|
||||
}
|
||||
func BenchmarkReferenceInlineHTMLComments(b *testing.B) {
|
||||
benchFile(b, "Inline HTML comments")
|
||||
}
|
||||
func BenchmarkReferenceLinksInline(b *testing.B) {
|
||||
benchFile(b, "Links, inline style")
|
||||
}
|
||||
func BenchmarkReferenceLinksReference(b *testing.B) {
|
||||
benchFile(b, "Links, reference style")
|
||||
}
|
||||
func BenchmarkReferenceLinksShortcut(b *testing.B) {
|
||||
benchFile(b, "Links, shortcut references")
|
||||
}
|
||||
func BenchmarkReferenceLiterQuotesInTitles(b *testing.B) {
|
||||
benchFile(b, "Literal quotes in titles")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceMarkdownBasics(b *testing.B) {
|
||||
benchFile(b, "Markdown Documentation - Basics")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceMarkdownSyntax(b *testing.B) {
|
||||
benchFile(b, "Markdown Documentation - Syntax")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceNestedBlockquotes(b *testing.B) {
|
||||
benchFile(b, "Nested blockquotes")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceOrderedAndUnorderedLists(b *testing.B) {
|
||||
benchFile(b, "Ordered and unordered lists")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceStrongAndEm(b *testing.B) {
|
||||
benchFile(b, "Strong and em together")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceTabs(b *testing.B) {
|
||||
benchFile(b, "Tabs")
|
||||
}
|
||||
|
||||
func BenchmarkReferenceTidyness(b *testing.B) {
|
||||
benchFile(b, "Tidyness")
|
||||
}
|
||||
|
||||
func BenchmarkReference(b *testing.B) {
|
||||
params := TestParams{extensions: parser.CommonExtensions}
|
||||
files := append(refFiles, "Hard-wrapped paragraphs with list-like lines")
|
||||
var tests []string
|
||||
for _, basename := range files {
|
||||
filename := filepath.Join("testdata", basename+".text")
|
||||
inputBytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
b.Errorf("Couldn't open '%s', error: %v\n", filename, err)
|
||||
continue
|
||||
}
|
||||
tests = append(tests, string(inputBytes))
|
||||
}
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
for _, test := range tests {
|
||||
benchResultAnchor = runMarkdown(test, params)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -11,7 +11,7 @@ go get github.com/dvyukov/go-fuzz/go-fuzz-build
|
|||
# this step is expensive, so re-use previous runs if possible
|
||||
if [ ! -f ./markdown-fuzz.zip ]; then
|
||||
mkdir -p fuzz-workdir/corpus
|
||||
cp testdata/*.text fuzz-workdir/corpus
|
||||
cp testdata/*.md fuzz-workdir/corpus
|
||||
echo "running go-fuzz-build, might take a while..."
|
||||
go-fuzz-build github.com/gomarkdown/markdown
|
||||
fi
|
||||
|
|
|
@ -1,67 +1,67 @@
|
|||
nothing inline
|
||||
+++
|
||||
<p>nothing inline</p>
|
||||
[{"literal":"nothing inline"}]
|
||||
+++
|
||||
simple *inline* test
|
||||
+++
|
||||
<p>simple <em>inline</em> test</p>
|
||||
[{"literal":"simple "},{"type":"emph","literal":"inline"},{"literal":" test"}]
|
||||
+++
|
||||
*at the* beginning
|
||||
+++
|
||||
<p><em>at the</em> beginning</p>
|
||||
[{"literal":""},{"type":"emph","literal":"at the"},{"literal":" beginning"}]
|
||||
+++
|
||||
at the *end*
|
||||
+++
|
||||
<p>at the <em>end</em></p>
|
||||
[{"literal":"at the "},{"type":"emph","literal":"end"}]
|
||||
+++
|
||||
*try two* in *one line*
|
||||
+++
|
||||
<p><em>try two</em> in <em>one line</em></p>
|
||||
[{"literal":""},{"type":"emph","literal":"try two"},{"literal":" in "},{"type":"emph","literal":"one line"}]
|
||||
+++
|
||||
over *two\nlines* test
|
||||
+++
|
||||
<p>over <em>two\nlines</em> test</p>
|
||||
[{"literal":"over "},{"type":"emph","literal":"two\\nlines"},{"literal":" test"}]
|
||||
+++
|
||||
odd *number of* markers* here
|
||||
+++
|
||||
<p>odd <em>number of</em> markers* here</p>
|
||||
[{"literal":"odd "},{"type":"emph","literal":"number of"},{"literal":" markers* here"}]
|
||||
+++
|
||||
odd *number\nof* markers* here
|
||||
+++
|
||||
<p>odd <em>number\nof</em> markers* here</p>
|
||||
[{"literal":"odd "},{"type":"emph","literal":"number\\nof"},{"literal":" markers* here"}]
|
||||
+++
|
||||
simple _inline_ test
|
||||
+++
|
||||
<p>simple <em>inline</em> test</p>
|
||||
[{"literal":"simple "},{"type":"emph","literal":"inline"},{"literal":" test"}]
|
||||
+++
|
||||
_at the_ beginning
|
||||
+++
|
||||
<p><em>at the</em> beginning</p>
|
||||
[{"literal":""},{"type":"emph","literal":"at the"},{"literal":" beginning"}]
|
||||
+++
|
||||
at the _end_
|
||||
+++
|
||||
<p>at the <em>end</em></p>
|
||||
[{"literal":"at the "},{"type":"emph","literal":"end"}]
|
||||
+++
|
||||
_try two_ in _one line_
|
||||
+++
|
||||
<p><em>try two</em> in <em>one line</em></p>
|
||||
[{"literal":""},{"type":"emph","literal":"try two"},{"literal":" in "},{"type":"emph","literal":"one line"}]
|
||||
+++
|
||||
over _two\nlines_ test
|
||||
+++
|
||||
<p>over <em>two\nlines</em> test</p>
|
||||
[{"literal":"over "},{"type":"emph","literal":"two\\nlines"},{"literal":" test"}]
|
||||
+++
|
||||
odd _number of_ markers_ here
|
||||
+++
|
||||
<p>odd <em>number of</em> markers_ here</p>
|
||||
[{"literal":"odd "},{"type":"emph","literal":"number of"},{"literal":" markers_ here"}]
|
||||
+++
|
||||
odd _number\nof_ markers_ here
|
||||
+++
|
||||
<p>odd <em>number\nof</em> markers_ here</p>
|
||||
[{"literal":"odd "},{"type":"emph","literal":"number\\nof"},{"literal":" markers_ here"}]
|
||||
+++
|
||||
mix of *markers_
|
||||
+++
|
||||
<p>mix of *markers_</p>
|
||||
[{"literal":"mix of *markers_"}]
|
||||
+++
|
||||
*What is A\* algorithm?*
|
||||
+++
|
||||
<p><em>What is A* algorithm?</em></p>
|
||||
[{"literal":""},{"type":"emph","literal":"What is A\\* algorithm?"}]
|
||||
|
|
|
@ -0,0 +1,44 @@
|
|||
nothing inline
|
||||
+++
|
||||
[{"literal":"nothing inline"}]
|
||||
+++
|
||||
simple #tag test
|
||||
+++
|
||||
[{"literal":"simple "},{"type":"status-tag","literal":"tag"},{"literal":" test"}]
|
||||
+++
|
||||
#at-the beginning
|
||||
+++
|
||||
[{"literal":""},{"type":"status-tag","literal":"at-the"},{"literal":" beginning"}]
|
||||
+++
|
||||
at the #end
|
||||
+++
|
||||
[{"literal":"at the "},{"type":"status-tag","literal":"end"}]
|
||||
+++
|
||||
#try-two in #one-line
|
||||
+++
|
||||
[{"literal":""},{"type":"status-tag","literal":"try-two"},{"literal":" in "},{"type":"status-tag","literal":"one-line"}]
|
||||
+++
|
||||
over #two
|
||||
lines test
|
||||
+++
|
||||
[{"literal":"over "},{"type":"status-tag","literal":"two"},{"literal":"\\nlines test"}]
|
||||
+++
|
||||
over two\n#lines test
|
||||
+++
|
||||
[{"literal":"over two\\n"},{"type":"status-tag","literal":"lines"},{"literal":" test"}]
|
||||
+++
|
||||
not valid #status_tag
|
||||
+++
|
||||
[{"literal":"not valid #status_tag"}]
|
||||
+++
|
||||
another not valid #status?tag
|
||||
+++
|
||||
[{"literal":"another not valid #status?tag"}]
|
||||
+++
|
||||
empty # status-tag
|
||||
+++
|
||||
[{"literal":"empty # status-tag"}]
|
||||
+++
|
||||
status-tag with #number9
|
||||
+++
|
||||
[{"literal":"status-tag with "},{"type":"status-tag","literal":"number9"}]
|