diff --git a/block.go b/block.go
index 791db95..9d3a003 100644
--- a/block.go
+++ b/block.go
@@ -764,7 +764,7 @@ func (p *parser) table(data []byte) int {
 		}
 
 		// include the newline in data sent to tableRow
-		if i < len(data)-1 && data[i] == '\n' {
+		if i < len(data) && data[i] == '\n' {
 			i++
 		}
 		p.tableRow(data[rowStart:i], columns, false)
@@ -945,7 +945,7 @@ func (p *parser) quotePrefix(data []byte) int {
 		i++
 	}
 	if i < len(data) && data[i] == '>' {
-		if i < len(data)-1 && data[i+1] == ' ' {
+		if i+1 < len(data) && data[i+1] == ' ' {
 			return i + 2
 		}
 		return i + 1
@@ -1005,7 +1005,7 @@ func (p *parser) quote(data []byte) int {
 
 // returns prefix length for block code
 func (p *parser) codePrefix(data []byte) int {
-	if data[0] == '\t' {
+	if len(data) >= 1 && data[0] == '\t' {
 		return 1
 	}
 	if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
@@ -1073,7 +1073,7 @@ func (p *parser) uliPrefix(data []byte) int {
 	if i >= len(data)-1 {
 		return 0
 	}
-	// need a *, +, or - followed by a space
+	// need one of {'*', '+', '-'} followed by a space or a tab
 	if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
 		(data[i+1] != ' ' && data[i+1] != '\t') {
 		return 0
@@ -1099,7 +1099,7 @@ func (p *parser) oliPrefix(data []byte) int {
 		return 0
 	}
 
-	// we need >= 1 digits followed by a dot and a space
+	// we need >= 1 digits followed by a dot and a space or a tab
 	if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') {
 		return 0
 	}
@@ -1112,7 +1112,7 @@ func (p *parser) dliPrefix(data []byte) int {
 		return 0
 	}
 	i := 0
-	// need a : followed by a spaces
+	// need a ':' followed by a space or a tab
 	if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') {
 		return 0
 	}
@@ -1194,11 +1194,12 @@ func finalizeList(block *Node) {
 func (p *parser) listItem(data []byte, flags *ListType) int {
 	// keep track of the indentation of the first line
 	itemIndent := 0
-	if data[itemIndent] == '\t' {
+	if data[0] == '\t' {
 		itemIndent += 4
-	}
-	for itemIndent < 3 && data[itemIndent] == ' ' {
-		itemIndent++
+	} else {
+		for itemIndent < 3 && data[itemIndent] == ' ' {
+			itemIndent++
+		}
 	}
 
 	var bulletChar byte = '*'
@@ -1436,8 +1437,7 @@ func (p *parser) paragraph(data []byte) int {
 		// did this blank line followed by a definition list item?
 		if p.flags&DefinitionLists != 0 {
 			if i < len(data)-1 && data[i+1] == ':' {
-				ret := p.list(data[prev:], ListTypeDefinition)
-				return ret
+				return p.list(data[prev:], ListTypeDefinition)
 			}
 		}
 
diff --git a/esc_test.go b/esc_test.go
index b3b7a07..ff67d54 100644
--- a/esc_test.go
+++ b/esc_test.go
@@ -38,11 +38,11 @@ func BenchmarkEscapeHTML(b *testing.B) {
 		[]byte("[1]: http://example.com/?foo=1&bar=2"),
 		[]byte("[2]: http://att.com/ \"AT&T\""),
 	}
-	var buff bytes.Buffer
+	var buf bytes.Buffer
 	for n := 0; n < b.N; n++ {
 		for _, t := range tests {
-			escapeHTML(&buff, t)
-			buff.Reset()
+			escapeHTML(&buf, t)
+			buf.Reset()
 		}
 	}
 }
diff --git a/html.go b/html.go
index 527367d..10cca9c 100644
--- a/html.go
+++ b/html.go
@@ -559,11 +559,11 @@ func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkSt
 		} else {
 			if entering {
 				dest = r.addAbsPrefix(dest)
-				var hrefBuff bytes.Buffer
-				hrefBuff.WriteString("href=\"")
-				escLink(&hrefBuff, dest)
-				hrefBuff.WriteByte('"')
-				attrs = append(attrs, hrefBuff.String())
+				var hrefBuf bytes.Buffer
+				hrefBuf.WriteString("href=\"")
+				escLink(&hrefBuf, dest)
+				hrefBuf.WriteByte('"')
+				attrs = append(attrs, hrefBuf.String())
 				if node.NoteID != 0 {
 					r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
 					break
@@ -939,17 +939,17 @@ func (r *HTMLRenderer) writeDocumentFooter(w *bytes.Buffer) {
 func (r *HTMLRenderer) Render(ast *Node) []byte {
 	//println("render_Blackfriday")
 	//dump(ast)
-	var buff bytes.Buffer
-	r.writeDocumentHeader(&buff)
+	var buf bytes.Buffer
+	r.writeDocumentHeader(&buf)
 	if r.Extensions&TOC != 0 || r.Extensions&OmitContents != 0 {
-		r.writeTOC(&buff, ast)
+		r.writeTOC(&buf, ast)
 		if r.Extensions&OmitContents != 0 {
-			return buff.Bytes()
+			return buf.Bytes()
 		}
 	}
 	ast.Walk(func(node *Node, entering bool) WalkStatus {
-		return r.RenderNode(&buff, node, entering)
+		return r.RenderNode(&buf, node, entering)
 	})
-	r.writeDocumentFooter(&buff)
-	return buff.Bytes()
+	r.writeDocumentFooter(&buf)
+	return buf.Bytes()
 }
diff --git a/inline.go b/inline.go
index 3e80b6e..6b1c9f8 100644
--- a/inline.go
+++ b/inline.go
@@ -715,14 +715,11 @@ func linkEndsWithEntity(data []byte, linkEnd int) bool {
 	return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
 }
 
-var prefixes = [][]byte{
-	[]byte("http://"),
-	[]byte("https://"),
-	[]byte("ftp://"),
-	[]byte("file://"),
-	[]byte("mailto:"),
-}
-
+// hasPrefixCaseInsensitive is a custom implementation of
+//	strings.HasPrefix(strings.ToLower(s), prefix)
+// we rolled our own because ToLower pulls in a huge machinery of lowercasing
+// anything from Unicode and that's very slow. Since this func will only be
+// used on ASCII protocol prefixes, we can take shortcuts.
 func hasPrefixCaseInsensitive(s, prefix []byte) bool {
 	if len(s) < len(prefix) {
 		return false
@@ -736,12 +733,22 @@ func hasPrefixCaseInsensitive(s, prefix []byte) bool {
 	return true
 }
 
+var protocolPrefixes = [][]byte{
+	[]byte("http://"),
+	[]byte("https://"),
+	[]byte("ftp://"),
+	[]byte("file://"),
+	[]byte("mailto:"),
+}
+
+const shortestPrefix = 6 // len("ftp://"), the shortest of the above
+
 func maybeAutoLink(p *parser, data []byte, offset int) (int, *Node) {
 	// quick check to rule out most false hits
-	if p.insideLink || len(data) < offset+6 { // 6 is the len() of the shortest of the prefixes
+	if p.insideLink || len(data) < offset+shortestPrefix {
 		return 0, nil
 	}
-	for _, prefix := range prefixes {
+	for _, prefix := range protocolPrefixes {
 		endOfHead := offset + 8 // 8 is the len() of the longest prefix
 		if endOfHead > len(data) {
 			endOfHead = len(data)