//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// Markdown parsing and processing
//
//

// Blackfriday markdown processor.
//
// Translates plain text with simple formatting rules into HTML or LaTeX.
package blackfriday

import (
	"bytes"
	"fmt"
	"strings"
	"unicode/utf8"
)

const VERSION = "1.4"

type Extensions int

// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	NoExtensions    Extensions = 0
	NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words
	Tables                                 // Render tables
	FencedCode                             // Render fenced code blocks
	Autolink                               // Detect embedded URLs that are not explicitly marked
	Strikethrough                          // Strikethrough text using ~~test~~
	LaxHTMLBlocks                          // Loosen up HTML block parsing rules
	SpaceHeaders                           // Be strict about prefix header rules
	HardLineBreak                          // Translate newlines into line breaks
	TabSizeEight                           // Expand tabs to eight spaces instead of four
	Footnotes                              // Pandoc-style footnotes
	NoEmptyLineBeforeBlock                 // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
	HeaderIDs                              // Specify header IDs with {#id}
	Titleblock                             // Titleblock in the style of pandoc
	AutoHeaderIDs                          // Create the header ID from the text
	BackslashLineBreak                     // Translate trailing backslashes into line breaks
	DefinitionLists                        // Render definition lists
	Smartypants                            // Enable smart punctuation substitutions
	SmartypantsFractions                   // Enable smart fractions (with Smartypants)
	SmartypantsDashes                      // Enable smart dashes (with Smartypants)
	SmartypantsLatexDashes                 // Enable LaTeX-style dashes (with Smartypants)
	SmartypantsAngledQuotes                // Enable angled double quotes (with Smartypants) for double quotes rendering
	TOC                                    // Generate a table of contents
	OmitContents                           // Skip the main contents (for a standalone table of contents)

	CommonHtmlFlags HTMLFlags = UseXHTML

	CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
		Autolink | Strikethrough | SpaceHeaders | HeaderIDs |
		BackslashLineBreak | DefinitionLists | Smartypants |
		SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes
)
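
// Extension flags are combined with a bitwise OR and handed to the parsing
// functions below. A minimal illustrative sketch (the particular flag
// combination, and the input and renderer variables, are assumptions, not
// part of the package):
//
//	exts := Tables | FencedCode | Autolink | Strikethrough
//	output := Markdown(input, renderer, exts)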

var DefaultOptions = Options{
	Extensions: CommonExtensions,
}

type LinkType int

// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LinkTypeNotAutolink LinkType = iota
	LinkTypeNormal
	LinkTypeEmail
)

type ListType int

// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	ListTypeOrdered ListType = 1 << iota
	ListTypeDefinition
	ListTypeTerm

	ListItemContainsBlock
	ListItemBeginningOfList
	ListItemEndOfList
)

type CellAlignFlags int

// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	TableAlignmentLeft = 1 << iota
	TableAlignmentRight
	TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight)
)

// The size of a tab stop.
const (
	TabSizeDefault = 4
	TabSizeDouble  = 8
)

// blockTags is a set of tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]struct{}{
	"blockquote": struct{}{},
	"del":        struct{}{},
	"div":        struct{}{},
	"dl":         struct{}{},
	"fieldset":   struct{}{},
	"form":       struct{}{},
	"h1":         struct{}{},
	"h2":         struct{}{},
	"h3":         struct{}{},
	"h4":         struct{}{},
	"h5":         struct{}{},
	"h6":         struct{}{},
	"iframe":     struct{}{},
	"ins":        struct{}{},
	"math":       struct{}{},
	"noscript":   struct{}{},
	"ol":         struct{}{},
	"pre":        struct{}{},
	"p":          struct{}{},
	"script":     struct{}{},
	"style":      struct{}{},
	"table":      struct{}{},
	"ul":         struct{}{},

	// HTML5
	"address":    struct{}{},
	"article":    struct{}{},
	"aside":      struct{}{},
	"canvas":     struct{}{},
	"figcaption": struct{}{},
	"figure":     struct{}{},
	"footer":     struct{}{},
	"header":     struct{}{},
	"hgroup":     struct{}{},
	"main":       struct{}{},
	"nav":        struct{}{},
	"output":     struct{}{},
	"progress":   struct{}{},
	"section":    struct{}{},
	"video":      struct{}{},
}

// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// element.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently, HTML and LaTeX implementations are provided.
type Renderer interface {
	// block-level callbacks
	BlockCode(text []byte, lang string)
	BlockQuote(text []byte)
	BlockHtml(text []byte)
	BeginHeader(level int, id string)
	EndHeader(level int, id string, header []byte)
	HRule()
	BeginList(flags ListType)
	EndList(flags ListType)
	ListItem(text []byte, flags ListType)
	BeginParagraph()
	EndParagraph()
	Table(header []byte, body []byte, columnData []CellAlignFlags)
	TableRow(text []byte)
	TableHeaderCell(out *bytes.Buffer, text []byte, flags CellAlignFlags)
	TableCell(out *bytes.Buffer, text []byte, flags CellAlignFlags)
	BeginFootnotes()
	EndFootnotes()
	FootnoteItem(name, text []byte, flags ListType)
	TitleBlock(text []byte)

	// Span-level callbacks
	AutoLink(link []byte, kind LinkType)
	CodeSpan(text []byte)
	DoubleEmphasis(text []byte)
	Emphasis(text []byte)
	Image(link []byte, title []byte, alt []byte)
	LineBreak()
	Link(link []byte, title []byte, content []byte)
	RawHtmlTag(tag []byte)
	TripleEmphasis(text []byte)
	StrikeThrough(text []byte)
	FootnoteRef(ref []byte, id int)

	// Low-level callbacks
	Entity(entity []byte)
	NormalText(text []byte)

	// Header and footer
	DocumentHeader()
	DocumentFooter()

	Write(b []byte) (int, error)

	Render(ast *Node) []byte
}

// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data.
type inlineParser func(p *parser, data []byte, offset int) int

// Parser holds runtime state used by the parser.
// This is constructed by the Markdown function.
type parser struct {
	refOverride    ReferenceOverrideFunc
	refs           map[string]*reference
	inlineCallback [256]inlineParser
	flags          Extensions
	nesting        int
	maxNesting     int
	insideLink     bool

	// Footnotes need to be ordered as well as available to quickly check for
	// presence. If a ref is also a footnote, it's stored both in refs and here
	// in notes. Slice is nil if footnotes not enabled.
	notes []*reference

	doc                  *Node
	tip                  *Node // = doc
	oldTip               *Node
	lastMatchedContainer *Node // = doc
	allClosed            bool
	currBlock            *Node // a block node currently being parsed by inline parser
}

func (p *parser) getRef(refid string) (ref *reference, found bool) {
	if p.refOverride != nil {
		r, overridden := p.refOverride(refid)
		if overridden {
			if r == nil {
				return nil, false
			}
			return &reference{
				link:     []byte(r.Link),
				title:    []byte(r.Title),
				noteId:   0,
				hasBlock: false,
				text:     []byte(r.Text)}, true
		}
	}
	// refs are case insensitive
	ref, found = p.refs[strings.ToLower(refid)]
	return ref, found
}

func (p *parser) finalize(block *Node) {
	above := block.Parent
	block.open = false
	p.tip = above
}

func (p *parser) addChild(node NodeType, offset uint32) *Node {
	for !p.tip.canContain(node) {
		p.finalize(p.tip)
	}
	newNode := NewNode(node)
	newNode.content = []byte{}
	p.tip.appendChild(newNode)
	p.tip = newNode
	return newNode
}

func (p *parser) closeUnmatchedBlocks() {
	if !p.allClosed {
		for p.oldTip != p.lastMatchedContainer {
			parent := p.oldTip.Parent
			p.finalize(p.oldTip)
			p.oldTip = parent
		}
		p.allClosed = true
	}
}

//
//
// Public interface
//
//

// Reference represents the details of a link.
// See the documentation in Options for more details on use-case.
type Reference struct {
	// Link is usually the URL the reference points to.
	Link string
	// Title is the alternate text describing the link in more detail.
	Title string
	// Text is the optional text to override the ref with if the syntax used was
	// [refid][]
	Text string
}

// ReferenceOverrideFunc is expected to be called with a reference string and
// return either a valid Reference type that the reference string maps to or
// nil. If overridden is false, the default reference logic will be executed.
// See the documentation in Options for more details on use-case.
type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)

// Options represents configurable overrides and callbacks (in addition to the
// extension flag set) for configuring a Markdown parse.
type Options struct {
	// Extensions is a flag set of bit-wise ORed extension bits. See the
	// Extensions flags defined in this package.
	Extensions Extensions

	// ReferenceOverride is an optional function callback that is called every
	// time a reference is resolved.
	//
	// In Markdown, the link reference syntax can be made to resolve a link to
	// a reference instead of an inline URL, in one of the following ways:
	//
	//  * [link text][refid]
	//  * [refid][]
	//
	// Usually, the refid is defined at the bottom of the Markdown document. If
	// this override function is provided, the refid is passed to the override
	// function first, before consulting the defined refids at the bottom. If
	// the override function indicates an override did not occur, the refids at
	// the bottom will be used to fill in the link details.
	ReferenceOverride ReferenceOverrideFunc
}
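
// A minimal sketch of wiring up a ReferenceOverride callback. The "manual"
// refid, the URL, and the input and renderer variables are illustrative
// assumptions, not part of the package:
//
//	opts := Options{
//		Extensions: CommonExtensions,
//		ReferenceOverride: func(reference string) (*Reference, bool) {
//			if reference == "manual" {
//				return &Reference{
//					Link:  "http://example.com/manual",
//					Title: "User manual",
//				}, true
//			}
//			return nil, false
//		},
//	}
//	output := MarkdownOptions(input, renderer, opts)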

// MarkdownBasic is a convenience function for simple rendering.
// It processes markdown input with no extensions enabled.
func MarkdownBasic(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := UseXHTML
	renderer := HTMLRenderer(htmlFlags, CommonExtensions, "", "")

	// set up the parser
	return MarkdownOptions(input, renderer, Options{Extensions: 0})
}

// MarkdownCommon is a convenience function for simple rendering.
// It processes markdown input with common extensions enabled, including:
//
// * Smartypants processing with smart fractions and LaTeX dashes
//
// * Intra-word emphasis suppression
//
// * Tables
//
// * Fenced code blocks
//
// * Autolinking
//
// * Strikethrough support
//
// * Strict header parsing
//
// * Custom Header IDs
func MarkdownCommon(input []byte) []byte {
	// set up the HTML renderer
	renderer := HTMLRenderer(CommonHtmlFlags, CommonExtensions, "", "")
	return MarkdownOptions(input, renderer, DefaultOptions)
}
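
// A minimal usage sketch for the convenience wrappers above (the input
// literal is only an illustration):
//
//	input := []byte("# Title\n\nSome *markdown* text.\n")
//	html := MarkdownCommon(input)
//	// html now holds the rendered HTML as a []byte.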

// Markdown is the main rendering function.
// It parses and renders a block of markdown-encoded text.
// The supplied Renderer is used to format the output, and extensions dictates
// which non-standard extensions are enabled.
//
// To use the supplied HTML or LaTeX renderers, see HTMLRenderer and
// LatexRenderer, respectively.
func Markdown(input []byte, renderer Renderer, extensions Extensions) []byte {
	return MarkdownOptions(input, renderer, Options{
		Extensions: extensions})
}
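
// A sketch of calling Markdown with an explicit renderer and extension set,
// mirroring what MarkdownCommon does internally (input is assumed to be the
// caller's markdown source):
//
//	renderer := HTMLRenderer(CommonHtmlFlags, CommonExtensions, "", "")
//	html := Markdown(input, renderer, CommonExtensions)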

// MarkdownOptions is just like Markdown but takes additional options through
// the Options struct.
func MarkdownOptions(input []byte, renderer Renderer, opts Options) []byte {
	// no point in parsing if we can't render
	if renderer == nil {
		return nil
	}
	return renderer.Render(Parse(input, opts))
}

// Parse converts a markdown document into a syntax tree, which can then be
// handed to a Renderer (see MarkdownOptions).
func Parse(input []byte, opts Options) *Node {
	extensions := opts.Extensions

	// fill in the parser state
	p := new(parser)
	p.flags = extensions
	p.refOverride = opts.ReferenceOverride
	p.refs = make(map[string]*reference)
	p.maxNesting = 16
	p.insideLink = false

	docNode := NewNode(Document)
	p.doc = docNode
	p.tip = docNode
	p.oldTip = docNode
	p.lastMatchedContainer = docNode
	p.allClosed = true

	// register inline parsers
	p.inlineCallback['*'] = emphasis
	p.inlineCallback['_'] = emphasis
	if extensions&Strikethrough != 0 {
		p.inlineCallback['~'] = emphasis
	}
	p.inlineCallback['`'] = codeSpan
	p.inlineCallback['\n'] = lineBreak
	p.inlineCallback['['] = link
	p.inlineCallback['<'] = leftAngle
	p.inlineCallback['\\'] = escape
	p.inlineCallback['&'] = entity
	p.inlineCallback['!'] = maybeImage
	p.inlineCallback['^'] = maybeInlineFootnote

	if extensions&Autolink != 0 {
		p.inlineCallback['h'] = maybeAutoLink
		p.inlineCallback['m'] = maybeAutoLink
		p.inlineCallback['f'] = maybeAutoLink
		p.inlineCallback['H'] = maybeAutoLink
		p.inlineCallback['M'] = maybeAutoLink
		p.inlineCallback['F'] = maybeAutoLink
	}

	if extensions&Footnotes != 0 {
		p.notes = make([]*reference, 0)
	}

	first := firstPass(p, input)
	secondPass(p, first)
	// Walk the tree and finish up any unfinished blocks
	for p.tip != nil {
		p.finalize(p.tip)
	}
	// Walk the tree again and process inline markdown in each block
	p.doc.Walk(func(node *Node, entering bool) {
		if node.Type == Paragraph || node.Type == Header || node.Type == TableCell {
			p.currBlock = node
			p.inline(node.content)
			node.content = nil
		}
	})
	p.parseRefsToAST()
	p.generateTOC()
	return p.doc
}
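
// Parse can also be used on its own when only the syntax tree is needed.
// A hedged sketch that counts headers in a document (the counting logic and
// the input variable are illustrative assumptions):
//
//	ast := Parse(input, DefaultOptions)
//	headers := 0
//	ast.Walk(func(node *Node, entering bool) {
//		if entering && node.Type == Header {
//			headers++
//		}
//	})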

func (p *parser) generateTOC() {
	if p.flags&TOC == 0 && p.flags&OmitContents == 0 {
		return
	}
	navNode := NewNode(HTMLBlock)
	navNode.Literal = []byte("<nav>")
	navNode.open = false

	var topList *Node
	var listNode *Node
	var lastItem *Node
	headerCount := 0
	var currentLevel uint32
	p.doc.Walk(func(node *Node, entering bool) {
		if entering && node.Type == Header {
			if node.Level > currentLevel {
				currentLevel++
				newList := NewNode(List)
				if lastItem != nil {
					lastItem.appendChild(newList)
					listNode = newList
				} else {
					listNode = newList
					topList = listNode
				}
			}
			if node.Level < currentLevel {
				finalizeList(listNode)
				lastItem = listNode.Parent
				listNode = lastItem.Parent
			}
			node.HeaderID = fmt.Sprintf("toc_%d", headerCount)
			headerCount++
			lastItem = NewNode(Item)
			listNode.appendChild(lastItem)
			anchorNode := NewNode(Link)
			anchorNode.Destination = []byte("#" + node.HeaderID)
			lastItem.appendChild(anchorNode)
			anchorNode.appendChild(text(node.FirstChild.Literal))
		}
	})
	firstChild := p.doc.FirstChild
	// Insert TOC only if there is anything to insert
	if topList != nil {
		finalizeList(topList)
		firstChild.insertBefore(navNode)
		firstChild.insertBefore(topList)
		navCloseNode := NewNode(HTMLBlock)
		navCloseNode.Literal = []byte("</nav>")
		navCloseNode.open = false
		firstChild.insertBefore(navCloseNode)
	}
	// Drop everything after the TOC if OmitContents was requested
	if p.flags&OmitContents != 0 {
		for firstChild != nil {
			next := firstChild.Next
			firstChild.unlink()
			firstChild = next
		}
	}
}

func (p *parser) parseRefsToAST() {
	if p.flags&Footnotes == 0 || len(p.notes) == 0 {
		return
	}
	p.tip = p.doc
	finalizeHtmlBlock(p.addBlock(HTMLBlock, []byte(`<div class="footnotes">`)))
	p.addBlock(HorizontalRule, nil)
	block := p.addBlock(List, nil)
	block.ListFlags = ListTypeOrdered
	flags := ListItemBeginningOfList
	// Note: this loop is intentionally explicit, not range-form. This is
	// because the body of the loop will append nested footnotes to p.notes and
	// we need to process those late additions. Range form would only walk over
	// the fixed initial set.
	for i := 0; i < len(p.notes); i++ {
		ref := p.notes[i]
		block := p.addBlock(Item, nil)
		block.ListFlags = ListTypeOrdered
		block.RefLink = ref.link
		if ref.hasBlock {
			flags |= ListItemContainsBlock
			p.block(ref.title)
		} else {
			p.currBlock = block
			p.inline(ref.title)
		}
		flags &^= ListItemBeginningOfList | ListItemContainsBlock
	}
	above := block.Parent
	finalizeList(block)
	p.tip = above
	finalizeHtmlBlock(p.addBlock(HTMLBlock, []byte("</div>")))
	block.Walk(func(node *Node, entering bool) {
		if node.Type == Paragraph || node.Type == Header {
			p.currBlock = node
			p.inline(node.content)
			node.content = nil
		}
	})
}

// first pass:
// - extract references
// - expand tabs
// - normalize newlines
// - copy everything else
func firstPass(p *parser, input []byte) []byte {
	var out bytes.Buffer
	tabSize := TabSizeDefault
	if p.flags&TabSizeEight != 0 {
		tabSize = TabSizeDouble
	}
	beg, end := 0, 0
	lastFencedCodeBlockEnd := 0
	for beg < len(input) { // iterate over lines
		if end = isReference(p, input[beg:], tabSize); end > 0 {
			beg += end
		} else { // skip to the next line
			end = beg
			for end < len(input) && input[end] != '\n' && input[end] != '\r' {
				end++
			}

			if p.flags&FencedCode != 0 {
				// track fenced code block boundaries to suppress tab expansion
				// inside them:
				if beg >= lastFencedCodeBlockEnd {
					if i := p.fencedCode(input[beg:], false); i > 0 {
						lastFencedCodeBlockEnd = beg + i
					}
				}
			}

			// add the line body if present
			if end > beg {
				if end < lastFencedCodeBlockEnd { // Do not expand tabs while inside fenced code blocks.
					out.Write(input[beg:end])
				} else {
					expandTabs(&out, input[beg:end], tabSize)
				}
			}
			out.WriteByte('\n')

			if end < len(input) && input[end] == '\r' {
				end++
			}
			if end < len(input) && input[end] == '\n' {
				end++
			}

			beg = end
		}
	}

	// empty input?
	if out.Len() == 0 {
		out.WriteByte('\n')
	}

	return out.Bytes()
}

// second pass: actual rendering
func secondPass(p *parser, input []byte) {
	p.block(input)

	if p.flags&Footnotes != 0 && len(p.notes) > 0 {
		flags := ListItemBeginningOfList
		for i := 0; i < len(p.notes); i++ {
			ref := p.notes[i]
			if ref.hasBlock {
				flags |= ListItemContainsBlock
				p.block(ref.title)
			} else {
				p.inline(ref.title)
			}
			flags &^= ListItemBeginningOfList | ListItemContainsBlock
		}
	}

	if p.nesting != 0 {
		panic("Nesting level did not end at zero")
	}
}

//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
//    [1]: http://www.google.com/ "Google"
//    [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
//    This library is hosted on [Github][2], a git hosting site.
//
// Actual footnotes as specified in Pandoc and supported by some other Markdown
// libraries such as php-markdown are also taken care of. They look like this:
//
//    This sentence needs a bit of further explanation.[^note]
//
//    [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
// Inline footnotes such as:
//
//    Inline footnotes^[Not supported.] also exist.
//
// are not yet supported.

// References are parsed and stored in this struct.
type reference struct {
	link     []byte
	title    []byte
	noteId   int // 0 if not a footnote ref
	hasBlock bool
	text     []byte
}

func (r *reference) String() string {
	return fmt.Sprintf("{link: %q, title: %q, text: %q, noteId: %d, hasBlock: %v}",
		r.link, r.title, r.text, r.noteId, r.hasBlock)
}

// Check whether or not data starts with a reference link.
// If so, it is parsed and stored in the parser's reference map.
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *parser, data []byte, tabSize int) int {
	// up to 3 optional leading spaces
	if len(data) < 4 {
		return 0
	}
	i := 0
	for i < 3 && data[i] == ' ' {
		i++
	}

	noteId := 0

	// id part: anything but a newline between brackets
	if data[i] != '[' {
		return 0
	}
	i++
	if p.flags&Footnotes != 0 {
		if i < len(data) && data[i] == '^' {
			// we can set it to anything here because the proper noteIds will
			// be assigned later during the second pass. It just has to be != 0
			noteId = 1
			i++
		}
	}
	idOffset := i
	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
		i++
	}
	if i >= len(data) || data[i] != ']' {
		return 0
	}
	idEnd := i

	// spacer: colon (space | tab)* newline? (space | tab)*
	i++
	if i >= len(data) || data[i] != ':' {
		return 0
	}
	i++
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
			i++
		}
	}
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i >= len(data) {
		return 0
	}

	var (
		linkOffset, linkEnd   int
		titleOffset, titleEnd int
		lineEnd               int
		raw                   []byte
		hasBlock              bool
	)

	if p.flags&Footnotes != 0 && noteId != 0 {
		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
		lineEnd = linkEnd
	} else {
		linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
	}
	if lineEnd == 0 {
		return 0
	}

	// a valid ref has been found

	ref := &reference{
		noteId:   noteId,
		hasBlock: hasBlock,
	}

	if noteId > 0 {
		// reusing the link field for the id since footnotes don't have links
		ref.link = data[idOffset:idEnd]
		// if footnote, it's not really a title, it's the contained text
		ref.title = raw
	} else {
		ref.link = data[linkOffset:linkEnd]
		ref.title = data[titleOffset:titleEnd]
	}

	// id matches are case-insensitive
	id := string(bytes.ToLower(data[idOffset:idEnd]))

	p.refs[id] = ref

	return lineEnd
}

func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
	// link: whitespace-free sequence, optionally between angle brackets
	if data[i] == '<' {
		i++
	}
	linkOffset = i
	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
		i++
	}
	if i == len(data) {
		return
	}
	linkEnd = i
	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
		linkOffset++
		linkEnd--
	}

	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
		return
	}

	// compute end-of-line
	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
		lineEnd = i
	}
	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
		lineEnd++
	}

	// optional (space|tab)* spacer after a newline
	if lineEnd > 0 {
		i = lineEnd + 1
		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
			i++
		}
	}

	// optional title: any non-newline sequence enclosed in '"() alone on its line
	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
		i++
		titleOffset = i

		// look for EOL
		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
			i++
		}
		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
			titleEnd = i + 1
		} else {
			titleEnd = i
		}

		// step back
		i--
		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
			i--
		}
		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
			lineEnd = titleEnd
			titleEnd = i
		}
	}

	return
}

// The first bit of this logic is the same as (*parser).listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
	if i == 0 || len(data) == 0 {
		return
	}

	// skip leading whitespace on first line
	for i < len(data) && data[i] == ' ' {
		i++
	}

	blockStart = i

	// find the end of the line
	blockEnd = i
	for i < len(data) && data[i-1] != '\n' {
		i++
	}

	// get working buffer
	var raw bytes.Buffer

	// put the first line into the working buffer
	raw.Write(data[blockEnd:i])
	blockEnd = i

	// process the following lines
	containsBlankLine := false

gatherLines:
	for blockEnd < len(data) {
		i++

		// find the end of this line
		for i < len(data) && data[i-1] != '\n' {
			i++
		}

		// if it is an empty line, guess that it is part of this item
		// and move on to the next line
		if p.isEmpty(data[blockEnd:i]) > 0 {
			containsBlankLine = true
			blockEnd = i
			continue
		}

		n := 0
		if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
			// this is the end of the block.
			// we don't want to include this last line in the index.
			break gatherLines
		}

		// if there were blank lines before this one, insert a new one now
		if containsBlankLine {
			raw.WriteByte('\n')
			containsBlankLine = false
		}

		// get rid of that first tab, write to buffer
		raw.Write(data[blockEnd+n : i])
		hasBlock = true

		blockEnd = i
	}

	if data[blockEnd-1] != '\n' {
		raw.WriteByte('\n')
	}

	contents = raw.Bytes()

	return
}

//
//
// Miscellaneous helper functions
//
//

// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
		if c == r {
			return true
		}
	}
	return false
}

// Test if a character is a whitespace character.
func isspace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
}

// Test if a character is a letter.
func isletter(c byte) bool {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// Test if a character is a letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	return (c >= '0' && c <= '9') || isletter(c)
}

// Replace tab characters with spaces, aligning to the next tabSize column.
// The trailing newline for each line is written by the caller (see firstPass).
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// first, check for common cases: no tabs, or only tabs at beginning of line
	i, prefix := 0, 0
	slowcase := false
	for i = 0; i < len(line); i++ {
		if line[i] == '\t' {
			if prefix == i {
				prefix++
			} else {
				slowcase = true
				break
			}
		}
	}

	// no need to decode runes if all tabs are at the beginning of the line
	if !slowcase {
		for i = 0; i < prefix*tabSize; i++ {
			out.WriteByte(' ')
		}
		out.Write(line[prefix:])
		return
	}

	// the slow case: we need to count runes to figure out how
	// many spaces to insert for each tab
	column := 0
	i = 0
	for i < len(line) {
		start := i
		for i < len(line) && line[i] != '\t' {
			_, size := utf8.DecodeRune(line[i:])
			i += size
			column++
		}

		if i > start {
			out.Write(line[start:i])
		}

		if i >= len(line) {
			break
		}

		for {
			out.WriteByte(' ')
			column++
			if column%tabSize == 0 {
				break
			}
		}

		i++
	}
}
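
// For example (an illustrative sketch of the expansion behavior, not a test
// in the package): with tabSize = 4, the line "ab\tc" expands to "ab  c",
// because the tab advances the column from 2 to the next multiple of 4.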

// Find if a line counts as indented or not.
// Returns number of characters the indent is (0 = not indented).
func isIndented(data []byte, indentSize int) int {
	if len(data) == 0 {
		return 0
	}
	if data[0] == '\t' {
		return 1
	}
	if len(data) < indentSize {
		return 0
	}
	for i := 0; i < indentSize; i++ {
		if data[i] != ' ' {
			return 0
		}
	}
	return indentSize
}
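
// For example (illustrative only): isIndented([]byte("    code"), 4) returns 4,
// and isIndented([]byte("\tcode"), 4) returns 1, since a single leading tab
// counts as one character of indentation.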

// Create a url-safe slug for fragments
func slugify(in []byte) []byte {
	if len(in) == 0 {
		return in
	}
	out := make([]byte, 0, len(in))
	sym := false

	for _, ch := range in {
		if isalnum(ch) {
			sym = false
			out = append(out, ch)
		} else if sym {
			continue
		} else {
			out = append(out, '-')
			sym = true
		}
	}
	var a, b int
	var ch byte
	for a, ch = range out {
		if ch != '-' {
			break
		}
	}
	for b = len(out) - 1; b > 0; b-- {
		if out[b] != '-' {
			break
		}
	}
	return out[a : b+1]
}
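
// For example (illustrative only): slugify([]byte("Hello, World!")) yields
// "Hello-World": runs of non-alphanumeric characters collapse to a single
// '-', and leading and trailing dashes are trimmed.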