#            NimYAML - YAML implementation in Nim
#        (c) Copyright 2015 Felix Krause
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
2016-08-30 20:15:29 +00:00
|
|
|
type
  ScalarType = enum
    ## How the scalar currently being lexed is represented in the input.
    stFlow, stLiteral, stFolded

  LexedDirective = enum
    ## Result of lexing a `%...` directive name.
    ldYaml, ldTag, ldUnknown

  YamlContext = enum
    ## Whether the lexer is currently inside block or flow context;
    ## flow indicators terminate plain scalars only in flow context.
    cBlock, cFlow

  ChompType = enum
    ## Block scalar chomping indicator (`+` keep, default clip, `-` strip).
    ctKeep, ctClip, ctStrip

  ParserContext = ref object of YamlStream
    ## Mutable state of a running parse; doubles as the produced YamlStream.
    p: YamlParser                # owning parser (lexer, taglib, callback)
    storedState: proc(s: YamlStream, e: var YamlStreamEvent): bool
      ## continuation to restore after a temporary state switch
    scalarType: ScalarType
    chomp: ChompType             # chomping mode of current block scalar
    atSequenceItem: bool
    recentWasMoreIndented: bool  # folded scalars: previous line was indented
    flowdepth: int               # nesting depth of flow collections
    explicitFlowKey: bool        # a `?` key indicator was seen in flow context
    content, after: string       # scalar buffer and pending whitespace buffer
    ancestry: seq[FastParseLevel] # stack of enclosing levels
    level: FastParseLevel        # current level
    tagUri: string
    tag: TagId                   # pending tag for the next node
    anchor: AnchorId             # pending anchor for the next node
    shorthands: Table[string, string] # tag shorthand -> URI prefix
    nextAnchorId: AnchorId
    newlines: int                # newlines collected for folding
    indentation: int             # indentation of current line

  LevelEndResult = enum
    ## Outcome of closing a level: nothing to emit, one event, or an
    ## additional map end event.
    lerNothing, lerOne, lerAdditionalMapEnd
|
|
|
|
|
|
|
|
const
  space = {' ', '\t'}                  ## YAML whitespace inside a line
  lineEnd = {'\l', '\c', EndOfFile}    ## characters terminating a line
  spaceOrLineEnd = {' ', '\t', '\l', '\c', EndOfFile}
  digits = {'0'..'9'}
  flowIndicators = {'[', ']', '{', '}', ','}

  # Multi-byte UTF-8 renderings of special escape targets (\N, \_, \L, \P).
  UTF8NextLine = toUTF8(0x85.Rune)
  UTF8NonBreakingSpace = toUTF8(0xA0.Rune)
  UTF8LineSeparator = toUTF8(0x2028.Rune)
  UTF8ParagraphSeparator = toUTF8(0x2029.Rune)
  UnknownIndentation = int.low  ## sentinel: indentation not yet determined
|
|
|
|
|
|
|
|
proc newYamlParser*(tagLib: TagLibrary = initExtendedTagLibrary(),
                    callback: WarningCallback = nil): YamlParser =
  ## Creates a parser using the given tag library and an optional
  ## warning callback (nil disables warnings).
  result = YamlParser(tagLib: tagLib, callback: callback)
|
|
|
|
|
|
|
|
# Line number (1-based, from lexbase) of the parser's current position.
proc getLineNumber*(p: YamlParser): int = p.lexer.lineNumber
|
2016-08-30 20:15:29 +00:00
|
|
|
|
2016-08-15 19:30:49 +00:00
|
|
|
# Column (1-based) of the token most recently marked via startToken.
proc getColNumber*(p: YamlParser): int = p.tokenstart + 1 # column is 1-based
|
|
|
|
|
|
|
|
proc getLineContent*(p: YamlParser, marker: bool = true): string =
  ## Returns the lexer's current line. When `marker` is true, a second
  ## line containing a ``^`` under the current token start is appended.
  result = p.lexer.getCurrentLine(false)
  if marker:
    result.add(repeat(' ', p.tokenstart))
    result.add("^\n")
|
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
# Shorthand accessor: mutable view of the underlying lexer.
proc lexer(c: ParserContext): var BaseLexer {.inline.} = c.p.lexer
|
|
|
|
|
2016-08-15 19:30:49 +00:00
|
|
|
# Debug trace output, compiled in only with -d:yamlDebug.
# A failing write to stdout is deliberately ignored.
template debug(message: string) {.dirty.} =
  when defined(yamlDebug):
    try: styledWriteLine(stdout, fgBlue, message)
    except IOError: discard
|
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
proc generateError(c: ParserContext, message: string):
    ref YamlParserError {.raises: [].} =
  ## Builds (does not raise) a YamlParserError carrying the current
  ## position and the offending line with a column marker.
  let err = newException(YamlParserError, message)
  err.line = c.lexer.lineNumber
  err.column = c.p.tokenstart + 1
  err.lineContent = c.p.getLineContent(true)
  result = err
|
2016-08-15 19:30:49 +00:00
|
|
|
|
|
|
|
proc generateError(lx: BaseLexer, message: string):
    ref YamlParserError {.raises: [].} =
  ## Variant of generateError for places where only the lexer is at hand;
  ## the column marker is placed at the lexer's current position.
  let err = newException(YamlParserError, message)
  err.line = lx.lineNumber
  err.column = lx.bufpos + 1
  let markerLine = repeat(' ', lx.getColNumber(lx.bufpos)) & "^\n"
  err.lineContent = lx.getCurrentLine(false) & markerLine
  result = err
|
|
|
|
|
|
|
|
# Consumes a '\c' (and a following '\l' of a CRLF pair, per lexbase).
# handleCR may refill the buffer and thus perform I/O; the broad except
# intentionally wraps any such failure into a YamlParserError.
template lexCR(lexer: BaseLexer) {.dirty.} =
  try: lexer.bufpos = lexer.handleCR(lexer.bufpos)
  except:
    var e = generateError(lexer, "I/O Error: " & getCurrentExceptionMsg())
    e.parent = getCurrentException()
    raise e
|
|
|
|
|
|
|
|
# Consumes a '\l' line break. As with lexCR, buffer refills may do I/O,
# so any exception is wrapped into a YamlParserError.
template lexLF(lexer: BaseLexer) {.dirty.} =
  try: lexer.bufpos = lexer.handleLF(lexer.bufpos)
  except:
    var e = generateError(lexer, "I/O Error: " & getCurrentExceptionMsg())
    e.parent = getCurrentException()
    raise e
|
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
# Invokes the user-supplied warning callback (if any) with the current
# position and msg. Any exception the callback raises is re-wrapped as a
# YamlParserError so the declared raises list stays accurate.
proc callCallback(c: ParserContext, msg: string) {.raises: [YamlParserError].} =
  try:
    if not isNil(c.p.callback):
      c.p.callback(c.lexer.lineNumber, c.p.getColNumber(), c.p.getLineContent(),
                   msg)
  except:
    var e = newException(YamlParserError,
        "Warning callback raised exception: " & getCurrentExceptionMsg())
    e.parent = getCurrentException()
    raise e
|
|
|
|
|
|
|
|
proc addMultiple(s: var string, c: char, num: int) {.raises: [], inline.} =
  ## Appends `num` copies of `c` to `s`; does nothing when `num` < 1.
  for _ in countup(1, num):
    s.add(c)
|
|
|
|
|
|
|
|
# Truncates a reused buffer to length 0 without releasing its storage.
proc reset(buffer: var string) {.raises: [], inline.} = buffer.setLen(0)
|
|
|
|
|
|
|
|
# Creates a fresh level of the given kind whose indentation is not yet
# known (it is fixed later, when the first node of the level is seen).
proc initLevel(k: FastParseLevelKind): FastParseLevel {.raises: [], inline.} =
  FastParseLevel(kind: k, indentation: UnknownIndentation)
|
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
# Emits an empty scalar carrying the pending tag/anchor, then clears the
# pending properties so they are not applied to the next node as well.
proc emptyScalar(c: ParserContext): YamlStreamEvent {.raises: [], inline.} =
  result = scalarEvent("", c.tag, c.anchor)
  c.tag = yTagQuestionMark
  c.anchor = yAnchorNone
|
|
|
|
|
|
|
|
# Emits the buffered scalar content with the pending tag/anchor and
# clears the pending properties afterwards.
proc currentScalar(c: ParserContext): YamlStreamEvent {.raises: [], inline.} =
  result = YamlStreamEvent(kind: yamlScalar, scalarTag: c.tag,
                           scalarAnchor: c.anchor, scalarContent: c.content)
  c.tag = yTagQuestionMark
  c.anchor = yAnchorNone
|
|
|
|
|
|
|
|
# Consumes a line break at the current position. Returns true iff EOF was
# hit (EOF returns early and is never counted in c.newlines).
# NOTE(review): when the current char is neither a break nor EOF, the
# `else: discard` still falls through to the newline increment — assumes
# callers only invoke this at an actual line end; confirm at call sites.
proc handleLineEnd(c: ParserContext, incNewlines: static[bool]): bool =
  case c.lexer.buf[c.lexer.bufpos]
  of '\l': c.lexer.lexLF()
  of '\c': c.lexer.lexCR()
  of EndOfFile: return true
  else: discard
  when incNewlines: c.newlines.inc()
|
2016-08-15 19:30:49 +00:00
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
# Opens a new collection level (map, single-pair map or sequence),
# emitting the corresponding start event with the pending tag/anchor.
# Fixes the level's indentation if still unknown, pushes it on the
# ancestry stack and starts a fresh unknown level for the first child.
proc objectStart(c: ParserContext, k: static[YamlStreamEventKind],
                 single: bool = false): YamlStreamEvent {.raises: [].} =
  yAssert(c.level.kind == fplUnknown)
  when k == yamlStartMap:
    result = startMapEvent(c.tag, c.anchor)
    if single:
      debug("started single-pair map at " &
          (if c.level.indentation == UnknownIndentation: $c.indentation else:
           $c.level.indentation))
      c.level.kind = fplSinglePairKey
    else:
      debug("started map at " &
          (if c.level.indentation == UnknownIndentation: $c.indentation else:
           $c.level.indentation))
      c.level.kind = fplMapKey
  else:
    result = startSeqEvent(c.tag, c.anchor)
    debug("started sequence at " &
        (if c.level.indentation == UnknownIndentation: $c.indentation else:
         $c.level.indentation))
    c.level.kind = fplSequence
  # properties were consumed by the start event
  c.tag = yTagQuestionMark
  c.anchor = yAnchorNone
  if c.level.indentation == UnknownIndentation:
    c.level.indentation = c.indentation
  c.ancestry.add(c.level)
  c.level = initLevel(fplUnknown)
|
|
|
|
|
|
|
|
# Resets all per-document state: shorthand table (with the two default
# handles ! and !!), anchor table and counter, pending node properties,
# and the level stack (a document pseudo-level with indentation -1).
proc initDocValues(c: ParserContext) {.raises: [].} =
  c.shorthands = initTable[string, string]()
  c.p.anchors = initTable[string, AnchorId]()
  c.shorthands["!"] = "!"
  c.shorthands["!!"] = "tag:yaml.org,2002:"
  c.nextAnchorId = 0.AnchorId
  c.level = initLevel(fplUnknown)
  c.tag = yTagQuestionMark
  c.anchor = yAnchorNone
  c.ancestry.add(FastParseLevel(kind: fplDocument, indentation: -1))
|
|
|
|
|
|
|
|
# Records the current column as the start of the token being lexed; used
# for error positions and the ^ marker in getLineContent.
proc startToken(c: ParserContext) {.raises: [], inline.} =
  c.p.tokenstart = c.lexer.getColNumber(c.lexer.bufpos)
|
|
|
|
|
|
|
|
proc anchorName(c: ParserContext) {.raises: [].} =
  ## Lexes an anchor/alias name into c.content, starting after the
  ## introducing `&`/`*`. Stops (without consuming) at whitespace, a line
  ## end or a flow indicator.
  debug("lex: anchorName")
  while true:
    c.lexer.bufpos.inc()
    let cur = c.lexer.buf[c.lexer.bufpos]
    if cur in spaceOrLineEnd or cur in {'[', ']', '{', '}', ','}:
      break
    c.content.add(cur)
|
|
|
|
|
|
|
|
# Processes an `&anchor` property: lexes the name, assigns the next free
# AnchorId as the pending anchor and registers the name for later aliases.
# Raises if a node or another anchor is already pending.
proc handleAnchor(c: ParserContext) {.raises: [YamlParserError].} =
  c.startToken()
  if c.level.kind != fplUnknown: raise c.generateError("Unexpected token")
  if c.anchor != yAnchorNone:
    raise c.generateError("Only one anchor is allowed per node")
  c.content.reset()
  c.anchorName()
  c.anchor = c.nextAnchorId
  c.p.anchors[c.content] = c.anchor
  c.nextAnchorId = AnchorId(int(c.nextAnchorId) + 1)
|
2016-08-15 19:30:49 +00:00
|
|
|
|
|
|
|
proc finishLine(lexer: var BaseLexer) {.raises: [], inline.} =
  ## Skips ahead to the next line break or EOF without consuming it.
  debug("lex: finishLine")
  while not (lexer.buf[lexer.bufpos] in lineEnd):
    inc(lexer.bufpos)
|
|
|
|
|
|
|
|
proc skipWhitespace(lexer: var BaseLexer) {.raises: [], inline.} =
  ## Consumes any run of spaces and tabs at the current position.
  debug("lex: skipWhitespace")
  while lexer.buf[lexer.bufpos] in space:
    inc(lexer.bufpos)
|
|
|
|
|
|
|
|
# TODO: {.raises: [].}
# Skips any mix of whitespace, line breaks and #-comments.
# NOTE(review): when the *first* character is '#', the proc does nothing
# at all — presumably the caller handles a leading comment itself;
# confirm against call sites before changing.
proc skipWhitespaceCommentsAndNewlines(lexer: var BaseLexer) {.inline.} =
  debug("lex: skipWhitespaceCommentsAndNewlines")
  if lexer.buf[lexer.bufpos] != '#':
    while true:
      case lexer.buf[lexer.bufpos]
      of space: lexer.bufpos.inc()
      of '\l': lexer.lexLF()
      of '\c': lexer.lexCR()
      of '#': # also skip comments
        lexer.bufpos.inc()
        while lexer.buf[lexer.bufpos] notin lineEnd:
          lexer.bufpos.inc()
      else: break
|
|
|
|
|
|
|
|
proc skipIndentation(lexer: var BaseLexer) {.raises: [], inline.} =
  ## Consumes leading spaces only; tabs are not valid YAML indentation
  ## and are left for the caller to deal with.
  debug("lex: skipIndentation")
  while lexer.buf[lexer.bufpos] == ' ':
    inc(lexer.bufpos)
|
|
|
|
|
|
|
|
# Lexes the name after a '%': recognizes exactly "YAML" and "TAG"
# (followed by space or line end) via hand-unrolled character matching;
# anything else yields ldUnknown. Always consumes up to the next
# space/line end so the caller can skip unknown directives.
proc directiveName(lexer: var BaseLexer, directive: var LexedDirective)
    {.raises: [].} =
  debug("lex: directiveName")
  directive = ldUnknown
  lexer.bufpos.inc()
  if lexer.buf[lexer.bufpos] == 'Y':
    lexer.bufpos.inc()
    if lexer.buf[lexer.bufpos] == 'A':
      lexer.bufpos.inc()
      if lexer.buf[lexer.bufpos] == 'M':
        lexer.bufpos.inc()
        if lexer.buf[lexer.bufpos] == 'L':
          lexer.bufpos.inc()
          if lexer.buf[lexer.bufpos] in spaceOrLineEnd:
            directive = ldYaml
  elif lexer.buf[lexer.bufpos] == 'T':
    lexer.bufpos.inc()
    if lexer.buf[lexer.bufpos] == 'A':
      lexer.bufpos.inc()
      if lexer.buf[lexer.bufpos] == 'G':
        lexer.bufpos.inc()
        if lexer.buf[lexer.bufpos] in spaceOrLineEnd:
          directive = ldTag
  # consume the rest of the (possibly unknown) directive name
  while lexer.buf[lexer.bufpos] notin spaceOrLineEnd:
    lexer.bufpos.inc()
|
|
|
|
|
|
|
|
# Lexes the version argument of a %YAML directive ("digits '.' digits")
# into `o`. Raises YamlParserError on any malformed number; ends on
# space or line end.
proc yamlVersion(lexer: var BaseLexer, o: var string)
    {.raises: [YamlParserError], inline.} =
  debug("lex: yamlVersion")
  while lexer.buf[lexer.bufpos] in space: lexer.bufpos.inc()
  var c = lexer.buf[lexer.bufpos]
  if c notin digits: raise lexer.generateError("Invalid YAML version number")
  o.add(c)
  lexer.bufpos.inc()
  c = lexer.buf[lexer.bufpos]
  # remaining digits of the major version
  while c in digits:
    lexer.bufpos.inc()
    o.add(c)
    c = lexer.buf[lexer.bufpos]
  if lexer.buf[lexer.bufpos] != '.':
    raise lexer.generateError("Invalid YAML version number")
  o.add('.')
  lexer.bufpos.inc()
  c = lexer.buf[lexer.bufpos]
  if c notin digits: raise lexer.generateError("Invalid YAML version number")
  o.add(c)
  lexer.bufpos.inc()
  c = lexer.buf[lexer.bufpos]
  # remaining digits of the minor version
  while c in digits:
    o.add(c)
    lexer.bufpos.inc()
    c = lexer.buf[lexer.bufpos]
  if lexer.buf[lexer.bufpos] notin spaceOrLineEnd:
    raise lexer.generateError("Invalid YAML version number")
|
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
# Verifies that nothing but whitespace and an optional #-comment follows
# on the current line; positions the lexer at the line break / EOF.
# Raises when any other content is found.
proc lineEnding(c: ParserContext) {.raises: [YamlParserError], inline.} =
  debug("lex: lineEnding")
  if c.lexer.buf[c.lexer.bufpos] notin lineEnd:
    while c.lexer.buf[c.lexer.bufpos] in space: c.lexer.bufpos.inc()
    if c.lexer.buf[c.lexer.bufpos] in lineEnd: discard
    elif c.lexer.buf[c.lexer.bufpos] == '#':
      while c.lexer.buf[c.lexer.bufpos] notin lineEnd: c.lexer.bufpos.inc()
    else:
      c.startToken()
      raise c.generateError("Unexpected token (expected comment or line end)")
|
2016-08-15 19:30:49 +00:00
|
|
|
|
|
|
|
# Lexes the shorthand part of a %TAG directive into `shorthand`:
# '!' [alnum-]* '!' — the primary handle "!" is allowed as-is.
# Raises on illegal characters or a missing trailing space.
proc tagShorthand(lexer: var BaseLexer, shorthand: var string) {.inline.} =
  debug("lex: tagShorthand")
  while lexer.buf[lexer.bufpos] in space: lexer.bufpos.inc()
  yAssert lexer.buf[lexer.bufpos] == '!'
  shorthand.add('!')
  lexer.bufpos.inc()
  var ch = lexer.buf[lexer.bufpos]
  if ch in spaceOrLineEnd: discard  # primary handle "!"
  else:
    while ch != '!':
      case ch
      of 'a' .. 'z', 'A' .. 'Z', '0' .. '9', '-':
        shorthand.add(ch)
        lexer.bufpos.inc()
        ch = lexer.buf[lexer.bufpos]
      else: raise lexer.generateError("Illegal character in tag shorthand")
    shorthand.add(ch)  # the closing '!'
    lexer.bufpos.inc()
  if lexer.buf[lexer.bufpos] notin spaceOrLineEnd:
    raise lexer.generateError("Missing space after tag shorthand")
|
|
|
|
|
|
|
|
# Lexes the URI-prefix argument of a %TAG directive into `uri`.
# A leading '!' (local tag prefix) is accepted; afterwards only
# URI characters are allowed until space/line end.
proc tagUriMapping(lexer: var BaseLexer, uri: var string)
    {.raises: [YamlParserError].} =
  debug("lex: tagUriMapping")
  while lexer.buf[lexer.bufpos] in space:
    lexer.bufpos.inc()
  var ch = lexer.buf[lexer.bufpos]
  if ch == '!':
    uri.add(ch)
    lexer.bufpos.inc()
    ch = lexer.buf[lexer.bufpos]
  while ch notin spaceOrLineEnd:
    case ch
    of 'a' .. 'z', 'A' .. 'Z', '0' .. '9', '#', ';', '/', '?', ':', '@', '&',
       '-', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')':
      uri.add(ch)
      lexer.bufpos.inc()
      ch = lexer.buf[lexer.bufpos]
    else: raise lexer.generateError("Invalid tag uri")
|
|
|
|
|
|
|
|
proc directivesEndMarker(lexer: var BaseLexer, success: var bool)
    {.raises: [].} =
  ## Peeks (without consuming) whether the input at the current position
  ## is the directives end marker `---` followed by space or line end.
  debug("lex: directivesEndMarker")
  success = lexer.buf[lexer.bufpos] == '-' and
      lexer.buf[lexer.bufpos + 1] == '-' and
      lexer.buf[lexer.bufpos + 2] == '-' and
      lexer.buf[lexer.bufpos + 3] in spaceOrLineEnd
|
|
|
|
|
|
|
|
proc documentEndMarker(lexer: var BaseLexer, success: var bool) {.raises: [].} =
  ## Peeks (without consuming) whether the input at the current position
  ## is the document end marker `...` followed by space or line end.
  debug("lex: documentEndMarker")
  success = lexer.buf[lexer.bufpos] == '.' and
      lexer.buf[lexer.bufpos + 1] == '.' and
      lexer.buf[lexer.bufpos + 2] == '.' and
      lexer.buf[lexer.bufpos + 3] in spaceOrLineEnd
|
|
|
|
|
|
|
|
# Lexes a \x/\u/\U escape of `length` hex digits and returns the code
# point encoded as UTF-8. Raises on premature line end/EOF or non-hex
# characters. Digits are accumulated MSB-first via shifted ORs.
proc unicodeSequence(lexer: var BaseLexer, length: int):
    string {.raises: [YamlParserError].} =
  debug("lex: unicodeSequence")
  var unicodeChar = 0.int
  for i in countup(0, length - 1):
    lexer.bufpos.inc()
    let
      digitPosition = length - i - 1
      ch = lexer.buf[lexer.bufpos]
    case ch
    of EndOFFile, '\l', '\c':
      raise lexer.generateError("Unfinished unicode escape sequence")
    of '0' .. '9':
      unicodeChar = unicodechar or (int(ch) - 0x30) shl (digitPosition * 4)
    of 'A' .. 'F':
      unicodeChar = unicodechar or (int(ch) - 0x37) shl (digitPosition * 4)
    of 'a' .. 'f':
      unicodeChar = unicodechar or (int(ch) - 0x57) shl (digitPosition * 4)
    else:
      raise lexer.generateError(
          "Invalid character in unicode escape sequence")
  return toUTF8(Rune(unicodeChar))
|
|
|
|
|
|
|
|
proc byteSequence(lexer: var BaseLexer): char {.raises: [YamlParserError].} =
  ## Lexes a two-digit hex escape (%NN, as used in tag URIs) and returns
  ## the raw byte. Raises on premature line end/EOF or non-hex digits.
  debug("lex: byteSequence")
  var charCode = 0.int8
  for i in 0 .. 1:
    lexer.bufpos.inc()
    let
      digitPosition = int8(1 - i)
      ch = lexer.buf[lexer.bufpos]
    case ch
    of EndOfFile, '\l', '\c':
      # fixed: this branch previously matched the literal character 'r'
      # instead of '\c' (compare unicodeSequence above), so a carriage
      # return fell through to the generic "Invalid character" error and
      # a legitimate-looking 'r' wrongly reported "Unfinished".
      raise lexer.generateError("Unfinished octet escape sequence")
    of '0' .. '9':
      charCode = charCode or (int8(ch) - 0x30.int8) shl (digitPosition * 4)
    of 'A' .. 'F':
      charCode = charCode or (int8(ch) - 0x37.int8) shl (digitPosition * 4)
    of 'a' .. 'f':
      charCode = charCode or (int8(ch) - 0x57.int8) shl (digitPosition * 4)
    else:
      raise lexer.generateError("Invalid character in octet escape sequence")
  return char(charCode)
|
|
|
|
|
|
|
|
# TODO: {.raises: [].}
proc processQuotedWhitespace(c: ParserContext, newlines: var int) =
  ## Handles a run of whitespace inside a quoted scalar. Trailing spaces
  ## before a line break are dropped (buffered in c.after); if no break
  ## follows, they are flushed into c.content verbatim. Subsequent blank
  ## lines are counted and folded per YAML rules: one break becomes a
  ## space, n > 1 breaks become n-1 newlines.
  c.after.reset()
  block outer:
    while true:
      case c.lexer.buf[c.lexer.bufpos]
      of ' ', '\t': c.after.add(c.lexer.buf[c.lexer.bufpos])
      of '\l':
        c.lexer.bufpos = c.lexer.handleLF(c.lexer.bufpos)
        break
      of '\c':
        # fixed: this branch consumed the CR with handleLF, which does
        # not skip the '\l' of a CRLF pair and mis-tracks line numbers;
        # lexbase requires handleCR for '\c'.
        c.lexer.bufpos = c.lexer.handleCR(c.lexer.bufpos)
        break
      else:
        # no line break after the whitespace: keep it literally
        c.content.add(c.after)
        break outer
      c.lexer.bufpos.inc()
    while true:
      case c.lexer.buf[c.lexer.bufpos]
      of ' ', '\t': discard
      of '\l':
        c.lexer.lexLF()
        newlines.inc()
        continue
      of '\c':
        c.lexer.lexCR()
        newlines.inc()
        continue
      else:
        # fold the collected breaks into the scalar content
        if newlines == 0: discard
        elif newlines == 1: c.content.add(' ')
        else: c.content.addMultiple('\l', newlines - 1)
        break
      c.lexer.bufpos.inc()
|
2016-08-15 19:30:49 +00:00
|
|
|
|
|
|
|
# TODO: {.raises: [YamlParserError].}
# Lexes a double-quoted scalar into c.content, starting at the opening
# '"' and consuming through the closing '"'. Handles all YAML escape
# sequences; whitespace runs and line breaks are folded via
# processQuotedWhitespace.
proc doubleQuotedScalar(c: ParserContext) =
  debug("lex: doubleQuotedScalar")
  c.lexer.bufpos.inc()
  while true:
    var ch = c.lexer.buf[c.lexer.bufpos]
    case ch
    of EndOfFile:
      raise c.lexer.generateError("Unfinished double quoted string")
    of '\\':
      c.lexer.bufpos.inc()
      case c.lexer.buf[c.lexer.bufpos]
      of EndOfFile:
        raise c.lexer.generateError("Unfinished escape sequence")
      of '0': c.content.add('\0')
      of 'a': c.content.add('\x07')
      of 'b': c.content.add('\x08')
      of '\t', 't': c.content.add('\t')
      of 'n': c.content.add('\l')
      of 'v': c.content.add('\v')
      of 'f': c.content.add('\f')
      of 'r': c.content.add('\c')
      of 'e': c.content.add('\e')
      of ' ': c.content.add(' ')
      of '"': c.content.add('"')
      of '/': c.content.add('/')
      of '\\': c.content.add('\\')
      of 'N': c.content.add(UTF8NextLine)
      of '_': c.content.add(UTF8NonBreakingSpace)
      of 'L': c.content.add(UTF8LineSeparator)
      of 'P': c.content.add(UTF8ParagraphSeparator)
      of 'x': c.content.add(c.lexer.unicodeSequence(2))
      of 'u': c.content.add(c.lexer.unicodeSequence(4))
      of 'U': c.content.add(c.lexer.unicodeSequence(8))
      of '\l', '\c':
        # escaped line break: fold, but the break itself is removed
        var newlines = 0
        c.processQuotedWhitespace(newlines)
        continue
      else: raise c.lexer.generateError("Illegal character in escape sequence")
    of '"':
      c.lexer.bufpos.inc()
      break
    of '\l', '\c', '\t', ' ':
      # unescaped whitespace: fold (an unescaped break counts as one)
      var newlines = 1
      c.processQuotedWhitespace(newlines)
      continue
    else: c.content.add(ch)
    c.lexer.bufpos.inc()
|
2016-08-15 19:30:49 +00:00
|
|
|
|
|
|
|
# TODO: {.raises: [].}
# Lexes a single-quoted scalar into c.content, starting at the opening
# "'" and consuming through the closing "'". The only escape is '' for a
# literal quote; whitespace is folded via processQuotedWhitespace.
proc singleQuotedScalar(c: ParserContext) =
  debug("lex: singleQuotedScalar")
  c.lexer.bufpos.inc()
  while true:
    case c.lexer.buf[c.lexer.bufpos]
    of '\'':
      c.lexer.bufpos.inc()
      if c.lexer.buf[c.lexer.bufpos] == '\'': c.content.add('\'')
      else: break  # lone quote terminates the scalar
    of EndOfFile: raise c.lexer.generateError("Unfinished single quoted string")
    of '\l', '\c', '\t', ' ':
      var newlines = 1
      c.processQuotedWhitespace(newlines)
      continue
    else: c.content.add(c.lexer.buf[c.lexer.bufpos])
    c.lexer.bufpos.inc()
|
2016-08-15 19:30:49 +00:00
|
|
|
|
|
|
|
proc isPlainSafe(lexer: BaseLexer, index: int, context: YamlContext): bool
    {.raises: [].} =
  ## Reports whether the character at `index` may occur inside a plain
  ## scalar: whitespace/line ends never may, flow indicators only in
  ## block context.
  # fixed: the body ignored `index` and always read bufpos + 1. Every
  # caller in this file passes `bufpos + 1`, so behavior is unchanged
  # for them, but the parameter now means what the signature promises.
  case lexer.buf[index]
  of spaceOrLineEnd: result = false
  of flowIndicators: result = context == cBlock
  else: result = true
|
|
|
|
|
|
|
|
# tried this for performance optimization, but it didn't optimize any
|
|
|
|
# performance. keeping it around for future reference.
|
|
|
|
#const
|
|
|
|
# plainCharOut = {'!', '\"', '$'..'9', ';'..'\xFF'}
|
|
|
|
# plainCharIn = {'!', '\"', '$'..'+', '-'..'9', ';'..'Z', '\\', '^'..'z',
|
|
|
|
# '|', '~'..'\xFF'}
|
|
|
|
#template isPlainChar(c: char, context: YamlContext): bool =
|
|
|
|
# when context == cBlock: c in plainCharOut
|
|
|
|
# else: c in plainCharIn
|
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
# Lexes a plain (unquoted) scalar into c.content, starting at the current
# character. Trailing whitespace is buffered in c.after and only flushed
# when more scalar content follows. The scalar ends at a line end, a
# `: ` mapping indicator, a ` #` comment, or (in flow context) a flow
# indicator. `break outer` exits without flushing c.after; plain `break`
# in the inner loop resumes the outer scan after flushing.
proc plainScalar(c: ParserContext, context: static[YamlContext])
    {.raises: [].} =
  debug("lex: plainScalar")
  c.content.add(c.lexer.buf[c.lexer.bufpos])
  block outer:
    while true:
      c.lexer.bufpos.inc()
      let ch = c.lexer.buf[c.lexer.bufpos]
      case ch
      of ' ', '\t':
        # buffer whitespace; it is dropped if the scalar ends here
        c.after.setLen(1)
        c.after[0] = ch
        while true:
          c.lexer.bufpos.inc()
          let ch2 = c.lexer.buf[c.lexer.bufpos]
          case ch2
          of ' ', '\t': c.after.add(ch2)
          of lineEnd: break outer
          of ':':
            if c.lexer.isPlainSafe(c.lexer.bufpos + 1, context):
              c.content.add(c.after & ':')
              break
            else: break outer
          of '#': break outer
          of flowIndicators:
            if context == cBlock:
              c.content.add(c.after)
              c.content.add(ch2)
              break
            else: break outer
          else:
            c.content.add(c.after)
            c.content.add(ch2)
            break
      of flowIndicators:
        when context == cFlow: break
        else: c.content.add(ch)
      of lineEnd: break
      of ':':
        if c.lexer.isPlainSafe(c.lexer.bufpos + 1, context): c.content.add(':')
        else: break outer
      else: c.content.add(ch)
  debug("lex: \"" & c.content & '\"')
|
2016-08-15 19:30:49 +00:00
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
# Continues a plain scalar on a new line: folds the collected line breaks
# (one break -> space, n -> n-1 newlines) and lexes the next fragment.
proc continueMultilineScalar(c: ParserContext) {.raises: [].} =
  c.content.add(if c.newlines == 1: " " else: repeat('\l', c.newlines - 1))
  c.startToken()
  c.plainScalar(cBlock)
|
2016-08-30 20:15:29 +00:00
|
|
|
|
2016-08-15 19:30:49 +00:00
|
|
|
# Switches the current level into scalar mode of the given type and
# resets the newline counter. {.dirty.}: expects `c` at the call site.
template startScalar(t: ScalarType) {.dirty.} =
  c.newlines = 0
  c.level.kind = fplScalar
  c.scalarType = t
|
2016-08-30 20:15:29 +00:00
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
proc blockScalarHeader(c: ParserContext): bool =
  ## Lexes a block scalar header (after `|` or `>`): chomping indicator
  ## (`+`/`-`, at most one) and explicit indentation indicator (1-9, at
  ## most one), then the rest of the line. Returns true iff EOF was
  ## reached; otherwise switches the level into scalar mode.
  debug("lex: blockScalarHeader")
  c.chomp = ctClip
  c.level.indentation = UnknownIndentation
  if c.tag == yTagQuestionMark: c.tag = yTagExclamationMark
  let t = if c.lexer.buf[c.lexer.bufpos] == '|': stLiteral else: stFolded
  while true:
    c.lexer.bufpos.inc()
    case c.lexer.buf[c.lexer.bufpos]
    of '+':
      if c.chomp != ctClip:
        raise c.lexer.generateError("Only one chomping indicator is allowed")
      c.chomp = ctKeep
    of '-':
      if c.chomp != ctClip:
        raise c.lexer.generateError("Only one chomping indicator is allowed")
      c.chomp = ctStrip
    of '1'..'9':
      if c.level.indentation != UnknownIndentation:
        # fixed: message previously leaked the internal identifier
        # "p.indentation" into user-facing output
        raise c.lexer.generateError("Only one indentation indicator is allowed")
      # explicit indent is relative to the parent level
      c.level.indentation = c.ancestry[c.ancestry.high].indentation +
          ord(c.lexer.buf[c.lexer.bufpos]) - ord('\x30')
    of spaceOrLineEnd: break
    else:
      raise c.lexer.generateError(
          "Illegal character in block scalar header: '" &
          c.lexer.buf[c.lexer.bufpos] & "'")
  c.recentWasMoreIndented = false
  c.lineEnding()
  result = c.handleLineEnd(true)
  if not result:
    startScalar(t)
    c.content.reset()
|
2016-08-15 19:30:49 +00:00
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
# Processes one content line of a block scalar: fixes the scalar's
# indentation on its first non-empty line, preserves more-indented lines
# literally, folds same-indented lines for folded scalars, and appends
# the line's text. Returns true iff EOF was reached.
proc blockScalarLine(c: ParserContext):
    bool {.raises: [YamlParserError].} =
  debug("lex: blockScalarLine")
  result = false
  if c.level.indentation == UnknownIndentation:
    # first line: empty lines are only counted, the first non-empty line
    # determines the scalar's indentation
    if c.lexer.buf[c.lexer.bufpos] in lineEnd:
      return c.handleLineEnd(true)
    else:
      c.level.indentation = c.indentation
      c.content.addMultiple('\l', c.newlines)
  elif c.indentation > c.level.indentation or
      c.lexer.buf[c.lexer.bufpos] == '\t':
    # more-indented line: keep breaks and extra indentation literally
    c.content.addMultiple('\l', c.newlines)
    c.recentWasMoreIndented = true
    c.content.addMultiple(' ', c.indentation - c.level.indentation)
  elif c.scalarType == stFolded:
    # folded scalar: single break -> space, n breaks -> n-1 newlines
    if c.recentWasMoreIndented:
      c.recentWasMoreIndented = false
      c.newlines.inc()
    if c.newlines == 0: discard
    elif c.newlines == 1: c.content.add(' ')
    else: c.content.addMultiple('\l', c.newlines - 1)
  else: c.content.addMultiple('\l', c.newlines)
  c.newlines = 0
  while c.lexer.buf[c.lexer.bufpos] notin lineEnd:
    c.content.add(c.lexer.buf[c.lexer.bufpos])
    c.lexer.bufpos.inc()
  result = c.handleLineEnd(true)
|
|
|
|
|
|
|
|
# Lexes a tag property into c.content, starting at the '!'. Handles both
# shorthand tags (`!handle!suffix` — shorthandEnd is set to the index of
# the closing '!' of the handle, 0 if there is no secondary handle) and
# verbatim tags (`!<uri>` — shorthandEnd is set to -1 and c.content holds
# only the URI). %NN escapes are decoded in the suffix via byteSequence.
proc tagHandle(c: ParserContext, shorthandEnd: var int)
    {.raises: [YamlParserError].} =
  debug("lex: tagHandle")
  shorthandEnd = 0
  c.content.add(c.lexer.buf[c.lexer.bufpos])
  var i = 0
  while true:
    c.lexer.bufpos.inc()
    i.inc()
    let ch = c.lexer.buf[c.lexer.bufpos]
    case ch
    of spaceOrLineEnd:
      if shorthandEnd == -1:
        raise c.lexer.generateError("Unclosed verbatim tag")
      break
    of '!':
      if shorthandEnd == -1 and i == 2:
        # "!!" prefix of the secondary handle
        c.content.add(ch)
        continue
      elif shorthandEnd != 0:
        raise c.lexer.generateError("Illegal character in tag suffix")
      shorthandEnd = i
      c.content.add(ch)
    of 'a' .. 'z', 'A' .. 'Z', '0' .. '9', '#', ';', '/', '?', ':', '@', '&',
       '-', '=', '+', '$', '_', '.', '~', '*', '\'', '(', ')':
      c.content.add(ch)
    of ',':
      if shortHandEnd > 0: break # ',' after shorthand is flow indicator
      c.content.add(ch)
    of '<':
      if i == 1:
        # verbatim tag: discard the leading '!', collect only the URI
        shorthandEnd = -1
        c.content.reset()
      else: raise c.lexer.generateError("Illegal character in tag handle")
    of '>':
      if shorthandEnd == -1:
        c.lexer.bufpos.inc()
        if c.lexer.buf[c.lexer.bufpos] notin spaceOrLineEnd:
          raise c.lexer.generateError("Missing space after verbatim tag handle")
        break
      else: raise c.lexer.generateError("Illegal character in tag handle")
    of '%':
      if shorthandEnd != 0: c.content.add(c.lexer.byteSequence())
      else: raise c.lexer.generateError("Illegal character in tag handle")
    else: raise c.lexer.generateError("Illegal character in tag handle")
|
|
|
|
|
|
|
|
proc handleTagHandle(c: ParserContext) {.raises: [YamlParserError].} =
  ## Lexes a tag handle at the current position, resolves it (via registered
  ## shorthands or as a verbatim URI) and stores the resulting TagId in
  ## ``c.tag``. Raises on duplicate tags or undefined shorthands.
  c.startToken()
  if c.level.kind != fplUnknown: raise c.generateError("Unexpected tag handle")
  if c.tag != yTagQuestionMark:
    raise c.generateError("Only one tag handle is allowed per node")
  c.content.reset()
  var shorthandEnd: int
  c.tagHandle(shorthandEnd)
  if shorthandEnd != -1:
    # shorthand form: expand the prefix through the shorthand table
    try:
      c.tagUri.reset()
      c.tagUri.add(c.shorthands[c.content[0..shorthandEnd]])
      c.tagUri.add(c.content[shorthandEnd + 1 .. ^1])
    except KeyError:
      raise c.generateError(
          "Undefined tag shorthand: " & c.content[0..shorthandEnd])
    try: c.tag = c.p.tagLib.tags[c.tagUri]
    except KeyError: c.tag = c.p.tagLib.registerUri(c.tagUri)
  else:
    # verbatim form: c.content already holds the complete URI
    try: c.tag = c.p.tagLib.tags[c.content]
    except KeyError: c.tag = c.p.tagLib.registerUri(c.content)
|
2016-08-15 19:30:49 +00:00
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
proc consumeLineIfEmpty(c: ParserContext, newlines: var int): bool =
  ## Returns true and consumes the rest of the current line if it contains
  ## only whitespace and/or a comment; returns false (leaving the lexer on
  ## the first content char) otherwise.
  ## NOTE(review): the `newlines` parameter is never touched here — looks
  ## like a leftover; confirm before removing it at the call sites.
  result = true
  while true:
    c.lexer.bufpos.inc()
    case c.lexer.buf[c.lexer.bufpos]
    of ' ', '\t': discard
    of '\l':
      c.lexer.lexLF()
      break
    of '\c':
      c.lexer.lexCR()
      break
    of '#', EndOfFile:
      c.lineEnding()
      discard c.handleLineEnd(true)
      break
    else:
      result = false
      break
|
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
proc handlePossibleMapStart(c: ParserContext, e: var YamlStreamEvent,
    flow: bool = false, single: bool = false): bool =
  ## Looks ahead (bounded to 1024 chars) on the current line for a ``:``
  ## that would make the current node an implicit map key. If found, emits
  ## a map start event into ``e`` and returns true.
  result = false
  if c.level.indentation == UnknownIndentation:
    var flowDepth = 0
    var pos = c.lexer.bufpos
    var recentJsonStyle = false
    while pos < c.lexer.bufpos + 1024:
      case c.lexer.buf[pos]
      of ':':
        # JSON-style values (}, ], ', ") allow ':' without following space
        if flowDepth == 0 and (c.lexer.buf[pos + 1] in spaceOrLineEnd or
            recentJsonStyle):
          e = c.objectStart(yamlStartMap, single)
          result = true
          break
      of lineEnd: break
      of '[', '{': flowDepth.inc()
      of '}', ']':
        flowDepth.inc(-1)
        if flowDepth < 0: break
      of '?', ',':
        if flowDepth == 0: break
      of '#':
        if c.lexer.buf[pos - 1] in space: break
      of '"':
        # skip over a double-quoted scalar, honoring escapes
        pos.inc()
        while c.lexer.buf[pos] notin {'"', EndOfFile, '\l', '\c'}:
          if c.lexer.buf[pos] == '\\': pos.inc()
          pos.inc()
        if c.lexer.buf[pos] != '"': break
      of '\'':
        # skip over a single-quoted scalar
        pos.inc()
        while c.lexer.buf[pos] notin {'\'', '\l', '\c', EndOfFile}:
          pos.inc()
      of '&', '*', '!':
        # NOTE(review): the second operand reads buf[c.lexer.bufpos] (the
        # scan start), not buf[pos - 1] — presumably intended to test the
        # char before the indicator; confirm against upstream.
        if pos == c.lexer.bufpos or c.lexer.buf[c.lexer.bufpos] in space:
          # skip anchor/alias/tag token
          pos.inc()
          while c.lexer.buf[pos] notin spaceOrLineEnd:
            pos.inc()
          continue
      else: discard
      if flow and c.lexer.buf[pos] notin space:
        recentJsonStyle = c.lexer.buf[pos] in {']', '}', '\'', '"'}
      pos.inc()
    if c.level.indentation == UnknownIndentation:
      c.level.indentation = c.indentation
|
2016-08-15 19:30:49 +00:00
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
proc handleMapKeyIndicator(c: ParserContext, e: var YamlStreamEvent): bool =
  ## Handles an explicit ``? `` map key indicator in block context.
  ## Returns true when an event was emitted into ``e``.
  # Fix: error messages contained "p.indentation", a leftover from a
  # p -> c refactor leaking an internal identifier into user-facing text.
  result = false
  c.startToken()
  case c.level.kind
  of fplUnknown:
    e = c.objectStart(yamlStartMap)
    result = true
  of fplMapValue:
    if c.level.indentation != c.indentation:
      raise c.generateError("Invalid indentation of map key indicator")
    # previous key had no value: emit an empty scalar for it
    e = scalarEvent("", yTagQuestionMark, yAnchorNone)
    result = true
    c.level.kind = fplMapKey
    c.ancestry.add(c.level)
    c.level = initLevel(fplUnknown)
  of fplMapKey:
    if c.level.indentation != c.indentation:
      raise c.generateError("Invalid indentation of map key indicator")
    c.ancestry.add(c.level)
    c.level = initLevel(fplUnknown)
  of fplSequence:
    raise c.generateError("Unexpected map key indicator (expected '- ')")
  of fplScalar:
    raise c.generateError(
        "Unexpected map key indicator (expected multiline scalar end)")
  of fplSinglePairKey, fplSinglePairValue, fplDocument:
    internalError("Unexpected level kind: " & $c.level.kind)
  c.lexer.skipWhitespace()
  c.indentation = c.lexer.getColNumber(c.lexer.bufpos)
|
2016-08-15 19:30:49 +00:00
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
proc handleBlockSequenceIndicator(c: ParserContext, e: var YamlStreamEvent):
    bool =
  ## Handles a ``- `` block sequence item indicator.
  ## Returns true when an event was emitted into ``e``.
  # Fix: error message contained "p.indentation", a leftover from a
  # p -> c refactor leaking an internal identifier into user-facing text.
  result = false
  c.startToken()
  case c.level.kind
  of fplUnknown:
    e = c.objectStart(yamlStartSeq)
    result = true
  of fplSequence:
    if c.level.indentation != c.indentation:
      raise c.generateError("Invalid indentation of block sequence indicator")
    c.ancestry.add(c.level)
    c.level = initLevel(fplUnknown)
  else: raise c.generateError("Illegal sequence item in map")
  c.lexer.skipWhitespace()
  c.indentation = c.lexer.getColNumber(c.lexer.bufpos)
|
|
|
|
|
|
|
|
proc handleBlockItemStart(c: ParserContext, e: var YamlStreamEvent): bool =
  ## Called when a new node starts in block context. May emit an implicit
  ## map start or an empty scalar; returns true when ``e`` was filled.
  result = false
  case c.level.kind
  of fplUnknown:
    result = c.handlePossibleMapStart(e)
  of fplSequence:
    raise c.generateError(
        "Unexpected token (expected block sequence indicator)")
  of fplMapKey:
    c.ancestry.add(c.level)
    c.level = FastParseLevel(kind: fplUnknown, indentation: c.indentation)
  of fplMapValue:
    # value is missing: emit an empty scalar and continue with the next key
    e = emptyScalar(c)
    result = true
    c.level.kind = fplMapKey
    c.ancestry.add(c.level)
    c.level = FastParseLevel(kind: fplUnknown, indentation: c.indentation)
  of fplScalar, fplSinglePairKey, fplSinglePairValue, fplDocument:
    internalError("Unexpected level kind: " & $c.level.kind)
|
|
|
|
|
|
|
|
proc handleFlowItemStart(c: ParserContext, e: var YamlStreamEvent): bool =
  ## Called when a new node starts in flow context. Only a sequence item
  ## may turn out to be an implicit single-pair map.
  if c.level.kind == fplUnknown and
      c.ancestry[c.ancestry.high].kind == fplSequence:
    result = c.handlePossibleMapStart(e, true, true)
|
|
|
|
|
|
|
|
proc handleFlowPlainScalar(c: ParserContext, e: var YamlStreamEvent) =
  ## Lexes a (possibly multiline) plain scalar in flow context and yields
  ## the scalar event into ``e``. Line breaks are folded per YAML rules:
  ## one break becomes a space, n breaks become n-1 newlines.
  c.content.reset()
  c.startToken()
  c.plainScalar(cFlow)
  if c.lexer.buf[c.lexer.bufpos] in {'{', '}', '[', ']', ',', ':', '#'}:
    discard # scalar ends at a flow indicator / comment
  else:
    c.newlines = 0
    while true:
      case c.lexer.buf[c.lexer.bufpos]
      of ':':
        if c.lexer.isPlainSafe(c.lexer.bufpos + 1, cFlow):
          # ':' continues the scalar; fold pending newlines first
          if c.newlines == 1:
            c.content.add(' ')
            c.newlines = 0
          elif c.newlines > 1:
            c.content.addMultiple(' ', c.newlines - 1)
            c.newlines = 0
          c.plainScalar(cFlow)
        break
      of '#', EndOfFile: break
      of '\l':
        c.lexer.bufpos = c.lexer.handleLF(c.lexer.bufpos)
        c.newlines.inc()
      of '\c':
        c.lexer.bufpos = c.lexer.handleCR(c.lexer.bufpos)
        c.newlines.inc()
      of flowIndicators: break
      of ' ', '\t': c.lexer.skipWhitespace()
      else:
        # next content chunk on a following line; fold pending newlines
        if c.newlines == 1:
          c.content.add(' ')
          c.newlines = 0
        elif c.newlines > 1:
          c.content.addMultiple(' ', c.newlines - 1)
          c.newlines = 0
        c.plainScalar(cFlow)
  e = c.currentScalar()
|
2016-08-17 18:04:59 +00:00
|
|
|
|
2016-08-15 19:30:49 +00:00
|
|
|
# --- macros for defining parser states ---
|
|
|
|
|
2016-08-17 20:50:37 +00:00
|
|
|
template capitalize(s: string): string =
  ## Uppercases the first character of `s`. Prefers `capitalizeAscii`
  ## (available in newer strutils) and falls back to the older, deprecated
  ## `strutils.capitalize` when it is not declared.
  when declared(strutils.capitalizeAscii): strutils.capitalizeAscii(s)
  else: strutils.capitalize(s)
|
|
|
|
|
|
|
|
macro parserStates(names: varargs[untyped]): typed =
  ## Generates a forward declaration for each state in the list:
  ##
  ## proc state<Name>(s: YamlStream, e: var YamlStreamEvent):
  ##     bool {.raises: [YamlParserError].}
  result = newStmtList()
  for name in names:
    let nameId = newIdentNode("state" & capitalize($name.ident))
    result.add(newProc(nameId, [ident("bool"), newIdentDefs(ident("s"),
        ident("YamlStream")), newIdentDefs(ident("e"), newNimNode(nnkVarTy).add(
        ident("YamlStreamEvent")))], newEmptyNode()))
    # NOTE(review): this writes the raises pragma to result[0] on every
    # iteration, i.e. only the FIRST generated proc — presumably the
    # just-added proc (result[^1]) was intended; confirm before changing.
    result[0][4] = newNimNode(nnkPragma).add(newNimNode(nnkExprColonExpr).add(
        ident("raises"), newNimNode(nnkBracket).add(ident("YamlParserError"))))
|
|
|
|
|
|
|
|
proc processStateAsgns(source, target: NimNode) {.compileTime.} =
  ## Recursively copies the children of `source` into `target`, rewriting
  ## the DSL assignments `state = <name>` (-> `s.nextImpl = state<Name>`)
  ## and `stored = <name>` (-> `c.storedState = state<Name>`).
  for child in source.children:
    if child.kind == nnkAsgn and child[0].kind == nnkIdent:
      if $child[0].ident == "state":
        assert child[1].kind == nnkIdent
        var stateExpr: NimNode
        if child[1].kind == nnkIdent and $child[1].ident == "stored":
          # `state = stored` restores the previously saved state
          stateExpr = newDotExpr(ident("c"), ident("storedState"))
        else:
          stateExpr =
              newIdentNode("state" & capitalize($child[1].ident))
        target.add(newAssignment(newDotExpr(
            newIdentNode("s"), newIdentNode("nextImpl")), stateExpr))
        continue
      elif $child[0].ident == "stored":
        assert child[1].kind == nnkIdent
        let stateExpr =
            newIdentNode("state" & capitalize($child[1].ident))
        target.add(newAssignment(newDotExpr(newIdentNode("c"),
            newIdentNode("storedState")), stateExpr))
        continue
    # not a DSL assignment: copy the node and recurse into it
    var rewritten = copyNimNode(child)
    processStateAsgns(child, rewritten)
    target.add(rewritten)
|
|
|
|
|
2016-08-17 20:50:37 +00:00
|
|
|
macro parserState(name: untyped, impl: untyped): typed =
  ## Creates a parser state. Every parser state is a proc with the signature
  ##
  ## proc(s: YamlStream, e: var YamlStreamEvent):
  ##     bool {.raises: [YamlParserError].}
  ##
  ## The proc name will be prefixed with "state" and the original name will be
  ## capitalized, so a state "foo" will yield a proc named "stateFoo".
  ##
  ## Inside the proc, you have access to the ParserContext with the let variable
  ## `c`. You can change the parser state by a assignment `state = [newState]`.
  ## The [newState] must have been declared with states(...) previously.
  let
    nameStr = $name.ident
    nameId = newIdentNode("state" & capitalize(nameStr))
  # prelude: trace message, downcast the stream, default return value
  var procImpl = quote do:
    debug("state: " & `nameStr`)
  procImpl.add(newLetStmt(ident("c"), newCall("ParserContext", ident("s"))))
  procImpl.add(newAssignment(newIdentNode("result"), newLit(false)))
  assert impl.kind == nnkStmtList
  processStateAsgns(impl, procImpl)
  result = newProc(nameId, [ident("bool"),
      newIdentDefs(ident("s"), ident("YamlStream")), newIdentDefs(ident("e"),
      newNimNode(nnkVarTy).add(ident("YamlStreamEvent")))], procImpl)
|
|
|
|
|
|
|
|
# --- parser states ---
|
|
|
|
|
|
|
|
# Forward-declare all parser state procs so they can reference each other.
parserStates(initial, blockObjectStart, blockAfterPlainScalar, blockAfterObject,
             scalarEnd, objectEnd, expectDocEnd, startDoc, afterDocument,
             closeStream, closeMoreIndentedLevels, emitEmptyScalar, tagHandle,
             anchor, alias, flow, leaveFlowMap, leaveFlowSeq, flowAfterObject,
             leaveFlowSinglePairMap)
|
2016-08-15 19:30:49 +00:00
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
proc closeEverything(c: ParserContext) =
  ## Forces all open levels to close by faking an indentation of -1 and
  ## switching to the level-closing state.
  c.indentation = -1
  c.nextImpl = stateCloseMoreIndentedLevels
  c.atSequenceItem = false
|
2016-08-15 19:30:49 +00:00
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
proc endLevel(c: ParserContext, e: var YamlStreamEvent):
    LevelEndResult =
  ## Produces the event that closes the current level into ``e``.
  ## Returns lerNothing when no event is needed, lerAdditionalMapEnd when
  ## the caller must emit a map end after ``e``.
  result = lerOne
  case c.level.kind
  of fplSequence:
    e = endSeqEvent()
  of fplMapKey:
    e = endMapEvent()
  of fplMapValue, fplSinglePairValue:
    # map ended after a key: emit the missing empty value first,
    # the caller emits the map end afterwards
    e = emptyScalar(c)
    c.level.kind = fplMapKey
    result = lerAdditionalMapEnd
  of fplScalar:
    if c.scalarType != stFlow:
      # block scalar: apply the chomping indicator to trailing newlines
      case c.chomp
      of ctKeep:
        if c.content.len == 0: c.newlines.inc(-1)
        c.content.addMultiple('\l', c.newlines)
      of ctClip:
        if c.content.len != 0:
          debug("adding clipped newline")
          c.content.add('\l')
      of ctStrip: discard
    e = currentScalar(c)
    c.tag = yTagQuestionMark
    c.anchor = yAnchorNone
  of fplUnknown:
    if c.ancestry.len > 1:
      e = emptyScalar(c) # don't yield scalar for empty doc
    else:
      result = lerNothing
  of fplDocument:
    e = endDocEvent()
  of fplSinglePairKey:
    internalError("Unexpected level kind: " & $c.level.kind)
|
2016-08-15 19:30:49 +00:00
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
proc handleMapValueIndicator(c: ParserContext, e: var YamlStreamEvent): bool =
  ## Handles a ``:`` map value indicator in block context.
  ## Returns true when an event was emitted into ``e``.
  # Fixes: (1) error messages said "map key indicator" although this proc
  # handles the map VALUE indicator (copy-paste defect); (2) messages
  # contained "p.indentation", a leftover internal identifier from a
  # p -> c refactor.
  c.startToken()
  case c.level.kind
  of fplUnknown:
    if c.level.indentation == UnknownIndentation:
      # node on its own line: emit map start now, empty key next
      e = c.objectStart(yamlStartMap)
      result = true
      c.storedState = c.nextImpl
      c.nextImpl = stateEmitEmptyScalar
    else:
      e = emptyScalar(c)
      result = true
    c.ancestry[c.ancestry.high].kind = fplMapValue
  of fplMapKey:
    if c.level.indentation != c.indentation:
      raise c.generateError("Invalid indentation of map value indicator")
    # key is empty: emit an empty scalar for it
    e = scalarEvent("", yTagQuestionMark, yAnchorNone)
    result = true
    c.level.kind = fplMapValue
    c.ancestry.add(c.level)
    c.level = initLevel(fplUnknown)
  of fplMapValue:
    if c.level.indentation != c.indentation:
      raise c.generateError("Invalid indentation of map value indicator")
    c.ancestry.add(c.level)
    c.level = initLevel(fplUnknown)
  of fplSequence:
    raise c.generateError("Unexpected map value indicator (expected '- ')")
  of fplScalar:
    raise c.generateError(
        "Unexpected map value indicator (expected multiline scalar end)")
  of fplSinglePairKey, fplSinglePairValue, fplDocument:
    internalError("Unexpected level kind: " & $c.level.kind)
  c.lexer.skipWhitespace()
  c.indentation = c.lexer.getColNumber(c.lexer.bufpos)
|
2016-08-15 19:30:49 +00:00
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
template handleObjectEnd(c: ParserContext, mayHaveEmptyValue: bool = false):
    bool =
  ## Pops the current level off the ancestry and flips the parent's
  ## key/value role. Evaluates to true iff an implicit single-pair value
  ## was left empty (only possible when mayHaveEmptyValue is set).
  # templates have no implicit result, hence the explicit local yielded
  # as the template's value on the last line
  var result = false
  c.level = c.ancestry.pop()
  when mayHaveEmptyValue:
    if c.level.kind == fplSinglePairValue:
      result = true
      c.level = c.ancestry.pop()
  case c.level.kind
  of fplMapKey: c.level.kind = fplMapValue
  of fplSinglePairKey: c.level.kind = fplSinglePairValue
  of fplMapValue: c.level.kind = fplMapKey
  of fplSequence, fplDocument: discard
  of fplUnknown, fplScalar, fplSinglePairValue:
    internalError("Unexpected level kind: " & $c.level.kind)
  result
|
|
|
|
|
2016-08-17 20:21:34 +00:00
|
|
|
proc leaveFlowLevel(c: ParserContext, e: var YamlStreamEvent): bool =
  ## Closes the innermost flow level; when the outermost flow level is
  ## left, control returns to block context.
  c.flowdepth.dec()
  result = (c.endLevel(e) == lerOne) # lerAdditionalMapEnd cannot happen
  if c.flowdepth == 0:
    c.storedState = stateBlockAfterObject
  else:
    c.storedState = stateFlowAfterObject
  c.nextImpl = stateObjectEnd
|
2016-08-17 18:04:59 +00:00
|
|
|
|
2016-08-15 19:30:49 +00:00
|
|
|
parserState initial:
  # Stream start: consume %YAML / %TAG directives and empty lines until
  # document content (or an explicit "---") begins.
  case c.lexer.buf[c.lexer.bufpos]
  of '%':
    var ld: LexedDirective
    c.startToken()
    c.lexer.directiveName(ld)
    case ld
    of ldYaml:
      var version = ""
      c.startToken()
      c.lexer.yamlVersion(version)
      if version != "1.2":
        c.callCallback("Version is not 1.2, but " & version)
      c.lineEnding()
      discard c.handleLineEnd(true)
    of ldTag:
      var shorthand = ""
      c.tagUri.reset()
      c.startToken()
      c.lexer.tagShorthand(shorthand)
      c.lexer.tagUriMapping(c.tagUri)
      c.shorthands[shorthand] = c.tagUri
      c.lineEnding()
      discard c.handleLineEnd(true)
    of ldUnknown:
      c.callCallback("Unknown directive")
      c.lexer.finishLine()
      discard c.handleLineEnd(true)
  of ' ', '\t':
    if not c.consumeLineIfEmpty(c.newlines):
      # indented content: document starts here
      c.indentation = c.lexer.getColNumber(c.lexer.bufpos)
      e = startDocEvent()
      result = true
      state = blockObjectStart
  of '\l': c.lexer.lexLF()
  of '\c': c.lexer.lexCR()
  of EndOfFile: c.isFinished = true
  of '#':
    c.lineEnding()
    discard c.handleLineEnd(true)
  of '-':
    # may be a "---" directives end marker or a plain sequence item
    var success: bool
    c.startToken()
    c.lexer.directivesEndMarker(success)
    if success: c.lexer.bufpos.inc(3)
    e = startDocEvent()
    result = true
    state = blockObjectStart
  else:
    # any other character starts document content
    e = startDocEvent()
    result = true
    state = blockObjectStart
|
|
|
|
|
|
|
|
parserState blockObjectStart:
  # Start of a line in block context: handle document markers, indentation
  # changes, block scalar continuation, then dispatch on the first char.
  c.lexer.skipIndentation()
  c.indentation = c.lexer.getColNumber(c.lexer.bufpos)
  if c.indentation == 0:
    # column 0: check for "---" / "..." document markers
    var success: bool
    c.lexer.directivesEndMarker(success)
    if success:
      c.lexer.bufpos.inc(3)
      c.closeEverything()
      stored = startDoc
      return false
    c.lexer.documentEndMarker(success)
    if success:
      c.closeEverything()
      c.lexer.bufpos.inc(3)
      c.lineEnding()
      discard c.handleLineEnd(false)
      stored = afterDocument
      return false
  if c.atSequenceItem: c.atSequenceItem = false
  elif c.indentation <= c.ancestry[c.ancestry.high].indentation:
    # dedent relative to the parent level: close more indented levels
    if c.lexer.buf[c.lexer.bufpos] in lineEnd:
      if c.handleLineEnd(true):
        c.closeEverything()
        stored = afterDocument
        return false
    elif c.lexer.buf[c.lexer.bufpos] == '#':
      c.lineEnding()
      if c.handleLineEnd(true):
        c.closeEverything()
        stored = afterDocument
        return false
    else:
      c.atSequenceItem = c.lexer.buf[c.lexer.bufpos] == '-' and
          not c.lexer.isPlainSafe(c.lexer.bufpos + 1, cBlock)
      state = closeMoreIndentedLevels
      stored = blockObjectStart
      return false
  elif c.indentation <= c.level.indentation and
      c.lexer.buf[c.lexer.bufpos] in lineEnd:
    if c.handleLineEnd(true):
      c.closeEverything()
      stored = afterDocument
      return false
  if c.level.kind == fplScalar and c.scalarType != stFlow:
    # inside a block scalar: this line is scalar content or ends the scalar
    if c.indentation < c.level.indentation:
      if c.lexer.buf[c.lexer.bufpos] == '#':
        # skip all following comment lines
        while c.indentation > c.ancestry[c.ancestry.high].indentation:
          c.lineEnding()
          if c.handleLineEnd(false):
            c.closeEverything()
            stored = afterDocument
            return false
          c.lexer.skipIndentation()
          c.indentation = c.lexer.getColNumber(c.lexer.bufpos)
        if c.indentation > c.ancestry[c.ancestry.high].indentation:
          raise c.lexer.generateError(
              "Invalid content in block scalar after comments")
        state = closeMoreIndentedLevels
        stored = blockObjectStart
        return false
      else:
        # NOTE(review): message text "p.indentation" looks like a refactor
        # leftover (p -> c); kept verbatim here.
        raise c.lexer.generateError(
            "Invalid p.indentation (expected p.indentation of at least " &
            $c.level.indentation & " spaces)")
    if c.blockScalarLine():
      c.closeEverything()
      stored = afterDocument
    return false
  case c.lexer.buf[c.lexer.bufpos]
  of '\l':
    c.lexer.lexLF()
    c.newlines.inc()
    if c.level.kind == fplUnknown:
      c.level.indentation = UnknownIndentation
  of '\c':
    c.lexer.lexCR()
    c.newlines.inc()
    if c.level.kind == fplUnknown:
      c.level.indentation = UnknownIndentation
  of EndOfFile:
    c.closeEverything()
    stored = afterDocument
  of '#':
    c.lineEnding()
    if c.handleLineEnd(true):
      c.closeEverything()
      stored = afterDocument
    if c.level.kind == fplUnknown:
      c.level.indentation = UnknownIndentation
  of ':':
    if c.lexer.isPlainSafe(c.lexer.bufpos + 1, cBlock):
      # ':' is plain-safe here, so it belongs to a plain scalar
      if c.level.kind == fplScalar:
        c.continueMultilineScalar()
        state = blockAfterPlainScalar
      else:
        result = c.handleBlockItemStart(e)
        c.content.reset()
        c.startToken()
        c.plainScalar(cBlock)
        state = blockAfterPlainScalar
    else:
      c.lexer.bufpos.inc()
      result = c.handleMapValueIndicator(e)
  of '\t':
    if c.level.kind == fplScalar:
      c.lexer.skipWhitespace()
      c.continueMultilineScalar()
      state = blockAfterPlainScalar
    else: raise c.lexer.generateError("\\t cannot start any token")
  else:
    if c.level.kind == fplScalar:
      c.continueMultilineScalar()
      state = blockAfterPlainScalar
    else:
      case c.lexer.buf[c.lexer.bufpos]
      of '\'':
        result = c.handleBlockItemStart(e)
        c.content.reset()
        c.startToken()
        c.singleQuotedScalar()
        state = scalarEnd
      of '"':
        result = c.handleBlockItemStart(e)
        c.content.reset()
        c.startToken()
        c.doubleQuotedScalar()
        state = scalarEnd
      of '|', '>':
        if c.blockScalarHeader():
          c.closeEverything()
          stored = afterDocument
      of '-':
        if c.lexer.isPlainSafe(c.lexer.bufpos + 1, cBlock):
          # '-' followed by content is a plain scalar, not a seq item
          result = c.handleBlockItemStart(e)
          c.content.reset()
          c.startToken()
          c.plainScalar(cBlock)
          state = blockAfterPlainScalar
        else:
          c.lexer.bufpos.inc()
          result = c.handleBlockSequenceIndicator(e)
      of '!':
        result = c.handleBlockItemStart(e)
        state = tagHandle
        stored = blockObjectStart
      of '&':
        result = c.handleBlockItemStart(e)
        state = anchor
        stored = blockObjectStart
      of '*':
        result = c.handleBlockItemStart(e)
        state = alias
        stored = blockAfterObject
      of '[', '{':
        result = c.handleBlockItemStart(e)
        state = flow
      of '?':
        if c.lexer.isPlainSafe(c.lexer.bufpos + 1, cBlock):
          result = c.handleBlockItemStart(e)
          c.content.reset()
          c.startToken()
          c.plainScalar(cBlock)
          state = blockAfterPlainScalar
        else:
          c.lexer.bufpos.inc()
          result = c.handleMapKeyIndicator(e)
      of '@', '`':
        raise c.lexer.generateError(
            "Reserved characters cannot start a plain scalar")
      else:
        result = c.handleBlockItemStart(e)
        c.content.reset()
        c.startToken()
        c.plainScalar(cBlock)
        state = blockAfterPlainScalar
|
|
|
|
|
|
|
|
parserState scalarEnd:
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.tag == yTagQuestionMark: c.tag = yTagExclamationMark
|
|
|
|
e = c.currentScalar()
|
2016-08-15 19:30:49 +00:00
|
|
|
result = true
|
|
|
|
state = objectEnd
|
|
|
|
stored = blockAfterObject
|
|
|
|
|
|
|
|
parserState blockAfterPlainScalar:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.skipWhitespace()
|
|
|
|
case c.lexer.buf[c.lexer.bufpos]
|
2016-08-15 19:30:49 +00:00
|
|
|
of '\l':
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.level.kind notin {fplUnknown, fplScalar}:
|
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected scalar")
|
2016-08-15 19:30:49 +00:00
|
|
|
startScalar(stFlow)
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.lexLF()
|
|
|
|
c.newlines.inc()
|
2016-08-15 19:30:49 +00:00
|
|
|
state = blockObjectStart
|
|
|
|
of '\c':
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.level.kind notin {fplUnknown, fplScalar}:
|
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected scalar")
|
2016-08-15 19:30:49 +00:00
|
|
|
startScalar(stFlow)
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.lexCR()
|
|
|
|
c.newlines.inc()
|
2016-08-15 19:30:49 +00:00
|
|
|
state = blockObjectStart
|
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = c.currentScalar()
|
2016-08-15 19:30:49 +00:00
|
|
|
result = true
|
|
|
|
state = objectEnd
|
|
|
|
stored = blockAfterObject
|
|
|
|
|
|
|
|
parserState blockAfterObject:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.skipWhitespace()
|
|
|
|
case c.lexer.buf[c.lexer.bufpos]
|
2016-08-15 19:30:49 +00:00
|
|
|
of EndOfFile:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.closeEverything()
|
2016-08-15 19:30:49 +00:00
|
|
|
stored = afterDocument
|
|
|
|
of '\l':
|
|
|
|
state = blockObjectStart
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.lexLF()
|
2016-08-15 19:30:49 +00:00
|
|
|
of '\c':
|
|
|
|
state = blockObjectStart
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.lexCR()
|
2016-08-15 19:30:49 +00:00
|
|
|
of ':':
|
2016-08-17 20:21:34 +00:00
|
|
|
case c.level.kind
|
2016-08-15 19:30:49 +00:00
|
|
|
of fplUnknown:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = c.objectStart(yamlStartMap)
|
2016-08-15 19:30:49 +00:00
|
|
|
result = true
|
|
|
|
of fplMapKey:
|
|
|
|
e = scalarEvent("", yTagQuestionMark, yAnchorNone)
|
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
c.level.kind = fplMapValue
|
|
|
|
c.ancestry.add(c.level)
|
|
|
|
c.level = initLevel(fplUnknown)
|
2016-08-15 19:30:49 +00:00
|
|
|
of fplMapValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.level.kind = fplMapValue
|
|
|
|
c.ancestry.add(c.level)
|
|
|
|
c.level = initLevel(fplUnknown)
|
2016-08-15 19:30:49 +00:00
|
|
|
of fplSequence:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Illegal token (expected sequence item)")
|
2016-08-15 19:30:49 +00:00
|
|
|
of fplScalar:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError(
|
2016-08-15 19:30:49 +00:00
|
|
|
"Multiline scalars may not be implicit map keys")
|
|
|
|
of fplSinglePairKey, fplSinglePairValue, fplDocument:
|
2016-08-17 20:21:34 +00:00
|
|
|
internalError("Unexpected level kind: " & $c.level.kind)
|
|
|
|
c.lexer.bufpos.inc()
|
|
|
|
c.lexer.skipWhitespace()
|
|
|
|
c.indentation = c.lexer.getColNumber(c.lexer.bufpos)
|
2016-08-15 19:30:49 +00:00
|
|
|
state = blockObjectStart
|
|
|
|
of '#':
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lineEnding()
|
|
|
|
if c.handleLineEnd(true):
|
|
|
|
c.closeEverything()
|
2016-08-15 19:30:49 +00:00
|
|
|
stored = afterDocument
|
|
|
|
else: state = blockObjectStart
|
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError(
|
2016-08-15 19:30:49 +00:00
|
|
|
"Illegal token (expected ':', comment or line end)")
|
|
|
|
|
|
|
|
parserState objectEnd:
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.handleObjectEnd(true):
|
2016-08-15 19:30:49 +00:00
|
|
|
e = endMapEvent()
|
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.level.kind == fplDocument: state = expectDocEnd
|
2016-08-15 19:30:49 +00:00
|
|
|
else: state = stored
|
|
|
|
|
|
|
|
parserState expectDocEnd:
|
2016-08-17 20:21:34 +00:00
|
|
|
case c.lexer.buf[c.lexer.bufpos]
|
2016-08-15 19:30:49 +00:00
|
|
|
of '-':
|
|
|
|
var success: bool
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.directivesEndMarker(success)
|
2016-08-15 19:30:49 +00:00
|
|
|
if success:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.bufpos.inc(3)
|
2016-08-15 19:30:49 +00:00
|
|
|
e = endDocEvent()
|
|
|
|
result = true
|
|
|
|
state = startDoc
|
2016-08-17 20:21:34 +00:00
|
|
|
c.ancestry.setLen(0)
|
2016-08-15 19:30:49 +00:00
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
raise c.generateError("Unexpected content (expected document end)")
|
2016-08-15 19:30:49 +00:00
|
|
|
of '.':
|
|
|
|
var isDocumentEnd: bool
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
c.lexer.documentEndMarker(isDocumentEnd)
|
2016-08-15 19:30:49 +00:00
|
|
|
if isDocumentEnd:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.bufpos.inc(3)
|
|
|
|
c.lineEnding()
|
|
|
|
discard c.handleLineEnd(true)
|
2016-08-15 19:30:49 +00:00
|
|
|
e = endDocEvent()
|
|
|
|
result = true
|
|
|
|
state = afterDocument
|
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
raise c.generateError("Unexpected content (expected document end)")
|
2016-08-15 19:30:49 +00:00
|
|
|
of ' ', '\t', '#':
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lineEnding()
|
|
|
|
if c.handleLineEnd(true):
|
|
|
|
c.closeEverything()
|
2016-08-15 19:30:49 +00:00
|
|
|
stored = afterDocument
|
2016-08-17 20:21:34 +00:00
|
|
|
of '\l': c.lexer.lexLF()
|
|
|
|
of '\c': c.lexer.lexCR()
|
2016-08-15 19:30:49 +00:00
|
|
|
of EndOfFile:
|
|
|
|
e = endDocEvent()
|
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
c.isFinished = true
|
2016-08-15 19:30:49 +00:00
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected content (expected document end)")
|
2016-08-15 19:30:49 +00:00
|
|
|
|
|
|
|
parserState startDoc:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.initDocValues()
|
2016-08-15 19:30:49 +00:00
|
|
|
e = startDocEvent()
|
|
|
|
result = true
|
|
|
|
state = blockObjectStart
|
|
|
|
|
|
|
|
parserState afterDocument:
|
2016-08-17 20:21:34 +00:00
|
|
|
case c.lexer.buf[c.lexer.bufpos]
|
2016-08-15 19:30:49 +00:00
|
|
|
of '.':
|
|
|
|
var isDocumentEnd: bool
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
c.lexer.documentEndMarker(isDocumentEnd)
|
2016-08-15 19:30:49 +00:00
|
|
|
if isDocumentEnd:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.bufpos.inc(3)
|
|
|
|
c.lineEnding()
|
|
|
|
discard c.handleLineEnd(true)
|
2016-08-15 19:30:49 +00:00
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.initDocValues()
|
2016-08-15 19:30:49 +00:00
|
|
|
e = startDocEvent()
|
|
|
|
result = true
|
|
|
|
state = blockObjectStart
|
|
|
|
of '#':
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lineEnding()
|
|
|
|
discard c.handleLineEnd(true)
|
2016-08-15 19:30:49 +00:00
|
|
|
of '\t', ' ':
|
2016-08-17 20:21:34 +00:00
|
|
|
if not c.consumeLineIfEmpty(c.newlines):
|
|
|
|
c.indentation = c.lexer.getColNumber(c.lexer.bufpos)
|
|
|
|
c.initDocValues()
|
2016-08-15 19:30:49 +00:00
|
|
|
e = startDocEvent()
|
|
|
|
result = true
|
|
|
|
state = blockObjectStart
|
2016-08-17 20:21:34 +00:00
|
|
|
of EndOfFile: c.isFinished = true
|
2016-08-15 19:30:49 +00:00
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.initDocValues()
|
2016-08-15 19:30:49 +00:00
|
|
|
state = initial
|
|
|
|
|
|
|
|
parserState closeStream:
|
2016-08-17 20:21:34 +00:00
|
|
|
case c.level.kind
|
|
|
|
of fplUnknown: discard c.ancestry.pop()
|
2016-08-15 19:30:49 +00:00
|
|
|
of fplDocument: discard
|
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
case c.endLevel(e)
|
2016-08-15 19:30:49 +00:00
|
|
|
of lerNothing: discard
|
|
|
|
of lerOne: result = true
|
|
|
|
of lerAdditionalMapEnd: return true
|
2016-08-17 20:21:34 +00:00
|
|
|
c.level = c.ancestry.pop()
|
2016-08-15 19:30:49 +00:00
|
|
|
if result: return
|
|
|
|
e = endDocEvent()
|
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
c.isFinished = true
|
2016-08-15 19:30:49 +00:00
|
|
|
|
|
|
|
parserState closeMoreIndentedLevels:
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.ancestry.len > 0:
|
|
|
|
let parent = c.ancestry[c.ancestry.high]
|
|
|
|
if parent.indentation >= c.indentation:
|
|
|
|
if c.atSequenceItem:
|
|
|
|
if (c.indentation == c.level.indentation and
|
|
|
|
c.level.kind == fplSequence) or
|
|
|
|
(c.indentation == parent.indentation and
|
|
|
|
c.level.kind == fplUnknown and parent.kind != fplSequence):
|
2016-08-15 19:30:49 +00:00
|
|
|
state = stored
|
|
|
|
return false
|
|
|
|
debug("Closing because parent.indentation (" & $parent.indentation &
|
2016-08-17 20:21:34 +00:00
|
|
|
") >= indentation(" & $c.indentation & ")")
|
|
|
|
case c.endLevel(e)
|
2016-08-15 19:30:49 +00:00
|
|
|
of lerNothing: discard
|
|
|
|
of lerOne: result = true
|
|
|
|
of lerAdditionalMapEnd: return true
|
2016-08-17 20:21:34 +00:00
|
|
|
discard c.handleObjectEnd(false)
|
2016-08-15 19:30:49 +00:00
|
|
|
return result
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.level.kind == fplDocument: state = expectDocEnd
|
2016-08-15 19:30:49 +00:00
|
|
|
else: state = stored
|
2016-08-17 20:21:34 +00:00
|
|
|
elif c.indentation == c.level.indentation:
|
|
|
|
let res = c.endLevel(e)
|
2016-08-15 19:30:49 +00:00
|
|
|
yAssert(res == lerOne)
|
|
|
|
result = true
|
|
|
|
state = stored
|
|
|
|
else:
|
|
|
|
state = stored
|
|
|
|
|
|
|
|
parserState emitEmptyScalar:
|
|
|
|
e = scalarEvent("", yTagQuestionMark, yAnchorNone)
|
|
|
|
result = true
|
|
|
|
state = stored
|
|
|
|
|
|
|
|
parserState tagHandle:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.handleTagHandle()
|
2016-08-15 19:30:49 +00:00
|
|
|
state = stored
|
|
|
|
|
|
|
|
parserState anchor:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.handleAnchor()
|
2016-08-15 19:30:49 +00:00
|
|
|
state = stored
|
|
|
|
|
|
|
|
parserState alias:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
if c.level.kind != fplUnknown: raise c.generateError("Unexpected token")
|
|
|
|
if c.anchor != yAnchorNone or c.tag != yTagQuestionMark:
|
|
|
|
raise c.generateError("Alias may not have anchor or tag")
|
|
|
|
c.content.reset()
|
|
|
|
c.anchorName()
|
2016-08-15 19:30:49 +00:00
|
|
|
var id: AnchorId
|
2016-08-17 20:21:34 +00:00
|
|
|
try: id = c.p.anchors[c.content]
|
|
|
|
except KeyError: raise c.generateError("Unknown anchor")
|
2016-08-15 19:30:49 +00:00
|
|
|
e = aliasEvent(id)
|
|
|
|
result = true
|
|
|
|
state = objectEnd
|
|
|
|
|
|
|
|
parserState flow:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.skipWhitespaceCommentsAndNewlines()
|
|
|
|
case c.lexer.buf[c.lexer.bufpos]
|
2016-08-17 18:04:59 +00:00
|
|
|
of '{':
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.handleFlowItemStart(e): return true
|
|
|
|
e = c.objectStart(yamlStartMap)
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
c.flowdepth.inc()
|
|
|
|
c.lexer.bufpos.inc()
|
|
|
|
c.explicitFlowKey = false
|
2016-08-17 18:04:59 +00:00
|
|
|
of '[':
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.handleFlowItemStart(e): return true
|
|
|
|
e = c.objectStart(yamlStartSeq)
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
c.flowdepth.inc()
|
|
|
|
c.lexer.bufpos.inc()
|
2016-08-17 18:04:59 +00:00
|
|
|
of '}':
|
2016-08-17 20:21:34 +00:00
|
|
|
yAssert(c.level.kind == fplUnknown)
|
|
|
|
c.level = c.ancestry.pop()
|
|
|
|
c.lexer.bufpos.inc()
|
2016-08-17 18:04:59 +00:00
|
|
|
state = leaveFlowMap
|
|
|
|
of ']':
|
2016-08-17 20:21:34 +00:00
|
|
|
yAssert(c.level.kind == fplUnknown)
|
|
|
|
c.level = c.ancestry.pop()
|
|
|
|
c.lexer.bufpos.inc()
|
2016-08-17 18:04:59 +00:00
|
|
|
state = leaveFlowSeq
|
|
|
|
of ',':
|
2016-08-17 20:21:34 +00:00
|
|
|
yAssert(c.level.kind == fplUnknown)
|
|
|
|
c.level = c.ancestry.pop()
|
|
|
|
case c.level.kind
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplSequence:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = c.emptyScalar()
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
|
|
|
of fplMapValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = c.emptyScalar()
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
c.level.kind = fplMapKey
|
|
|
|
c.explicitFlowKey = false
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplMapKey:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = c.emptyScalar()
|
|
|
|
c.level.kind = fplMapValue
|
2016-08-17 18:04:59 +00:00
|
|
|
return true
|
|
|
|
of fplSinglePairValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = c.emptyScalar()
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
c.level = c.ancestry.pop()
|
2016-08-17 18:04:59 +00:00
|
|
|
state = leaveFlowSinglePairMap
|
|
|
|
stored = flow
|
|
|
|
of fplUnknown, fplScalar, fplSinglePairKey, fplDocument:
|
2016-08-17 20:21:34 +00:00
|
|
|
internalError("Unexpected level kind: " & $c.level.kind)
|
|
|
|
c.ancestry.add(c.level)
|
|
|
|
c.level = initLevel(fplUnknown)
|
|
|
|
c.lexer.bufpos.inc()
|
2016-08-17 18:04:59 +00:00
|
|
|
of ':':
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.lexer.isPlainSafe(c.lexer.bufpos + 1, cFlow):
|
|
|
|
if c.handleFlowItemStart(e): return true
|
|
|
|
c.handleFlowPlainScalar(e)
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
|
|
|
state = objectEnd
|
|
|
|
stored = flowAfterObject
|
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.level = c.ancestry.pop()
|
|
|
|
case c.level.kind
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplSequence:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = startMapEvent(c.tag, c.anchor)
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
|
|
|
debug("started single-pair map at " &
|
2016-08-17 20:21:34 +00:00
|
|
|
(if c.level.indentation == UnknownIndentation:
|
|
|
|
$c.indentation else: $c.level.indentation))
|
|
|
|
c.tag = yTagQuestionMark
|
|
|
|
c.anchor = yAnchorNone
|
|
|
|
if c.level.indentation == UnknownIndentation:
|
|
|
|
c.level.indentation = c.indentation
|
|
|
|
c.ancestry.add(c.level)
|
|
|
|
c.level = initLevel(fplSinglePairKey)
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplMapValue, fplSinglePairValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected token (expected ',')")
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplMapKey:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = c.emptyScalar()
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
c.level.kind = fplMapValue
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplSinglePairKey:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = c.emptyScalar()
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
c.level.kind = fplSinglePairValue
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplUnknown, fplScalar, fplDocument:
|
2016-08-17 20:21:34 +00:00
|
|
|
internalError("Unexpected level kind: " & $c.level.kind)
|
|
|
|
if c.level.kind != fplSinglePairKey: c.lexer.bufpos.inc()
|
|
|
|
c.ancestry.add(c.level)
|
|
|
|
c.level = initLevel(fplUnknown)
|
2016-08-17 18:04:59 +00:00
|
|
|
of '\'':
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.handleFlowItemStart(e): return true
|
|
|
|
c.content.reset()
|
|
|
|
c.startToken()
|
|
|
|
c.singleQuotedScalar()
|
|
|
|
if c.tag == yTagQuestionMark: c.tag = yTagExclamationMark
|
|
|
|
e = c.currentScalar()
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
|
|
|
state = objectEnd
|
|
|
|
stored = flowAfterObject
|
|
|
|
of '"':
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.handleFlowItemStart(e): return true
|
|
|
|
c.content.reset()
|
|
|
|
c.startToken()
|
|
|
|
c.doubleQuotedScalar()
|
|
|
|
if c.tag == yTagQuestionMark: c.tag = yTagExclamationMark
|
|
|
|
e = c.currentScalar()
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
|
|
|
state = objectEnd
|
|
|
|
stored = flowAfterObject
|
|
|
|
of '!':
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.handleFlowItemStart(e): return true
|
|
|
|
c.handleTagHandle()
|
2016-08-17 18:04:59 +00:00
|
|
|
of '&':
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.handleFlowItemStart(e): return true
|
|
|
|
c.handleAnchor()
|
2016-08-17 18:04:59 +00:00
|
|
|
of '*':
|
|
|
|
state = alias
|
|
|
|
stored = flowAfterObject
|
|
|
|
of '?':
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.lexer.isPlainSafe(c.lexer.bufpos + 1, cFlow):
|
|
|
|
if c.handleFlowItemStart(e): return true
|
|
|
|
c.handleFlowPlainScalar(e)
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
|
|
|
state = objectEnd
|
|
|
|
stored = flowAfterObject
|
2016-08-17 20:21:34 +00:00
|
|
|
elif c.explicitFlowKey:
|
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Duplicate '?' in flow mapping")
|
|
|
|
elif c.level.kind == fplUnknown:
|
|
|
|
case c.ancestry[c.ancestry.high].kind
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplMapKey, fplMapValue, fplDocument: discard
|
|
|
|
of fplSequence:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = c.objectStart(yamlStartMap, true)
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected token")
|
|
|
|
c.explicitFlowKey = true
|
|
|
|
c.lexer.bufpos.inc()
|
2016-08-17 18:04:59 +00:00
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.explicitFlowKey = true
|
|
|
|
c.lexer.bufpos.inc()
|
2016-08-17 18:04:59 +00:00
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.handleFlowItemStart(e): return true
|
|
|
|
c.handleFlowPlainScalar(e)
|
2016-08-17 18:04:59 +00:00
|
|
|
result = true
|
|
|
|
state = objectEnd
|
|
|
|
stored = flowAfterObject
|
|
|
|
|
|
|
|
parserState leaveFlowMap:
|
2016-08-17 20:21:34 +00:00
|
|
|
case c.level.kind
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplMapValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = c.emptyScalar()
|
|
|
|
c.level.kind = fplMapKey
|
2016-08-17 18:04:59 +00:00
|
|
|
return true
|
|
|
|
of fplMapKey:
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.tag != yTagQuestionMark or c.anchor != yAnchorNone or
|
|
|
|
c.explicitFlowKey:
|
|
|
|
e = c.emptyScalar()
|
|
|
|
c.level.kind = fplMapValue
|
|
|
|
c.explicitFlowKey = false
|
2016-08-17 18:04:59 +00:00
|
|
|
return true
|
|
|
|
of fplSequence:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected token (expected ']')")
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplSinglePairValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected token (expected ']')")
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplUnknown, fplScalar, fplSinglePairKey, fplDocument:
|
2016-08-17 20:21:34 +00:00
|
|
|
internalError("Unexpected level kind: " & $c.level.kind)
|
|
|
|
result = c.leaveFlowLevel(e)
|
2016-08-30 20:15:29 +00:00
|
|
|
|
2016-08-17 18:04:59 +00:00
|
|
|
parserState leaveFlowSeq:
|
2016-08-17 20:21:34 +00:00
|
|
|
case c.level.kind
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplSequence:
|
2016-08-17 20:21:34 +00:00
|
|
|
if c.tag != yTagQuestionMark or c.anchor != yAnchorNone:
|
|
|
|
e = c.emptyScalar()
|
2016-08-17 18:04:59 +00:00
|
|
|
return true
|
|
|
|
of fplSinglePairValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
e = c.emptyScalar()
|
|
|
|
c.level = c.ancestry.pop()
|
2016-08-17 18:04:59 +00:00
|
|
|
state = leaveFlowSinglePairMap
|
|
|
|
stored = leaveFlowSeq
|
|
|
|
return true
|
|
|
|
of fplMapKey, fplMapValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected token (expected '}')")
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplUnknown, fplScalar, fplSinglePairKey, fplDocument:
|
2016-08-17 20:21:34 +00:00
|
|
|
internalError("Unexpected level kind: " & $c.level.kind)
|
|
|
|
result = c.leaveFlowLevel(e)
|
2016-08-17 18:04:59 +00:00
|
|
|
|
|
|
|
parserState leaveFlowSinglePairMap:
|
|
|
|
e = endMapEvent()
|
|
|
|
result = true
|
|
|
|
state = stored
|
|
|
|
|
|
|
|
parserState flowAfterObject:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.skipWhitespaceCommentsAndNewlines()
|
|
|
|
case c.lexer.buf[c.lexer.bufpos]
|
2016-08-17 18:04:59 +00:00
|
|
|
of ']':
|
2016-08-17 20:21:34 +00:00
|
|
|
case c.level.kind
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplSequence: discard
|
|
|
|
of fplMapKey, fplMapValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected token (expected '}')")
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplSinglePairValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.level = c.ancestry.pop()
|
|
|
|
yAssert(c.level.kind == fplSequence)
|
2016-08-17 18:04:59 +00:00
|
|
|
e = endMapEvent()
|
|
|
|
return true
|
|
|
|
of fplScalar, fplUnknown, fplSinglePairKey, fplDocument:
|
2016-08-17 20:21:34 +00:00
|
|
|
internalError("Unexpected level kind: " & $c.level.kind)
|
|
|
|
c.lexer.bufpos.inc()
|
|
|
|
result = c.leaveFlowLevel(e)
|
2016-08-17 18:04:59 +00:00
|
|
|
of '}':
|
2016-08-17 20:21:34 +00:00
|
|
|
case c.level.kind
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplMapKey, fplMapValue: discard
|
|
|
|
of fplSequence, fplSinglePairValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected token (expected ']')")
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplUnknown, fplScalar, fplSinglePairKey, fplDocument:
|
2016-08-17 20:21:34 +00:00
|
|
|
internalError("Unexpected level kind: " & $c.level.kind)
|
|
|
|
c.lexer.bufpos.inc()
|
|
|
|
result = c.leaveFlowLevel(e)
|
2016-08-17 18:04:59 +00:00
|
|
|
of ',':
|
2016-08-17 20:21:34 +00:00
|
|
|
case c.level.kind
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplSequence: discard
|
|
|
|
of fplMapValue:
|
|
|
|
e = scalarEvent("", yTagQuestionMark, yAnchorNone)
|
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
c.level.kind = fplMapKey
|
|
|
|
c.explicitFlowKey = false
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplSinglePairValue:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.level = c.ancestry.pop()
|
|
|
|
yAssert(c.level.kind == fplSequence)
|
2016-08-17 18:04:59 +00:00
|
|
|
e = endMapEvent()
|
|
|
|
result = true
|
2016-08-17 20:21:34 +00:00
|
|
|
of fplMapKey: c.explicitFlowKey = false
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplUnknown, fplScalar, fplSinglePairKey, fplDocument:
|
2016-08-17 20:21:34 +00:00
|
|
|
internalError("Unexpected level kind: " & $c.level.kind)
|
|
|
|
c.ancestry.add(c.level)
|
|
|
|
c.level = initLevel(fplUnknown)
|
2016-08-17 18:04:59 +00:00
|
|
|
state = flow
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.bufpos.inc()
|
2016-08-17 18:04:59 +00:00
|
|
|
of ':':
|
2016-08-17 20:21:34 +00:00
|
|
|
case c.level.kind
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplSequence, fplMapKey:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected token (expected ',')")
|
2016-08-17 18:04:59 +00:00
|
|
|
of fplMapValue, fplSinglePairValue: discard
|
|
|
|
of fplUnknown, fplScalar, fplSinglePairKey, fplDocument:
|
2016-08-17 20:21:34 +00:00
|
|
|
internalError("Unexpected level kind: " & $c.level.kind)
|
|
|
|
c.ancestry.add(c.level)
|
|
|
|
c.level = initLevel(fplUnknown)
|
2016-08-17 18:04:59 +00:00
|
|
|
state = flow
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lexer.bufpos.inc()
|
2016-08-17 18:04:59 +00:00
|
|
|
of '#':
|
2016-08-17 20:21:34 +00:00
|
|
|
c.lineEnding()
|
|
|
|
if c.handleLineEnd(true):
|
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unclosed flow content")
|
2016-08-17 18:04:59 +00:00
|
|
|
of EndOfFile:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unclosed flow content")
|
2016-08-17 18:04:59 +00:00
|
|
|
else:
|
2016-08-17 20:21:34 +00:00
|
|
|
c.startToken()
|
|
|
|
raise c.generateError("Unexpected content (expected flow indicator)")
|
2016-08-15 19:30:49 +00:00
|
|
|
|
|
|
|
# --- parser initialization ---
|
|
|
|
|
|
|
|
proc parse*(p: YamlParser, s: Stream): YamlStream =
|
2016-08-17 20:21:34 +00:00
|
|
|
result = new(ParserContext)
|
|
|
|
let c = ParserContext(result)
|
|
|
|
c.content = ""
|
|
|
|
c.after = ""
|
|
|
|
c.tagUri = ""
|
|
|
|
c.ancestry = newSeq[FastParseLevel]()
|
|
|
|
c.p = p
|
2016-08-15 19:30:49 +00:00
|
|
|
try: p.lexer.open(s)
|
|
|
|
except:
|
|
|
|
let e = newException(YamlParserError,
|
|
|
|
"Error while opening stream: " & getCurrentExceptionMsg())
|
|
|
|
e.parent = getCurrentException()
|
|
|
|
e.line = 1
|
|
|
|
e.column = 1
|
|
|
|
e.lineContent = ""
|
|
|
|
raise e
|
2016-08-17 20:21:34 +00:00
|
|
|
c.initDocValues()
|
|
|
|
c.atSequenceItem = false
|
|
|
|
c.flowdepth = 0
|
2016-08-15 19:30:49 +00:00
|
|
|
result.isFinished = false
|
|
|
|
result.peeked = false
|
2016-08-30 20:15:29 +00:00
|
|
|
result.nextImpl = stateInitial
|