type
  FastParseState = enum
    fpInitial, fpBlockLineStart, fpBlockAfterObject, fpBlockAfterPlainScalar,
    fpBlockObjectStart, fpBlockContinueScalar, fpExpectDocEnd, fpFlow,
    fpFlowAfterObject

  FastParseLevelKind = enum
    fplUnknown, fplSequence, fplMapKey, fplMapValue, fplScalar

  FastParseLevel = object
    kind: FastParseLevelKind
    indentation: int

  LexedDirective = enum
    ldYaml, ldTag, ldUnknown

  LexedPossibleDirectivesEnd = enum
    lpdeDirectivesEnd, lpdeSequenceItem, lpdeScalarContent

  YamlContext = enum
    cFlowIn, cFlowOut, cFlowKey, cBlockKey, cBlockIn, cBlockOut

  FastLexer = object of BaseLexer
    tokenstart: int
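
# The parser is a hand-written state machine: FastParseState tracks the
# position in the event stream (block vs. flow context, before or after a
# node), while FastParseLevel records each open collection together with the
# column it was opened at. A stack of levels ("ancestry" below) plus the
# current level represent the nesting at any point.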

const
  space = [' ', '\t']
  lineEnd = ['\x0A', '\c', EndOfFile]
  spaceOrLineEnd = [' ', '\t', '\x0A', '\c', EndOfFile]
  digits = '0'..'9'
  flowIndicators = ['[', ']', '{', '}', ',']

template debug(message: string) {.dirty.} =
  when defined(yamlDebug):
    try: styledWriteLine(stdout, fgBlue, message)
    except IOError: discard

template raiseError(message: string) {.dirty.} =
  var e = newException(YamlParserError, message)
  e.line = lexer.lineNumber
  e.column = lexer.tokenstart
  e.lineContent = lexer.getCurrentLine(false) &
      repeat(' ', lexer.getColNumber(lexer.bufpos)) & "^\n"
  raise e

template raiseError(message: string, col: int) {.dirty.} =
  var e = newException(YamlParserError, message)
  e.line = lexer.lineNumber
  e.column = col
  e.lineContent = lexer.getCurrentLine(false) &
      repeat(' ', lexer.getColNumber(lexer.bufpos)) & "^\n"
  raise e

template yieldLevelEnd() {.dirty.} =
  case level.kind
  of fplSequence:
    yield endSeqEvent()
  of fplMapKey:
    yield endMapEvent()
  of fplMapValue:
    yield scalarEvent("", tag, anchor)
    tag = yTagQuestionMark
    anchor = yAnchorNone
    yield endMapEvent()
  of fplScalar:
    yield scalarEvent(content, tag, anchor)
    tag = yTagQuestionMark
    anchor = yAnchorNone
  of fplUnknown:
    yield scalarEvent("", tag, anchor)
    tag = yTagQuestionMark
    anchor = yAnchorNone

template handleLineEnd(insideDocument: bool) {.dirty.} =
  case lexer.buf[lexer.bufpos]
  of '\x0A':
    lexer.bufpos = lexer.handleLF(lexer.bufpos)
  of '\c':
    lexer.bufpos = lexer.handleCR(lexer.bufpos)
  of EndOfFile:
    when insideDocument:
      closeEverything()
    return
  else:
    discard

template handleObjectEnd(nextState: FastParseState) {.dirty.} =
  if ancestry.len == 0:
    state = fpExpectDocEnd
  else:
    level = ancestry.pop()
    state = nextState
    tag = yTagQuestionMark
    anchor = yAnchorNone
    case level.kind
    of fplMapKey:
      level.kind = fplMapValue
    of fplMapValue:
      level.kind = fplMapKey
    of fplSequence:
      discard
    of fplUnknown, fplScalar:
      assert(false)
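
# handleObjectEnd (above) also toggles the parent's expectation: a finished
# node in key position makes the mapping expect a value next and vice versa,
# while sequences simply await the next item.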

template handleObjectStart(k: YamlStreamEventKind) {.dirty.} =
  assert(level.kind == fplUnknown)
  when k == yamlStartMap:
    yield startMapEvent(tag, anchor)
    debug("started map at " & (if level.indentation == -1: $indentation else:
          $level.indentation))
    level.kind = fplMapKey
  else:
    yield startSeqEvent(tag, anchor)
    debug("started sequence at " & (if level.indentation == -1: $indentation
          else: $level.indentation))
    level.kind = fplSequence
  tag = yTagQuestionMark
  anchor = yAnchorNone
  if level.indentation == -1:
    level.indentation = indentation
  ancestry.add(level)
  level = FastParseLevel(kind: fplUnknown, indentation: -1)

template closeMoreIndentedLevels() {.dirty.} =
  while ancestry.len > 0:
    let parent = ancestry[ancestry.high]
    if parent.indentation >= indentation:
      debug("Closing because parent.indentation (" & $parent.indentation &
            ") >= indentation(" & $indentation & ")")
      yieldLevelEnd()
      handleObjectEnd(fpBlockAfterObject)
    else:
      break

template closeEverything() {.dirty.} =
  indentation = 0
  closeMoreIndentedLevels()
  yieldLevelEnd()
  yield endDocEvent()

template handleBlockSequenceIndicator() {.dirty.} =
  case level.kind
  of fplUnknown:
    handleObjectStart(yamlStartSequence)
  of fplSequence:
    if level.indentation != indentation:
      raiseError("Invalid indentation of block sequence indicator",
                 lexer.bufpos)
    ancestry.add(level)
    level = FastParseLevel(kind: fplUnknown, indentation: -1)
  else:
    raiseError("Illegal sequence item in map")
  lexer.skipWhitespace()
  indentation = lexer.getColNumber(lexer.bufpos)

template handleMapKeyIndicator() {.dirty.} =
  case level.kind
  of fplUnknown:
    handleObjectStart(yamlStartMap)
  of fplMapValue:
    if level.indentation != indentation:
      raiseError("Invalid indentation of map key indicator",
                 lexer.bufpos)
    yield scalarEvent("", yTagQuestionMark, yAnchorNone)
    level.kind = fplMapKey
    ancestry.add(level)
    level = FastParseLevel(kind: fplUnknown, indentation: -1)
  of fplMapKey:
    if level.indentation != indentation:
      raiseError("Invalid indentation of map key indicator",
                 lexer.bufpos)
    ancestry.add(level)
    level = FastParseLevel(kind: fplUnknown, indentation: -1)
  of fplSequence:
    raiseError("Unexpected map key indicator (expected '- ')")
  of fplScalar:
    raiseError("Unexpected map key indicator (expected multiline scalar end)")
  lexer.skipWhitespace()
  indentation = lexer.getColNumber(lexer.bufpos)

template handleMapValueIndicator() {.dirty.} =
  case level.kind
  of fplUnknown:
    if level.indentation == -1:
      handleObjectStart(yamlStartMap)
      yield scalarEvent("", yTagQuestionMark, yAnchorNone)
    else:
      yield scalarEvent("", tag, anchor)
      tag = yTagQuestionMark
      anchor = yAnchorNone
    ancestry[ancestry.high].kind = fplMapValue
  of fplMapKey:
    if level.indentation != indentation:
      raiseError("Invalid indentation of map value indicator",
                 lexer.bufpos)
    yield scalarEvent("", yTagQuestionMark, yAnchorNone)
    level.kind = fplMapValue
    ancestry.add(level)
    level = FastParseLevel(kind: fplUnknown, indentation: -1)
  of fplMapValue:
    if level.indentation != indentation:
      raiseError("Invalid indentation of map value indicator",
                 lexer.bufpos)
    ancestry.add(level)
    level = FastParseLevel(kind: fplUnknown, indentation: -1)
  of fplSequence:
    raiseError("Unexpected map value indicator (expected '- ')")
  of fplScalar:
    raiseError("Unexpected map value indicator (expected multiline scalar end)")
  lexer.skipWhitespace()
  indentation = lexer.getColNumber(lexer.bufpos)

template initDocValues() {.dirty.} =
  shorthands = initTable[string, string]()
  anchors = initTable[string, AnchorId]()
  shorthands["!"] = "!"
  shorthands["!!"] = "tag:yaml.org,2002:"
  nextAnchorId = 0.AnchorId
  level = FastParseLevel(kind: fplUnknown, indentation: -1)
  tag = yTagQuestionMark
  anchor = yAnchorNone
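
# The two shorthand entries registered above are the defaults every document
# starts with: "!" expands to itself (local tags) and "!!" to the YAML core
# schema prefix "tag:yaml.org,2002:".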

template handleTagHandle() {.dirty.} =
  if level.kind != fplUnknown:
    raiseError("Unexpected token", lexer.bufpos)
  if tag != yTagQuestionMark:
    raiseError("Only one tag handle is allowed per node")
  content = ""
  var
    shorthandEnd: int
    tagUri: string
  lexer.tagHandle(content, shorthandEnd)
  if shorthandEnd != -1:
    try:
      let prefix = shorthands[content[0..shorthandEnd]]
      tagUri = prefix & content[shorthandEnd + 1 .. ^1]
    except KeyError:
      raiseError("Undefined tag shorthand: " & content[0..shorthandEnd])
  else:
    shallowCopy(tagUri, content)
  try:
    tag = tagLib.tags[tagUri]
  except KeyError:
    tag = tagLib.registerUri(tagUri)

template handleAnchor() {.dirty.} =
  if level.kind != fplUnknown:
    raiseError("Unexpected token", lexer.bufpos)
  if anchor != yAnchorNone:
    raiseError("Only one anchor is allowed per node", lexer.bufpos)
  content = ""
  lexer.anchorName(content)
  anchor = nextAnchorId
  anchors[content] = anchor
  nextAnchorId = cast[AnchorId](cast[int](nextAnchorId) + 1)

template handleAlias() {.dirty.} =
  if level.kind != fplUnknown:
    raiseError("Unexpected token", lexer.bufpos)
  if anchor != yAnchorNone or tag != yTagQuestionMark:
    raiseError("Alias may not have anchor or tag")
  content = ""
  lexer.anchorName(content)
  var id: AnchorId
  try:
    id = anchors[content]
  except KeyError:
    raiseError("Unknown anchor")
  yield aliasEvent(id)
  handleObjectEnd(fpBlockAfterObject)

template leaveFlowLevel() {.dirty.} =
  flowdepth.inc(-1)
  if flowdepth == 0:
    yieldLevelEnd()
    handleObjectEnd(fpBlockAfterObject)
  else:
    yieldLevelEnd()
    handleObjectEnd(fpFlowAfterObject)

template handlePossibleMapStart() {.dirty.} =
  if level.indentation == -1:
    var flowDepth = 0
    for p in countup(lexer.bufpos, lexer.bufpos + 1024):
      case lexer.buf[p]
      of ':':
        if flowDepth == 0 and lexer.buf[p + 1] in spaceOrLineEnd:
          handleObjectStart(yamlStartMap)
          break
      of lineEnd:
        break
      of '[', '{':
        flowDepth.inc()
      of '}', ']':
        flowDepth.inc(-1)
      of '?':
        if flowDepth == 0: break
      of '#':
        if lexer.buf[p - 1] in space:
          break
      else:
        discard
    if level.indentation == -1:
      level.indentation = indentation
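
# The 1024-character window above is not arbitrary: YAML requires an implicit
# key to fit on a single line and to be at most 1024 characters long, so a
# bounded lookahead for a ':' followed by whitespace outside flow brackets
# suffices to decide whether this node starts a block mapping.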

template handleBlockItemStart() {.dirty.} =
  case level.kind
  of fplUnknown:
    handlePossibleMapStart()
  of fplSequence:
    raiseError("Unexpected token (expected block sequence indicator)",
               lexer.bufpos)
  of fplMapKey:
    ancestry.add(level)
    level = FastParseLevel(kind: fplUnknown, indentation: indentation)
  of fplMapValue:
    yield scalarEvent("", tag, anchor)
    tag = yTagQuestionMark
    anchor = yAnchorNone
    level.kind = fplMapKey
    ancestry.add(level)
    level = FastParseLevel(kind: fplUnknown, indentation: indentation)
  of fplScalar:
    assert(false)

template finishLine(lexer: FastLexer) =
  debug("lex: finishLine")
  while lexer.buf[lexer.bufpos] notin lineEnd:
    lexer.bufpos.inc()

template skipWhitespace(lexer: FastLexer) =
  debug("lex: skipWhitespace")
  while lexer.buf[lexer.bufpos] in space: lexer.bufpos.inc()

template skipWhitespaceAndNewlines(lexer: FastLexer) =
  debug("lex: skipWhitespaceAndNewLines")
  while true:
    case lexer.buf[lexer.bufpos]
    of space:
      lexer.bufpos.inc()
    of '\x0A':
      lexer.bufpos = lexer.handleLF(lexer.bufpos)
    of '\c':
      lexer.bufpos = lexer.handleCR(lexer.bufpos)
    else:
      break

template skipIndentation(lexer: FastLexer) =
  debug("lex: skipIndentation")
  while lexer.buf[lexer.bufpos] == ' ': lexer.bufpos.inc()

template directiveName(lexer: FastLexer, directive: var LexedDirective) =
  debug("lex: directiveName")
  directive = ldUnknown
  lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
  lexer.bufpos.inc()
  if lexer.buf[lexer.bufpos] == 'Y':
    lexer.bufpos.inc()
    if lexer.buf[lexer.bufpos] == 'A':
      lexer.bufpos.inc()
      if lexer.buf[lexer.bufpos] == 'M':
        lexer.bufpos.inc()
        if lexer.buf[lexer.bufpos] == 'L':
          lexer.bufpos.inc()
          if lexer.buf[lexer.bufpos] in spaceOrLineEnd:
            directive = ldYaml
  elif lexer.buf[lexer.bufpos] == 'T':
    lexer.bufpos.inc()
    if lexer.buf[lexer.bufpos] == 'A':
      lexer.bufpos.inc()
      if lexer.buf[lexer.bufpos] == 'G':
        lexer.bufpos.inc()
        if lexer.buf[lexer.bufpos] in spaceOrLineEnd:
          directive = ldTag
  while lexer.buf[lexer.bufpos] notin spaceOrLineEnd:
    lexer.bufpos.inc()
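
# The directive name is matched character by character; anything that is not
# exactly "YAML" or "TAG" followed by whitespace is reported as ldUnknown,
# and the trailing loop skips the remainder of the name.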

template yamlVersion(lexer: FastLexer, o: var string) =
  debug("lex: yamlVersion")
  while lexer.buf[lexer.bufpos] in space:
    lexer.bufpos.inc()
  var c = lexer.buf[lexer.bufpos]
  if c notin digits:
    raiseError("Invalid YAML version number")
  o.add(c)
  lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
  lexer.bufpos.inc()
  c = lexer.buf[lexer.bufpos]
  while c in digits:
    lexer.bufpos.inc()
    o.add(c)
    c = lexer.buf[lexer.bufpos]
  if lexer.buf[lexer.bufpos] != '.':
    raiseError("Invalid YAML version number")
  o.add('.')
  lexer.bufpos.inc()
  c = lexer.buf[lexer.bufpos]
  if c notin digits:
    raiseError("Invalid YAML version number")
  o.add(c)
  lexer.bufpos.inc()
  c = lexer.buf[lexer.bufpos]
  while c in digits:
    o.add(c)
    lexer.bufpos.inc()
    c = lexer.buf[lexer.bufpos]
  if lexer.buf[lexer.bufpos] notin spaceOrLineEnd:
    raiseError("Invalid YAML version number")

template lineEnding(lexer: FastLexer) =
  debug("lex: lineEnding")
  if lexer.buf[lexer.bufpos] notin lineEnd:
    while lexer.buf[lexer.bufpos] in space:
      lexer.bufpos.inc()
    if lexer.buf[lexer.bufpos] in lineEnd:
      discard
    elif lexer.buf[lexer.bufpos] == '#':
      while lexer.buf[lexer.bufpos] notin lineEnd:
        lexer.bufpos.inc()
    else:
      raiseError("Unexpected token (expected comment or line end)",
                 lexer.bufpos)

template tagShorthand(lexer: FastLexer, shorthand: var string) =
  debug("lex: tagShorthand")
  while lexer.buf[lexer.bufpos] in space:
    lexer.bufpos.inc()
  if lexer.buf[lexer.bufpos] != '!':
    raiseError("Invalid tag shorthand")
  shorthand.add('!')
  lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
  lexer.bufpos.inc()
  var c = lexer.buf[lexer.bufpos]
  if c in spaceOrLineEnd:
    discard
  else:
    while c != '!':
      case c
      of 'a' .. 'z', 'A' .. 'Z', '0' .. '9', '-':
        shorthand.add(c)
        lexer.bufpos.inc()
        c = lexer.buf[lexer.bufpos]
      else:
        raiseError("Illegal character in tag shorthand", lexer.bufpos)
    shorthand.add(c)
    lexer.bufpos.inc()
  if lexer.buf[lexer.bufpos] notin spaceOrLineEnd:
    raiseError("Missing space after tag shorthand", lexer.bufpos)

template tagUri(lexer: FastLexer, uri: var string) =
  debug("lex: tagUri")
  while lexer.buf[lexer.bufpos] in space:
    lexer.bufpos.inc()
  lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
  var c = lexer.buf[lexer.bufpos]
  while c notin spaceOrLineEnd:
    case c
    of 'a' .. 'z', 'A' .. 'Z', '0' .. '9', '#', ';', '/', '?', ':', '@', '&',
       '-', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')':
      uri.add(c)
      lexer.bufpos.inc()
      c = lexer.buf[lexer.bufpos]
    else:
      raiseError("Invalid tag uri")

template directivesEnd(lexer: FastLexer,
                       token: var LexedPossibleDirectivesEnd) =
  debug("lex: directivesEnd")
  lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
  var p = lexer.bufpos + 1
  case lexer.buf[p]
  of '-':
    p.inc()
    if lexer.buf[p] == '-':
      p.inc()
      if lexer.buf[p] in spaceOrLineEnd:
        token = lpdeDirectivesEnd
      else:
        token = lpdeScalarContent
    else:
      token = lpdeScalarContent
  of spaceOrLineEnd:
    token = lpdeSequenceItem
  else:
    token = lpdeScalarContent

template documentEnd(lexer: var FastLexer, isDocumentEnd: var bool) =
  lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
  var p = lexer.bufpos + 1
  if lexer.buf[p] == '.':
    p.inc()
    if lexer.buf[p] == '.':
      p.inc()
      if lexer.buf[p] in spaceOrLineEnd:
        isDocumentEnd = true
      else:
        isDocumentEnd = false
    else:
      isDocumentEnd = false
  else:
    isDocumentEnd = false

template singleQuotedScalar(lexer: FastLexer, content: var string) =
  debug("lex: singleQuotedScalar")
  lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
  lexer.bufpos.inc()
  while true:
    case lexer.buf[lexer.bufpos]
    of '\'':
      lexer.bufpos.inc()
      if lexer.buf[lexer.bufpos] == '\'':
        content.add('\'')
      else:
        break
    of EndOfFile:
      raiseError("Unfinished single quoted string")
    else:
      content.add(lexer.buf[lexer.bufpos])
    lexer.bufpos.inc()

proc unicodeSequence(lexer: var FastLexer, length: int):
      string {.raises: [YamlParserError].} =
  debug("lex: unicodeSequence")
  var unicodeChar = 0.Rune
  let start = lexer.bufpos - 1
  for i in countup(0, length - 1):
    lexer.bufpos.inc()
    let
      digitPosition = length - i - 1
      c = lexer.buf[lexer.bufpos]
    case c
    of EndOfFile:
      raiseError("Unfinished unicode escape sequence", start)
    of '0' .. '9':
      unicodeChar = unicodeChar or
          (cast[int](c) - 0x30) shl (digitPosition * 4)
    of 'A' .. 'F':
      unicodeChar = unicodeChar or
          (cast[int](c) - 0x37) shl (digitPosition * 4)
    of 'a' .. 'f':
      unicodeChar = unicodeChar or
          (cast[int](c) - 0x57) shl (digitPosition * 4)
    else:
      raiseError("Invalid character in unicode escape sequence", lexer.bufpos)
  return toUTF8(unicodeChar)
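
# Hex digits are decoded via ASCII offsets: '0'..'9' start at 0x30, 'A'..'F'
# at 0x41 (hence the - 0x37) and 'a'..'f' at 0x61 (hence the - 0x57); each
# digit is shifted into its nibble of the code point.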

template processDoubleQuotedWhitespace(newlines: var int) {.dirty.} =
  var
    after = ""
  block outer:
    while true:
      case lexer.buf[lexer.bufpos]
      of ' ', '\t':
        after.add(lexer.buf[lexer.bufpos])
      of '\x0A':
        lexer.bufpos = lexer.handleLF(lexer.bufpos)
        break
      of '\c':
        lexer.bufpos = lexer.handleCR(lexer.bufpos)
        break
      else:
        content.add(after)
        break outer
      lexer.bufpos.inc()
    while true:
      case lexer.buf[lexer.bufpos]
      of ' ', '\t':
        discard
      of '\x0A':
        lexer.bufpos = lexer.handleLF(lexer.bufpos)
        newlines.inc()
        continue
      of '\c':
        lexer.bufpos = lexer.handleCR(lexer.bufpos)
        newlines.inc()
        continue
      else:
        if newlines == 0:
          discard
        elif newlines == 1:
          content.add(' ')
        else:
          content.add(repeat('\x0A', newlines - 1))
        break
      lexer.bufpos.inc()

template doubleQuotedScalar(lexer: FastLexer, content: var string) =
  debug("lex: doubleQuotedScalar")
  lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
  lexer.bufpos.inc()
  while true:
    var c = lexer.buf[lexer.bufpos]
    case c
    of EndOfFile:
      raiseError("Unfinished double quoted string")
    of '\\':
      lexer.bufpos.inc()
      case lexer.buf[lexer.bufpos]
      of EndOfFile:
        raiseError("Unfinished escape sequence")
      of '0': content.add('\0')
      of 'a': content.add('\x07')
      of 'b': content.add('\x08')
      of '\t', 't': content.add('\t')
      of 'n': content.add('\x0A')
      of 'v': content.add('\v')
      of 'f': content.add('\f')
      of 'r': content.add('\r')
      of 'e': content.add('\e')
      of ' ': content.add(' ')
      of '"': content.add('"')
      of '/': content.add('/')
      of '\\': content.add('\\')
      of 'N': content.add(UTF8NextLine)
      of '_': content.add(UTF8NonBreakingSpace)
      of 'L': content.add(UTF8LineSeparator)
      of 'P': content.add(UTF8ParagraphSeparator)
      of 'x': content.add(lexer.unicodeSequence(2))
      of 'u': content.add(lexer.unicodeSequence(4))
      of 'U': content.add(lexer.unicodeSequence(8))
      of '\x0A', '\c':
        var newlines = 0
        processDoubleQuotedWhitespace(newlines)
        continue
      else:
        raiseError("Illegal character in escape sequence")
    of '"':
      lexer.bufpos.inc()
      break
    of '\x0A', '\c', '\t', ' ':
      var newlines = 1
      processDoubleQuotedWhitespace(newlines)
      continue
    else:
      content.add(c)
    lexer.bufpos.inc()

proc isPlainSafe(lexer: FastLexer, index: int, context: YamlContext): bool =
  case lexer.buf[index]
  of spaceOrLineEnd:
    result = false
  of flowIndicators:
    result = context in [cBlockIn, cBlockOut, cBlockKey]
  else:
    result = true
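
# A character is "plain safe" if a plain scalar may continue across it:
# whitespace and line ends always terminate the scalar, while the flow
# indicators [, ], {, } and , do so only in flow context; in block context
# they are ordinary content.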

template plainScalar(lexer: FastLexer, content: var string,
                     context: YamlContext) =
  debug("lex: plainScalar")
  lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
  content.add(lexer.buf[lexer.bufpos])
  block outer:
    while true:
      lexer.bufpos.inc()
      let c = lexer.buf[lexer.bufpos]
      case c
      of lineEnd:
        break
      of ' ', '\t':
        var after = "" & c
        while true:
          lexer.bufpos.inc()
          let c2 = lexer.buf[lexer.bufpos]
          case c2
          of ' ', '\t':
            after.add(c2)
          of lineEnd:
            break outer
          of ':':
            if lexer.isPlainSafe(lexer.bufpos + 1, context):
              content.add(after & ':')
              break
            else:
              break outer
          of '#':
            break outer
          else:
            content.add(after)
            content.add(c2)
            break
      of flowIndicators:
        if context in [cBlockOut, cBlockIn, cBlockKey]:
          content.add(c)
        else:
          break
      of ':':
        if lexer.isPlainSafe(lexer.bufpos + 1, context):
          content.add(':')
        else:
          break outer
      of '#':
        break outer
      else:
        content.add(c)

template continueMultilineScalar() {.dirty.} =
  content.add(if newlines == 1: " " else: repeat('\x0A', newlines - 1))
  lexer.plainScalar(content, cBlockOut)
  state = fpBlockAfterPlainScalar
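
# Folding rule for multiline plain scalars: a single line break is folded
# into one space, n consecutive breaks become n - 1 line feeds.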

template handleFlowPlainScalar() {.dirty.} =
  content = ""
  lexer.plainScalar(content, cFlowOut)
  if lexer.buf[lexer.bufpos] in ['{', '}', '[', ']', ',', ':', '#']:
    discard
  else:
    var newlines = 0
    while true:
      case lexer.buf[lexer.bufpos]
      of ':':
        if lexer.isPlainSafe(lexer.bufpos + 1, cFlowOut):
          if newlines == 1:
            content.add(' ')
            newlines = 0
          elif newlines > 1:
            content.add(repeat('\x0A', newlines - 1))
            newlines = 0
          lexer.plainScalar(content, cFlowOut)
        elif explicitFlowKey:
          break
        else:
          raiseError("Multiline scalar is not allowed as implicit key")
      of '#', EndOfFile:
        break
      of '\x0A':
        lexer.bufpos = lexer.handleLF(lexer.bufpos)
        newlines.inc()
      of '\c':
        lexer.bufpos = lexer.handleCR(lexer.bufpos)
        newlines.inc()
      of flowIndicators:
        break
      of ' ', '\t':
        lexer.skipWhitespace()
      else:
        if newlines == 1:
          content.add(' ')
          newlines = 0
        elif newlines > 1:
          content.add(repeat('\x0A', newlines - 1))
          newlines = 0
        lexer.plainScalar(content, cFlowOut)
  yield scalarEvent(content, tag, anchor)
  handleObjectEnd(fpFlowAfterObject)

template ensureCorrectIndentation() {.dirty.} =
  if level.indentation != indentation:
    raiseError("Invalid indentation (expected indentation for " & $level.kind &
               ": " & $level.indentation & ")", lexer.bufpos)

template tagHandle(lexer: var FastLexer, content: var string,
                   shorthandEnd: var int) =
  debug("lex: tagHandle")
  shorthandEnd = 0
  lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
  content.add(lexer.buf[lexer.bufpos])
  var i = 0
  while true:
    lexer.bufpos.inc()
    i.inc()
    let c = lexer.buf[lexer.bufpos]
    case c
    of spaceOrLineEnd:
      if shorthandEnd == -1:
        raiseError("Unclosed verbatim tag")
      break
    of '!':
      if shorthandEnd == -1 and i == 2:
        content.add(c)
      elif shorthandEnd != 0:
        raiseError("Illegal character in tag suffix", lexer.bufpos)
      else:
        shorthandEnd = i
        content.add(c)
    of 'a' .. 'z', 'A' .. 'Z', '0' .. '9', '#', ';', '/', '?', ':', '@', '&',
       '-', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')':
      content.add(c)
    of '<':
      if i == 1:
        shorthandEnd = -1
        content = ""
      else:
        raiseError("Illegal character in tag handle", lexer.bufpos)
    of '>':
      if shorthandEnd == -1:
        lexer.bufpos.inc()
        if lexer.buf[lexer.bufpos] notin spaceOrLineEnd:
          raiseError("Missing space after verbatim tag handle", lexer.bufpos)
        break
      else:
        raiseError("Illegal character in tag handle", lexer.bufpos)
    else:
      raiseError("Illegal character in tag handle", lexer.bufpos)

template anchorName(lexer: FastLexer, content: var string) =
  debug("lex: anchorName")
  lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
  while true:
    lexer.bufpos.inc()
    let c = lexer.buf[lexer.bufpos]
    case c
    of spaceOrLineEnd, '[', ']', '{', '}', ',':
      break
    else:
      content.add(c)

template blockScalar(lexer: FastLexer, content: var string,
                     stateAfter: var FastParseState) =
  type ChompType = enum
    ctKeep, ctClip, ctStrip
  var
    literal: bool
    blockIndent = 0
    chomp: ChompType = ctClip
    detectedIndent = false
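
  # '|' starts a literal block scalar (line breaks kept verbatim), '>' a
  # folded one. The chomping indicator controls trailing line breaks:
  # ctClip keeps exactly one, ctKeep retains all of them, ctStrip drops
  # them. An explicit digit fixes the content indentation relative to the
  # parent; otherwise it is detected from the first non-empty line.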

  case lexer.buf[lexer.bufpos]
  of '|':
    literal = true
  of '>':
    literal = false
  else:
    assert(false)

  while true:
    lexer.bufpos.inc()
    case lexer.buf[lexer.bufpos]
    of '+':
      if chomp != ctClip:
        raiseError("Only one chomping indicator is allowed", lexer.bufpos)
      chomp = ctKeep
    of '-':
      if chomp != ctClip:
        raiseError("Only one chomping indicator is allowed", lexer.bufpos)
      chomp = ctStrip
    of '1'..'9':
      if detectedIndent:
        raiseError("Only one indentation indicator is allowed", lexer.bufpos)
      blockIndent = int(lexer.buf[lexer.bufpos]) - int('\x30')
      detectedIndent = true
    of spaceOrLineEnd:
      break
    else:
      raiseError("Illegal character in block scalar header", lexer.bufpos)
  lexer.lineEnding()
  case lexer.buf[lexer.bufpos]
  of '\x0A':
    lexer.bufpos = lexer.handleLF(lexer.bufpos)
  of '\c':
    lexer.bufpos = lexer.handleCR(lexer.bufpos)
  of EndOfFile:
    raiseError("Missing content of block scalar") # TODO: is this correct?
  else:
    assert(false)
  var newlines = 0
  let parentIndent = ancestry[ancestry.high].indentation
  content = ""
  block outer:
    while true:
      block inner:
        for i in countup(1, parentIndent):
          case lexer.buf[lexer.bufpos]
          of ' ':
            discard
          of '\x0A':
            lexer.bufpos = lexer.handleLF(lexer.bufpos)
            newlines.inc()
            break inner
          of '\c':
            lexer.bufpos = lexer.handleCR(lexer.bufpos)
            newlines.inc()
            break inner
          else:
            stateAfter = if i == 1: fpBlockLineStart else: fpBlockObjectStart
            break outer
          lexer.bufpos.inc()
        if detectedIndent:
          for i in countup(1, blockIndent):
            case lexer.buf[lexer.bufpos]
            of ' ':
              discard
            of '\x0A':
              lexer.bufpos = lexer.handleLF(lexer.bufpos)
              newlines.inc()
              break inner
            of '\c':
              lexer.bufpos = lexer.handleCR(lexer.bufpos)
              newlines.inc()
              break inner
            of EndOfFile:
              stateAfter = fpBlockLineStart
              break outer
            of '#':
              lexer.lineEnding()
              case lexer.buf[lexer.bufpos]
              of '\x0A':
                lexer.bufpos = lexer.handleLF(lexer.bufpos)
              of '\c':
                lexer.bufpos = lexer.handleCR(lexer.bufpos)
              else: discard
              stateAfter = fpBlockLineStart
              break outer
            else:
              raiseError("The text is less indented than expected")
            lexer.bufpos.inc()
        else:
          while true:
            case lexer.buf[lexer.bufpos]
            of ' ':
              discard
            of '\x0A':
              lexer.bufpos = lexer.handleLF(lexer.bufpos)
              newlines.inc()
              break inner
            of '\c':
              lexer.bufpos = lexer.handleCR(lexer.bufpos)
              newlines.inc()
              break inner
            of EndOfFile:
              stateAfter = fpBlockLineStart
              break outer
            else:
              blockIndent = lexer.getColNumber(lexer.bufpos) - parentIndent
              detectedIndent = true
              break
            lexer.bufpos.inc()
        case lexer.buf[lexer.bufpos]
        of '\x0A':
          lexer.bufpos = lexer.handleLF(lexer.bufpos)
          newlines.inc()
          break inner
        of '\c':
          lexer.bufpos = lexer.handleCR(lexer.bufpos)
          newlines.inc()
          break inner
        of EndOfFile:
          stateAfter = fpBlockLineStart
          break outer
        else:
          discard
        if newlines > 0:
          if literal:
            content.add(repeat('\x0A', newlines))
          elif newlines == 1:
            content.add(' ')
          else:
            content.add(repeat('\x0A', newlines - 1))
          newlines = 0
        while true:
          let c = lexer.buf[lexer.bufpos]
          case c
          of '\x0A':
            lexer.bufpos = lexer.handleLF(lexer.bufpos)
            newlines.inc()
            break inner
          of '\c':
            lexer.bufpos = lexer.handleCR(lexer.bufpos)
            newlines.inc()
            break inner
          of EndOfFile:
            stateAfter = fpBlockLineStart
            break outer
          else:
            content.add(c)
          lexer.bufpos.inc()
  case chomp
  of ctClip:
    content.add('\x0A')
  of ctKeep:
    content.add(repeat('\x0A', newlines))
  of ctStrip:
    discard

proc fastparse*(tagLib: TagLibrary, s: Stream): YamlStream =
  result = iterator(): YamlStreamEvent =
    var
      lexer: FastLexer
      state = fpInitial
      shorthands: Table[string, string]
      anchors: Table[string, AnchorId]
      nextAnchorId: AnchorId
      content: string
      tag: TagId
      anchor: AnchorId
      ancestry = newSeq[FastParseLevel]()
      level: FastParseLevel
      indentation: int
      newlines: int
      flowdepth: int = 0
      explicitFlowKey: bool

    lexer.open(s)
    initDocValues()
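
    # The YamlStream returned to the caller is this closure iterator: every
    # `yield` emits one event (document, mapping or sequence delimiters,
    # scalars, aliases) and parsing resumes here when the next event is
    # requested.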

    while true:
      case state
      of fpInitial:
        debug("state: initial")
        case lexer.buf[lexer.bufpos]
        of '%':
          var ld: LexedDirective
          lexer.directiveName(ld)
          case ld
          of ldYaml:
            var version = ""
            lexer.yamlVersion(version)
            if version != "1.2":
              echo "version is not 1.2!"
              # TODO: warning (unknown version)
            lexer.lineEnding()
            handleLineEnd(false)
          of ldTag:
            var shorthand, uri = ""
            lexer.tagShorthand(shorthand)
            lexer.tagUri(uri)
            shorthands.add(shorthand, uri)
            lexer.lineEnding()
            handleLineEnd(false)
          of ldUnknown:
            # TODO: warning (unknown directive)
            lexer.finishLine()
            handleLineEnd(false)
        of ' ', '\t':
          while true:
            lexer.bufpos.inc()
            case lexer.buf[lexer.bufpos]
            of ' ', '\t':
              discard
            of '\x0A':
              lexer.bufpos = lexer.handleLF(lexer.bufpos)
              break
            of '\c':
              lexer.bufpos = lexer.handleCR(lexer.bufpos)
              break
            of '#', EndOfFile:
              lexer.lineEnding()
              handleLineEnd(false)
              break
            else:
              indentation = lexer.getColNumber(lexer.bufpos)
              yield startDocEvent()
              state = fpBlockObjectStart
              break
        of '\x0A':
          lexer.bufpos = lexer.handleLF(lexer.bufpos)
        of '\c':
          lexer.bufpos = lexer.handleCR(lexer.bufpos)
        of EndOfFile:
          return
        of '#':
          lexer.lineEnding()
          handleLineEnd(false)
        of '-':
          var token: LexedPossibleDirectivesEnd
          lexer.directivesEnd(token)
          yield startDocEvent()
          case token
          of lpdeDirectivesEnd:
            lexer.bufpos.inc(3)
            state = fpBlockObjectStart
          of lpdeSequenceItem:
            indentation = 0
            lexer.bufpos.inc()
            handleBlockSequenceIndicator()
            state = fpBlockObjectStart
          of lpdeScalarContent:
            content = ""
            lexer.plainScalar(content, cBlockOut)
            state = fpBlockAfterPlainScalar
        else:
          yield startDocEvent()
          state = fpBlockLineStart

      of fpBlockLineStart:
        debug("state: blockLineStart")
        case lexer.buf[lexer.bufpos]
        of '-':
          var token: LexedPossibleDirectivesEnd
          lexer.directivesEnd(token)
          case token
          of lpdeDirectivesEnd:
            lexer.bufpos.inc(3)
            closeEverything()
            initDocValues()
            yield startDocEvent()
            state = fpBlockObjectStart
          of lpdeSequenceItem:
            indentation = 0
            closeMoreIndentedLevels()
            lexer.bufpos.inc()
            handleBlockSequenceIndicator()
            state = fpBlockObjectStart
          of lpdeScalarContent:
            case level.kind
            of fplScalar:
              continueMultilineScalar()
            of fplUnknown:
              handlePossibleMapStart()
            else:
              ensureCorrectIndentation()
              ancestry.add(level)
              level = FastParseLevel(kind: fplUnknown, indentation: -1)
              content = ""
              lexer.plainScalar(content, cBlockOut)
              state = fpBlockAfterPlainScalar
        of '.':
          var isDocumentEnd: bool
          lexer.documentEnd(isDocumentEnd)
          if isDocumentEnd:
            lexer.bufpos.inc(3)
            lexer.lineEnding()
            handleLineEnd(true)
            closeEverything()
            initDocValues()
            state = fpInitial
          else:
            indentation = 0
            closeMoreIndentedLevels()
            case level.kind
            of fplUnknown:
              handlePossibleMapStart()
            of fplScalar:
              continueMultilineScalar()
            else:
              ensureCorrectIndentation()
              ancestry.add(level)
              level = FastParseLevel(kind: fplUnknown, indentation: -1)
              content = ""
              lexer.plainScalar(content, cBlockOut)
              state = fpBlockAfterPlainScalar
        of ' ':
          lexer.skipIndentation()
          if lexer.buf[lexer.bufpos] in ['\t', '\x0A', '\c', '#']:
            lexer.lineEnding()
            handleLineEnd(true)
          else:
            indentation = lexer.getColNumber(lexer.bufpos)
            closeMoreIndentedLevels()
            case level.kind
            of fplScalar:
              state = fpBlockContinueScalar
            of fplUnknown:
              state = fpBlockObjectStart
            else:
              ensureCorrectIndentation()
              state = fpBlockObjectStart
        else:
          indentation = 0
          closeMoreIndentedLevels()
          case level.kind
          of fplScalar:
            state = fpBlockContinueScalar
          of fplUnknown:
            state = fpBlockObjectStart
          else:
            ensureCorrectIndentation()
            state = fpBlockObjectStart

      of fpBlockContinueScalar:
        debug("state: blockContinueScalar")
        lexer.skipWhitespace()
        case lexer.buf[lexer.bufpos]
        of '\x0A':
          newlines.inc()
          lexer.bufpos = lexer.handleLF(lexer.bufpos)
          state = fpBlockLineStart
        of '\c':
          newlines.inc()
          lexer.bufpos = lexer.handleCR(lexer.bufpos)
          state = fpBlockLineStart
        of ':':
          if lexer.isPlainSafe(lexer.bufpos + 1, cBlockOut):
            continueMultilineScalar()
          else:
            raiseError("Unexpected token", lexer.bufpos)
        of '#':
          yield scalarEvent(content, tag, anchor)
          lexer.lineEnding()
          handleLineEnd(true)
          handleObjectEnd(fpBlockLineStart)
        else:
          continueMultilineScalar()

      of fpBlockAfterPlainScalar:
        debug("state: blockAfterPlainScalar")
        lexer.skipWhitespace()
        case lexer.buf[lexer.bufpos]
        of '\x0A':
          if level.kind notin [fplUnknown, fplScalar]:
            raiseError("Unexpected scalar")
          newlines = 1
          level.kind = fplScalar
          lexer.bufpos = lexer.handleLF(lexer.bufpos)
          state = fpBlockLineStart
        of '\c':
          if level.kind notin [fplUnknown, fplScalar]:
            raiseError("Unexpected scalar")
          newlines = 1
          level.kind = fplScalar
          lexer.bufpos = lexer.handleCR(lexer.bufpos)
          state = fpBlockLineStart
        else:
          yield scalarEvent(content, tag, anchor)
          handleObjectEnd(fpBlockAfterObject)

      of fpBlockAfterObject:
        debug("state: blockAfterObject")
        lexer.skipWhitespace()
        case lexer.buf[lexer.bufpos]
        of EndOfFile:
          closeEverything()
          break
        of '\x0A':
          state = fpBlockLineStart
          lexer.bufpos = lexer.handleLF(lexer.bufpos)
        of '\c':
          state = fpBlockLineStart
          lexer.bufpos = lexer.handleCR(lexer.bufpos)
        of ':':
          case level.kind
          of fplUnknown:
            handleObjectStart(yamlStartMap)
          of fplMapKey:
            yield scalarEvent("", yTagQuestionMark, yAnchorNone)
            level.kind = fplMapValue
            ancestry.add(level)
            level = FastParseLevel(kind: fplUnknown, indentation: -1)
          of fplMapValue:
            level.kind = fplMapValue
            ancestry.add(level)
            level = FastParseLevel(kind: fplUnknown, indentation: -1)
          of fplSequence:
            raiseError("Illegal token (expected sequence item)")
          of fplScalar:
            raiseError("Multiline scalars may not be implicit map keys")
          lexer.bufpos.inc()
          lexer.skipWhitespace()
          indentation = lexer.getColNumber(lexer.bufpos)
          state = fpBlockObjectStart
        of '#':
          lexer.lineEnding()
          handleLineEnd(true)
          handleObjectEnd(fpBlockLineStart)
        else:
          raiseError("Illegal token (expected ':', comment or line end)",
                     lexer.bufpos)

      of fpBlockObjectStart:
        debug("state: blockObjectStart")
        lexer.skipWhitespace()
        indentation = lexer.getColNumber(lexer.bufpos)
        let objectStart = lexer.getColNumber(lexer.bufpos)
        case lexer.buf[lexer.bufpos]
        of '\x0A':
          lexer.bufpos = lexer.handleLF(lexer.bufpos)
          state = fpBlockLineStart
          level.indentation = -1
        of '\c':
          lexer.bufpos = lexer.handleCR(lexer.bufpos)
          state = fpBlockLineStart
          level.indentation = -1
        of EndOfFile:
          closeEverything()
          return
        of '#':
          lexer.lineEnding()
          handleLineEnd(true)
        of '\'':
          handleBlockItemStart()
          content = ""
          lexer.singleQuotedScalar(content)
          if tag == yTagQuestionMark:
            tag = yTagExclamationMark
          yield scalarEvent(content, tag, anchor)
          handleObjectEnd(fpBlockAfterObject)
        of '"':
          handleBlockItemStart()
          content = ""
          lexer.doubleQuotedScalar(content)
          if tag == yTagQuestionMark:
            tag = yTagExclamationMark
          yield scalarEvent(content, tag, anchor)
          handleObjectEnd(fpBlockAfterObject)
        of '|', '>':
          # TODO: this will scan for possible map start, which is not
          # necessary in this case
          handleBlockItemStart()
          var stateAfter: FastParseState
          content = ""
          lexer.blockScalar(content, stateAfter)
          if tag == yTagQuestionMark:
            tag = yTagExclamationMark
          yield scalarEvent(content, tag, anchor)
          handleObjectEnd(stateAfter)
        of '-':
          if lexer.isPlainSafe(lexer.bufpos + 1, cBlockOut):
            handleBlockItemStart()
            content = ""
            lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
            lexer.plainScalar(content, cBlockOut)
            state = fpBlockAfterPlainScalar
          else:
            lexer.bufpos.inc()
            handleBlockSequenceIndicator()
        of '!':
          handleBlockItemStart()
          handleTagHandle()
        of '&':
          handleBlockItemStart()
          handleAnchor()
        of '*':
          handleBlockItemStart()
          handleAlias()
        of '[', '{':
          handleBlockItemStart()
          state = fpFlow
        of '?':
          if lexer.isPlainSafe(lexer.bufpos + 1, cBlockOut):
            handleBlockItemStart()
            content = ""
            lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
            lexer.plainScalar(content, cBlockOut)
            state = fpBlockAfterPlainScalar
          else:
            lexer.bufpos.inc()
            handleMapKeyIndicator()
        of ':':
          if lexer.isPlainSafe(lexer.bufpos + 1, cBlockOut):
            handleBlockItemStart()
            content = ""
            lexer.tokenstart = lexer.getColNumber(lexer.bufpos)
            lexer.plainScalar(content, cBlockOut)
            state = fpBlockAfterPlainScalar
          else:
            lexer.bufpos.inc()
            handleMapValueIndicator()
        of '@', '`':
          raiseError("Reserved characters cannot start a plain scalar",
                     lexer.bufpos)
        else:
          handleBlockItemStart()
          content = ""
          lexer.plainScalar(content, cBlockOut)
          state = fpBlockAfterPlainScalar

      of fpExpectDocEnd:
        case lexer.buf[lexer.bufpos]
        of '-':
          var token: LexedPossibleDirectivesEnd
          lexer.directivesEnd(token)
          case token
          of lpdeDirectivesEnd:
            lexer.bufpos.inc(3)
            yield endDocEvent()
            initDocValues()
            yield startDocEvent()
            state = fpBlockObjectStart
          else:
            raiseError("Unexpected content (expected document end)")
        of '.':
          var isDocumentEnd: bool
          lexer.documentEnd(isDocumentEnd)
          if isDocumentEnd:
            lexer.bufpos.inc(3)
            yield endDocEvent()
            initDocValues()
            state = fpInitial
          else:
            raiseError("Unexpected content (expected document end)")
        of ' ', '\t', '#':
          lexer.lineEnding()
          handleLineEnd(true)
        of '\x0A':
          lexer.bufpos = lexer.handleLF(lexer.bufpos)
        of '\c':
          lexer.bufpos = lexer.handleCR(lexer.bufpos)
        of EndOfFile:
          yield endDocEvent()
          break
        else:
          raiseError("Unexpected content (expected document end)")

      of fpFlow:
        debug("state: flow")
        lexer.skipWhitespaceAndNewlines()
        case lexer.buf[lexer.bufpos]
        of '{':
          handleObjectStart(yamlStartMap)
          flowdepth.inc()
          lexer.bufpos.inc()
          explicitFlowKey = false
        of '[':
          handleObjectStart(yamlStartSequence)
          flowdepth.inc()
          lexer.bufpos.inc()
        of '}':
          assert(level.kind == fplUnknown)
          level = ancestry.pop()
          case level.kind
          of fplMapValue:
            yield scalarEvent("", tag, anchor)
            tag = yTagQuestionMark
            anchor = yAnchorNone
            level.kind = fplMapKey
          of fplMapKey:
            if tag != yTagQuestionMark or anchor != yAnchorNone or
                explicitFlowKey:
              yield scalarEvent("", tag, anchor)
              tag = yTagQuestionMark
              anchor = yAnchorNone
              yield scalarEvent("", tag, anchor)
          of fplSequence:
            raiseError("Unexpected token (expected ']')", lexer.bufpos)
          of fplUnknown, fplScalar:
            assert(false)
          lexer.bufpos.inc()
          leaveFlowLevel()
        of ']':
          assert(level.kind == fplUnknown)
          level = ancestry.pop()
          case level.kind
          of fplSequence:
            if tag != yTagQuestionMark or anchor != yAnchorNone:
              yield scalarEvent("", tag, anchor)
              tag = yTagQuestionMark
              anchor = yAnchorNone
          of fplMapKey, fplMapValue:
            raiseError("Unexpected token (expected '}')", lexer.bufpos)
          of fplUnknown, fplScalar:
            assert(false)
          lexer.bufpos.inc()
          leaveFlowLevel()
        of ',':
          assert(level.kind == fplUnknown)
          level = ancestry.pop()
          case level.kind
          of fplSequence:
            yield scalarEvent("", tag, anchor)
            tag = yTagQuestionMark
            anchor = yAnchorNone
          of fplMapValue:
            yield scalarEvent("", tag, anchor)
            tag = yTagQuestionMark
            anchor = yAnchorNone
            level.kind = fplMapKey
            explicitFlowKey = false
          of fplMapKey:
            yield scalarEvent("", tag, anchor)
            tag = yTagQuestionMark
            anchor = yAnchorNone
            yield scalarEvent("", tag, anchor)
            explicitFlowKey = false
          of fplUnknown, fplScalar:
            assert(false)
          ancestry.add(level)
          level = FastParseLevel(kind: fplUnknown, indentation: -1)
          lexer.bufpos.inc()
        of ':':
          assert(level.kind == fplUnknown)
          if lexer.isPlainSafe(lexer.bufpos + 1, cFlowIn):
            level = ancestry.pop()
            case level.kind
            of fplSequence, fplMapValue:
              raiseError("Unexpected token (expected ',')", lexer.bufpos)
            of fplMapKey:
              yield scalarEvent("", tag, anchor)
              tag = yTagQuestionMark
              anchor = yAnchorNone
              level.kind = fplMapValue
            of fplUnknown, fplScalar:
              assert(false)
            ancestry.add(level)
            level = FastParseLevel(kind: fplUnknown, indentation: -1)
            lexer.bufpos.inc()
          else:
            handleFlowPlainScalar()
        of '\'':
          content = ""
          lexer.singleQuotedScalar(content)
          if tag == yTagQuestionMark:
            tag = yTagExclamationMark
          yield scalarEvent(content, tag, anchor)
          handleObjectEnd(fpFlowAfterObject)
        of '"':
          content = ""
          lexer.doubleQuotedScalar(content)
          if tag == yTagQuestionMark:
            tag = yTagExclamationMark
          yield scalarEvent(content, tag, anchor)
          handleObjectEnd(fpFlowAfterObject)
        of '!':
          handleTagHandle()
        of '&':
          handleAnchor()
        of '*':
          handleAlias()
          state = fpFlowAfterObject
        of '?':
          if lexer.isPlainSafe(lexer.bufpos + 1, cFlowOut):
            handleFlowPlainScalar()
          elif explicitFlowKey:
            raiseError("Duplicate '?' in flow mapping", lexer.bufpos)
          else:
            explicitFlowKey = true
            lexer.bufpos.inc()
        else:
          handleFlowPlainScalar()

      of fpFlowAfterObject:
        debug("state: flowAfterObject")
        lexer.skipWhitespaceAndNewlines()
        case lexer.buf[lexer.bufpos]
        of ']':
          case level.kind
          of fplSequence:
            discard
          of fplMapKey, fplMapValue:
            raiseError("Unexpected token (expected '}')", lexer.bufpos)
          of fplScalar, fplUnknown:
            assert(false)
          lexer.bufpos.inc()
          leaveFlowLevel()
        of '}':
          case level.kind
          of fplMapKey, fplMapValue:
            discard
          of fplSequence:
            raiseError("Unexpected token (expected ']')", lexer.bufpos)
          of fplUnknown, fplScalar:
            assert(false)
          lexer.bufpos.inc()
          leaveFlowLevel()
        of ',':
          case level.kind
          of fplSequence:
            discard
          of fplMapValue:
            yield scalarEvent("", yTagQuestionMark, yAnchorNone)
            level.kind = fplMapKey
            explicitFlowKey = false
          of fplMapKey:
            explicitFlowKey = false
          of fplUnknown, fplScalar:
            assert(false)
          ancestry.add(level)
          level = FastParseLevel(kind: fplUnknown, indentation: -1)
          state = fpFlow
          lexer.bufpos.inc()
        of ':':
          case level.kind
          of fplSequence, fplMapKey:
            raiseError("Unexpected token (expected ',')", lexer.bufpos)
          of fplMapValue:
            level.kind = fplMapValue
          of fplUnknown, fplScalar:
            assert(false)
          ancestry.add(level)
          level = FastParseLevel(kind: fplUnknown, indentation: -1)
          state = fpFlow
          lexer.bufpos.inc()
        of '#':
          lexer.lineEnding()
          handleLineEnd(true)
        of EndOfFile:
          raiseError("Unclosed flow content", lexer.bufpos)
        else:
          raiseError("Unexpected content (expected flow indicator)",
                     lexer.bufpos)