# NimYAML - YAML implementation in Nim
# (c) Copyright 2015 Felix Krause
#
# See the file "copying.txt", included in this
# distribution, for details about the copyright.

type
  FastParseState = enum
    fpInitial, fpBlockAfterObject, fpBlockAfterPlainScalar, fpBlockObjectStart,
    fpExpectDocEnd, fpFlow, fpFlowAfterObject, fpAfterDocument

  FastParseLevelKind = enum
    fplUnknown, fplSequence, fplMapKey, fplMapValue, fplSinglePairKey,
    fplSinglePairValue, fplScalar, fplDocument

  ScalarType = enum
    stFlow, stLiteral, stFolded

  FastParseLevel = object
    kind: FastParseLevelKind
    indentation: int

  LexedDirective = enum
    ldYaml, ldTag, ldUnknown

  YamlContext = enum
    cBlock, cFlow

  ChompType = enum
    ctKeep, ctClip, ctStrip

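# Chomping semantics (YAML 1.2 spec behavior) for a block scalar such as
#   foo: |+
#     bar
#
# ctKeep preserves all trailing line breaks, ctClip (the default) keeps
# exactly one ("bar\n"), and ctStrip drops them all ("bar").
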
const
  space = {' ', '\t'}
  lineEnd = {'\l', '\c', EndOfFile}
  spaceOrLineEnd = {' ', '\t', '\l', '\c', EndOfFile}
  digits = {'0'..'9'}
  flowIndicators = {'[', ']', '{', '}', ','}

  UTF8NextLine = toUTF8(0x85.Rune)
  UTF8NonBreakingSpace = toUTF8(0xA0.Rune)
  UTF8LineSeparator = toUTF8(0x2028.Rune)
  UTF8ParagraphSeparator = toUTF8(0x2029.Rune)

  UnknownIndentation = int.low

proc newYamlParser*(tagLib: TagLibrary = initExtendedTagLibrary(),
                    callback: WarningCallback = nil): YamlParser =
  new(result)
  result.tagLib = tagLib
  result.callback = callback

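# Minimal usage sketch (illustrative only; `newStringStream` comes from the
# standard streams module):
#
#   var parser = newYamlParser()
#   var events = parser.parse(newStringStream("foo: bar"))
#
# The returned YamlStream yields the events of the parsed document(s).
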
proc getLineNumber*(p: YamlParser): int = p.lexer.lineNumber

proc getColNumber*(p: YamlParser): int = p.tokenstart + 1 # column is 1-based

proc getLineContent*(p: YamlParser, marker: bool = true): string =
  result = p.lexer.getCurrentLine(false)
  if marker:
    result.add(repeat(' ', p.tokenstart) & "^\n")

template debug(message: string) {.dirty.} =
  when defined(yamlDebug):
    try: styledWriteLine(stdout, fgBlue, message)
    except IOError: discard

template debugFail() {.dirty.} =
  when not defined(release):
    echo "internal error at line: ", instantiationInfo().line
  assert(false)

template parserError(message: string) {.dirty.} =
  var e = newException(YamlParserError, message)
  e.line = p.lexer.lineNumber
  e.column = p.tokenstart + 1
  e.lineContent = p.getLineContent(true)
  raise e

template lexerError(lx: BaseLexer, message: string) {.dirty.} =
  var e = newException(YamlParserError, message)
  e.line = lx.lineNumber
  e.column = lx.bufpos + 1
  e.lineContent = lx.getCurrentLine(false) &
      repeat(' ', lx.getColNumber(lx.bufpos)) & "^\n"
  raise e

template addMultiple(s: var string, c: char, num: int) =
  for i in 1..num:
    s.add(c)

template startContent() {.dirty.} = content.setLen(0)

template initLevel(k: FastParseLevelKind): FastParseLevel =
  FastParseLevel(kind: k, indentation: UnknownIndentation)

template yieldEmptyScalar() {.dirty.} =
  yield scalarEvent("", tag, anchor)
  tag = yTagQuestionMark
  anchor = yAnchorNone

template yieldShallowScalar(content: string) {.dirty.} =
  var e = YamlStreamEvent(kind: yamlScalar, scalarTag: tag,
                          scalarAnchor: anchor)
  shallowCopy(e.scalarContent, content)
  yield e

template yieldLevelEnd() {.dirty.} =
  case level.kind
  of fplSequence: yield endSeqEvent()
  of fplMapKey: yield endMapEvent()
  of fplMapValue, fplSinglePairValue:
    yieldEmptyScalar()
    yield endMapEvent()
  of fplScalar:
    if scalarType != stFlow:
      case chomp
      of ctKeep:
        if content.len == 0: newlines.inc(-1)
        content.addMultiple('\l', newlines)
      of ctClip:
        if content.len != 0: content.add('\l')
      of ctStrip: discard
    yieldShallowScalar(content)
    tag = yTagQuestionMark
    anchor = yAnchorNone
  of fplUnknown:
    if ancestry.len > 1: yieldEmptyScalar() # don't yield scalar for empty doc
  of fplSinglePairKey, fplDocument: debugFail()

template handleLineEnd(insideDocument: bool) {.dirty.} =
  case p.lexer.buf[p.lexer.bufpos]
  of '\l': p.lexer.bufpos = p.lexer.handleLF(p.lexer.bufpos)
  of '\c': p.lexer.bufpos = p.lexer.handleCR(p.lexer.bufpos)
  of EndOfFile:
    when insideDocument: closeEverything()
    return
  else: discard
  newlines.inc()

template handleObjectEnd(nextState: FastParseState) {.dirty.} =
  level = ancestry.pop()
  if level.kind == fplSinglePairValue:
    yield endMapEvent()
    level = ancestry.pop()
  state = if level.kind == fplDocument: fpExpectDocEnd else: nextState
  tag = yTagQuestionMark
  anchor = yAnchorNone
  case level.kind
  of fplMapKey: level.kind = fplMapValue
  of fplSinglePairKey: level.kind = fplSinglePairValue
  of fplMapValue: level.kind = fplMapKey
  of fplSequence, fplDocument: discard
  of fplUnknown, fplScalar, fplSinglePairValue: debugFail()

template handleObjectStart(k: YamlStreamEventKind, single: bool = false)
    {.dirty.} =
  assert(level.kind == fplUnknown)
  when k == yamlStartMap:
    yield startMapEvent(tag, anchor)
    if single:
      debug("started single-pair map at " &
          (if level.indentation == UnknownIndentation: $indentation else:
           $level.indentation))
      level.kind = fplSinglePairKey
    else:
      debug("started map at " &
          (if level.indentation == UnknownIndentation: $indentation else:
           $level.indentation))
      level.kind = fplMapKey
  else:
    yield startSeqEvent(tag, anchor)
    debug("started sequence at " &
        (if level.indentation == UnknownIndentation: $indentation else:
         $level.indentation))
    level.kind = fplSequence
  tag = yTagQuestionMark
  anchor = yAnchorNone
  if level.indentation == UnknownIndentation: level.indentation = indentation
  ancestry.add(level)
  level = initLevel(fplUnknown)

template closeMoreIndentedLevels(atSequenceItem: bool = false) {.dirty.} =
  while level.kind != fplDocument:
    let parent = ancestry[ancestry.high]
    if parent.indentation >= indentation:
      when atSequenceItem:
        if (indentation == level.indentation and level.kind == fplSequence) or
           (indentation == parent.indentation and level.kind == fplUnknown and
            parent.kind != fplSequence):
          break
      debug("Closing because parent.indentation (" & $parent.indentation &
            ") >= indentation(" & $indentation & ")")
      yieldLevelEnd()
      handleObjectEnd(state)
    else: break
  if level.kind == fplDocument: state = fpExpectDocEnd

template closeEverything() {.dirty.} =
  indentation = 0
  closeMoreIndentedLevels()
  case level.kind
  of fplUnknown: discard ancestry.pop()
  of fplDocument: discard
  else:
    yieldLevelEnd()
    discard ancestry.pop()
  yield endDocEvent()

template handleBlockSequenceIndicator() {.dirty.} =
  startToken()
  case level.kind
  of fplUnknown: handleObjectStart(yamlStartSeq)
  of fplSequence:
    if level.indentation != indentation:
      parserError("Invalid indentation of block sequence indicator")
    ancestry.add(level)
    level = initLevel(fplUnknown)
  else: parserError("Illegal sequence item in map")
  p.lexer.skipWhitespace()
  indentation = p.lexer.getColNumber(p.lexer.bufpos)

template handleMapKeyIndicator() {.dirty.} =
  startToken()
  case level.kind
  of fplUnknown: handleObjectStart(yamlStartMap)
  of fplMapValue:
    if level.indentation != indentation:
      parserError("Invalid indentation of map key indicator")
    yield scalarEvent("", yTagQuestionMark, yAnchorNone)
    level.kind = fplMapKey
    ancestry.add(level)
    level = initLevel(fplUnknown)
  of fplMapKey:
    if level.indentation != indentation:
      parserError("Invalid indentation of map key indicator")
    ancestry.add(level)
    level = initLevel(fplUnknown)
  of fplSequence:
    parserError("Unexpected map key indicator (expected '- ')")
  of fplScalar:
    parserError("Unexpected map key indicator (expected multiline scalar end)")
  of fplSinglePairKey, fplSinglePairValue, fplDocument: debugFail()
  p.lexer.skipWhitespace()
  indentation = p.lexer.getColNumber(p.lexer.bufpos)

template handleMapValueIndicator() {.dirty.} =
  startToken()
  case level.kind
  of fplUnknown:
    if level.indentation == UnknownIndentation:
      handleObjectStart(yamlStartMap)
      yield scalarEvent("", yTagQuestionMark, yAnchorNone)
    else: yieldEmptyScalar()
    ancestry[ancestry.high].kind = fplMapValue
  of fplMapKey:
    if level.indentation != indentation:
      parserError("Invalid indentation of map value indicator")
    yield scalarEvent("", yTagQuestionMark, yAnchorNone)
    level.kind = fplMapValue
    ancestry.add(level)
    level = initLevel(fplUnknown)
  of fplMapValue:
    if level.indentation != indentation:
      parserError("Invalid indentation of map value indicator")
    ancestry.add(level)
    level = initLevel(fplUnknown)
  of fplSequence:
    parserError("Unexpected map value indicator (expected '- ')")
  of fplScalar:
    parserError(
        "Unexpected map value indicator (expected multiline scalar end)")
  of fplSinglePairKey, fplSinglePairValue, fplDocument: debugFail()
  p.lexer.skipWhitespace()
  indentation = p.lexer.getColNumber(p.lexer.bufpos)

template initDocValues() {.dirty.} =
  shorthands = initTable[string, string]()
  anchors = initTable[string, AnchorId]()
  shorthands["!"] = "!"
  shorthands["!!"] = "tag:yaml.org,2002:"
  nextAnchorId = 0.AnchorId
  level = initLevel(fplUnknown)
  tag = yTagQuestionMark
  anchor = yAnchorNone
  ancestry.add(FastParseLevel(kind: fplDocument, indentation: -1))

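# The two default shorthands registered above are mandated by the YAML 1.2
# spec: "!" expands to itself (the primary handle) and "!!" expands to the
# core schema namespace, so "!!str" resolves to "tag:yaml.org,2002:str".
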
template handleTagHandle() {.dirty.} =
  startToken()
  if level.kind != fplUnknown: parserError("Unexpected tag handle")
  if tag != yTagQuestionMark:
    parserError("Only one tag handle is allowed per node")
  startContent()
  var shorthandEnd: int
  p.lexer.tagHandle(content, shorthandEnd)
  if shorthandEnd != -1:
    try:
      tagUri.setLen(0)
      tagUri.add(shorthands[content[0..shorthandEnd]])
      tagUri.add(content[shorthandEnd + 1 .. ^1])
    except KeyError:
      parserError("Undefined tag shorthand: " & content[0..shorthandEnd])
    try: tag = p.tagLib.tags[tagUri]
    except KeyError: tag = p.tagLib.registerUri(tagUri)
  else:
    try: tag = p.tagLib.tags[content]
    except KeyError: tag = p.tagLib.registerUri(content)

template handleAnchor() {.dirty.} =
  startToken()
  if level.kind != fplUnknown: parserError("Unexpected token")
  if anchor != yAnchorNone:
    parserError("Only one anchor is allowed per node")
  startContent()
  p.lexer.anchorName(content)
  anchor = nextAnchorId
  anchors[content] = anchor
  nextAnchorId = AnchorId(int(nextAnchorId) + 1)

template handleAlias() {.dirty.} =
  startToken()
  if level.kind != fplUnknown: parserError("Unexpected token")
  if anchor != yAnchorNone or tag != yTagQuestionMark:
    parserError("Alias may not have anchor or tag")
  startContent()
  p.lexer.anchorName(content)
  var id: AnchorId
  try: id = anchors[content]
  except KeyError: parserError("Unknown anchor")
  yield aliasEvent(id)
  handleObjectEnd(fpBlockAfterObject)

template leaveFlowLevel() {.dirty.} =
  flowdepth.inc(-1)
  if flowdepth == 0:
    yieldLevelEnd()
    handleObjectEnd(fpBlockAfterObject)
  else:
    yieldLevelEnd()
    handleObjectEnd(fpFlowAfterObject)

template handlePossibleMapStart(flow: bool = false,
                                single: bool = false) {.dirty.} =
  if level.indentation == UnknownIndentation:
    var flowDepth = 0
    var pos = p.lexer.bufpos
    var recentJsonStyle = false
    while pos < p.lexer.bufpos + 1024:
      case p.lexer.buf[pos]
      of ':':
        if flowDepth == 0 and (p.lexer.buf[pos + 1] in spaceOrLineEnd or
            recentJsonStyle):
          handleObjectStart(yamlStartMap, single)
          break
      of lineEnd: break
      of '[', '{': flowDepth.inc()
      of '}', ']':
        flowDepth.inc(-1)
        if flowDepth < 0: break
      of '?', ',':
        if flowDepth == 0: break
      of '#':
        if p.lexer.buf[pos - 1] in space: break
      of '"':
        pos.inc()
        while p.lexer.buf[pos] notin {'"', EndOfFile, '\l', '\c'}:
          if p.lexer.buf[pos] == '\\': pos.inc()
          pos.inc()
        if p.lexer.buf[pos] != '"': break
      of '\'':
        pos.inc()
        while p.lexer.buf[pos] notin {'\'', '\l', '\c', EndOfFile}:
          pos.inc()
      of '&', '*', '!':
        if pos == p.lexer.bufpos or p.lexer.buf[p.lexer.bufpos] in space:
          pos.inc()
          while p.lexer.buf[pos] notin spaceOrLineEnd:
            pos.inc()
          continue
      else: discard
      if flow and p.lexer.buf[pos] notin space:
        recentJsonStyle = p.lexer.buf[pos] in {']', '}', '\'', '"'}
      pos.inc()
  if level.indentation == UnknownIndentation: level.indentation = indentation

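# Lookahead example: for block input `foo: bar`, the scan reaches ':' at
# flowDepth 0 with a following space and starts an implicit map. For
# `[a, b]: c`, '[' raises flowDepth so the ',' is skipped, and only the
# ':' after the closing ']' triggers the map start.
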
template handleBlockItemStart() {.dirty.} =
  case level.kind
  of fplUnknown: handlePossibleMapStart()
  of fplSequence:
    parserError("Unexpected token (expected block sequence indicator)")
  of fplMapKey:
    ancestry.add(level)
    level = FastParseLevel(kind: fplUnknown, indentation: indentation)
  of fplMapValue:
    yieldEmptyScalar()
    level.kind = fplMapKey
    ancestry.add(level)
    level = FastParseLevel(kind: fplUnknown, indentation: indentation)
  of fplScalar, fplSinglePairKey, fplSinglePairValue, fplDocument: debugFail()

template handleFlowItemStart() {.dirty.} =
  if level.kind == fplUnknown and ancestry[ancestry.high].kind == fplSequence:
    handlePossibleMapStart(true, true)

template startToken() {.dirty.} =
  p.tokenstart = p.lexer.getColNumber(p.lexer.bufpos)

template finishLine(lexer: BaseLexer) =
  debug("lex: finishLine")
  while lexer.buf[lexer.bufpos] notin lineEnd:
    lexer.bufpos.inc()

template skipWhitespace(lexer: BaseLexer) =
  debug("lex: skipWhitespace")
  while lexer.buf[lexer.bufpos] in space: lexer.bufpos.inc()

template skipWhitespaceCommentsAndNewlines(lexer: BaseLexer) =
  debug("lex: skipWhitespaceCommentsAndNewlines")
  if lexer.buf[lexer.bufpos] != '#':
    while true:
      case lexer.buf[lexer.bufpos]
      of space: lexer.bufpos.inc()
      of '\l': lexer.bufpos = lexer.handleLF(lexer.bufpos)
      of '\c': lexer.bufpos = lexer.handleCR(lexer.bufpos)
      of '#': # also skip comments
        lexer.bufpos.inc()
        while lexer.buf[lexer.bufpos] notin lineEnd:
          lexer.bufpos.inc()
      else: break

template skipIndentation(lexer: BaseLexer) =
  debug("lex: skipIndentation")
  while lexer.buf[lexer.bufpos] == ' ': lexer.bufpos.inc()

template directiveName(lexer: BaseLexer, directive: var LexedDirective) =
  debug("lex: directiveName")
  directive = ldUnknown
  lexer.bufpos.inc()
  if lexer.buf[lexer.bufpos] == 'Y':
    lexer.bufpos.inc()
    if lexer.buf[lexer.bufpos] == 'A':
      lexer.bufpos.inc()
      if lexer.buf[lexer.bufpos] == 'M':
        lexer.bufpos.inc()
        if lexer.buf[lexer.bufpos] == 'L':
          lexer.bufpos.inc()
          if lexer.buf[lexer.bufpos] in spaceOrLineEnd:
            directive = ldYaml
  elif lexer.buf[lexer.bufpos] == 'T':
    lexer.bufpos.inc()
    if lexer.buf[lexer.bufpos] == 'A':
      lexer.bufpos.inc()
      if lexer.buf[lexer.bufpos] == 'G':
        lexer.bufpos.inc()
        if lexer.buf[lexer.bufpos] in spaceOrLineEnd:
          directive = ldTag
  while lexer.buf[lexer.bufpos] notin spaceOrLineEnd:
    lexer.bufpos.inc()

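# Example inputs: `%YAML 1.2` lexes as ldYaml and `%TAG !e! tag:example.com,2000:`
# as ldTag; anything else (e.g. `%FOO`) yields ldUnknown, for which the
# parser merely emits a warning instead of failing.
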
template yamlVersion(lexer: BaseLexer, o: var string) =
  debug("lex: yamlVersion")
  while lexer.buf[lexer.bufpos] in space: lexer.bufpos.inc()
  var c = lexer.buf[lexer.bufpos]
  if c notin digits: lexerError(lexer, "Invalid YAML version number")
  o.add(c)
  lexer.bufpos.inc()
  c = lexer.buf[lexer.bufpos]
  while c in digits:
    lexer.bufpos.inc()
    o.add(c)
    c = lexer.buf[lexer.bufpos]
  if lexer.buf[lexer.bufpos] != '.':
    lexerError(lexer, "Invalid YAML version number")
  o.add('.')
  lexer.bufpos.inc()
  c = lexer.buf[lexer.bufpos]
  if c notin digits: lexerError(lexer, "Invalid YAML version number")
  o.add(c)
  lexer.bufpos.inc()
  c = lexer.buf[lexer.bufpos]
  while c in digits:
    o.add(c)
    lexer.bufpos.inc()
    c = lexer.buf[lexer.bufpos]
  if lexer.buf[lexer.bufpos] notin spaceOrLineEnd:
    lexerError(lexer, "Invalid YAML version number")

template lineEnding(lexer: BaseLexer) =
  debug("lex: lineEnding")
  if lexer.buf[lexer.bufpos] notin lineEnd:
    while lexer.buf[lexer.bufpos] in space: lexer.bufpos.inc()
    if lexer.buf[lexer.bufpos] in lineEnd: discard
    elif lexer.buf[lexer.bufpos] == '#':
      while lexer.buf[lexer.bufpos] notin lineEnd: lexer.bufpos.inc()
    else:
      startToken()
      parserError("Unexpected token (expected comment or line end)")

template tagShorthand(lexer: BaseLexer, shorthand: var string) =
  debug("lex: tagShorthand")
  while lexer.buf[lexer.bufpos] in space: lexer.bufpos.inc()
  assert lexer.buf[lexer.bufpos] == '!'
  shorthand.add('!')
  lexer.bufpos.inc()
  var c = lexer.buf[lexer.bufpos]
  if c in spaceOrLineEnd: discard
  else:
    while c != '!':
      case c
      of 'a' .. 'z', 'A' .. 'Z', '0' .. '9', '-':
        shorthand.add(c)
        lexer.bufpos.inc()
        c = lexer.buf[lexer.bufpos]
      else: lexerError(lexer, "Illegal character in tag shorthand")
    shorthand.add(c)
    lexer.bufpos.inc()
  if lexer.buf[lexer.bufpos] notin spaceOrLineEnd:
    lexerError(lexer, "Missing space after tag shorthand")

template tagUriMapping(lexer: BaseLexer, uri: var string) =
  debug("lex: tagUriMapping")
  while lexer.buf[lexer.bufpos] in space:
    lexer.bufpos.inc()
  var c = lexer.buf[lexer.bufpos]
  if c == '!':
    uri.add(c)
    lexer.bufpos.inc()
    c = lexer.buf[lexer.bufpos]
  while c notin spaceOrLineEnd:
    case c
    of 'a' .. 'z', 'A' .. 'Z', '0' .. '9', '#', ';', '/', '?', ':', '@', '&',
       '-', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')':
      uri.add(c)
      lexer.bufpos.inc()
      c = lexer.buf[lexer.bufpos]
    else: lexerError(lexer, "Invalid tag uri")

template directivesEndMarker(lexer: var BaseLexer, success: var bool) =
  debug("lex: directivesEndMarker")
  success = true
  for i in 0..2:
    if lexer.buf[lexer.bufpos + i] != '-':
      success = false
      break
  if success: success = lexer.buf[lexer.bufpos + 3] in spaceOrLineEnd

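# Example: `---` followed by a space or line end at column 0 matches the
# directives end marker, while `----` does not because the fourth character
# fails the spaceOrLineEnd check.
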
template documentEndMarker(lexer: var BaseLexer, success: var bool) =
  debug("lex: documentEndMarker")
  success = true
  for i in 0..2:
    if lexer.buf[lexer.bufpos + i] != '.':
      success = false
      break
  if success: success = lexer.buf[lexer.bufpos + 3] in spaceOrLineEnd

proc unicodeSequence(lexer: var BaseLexer, length: int):
    string {.raises: [YamlParserError].} =
  debug("lex: unicodeSequence")
  var unicodeChar = 0.int
  for i in countup(0, length - 1):
    lexer.bufpos.inc()
    let
      digitPosition = length - i - 1
      c = lexer.buf[lexer.bufpos]
    case c
    of EndOfFile, '\l', '\c':
      lexerError(lexer, "Unfinished unicode escape sequence")
    of '0' .. '9':
      unicodeChar = unicodeChar or (int(c) - 0x30) shl (digitPosition * 4)
    of 'A' .. 'F':
      unicodeChar = unicodeChar or (int(c) - 0x37) shl (digitPosition * 4)
    of 'a' .. 'f':
      unicodeChar = unicodeChar or (int(c) - 0x57) shl (digitPosition * 4)
    else: lexerError(lexer, "Invalid character in unicode escape sequence")
  return toUTF8(Rune(unicodeChar))

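# Decoding example: for the escape `\u00E4`, the four hex digits accumulate
# via the ASCII offsets above (0x30 for '0'..'9', 0x37 for 'A'..'F', 0x57
# for 'a'..'f') into 0xE4, which toUTF8 encodes as the two bytes C3 A4 ("ä").
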
proc byteSequence(lexer: var BaseLexer): char {.raises: [YamlParserError].} =
  debug("lex: byteSequence")
  var charCode = 0.int8
  for i in 0 .. 1:
    lexer.bufpos.inc()
    let
      digitPosition = int8(1 - i)
      c = lexer.buf[lexer.bufpos]
    case c
    of EndOfFile, '\l', '\c':
      lexerError(lexer, "Unfinished octet escape sequence")
    of '0' .. '9':
      charCode = charCode or (int8(c) - 0x30.int8) shl (digitPosition * 4)
    of 'A' .. 'F':
      charCode = charCode or (int8(c) - 0x37.int8) shl (digitPosition * 4)
    of 'a' .. 'f':
      charCode = charCode or (int8(c) - 0x57.int8) shl (digitPosition * 4)
    else: lexerError(lexer, "Invalid character in octet escape sequence")
  return char(charCode)

template processQuotedWhitespace(newlines: var int) {.dirty.} =
  after.setLen(0)
  block outer:
    while true:
      case p.lexer.buf[p.lexer.bufpos]
      of ' ', '\t': after.add(p.lexer.buf[p.lexer.bufpos])
      of '\l':
        p.lexer.bufpos = p.lexer.handleLF(p.lexer.bufpos)
        break
      of '\c':
        p.lexer.bufpos = p.lexer.handleCR(p.lexer.bufpos)
        break
      else:
        content.add(after)
        break outer
      p.lexer.bufpos.inc()
    while true:
      case p.lexer.buf[p.lexer.bufpos]
      of ' ', '\t': discard
      of '\l':
        p.lexer.bufpos = p.lexer.handleLF(p.lexer.bufpos)
        newlines.inc()
        continue
      of '\c':
        p.lexer.bufpos = p.lexer.handleCR(p.lexer.bufpos)
        newlines.inc()
        continue
      else:
        if newlines == 0: discard
        elif newlines == 1: content.add(' ')
        else: content.addMultiple('\l', newlines - 1)
        break
      p.lexer.bufpos.inc()

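# Folding example: inside a quoted scalar, a single line break folds into
# one space (so "a\nb" reads as "a b"), while n > 1 consecutive breaks
# contribute n - 1 literal newlines, per YAML's line folding rules.
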
template doubleQuotedScalar(lexer: BaseLexer, content: var string) =
  debug("lex: doubleQuotedScalar")
  lexer.bufpos.inc()
  while true:
    var c = lexer.buf[lexer.bufpos]
    case c
    of EndOfFile:
      lexerError(lexer, "Unfinished double quoted string")
    of '\\':
      lexer.bufpos.inc()
      case lexer.buf[lexer.bufpos]
      of EndOfFile:
        lexerError(lexer, "Unfinished escape sequence")
      of '0': content.add('\0')
      of 'a': content.add('\x07')
      of 'b': content.add('\x08')
      of '\t', 't': content.add('\t')
      of 'n': content.add('\l')
      of 'v': content.add('\v')
      of 'f': content.add('\f')
      of 'r': content.add('\c')
      of 'e': content.add('\e')
      of ' ': content.add(' ')
      of '"': content.add('"')
      of '/': content.add('/')
      of '\\': content.add('\\')
      of 'N': content.add(UTF8NextLine)
      of '_': content.add(UTF8NonBreakingSpace)
      of 'L': content.add(UTF8LineSeparator)
      of 'P': content.add(UTF8ParagraphSeparator)
      of 'x': content.add(lexer.unicodeSequence(2))
      of 'u': content.add(lexer.unicodeSequence(4))
      of 'U': content.add(lexer.unicodeSequence(8))
      of '\l', '\c':
        var newlines = 0
        processQuotedWhitespace(newlines)
        continue
      else: lexerError(lexer, "Illegal character in escape sequence")
    of '"':
      lexer.bufpos.inc()
      break
    of '\l', '\c', '\t', ' ':
      var newlines = 1
      processQuotedWhitespace(newlines)
      continue
    else:
      content.add(c)
    lexer.bufpos.inc()

template singleQuotedScalar(lexer: BaseLexer, content: var string) =
  debug("lex: singleQuotedScalar")
  lexer.bufpos.inc()
  while true:
    case lexer.buf[lexer.bufpos]
    of '\'':
      lexer.bufpos.inc()
      if lexer.buf[lexer.bufpos] == '\'': content.add('\'')
      else: break
    of EndOfFile: lexerError(lexer, "Unfinished single quoted string")
    of '\l', '\c', '\t', ' ':
      var newlines = 1
      processQuotedWhitespace(newlines)
      continue
    else: content.add(lexer.buf[lexer.bufpos])
    lexer.bufpos.inc()

proc isPlainSafe(lexer: BaseLexer, index: int, context: YamlContext): bool =
  case lexer.buf[index]
  of spaceOrLineEnd: result = false
  of flowIndicators: result = context == cBlock
  else: result = true

# tried this for performance optimization, but it didn't improve
# performance. keeping it around for future reference.
#const
#  plainCharOut = {'!', '\"', '$'..'9', ';'..'\xFF'}
#  plainCharIn  = {'!', '\"', '$'..'+', '-'..'9', ';'..'Z', '\\', '^'..'z',
#                  '|', '~'..'\xFF'}
#template isPlainChar(c: char, context: YamlContext): bool =
#  when context == cBlock: c in plainCharOut
#  else: c in plainCharIn

template plainScalar(lexer: BaseLexer, content: var string,
                     context: YamlContext) =
  debug("lex: plainScalar")
  content.add(lexer.buf[lexer.bufpos])
  block outer:
    while true:
      lexer.bufpos.inc()
      let c = lexer.buf[lexer.bufpos]
      case c
      of ' ', '\t':
        after.setLen(1)
        after[0] = c
        while true:
          lexer.bufpos.inc()
          let c2 = lexer.buf[lexer.bufpos]
          case c2
          of ' ', '\t': after.add(c2)
          of lineEnd: break outer
          of ':':
            if lexer.isPlainSafe(lexer.bufpos + 1, context):
              content.add(after & ':')
              break
            else: break outer
          of '#': break outer
          of flowIndicators:
            if context == cBlock:
              content.add(after)
              content.add(c2)
              break
            else: break outer
          else:
            content.add(after)
            content.add(c2)
            break
      of flowIndicators:
        when context == cFlow: break
        else: content.add(c)
      of lineEnd: break
      of ':':
        if lexer.isPlainSafe(lexer.bufpos + 1, context): content.add(':')
        else: break outer
      else: content.add(c)
  debug("lex: \"" & content & '\"')

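# Plain-safety example: in `key:value` the ':' stays part of the plain
# scalar because the next character is plain-safe, whereas in `key: value`
# the ':' ends the scalar since a space follows (see isPlainSafe above).
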
template continueMultilineScalar() {.dirty.} =
  content.add(if newlines == 1: " " else: repeat('\l', newlines - 1))
  startToken()
  p.lexer.plainScalar(content, cBlock)
  state = fpBlockAfterPlainScalar

template handleFlowPlainScalar() {.dirty.} =
  startContent()
  startToken()
  p.lexer.plainScalar(content, cFlow)
  if p.lexer.buf[p.lexer.bufpos] in {'{', '}', '[', ']', ',', ':', '#'}:
    discard
  else:
    newlines = 0
    while true:
      case p.lexer.buf[p.lexer.bufpos]
      of ':':
        if p.lexer.isPlainSafe(p.lexer.bufpos + 1, cFlow):
          if newlines == 1:
            content.add(' ')
            newlines = 0
          elif newlines > 1:
            content.addMultiple(' ', newlines - 1)
            newlines = 0
          p.lexer.plainScalar(content, cFlow)
        break
      of '#', EndOfFile: break
      of '\l':
        p.lexer.bufpos = p.lexer.handleLF(p.lexer.bufpos)
        newlines.inc()
      of '\c':
        p.lexer.bufpos = p.lexer.handleCR(p.lexer.bufpos)
        newlines.inc()
      of flowIndicators: break
      of ' ', '\t': p.lexer.skipWhitespace()
      else:
        if newlines == 1:
          content.add(' ')
          newlines = 0
        elif newlines > 1:
          content.addMultiple(' ', newlines - 1)
          newlines = 0
        p.lexer.plainScalar(content, cFlow)
  yieldShallowScalar(content)
  handleObjectEnd(fpFlowAfterObject)

template tagHandle(lexer: var BaseLexer, content: var string,
                   shorthandEnd: var int) =
  debug("lex: tagHandle")
  shorthandEnd = 0
  content.add(lexer.buf[lexer.bufpos])
  var i = 0
  while true:
    lexer.bufpos.inc()
    i.inc()
    let c = lexer.buf[lexer.bufpos]
    case c
    of spaceOrLineEnd:
      if shorthandEnd == -1: lexerError(lexer, "Unclosed verbatim tag")
      break
    of '!':
      if shorthandEnd == -1 and i == 2:
        content.add(c)
        continue
      elif shorthandEnd != 0:
        lexerError(lexer, "Illegal character in tag suffix")
      shorthandEnd = i
      content.add(c)
    of 'a' .. 'z', 'A' .. 'Z', '0' .. '9', '#', ';', '/', '?', ':', '@', '&',
       '-', '=', '+', '$', '_', '.', '~', '*', '\'', '(', ')':
      content.add(c)
    of ',':
      if shorthandEnd > 0: break # ',' after shorthand is flow indicator
      content.add(c)
    of '<':
      if i == 1:
        shorthandEnd = -1
        startContent()
      else: lexerError(lexer, "Illegal character in tag handle")
    of '>':
      if shorthandEnd == -1:
        lexer.bufpos.inc()
        if lexer.buf[lexer.bufpos] notin spaceOrLineEnd:
          lexerError(lexer, "Missing space after verbatim tag handle")
        break
      else: lexerError(lexer, "Illegal character in tag handle")
    of '%':
      if shorthandEnd != 0: content.add(lexer.byteSequence())
      else: lexerError(lexer, "Illegal character in tag handle")
    else: lexerError(lexer, "Illegal character in tag handle")

template anchorName(lexer: BaseLexer, content: var string) =
  debug("lex: anchorName")
  while true:
    lexer.bufpos.inc()
    let c = lexer.buf[lexer.bufpos]
    case c
    of spaceOrLineEnd, '[', ']', '{', '}', ',': break
    else: content.add(c)

template consumeLineIfEmpty(lex: BaseLexer): bool =
  var result = true
  while true:
    lex.bufpos.inc()
    case lex.buf[lex.bufpos]
    of ' ', '\t': discard
    of '\l':
      lex.bufpos = lex.handleLF(lex.bufpos)
      break
    of '\c':
      lex.bufpos = lex.handleCR(lex.bufpos)
      break
    of '#', EndOfFile:
      lex.lineEnding()
      handleLineEnd(false)
      break
    else:
      result = false
      break
  result

template startScalar(t: ScalarType) {.dirty.} =
  newlines = 0
  level.kind = fplScalar
  scalarType = t

template blockScalarHeader() {.dirty.} =
  debug("lex: blockScalarHeader")
  chomp = ctClip
  level.indentation = UnknownIndentation
  if tag == yTagQuestionMark: tag = yTagExclamationMark
  let t = if p.lexer.buf[p.lexer.bufpos] == '|': stLiteral else: stFolded
  while true:
    p.lexer.bufpos.inc()
    case p.lexer.buf[p.lexer.bufpos]
    of '+':
      if chomp != ctClip:
        lexerError(p.lexer, "Only one chomping indicator is allowed")
      chomp = ctKeep
    of '-':
      if chomp != ctClip:
        lexerError(p.lexer, "Only one chomping indicator is allowed")
      chomp = ctStrip
    of '1'..'9':
      if level.indentation != UnknownIndentation:
        lexerError(p.lexer, "Only one indentation indicator is allowed")
      level.indentation = ancestry[ancestry.high].indentation +
          ord(p.lexer.buf[p.lexer.bufpos]) - ord('\x30')
    of spaceOrLineEnd: break
    else: lexerError(p.lexer, "Illegal character in block scalar header: '" &
          p.lexer.buf[p.lexer.bufpos] & "'")
  recentWasMoreIndented = false
  p.lexer.lineEnding()
  handleLineEnd(true)
  startScalar(t)
  startContent()

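# Header examples: `|` starts a literal scalar with default clipping, `>-`
# a folded scalar with stripping, and `|2` fixes the content indentation to
# the parent level plus two spaces instead of detecting it from the first
# non-empty line.
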
template blockScalarLine() {.dirty.} =
  debug("lex: blockScalarLine")
  if indentation < level.indentation:
    if p.lexer.buf[p.lexer.bufpos] == '#':
      # skip all following comment lines
      while indentation > ancestry[ancestry.high].indentation:
        p.lexer.lineEnding()
        handleLineEnd(true)
        newlines.inc(-1)
        p.lexer.skipIndentation()
        indentation = p.lexer.getColNumber(p.lexer.bufpos)
      if indentation > ancestry[ancestry.high].indentation:
        p.lexer.lexerError("Invalid content in block scalar after comments")
      closeMoreIndentedLevels()
    else:
      p.lexer.lexerError(
          "Invalid indentation (expected indentation of at least " &
          $level.indentation & " spaces)")
  else:
    if level.indentation == UnknownIndentation:
      if p.lexer.buf[p.lexer.bufpos] in lineEnd:
        handleLineEnd(true)
        continue
      else:
        level.indentation = indentation
        content.addMultiple('\l', newlines)
    elif indentation > level.indentation or p.lexer.buf[p.lexer.bufpos] == '\t':
      content.addMultiple('\l', newlines)
      recentWasMoreIndented = true
      content.addMultiple(' ', indentation - level.indentation)
    elif scalarType == stFolded:
      if recentWasMoreIndented:
        recentWasMoreIndented = false
        newlines.inc()
      if newlines == 0: discard
      elif newlines == 1: content.add(' ')
      else: content.addMultiple('\l', newlines - 1)
    else: content.addMultiple('\l', newlines)
    newlines = 0
    while p.lexer.buf[p.lexer.bufpos] notin lineEnd:
      content.add(p.lexer.buf[p.lexer.bufpos])
      p.lexer.bufpos.inc()
    handleLineEnd(true)

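# Folded-scalar line handling in short: equally indented lines fold (one
# break becomes a space), while a more-indented line is kept verbatim, with
# its extra indentation re-inserted and the surrounding breaks preserved.
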
proc parse*(p: YamlParser, s: Stream): YamlStream =
  var backend = iterator(): YamlStreamEvent =
    var
      state = fpInitial
      shorthands: Table[string, string]
      anchors: Table[string, AnchorId]
      nextAnchorId: AnchorId
      content: string = ""
      after: string = ""
      tagUri: string = ""
      tag: TagId
      anchor: AnchorId
      ancestry = newSeq[FastParseLevel]()
      level: FastParseLevel
      indentation: int
      newlines: int
      flowdepth: int = 0
      explicitFlowKey: bool
      scalarType: ScalarType
      recentWasMoreIndented: bool
      chomp: ChompType

    p.lexer.open(s)
    initDocValues()

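    # The parser core is a single state machine: each iteration of the loop
    # below dispatches on `state` and the current input character, emits
    # events through `yield`, and updates the `ancestry` stack, which mirrors
    # the nesting of the document and its open maps and sequences.
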
    while true:
      case state
      of fpInitial:
        debug("state: initial")
        case p.lexer.buf[p.lexer.bufpos]
        of '%':
          var ld: LexedDirective
          startToken()
          p.lexer.directiveName(ld)
          case ld
          of ldYaml:
            var version = ""
            startToken()
            p.lexer.yamlVersion(version)
            if version != "1.2":
              if p.callback != nil:
                p.callback(p.lexer.lineNumber, p.getColNumber(),
                           p.getLineContent(),
                           "Version is not 1.2, but " & version)
            p.lexer.lineEnding()
            handleLineEnd(false)
          of ldTag:
            var shorthand = ""
            tagUri.setLen(0)
            startToken()
            p.lexer.tagShorthand(shorthand)
            p.lexer.tagUriMapping(tagUri)
            shorthands[shorthand] = tagUri
            p.lexer.lineEnding()
            handleLineEnd(false)
          of ldUnknown:
            if p.callback != nil:
              p.callback(p.lexer.lineNumber, p.getColNumber(),
                         p.getLineContent(), "Unknown directive")
            p.lexer.finishLine()
            handleLineEnd(false)
        of ' ', '\t':
          if not p.lexer.consumeLineIfEmpty():
            indentation = p.lexer.getColNumber(p.lexer.bufpos)
            yield startDocEvent()
            state = fpBlockObjectStart
        of '\l': p.lexer.bufpos = p.lexer.handleLF(p.lexer.bufpos)
        of '\c': p.lexer.bufpos = p.lexer.handleCR(p.lexer.bufpos)
        of EndOfFile: return
        of '#':
          p.lexer.lineEnding()
          handleLineEnd(false)
        of '-':
          var success: bool
          startToken()
          p.lexer.directivesEndMarker(success)
          yield startDocEvent()
          if success:
            p.lexer.bufpos.inc(3)
          state = fpBlockObjectStart
        else:
          yield startDocEvent()
          state = fpBlockObjectStart
      of fpBlockAfterPlainScalar:
        debug("state: blockAfterPlainScalar")
        p.lexer.skipWhitespace()
        case p.lexer.buf[p.lexer.bufpos]
        of '\l':
          if level.kind notin {fplUnknown, fplScalar}:
            startToken()
            parserError("Unexpected scalar")
          startScalar(stFlow)
          p.lexer.bufpos = p.lexer.handleLF(p.lexer.bufpos)
          newlines.inc()
          state = fpBlockObjectStart
        of '\c':
          if level.kind notin {fplUnknown, fplScalar}:
            startToken()
            parserError("Unexpected scalar")
          startScalar(stFlow)
          p.lexer.bufpos = p.lexer.handleCR(p.lexer.bufpos)
          newlines.inc()
          state = fpBlockObjectStart
        else:
          yieldShallowScalar(content)
          handleObjectEnd(fpBlockAfterObject)
      of fpBlockAfterObject:
        debug("state: blockAfterObject")
        p.lexer.skipWhitespace()
        case p.lexer.buf[p.lexer.bufpos]
        of EndOfFile:
          closeEverything()
          break
        of '\l':
          state = fpBlockObjectStart
          p.lexer.bufpos = p.lexer.handleLF(p.lexer.bufpos)
        of '\c':
          state = fpBlockObjectStart
          p.lexer.bufpos = p.lexer.handleCR(p.lexer.bufpos)
        of ':':
          case level.kind
          of fplUnknown:
            handleObjectStart(yamlStartMap)
          of fplMapKey:
            yield scalarEvent("", yTagQuestionMark, yAnchorNone)
            level.kind = fplMapValue
            ancestry.add(level)
            level = initLevel(fplUnknown)
          of fplMapValue:
            level.kind = fplMapValue
            ancestry.add(level)
            level = initLevel(fplUnknown)
          of fplSequence:
            startToken()
            parserError("Illegal token (expected sequence item)")
          of fplScalar:
            startToken()
            parserError("Multiline scalars may not be implicit map keys")
          of fplSinglePairKey, fplSinglePairValue, fplDocument: debugFail()
          p.lexer.bufpos.inc()
          p.lexer.skipWhitespace()
          indentation = p.lexer.getColNumber(p.lexer.bufpos)
          state = fpBlockObjectStart
        of '#':
          p.lexer.lineEnding()
          handleLineEnd(true)
          state = fpBlockObjectStart
        else:
          startToken()
          parserError("Illegal token (expected ':', comment or line end)")
of fpBlockObjectStart:
|
|
|
|
debug("state: blockObjectStart")
|
2016-03-20 12:50:00 +00:00
|
|
|
p.lexer.skipIndentation()
|
2016-01-24 10:44:10 +00:00
|
|
|
indentation = p.lexer.getColNumber(p.lexer.bufpos)
|
2016-03-20 12:50:00 +00:00
|
|
|
if indentation == 0:
|
|
|
|
var success: bool
|
|
|
|
p.lexer.directivesEndMarker(success)
|
|
|
|
if success:
|
|
|
|
p.lexer.bufpos.inc(3)
|
|
|
|
closeEverything()
|
|
|
|
initDocValues()
|
|
|
|
yield startDocEvent()
|
|
|
|
continue
|
|
|
|
p.lexer.documentEndMarker(success)
|
|
|
|
if success:
|
|
|
|
closeEverything()
|
|
|
|
p.lexer.bufpos.inc(3)
|
|
|
|
p.lexer.lineEnding()
|
|
|
|
handleLineEnd(false)
|
|
|
|
state = fpAfterDocument
|
|
|
|
continue
|
2016-03-25 16:28:58 +00:00
|
|
|
if indentation <= ancestry[ancestry.high].indentation:
|
|
|
|
if p.lexer.buf[p.lexer.bufpos] in lineEnd:
|
|
|
|
handleLineEnd(true)
|
|
|
|
continue
|
|
|
|
elif p.lexer.buf[p.lexer.bufpos] == '#':
|
|
|
|
p.lexer.lineEnding()
|
|
|
|
handleLineEnd(true)
|
|
|
|
continue
|
|
|
|
elif p.lexer.buf[p.lexer.bufpos] == '-' and not
|
|
|
|
p.lexer.isPlainSafe(p.lexer.bufpos + 1, cBlock):
|
|
|
|
closeMoreIndentedLevels(true)
|
|
|
|
else: closeMoreIndentedLevels()
|
|
|
|
elif indentation <= level.indentation and
|
|
|
|
p.lexer.buf[p.lexer.bufpos] in lineEnd:
|
|
|
|
handleLineEnd(true)
|
|
|
|
continue
|
|
|
|
if level.kind == fplScalar and scalarType != stFlow:
|
|
|
|
blockScalarLine()
|
|
|
|
continue
|
2016-01-24 10:44:10 +00:00
|
|
|
        case p.lexer.buf[p.lexer.bufpos]
        of '\l':
          p.lexer.bufpos = p.lexer.handleLF(p.lexer.bufpos)
          newlines.inc()
          if level.kind == fplUnknown: level.indentation = UnknownIndentation
        of '\c':
          p.lexer.bufpos = p.lexer.handleCR(p.lexer.bufpos)
          newlines.inc()
          if level.kind == fplUnknown: level.indentation = UnknownIndentation
        of EndOfFile:
          closeEverything()
          return
        of '#':
          p.lexer.lineEnding()
          handleLineEnd(true)
          if level.kind == fplUnknown: level.indentation = UnknownIndentation
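        # Quoted scalars: a node with the non-specific tag '?' becomes '!'
        # because quoting resolves the scalar to the string type.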
        of '\'':
          handleBlockItemStart()
          startContent()
          startToken()
          p.lexer.singleQuotedScalar(content)
          if tag == yTagQuestionMark: tag = yTagExclamationMark
          yieldShallowScalar(content)
          handleObjectEnd(fpBlockAfterObject)
        of '"':
          handleBlockItemStart()
          startContent()
          startToken()
          p.lexer.doubleQuotedScalar(content)
          if tag == yTagQuestionMark: tag = yTagExclamationMark
          yieldShallowScalar(content)
          handleObjectEnd(fpBlockAfterObject)
        of '|', '>':
          blockScalarHeader()
          continue
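        # '-' followed by a plain-safe character starts or continues a plain
        # scalar (e.g. "-1"); otherwise it is a block sequence indicator.
        # The same disambiguation applies to '?' and ':' below.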
        of '-':
          if p.lexer.isPlainSafe(p.lexer.bufpos + 1, cBlock):
            if level.kind == fplScalar: continueMultilineScalar()
            else:
              handleBlockItemStart()
              startContent()
              startToken()
              p.lexer.plainScalar(content, cBlock)
              state = fpBlockAfterPlainScalar
          else:
            p.lexer.bufpos.inc()
            handleBlockSequenceIndicator()
        of '!':
          handleBlockItemStart()
          handleTagHandle()
        of '&':
          handleBlockItemStart()
          handleAnchor()
        of '*':
          handleBlockItemStart()
          handleAlias()
        of '[', '{':
          handleBlockItemStart()
          state = fpFlow
        of '?':
          if p.lexer.isPlainSafe(p.lexer.bufpos + 1, cBlock):
            if level.kind == fplScalar: continueMultilineScalar()
            else:
              handleBlockItemStart()
              startContent()
              startToken()
              p.lexer.plainScalar(content, cBlock)
              state = fpBlockAfterPlainScalar
          else:
            p.lexer.bufpos.inc()
            handleMapKeyIndicator()
        of ':':
          if p.lexer.isPlainSafe(p.lexer.bufpos + 1, cBlock):
            if level.kind == fplScalar: continueMultilineScalar()
            else:
              handleBlockItemStart()
              startContent()
              startToken()
              p.lexer.plainScalar(content, cBlock)
              state = fpBlockAfterPlainScalar
          else:
            p.lexer.bufpos.inc()
            handleMapValueIndicator()
        of '@', '`':
          lexerError(p.lexer, "Reserved characters cannot start a plain scalar")
        of '\t':
          if level.kind == fplScalar:
            p.lexer.skipWhitespace()
            continueMultilineScalar()
          else: lexerError(p.lexer, "\\t cannot start any token")
        else:
          if level.kind == fplScalar: continueMultilineScalar()
          else:
            handleBlockItemStart()
            startContent()
            startToken()
            p.lexer.plainScalar(content, cBlock)
            state = fpBlockAfterPlainScalar
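      # fpExpectDocEnd: the root node has been closed; only "---", "...",
      # whitespace, comments or EOF may follow.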
      of fpExpectDocEnd:
        debug("state: expectDocEnd")
        case p.lexer.buf[p.lexer.bufpos]
        of '-':
          var success: bool
          p.lexer.directivesEndMarker(success)
          if success:
            p.lexer.bufpos.inc(3)
            yield endDocEvent()
            discard ancestry.pop()
            initDocValues()
            yield startDocEvent()
            state = fpBlockObjectStart
          else: parserError("Unexpected content (expected document end)")
        of '.':
          var isDocumentEnd: bool
          startToken()
          p.lexer.documentEndMarker(isDocumentEnd)
          if isDocumentEnd:
            closeEverything()
            p.lexer.bufpos.inc(3)
            p.lexer.lineEnding()
            handleLineEnd(false)
            state = fpAfterDocument
          else: parserError("Unexpected content (expected document end)")
        of ' ', '\t', '#':
          p.lexer.lineEnding()
          handleLineEnd(true)
        of '\l': p.lexer.bufpos = p.lexer.handleLF(p.lexer.bufpos)
        of '\c': p.lexer.bufpos = p.lexer.handleCR(p.lexer.bufpos)
        of EndOfFile:
          yield endDocEvent()
          break
        else:
          startToken()
          parserError("Unexpected content (expected document end)")
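      # fpAfterDocument: an explicit "..." has been consumed; any content
      # other than another marker, a comment or EOF starts a new document.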
      of fpAfterDocument:
        debug("state: afterDocument")
        case p.lexer.buf[p.lexer.bufpos]
        of '.':
          var isDocumentEnd: bool
          startToken()
          p.lexer.documentEndMarker(isDocumentEnd)
          if isDocumentEnd:
            p.lexer.bufpos.inc(3)
            p.lexer.lineEnding()
            handleLineEnd(false)
          else:
            initDocValues()
            yield startDocEvent()
            state = fpBlockObjectStart
        of '#':
          p.lexer.lineEnding()
          handleLineEnd(false)
        of '\t', ' ':
          if not p.lexer.consumeLineIfEmpty():
            indentation = p.lexer.getColNumber(p.lexer.bufpos)
            initDocValues()
            yield startDocEvent()
            state = fpBlockObjectStart
        of EndOfFile: break
        else:
          initDocValues()
          state = fpInitial
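      # fpFlow: inside flow content ("[...]" or "{...}"), expecting the
      # next node or structural character.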
      of fpFlow:
        debug("state: flow")
        p.lexer.skipWhitespaceCommentsAndNewlines()
        case p.lexer.buf[p.lexer.bufpos]
        of '{':
          handleFlowItemStart()
          handleObjectStart(yamlStartMap)
          flowdepth.inc()
          p.lexer.bufpos.inc()
          explicitFlowKey = false
        of '[':
          handleFlowItemStart()
          handleObjectStart(yamlStartSeq)
          flowdepth.inc()
          p.lexer.bufpos.inc()
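        # Closing indicators and ',' synthesize empty scalars for missing
        # nodes, e.g. for both values in "{a: , b: }".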
        of '}':
          assert(level.kind == fplUnknown)
          level = ancestry.pop()
          case level.kind
          of fplMapValue:
            yieldEmptyScalar()
            level.kind = fplMapKey
          of fplMapKey:
            if tag != yTagQuestionMark or anchor != yAnchorNone or
                explicitFlowKey:
              yieldEmptyScalar()
              yield scalarEvent("", yTagQuestionMark, yAnchorNone)
          of fplSequence:
            startToken()
            parserError("Unexpected token (expected ']')")
          of fplSinglePairValue:
            startToken()
            parserError("Unexpected token (expected ']')")
          of fplUnknown, fplScalar, fplSinglePairKey, fplDocument: debugFail()
          p.lexer.bufpos.inc()
          leaveFlowLevel()
        of ']':
          assert(level.kind == fplUnknown)
          level = ancestry.pop()
          case level.kind
          of fplSequence:
            if tag != yTagQuestionMark or anchor != yAnchorNone:
              yieldEmptyScalar()
          of fplSinglePairValue:
            yieldEmptyScalar()
            level = ancestry.pop()
            yield endMapEvent()
            assert(level.kind == fplSequence)
          of fplMapKey, fplMapValue:
            startToken()
            parserError("Unexpected token (expected '}')")
          of fplUnknown, fplScalar, fplSinglePairKey, fplDocument: debugFail()
          p.lexer.bufpos.inc()
          leaveFlowLevel()
        of ',':
          assert(level.kind == fplUnknown)
          level = ancestry.pop()
          case level.kind
          of fplSequence: yieldEmptyScalar()
          of fplMapValue:
            yieldEmptyScalar()
            level.kind = fplMapKey
            explicitFlowKey = false
          of fplMapKey:
            yieldEmptyScalar()
            yield scalarEvent("", yTagQuestionMark, yAnchorNone)
            explicitFlowKey = false
          of fplSinglePairValue:
            yieldEmptyScalar()
            level = ancestry.pop()
            yield endMapEvent()
            assert(level.kind == fplSequence)
          of fplUnknown, fplScalar, fplSinglePairKey, fplDocument: debugFail()
          ancestry.add(level)
          level = initLevel(fplUnknown)
          p.lexer.bufpos.inc()
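        # A ':' that cannot start a plain scalar introduces the value of a
        # single-pair map; directly inside a flow sequence it opens an
        # implicit map with an empty key, e.g. "[: b]".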
        of ':':
          if p.lexer.isPlainSafe(p.lexer.bufpos + 1, cFlow):
            handleFlowItemStart()
            handleFlowPlainScalar()
          else:
            level = ancestry.pop()
            case level.kind
            of fplSequence:
              yield startMapEvent(tag, anchor)
              debug("started single-pair map at " &
                  (if level.indentation == UnknownIndentation:
                   $indentation else: $level.indentation))
              tag = yTagQuestionMark
              anchor = yAnchorNone
              if level.indentation == UnknownIndentation:
                level.indentation = indentation
              ancestry.add(level)
              level = initLevel(fplSinglePairValue)
              yield scalarEvent("")
            of fplMapValue, fplSinglePairValue:
              startToken()
              parserError("Unexpected token (expected ',')")
            of fplMapKey:
              yieldEmptyScalar()
              level.kind = fplMapValue
            of fplSinglePairKey:
              yieldEmptyScalar()
              level.kind = fplSinglePairValue
            of fplUnknown, fplScalar, fplDocument: debugFail()
            ancestry.add(level)
            level = initLevel(fplUnknown)
            p.lexer.bufpos.inc()
        of '\'':
          handleFlowItemStart()
          startContent()
          startToken()
          p.lexer.singleQuotedScalar(content)
          if tag == yTagQuestionMark: tag = yTagExclamationMark
          yieldShallowScalar(content)
          handleObjectEnd(fpFlowAfterObject)
        of '"':
          handleFlowItemStart()
          startContent()
          startToken()
          p.lexer.doubleQuotedScalar(content)
          if tag == yTagQuestionMark: tag = yTagExclamationMark
          yieldShallowScalar(content)
          handleObjectEnd(fpFlowAfterObject)
        of '!':
          handleFlowItemStart()
          handleTagHandle()
        of '&':
          handleFlowItemStart()
          handleAnchor()
        of '*':
          handleAlias()
          state = fpFlowAfterObject
        of '?':
          if p.lexer.isPlainSafe(p.lexer.bufpos + 1, cFlow):
            handleFlowItemStart()
            handleFlowPlainScalar()
          elif explicitFlowKey:
            startToken()
            parserError("Duplicate '?' in flow mapping")
          elif level.kind == fplUnknown:
            case ancestry[ancestry.high].kind
            of fplMapKey, fplMapValue, fplDocument: discard
            of fplSequence: handleObjectStart(yamlStartMap, true)
            else:
              startToken()
              parserError("Unexpected token")
            explicitFlowKey = true
            p.lexer.bufpos.inc()
          else:
            explicitFlowKey = true
            p.lexer.bufpos.inc()
        else:
          handleFlowItemStart()
          handleFlowPlainScalar()
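      # fpFlowAfterObject: a flow node has just been emitted; only ',', ':',
      # ']', '}', a comment or a newline may follow here.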
      of fpFlowAfterObject:
        debug("state: flowAfterObject")
        p.lexer.skipWhitespaceCommentsAndNewlines()
        case p.lexer.buf[p.lexer.bufpos]
        of ']':
          case level.kind
          of fplSequence: discard
          of fplMapKey, fplMapValue:
            startToken()
            parserError("Unexpected token (expected '}')")
          of fplSinglePairValue:
            level = ancestry.pop()
            assert(level.kind == fplSequence)
            yield endMapEvent()
          of fplScalar, fplUnknown, fplSinglePairKey, fplDocument: debugFail()
          p.lexer.bufpos.inc()
          leaveFlowLevel()
        of '}':
          case level.kind
          of fplMapKey, fplMapValue: discard
          of fplSequence, fplSinglePairValue:
            startToken()
            parserError("Unexpected token (expected ']')")
          of fplUnknown, fplScalar, fplSinglePairKey, fplDocument: debugFail()
          p.lexer.bufpos.inc()
          leaveFlowLevel()
        of ',':
          case level.kind
          of fplSequence: discard
          of fplMapValue:
            yield scalarEvent("", yTagQuestionMark, yAnchorNone)
            level.kind = fplMapKey
            explicitFlowKey = false
          of fplSinglePairValue:
            level = ancestry.pop()
            assert(level.kind == fplSequence)
            yield endMapEvent()
          of fplMapKey: explicitFlowKey = false
          of fplUnknown, fplScalar, fplSinglePairKey, fplDocument: debugFail()
          ancestry.add(level)
          level = initLevel(fplUnknown)
          state = fpFlow
          p.lexer.bufpos.inc()
        of ':':
          case level.kind
          of fplSequence, fplMapKey:
            startToken()
            parserError("Unexpected token (expected ',')")
          of fplMapValue, fplSinglePairValue: discard
          of fplUnknown, fplScalar, fplSinglePairKey, fplDocument: debugFail()
          ancestry.add(level)
          level = initLevel(fplUnknown)
          state = fpFlow
          p.lexer.bufpos.inc()
        of '#':
          p.lexer.lineEnding()
          handleLineEnd(true)
        of EndOfFile:
          startToken()
          parserError("Unclosed flow content")
        else:
          startToken()
          parserError("Unexpected content (expected flow indicator)")
  try: result = initYamlStream(backend)
  except Exception: debugFail() # compiler error
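
# Minimal usage sketch (hypothetical driver code, not part of this module;
# it assumes the iterator above is wrapped by this parser's public parse
# proc and that the returned YamlStream is consumed via the stream API
# defined elsewhere in this library):
#
#   import streams
#   let parser = newYamlParser()
#   let events = parser.parse(newStringStream("foo: [1, 2]"))
#   # iterate `events` with the YamlStream interface to receive
#   # startDoc/startMap/scalar/... events in document order.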