trivial memory-based speedups (#2205)

* trivial memory-based speedups

* HashKey becomes non-ref
* use openArray instead of seq in lots of places
* avoid sequtils.reversed when unnecessary
* add basic perf stats to test_coredb

* copyright
This commit is contained in:
Jacek Sieka 2024-05-23 17:37:51 +02:00 committed by GitHub
parent fe296213cf
commit f38c5e631e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
11 changed files with 127 additions and 92 deletions

View File

@ -21,17 +21,17 @@ import
# Private helper
# ------------------------------------------------------------------------------
proc load64(data: Blob; start: var int): Result[uint64,AristoError] =
proc load64(data: openArray[byte]; start: var int): Result[uint64,AristoError] =
if data.len < start + 9:
return err(DeblobPayloadTooShortInt64)
let val = uint64.fromBytesBE(data[start ..< start + 8])
let val = uint64.fromBytesBE(data.toOpenArray(start, start + 7))
start += 8
ok val
proc load256(data: Blob; start: var int): Result[UInt256,AristoError] =
proc load256(data: openArray[byte]; start: var int): Result[UInt256,AristoError] =
if data.len < start + 33:
return err(DeblobPayloadTooShortInt256)
let val = UInt256.fromBytesBE(data[start ..< start + 32])
let val = UInt256.fromBytesBE(data.toOpenArray(start, start + 31))
start += 32
ok val
@ -272,7 +272,7 @@ proc blobify*(vFqs: openArray[(QueueID,QueueID)]): Blob =
# -------------
proc deblobify(data: Blob; pyl: var PayloadRef): Result[void,AristoError] =
proc deblobify(data: openArray[byte]; pyl: var PayloadRef): Result[void,AristoError] =
if data.len == 0:
pyl = PayloadRef(pType: RawData)
return ok()
@ -328,7 +328,7 @@ proc deblobify(data: Blob; pyl: var PayloadRef): Result[void,AristoError] =
pyl = pAcc
ok()
proc deblobify*(record: Blob; vtx: var VertexRef): Result[void,AristoError] =
proc deblobify*(record: openArray[byte]; vtx: var VertexRef): Result[void,AristoError] =
## De-serialise a data record encoded with `blobify()`. The second
## argument `vtx` can be `nil`.
if record.len < 3: # minimum `Leaf` record
@ -348,14 +348,14 @@ proc deblobify*(record: Blob; vtx: var VertexRef): Result[void,AristoError] =
aIny = record.len - 2
var
offs = 0
access = uint16.fromBytesBE record[aInx..aIny] # bitmap
access = uint16.fromBytesBE record.toOpenArray(aInx, aIny) # bitmap
vtxList: array[16,VertexID]
while access != 0:
if maxOffset < offs:
return err(DeblobBranchInxOutOfRange)
let n = access.firstSetBit - 1
access.clearBit n
vtxList[n] = (uint64.fromBytesBE record[offs ..< offs+8]).VertexID
vtxList[n] = (uint64.fromBytesBE record.toOpenArray(offs, offs + 7)).VertexID
offs += 8
# End `while`
vtx = VertexRef(
@ -370,12 +370,12 @@ proc deblobify*(record: Blob; vtx: var VertexRef): Result[void,AristoError] =
return err(DeblobExtTooShort)
if 8 + sLen != rLen: # => slen is at least 1
return err(DeblobExtSizeGarbled)
let (isLeaf, pathSegment) = hexPrefixDecode record[8 ..< rLen]
let (isLeaf, pathSegment) = hexPrefixDecode record.toOpenArray(8, rLen - 1)
if isLeaf:
return err(DeblobExtGotLeafPrefix)
vtx = VertexRef(
vType: Extension,
eVid: (uint64.fromBytesBE record[0 ..< 8]).VertexID,
eVid: (uint64.fromBytesBE record.toOpenArray(0, 7)).VertexID,
ePfx: pathSegment)
of 3: # `Leaf` vertex
@ -385,11 +385,11 @@ proc deblobify*(record: Blob; vtx: var VertexRef): Result[void,AristoError] =
pLen = rLen - sLen # payload length
if rLen < sLen:
return err(DeblobLeafSizeGarbled)
let (isLeaf, pathSegment) = hexPrefixDecode record[pLen ..< rLen]
let (isLeaf, pathSegment) = hexPrefixDecode record.toOpenArray(pLen, rLen-1)
if not isLeaf:
return err(DeblobLeafGotExtPrefix)
var pyl: PayloadRef
? record[0 ..< pLen].deblobify(pyl)
? record.toOpenArray(0, pLen - 1).deblobify(pyl)
vtx = VertexRef(
vType: Leaf,
lPfx: pathSegment,
@ -399,14 +399,14 @@ proc deblobify*(record: Blob; vtx: var VertexRef): Result[void,AristoError] =
return err(DeblobUnknown)
ok()
proc deblobify*(data: Blob; T: type VertexRef): Result[T,AristoError] =
proc deblobify*(data: openArray[byte]; T: type VertexRef): Result[T,AristoError] =
## Variant of `deblobify()` for vertex deserialisation.
var vtx = T(nil) # will be auto-initialised
? data.deblobify vtx
ok vtx
proc deblobify*(data: Blob; vGen: var seq[VertexID]): Result[void,AristoError] =
proc deblobify*(data: openArray[byte]; vGen: var seq[VertexID]): Result[void,AristoError] =
## De-serialise the data record encoded with `blobify()` into the vertex ID
## generator argument `vGen`.
if data.len == 0:
@ -418,10 +418,10 @@ proc deblobify*(data: Blob; vGen: var seq[VertexID]): Result[void,AristoError] =
return err(DeblobWrongType)
for n in 0 ..< (data.len div 8):
let w = n * 8
vGen.add (uint64.fromBytesBE data[w ..< w + 8]).VertexID
vGen.add (uint64.fromBytesBE data.toOpenArray(w, w+7)).VertexID
ok()
proc deblobify*(data: Blob; T: type seq[VertexID]): Result[T,AristoError] =
proc deblobify*(data: openArray[byte]; T: type seq[VertexID]): Result[T,AristoError] =
## Variant of `deblobify()` for deserialising the vertex ID generator state
var vGen: seq[VertexID]
? data.deblobify vGen
@ -436,25 +436,25 @@ proc deblobify*(data: Blob; filter: var FilterRef): Result[void,AristoError] =
func deblob(data: openArray[byte]; shortKey: bool): Result[HashKey,void] =
if shortKey:
HashKey.fromBytes data[1 .. min(data[0],31)]
HashKey.fromBytes data.toOpenArray(1, min(int data[0],31))
else:
HashKey.fromBytes data
let f = FilterRef()
f.fid = (uint64.fromBytesBE data[0 ..< 8]).FilterID
f.fid = (uint64.fromBytesBE data.toOpenArray(0, 7)).FilterID
(addr f.src.data[0]).copyMem(unsafeAddr data[8], 32)
(addr f.trg.data[0]).copyMem(unsafeAddr data[40], 32)
let
nVids = uint32.fromBytesBE data[72 ..< 76]
nTriplets = uint32.fromBytesBE data[76 ..< 80]
nVids = uint32.fromBytesBE data.toOpenArray(72, 75)
nTriplets = uint32.fromBytesBE data.toOpenArray(76, 79)
nTrplStart = (80 + nVids * 8).int
if data.len < nTrplStart:
return err(DeblobFilterGenTooShort)
for n in 0 ..< nVids:
let w = 80 + n * 8
f.vGen.add (uint64.fromBytesBE data[w ..< w + 8]).VertexID
f.vGen.add (uint64.fromBytesBE data.toOpenArray(int w, int w+7)).VertexID
var offs = nTrplStart
for n in 0 ..< nTriplets:
@ -463,20 +463,20 @@ proc deblobify*(data: Blob; filter: var FilterRef): Result[void,AristoError] =
let
keyFlag = data[offs] shr 6
vtxFlag = ((uint32.fromBytesBE data[offs ..< offs+4]) and 0x3fff_ffff).int
vtxFlag = ((uint32.fromBytesBE data.toOpenArray(offs, offs+3)) and 0x3fff_ffff).int
vLen = if vtxFlag == 0x3fff_ffff: 0 else: vtxFlag
if keyFlag == 0 and vtxFlag == 0:
return err(DeblobFilterTrpVtxSizeGarbled) # no blind records
offs = offs + 4
let vid = (uint64.fromBytesBE data[offs ..< offs + 8]).VertexID
let vid = (uint64.fromBytesBE data.toOpenArray(offs, offs+7)).VertexID
offs = offs + 8
if data.len < offs + (1 < keyFlag).ord * 32 + vLen:
return err(DeblobFilterTrpTooShort)
if 1 < keyFlag:
f.kMap[vid] = data[offs ..< offs + 32].deblob(keyFlag == 3).valueOr:
f.kMap[vid] = data.toOpenArray(offs, offs+31).deblob(keyFlag == 3).valueOr:
return err(DeblobHashKeyExpected)
offs = offs + 32
elif keyFlag == 1:
@ -486,7 +486,7 @@ proc deblobify*(data: Blob; filter: var FilterRef): Result[void,AristoError] =
f.sTab[vid] = VertexRef(nil)
elif 0 < vLen:
var vtx: VertexRef
? data[offs ..< offs + vLen].deblobify vtx
? data.toOpenArray(offs, offs + vLen - 1).deblobify vtx
f.sTab[vid] = vtx
offs = offs + vLen
@ -518,8 +518,8 @@ proc deblobify*(
for n in 0 ..< (data.len div 16):
let
w = n * 16
a = (uint64.fromBytesBE data[w + 0 ..< w + 8]).QueueID
b = (uint64.fromBytesBE data[w + 8 ..< w + 16]).QueueID
a = (uint64.fromBytesBE data.toOpenArray(w, w + 7)).QueueID
b = (uint64.fromBytesBE data.toOpenArray(w + 8, w + 15)).QueueID
vFqs.add (a,b)
ok()

View File

@ -98,7 +98,7 @@ func getOrVoid*[W](tab: Table[W,HashSet[VertexID]]; w: W): HashSet[VertexID] =
# --------
func isValid*(vtx: VertexRef): bool =
vtx != VertexRef(nil)
vtx != VertexRef(nil)
func isValid*(nd: NodeRef): bool =
nd != NodeRef(nil)
@ -260,6 +260,11 @@ proc forgetOthers*(db: AristoDbRef): Result[void,AristoError] =
db.dudes = DudesRef(nil)
ok()
iterator rstack*(db: AristoDbRef): LayerRef =
# Stack in reverse order
for i in 0..<db.stack.len:
yield db.stack[db.stack.len - i - 1]
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -53,11 +53,11 @@ type
## `Hash256` type of the Keccak hash of an empty `Blob` (see constant
## `EMPTY_ROOT_HASH`.)
##
case isHash: bool
of true:
key: Hash256 ## Merkle hash tacked to a vertex
else:
blob: Blob ## Optionally encoded small node data
## For performance, we avoid storing blobs as `seq`, instead storing their
## length and sharing the data "space".
## TODO can we skip one byte of hash and reduce this type to 32 bytes?
buf: array[32, byte] # Either Hash256 or blob data, depending on `len`
len: int8 # length in the case of blobs, or 32 when it's a hash
PathID* = object
## Path into the `Patricia Trie`. This is a chain of maximal 64 nibbles
@ -198,15 +198,18 @@ func `==`*(a, b: PathID): bool =
func cmp*(a, b: PathID): int =
if a < b: -1 elif b < a: 1 else: 0
template data*(lid: HashKey): openArray[byte] =
lid.buf.toOpenArray(0, lid.len - 1)
func to*(lid: HashKey; T: type PathID): T =
## Helper to borrow certain properties from `PathID`
if lid.isHash:
PathID(pfx: UInt256.fromBytesBE lid.key.data, length: 64)
elif 0 < lid.blob.len:
doAssert lid.blob.len < 32
if lid.len == 32:
PathID(pfx: UInt256.fromBytesBE lid.data, length: 64)
elif 0 < lid.len:
doAssert lid.len < 32
var a32: array[32,byte]
(addr a32[0]).copyMem(unsafeAddr lid.blob[0], lid.blob.len)
PathID(pfx: UInt256.fromBytesBE a32, length: 2 * lid.blob.len.uint8)
(addr a32[0]).copyMem(unsafeAddr lid.data[0], lid.len)
PathID(pfx: UInt256.fromBytesBE a32, length: 2 * lid.len.uint8)
else:
PathID()
@ -215,7 +218,7 @@ func to*(lid: HashKey; T: type PathID): T =
# ------------------------------------------------------------------------------
func len*(lid: HashKey): int =
if lid.isHash: 32 else: lid.blob.len
lid.len.int # if lid.isHash: 32 else: lid.blob.len
func fromBytes*(T: type HashKey; data: openArray[byte]): Result[T,void] =
## Write argument `data` of length 0 or between 2 and 32 bytes as a `HashKey`.
@ -228,13 +231,16 @@ func fromBytes*(T: type HashKey; data: openArray[byte]): Result[T,void] =
##
if data.len == 32:
var lid: T
lid.isHash = true
(addr lid.key.data[0]).copyMem(unsafeAddr data[0], data.len)
lid.len = 32
(addr lid.data[0]).copyMem(unsafeAddr data[0], data.len)
return ok lid
if data.len == 0:
return ok HashKey()
if 1 < data.len and data.len < 32 and data[0].int == 0xbf + data.len:
return ok T(isHash: false, blob: @data)
var lid: T
lid.len = int8 data.len
(addr lid.data[0]).copyMem(unsafeAddr data[0], data.len)
return ok lid
err()
func `<`*(a, b: HashKey): bool =
@ -242,16 +248,11 @@ func `<`*(a, b: HashKey): bool =
a.to(PathID) < b.to(PathID)
func `==`*(a, b: HashKey): bool =
if a.isHash != b.isHash:
false
elif a.isHash:
a.key == b.key
else:
a.blob == b.blob
a.data == b.data
func cmp*(a, b: HashKey): int =
## Slow, but useful for debug sorting
if a < b: -1 elif b < a: 1 else: 0
cmp(a.data, b.data)
# ------------------------------------------------------------------------------
# Public helpers: `LeafTie` ordered scalar data model
@ -292,16 +293,13 @@ func cmp*(a, b: LeafTie): int =
# Public helpers: Reversible conversions between `PathID`, `HashKey`, etc.
# ------------------------------------------------------------------------------
func to*(key: HashKey; T: type Blob): T =
func to*(key: HashKey; T: type Blob): T {.deprecated.} =
## Rewrite `HashKey` argument as `Blob` type of length between 0 and 32. A
## blob of length 32 is taken as a representation of a `HashKey` type while
## smaller blobs are expected to represent an RLP encoded small node.
if key.isHash:
@(key.key.data)
else:
key.blob
@(key.data)
func `@`*(lid: HashKey): Blob =
func `@`*(lid: HashKey): Blob {.deprecated.} =
## Variant of `to(Blob)`
lid.to(Blob)
@ -323,10 +321,10 @@ func `@`*(pid: PathID): Blob =
func to*(lid: HashKey; T: type Hash256): T =
## Returns the `Hash256` key if available, otherwise the Keccak hash of
## the `Blob` version.
if lid.isHash:
lid.key
elif 0 < lid.blob.len:
lid.blob.keccakHash
if lid.len == 32:
Hash256(data: lid.buf)
elif 0 < lid.len:
lid.data.keccakHash
else:
EMPTY_ROOT_HASH
@ -336,7 +334,7 @@ func to*(key: Hash256; T: type HashKey): T =
if key == EMPTY_ROOT_HASH:
T()
else:
T(isHash: true, key: key)
T(len: 32, buf: key.data)
func to*(n: SomeUnsignedInt|UInt256; T: type PathID): T =
## Representation of a scalar as `PathID` (preserving full information)
@ -349,11 +347,14 @@ func to*(n: SomeUnsignedInt|UInt256; T: type PathID): T =
func digestTo*(data: openArray[byte]; T: type HashKey): T =
## For argument `data` with length smaller than 32, import them as-is into
## the result. Otherwise import the Keccak hash of the argument `data`.
if data.len < 32:
result.blob = @data
if data.len == 0:
result.len = 0
elif data.len < 32:
result.len = int8 data.len
(addr result.data[0]).copyMem(unsafeAddr data[0], data.len)
else:
result.isHash = true
result.key = data.keccakHash
result.len = 32
result.buf = data.keccakHash.data
func normal*(a: PathID): PathID =
## Normalise path ID representation
@ -376,12 +377,7 @@ func hash*(a: PathID): Hash =
func hash*(a: HashKey): Hash =
## Table/KeyedQueue mixin
var h: Hash = 0
if a.isHash:
h = h !& a.key.hash
else:
h = h !& a.blob.hash
!$h
hash(a.data)
# ------------------------------------------------------------------------------
# Miscellaneous helpers
@ -415,10 +411,7 @@ func `$`*(key: Hash256): string =
w.toHex
func `$`*(key: HashKey): string =
if key.isHash:
$key.key
else:
key.blob.toHex & "[#" & $key.blob.len & "]"
toHex(key.data)
func `$`*(a: PathID): string =
if a.pfx.isZero.not:

View File

@ -54,7 +54,7 @@ iterator walk*(
break walkBody
let
pfx = StorageType(key[0])
id = uint64.fromBytesBE key[1..^1]
id = uint64.fromBytesBE key.toOpenArray(1, key.len - 1)
yield (pfx, id, val)

View File

@ -69,7 +69,7 @@ proc layersGetVtx*(db: AristoDbRef; vid: VertexID): Result[VertexRef,void] =
if db.top.delta.sTab.hasKey vid:
return ok(db.top.delta.sTab.getOrVoid vid)
for w in db.stack.reversed:
for w in db.rstack:
if w.delta.sTab.hasKey vid:
return ok(w.delta.sTab.getOrVoid vid)
@ -89,7 +89,7 @@ proc layersGetKey*(db: AristoDbRef; vid: VertexID): Result[HashKey,void] =
# dirty, there is an empty `kMap[]` entry on this layer.
return ok(db.top.delta.kMap.getOrVoid vid)
for w in db.stack.reversed:
for w in db.rstack:
if w.delta.kMap.hasKey vid:
# Same reasoning as above regarding the `dirty` flag.
return ok(w.delta.kMap.getOrVoid vid)
@ -233,7 +233,7 @@ iterator layersWalkVtx*(
yield (vid,vtx)
seen.incl vid
for w in db.stack.reversed:
for w in db.rstack:
for (vid,vtx) in w.delta.sTab.pairs:
if vid notin seen:
yield (vid,vtx)
@ -258,7 +258,7 @@ iterator layersWalkKey*(
yield (vid,key)
seen.incl vid
for w in db.stack.reversed:
for w in db.rstack:
for (vid,key) in w.delta.kMap.pairs:
if vid notin seen:
yield (vid,key)

View File

@ -45,9 +45,9 @@ func pathAsBlob*(tag: PathID): Blob =
## used to index database leaf values can be represented as `Blob`, i.e.
## `PathID` type paths with an even number of nibbles.
if 0 < tag.length:
let key = @(tag.pfx.toBytesBE)
let key = tag.pfx.toBytesBE
if 64 <= tag.length:
return key
return @key
else:
return key[0 .. (tag.length - 1) div 2]

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -133,9 +133,9 @@ proc append*(writer: var RlpWriter; node: NodeRef) =
## list.
func addHashKey(w: var RlpWriter; key: HashKey) =
if 1 < key.len and key.len < 32:
w.appendRawBytes @key
w.appendRawBytes key.data
else:
w.append @key
w.append key.data
if node.error != AristoError(0):
writer.startList(0)

View File

@ -191,6 +191,11 @@ proc forgetOthers*(db: KvtDbRef): Result[void,KvtError] =
db.dudes = DudesRef(nil)
ok()
iterator rstack*(db: KvtDbRef): LayerRef =
# Stack in reverse order
for i in 0..<db.stack.len:
yield db.stack[db.stack.len - i - 1]
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -37,7 +37,7 @@ proc layersHasKey*(db: KvtDbRef; key: openArray[byte]): bool =
if db.top.delta.sTab.hasKey @key:
return true
for w in db.stack.reversed:
for w in db.rstack:
if w.delta.sTab.hasKey @key:
return true
@ -49,7 +49,7 @@ proc layersGet*(db: KvtDbRef; key: openArray[byte]): Result[Blob,void] =
if db.top.delta.sTab.hasKey @key:
return ok(db.top.delta.sTab.getOrVoid @key)
for w in db.stack.reversed:
for w in db.rstack:
if w.delta.sTab.hasKey @key:
return ok(w.delta.sTab.getOrVoid @key)
@ -101,7 +101,7 @@ iterator layersWalk*(
yield (key,val)
seen.incl key
for w in db.stack.reversed:
for w in db.rstack:
for (key,val) in w.delta.sTab.pairs:
if key notin seen:
yield (key,val)

View File

@ -199,6 +199,20 @@ proc test_chainSync*(
# This will enable printing the `era1` covered block ranges (if any)
undump_blocks_era1.noisy = noisy
var
blocks = 0
total = 0
begin = toUnixFloat(getTime())
sample = begin
template sayPerf =
if blocks > 0:
total += blocks
let done {.inject.} = toUnixFloat(getTime())
noisy.say "", &"{blocks:3} blocks, {(done-sample):2.3}s, {(blocks.float / (done-sample)):4.3f} b/s, avg {(total.float / (done-begin)):4.3f} b/s"
blocks = 0
sample = done
for w in files.undumpBlocks(least = start):
let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)
if fromBlock == 0.u256:
@ -209,9 +223,12 @@ proc test_chainSync*(
if toBlock < lastBlock:
# Message if `[fromBlock,toBlock]` contains a multiple of `sayBlocks`
if fromBlock + (toBlock mod sayBlocks.u256) <= toBlock:
noisy.say "***", &"processing ...[#{fromBlock},#{toBlock}]..."
sayPerf
noisy.whisper "***", &"processing ...[#{fromBlock:>8},#{toBlock:>8}]..."
if enaLogging:
noisy.startLogging(w[0][0].blockNumber)
noisy.stopLoggingAfter():
let runPersistBlocksRc = chain.persistBlocks(w[0], w[1])
xCheck runPersistBlocksRc == ValidationResult.OK:
@ -222,6 +239,7 @@ proc test_chainSync*(
com.db.trackNewApi = false
com.db.trackLedgerApi = false
discard chain.persistBlocks(w[0], w[1])
blocks += w[0].len
continue
# Last group or single block
@ -241,7 +259,8 @@ proc test_chainSync*(
let
headers1 = w[0][0 ..< pivot]
bodies1 = w[1][0 ..< pivot]
noisy.say "***", &"processing {dotsOrSpace}[#{fromBlock},#{lastBlock-1}]"
sayPerf
noisy.whisper "***", &"processing {dotsOrSpace}[#{fromBlock:>8},#{(lastBlock-1):>8}]"
let runPersistBlocks1Rc = chain.persistBlocks(headers1, bodies1)
xCheck runPersistBlocks1Rc == ValidationResult.OK
dotsOrSpace = " "
@ -251,16 +270,19 @@ proc test_chainSync*(
let
headers0 = headers9[0..0]
bodies0 = bodies9[0..0]
noisy.say "***", &"processing {dotsOrSpace}[#{lastBlock},#{lastBlock}]"
sayPerf
noisy.whisper "***", &"processing {dotsOrSpace}[#{lastBlock:>8},#{lastBlock:>8}]"
noisy.stopLoggingAfter():
let runPersistBlocks0Rc = chain.persistBlocks(headers0, bodies0)
xCheck runPersistBlocks0Rc == ValidationResult.OK
else:
noisy.say "***", &"processing {dotsOrSpace}[#{lastBlock},#{toBlock}]"
sayPerf
noisy.whisper "***", &"processing {dotsOrSpace}[#{lastBlock:>8},#{toBlock:>8}]"
noisy.stopLoggingAfter():
let runPersistBlocks9Rc = chain.persistBlocks(headers9, bodies9)
xCheck runPersistBlocks9Rc == ValidationResult.OK
break
sayPerf
true

View File

@ -53,6 +53,16 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
else:
echo pfx, args.toSeq.join
proc whisper*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
if noisy:
if args.len == 0:
stdout.write("*** ", pfx)
elif 0 < pfx.len and pfx[^1] != ' ':
stdout.write(pfx, " ", args.toSeq.join)
else:
stdout.write(pfx, args.toSeq.join)
stdout.flushFile()
proc toPfx*(indent: int): string =
"\n" & " ".repeat(indent)