Mirror of https://github.com/status-im/nimbus-eth1.git, synced 2025-01-13 05:44:40 +00:00
avoid some trivial memory allocations (#2587)
* pre-allocate `blobify` data and remove redundant error handling (serialisation cannot fail on correct data)
* use a threadvar for temporary storage when decoding rdb, avoiding a closure environment
* speed up database walkers by avoiding many temporaries

~5% perf improvement on block import, 100x on database iteration (useful for building analysis tooling)
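As background for the first bullet, a minimal Nim sketch of the pre-allocation pattern (the serializer and its 8-byte size estimate are made up for illustration; the diff below uses the same `newSeqOfCap` call with a 128-byte estimate):

  # hypothetical big-endian serializer, for illustration only
  proc encodeTo(x: uint64, data: var seq[byte]) =
    for i in 0 ..< 8:
      data.add byte((x shr (8 * (7 - i))) and 0xff)

  proc encode(x: uint64): seq[byte] =
    # reserving capacity up front means the appends below never trigger
    # a grow-and-copy of the backing storage
    result = newSeqOfCap[byte](8)
    x.encodeTo(result)

  doAssert encode(0x01020304'u64).len == 8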
Parent: a25ea63dec
Commit: ef1bab0802
@@ -203,12 +203,12 @@ proc blobifyTo*(vtx: VertexRef; data: var Blob): Result[void,AristoError] =
        if vtx.ePfx.len > 0:
          vtx.ePfx.toHexPrefix(isleaf = false)
        else:
-         @[]
+         default(HexPrefixBuf)
      psLen = pSegm.len.byte
    if 33 < psLen:
      return err(BlobifyExtPathOverflow)

-   data &= pSegm
+   data &= pSegm.data()
    data &= lens.toBytesBE
    data &= [0x80u8 or psLen]
@@ -219,16 +219,16 @@ proc blobifyTo*(vtx: VertexRef; data: var Blob): Result[void,AristoError] =
    if psLen == 0 or 33 < psLen:
      return err(BlobifyLeafPathOverflow)
    vtx.lData.blobifyTo(data)
-   data &= pSegm
+   data &= pSegm.data()
    data &= [0xC0u8 or psLen]

  ok()

-proc blobify*(vtx: VertexRef): Result[Blob, AristoError] =
+proc blobify*(vtx: VertexRef): Blob =
   ## Variant of `blobify()`
-  var data: Blob
-  ? vtx.blobifyTo data
-  ok(move(data))
+  result = newSeqOfCap[byte](128)
+  if vtx.blobifyTo(result).isErr:
+    result.setLen(0) # blobify only fails on invalid vertices

 proc blobifyTo*(lSst: SavedState; data: var Blob): Result[void,AristoError] =
   ## Serialise a last saved state record
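At call sites, the `blobify` change above reads roughly like this (a sketch; `vtx` stands for any `VertexRef`, and per the comment in the hunk a well-formed vertex cannot fail to serialise):

  # before: a Result that never failed on valid data had to be unwrapped
  #   let data = ? vtx.blobify()
  # after: a plain Blob comes back, empty only for an invalid vertex
  #   let data = vtx.blobify()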
@@ -246,45 +246,48 @@ proc blobify*(lSst: SavedState): Result[Blob,AristoError] =
 # -------------
 proc deblobify(
     data: openArray[byte];
-    T: type LeafPayload;
-      ): Result[LeafPayload,AristoError] =
+    pyl: var LeafPayload;
+      ): Result[void,AristoError] =
   if data.len == 0:
-    return ok LeafPayload(pType: RawData)
+    pyl = LeafPayload(pType: RawData)
+    return ok()

   let mask = data[^1]
   if (mask and 0x10) > 0: # unstructured payload
-    return ok LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])
+    pyl = LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])
+    return ok()

   if (mask and 0x20) > 0: # Slot storage data
-    return ok LeafPayload(
+    pyl = LeafPayload(
       pType: StoData,
       stoData: ?deblobify(data.toOpenArray(0, data.len - 2), UInt256))
+    return ok()

+  pyl = LeafPayload(pType: AccountData)
   var
-    pAcc = LeafPayload(pType: AccountData)
     start = 0
     lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2))

   if (mask and 0x01) > 0:
     let len = lens and 0b111
-    pAcc.account.nonce = ? load64(data, start, int(len + 1))
+    pyl.account.nonce = ? load64(data, start, int(len + 1))

   if (mask and 0x02) > 0:
     let len = (lens shr 3) and 0b11111
-    pAcc.account.balance = ? load256(data, start, int(len + 1))
+    pyl.account.balance = ? load256(data, start, int(len + 1))

   if (mask and 0x04) > 0:
     let len = (lens shr 8) and 0b111
-    pAcc.stoID = (true, VertexID(? load64(data, start, int(len + 1))))
+    pyl.stoID = (true, VertexID(? load64(data, start, int(len + 1))))

   if (mask and 0x08) > 0:
     if data.len() < start + 32:
       return err(DeblobCodeLenUnsupported)
-    discard pAcc.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
+    discard pyl.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
   else:
-    pAcc.account.codeHash = EMPTY_CODE_HASH
+    pyl.account.codeHash = EMPTY_CODE_HASH

-  ok(pAcc)
+  ok()

 proc deblobify*(
     record: openArray[byte];
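The reworked `deblobify` signature above lets callers decode into storage they already own instead of receiving a freshly allocated `LeafPayload`; roughly (a sketch, reusing names from the surrounding hunks):

  # before: a new payload is constructed and returned on every call
  #   let pyl = ? data.deblobify(LeafPayload)
  # after: the payload is written in place, e.g. straight into the field
  # of an already-constructed vertex (see the Leaf hunk below)
  #   ? data.deblobify(vtx.lData)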
@@ -336,11 +339,12 @@ proc deblobify*(
       NibblesBuf.fromHexPrefix record.toOpenArray(pLen, rLen-1)
     if not isLeaf:
       return err(DeblobLeafGotExtPrefix)
-    let pyl = ? record.toOpenArray(0, pLen - 1).deblobify(LeafPayload)
-    VertexRef(
+    let vtx = VertexRef(
       vType: Leaf,
-      lPfx: pathSegment,
-      lData: pyl)
+      lPfx: pathSegment)
+
+    ? record.toOpenArray(0, pLen - 1).deblobify(vtx.lData)
+    vtx
   else:
     return err(DeblobUnknown)
@@ -68,7 +68,7 @@ proc computeKeyImpl(
   case vtx.vType:
   of Leaf:
     writer.startList(2)
-    writer.append(vtx.lPfx.toHexPrefix(isLeaf = true))
+    writer.append(vtx.lPfx.toHexPrefix(isLeaf = true).data())

     case vtx.lData.pType
     of AccountData:
@@ -111,7 +111,7 @@ proc computeKeyImpl(
       writeBranch(bwriter)

       writer.startList(2)
-      writer.append(vtx.ePfx.toHexPrefix(isleaf = false))
+      writer.append(vtx.ePfx.toHexPrefix(isleaf = false).data())
       writer.append(bwriter.finish().digestTo(HashKey))
     else:
       writeBranch(writer)
@@ -8,15 +8,20 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.

-import stew/arrayops
+import stew/[arraybuf, arrayops]
+
+export arraybuf

-type NibblesBuf* = object
-  ## Allocation-free type for storing up to 64 4-bit nibbles, as seen in the
-  ## Ethereum MPT
-  bytes: array[32, byte]
-  ibegin, iend: int8
-    # Where valid nibbles can be found - we use indices here to avoid copies
-    # when slicing - iend not inclusive
+type
+  NibblesBuf* = object
+    ## Allocation-free type for storing up to 64 4-bit nibbles, as seen in the
+    ## Ethereum MPT
+    bytes: array[32, byte]
+    ibegin, iend: int8
+      # Where valid nibbles can be found - we use indices here to avoid copies
+      # when slicing - iend not inclusive
+
+  HexPrefixBuf* = ArrayBuf[33, byte]

 func high*(T: type NibblesBuf): int =
   63
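A hex-prefix encoding of at most 64 nibbles needs at most 33 bytes (one flag byte plus 32 packed bytes), which is why a fixed-capacity `ArrayBuf` can replace the heap-allocated `seq[byte]` here. A small sketch of how such a buffer behaves, assuming only the `setLen`/`[]=`/`data()` operations the diff itself relies on:

  import stew/arraybuf

  type HexPrefixBuf = ArrayBuf[33, byte]

  var buf: HexPrefixBuf            # plain value type, no heap allocation
  buf.setLen(2)
  buf[0] = 0x20                    # hex-prefix flag byte: even-length leaf
  buf[1] = 0xab
  doAssert buf.data().len == 2     # data() exposes the valid bytes as openArray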
@@ -61,7 +66,7 @@ func `$`*(r: NibblesBuf): string =
     const chars = "0123456789abcdef"
     result.add chars[r[i]]

-func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf =
+func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf {.noinit.} =
   result.bytes = r.bytes
   result.ibegin = r.ibegin + ibegin.int8
   let e =
@@ -75,7 +80,7 @@ func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf =
 template writeFirstByte(nibbleCountExpr) {.dirty.} =
   let nibbleCount = nibbleCountExpr
   var oddnessFlag = (nibbleCount and 1) != 0
-  newSeq(result, (nibbleCount div 2) + 1)
+  result.setLen((nibbleCount div 2) + 1)
   result[0] = byte((int(isLeaf) * 2 + int(oddnessFlag)) shl 4)
   var writeHead = 0
@@ -89,11 +94,11 @@ template writeNibbles(r) {.dirty.} =
       result[writeHead] = nextNibble shl 4
     oddnessFlag = not oddnessFlag

-func toHexPrefix*(r: NibblesBuf, isLeaf = false): seq[byte] =
+func toHexPrefix*(r: NibblesBuf, isLeaf = false): HexPrefixBuf =
   writeFirstByte(r.len)
   writeNibbles(r)

-func toHexPrefix*(r1, r2: NibblesBuf, isLeaf = false): seq[byte] =
+func toHexPrefix*(r1, r2: NibblesBuf, isLeaf = false): HexPrefixBuf =
   writeFirstByte(r1.len + r2.len)
   writeNibbles(r1)
   writeNibbles(r2)
@@ -131,7 +136,7 @@ func fromHexPrefix*(
   else:
     result.isLeaf = false

-func `&`*(a, b: NibblesBuf): NibblesBuf =
+func `&`*(a, b: NibblesBuf): NibblesBuf {.noinit.} =
   for i in 0 ..< a.len:
     result[i] = a[i]
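The `{.noinit.}` pragmas above are a related micro-optimisation: Nim normally zero-initialises `result` on entry, and for a value type of this size that implicit memset is wasted when every field is assigned anyway. A self-contained sketch (illustrative type, not the real `NibblesBuf`):

  type Buf = object
    bytes: array[32, byte]

  func filled(b: byte): Buf {.noinit.} =
    # safe to skip zero-init: every byte of `result` is written below
    for i in 0 ..< result.bytes.len:
      result.bytes[i] = b

  doAssert filled(7).bytes[31] == 7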
@@ -133,14 +133,7 @@ proc putVtxFn(db: MemBackendRef): PutVtxFn =
       let hdl = hdl.getSession db
       if hdl.error.isNil:
         if vtx.isValid:
-          let rc = vtx.blobify()
-          if rc.isErr:
-            hdl.error = TypedPutHdlErrRef(
-              pfx: VtxPfx,
-              vid: rvid.vid,
-              code: rc.error)
-            return
-          hdl.sTab[rvid] = rc.value
+          hdl.sTab[rvid] = vtx.blobify()
         else:
           hdl.sTab[rvid] = EmptyBlob
@@ -312,10 +312,8 @@ iterator walkVtx*(
     be: RdbBackendRef;
       ): tuple[evid: RootedVertexID, vtx: VertexRef] =
   ## Variant of `walk()` iteration over the vertex sub-table.
-  for (rvid, data) in be.rdb.walkVtx:
-    let rc = data.deblobify VertexRef
-    if rc.isOk:
-      yield (rvid, rc.value)
+  for (rvid, vtx) in be.rdb.walkVtx:
+    yield (rvid, vtx)

 iterator walkKey*(
     be: RdbBackendRef;
@@ -63,10 +63,10 @@ proc getKey*(
     return ok(move(rc.value))

   # Otherwise fetch from backend database
-  var res: Result[HashKey,(AristoError,string)]
+  # A threadvar is used to avoid allocating an environment for onData
+  var res{.threadvar.}: Opt[HashKey]
   let onData = proc(data: openArray[byte]) =
-    res = HashKey.fromBytes(data).mapErr(proc(): auto =
-      (RdbHashKeyExpected,""))
+    res = HashKey.fromBytes(data)

   let gotData = rdb.keyCol.get(rvid.blobify().data(), onData).valueOr:
     const errSym = RdbBeDriverGetKeyError
@@ -76,9 +76,9 @@ proc getKey*(

   # Correct result if needed
   if not gotData:
-    res = ok(VOID_HASH_KEY)
+    res.ok(VOID_HASH_KEY)
   elif res.isErr():
-    return res # Parsing failed
+    return err((RdbHashKeyExpected,"")) # Parsing failed

   # Update cache and return
   ok rdb.rdKeyLru.lruAppend(rvid.vid, res.value(), RdKeyLruMaxSize)
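The threadvar trick above (repeated for `getVtx` below) generalises: a callback that captures a local `var` forces a heap-allocated closure environment on every call, while a callback that writes to a threadvar captures nothing. A stand-alone sketch with a stand-in callback API (names hypothetical):

  import std/options

  # per-thread scratch slot instead of a captured local
  var tmp {.threadvar.}: Option[int]

  proc query(onData: proc(x: int)) =
    # stand-in for a driver call that invokes `onData` with borrowed data
    onData(42)

  proc fetch(): Option[int] =
    tmp = none(int)
    query(proc(x: int) = tmp = some(x))  # captures nothing -> no closure env
    tmp

  doAssert fetch() == some(42)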
@@ -93,10 +93,10 @@ proc getVtx*(
     return ok(move(rc.value))

   # Otherwise fetch from backend database
-  var res: Result[VertexRef,(AristoError,string)]
+  # A threadvar is used to avoid allocating an environment for onData
+  var res {.threadvar.}: Result[VertexRef,AristoError]
   let onData = proc(data: openArray[byte]) =
-    res = data.deblobify(VertexRef).mapErr(proc(error: AristoError): auto =
-      (error,""))
+    res = data.deblobify(VertexRef)

   let gotData = rdb.vtxCol.get(rvid.blobify().data(), onData).valueOr:
     const errSym = RdbBeDriverGetVtxError
@@ -105,9 +105,9 @@ proc getVtx*(
     return err((errSym,error))

   if not gotData:
-    res = ok(VertexRef(nil))
+    res.ok(VertexRef(nil))
   elif res.isErr():
-    return res # Parsing failed
+    return err((res.error(), "Parsing failed")) # Parsing failed

   # Update cache and return
   ok rdb.rdVtxLru.lruAppend(rvid.vid, res.value(), RdVtxLruMaxSize)
@@ -122,12 +122,7 @@ proc putVtx*(
       ): Result[void,(VertexID,AristoError,string)] =
   let dsc = rdb.session
   if vtx.isValid:
-    let rc = vtx.blobify()
-    if rc.isErr:
-      # Caller must `rollback()` which will flush the `rdVtxLru` cache
-      return err((rvid.vid,rc.error,""))
-
-    dsc.put(rvid.blobify().data(), rc.value, rdb.vtxCol.handle()).isOkOr:
+    dsc.put(rvid.blobify().data(), vtx.blobify(), rdb.vtxCol.handle()).isOkOr:
       # Caller must `rollback()` which will flush the `rdVtxLru` cache
       const errSym = RdbBeDriverPutVtxError
       when extraTraceMessages:
@@ -72,8 +72,8 @@ iterator walkKey*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =
         yield (rvid, val)


-iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =
-  ## Walk over key-value pairs of the hash key column of the database.
+iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: VertexRef] =
+  ## Walk over key-value pairs of the vertex column of the database.
   ##
   ## Non-decodable entries are ignored.
   ##
@@ -84,12 +84,32 @@ iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =
       break walkBody
     defer: rit.close()

-    for (key,val) in rit.pairs:
-      if val.len != 0:
-        let rvid = key.deblobify(RootedVertexID).valueOr:
-          continue
+    rit.seekToFirst()
+    var key: RootedVertexID
+    var value: VertexRef
+    while rit.isValid():
+      var valid = true
+      rit.key(
+        proc(data: openArray[byte]) =
+          key = deblobify(data, RootedVertexID).valueOr:
+            valid = false
+            default(RootedVertexID)
+      )
+      if not valid:
+        continue

-        yield (rvid, val)
+      rit.value(
+        proc(data: openArray[byte]) =
+          value = deblobify(data, VertexRef).valueOr:
+            valid = false
+            default(VertexRef)
+      )
+      if not valid:
+        continue
+
+      rit.next()
+      yield (key, value)
+    rit.close()

 # ------------------------------------------------------------------------------
 # End
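For the analysis-tooling use case from the commit message, the reworked iterator above hands out decoded vertices directly, with no intermediate Blob per row; hypothetical usage (assuming an open `rdb: RdbInst`):

  # count the leaf vertices in the vertex column family (sketch)
  var leaves = 0
  for (rvid, vtx) in rdb.walkVtx:
    if vtx.vType == Leaf:
      inc leaves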
@@ -86,7 +86,7 @@ proc to*(node: NodeRef; T: type seq[Blob]): T =

       var wrx = initRlpWriter()
       wrx.startList(2)
-      wrx.append node.ePfx.toHexPrefix(isleaf = false)
+      wrx.append node.ePfx.toHexPrefix(isleaf = false).data()
       wrx.append brHash

       result.add wrx.finish()
@@ -104,7 +104,7 @@ proc to*(node: NodeRef; T: type seq[Blob]): T =

     var wr = initRlpWriter()
     wr.startList(2)
-    wr.append node.lPfx.toHexPrefix(isleaf = true)
+    wr.append node.lPfx.toHexPrefix(isleaf = true).data()
     wr.append node.lData.serialise(getKey0).value

     result.add (wr.finish())
@@ -127,7 +127,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T =
     let brHash = wr.finish().digestTo(HashKey)
     wr = initRlpWriter()
     wr.startList(2)
-    wr.append node.ePfx.toHexPrefix(isleaf = false)
+    wr.append node.ePfx.toHexPrefix(isleaf = false).data()
     wr.append brHash

   of Leaf:
@@ -138,7 +138,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T =
       ok(node.key[0]) # always succeeds

     wr.startList(2)
-    wr.append node.lPfx.toHexPrefix(isleaf = true)
+    wr.append node.lPfx.toHexPrefix(isleaf = true).data()
     wr.append node.lData.serialise(getKey0).value

     wr.finish().digestTo(HashKey)
@@ -65,8 +65,8 @@ suite "Aristo blobify":
       )

     check:
-      deblobify(blobify(leafRawData)[], VertexRef)[] == leafRawData
-      deblobify(blobify(leafAccount)[], VertexRef)[] == leafAccount
-      deblobify(blobify(leafStoData)[], VertexRef)[] == leafStoData
-      deblobify(blobify(branch)[], VertexRef)[] == branch
-      deblobify(blobify(extension)[], VertexRef)[] == extension
+      deblobify(blobify(leafRawData), VertexRef)[] == leafRawData
+      deblobify(blobify(leafAccount), VertexRef)[] == leafAccount
+      deblobify(blobify(leafStoData), VertexRef)[] == leafStoData
+      deblobify(blobify(branch), VertexRef)[] == branch
+      deblobify(blobify(extension), VertexRef)[] == extension
@@ -115,7 +115,7 @@ func asExtension(b: Blob; path: Hash256): Blob =
     var wr = initRlpWriter()

     wr.startList(2)
-    wr.append NibblesBuf.fromBytes(@[nibble]).slice(1).toHexPrefix(isleaf=false)
+    wr.append NibblesBuf.fromBytes(@[nibble]).slice(1).toHexPrefix(isleaf=false).data()
     wr.append node.listElem(nibble.int).toBytes
     wr.finish()
vendor/nim-stew (vendored)
@@ -1 +1 @@
-Subproject commit 54cc67cbb83f61b6e3168b09701758c5b805120a
+Subproject commit fc09b2e023ab2d73e425f7d15cf94871c7867868