Mirror of https://github.com/status-im/nimbus-eth1.git
avoid some trivial memory allocations (#2587)

* pre-allocate `blobify` data and remove redundant error handling (cannot fail on correct data)
* use threadvar for temporary storage when decoding rdb, avoiding closure env
* speed up database walkers by avoiding many temporaries

~5% perf improvement on block import, 100x on database iteration (useful for building analysis tooling)
commit ef1bab0802 (parent a25ea63dec)
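
The first bullet hinges on seq pre-allocation. A minimal self-contained sketch of the idea (not part of the commit; `encodeSketch` is a hypothetical name):

proc encodeSketch(parts: varargs[seq[byte]]): seq[byte] =
  # Reserve capacity once so the appends below never reallocate; 128 bytes
  # mirrors the capacity this commit picks for vertex encodings.
  result = newSeqOfCap[byte](128)
  for p in parts:
    result.add p

echo encodeSketch(@[1'u8, 2], @[3'u8]) # -> @[1, 2, 3]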
@@ -203,12 +203,12 @@ proc blobifyTo*(vtx: VertexRef; data: var Blob): Result[void,AristoError] =
         if vtx.ePfx.len > 0:
           vtx.ePfx.toHexPrefix(isleaf = false)
         else:
-          @[]
+          default(HexPrefixBuf)
       psLen = pSegm.len.byte
     if 33 < psLen:
       return err(BlobifyExtPathOverflow)
 
-    data &= pSegm
+    data &= pSegm.data()
     data &= lens.toBytesBE
     data &= [0x80u8 or psLen]
@@ -219,16 +219,16 @@ proc blobifyTo*(vtx: VertexRef; data: var Blob): Result[void,AristoError] =
     if psLen == 0 or 33 < psLen:
       return err(BlobifyLeafPathOverflow)
     vtx.lData.blobifyTo(data)
-    data &= pSegm
+    data &= pSegm.data()
     data &= [0xC0u8 or psLen]
 
   ok()
 
-proc blobify*(vtx: VertexRef): Result[Blob, AristoError] =
+proc blobify*(vtx: VertexRef): Blob =
   ## Variant of `blobify()`
-  var data: Blob
-  ? vtx.blobifyTo data
-  ok(move(data))
+  result = newSeqOfCap[byte](128)
+  if vtx.blobifyTo(result).isErr:
+    result.setLen(0) # blobify only fails on invalid vertices
 
 proc blobifyTo*(lSst: SavedState; data: var Blob): Result[void,AristoError] =
   ## Serialise a last saved state record
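
With `blobifyTo` only able to fail on malformed vertices, the wrapper drops its Result and marks the impossible case with an empty Blob. A sketch of that wrapper pattern, assuming the nim-results package; `encodeTo`/`encode` are hypothetical stand-ins for blobifyTo/blobify:

import results

proc encodeTo(x: int, data: var seq[byte]): Result[void, string] =
  if x < 0 or x > 255:
    return err("out of range") # only reachable for inputs callers never build
  data.add byte(x)
  ok()

proc encode(x: int): seq[byte] =
  result = newSeqOfCap[byte](16)
  if encodeTo(x, result).isErr:
    result.setLen(0) # empty output marks invalid input, no Result needed

echo encode(7)  # @[7]
echo encode(-1) # @[]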
@@ -246,45 +246,48 @@ proc blobify*(lSst: SavedState): Result[Blob,AristoError] =
 # -------------
 proc deblobify(
     data: openArray[byte];
-    T: type LeafPayload;
-      ): Result[LeafPayload,AristoError] =
+    pyl: var LeafPayload;
+      ): Result[void,AristoError] =
   if data.len == 0:
-    return ok LeafPayload(pType: RawData)
+    pyl = LeafPayload(pType: RawData)
+    return ok()
 
   let mask = data[^1]
   if (mask and 0x10) > 0: # unstructured payload
-    return ok LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])
+    pyl = LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])
+    return ok()
 
   if (mask and 0x20) > 0: # Slot storage data
-    return ok LeafPayload(
+    pyl = LeafPayload(
       pType: StoData,
       stoData: ?deblobify(data.toOpenArray(0, data.len - 2), UInt256))
+    return ok()
 
+  pyl = LeafPayload(pType: AccountData)
   var
-    pAcc = LeafPayload(pType: AccountData)
     start = 0
     lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2))
 
   if (mask and 0x01) > 0:
     let len = lens and 0b111
-    pAcc.account.nonce = ? load64(data, start, int(len + 1))
+    pyl.account.nonce = ? load64(data, start, int(len + 1))
 
   if (mask and 0x02) > 0:
     let len = (lens shr 3) and 0b11111
-    pAcc.account.balance = ? load256(data, start, int(len + 1))
+    pyl.account.balance = ? load256(data, start, int(len + 1))
 
   if (mask and 0x04) > 0:
     let len = (lens shr 8) and 0b111
-    pAcc.stoID = (true, VertexID(? load64(data, start, int(len + 1))))
+    pyl.stoID = (true, VertexID(? load64(data, start, int(len + 1))))
 
   if (mask and 0x08) > 0:
     if data.len() < start + 32:
       return err(DeblobCodeLenUnsupported)
-    discard pAcc.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
+    discard pyl.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
   else:
-    pAcc.account.codeHash = EMPTY_CODE_HASH
+    pyl.account.codeHash = EMPTY_CODE_HASH
 
-  ok(pAcc)
+  ok()
 
 proc deblobify*(
     record: openArray[byte];
@@ -336,11 +339,12 @@ proc deblobify*(
       NibblesBuf.fromHexPrefix record.toOpenArray(pLen, rLen-1)
     if not isLeaf:
       return err(DeblobLeafGotExtPrefix)
-    let pyl = ? record.toOpenArray(0, pLen - 1).deblobify(LeafPayload)
-    VertexRef(
+    let vtx = VertexRef(
       vType: Leaf,
-      lPfx: pathSegment,
-      lData: pyl)
+      lPfx: pathSegment)
+
+    ? record.toOpenArray(0, pLen - 1).deblobify(vtx.lData)
+    vtx
 
   else:
     return err(DeblobUnknown)
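
The leaf branch now decodes the payload straight into the freshly built vertex instead of constructing a payload value and copying it in. A self-contained sketch of this out-parameter style, assuming nim-results; `Payload`/`decodeInto` are hypothetical:

import results

type Payload = object
  n: int

proc decodeInto(data: openArray[byte], pyl: var Payload): Result[void, string] =
  # Write into caller-owned storage instead of returning a fresh value.
  if data.len == 0:
    return err("empty input")
  pyl = Payload(n: data.len)
  ok()

var p: Payload
doAssert decodeInto([1'u8, 2, 3], p).isOk
echo p.n # 3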
@@ -68,7 +68,7 @@ proc computeKeyImpl(
   case vtx.vType:
   of Leaf:
     writer.startList(2)
-    writer.append(vtx.lPfx.toHexPrefix(isLeaf = true))
+    writer.append(vtx.lPfx.toHexPrefix(isLeaf = true).data())
 
     case vtx.lData.pType
     of AccountData:
@@ -111,7 +111,7 @@ proc computeKeyImpl(
       writeBranch(bwriter)
 
       writer.startList(2)
-      writer.append(vtx.ePfx.toHexPrefix(isleaf = false))
+      writer.append(vtx.ePfx.toHexPrefix(isleaf = false).data())
       writer.append(bwriter.finish().digestTo(HashKey))
     else:
       writeBranch(writer)
@@ -8,15 +8,20 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.
 
-import stew/arrayops
+import stew/[arraybuf, arrayops]
 
-type NibblesBuf* = object
-  ## Allocation-free type for storing up to 64 4-bit nibbles, as seen in the
-  ## Ethereum MPT
-  bytes: array[32, byte]
-  ibegin, iend: int8
-    # Where valid nibbles can be found - we use indices here to avoid copies
-    # when slicing - iend not inclusive
+export arraybuf
+
+type
+  NibblesBuf* = object
+    ## Allocation-free type for storing up to 64 4-bit nibbles, as seen in the
+    ## Ethereum MPT
+    bytes: array[32, byte]
+    ibegin, iend: int8
+      # Where valid nibbles can be found - we use indices here to avoid copies
+      # when slicing - iend not inclusive
+
+  HexPrefixBuf* = ArrayBuf[33, byte]
 
 func high*(T: type NibblesBuf): int =
   63
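
`HexPrefixBuf` works because hex-prefix output is bounded: 64 nibbles pack into 32 bytes plus one flag byte, hence `ArrayBuf[33, byte]`. A sketch using the same stew/arraybuf operations the diff relies on (`setLen`, `[]=`, `len`); `encodeTwoNibbles` is hypothetical:

import stew/arraybuf

type PrefixBuf = ArrayBuf[33, byte] # same shape as HexPrefixBuf

proc encodeTwoNibbles(a, b: byte, isLeaf: bool): PrefixBuf =
  result.setLen(2)                          # flag byte + one packed byte
  result[0] = byte((int(isLeaf) * 2) shl 4) # even nibble count, leaf flag
  result[1] = (a shl 4) or b                # two nibbles in one byte

echo encodeTwoNibbles(1, 2, true).len # 2 - no heap allocation involved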
@@ -61,7 +66,7 @@ func `$`*(r: NibblesBuf): string =
     const chars = "0123456789abcdef"
     result.add chars[r[i]]
 
-func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf =
+func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf {.noinit.} =
   result.bytes = r.bytes
   result.ibegin = r.ibegin + ibegin.int8
   let e =
@@ -75,7 +80,7 @@ func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf =
 template writeFirstByte(nibbleCountExpr) {.dirty.} =
   let nibbleCount = nibbleCountExpr
   var oddnessFlag = (nibbleCount and 1) != 0
-  newSeq(result, (nibbleCount div 2) + 1)
+  result.setLen((nibbleCount div 2) + 1)
   result[0] = byte((int(isLeaf) * 2 + int(oddnessFlag)) shl 4)
   var writeHead = 0
@@ -89,11 +94,11 @@ template writeNibbles(r) {.dirty.} =
     result[writeHead] = nextNibble shl 4
     oddnessFlag = not oddnessFlag
 
-func toHexPrefix*(r: NibblesBuf, isLeaf = false): seq[byte] =
+func toHexPrefix*(r: NibblesBuf, isLeaf = false): HexPrefixBuf =
   writeFirstByte(r.len)
   writeNibbles(r)
 
-func toHexPrefix*(r1, r2: NibblesBuf, isLeaf = false): seq[byte] =
+func toHexPrefix*(r1, r2: NibblesBuf, isLeaf = false): HexPrefixBuf =
   writeFirstByte(r1.len + r2.len)
   writeNibbles(r1)
   writeNibbles(r2)
@@ -131,7 +136,7 @@ func fromHexPrefix*(
   else:
     result.isLeaf = false
 
-func `&`*(a, b: NibblesBuf): NibblesBuf =
+func `&`*(a, b: NibblesBuf): NibblesBuf {.noinit.} =
   for i in 0 ..< a.len:
     result[i] = a[i]
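
`{.noinit.}` skips the implicit zeroing of `result`, which pays off for a 33-byte value whose fields all get overwritten anyway. A sketch of the effect with a hypothetical `Buf` type:

type Buf = object
  bytes: array[32, byte]
  used: int8

func filled(b: byte): Buf {.noinit.} =
  # Every field is written below, so the implicit memset would be wasted.
  for i in 0 ..< result.bytes.len:
    result.bytes[i] = b
  result.used = int8(result.bytes.len)

echo filled(7).used # 32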
@@ -133,14 +133,7 @@ proc putVtxFn(db: MemBackendRef): PutVtxFn =
       let hdl = hdl.getSession db
       if hdl.error.isNil:
         if vtx.isValid:
-          let rc = vtx.blobify()
-          if rc.isErr:
-            hdl.error = TypedPutHdlErrRef(
-              pfx: VtxPfx,
-              vid: rvid.vid,
-              code: rc.error)
-            return
-          hdl.sTab[rvid] = rc.value
+          hdl.sTab[rvid] = vtx.blobify()
         else:
           hdl.sTab[rvid] = EmptyBlob
 
@@ -312,10 +312,8 @@ iterator walkVtx*(
     be: RdbBackendRef;
       ): tuple[evid: RootedVertexID, vtx: VertexRef] =
   ## Variant of `walk()` iteration over the vertex sub-table.
-  for (rvid, data) in be.rdb.walkVtx:
-    let rc = data.deblobify VertexRef
-    if rc.isOk:
-      yield (rvid, rc.value)
+  for (rvid, vtx) in be.rdb.walkVtx:
+    yield (rvid, vtx)
 
 iterator walkKey*(
     be: RdbBackendRef;
@@ -63,10 +63,10 @@ proc getKey*(
       return ok(move(rc.value))
 
   # Otherwise fetch from backend database
-  var res: Result[HashKey,(AristoError,string)]
+  # A threadvar is used to avoid allocating an environment for onData
+  var res{.threadvar.}: Opt[HashKey]
   let onData = proc(data: openArray[byte]) =
-    res = HashKey.fromBytes(data).mapErr(proc(): auto =
-      (RdbHashKeyExpected,""))
+    res = HashKey.fromBytes(data)
 
   let gotData = rdb.keyCol.get(rvid.blobify().data(), onData).valueOr:
     const errSym = RdbBeDriverGetKeyError
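
The comment in the hunk explains the trick: a proc literal that assigned to a surrounding local `res` would capture it, heap-allocating a closure environment on every call, while a threadvar is plain thread-local global state. A self-contained sketch; `withCallback` is a hypothetical stand-in for the driver's onData-style API:

import std/options

var res {.threadvar.}: Option[int]

proc withCallback(cb: proc(x: int)) =
  cb(42)

proc lookup(): Option[int] =
  res = none(int) # reset the shared slot before each use
  withCallback(proc(x: int) =
    res = some(x)) # writes a global: the literal captures nothing
  res

echo lookup() # some(42)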
@@ -76,9 +76,9 @@ proc getKey*(
 
   # Correct result if needed
   if not gotData:
-    res = ok(VOID_HASH_KEY)
+    res.ok(VOID_HASH_KEY)
   elif res.isErr():
-    return res # Parsing failed
+    return err((RdbHashKeyExpected,"")) # Parsing failed
 
   # Update cache and return
   ok rdb.rdKeyLru.lruAppend(rvid.vid, res.value(), RdKeyLruMaxSize)
@@ -93,10 +93,10 @@ proc getVtx*(
       return ok(move(rc.value))
 
   # Otherwise fetch from backend database
-  var res: Result[VertexRef,(AristoError,string)]
+  # A threadvar is used to avoid allocating an environment for onData
+  var res {.threadvar.}: Result[VertexRef,AristoError]
   let onData = proc(data: openArray[byte]) =
-    res = data.deblobify(VertexRef).mapErr(proc(error: AristoError): auto =
-      (error,""))
+    res = data.deblobify(VertexRef)
 
   let gotData = rdb.vtxCol.get(rvid.blobify().data(), onData).valueOr:
     const errSym = RdbBeDriverGetVtxError
@@ -105,9 +105,9 @@ proc getVtx*(
     return err((errSym,error))
 
   if not gotData:
-    res = ok(VertexRef(nil))
+    res.ok(VertexRef(nil))
   elif res.isErr():
-    return res # Parsing failed
+    return err((res.error(), "Parsing failed"))
 
   # Update cache and return
   ok rdb.rdVtxLru.lruAppend(rvid.vid, res.value(), RdVtxLruMaxSize)
@@ -122,12 +122,7 @@ proc putVtx*(
       ): Result[void,(VertexID,AristoError,string)] =
   let dsc = rdb.session
   if vtx.isValid:
-    let rc = vtx.blobify()
-    if rc.isErr:
-      # Caller must `rollback()` which will flush the `rdVtxLru` cache
-      return err((rvid.vid,rc.error,""))
-
-    dsc.put(rvid.blobify().data(), rc.value, rdb.vtxCol.handle()).isOkOr:
+    dsc.put(rvid.blobify().data(), vtx.blobify(), rdb.vtxCol.handle()).isOkOr:
       # Caller must `rollback()` which will flush the `rdVtxLru` cache
       const errSym = RdbBeDriverPutVtxError
       when extraTraceMessages:
@@ -72,8 +72,8 @@ iterator walkKey*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =
     yield (rvid, val)
 
 
-iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =
-  ## Walk over key-value pairs of the hash key column of the database.
+iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: VertexRef] =
+  ## Walk over key-value pairs of the vertex column of the database.
   ##
   ## Non-decodable entries are ignored.
   ##
@@ -84,12 +84,32 @@ iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =
       break walkBody
     defer: rit.close()
 
-    for (key,val) in rit.pairs:
-      if val.len != 0:
-        let rvid = key.deblobify(RootedVertexID).valueOr:
-          continue
-
-        yield (rvid, val)
+    rit.seekToFirst()
+    var key: RootedVertexID
+    var value: VertexRef
+    while rit.isValid():
+      var valid = true
+      rit.key(
+        proc(data: openArray[byte]) =
+          key = deblobify(data, RootedVertexID).valueOr:
+            valid = false
+            default(RootedVertexID)
+      )
+      if not valid:
+        continue
+
+      rit.value(
+        proc(data: openArray[byte]) =
+          value = deblobify(data, VertexRef).valueOr:
+            valid = false
+            default(VertexRef)
+      )
+      if not valid:
+        continue
+
+      rit.next()
+      yield (key, value)
+    rit.close()
 
 # ------------------------------------------------------------------------------
 # End
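
The rewritten walker decodes each key and value inside the iterator's callbacks, reusing two variables across the whole walk instead of materialising a fresh Blob per row. A sketch of that callback pattern; `fetchRow` is a hypothetical stand-in for the RocksDB iterator's key/value accessors:

proc fetchRow(i: int, onData: proc(data: openArray[byte])) =
  let row = [byte(i), byte(i + 1)]
  onData(row) # the callee sees the bytes without owning a copy

var decoded: int # reused across iterations, no per-row temporary
for i in 0 ..< 3:
  fetchRow(i, proc(data: openArray[byte]) =
    decoded = data.len + int(data[0])) # stand-in for deblobify
  echo decoded # prints 2, 3, 4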
@@ -86,7 +86,7 @@ proc to*(node: NodeRef; T: type seq[Blob]): T =
 
       var wrx = initRlpWriter()
       wrx.startList(2)
-      wrx.append node.ePfx.toHexPrefix(isleaf = false)
+      wrx.append node.ePfx.toHexPrefix(isleaf = false).data()
       wrx.append brHash
 
       result.add wrx.finish()
@@ -104,7 +104,7 @@ proc to*(node: NodeRef; T: type seq[Blob]): T =
 
     var wr = initRlpWriter()
     wr.startList(2)
-    wr.append node.lPfx.toHexPrefix(isleaf = true)
+    wr.append node.lPfx.toHexPrefix(isleaf = true).data()
     wr.append node.lData.serialise(getKey0).value
 
     result.add (wr.finish())
@@ -127,7 +127,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T =
     let brHash = wr.finish().digestTo(HashKey)
     wr = initRlpWriter()
     wr.startList(2)
-    wr.append node.ePfx.toHexPrefix(isleaf = false)
+    wr.append node.ePfx.toHexPrefix(isleaf = false).data()
     wr.append brHash
 
   of Leaf:
@@ -138,7 +138,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T =
       ok(node.key[0]) # always succeeds
 
     wr.startList(2)
-    wr.append node.lPfx.toHexPrefix(isleaf = true)
+    wr.append node.lPfx.toHexPrefix(isleaf = true).data()
     wr.append node.lData.serialise(getKey0).value
 
     wr.finish().digestTo(HashKey)
@@ -65,8 +65,8 @@ suite "Aristo blobify":
     )
 
     check:
-      deblobify(blobify(leafRawData)[], VertexRef)[] == leafRawData
-      deblobify(blobify(leafAccount)[], VertexRef)[] == leafAccount
-      deblobify(blobify(leafStoData)[], VertexRef)[] == leafStoData
-      deblobify(blobify(branch)[], VertexRef)[] == branch
-      deblobify(blobify(extension)[], VertexRef)[] == extension
+      deblobify(blobify(leafRawData), VertexRef)[] == leafRawData
+      deblobify(blobify(leafAccount), VertexRef)[] == leafAccount
+      deblobify(blobify(leafStoData), VertexRef)[] == leafStoData
+      deblobify(blobify(branch), VertexRef)[] == branch
+      deblobify(blobify(extension), VertexRef)[] == extension
@@ -115,7 +115,7 @@ func asExtension(b: Blob; path: Hash256): Blob =
   var wr = initRlpWriter()
 
   wr.startList(2)
-  wr.append NibblesBuf.fromBytes(@[nibble]).slice(1).toHexPrefix(isleaf=false)
+  wr.append NibblesBuf.fromBytes(@[nibble]).slice(1).toHexPrefix(isleaf=false).data()
   wr.append node.listElem(nibble.int).toBytes
   wr.finish()
 
vendor/nim-stew (vendored submodule)
@@ -1 +1 @@
-Subproject commit 54cc67cbb83f61b6e3168b09701758c5b805120a
+Subproject commit fc09b2e023ab2d73e425f7d15cf94871c7867868