Ordered trie (#2712)

Speed up trie computations and remove redundant ways of performing this
operation.

Co-authored-by: jangko <jangko128@gmail.com>
This commit is contained in:
Jacek Sieka 2024-10-09 09:44:15 +02:00 committed by GitHub
parent e59d2825f4
commit 11646ad3c4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
12 changed files with 194 additions and 359 deletions

View File

@ -51,6 +51,7 @@ EXCLUDED_NIM_PACKAGES := \
vendor/nimbus-eth2/vendor/nim-presto \
vendor/nimbus-eth2/vendor/nim-zxcvbn \
vendor/nimbus-eth2/vendor/nim-kzg4844 \
vendor/nimbus-eth2/vendor/nim-minilru \
vendor/nimbus-eth2/vendor/nimbus-security-resources \
vendor/nimbus-eth2/vendor/NimYAML

View File

@ -11,7 +11,7 @@ import
results,
chronos,
chronicles,
eth/[trie, trie/db],
eth/trie/ordered_trie,
eth/common/[hashes, headers_rlp, blocks_rlp, receipts_rlp, transactions_rlp],
eth/p2p/discoveryv5/[protocol, enr],
../../common/common_types,
@ -163,27 +163,6 @@ func encode*(receipts: seq[Receipt]): seq[byte] =
# TODO: Failures on validation and perhaps deserialisation should be punished
# for if/when peer scoring/banning is added.
proc calcRootHash(items: Transactions | PortalReceipts | Withdrawals): Hash32 =
var tr = initHexaryTrie(newMemoryDB(), isPruning = false)
for i, item in items:
try:
tr.put(rlp.encode(i.uint), item.asSeq())
except RlpError as e:
# RlpError should not occur
# TODO: trace down why it might raise this
raiseAssert(e.msg)
return tr.rootHash
template calcTxsRoot*(transactions: Transactions): Hash32 =
calcRootHash(transactions)
template calcReceiptsRoot*(receipts: PortalReceipts): Hash32 =
calcRootHash(receipts)
template calcWithdrawalsRoot*(receipts: Withdrawals): Hash32 =
calcRootHash(receipts)
func validateBlockHeader*(header: Header, blockHash: Hash32): Result[void, string] =
if not (header.rlpHash() == blockHash):
err("Block header hash does not match")
@ -217,6 +196,15 @@ func validateBlockHeaderBytes*(
ok(header)
template append*(w: var RlpWriter, v: TransactionByteList) =
w.appendRawBytes(v.asSeq)
template append*(w: var RlpWriter, v: WithdrawalByteList) =
w.appendRawBytes(v.asSeq)
template append*(w: var RlpWriter, v: ReceiptByteList) =
w.appendRawBytes(v.asSeq)
proc validateBlockBody*(
body: PortalBlockBodyLegacy, header: Header
): Result[void, string] =
@ -225,7 +213,7 @@ proc validateBlockBody*(
if calculatedOmmersHash != header.ommersHash:
return err("Invalid ommers hash")
let calculatedTxsRoot = calcTxsRoot(body.transactions)
let calculatedTxsRoot = orderedTrieRoot(body.transactions.asSeq)
if calculatedTxsRoot != header.txRoot:
return err(
"Invalid transactions root: expected " & $header.txRoot & " - got " &
@ -244,7 +232,7 @@ proc validateBlockBody*(
if body.uncles.asSeq() != @[byte 0xc0]:
return err("Invalid ommers hash, uncles list is not empty")
let calculatedTxsRoot = calcTxsRoot(body.transactions)
let calculatedTxsRoot = orderedTrieRoot(body.transactions.asSeq)
if calculatedTxsRoot != header.txRoot:
return err(
"Invalid transactions root: expected " & $header.txRoot & " - got " &
@ -256,7 +244,7 @@ proc validateBlockBody*(
doAssert(header.withdrawalsRoot.isSome())
let
calculatedWithdrawalsRoot = calcWithdrawalsRoot(body.withdrawals)
calculatedWithdrawalsRoot = orderedTrieRoot(body.withdrawals.asSeq)
headerWithdrawalsRoot = header.withdrawalsRoot.get()
if calculatedWithdrawalsRoot != headerWithdrawalsRoot:
return err(
@ -314,7 +302,7 @@ proc validateBlockBodyBytes*(
proc validateReceipts*(
receipts: PortalReceipts, receiptsRoot: Hash32
): Result[void, string] =
if calcReceiptsRoot(receipts) != receiptsRoot:
if orderedTrieRoot(receipts.asSeq) != receiptsRoot:
err("Unexpected receipt root")
else:
ok()

View File

@ -41,7 +41,7 @@ import
chronicles,
chronos,
confutils,
eth/[rlp, trie, trie/db],
eth/[rlp, trie/ordered_trie],
eth/common/keys,
eth/common/[base, headers_rlp, blocks_rlp],
beacon_chain/el/[el_manager, engine_api_conversions],
@ -62,43 +62,24 @@ import
from beacon_chain/gossip_processing/block_processor import newExecutionPayload
from beacon_chain/gossip_processing/eth2_processor import toValidationResult
proc calculateTransactionData(
items: openArray[TypedTransaction]
): Hash32 {.raises: [].} =
var tr = initHexaryTrie(newMemoryDB(), isPruning = false)
for i, t in items:
try:
let tx = distinctBase(t)
tr.put(rlp.encode(uint64 i), tx)
except CatchableError as e:
# tr.put interface can raise exception
raiseAssert(e.msg)
template append(w: var RlpWriter, t: TypedTransaction) =
w.appendRawBytes(distinctBase t)
return tr.rootHash()
# TODO: Since Capella we can also access ExecutionPayloadHeader and thus
# could get the Roots through there instead.
proc calculateWithdrawalsRoot(items: openArray[WithdrawalV1]): Hash32 {.raises: [].} =
var tr = initHexaryTrie(newMemoryDB(), isPruning = false)
for i, w in items:
try:
let withdrawal = blocks.Withdrawal(
index: distinctBase(w.index),
validatorIndex: distinctBase(w.validatorIndex),
address: w.address,
amount: distinctBase(w.amount),
)
tr.put(rlp.encode(uint64 i), rlp.encode(withdrawal))
except CatchableError as e:
raiseAssert(e.msg)
return tr.rootHash()
template append(w: var RlpWriter, t: WithdrawalV1) =
# TODO: Since Capella we can also access ExecutionPayloadHeader and thus
# could get the Roots through there instead.
w.append blocks.Withdrawal(
index: distinctBase(t.index),
validatorIndex: distinctBase(t.validatorIndex),
address: t.address,
amount: distinctBase(t.amount),
)
proc asPortalBlockData*(
payload: ExecutionPayloadV1
): (Hash32, BlockHeaderWithProof, PortalBlockBodyLegacy) =
let
txRoot = calculateTransactionData(payload.transactions)
txRoot = orderedTrieRoot(payload.transactions)
header = Header(
parentHash: payload.parentHash,
@ -139,8 +120,8 @@ proc asPortalBlockData*(
payload: ExecutionPayloadV2 | ExecutionPayloadV3 | ExecutionPayloadV4
): (Hash32, BlockHeaderWithProof, PortalBlockBodyShanghai) =
let
txRoot = calculateTransactionData(payload.transactions)
withdrawalsRoot = Opt.some(calculateWithdrawalsRoot(payload.withdrawals))
txRoot = orderedTrieRoot(payload.transactions)
withdrawalsRoot = Opt.some(orderedTrieRoot(payload.withdrawals))
# TODO: adjust blobGasUsed & excessBlobGas according to deneb fork!
header = Header(

View File

@ -14,9 +14,9 @@
{.push raises: [].}
import
aristo/[aristo_api, aristo_constants, aristo_sign]
aristo/[aristo_api, aristo_constants]
export
aristo_api, aristo_constants, aristo_sign
aristo_api, aristo_constants
import
aristo/aristo_init
@ -48,7 +48,6 @@ export
AristoDbRef,
AristoError,
AristoTxRef,
MerkleSignRef,
isValid
# End

View File

@ -507,12 +507,6 @@ proc pp*(w: Hash32; codeHashOk: bool): string =
proc pp*(n: NibblesBuf): string =
n.ppPathPfx()
proc pp*(w: HashKey; sig: MerkleSignRef): string =
w.ppKey(sig.db)
proc pp*(w: Hash32; sig: MerkleSignRef): string =
w.to(HashKey).ppKey(sig.db)
proc pp*(w: HashKey; db = AristoDbRef(nil)): string =
w.ppKey(db.orDefault)
@ -740,14 +734,6 @@ proc pp*(
elif balancerOk:
result &= indent.toPfx & db.balancer.ppBalancer(db, indent+1)
proc pp*(sdb: MerkleSignRef; indent = 4): string =
result = "" &
"count=" & $sdb.count &
" root=" & sdb.root.pp
if sdb.error != AristoError(0):
result &= " error=" & $sdb.error
result &= "\n db\n " & sdb.db.pp(indent=indent+1)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -46,14 +46,6 @@ type
txUid*: uint ## Unique ID among transactions
level*: int ## Stack index for this transaction
MerkleSignRef* = ref object
## Simple Merkle signature calculator for key-value lists
root*: VertexID ## Not accounts tree, e.g. `VertexID(2)`
db*: AristoDbRef
count*: uint
error*: AristoError
errKey*: seq[byte]
DudesRef = ref object
## List of peers accessing the same database. This list is lazily allocated
## and might be kept with a single entry, i.e. so that `{centre} == peers`.

View File

@ -1,82 +0,0 @@
# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Aristo DB -- Sign Helper
## ========================
##
{.push raises: [].}
import
eth/common,
results,
"."/[aristo_compute, aristo_desc, aristo_get, aristo_init,
aristo_delete, aristo_merge]
# ------------------------------------------------------------------------------
# Public functions, signature generator
# ------------------------------------------------------------------------------
proc merkleSignBegin*(): MerkleSignRef =
## Start signature calculator for a list of key-value items.
let
db = AristoDbRef.init VoidBackendRef
vid = VertexID(2)
MerkleSignRef(
root: vid,
db: db)
proc merkleSignAdd*(
sdb: MerkleSignRef;
key: openArray[byte];
val: openArray[byte];
) =
## Add key-value item to the signature list. The order of the items to add
## is irrelevant.
if sdb.error == AristoError(0):
sdb.count.inc
discard sdb.db.mergeGenericData(sdb.root, key, val).valueOr:
sdb.`error` = error
sdb.errKey = @key
return
proc merkleSignDelete*(
sdb: MerkleSignRef;
key: openArray[byte];
) =
## Delete key-value item from the signature list. The order of the items
## is irrelevant.
if sdb.error == AristoError(0):
sdb.count.inc
discard sdb.db.deleteGenericData(sdb.root, key).valueOr:
sdb.`error` = error
sdb.errKey = @key
return
proc merkleSignCommit*(
sdb: MerkleSignRef;
): Result[Hash32,(seq[byte],AristoError)] =
## Finish with the list, calculate signature and return it.
if sdb.count == 0:
return ok EMPTY_ROOTHASH
if sdb.error != AristoError(0):
return err((sdb.errKey, sdb.error))
let sign = sdb.db.computeKey((sdb.root, sdb.root)).valueOr:
if error == GetVtxNotFound:
if not sdb.db.getVtx((sdb.root, sdb.root)).isValid:
return ok EMPTY_ROOTHASH
raiseAssert "merkleSignCommit(): " & $error
ok sign.to(Hash32)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -12,32 +12,24 @@
import
std/[math, times, strutils],
eth/[rlp, common/eth_types_rlp],
eth/[common/eth_types_rlp, trie/ordered_trie],
stew/byteutils,
nimcrypto,
results,
../db/aristo/aristo_sign,
nimcrypto/sha2,
../constants
export eth_types_rlp
proc calcRootHash[T](items: openArray[T]): Hash256 {.gcsafe.} =
let sig = merkleSignBegin()
for i, t in items:
sig.merkleSignAdd(rlp.encode(i.uint), rlp.encode(t))
sig.merkleSignCommit.value
template calcTxRoot*(transactions: openArray[Transaction]): Root =
orderedTrieRoot(transactions)
template calcTxRoot*(transactions: openArray[Transaction]): Hash256 =
calcRootHash(transactions)
template calcWithdrawalsRoot*(withdrawals: openArray[Withdrawal]): Root =
orderedTrieRoot(withdrawals)
template calcWithdrawalsRoot*(withdrawals: openArray[Withdrawal]): Hash256 =
calcRootHash(withdrawals)
template calcReceiptsRoot*(receipts: openArray[Receipt]): Root =
orderedTrieRoot(receipts)
template calcReceiptsRoot*(receipts: openArray[Receipt]): Hash256 =
calcRootHash(receipts)
template calcRequestsRoot*(requests: openArray[Request]): Hash256 =
calcRootHash(requests)
template calcRequestsRoot*(requests: openArray[Request]): Root =
orderedTrieRoot(requests)
func sumHash*(hashes: varargs[Hash256]): Hash256 =
var ctx: sha256

View File

@ -21,7 +21,7 @@ import
./test_aristo/test_blobify,
./test_aristo/test_merge_proof,
./test_aristo/test_portal_proof,
./test_aristo/test_short_keys,
./test_aristo/test_compute,
./test_aristo/[
test_balancer, test_helpers, test_samples_xx, test_tx,
undump_accounts, undump_storages]

View File

@ -0,0 +1,150 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
{.used.}
import
std/[algorithm, sets],
stew/byteutils,
unittest2,
../../nimbus/db/aristo/[
aristo_check, aristo_compute, aristo_delete, aristo_get, aristo_merge, aristo_desc,
aristo_utils, aristo_serialise, aristo_init,
]
func x(s: string): seq[byte] =
s.hexToSeqByte
func k(s: string): HashKey =
HashKey.fromBytes(s.x).value
let samples = [
# From InvalidBlocks/bc4895-withdrawals/twoIdenticalIndex.json
@[
(
"80".x,
"da808094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
hash32"27f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973",
VOID_HASH_KEY,
),
(
"01".x,
"da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
hash32"81eac5f476f48feb289af40ee764015f6b49036760438ea45df90d5342b6ae61",
VOID_HASH_KEY,
),
(
"02".x,
"da018094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
hash32"463769ae507fcc6d6231c8888425191c5622f330fdd4b78a7b24c4521137b573",
VOID_HASH_KEY,
),
(
"03".x,
"da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
hash32"a95b9a7b58a6b3cb4001eb0be67951c5517141cb0183a255b5cae027a7b10b36",
VOID_HASH_KEY,
),
],
# Some on-the-fly provided stuff
@[
(
"0000".x,
"0000".x,
hash32"69a4785bd4f5a1590e329138d4248b6f887fa37e41bfc510a55f21b44f98be61",
"c783200000820000".k,
),
(
"0000".x,
"0001".x,
hash32"910fa1155b667666abe7b4b1cb4864c1dc91c57c9528e1c5f5f9f95e003afece",
"c783200000820001".k,
),
(
"0001".x,
"0001".x,
hash32"d082d2bfe8586142d6f40df0245e56365043819e51e2c9799c660558eeea0db5",
"dd821000d9c420820001c420820001808080808080808080808080808080".k,
),
(
"0002".x,
"0000".x,
hash32"d56ea5154fbad18e0ff1eaeafa2310d0879b59adf189c12ff1b2701e54db07b2",
VOID_HASH_KEY,
),
(
"0100".x,
"0100".x,
hash32"d1c0699fe7928a536e0183c6400ae34eb1174ce6b21f9d117b061385034743ad",
VOID_HASH_KEY,
),
(
"0101".x,
"0101".x,
hash32"74ddb98cb56e2dd7e8fa090b9ce740c3de589b72403b20136af75fb6168b1d19",
VOID_HASH_KEY,
),
(
"0200".x,
"0200".x,
hash32"2e777f06ab2de1a460a8412b8691f10fdcb162077ab5cbb1865636668bcb6471",
VOID_HASH_KEY,
),
],
]
suite "Aristo compute":
for n, sample in samples:
test "Add and delete entries " & $n:
let
db = AristoDbRef.init VoidBackendRef
root = VertexID(2)
for inx, (k, v, r, s) in sample:
checkpoint("k = " & k.toHex & ", v = " & v.toHex())
check:
db.mergeGenericData(root, k, v) == Result[bool, AristoError].ok(true)
# Check state against expected value
let w = db.computeKey((root, root)).expect("no errors")
check r == w.to(Hash32)
# Check raw node if given, check node ref against expected value
if s.isValid:
let z = db.getVtx((root, root)).toNode(root, db).value.digestTo(HashKey)
check s == z
let rc = db.check
check rc == typeof(rc).ok()
# Reverse run deleting entries
var deletedKeys: HashSet[seq[byte]]
for iny, (k, v, r, s) in sample.reversed:
# Check whether key was already deleted
if k in deletedKeys:
continue
deletedKeys.incl k
# Check state against expected value
let w = db.computeKey((root, root)).value.to(Hash32)
check r == w
# Check raw node if given, check node ref against expected value
if s.isValid:
let z = db.getVtx((root, root)).toNode(root, db).value.digestTo(HashKey)
check s == z
check:
db.deleteGenericData(root, k).isOk
let rc = db.check
check rc == typeof(rc).ok()

View File

@ -1,172 +0,0 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
{.used.}
import
std/[algorithm, sets],
eth/common,
results,
stew/byteutils,
unittest2,
../../nimbus/db/aristo,
../../nimbus/db/aristo/[
aristo_check, aristo_get, aristo_debug, aristo_desc, aristo_serialise,
aristo_utils],
../replay/xcheck,
./test_helpers
func x(s: string): seq[byte] = s.hexToSeqByte
func k(s: string): HashKey = HashKey.fromBytes(s.x).value
let samples = [
# From InvalidBlocks/bc4895-withdrawals/twoIdenticalIndex.json
@[("80".x,
"da808094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
"27f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973".k,
VOID_HASH_KEY),
("01".x,
"da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
"81eac5f476f48feb289af40ee764015f6b49036760438ea45df90d5342b6ae61".k,
VOID_HASH_KEY),
("02".x,
"da018094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
"463769ae507fcc6d6231c8888425191c5622f330fdd4b78a7b24c4521137b573".k,
VOID_HASH_KEY),
("03".x,
"da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
"a95b9a7b58a6b3cb4001eb0be67951c5517141cb0183a255b5cae027a7b10b36".k,
VOID_HASH_KEY)],
# Some on-the-fly provided stuff
@[("0000".x, "0000".x,
"69a4785bd4f5a1590e329138d4248b6f887fa37e41bfc510a55f21b44f98be61".k,
"c783200000820000".k),
("0000".x, "0001".x,
"910fa1155b667666abe7b4b1cb4864c1dc91c57c9528e1c5f5f9f95e003afece".k,
"c783200000820001".k),
("0001".x, "0001".x,
"d082d2bfe8586142d6f40df0245e56365043819e51e2c9799c660558eeea0db5".k,
"dd821000d9c420820001c420820001808080808080808080808080808080".k),
("0002".x, "0000".x,
"d56ea5154fbad18e0ff1eaeafa2310d0879b59adf189c12ff1b2701e54db07b2".k,
VOID_HASH_KEY),
("0100".x, "0100".x,
"d1c0699fe7928a536e0183c6400ae34eb1174ce6b21f9d117b061385034743ad".k,
VOID_HASH_KEY),
("0101".x, "0101".x,
"74ddb98cb56e2dd7e8fa090b9ce740c3de589b72403b20136af75fb6168b1d19".k,
VOID_HASH_KEY),
("0200".x, "0200".x,
"2e777f06ab2de1a460a8412b8691f10fdcb162077ab5cbb1865636668bcb6471".k,
VOID_HASH_KEY)]]
let
noisy = true
gossip = false # or noisy
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
suite "Aristo short keys":
test "Add and delete entries":
# Check for some pathological cases
for n,sample in samples:
let
sig = merkleSignBegin()
root = (sig.root,sig.root)
db = sig.db
for inx,(k,v,r,s) in sample:
sig.merkleSignAdd(k,v)
gossip.say "*** testShortkeys (1)", "n=", n, " inx=", inx,
"\n k=", k.toHex, " v=", v.toHex,
"\n r=", r.pp(sig),
"\n ", sig.pp(),
"\n"
# Check state against expected value
let w = sig.merkleSignCommit().value
gossip.say "*** testShortkeys (2)", "n=", n, " inx=", inx,
"\n k=", k.toHex, " v=", v.toHex,
"\n r=", r.pp(sig),
"\n R=", w.pp(sig),
"\n ", sig.pp(),
"\n ----------------",
"\n"
xCheck r == w.to(HashKey):
noisy.say "*** testShortkeys (2.1)", "n=", n, " inx=", inx,
"\n k=", k.toHex, " v=", v.toHex,
"\n r=", r.pp(sig),
"\n R=", w.pp(sig),
"\n ", sig.pp(),
"\n"
# Check raw node if given, check node ref against expected value
if s.isValid:
let z = db.getVtx(root).toNode(root[0],db).value.digestTo(HashKey)
xCheck s == z:
noisy.say "*** testShortkeys (2.2)", "n=", n, " inx=", inx,
"\n k=", k.toHex, " v=", v.toHex,
"\n r=", r.pp(sig),
"\n R=", w.pp(sig),
"\n ", sig.pp(),
"\n"
let rc = sig.db.check
xCheckRc rc.error == (0,0):
noisy.say "*** testShortkeys (2.3)", "n=", n, " inx=", inx,
"\n k=", k.toHex, " v=", v.toHex, " rc=", rc.error,
"\n r=", r.pp(sig),
"\n R=", w.pp(sig),
"\n ", sig.pp(),
"\n"
# Reverse run deleting entries
var deletedKeys: HashSet[seq[byte]]
for iny,(k,v,r,s) in sample.reversed:
let inx = sample.len - 1 - iny
# Check whether key was already deleted
if k in deletedKeys:
continue
deletedKeys.incl k
# Check state against expected value
let w = sig.merkleSignCommit().value
gossip.say "*** testShortkeys, pre-del (5)", "n=", n, " inx=", inx,
"\n k", k.toHex, " v=", v.toHex,
"\n r=", r.pp(sig),
"\n R=", w.pp(sig),
"\n ", sig.pp(),
"\n"
xCheck r == w.to(HashKey)
# Check raw node if given, check node ref against expected value
if s.isValid:
let z = db.getVtx(root).toNode(root[0],db).value.digestTo(HashKey)
xCheck s == z
sig.merkleSignDelete(k)
gossip.say "*** testShortkeys, post-del (6)", "n=", n, " inx=", inx,
"\n k", k.toHex, " v=", v.toHex,
"\n ", sig.pp(),
"\n"
let rc = sig.db.check
xCheckRc rc.error == (0,0)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

2
vendor/nimbus-eth2 vendored

@ -1 +1 @@
Subproject commit 8f0d910422b12497852f80c22d1cee48a971d246
Subproject commit b8a424991d4f8590f5e5364b18ffeabef72e1ff4