Bump submodules and related fixes (#1729)

* Bump submodules and related fixes

* Fix some warnings due to chronos and Nim bumps

* Seems macOS and Windows have different behavior
Kim De Mey, 2023-10-17 14:19:50 +02:00, committed by GitHub
parent 750a075a6c
commit ca61a7009d
21 changed files with 57 additions and 40 deletions
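
Two mechanical migrations account for most of the source changes below: SSZ decode call sites now catch the broader SerializationError instead of SszError, and chronos futures are cancelled with cancelSoon() instead of cancel(). As a minimal sketch of the first pattern (mirroring the decodeSsz helper changed below; the exact import set is an assumption, not taken from this diff):

import
  serialization, ssz_serialization,
  stew/results

func decodeSsz*(input: openArray[byte], T: type): Result[T, string] =
  ## Decode `input` as SSZ-encoded `T`, converting decoding failures
  ## into an error string instead of letting the exception escape.
  try:
    ok(SSZ.decode(input, T))
  except SerializationError as e:
    # SerializationError covers the SSZ-specific errors (e.g.
    # MalformedSszError, SszSizeMismatchError) that were caught before.
    err(e.msg)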

@@ -34,11 +34,11 @@ func decodeRlp*(input: openArray[byte], T: type): Result[T, string] =
func decodeSsz*(input: openArray[byte], T: type): Result[T, string] =
try:
ok(SSZ.decode(input, T))
except SszError as e:
except SerializationError as e:
err(e.msg)
func decodeSszOrRaise*(input: openArray[byte], T: type): T =
try:
SSZ.decode(input, T)
except SszError as e:
except SerializationError as e:
raiseAssert(e.msg)

@@ -155,7 +155,7 @@ proc getSszDecoded(kv: KvStoreRef, key: openArray[byte], T: type auto): Opt[T] =
if res.isSome():
try:
Opt.some(SSZ.decode(res.get(), T))
except SszError:
except SerializationError:
raiseAssert("Stored data should always be serialized correctly")
else:
Opt.none(T)

@@ -26,7 +26,7 @@ proc readAccumulator*(file: string): Result[FinishedAccumulator, string] =
try:
ok(SSZ.decode(encodedAccumulator, FinishedAccumulator))
except SszError as e:
except SerializationError as e:
err("Failed decoding accumulator: " & e.msg)
proc readEpochAccumulator*(file: string): Result[EpochAccumulator, string] =
@@ -34,7 +34,7 @@ proc readEpochAccumulator*(file: string): Result[EpochAccumulator, string] =
try:
ok(SSZ.decode(encodedAccumulator, EpochAccumulator))
except SszError as e:
except SerializationError as e:
err("Decoding epoch accumulator failed: " & e.msg)
proc readEpochAccumulatorCached*(file: string): Result[EpochAccumulatorCached, string] =
@@ -42,7 +42,7 @@ proc readEpochAccumulatorCached*(file: string): Result[EpochAccumulatorCached, s
try:
ok(SSZ.decode(encodedAccumulator, EpochAccumulatorCached))
except SszError as e:
except SerializationError as e:
err("Decoding epoch accumulator failed: " & e.msg)
# Reading data in e2s format

@@ -85,7 +85,7 @@ func encode*(contentKey: ContentKey): ByteList =
func decode*(contentKey: ByteList): Opt[ContentKey] =
try:
Opt.some(SSZ.decode(contentKey.asSeq(), ContentKey))
except SszError:
except SerializationError:
return Opt.none(ContentKey)
func toContentId*(contentKey: ByteList): ContentId =

@@ -289,4 +289,4 @@ proc stop*(n: LightClientNetwork) =
n.portalProtocol.stop()
if not n.processContentLoop.isNil:
n.processContentLoop.cancel()
n.processContentLoop.cancelSoon()

@@ -48,7 +48,7 @@ func encode*(contentKey: ContentKey): ByteList =
func decode*(contentKey: ByteList): Option[ContentKey] =
try:
some(SSZ.decode(contentKey.asSeq(), ContentKey))
except SszError:
except SerializationError:
return none[ContentKey]()
func toContentId*(contentKey: ByteList): ContentId =

@@ -77,7 +77,7 @@ func encode*(contentKey: ContentKey): ByteList =
func decode*(contentKey: ByteList): Opt[ContentKey] =
try:
Opt.some(SSZ.decode(contentKey.asSeq(), ContentKey))
except SszError:
except SerializationError:
return Opt.none(ContentKey)
func toContentId*(contentKey: ByteList): ContentId =

@@ -335,7 +335,7 @@ proc get(db: ContentDB, T: type BlockHeader, contentId: ContentId): Opt[T] =
let headerWithProof =
try:
SSZ.decode(contentFromDB.get(), BlockHeaderWithProof)
except SszError as e:
except SerializationError as e:
raiseAssert(e.msg)
let res = decodeRlp(headerWithProof.header.asSeq(), T)
@@ -801,4 +801,4 @@ proc stop*(n: HistoryNetwork) =
n.portalProtocol.stop()
if not n.processContentLoop.isNil:
n.processContentLoop.cancel()
n.processContentLoop.cancelSoon()

@@ -72,7 +72,7 @@ func encode*(contentKey: ContentKey): ByteList =
func decode*(contentKey: ByteList): Opt[ContentKey] =
try:
Opt.some(SSZ.decode(contentKey.asSeq(), ContentKey))
except SszError:
except SerializationError:
return Opt.none(ContentKey)
template computeContentId*(digestCtxType: type, body: untyped): ContentId =

@@ -107,4 +107,4 @@ proc stop*(n: StateNetwork) =
n.portalProtocol.stop()
if not n.processContentLoop.isNil:
n.processContentLoop.cancel()
n.processContentLoop.cancelSoon()

@@ -147,7 +147,7 @@ func decodeMessage*(body: openArray[byte]): Result[Message, string] =
if body.len < 1: # TODO: This check should probably move a layer down
return err("No message data, peer might not support this talk protocol")
ok(SSZ.decode(body, Message))
except SszError as e:
except SerializationError as e:
err("Invalid message encoding: " & e.msg)
template innerMessage[T: SomeMessage](

@@ -268,7 +268,7 @@ func handlePing(
# pings from different nodes to clear the LRU.
let customPayloadDecoded =
try: SSZ.decode(ping.customPayload.asSeq(), CustomPayload)
except MalformedSszError, SszSizeMismatchError:
except SerializationError:
# invalid custom payload, send empty back
return @[]
p.radiusCache.put(srcId, customPayloadDecoded.dataRadius)
@@ -1052,7 +1052,7 @@ proc contentLookup*(p: PortalProtocol, target: ByteList, targetId: UInt256):
of Content:
# cancel any pending queries as the content has been found
for f in pendingQueries:
f.cancel()
f.cancelSoon()
portal_lookup_content_requests.observe(requestAmount)
return Opt.some(ContentLookupResult.init(
content.content, content.utpTransfer, nodesWithoutContent))
@@ -1357,12 +1357,12 @@ proc start*(p: PortalProtocol) =
proc stop*(p: PortalProtocol) =
if not p.revalidateLoop.isNil:
p.revalidateLoop.cancel()
p.revalidateLoop.cancelSoon()
if not p.refreshLoop.isNil:
p.refreshLoop.cancel()
p.refreshLoop.cancelSoon()
for worker in p.offerWorkers:
worker.cancel()
worker.cancelSoon()
p.offerWorkers = @[]
proc resolve*(p: PortalProtocol, id: NodeId): Future[Opt[Node]] {.async.} =
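
The other repeated change in this file (and in the stop() procs of the networks above) replaces Future.cancel() with cancelSoon(). A minimal usage sketch, assuming the bumped nim-chronos where cancelSoon() requests cancellation without returning a future to await:

import chronos

proc worker() {.async.} =
  # Stand-in for a long-running loop such as processContentLoop.
  while true:
    await sleepAsync(1.seconds)

when isMainModule:
  let loop = worker()
  # Previously: loop.cancel()
  loop.cancelSoon()  # schedule cancellation, fire-and-forget
  # Give the event loop a moment so the cancellation is delivered.
  waitFor sleepAsync(10.milliseconds)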

@@ -234,6 +234,17 @@ procSuite "History Content Network":
check offerResult.isOk()
# Make sure the content got processed out of content queue
while not historyNode2.historyNetwork.contentQueue.empty():
await sleepAsync(1.milliseconds)
# Note: It seems something changed in chronos, causing different behavior.
# It appears that validateContent, called through processContentLoop, used to
# run immediately in the "non-async shortpath" case. This is no longer the
# case, so at this point the content has not yet been validated and stored.
# Add an await here so that the store can happen.
await sleepAsync(100.milliseconds)
for i, contentKV in contentKVs:
let id = toContentId(contentKV.contentKey)
if i < len(contentKVs) - 1:
@@ -298,6 +309,12 @@ procSuite "History Content Network":
check offerResult.isOk()
# Make sure the content got processed out of content queue
while not historyNode2.historyNetwork.contentQueue.empty():
await sleepAsync(1.milliseconds)
await sleepAsync(100.milliseconds)
for contentKV in contentKVs:
let id = toContentId(contentKV.contentKey)
check historyNode2.containsId(id) == true

@@ -92,14 +92,14 @@ import
from stew/objects import checkedEnumAssign
from stew/byteutils import readHexChar
from web3/ethtypes import BlockHash
from web3/ethtypes as web3types import BlockHash
from beacon_chain/gossip_processing/block_processor import newExecutionPayload
from beacon_chain/gossip_processing/eth2_processor import toValidationResult
type Hash256 = etypes.Hash256
template asEthHash(hash: ethtypes.BlockHash): Hash256 =
template asEthHash(hash: web3types.BlockHash): Hash256 =
Hash256(data: distinctBase(hash))
# TODO: Ugh why isn't gasLimit and gasUsed a uint64 in nim-eth / nimbus-eth1 :(
@@ -451,8 +451,8 @@ proc run(config: BeaconBridgeConf) {.raises: [CatchableError].} =
withBlck(signedBlock):
when consensusFork >= ConsensusFork.Bellatrix:
if blck.message.is_execution_block:
template payload(): auto = blck.message.body.execution_payload
if forkyBlck.message.is_execution_block:
template payload(): auto = forkyBlck.message.body.execution_payload
# TODO: Get rid of the asEngineExecutionPayload step?
let executionPayload = payload.asEngineExecutionPayload()

@@ -209,7 +209,7 @@ proc mptMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbMptFns =
fetchFn: proc(k: openArray[byte]): CoreDbRc[Blob] =
db.mapRlpException("legacy/mpt/get()"):
return ok(mpt.trie.get(k))
discard,
,
deleteFn: proc(k: openArray[byte]): CoreDbRc[void] =
db.mapRlpException("legacy/mpt/del()"):
@@ -224,7 +224,7 @@ proc mptMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbMptFns =
containsFn: proc(k: openArray[byte]): CoreDbRc[bool] =
db.mapRlpException("legacy/mpt/put()"):
return ok(mpt.trie.contains(k))
discard,
,
rootVidFn: proc(): CoreDbVidRef =
db.bless(LegacyCoreDbVid(vHash: mpt.trie.rootHash)),
@@ -236,13 +236,13 @@ proc mptMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbMptFns =
reraiseRlpException("legacy/mpt/pairs()"):
for k,v in mpt.trie.pairs():
yield (k,v)
discard,
,
replicateIt: iterator: (Blob,Blob) {.gcsafe, raises: [LegacyApiRlpError].} =
reraiseRlpException("legacy/mpt/replicate()"):
for k,v in mpt.trie.replicate():
yield (k,v)
discard)
)
proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
## Hexary trie database handlers
@@ -254,7 +254,7 @@ proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
const info = "legacy/mpt/getAccount()"
db.mapRlpException info:
return ok mpt.trie.get(k.keccakHash.data).toCoreDbAccount(db)
return err(db.bless LegacyCoreDbError(error: MptNotFound, ctx: info)),
,
deleteFn: proc(k: EthAddress): CoreDbRc[void] =
db.mapRlpException("legacy/mpt/del()"):
@@ -269,7 +269,7 @@ proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
containsFn: proc(k: EthAddress): CoreDbRc[bool] =
db.mapRlpException("legacy/mpt/put()"):
return ok(mpt.trie.contains k.keccakHash.data)
discard,
,
rootVidFn: proc(): CoreDbVidRef =
db.bless(LegacyCoreDbVid(vHash: mpt.trie.rootHash)),
@@ -329,7 +329,7 @@ proc baseMethods(
destroyFn: proc(ignore: bool) =
if not closeDb.isNil:
closeDb()
discard,
,
vidHashFn: proc(vid: CoreDbVidRef): Result[Hash256,void] =
ok(vid.lvHash),

@@ -109,8 +109,8 @@ proc run(config: VerifiedProxyConf) {.raises: [CatchableError].} =
wallSlot = getBeaconTime().slotOrZero
withBlck(signedBlock):
when consensusFork >= ConsensusFork.Bellatrix:
if blck.message.is_execution_block:
template payload(): auto = blck.message.body.execution_payload
if forkyBlck.message.is_execution_block:
template payload(): auto = forkyBlck.message.body.execution_payload
blockCache.add(asExecutionData(payload.asEngineExecutionPayload()))
else: discard
return

vendor/nim-chronos vendored

@@ -1 +1 @@
Subproject commit 00614476c68f0553432b4bb505e24d6ad5586ae4
Subproject commit 253bc3cfc079de35f9b96b9934ce702605400a51

@@ -1 +1 @@
Subproject commit f64d55f7ff480ed13eca9c97c15450a2c4106078
Subproject commit 85b7ea093cb85ee4f433a617b97571bd709d30df

@@ -1 +1 @@
Subproject commit bc46b4c1c1730cc25bf5fb5f3d64bd708a6ad89e
Subproject commit 4bdbc29e54fe54049950e352bb969aab97173b35

@@ -1 +1 @@
Subproject commit 3f8946ab2de304a9130aa80874bc2e4025d62303
Subproject commit edf07d4f7e0cb27afd207aa183c23cf448082d1b

vendor/nimbus-eth2 vendored

@@ -1 +1 @@
Subproject commit 5c88e74c08bf2c15d230b8b05d959809065a2894
Subproject commit 35bf03a3fbb6a55c52911479760c9bbb69e7c2cc