Bump submodules and fix related issues (#1448)

* Updated to the latest nim-eth, nim-rocksdb, nim-web3

* Bump nimbus-eth2 module and fix related issues

Temporarily disabling the Portal beacon light client network, as it is
a lot of copy-pasted code that did not yet take forks into
account. This will require a bigger rework and was not yet tested
in an actual network anyhow.

* More nimbus fixes after module bumps

---------

Co-authored-by: Adam Spitz <adamspitz@status.im>
Co-authored-by: jangko <jangko128@gmail.com>
This commit is contained in:
Kim De Mey 2023-01-27 15:57:48 +01:00 committed by GitHub
parent a669b51ec5
commit 8523d8b40e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 133 additions and 120 deletions

View File

@ -172,7 +172,8 @@ proc contains(db: ContentDB, key: openArray[byte]): bool =
db.kv.contains(key).expectDb()
proc del(db: ContentDB, key: openArray[byte]) =
db.kv.del(key).expectDb()
# TODO: Do we want to return the bool here too?
discard db.kv.del(key).expectDb()
proc getSszDecoded*(
db: ContentDB, key: openArray[byte], T: type auto): Opt[T] =

View File

@ -14,22 +14,22 @@ import
json_rpc/rpcproxy, stew/[byteutils, io2],
eth/keys, eth/net/nat,
eth/p2p/discoveryv5/protocol as discv5_protocol,
beacon_chain/beacon_clock,
beacon_chain/spec/forks,
beacon_chain/spec/datatypes/altair,
beacon_chain/gossip_processing/light_client_processor,
# beacon_chain/beacon_clock,
# beacon_chain/spec/forks,
# beacon_chain/spec/datatypes/altair,
# beacon_chain/gossip_processing/light_client_processor,
./conf, ./network_metadata, ./common/common_utils,
./rpc/[rpc_eth_api, bridge_client, rpc_discovery_api, rpc_portal_api,
rpc_portal_debug_api],
./network/state/[state_network, state_content],
./network/history/[history_network, history_content],
./network/beacon_light_client/[
light_client_init_loader,
light_client_content,
beacon_light_client,
light_client_db,
light_client_network
],
# ./network/beacon_light_client/[
# light_client_init_loader,
# light_client_content,
# beacon_light_client,
# light_client_db,
# light_client_network
# ],
./network/wire/[portal_stream, portal_protocol_config],
./eth_data/history_data_ssz_e2s,
./content_db
@ -177,74 +177,74 @@ proc run(config: PortalConf) {.raises: [CatchableError, Defect].} =
# TODO: Currently disabled by default as it is not stable/polished enough,
# ultimately this should probably be always on.
if config.trustedBlockRoot.isSome():
# fluffy light client works only over mainnet data
let
networkData = loadNetworkData("mainnet")
# if config.trustedBlockRoot.isSome():
# # fluffy light client works only over mainnet data
# let
# networkData = loadNetworkData("mainnet")
db = LightClientDb.new(config.dataDir / "lightClientDb")
# db = LightClientDb.new(config.dataDir / "lightClientDb")
lightClientNetwork = LightClientNetwork.new(
d,
db,
streamManager,
networkData.forks,
bootstrapRecords = bootstrapRecords)
# lightClientNetwork = LightClientNetwork.new(
# d,
# db,
# streamManager,
# networkData.forks,
# bootstrapRecords = bootstrapRecords)
getBeaconTime = networkData.clock.getBeaconTimeFn()
# getBeaconTime = networkData.clock.getBeaconTimeFn()
refDigests = newClone networkData.forks
# refDigests = newClone networkData.forks
lc = LightClient.new(
lightClientNetwork,
rng,
networkData.metadata.cfg,
refDigests,
getBeaconTime,
networkData.genesis_validators_root,
LightClientFinalizationMode.Optimistic
)
# lc = LightClient.new(
# lightClientNetwork,
# rng,
# networkData.metadata.cfg,
# refDigests,
# getBeaconTime,
# networkData.genesis_validators_root,
# LightClientFinalizationMode.Optimistic
# )
# TODO: For now just log headers. Ultimately we should also use callbacks for each
# lc object to save them to db and offer them to the network.
proc onFinalizedHeader(
lightClient: LightClient, finalizedHeader: BeaconBlockHeader) =
info "New LC finalized header",
finalized_header = shortLog(finalizedHeader)
# # TODO: For now just log headers. Ultimately we should also use callbacks for each
# # lc object to save them to db and offer them to the network.
# proc onFinalizedHeader(
# lightClient: LightClient, finalizedHeader: BeaconBlockHeader) =
# info "New LC finalized header",
# finalized_header = shortLog(finalizedHeader)
proc onOptimisticHeader(
lightClient: LightClient, optimisticHeader: BeaconBlockHeader) =
info "New LC optimistic header",
optimistic_header = shortLog(optimisticHeader)
# proc onOptimisticHeader(
# lightClient: LightClient, optimisticHeader: BeaconBlockHeader) =
# info "New LC optimistic header",
# optimistic_header = shortLog(optimisticHeader)
lc.onFinalizedHeader = onFinalizedHeader
lc.onOptimisticHeader = onOptimisticHeader
lc.trustedBlockRoot = config.trustedBlockRoot
# lc.onFinalizedHeader = onFinalizedHeader
# lc.onOptimisticHeader = onOptimisticHeader
# lc.trustedBlockRoot = config.trustedBlockRoot
proc onSecond(time: Moment) =
let wallSlot = getBeaconTime().slotOrZero()
# TODO this is a place to enable/disable gossip based on the current status
# of light client
# lc.updateGossipStatus(wallSlot + 1)
# proc onSecond(time: Moment) =
# let wallSlot = getBeaconTime().slotOrZero()
# # TODO this is a place to enable/disable gossip based on the current status
# # of light client
# # lc.updateGossipStatus(wallSlot + 1)
proc runOnSecondLoop() {.async.} =
let sleepTime = chronos.seconds(1)
while true:
let start = chronos.now(chronos.Moment)
await chronos.sleepAsync(sleepTime)
let afterSleep = chronos.now(chronos.Moment)
let sleepTime = afterSleep - start
onSecond(start)
let finished = chronos.now(chronos.Moment)
let processingTime = finished - afterSleep
trace "onSecond task completed", sleepTime, processingTime
# proc runOnSecondLoop() {.async.} =
# let sleepTime = chronos.seconds(1)
# while true:
# let start = chronos.now(chronos.Moment)
# await chronos.sleepAsync(sleepTime)
# let afterSleep = chronos.now(chronos.Moment)
# let sleepTime = afterSleep - start
# onSecond(start)
# let finished = chronos.now(chronos.Moment)
# let processingTime = finished - afterSleep
# trace "onSecond task completed", sleepTime, processingTime
onSecond(Moment.now())
# onSecond(Moment.now())
lightClientNetwork.start()
lc.start()
# lightClientNetwork.start()
# lc.start()
asyncSpawn runOnSecondLoop()
# asyncSpawn runOnSecondLoop()
historyNetwork.start()
stateNetwork.start()

View File

@ -99,7 +99,7 @@ template unsafeQuantityToInt64(q: Quantity): int64 =
int64 q
proc asPortalBlockData*(
payload: ExecutionPayloadV1 | ExecutionPayloadV2):
payload: ExecutionPayloadV1 | ExecutionPayloadV2 | ExecutionPayloadV3):
(common_types.BlockHash, BlockHeaderWithProof, BlockBodySSZ) =
proc calculateTransactionData(
items: openArray[TypedTransaction]):
@ -120,6 +120,7 @@ proc asPortalBlockData*(
let
txRoot = calculateTransactionData(payload.transactions)
# TODO: update according to payload type
header = etypes.BlockHeader(
parentHash: payload.parentHash.asEthHash,
ommersHash: EMPTY_UNCLE_HASH,
@ -298,31 +299,33 @@ proc run() {.raises: [Exception, Defect].} =
waitFor network.start()
proc onFinalizedHeader(
lightClient: LightClient, finalizedHeader: BeaconBlockHeader) =
info "New LC finalized header",
finalized_header = shortLog(finalizedHeader)
lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) =
withForkyHeader(finalizedHeader):
when lcDataFork > LightClientDataFork.None:
info "New LC finalized header",
finalized_header = shortLog(forkyHeader)
proc onOptimisticHeader(
lightClient: LightClient, optimisticHeader: BeaconBlockHeader) =
info "New LC optimistic header",
optimistic_header = shortLog(optimisticHeader)
optimisticProcessor.setOptimisticHeader(optimisticHeader)
lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) =
withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
info "New LC optimistic header",
optimistic_header = shortLog(forkyHeader)
optimisticProcessor.setOptimisticHeader(forkyHeader.beacon)
lightClient.onFinalizedHeader = onFinalizedHeader
lightClient.onOptimisticHeader = onOptimisticHeader
lightClient.trustedBlockRoot = some config.trustedBlockRoot
func shouldSyncOptimistically(wallSlot: Slot): bool =
# Check whether light client is used
let optimisticHeader = lightClient.optimisticHeader.valueOr:
return false
# Check whether light client has synced sufficiently close to wall slot
const maxAge = 2 * SLOTS_PER_EPOCH
if optimisticHeader.slot < max(wallSlot, maxAge.Slot) - maxAge:
return false
true
let optimisticHeader = lightClient.optimisticHeader
withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
# Check whether light client has synced sufficiently close to wall slot
const maxAge = 2 * SLOTS_PER_EPOCH
forkyHeader.beacon.slot >= max(wallSlot, maxAge.Slot) - maxAge
else:
false
var blocksGossipState: GossipState = {}
proc updateBlocksGossipStatus(slot: Slot) =

View File

@ -19,7 +19,7 @@ type
gasLimit* : GasInt
gasUsed* : GasInt
timestamp* : EthTime
extraData* : Blob
extraData* : common.Blob
baseFeePerGas*: UInt256
blockHash* : Hash256
transactions* : seq[Transaction]
@ -35,7 +35,7 @@ type
gasLimit* : Option[GasInt]
gasUsed* : Option[GasInt]
timestamp* : Option[EthTime]
extraData* : Option[Blob]
extraData* : Option[common.Blob]
baseFeePerGas*: Option[UInt256]
blockHash* : Option[Hash256]
transactions* : Option[seq[Transaction]]
@ -299,6 +299,6 @@ proc generateInvalidPayload*(basePayload: ExecutionPayloadV1,
proc txInPayload*(payload: ExecutionPayloadV1, txHash: Hash256): bool =
for txBytes in payload.transactions:
let currTx = rlp.decode(Blob txBytes, Transaction)
let currTx = rlp.decode(common.Blob txBytes, Transaction)
if rlpHash(currTx) == txHash:
return true

View File

@ -84,7 +84,7 @@ task fluffy_test, "Run fluffy tests":
# Running tests with a low `mergeBlockNumber` to make the tests faster.
# Using the real mainnet merge block number is not realistic for these tests.
test "fluffy/tests", "all_fluffy_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false -d:mergeBlockNumber:38130"
test "fluffy/tests/beacon_light_client_tests", "all_beacon_light_client_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false"
# test "fluffy/tests/beacon_light_client_tests", "all_beacon_light_client_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false"
task fluffy_tools, "Build fluffy tools":
buildBinary "beacon_chain_bridge", "fluffy/tools/bridge/", "-d:chronicles_log_level=TRACE -d:chronosStrictException -d:PREFER_BLST_SHA256=false -d:libp2p_pki_schemes=secp256k1"

View File

@ -14,7 +14,6 @@
import
../../../common/common,
../../../transaction,
../../../vm_state,
../../../vm_types,
../../validate,
@ -25,6 +24,8 @@ import
chronicles,
eth/keys
import ../../../transaction except GasPrice, GasPriceEx # already in tx_item
{.push raises: [Defect].}
logScope:

View File

@ -26,9 +26,12 @@ proc put*(db: RocksStoreRef, key, value: openArray[byte]): KvResult[void] =
proc contains*(db: RocksStoreRef, key: openArray[byte]): KvResult[bool] =
db.store.contains(key)
proc del*(db: RocksStoreRef, key: openArray[byte]): KvResult[void] =
proc del*(db: RocksStoreRef, key: openArray[byte]): KvResult[bool] =
db.store.del(key)
proc clear*(db: RocksStoreRef): KvResult[bool] =
db.store.clear()
proc close*(db: RocksStoreRef) =
db.store.close

View File

@ -42,7 +42,7 @@ proc put*(db: ChainDB, key, value: openArray[byte]) =
proc contains*(db: ChainDB, key: openArray[byte]): bool =
db.kv.contains(key).expect("working database")
proc del*(db: ChainDB, key: openArray[byte]) =
proc del*(db: ChainDB, key: openArray[byte]): bool =
db.kv.del(key).expect("working database")
when dbBackend == sqlite:

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
stew/endians2, stew/ranges/ptr_arith,
stew/endians2,
eth/common/eth_types,
../../../constants
@ -91,5 +91,4 @@ func safeInt*(x: UInt256): int {.inline.} =
func toInt*(x: EthAddress): int =
type T = uint32
const len = sizeof(T)
fromBytesBE(T, makeOpenArray(x[x.len-len].unsafeAddr, len)).int
fromBytesBE(T, x.toOpenArray(x.len-len, x.len-1)).int

View File

@ -157,31 +157,33 @@ proc run() {.raises: [Exception, Defect].} =
waitFor verifiedProxy.verifyChaindId()
proc onFinalizedHeader(
lightClient: LightClient, finalizedHeader: BeaconBlockHeader) =
info "New LC finalized header",
finalized_header = shortLog(finalizedHeader)
lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) =
withForkyHeader(finalizedHeader):
when lcDataFork > LightClientDataFork.None:
info "New LC finalized header",
finalized_header = shortLog(forkyHeader)
proc onOptimisticHeader(
lightClient: LightClient, optimisticHeader: BeaconBlockHeader) =
info "New LC optimistic header",
optimistic_header = shortLog(optimisticHeader)
optimisticProcessor.setOptimisticHeader(optimisticHeader)
lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) =
withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
info "New LC optimistic header",
optimistic_header = shortLog(forkyHeader)
optimisticProcessor.setOptimisticHeader(forkyHeader.beacon)
lightClient.onFinalizedHeader = onFinalizedHeader
lightClient.onOptimisticHeader = onOptimisticHeader
lightClient.trustedBlockRoot = some config.trustedBlockRoot
func shouldSyncOptimistically(wallSlot: Slot): bool =
# Check whether light client is used
let optimisticHeader = lightClient.optimisticHeader.valueOr:
return false
# Check whether light client has synced sufficiently close to wall slot
const maxAge = 2 * SLOTS_PER_EPOCH
if optimisticHeader.slot < max(wallSlot, maxAge.Slot) - maxAge:
return false
true
let optimisticHeader = lightClient.optimisticHeader
withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
# Check whether light client has synced sufficiently close to wall slot
const maxAge = 2 * SLOTS_PER_EPOCH
forkyHeader.beacon.slot >= max(wallSlot, maxAge.Slot) - maxAge
else:
false
var blocksGossipState: GossipState = {}
proc updateBlocksGossipStatus(slot: Slot) =

View File

@ -36,7 +36,7 @@ type
withdrawals*: seq[WithdrawalV1]
proc asExecutionData*(
payload: ExecutionPayloadV1 | ExecutionPayloadV2): ExecutionData =
payload: ExecutionPayloadV1 | ExecutionPayloadV2 | ExecutionPayloadV3): ExecutionData =
when payload is ExecutionPayloadV1:
return ExecutionData(
parentHash: payload.parentHash,
@ -56,6 +56,7 @@ proc asExecutionData*(
withdrawals: @[]
)
else:
# TODO: Deal with different payload types
return ExecutionData(
parentHash: payload.parentHash,
feeRecipient: payload.feeRecipient,

View File

@ -17,4 +17,4 @@ suite "RocksStoreRef":
defer:
db.close()
testKvStore(kvStore db, false)
testKvStore(kvStore db, false, false)

View File

@ -9,13 +9,16 @@ import
../nimbus/evm/[async_operations, types],
../nimbus/vm_internals,
../nimbus/transaction/[call_common, call_evm],
../nimbus/[transaction, vm_types, vm_state],
../nimbus/[vm_types, vm_state],
../nimbus/core/pow/difficulty
# Need to exclude ServerCommand because it contains something
# called Stop that interferes with the EVM operation named Stop.
import chronos except ServerCommand
# Ditto, for GasPrice.
import ../nimbus/transaction except GasPrice
export byteutils
{.experimental: "dynamicBindSym".}

View File

@ -1,6 +1,6 @@
import
std/[os],
unittest2, stew/byteutils,
unittest2,
eth/common/eth_types,
../nimbus/vm_internals,
../nimbus/core/pow/header

2
vendor/nim-eth vendored

@ -1 +1 @@
Subproject commit 40ec601d072cf62cb3d3a7929448c43896b3f936
Subproject commit 20d2d318f063d05a0a989b01be80e7560cda34c5

2
vendor/nim-rocksdb vendored

@ -1 +1 @@
Subproject commit b7f07282251b043fa07fc071405727dd17c61654
Subproject commit 724b72fe104bd0af5e1520326e68e9cee0890ec8

2
vendor/nim-web3 vendored

@ -1 +1 @@
Subproject commit df2ef09c63f43eb9a2597bdb3e832ddbf1461871
Subproject commit 98fba0fb0471abffdbe69fb8e66bb59152a7075c

2
vendor/nimbus-eth2 vendored

@ -1 +1 @@
Subproject commit 6b5682df70714ab6f8b91206840176d16e02db0c
Subproject commit 968e27bc971edb753d16bbb7e52cfc3b47ba99e9