Bump submodules and fix related issues (#1448)
* Update to the latest nim-eth, nim-rocksdb and nim-web3

* Bump the nimbus-eth2 module and fix related issues

  The Portal beacon light client network is temporarily disabled: it is largely copy-pasted code that does not yet take forks into account. It needs a bigger rework and had not been tested in an actual network anyway.

* More nimbus fixes after the module bumps

---------

Co-authored-by: Adam Spitz <adamspitz@status.im>
Co-authored-by: jangko <jangko128@gmail.com>
parent a669b51ec5
commit 8523d8b40e
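
Note on the "forks" issue mentioned above: the bumped nimbus-eth2 now hands out light client headers as forked union types, handled via `withForkyHeader`, which the disabled Portal beacon light client code does not yet do. A minimal sketch of that pattern, mirroring the bridge and verified-proxy callback changes further down in this diff; the standalone callback and the module paths are assumptions, not code from this commit:

import
  chronicles,
  beacon_chain/spec/forks_light_client  # assumed module path for the forked LC types

proc onFinalizedHeader(finalizedHeader: ForkedLightClientHeader) =
  # `withForkyHeader` injects `forkyHeader` and `lcDataFork` for the
  # concrete fork of the wrapped header.
  withForkyHeader(finalizedHeader):
    when lcDataFork > LightClientDataFork.None:
      info "New LC finalized header",
        finalized_header = shortLog(forkyHeader)
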
@@ -172,7 +172,8 @@ proc contains(db: ContentDB, key: openArray[byte]): bool =
   db.kv.contains(key).expectDb()

 proc del(db: ContentDB, key: openArray[byte]) =
-  db.kv.del(key).expectDb()
+  # TODO: Do we want to return the bool here too?
+  discard db.kv.del(key).expectDb()

 proc getSszDecoded*(
     db: ContentDB, key: openArray[byte], T: type auto): Opt[T] =
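
The TODO above refers to the bumped nim-eth kvstore API, where `del` now reports whether a key was actually removed instead of returning void. A possible follow-up in the same `content_db.nim` context, purely illustrative and not part of this commit:

proc del(db: ContentDB, key: openArray[byte]): bool =
  # Propagate the bool from the underlying kvstore so callers can tell
  # whether the key was present before deletion.
  db.kv.del(key).expectDb()
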
@@ -14,22 +14,22 @@ import
   json_rpc/rpcproxy, stew/[byteutils, io2],
   eth/keys, eth/net/nat,
   eth/p2p/discoveryv5/protocol as discv5_protocol,
-  beacon_chain/beacon_clock,
-  beacon_chain/spec/forks,
-  beacon_chain/spec/datatypes/altair,
-  beacon_chain/gossip_processing/light_client_processor,
+  # beacon_chain/beacon_clock,
+  # beacon_chain/spec/forks,
+  # beacon_chain/spec/datatypes/altair,
+  # beacon_chain/gossip_processing/light_client_processor,
   ./conf, ./network_metadata, ./common/common_utils,
   ./rpc/[rpc_eth_api, bridge_client, rpc_discovery_api, rpc_portal_api,
     rpc_portal_debug_api],
   ./network/state/[state_network, state_content],
   ./network/history/[history_network, history_content],
-  ./network/beacon_light_client/[
-    light_client_init_loader,
-    light_client_content,
-    beacon_light_client,
-    light_client_db,
-    light_client_network
-  ],
+  # ./network/beacon_light_client/[
+  #   light_client_init_loader,
+  #   light_client_content,
+  #   beacon_light_client,
+  #   light_client_db,
+  #   light_client_network
+  # ],
   ./network/wire/[portal_stream, portal_protocol_config],
   ./eth_data/history_data_ssz_e2s,
   ./content_db
@@ -177,74 +177,74 @@ proc run(config: PortalConf) {.raises: [CatchableError, Defect].} =

   # TODO: Currently disabled by default as it is not stable/polished enough,
   # ultimatetely this should probably be always on.
-  if config.trustedBlockRoot.isSome():
-    # fluffy light client works only over mainnet data
-    let
-      networkData = loadNetworkData("mainnet")
+  # if config.trustedBlockRoot.isSome():
+  #   # fluffy light client works only over mainnet data
+  #   let
+  #     networkData = loadNetworkData("mainnet")

-      db = LightClientDb.new(config.dataDir / "lightClientDb")
+  #     db = LightClientDb.new(config.dataDir / "lightClientDb")

-      lightClientNetwork = LightClientNetwork.new(
-        d,
-        db,
-        streamManager,
-        networkData.forks,
-        bootstrapRecords = bootstrapRecords)
+  #     lightClientNetwork = LightClientNetwork.new(
+  #       d,
+  #       db,
+  #       streamManager,
+  #       networkData.forks,
+  #       bootstrapRecords = bootstrapRecords)

-      getBeaconTime = networkData.clock.getBeaconTimeFn()
+  #     getBeaconTime = networkData.clock.getBeaconTimeFn()

-      refDigests = newClone networkData.forks
+  #     refDigests = newClone networkData.forks

-      lc = LightClient.new(
-        lightClientNetwork,
-        rng,
-        networkData.metadata.cfg,
-        refDigests,
-        getBeaconTime,
-        networkData.genesis_validators_root,
-        LightClientFinalizationMode.Optimistic
-      )
+  #     lc = LightClient.new(
+  #       lightClientNetwork,
+  #       rng,
+  #       networkData.metadata.cfg,
+  #       refDigests,
+  #       getBeaconTime,
+  #       networkData.genesis_validators_root,
+  #       LightClientFinalizationMode.Optimistic
+  #     )

-    # TODO: For now just log headers. Ultimately we should also use callbacks for each
-    # lc object to save them to db and offer them to the network.
-    proc onFinalizedHeader(
-        lightClient: LightClient, finalizedHeader: BeaconBlockHeader) =
-      info "New LC finalized header",
-        finalized_header = shortLog(finalizedHeader)
+  #   # TODO: For now just log headers. Ultimately we should also use callbacks for each
+  #   # lc object to save them to db and offer them to the network.
+  #   proc onFinalizedHeader(
+  #       lightClient: LightClient, finalizedHeader: BeaconBlockHeader) =
+  #     info "New LC finalized header",
+  #       finalized_header = shortLog(finalizedHeader)

-    proc onOptimisticHeader(
-        lightClient: LightClient, optimisticHeader: BeaconBlockHeader) =
-      info "New LC optimistic header",
-        optimistic_header = shortLog(optimisticHeader)
+  #   proc onOptimisticHeader(
+  #       lightClient: LightClient, optimisticHeader: BeaconBlockHeader) =
+  #     info "New LC optimistic header",
+  #       optimistic_header = shortLog(optimisticHeader)

-    lc.onFinalizedHeader = onFinalizedHeader
-    lc.onOptimisticHeader = onOptimisticHeader
-    lc.trustedBlockRoot = config.trustedBlockRoot
+  #   lc.onFinalizedHeader = onFinalizedHeader
+  #   lc.onOptimisticHeader = onOptimisticHeader
+  #   lc.trustedBlockRoot = config.trustedBlockRoot

-    proc onSecond(time: Moment) =
-      let wallSlot = getBeaconTime().slotOrZero()
-      # TODO this is a place to enable/disable gossip based on the current status
-      # of light client
-      # lc.updateGossipStatus(wallSlot + 1)
+  #   proc onSecond(time: Moment) =
+  #     let wallSlot = getBeaconTime().slotOrZero()
+  #     # TODO this is a place to enable/disable gossip based on the current status
+  #     # of light client
+  #     # lc.updateGossipStatus(wallSlot + 1)

-    proc runOnSecondLoop() {.async.} =
-      let sleepTime = chronos.seconds(1)
-      while true:
-        let start = chronos.now(chronos.Moment)
-        await chronos.sleepAsync(sleepTime)
-        let afterSleep = chronos.now(chronos.Moment)
-        let sleepTime = afterSleep - start
-        onSecond(start)
-        let finished = chronos.now(chronos.Moment)
-        let processingTime = finished - afterSleep
-        trace "onSecond task completed", sleepTime, processingTime
+  #   proc runOnSecondLoop() {.async.} =
+  #     let sleepTime = chronos.seconds(1)
+  #     while true:
+  #       let start = chronos.now(chronos.Moment)
+  #       await chronos.sleepAsync(sleepTime)
+  #       let afterSleep = chronos.now(chronos.Moment)
+  #       let sleepTime = afterSleep - start
+  #       onSecond(start)
+  #       let finished = chronos.now(chronos.Moment)
+  #       let processingTime = finished - afterSleep
+  #       trace "onSecond task completed", sleepTime, processingTime

-    onSecond(Moment.now())
+  #   onSecond(Moment.now())

-    lightClientNetwork.start()
-    lc.start()
+  #   lightClientNetwork.start()
+  #   lc.start()

-    asyncSpawn runOnSecondLoop()
+  #   asyncSpawn runOnSecondLoop()

   historyNetwork.start()
   stateNetwork.start()
@@ -99,7 +99,7 @@ template unsafeQuantityToInt64(q: Quantity): int64 =
   int64 q

 proc asPortalBlockData*(
-    payload: ExecutionPayloadV1 | ExecutionPayloadV2):
+    payload: ExecutionPayloadV1 | ExecutionPayloadV2 | ExecutionPayloadV3):
     (common_types.BlockHash, BlockHeaderWithProof, BlockBodySSZ) =
   proc calculateTransactionData(
       items: openArray[TypedTransaction]):
@@ -120,6 +120,7 @@ proc asPortalBlockData*(
   let
     txRoot = calculateTransactionData(payload.transactions)

+    # TODO: update according to payload type
     header = etypes.BlockHeader(
       parentHash: payload.parentHash.asEthHash,
       ommersHash: EMPTY_UNCLE_HASH,
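
The new TODO notes that the header is still built the same way for every payload version even though V3 payloads are now accepted. One direction it could later take is compile-time dispatch on the payload type; the snippet below is only a sketch of that idea, with `calcWithdrawalsRoot` a hypothetical helper and the payload field name assumed, none of it part of this commit:

import std/options
# assumes Hash256 from eth/common and the nim-web3 execution payload types are in scope

proc withdrawalsRootOpt(
    payload: ExecutionPayloadV1 | ExecutionPayloadV2 | ExecutionPayloadV3):
    Option[Hash256] =
  when payload is ExecutionPayloadV1:
    # Pre-Shanghai payloads carry no withdrawals.
    none(Hash256)
  else:
    # Hypothetical helper computing the trie root of the withdrawals list.
    some(calcWithdrawalsRoot(payload.withdrawals))
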
@@ -298,31 +299,33 @@ proc run() {.raises: [Exception, Defect].} =
   waitFor network.start()

   proc onFinalizedHeader(
-      lightClient: LightClient, finalizedHeader: BeaconBlockHeader) =
-    info "New LC finalized header",
-      finalized_header = shortLog(finalizedHeader)
+      lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) =
+    withForkyHeader(finalizedHeader):
+      when lcDataFork > LightClientDataFork.None:
+        info "New LC finalized header",
+          finalized_header = shortLog(forkyHeader)

   proc onOptimisticHeader(
-      lightClient: LightClient, optimisticHeader: BeaconBlockHeader) =
-    info "New LC optimistic header",
-      optimistic_header = shortLog(optimisticHeader)
-    optimisticProcessor.setOptimisticHeader(optimisticHeader)
+      lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) =
+    withForkyHeader(optimisticHeader):
+      when lcDataFork > LightClientDataFork.None:
+        info "New LC optimistic header",
+          optimistic_header = shortLog(forkyHeader)
+        optimisticProcessor.setOptimisticHeader(forkyHeader.beacon)

   lightClient.onFinalizedHeader = onFinalizedHeader
   lightClient.onOptimisticHeader = onOptimisticHeader
   lightClient.trustedBlockRoot = some config.trustedBlockRoot

   func shouldSyncOptimistically(wallSlot: Slot): bool =
-    # Check whether light client is used
-    let optimisticHeader = lightClient.optimisticHeader.valueOr:
-      return false
-
-    # Check whether light client has synced sufficiently close to wall slot
-    const maxAge = 2 * SLOTS_PER_EPOCH
-    if optimisticHeader.slot < max(wallSlot, maxAge.Slot) - maxAge:
-      return false
-
-    true
+    let optimisticHeader = lightClient.optimisticHeader
+    withForkyHeader(optimisticHeader):
+      when lcDataFork > LightClientDataFork.None:
+        # Check whether light client has synced sufficiently close to wall slot
+        const maxAge = 2 * SLOTS_PER_EPOCH
+        forkyHeader.beacon.slot >= max(wallSlot, maxAge.Slot) - maxAge
+      else:
+        false

   var blocksGossipState: GossipState = {}
   proc updateBlocksGossipStatus(slot: Slot) =
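
In the rewritten `shouldSyncOptimistically` above, the explicit `return` statements are gone: the final expression of each `when` branch becomes the function result. A tiny standalone illustration of that style using plain unsigned integers (the proc name and the 32-slot epoch constant here are illustrative, not taken from this commit):

func freshEnough(headerSlot, wallSlot: uint64): bool =
  const maxAge = 2'u64 * 32  # two epochs of 32 slots
  # Last expression is the return value; no explicit `return` needed.
  headerSlot >= max(wallSlot, maxAge) - maxAge
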
@@ -19,7 +19,7 @@ type
     gasLimit*     : GasInt
     gasUsed*      : GasInt
     timestamp*    : EthTime
-    extraData*    : Blob
+    extraData*    : common.Blob
     baseFeePerGas*: UInt256
     blockHash*    : Hash256
     transactions* : seq[Transaction]
@@ -35,7 +35,7 @@ type
     gasLimit*     : Option[GasInt]
     gasUsed*      : Option[GasInt]
     timestamp*    : Option[EthTime]
-    extraData*    : Option[Blob]
+    extraData*    : Option[common.Blob]
    baseFeePerGas*: Option[UInt256]
     blockHash*    : Option[Hash256]
     transactions* : Option[seq[Transaction]]
@@ -299,6 +299,6 @@ proc generateInvalidPayload*(basePayload: ExecutionPayloadV1,

 proc txInPayload*(payload: ExecutionPayloadV1, txHash: Hash256): bool =
   for txBytes in payload.transactions:
-    let currTx = rlp.decode(Blob txBytes, Transaction)
+    let currTx = rlp.decode(common.Blob txBytes, Transaction)
     if rlpHash(currTx) == txHash:
       return true
@@ -84,7 +84,7 @@ task fluffy_test, "Run fluffy tests":
   # Running tests with a low `mergeBlockNumber` to make the tests faster.
   # Using the real mainnet merge block number is not realistic for these tests.
   test "fluffy/tests", "all_fluffy_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false -d:mergeBlockNumber:38130"
-  test "fluffy/tests/beacon_light_client_tests", "all_beacon_light_client_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false"
+  # test "fluffy/tests/beacon_light_client_tests", "all_beacon_light_client_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false"

 task fluffy_tools, "Build fluffy tools":
   buildBinary "beacon_chain_bridge", "fluffy/tools/bridge/", "-d:chronicles_log_level=TRACE -d:chronosStrictException -d:PREFER_BLST_SHA256=false -d:libp2p_pki_schemes=secp256k1"
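
The `-d:mergeBlockNumber:38130` flag in the task above is a compile-time define that lets the tests use a low merge block instead of the real mainnet one. A generic sketch of how such a define is typically consumed in Nim with the `intdefine` pragma; the constant name and default below are assumptions, not quoted from fluffy:

# Compiled-in default; `-d:mergeBlockNumber:38130` from the nimble task
# overrides it at compile time.
const mergeBlockNumber {.intdefine.} = 15537394
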
@@ -14,7 +14,6 @@
 import
   ../../../common/common,
-  ../../../transaction,
   ../../../vm_state,
   ../../../vm_types,
   ../../validate,
@@ -25,6 +24,8 @@ import
   chronicles,
   eth/keys

+import ../../../transaction except GasPrice, GasPriceEx # already in tx_item
+
 {.push raises: [Defect].}

 logScope:
@@ -26,9 +26,12 @@ proc put*(db: RocksStoreRef, key, value: openArray[byte]): KvResult[void] =
 proc contains*(db: RocksStoreRef, key: openArray[byte]): KvResult[bool] =
   db.store.contains(key)

-proc del*(db: RocksStoreRef, key: openArray[byte]): KvResult[void] =
+proc del*(db: RocksStoreRef, key: openArray[byte]): KvResult[bool] =
   db.store.del(key)

+proc clear*(db: RocksStoreRef): KvResult[bool] =
+  db.store.clear()
+
 proc close*(db: RocksStoreRef) =
   db.store.close

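
With the nim-rocksdb/nim-eth bump, `del` returns `KvResult[bool]` and a `clear` proc is added. A hedged usage sketch, assuming the bool reports whether the key existed; the wrapper name is made up and this is not code from the commit:

proc pruneKey(db: RocksStoreRef, key: openArray[byte]): bool =
  # True when the key was present and has been removed.
  db.del(key).expect("working database")
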
@@ -42,7 +42,7 @@ proc put*(db: ChainDB, key, value: openArray[byte]) =
 proc contains*(db: ChainDB, key: openArray[byte]): bool =
   db.kv.contains(key).expect("working database")

-proc del*(db: ChainDB, key: openArray[byte]) =
+proc del*(db: ChainDB, key: openArray[byte]): bool =
   db.kv.del(key).expect("working database")

 when dbBackend == sqlite:
@@ -6,7 +6,7 @@
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

 import
-  stew/endians2, stew/ranges/ptr_arith,
+  stew/endians2,
   eth/common/eth_types,
   ../../../constants

@@ -91,5 +91,4 @@ func safeInt*(x: UInt256): int {.inline.} =
 func toInt*(x: EthAddress): int =
   type T = uint32
   const len = sizeof(T)
-  fromBytesBE(T, makeOpenArray(x[x.len-len].unsafeAddr, len)).int
-
+  fromBytesBE(T, x.toOpenArray(x.len-len, x.len-1)).int
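
The `toInt` change swaps `makeOpenArray`/`unsafeAddr` pointer arithmetic for a bounds-checked, inclusive `toOpenArray` slice, which is also why `stew/ranges/ptr_arith` could be dropped from the imports in the previous hunk. A small self-contained version of the same idea; the helper name is made up:

import stew/endians2, eth/common/eth_types

func trailingWord(x: EthAddress): uint32 =
  # Interpret the last 4 bytes of the address as a big-endian uint32,
  # using an inclusive slice instead of raw pointer arithmetic.
  fromBytesBE(uint32, x.toOpenArray(x.len - 4, x.len - 1))
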
@@ -157,31 +157,33 @@ proc run() {.raises: [Exception, Defect].} =
   waitFor verifiedProxy.verifyChaindId()

   proc onFinalizedHeader(
-      lightClient: LightClient, finalizedHeader: BeaconBlockHeader) =
-    info "New LC finalized header",
-      finalized_header = shortLog(finalizedHeader)
+      lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) =
+    withForkyHeader(finalizedHeader):
+      when lcDataFork > LightClientDataFork.None:
+        info "New LC finalized header",
+          finalized_header = shortLog(forkyHeader)

   proc onOptimisticHeader(
-      lightClient: LightClient, optimisticHeader: BeaconBlockHeader) =
-    info "New LC optimistic header",
-      optimistic_header = shortLog(optimisticHeader)
-    optimisticProcessor.setOptimisticHeader(optimisticHeader)
+      lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) =
+    withForkyHeader(optimisticHeader):
+      when lcDataFork > LightClientDataFork.None:
+        info "New LC optimistic header",
+          optimistic_header = shortLog(forkyHeader)
+        optimisticProcessor.setOptimisticHeader(forkyHeader.beacon)

   lightClient.onFinalizedHeader = onFinalizedHeader
   lightClient.onOptimisticHeader = onOptimisticHeader
   lightClient.trustedBlockRoot = some config.trustedBlockRoot

   func shouldSyncOptimistically(wallSlot: Slot): bool =
-    # Check whether light client is used
-    let optimisticHeader = lightClient.optimisticHeader.valueOr:
-      return false
-
-    # Check whether light client has synced sufficiently close to wall slot
-    const maxAge = 2 * SLOTS_PER_EPOCH
-    if optimisticHeader.slot < max(wallSlot, maxAge.Slot) - maxAge:
-      return false
-
-    true
+    let optimisticHeader = lightClient.optimisticHeader
+    withForkyHeader(optimisticHeader):
+      when lcDataFork > LightClientDataFork.None:
+        # Check whether light client has synced sufficiently close to wall slot
+        const maxAge = 2 * SLOTS_PER_EPOCH
+        forkyHeader.beacon.slot >= max(wallSlot, maxAge.Slot) - maxAge
+      else:
+        false

   var blocksGossipState: GossipState = {}
   proc updateBlocksGossipStatus(slot: Slot) =
@@ -36,7 +36,7 @@ type
     withdrawals*: seq[WithdrawalV1]

 proc asExecutionData*(
-    payload: ExecutionPayloadV1 | ExecutionPayloadV2): ExecutionData =
+    payload: ExecutionPayloadV1 | ExecutionPayloadV2 | ExecutionPayloadV3): ExecutionData =
   when payload is ExecutionPayloadV1:
     return ExecutionData(
       parentHash: payload.parentHash,
@@ -56,6 +56,7 @@ proc asExecutionData*(
       withdrawals: @[]
     )
   else:
+    # TODO: Deal with different payload types
     return ExecutionData(
       parentHash: payload.parentHash,
       feeRecipient: payload.feeRecipient,
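
The TODO added in the `else` branch concerns fields such as withdrawals that only V2/V3 payloads carry. A hypothetical direction, not part of this commit and assuming the nim-web3 payload field is named `withdrawals`:

proc withdrawalsOf(
    payload: ExecutionPayloadV1 | ExecutionPayloadV2 | ExecutionPayloadV3):
    seq[WithdrawalV1] =
  when payload is ExecutionPayloadV1:
    # Pre-Shanghai payloads have no withdrawals.
    @[]
  else:
    payload.withdrawals
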
@@ -17,4 +17,4 @@ suite "RocksStoreRef":
   defer:
     db.close()

-  testKvStore(kvStore db, false)
+  testKvStore(kvStore db, false, false)
@@ -9,13 +9,16 @@ import
   ../nimbus/evm/[async_operations, types],
   ../nimbus/vm_internals,
   ../nimbus/transaction/[call_common, call_evm],
-  ../nimbus/[transaction, vm_types, vm_state],
+  ../nimbus/[vm_types, vm_state],
   ../nimbus/core/pow/difficulty

 # Need to exclude ServerCommand because it contains something
 # called Stop that interferes with the EVM operation named Stop.
 import chronos except ServerCommand

+# Ditto, for GasPrice.
+import ../nimbus/transaction except GasPrice
+
 export byteutils
 {.experimental: "dynamicBindSym".}

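
The two `import ... except ...` lines above dodge symbol clashes: chronos brings in a `Stop` that collides with the EVM op `Stop`, and the nimbus transaction module brings in `GasPrice`. A toy, standard-library-only illustration of the pattern (not nimbus code):

import std/strutils except split

proc split(s: string): seq[string] =
  # Local definition; no ambiguity with strutils.split since it was excluded.
  @[s]

echo split("kept whole")  # resolves to the local proc
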
@@ -1,6 +1,6 @@
 import
   std/[os],
-  unittest2, stew/byteutils,
+  unittest2,
   eth/common/eth_types,
   ../nimbus/vm_internals,
   ../nimbus/core/pow/header
@@ -1 +1 @@
-Subproject commit 40ec601d072cf62cb3d3a7929448c43896b3f936
+Subproject commit 20d2d318f063d05a0a989b01be80e7560cda34c5
@@ -1 +1 @@
-Subproject commit b7f07282251b043fa07fc071405727dd17c61654
+Subproject commit 724b72fe104bd0af5e1520326e68e9cee0890ec8
@@ -1 +1 @@
-Subproject commit df2ef09c63f43eb9a2597bdb3e832ddbf1461871
+Subproject commit 98fba0fb0471abffdbe69fb8e66bb59152a7075c
@@ -1 +1 @@
-Subproject commit 6b5682df70714ab6f8b91206840176d16e02db0c
+Subproject commit 968e27bc971edb753d16bbb7e52cfc3b47ba99e9