nimbus-eth2/beacon_chain/beacon_node_light_client.nim
Commit 18409a69e1 by Eugene Kabanov: Light forward sync mechanism (#6515), 2024-10-30 05:38:53 +00:00

# beacon_chain
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  chronicles, web3/engine_api_types,
  ./beacon_node

logScope: topics = "beacnde"
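
# Whether the node should follow the light client's optimistic header rather
# than the (possibly lagging) DAG head: delegates to the slot-based
# `shouldSyncOptimistically` overload, and is `false` while no LC header
# is available.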
func shouldSyncOptimistically*(node: BeaconNode, wallSlot: Slot): bool =
  let optimisticHeader = node.lightClient.optimisticHeader
  withForkyHeader(optimisticHeader):
    when lcDataFork > LightClientDataFork.None:
      shouldSyncOptimistically(
        optimisticSlot = forkyHeader.beacon.slot,
        dagSlot = getStateField(node.dag.headState, slot),
        wallSlot = wallSlot)
    else:
      false
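
# Creates the light client and wires its header callbacks into the node.
# When LC sync is enabled, the callbacks forward LC-verified heads to the
# execution layer via `setOptimisticHead` / `forkchoiceUpdated`.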
proc initLightClient*(
    node: BeaconNode,
    rng: ref HmacDrbgContext,
    cfg: RuntimeConfig,
    forkDigests: ref ForkDigests,
    getBeaconTime: GetBeaconTimeFn,
    genesis_validators_root: Eth2Digest) =
  template config(): auto = node.config

  # Creating a light client is not dependent on `syncLightClient`
  # because the light client module also handles gossip subscriptions
  # for broadcasting light client data as a server.
  let
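    # Handler invoked by the optimistic processor: execution payloads of
    # LC-verified blocks are forwarded to the EL client without waiting for
    # full consensus verification.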
    optimisticHandler = proc(
        signedBlock: ForkedSignedBeaconBlock
    ): Future[void] {.async: (raises: [CancelledError]).} =
      withBlck(signedBlock):
        when consensusFork >= ConsensusFork.Bellatrix:
          if forkyBlck.message.is_execution_block:
            template payload(): auto = forkyBlck.message.body.execution_payload

            if not payload.block_hash.isZero:
              discard await node.elManager.newExecutionPayload(
                forkyBlck.message)
        else: discard
    optimisticProcessor = initOptimisticProcessor(
      getBeaconTime, optimisticHandler)

    shouldInhibitSync = func(): bool =
      if isNil(node.syncOverseer):
        false
      else:
        not node.syncOverseer.syncInProgress # No LC sync needed if DAG is in sync
    lightClient = createLightClient(
      node.network, rng, config, cfg, forkDigests, getBeaconTime,
      genesis_validators_root, LightClientFinalizationMode.Strict,
      shouldInhibitSync = shouldInhibitSync)

  if config.syncLightClient:
    proc onOptimisticHeader(
        lightClient: LightClient,
        optimisticHeader: ForkedLightClientHeader) =
      if node.optimisticFcuFut != nil:
        return
      withForkyHeader(optimisticHeader):
        when lcDataFork > LightClientDataFork.None:
          let bid = forkyHeader.beacon.toBlockId()
          logScope:
            opt = bid
            dag = node.dag.head.bid
            wallSlot = node.currentSlot
          when lcDataFork >= LightClientDataFork.Capella:
            let
              consensusFork = node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch)
              blockHash = forkyHeader.execution.block_hash

            # Retain optimistic head for other `forkchoiceUpdated` callers.
            # May temporarily block `forkchoiceUpdated` calls, e.g., Geth:
            # - Refuses `newPayload`: "Ignoring payload while snap syncing"
            # - Refuses `fcU`: "Forkchoice requested unknown head"
            # Once DAG sync catches up or as new optimistic heads are fetched,
            # the situation recovers.
            debug "New LC optimistic header"
            node.consensusManager[].setOptimisticHead(bid, blockHash)
            if not node.consensusManager[]
                .shouldSyncOptimistically(node.currentSlot):
              return

            # engine_forkchoiceUpdated
            let beaconHead = node.attestationPool[].getBeaconHead(nil)
            withConsensusFork(consensusFork):
              when lcDataForkAtConsensusFork(consensusFork) == lcDataFork:
                node.optimisticFcuFut = node.elManager.forkchoiceUpdated(
                  headBlockHash = blockHash,
                  safeBlockHash = beaconHead.safeExecutionBlockHash,
                  finalizedBlockHash = beaconHead.finalizedExecutionBlockHash,
                  payloadAttributes = Opt.none consensusFork.PayloadAttributes)
                node.optimisticFcuFut.addCallback do (future: pointer):
                  node.optimisticFcuFut = nil
          else:
            # The execution block hash is only available from Capella onward
            info "Ignoring new LC optimistic header until Capella"

    proc onFinalizedHeader(
        lightClient: LightClient,
        finalizedHeader: ForkedLightClientHeader) =
      if not node.consensusManager[].shouldSyncOptimistically(node.currentSlot):
        return
      node.eventBus.optFinHeaderUpdateQueue.emit(finalizedHeader)

    lightClient.onOptimisticHeader = onOptimisticHeader
    lightClient.onFinalizedHeader = onFinalizedHeader
    lightClient.trustedBlockRoot = config.trustedBlockRoot
  elif config.trustedBlockRoot.isSome:
    warn "Ignoring `trustedBlockRoot`, light client not enabled",
      syncLightClient = config.syncLightClient,
      trustedBlockRoot = config.trustedBlockRoot

  node.optimisticProcessor = optimisticProcessor
  node.lightClient = lightClient
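
# Starts the light client only when `syncLightClient` is enabled in the
# node configuration.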
proc startLightClient*(node: BeaconNode) =
  if not node.config.syncLightClient:
    return

  node.lightClient.start()
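
# Installs gossip message validators for light client topics. Depending on
# configuration, gossip is validated by the full node processor together with
# the light client, by the light client alone, or not at all.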
proc installLightClientMessageValidators*(node: BeaconNode) =
  let eth2Processor =
    if node.config.lightClientDataServe:
      # Process gossip using both full node and light client
      node.processor
    elif node.config.syncLightClient:
      # Only process gossip using light client
      nil
    else:
      # Light client topics will never be subscribed to, no validators needed
      return

  node.lightClient.installMessageValidators(eth2Processor)
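
# Propagates gossip readiness to the light client: a node that serves LC
# data mirrors the DAG's sync status, while a non-serving node always
# reports itself as behind and stays off the LC gossip topics.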
proc updateLightClientGossipStatus*(
    node: BeaconNode, slot: Slot, dagIsBehind: bool) =
  let isBehind =
    if node.config.lightClientDataServe:
      # Forward DAG's readiness to handle light client gossip
      dagIsBehind
    else:
      # Full node is not interested in gossip
      true

  node.lightClient.updateGossipStatus(slot, some isBehind)
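
# Re-anchors the light client at the DAG's finalized head when the DAG is
# further along than the LC (and no explicit trusted block root is in use),
# so the LC does not have to sync a distance the DAG has already covered.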
proc updateLightClientFromDag*(node: BeaconNode) =
  if not node.config.syncLightClient:
    return
  if node.config.trustedBlockRoot.isSome:
    return

  let
    dagHead = node.dag.finalizedHead
    dagPeriod = dagHead.slot.sync_committee_period
  if dagHead.slot < node.dag.cfg.ALTAIR_FORK_EPOCH.start_slot:
    return

  let lcHeader = node.lightClient.finalizedHeader
  withForkyHeader(lcHeader):
    when lcDataFork > LightClientDataFork.None:
      if dagPeriod <= forkyHeader.beacon.slot.sync_committee_period:
        return

  let bdata = node.dag.getForkedBlock(dagHead.blck.bid).valueOr:
    return

  var header: ForkedLightClientHeader
  withBlck(bdata):
    const lcDataFork = lcDataForkAtConsensusFork(consensusFork)
    when lcDataFork > LightClientDataFork.None:
      header = ForkedLightClientHeader.init(
        forkyBlck.toLightClientHeader(lcDataFork))
    else: raiseAssert "Unreachable"

  let current_sync_committee = block:
    let tmpState = assignClone(node.dag.headState)
    node.dag.currentSyncCommitteeForPeriod(tmpState[], dagPeriod).valueOr:
      return
  node.lightClient.resetToFinalizedHeader(header, current_sync_committee)