accelerate EL sync with LC with `--sync-light-client` (#4041)
When the BN-embedded LC makes sync progress, pass the corresponding execution block hash to the EL via `engine_forkchoiceUpdatedV1`. This allows the EL to sync to wall slot while the chain DAG is behind. Renamed `--light-client` to `--sync-light-client` for clarity, and `--light-client-trusted-block-root` to `--trusted-block-root` for consistency with `nimbus_light_client`. Note that this does not work well in practice at this time: - Geth sticks to the optimistic sync: "Ignoring payload while snap syncing" (when passing the LC head) "Forkchoice requested unknown head" (when updating to LC head) - Nethermind syncs to LC head but does not report ancestors as VALID, so the main forward sync is still stuck in optimistic mode: "Pre-pivot block, ignored and returned Syncing" To aid EL client teams in fixing those issues, having this available as a hidden option is still useful.
This commit is contained in:
parent
2545d1d053
commit
613f4a9a50
2
Makefile
2
Makefile
|
@ -383,7 +383,7 @@ define CONNECT_TO_NETWORK_IN_DEV_MODE
|
|||
--network=$(1) $(3) $(GOERLI_TESTNETS_PARAMS) \
|
||||
--log-level="DEBUG; TRACE:discv5,networking; REQUIRED:none; DISABLED:none" \
|
||||
--data-dir=build/data/shared_$(1)_$(NODE_ID) \
|
||||
--light-client=on \
|
||||
--sync-light-client=on \
|
||||
--dump $(NODE_PARAMS)
|
||||
endef
|
||||
|
||||
|
|
|
@ -17,22 +17,15 @@ import
|
|||
logScope: topics = "beacnde"
|
||||
|
||||
func shouldSyncOptimistically*(node: BeaconNode, wallSlot: Slot): bool =
|
||||
# Check whether light client is used for syncing
|
||||
if node.eth1Monitor == nil:
|
||||
return false
|
||||
let optimisticHeader = node.lightClient.optimisticHeader.valueOr:
|
||||
return false
|
||||
|
||||
# Check whether light client is sufficiently ahead of DAG
|
||||
const minProgress = 8 * SLOTS_PER_EPOCH # Set arbitrarily
|
||||
let dagSlot = getStateField(node.dag.headState, slot)
|
||||
if dagSlot + minProgress > optimisticHeader.slot:
|
||||
return false
|
||||
|
||||
# Check whether light client has synced sufficiently close to wall slot
|
||||
const maxAge = 2 * SLOTS_PER_EPOCH
|
||||
if optimisticHeader.slot < max(wallSlot, maxAge.Slot) - maxAge:
|
||||
return false
|
||||
|
||||
true
|
||||
shouldSyncOptimistically(
|
||||
optimisticSlot = optimisticHeader.slot,
|
||||
dagSlot = getStateField(node.dag.headState, slot),
|
||||
wallSlot = wallSlot)
|
||||
|
||||
proc initLightClient*(
|
||||
node: BeaconNode,
|
||||
|
@ -43,18 +36,44 @@ proc initLightClient*(
|
|||
genesis_validators_root: Eth2Digest) =
|
||||
template config(): auto = node.config
|
||||
|
||||
# Creating a light client is not dependent on `lightClientEnable`
|
||||
# Creating a light client is not dependent on `syncLightClient`
|
||||
# because the light client module also handles gossip subscriptions
|
||||
# for broadcasting light client data as a server.
|
||||
|
||||
let
|
||||
optimisticHandler = proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock):
|
||||
Future[void] {.async.} =
|
||||
debug "New LC optimistic block",
|
||||
info "New LC optimistic block",
|
||||
opt = signedBlock.toBlockId(),
|
||||
dag = node.dag.head.bid,
|
||||
wallSlot = node.currentSlot
|
||||
return
|
||||
withBlck(signedBlock):
|
||||
when stateFork >= BeaconStateFork.Bellatrix:
|
||||
if blck.message.is_execution_block:
|
||||
template payload(): auto = blck.message.body.execution_payload
|
||||
|
||||
let eth1Monitor = node.eth1Monitor
|
||||
if eth1Monitor != nil and not payload.block_hash.isZero:
|
||||
# engine_newPayloadV1
|
||||
discard await eth1Monitor.newExecutionPayload(payload)
|
||||
|
||||
# Retain optimistic head for other `forkchoiceUpdated` callers.
|
||||
# May temporarily block `forkchoiceUpdatedV1` calls, e.g., Geth:
|
||||
# - Refuses `newPayload`: "Ignoring payload while snap syncing"
|
||||
# - Refuses `fcU`: "Forkchoice requested unknown head"
|
||||
# Once DAG sync catches up or as new optimistic heads are fetched
|
||||
# the situation recovers
|
||||
node.consensusManager[].setOptimisticHead(
|
||||
blck.toBlockId(), payload.block_hash)
|
||||
|
||||
# engine_forkchoiceUpdatedV1
|
||||
let beaconHead = node.attestationPool[].getBeaconHead(nil)
|
||||
discard await eth1Monitor.runForkchoiceUpdated(
|
||||
headBlockRoot = payload.block_hash,
|
||||
safeBlockRoot = beaconHead.safeExecutionPayloadHash,
|
||||
finalizedBlockRoot = beaconHead.finalizedExecutionPayloadHash)
|
||||
else: discard
|
||||
|
||||
optimisticProcessor = initOptimisticProcessor(
|
||||
getBeaconTime, optimisticHandler)
|
||||
|
||||
|
@ -62,7 +81,7 @@ proc initLightClient*(
|
|||
node.network, rng, config, cfg, forkDigests, getBeaconTime,
|
||||
genesis_validators_root, LightClientFinalizationMode.Strict)
|
||||
|
||||
if config.lightClientEnable:
|
||||
if config.syncLightClient:
|
||||
proc onFinalizedHeader(
|
||||
lightClient: LightClient, finalizedHeader: BeaconBlockHeader) =
|
||||
optimisticProcessor.setFinalizedHeader(finalizedHeader)
|
||||
|
@ -73,18 +92,18 @@ proc initLightClient*(
|
|||
|
||||
lightClient.onFinalizedHeader = onFinalizedHeader
|
||||
lightClient.onOptimisticHeader = onOptimisticHeader
|
||||
lightClient.trustedBlockRoot = config.lightClientTrustedBlockRoot
|
||||
lightClient.trustedBlockRoot = config.trustedBlockRoot
|
||||
|
||||
elif config.lightClientTrustedBlockRoot.isSome:
|
||||
warn "Ignoring `lightClientTrustedBlockRoot`, light client not enabled",
|
||||
lightClientEnable = config.lightClientEnable,
|
||||
lightClientTrustedBlockRoot = config.lightClientTrustedBlockRoot
|
||||
elif config.trustedBlockRoot.isSome:
|
||||
warn "Ignoring `trustedBlockRoot`, light client not enabled",
|
||||
syncLightClient = config.syncLightClient,
|
||||
trustedBlockRoot = config.trustedBlockRoot
|
||||
|
||||
node.optimisticProcessor = optimisticProcessor
|
||||
node.lightClient = lightClient
|
||||
|
||||
proc startLightClient*(node: BeaconNode) =
|
||||
if not node.config.lightClientEnable:
|
||||
if not node.config.syncLightClient:
|
||||
return
|
||||
|
||||
node.lightClient.start()
|
||||
|
@ -94,7 +113,7 @@ proc installLightClientMessageValidators*(node: BeaconNode) =
|
|||
if node.config.lightClientDataServe:
|
||||
# Process gossip using both full node and light client
|
||||
node.processor
|
||||
elif node.config.lightClientEnable:
|
||||
elif node.config.syncLightClient:
|
||||
# Only process gossip using light client
|
||||
nil
|
||||
else:
|
||||
|
@ -116,9 +135,9 @@ proc updateLightClientGossipStatus*(
|
|||
node.lightClient.updateGossipStatus(slot, some isBehind)
|
||||
|
||||
proc updateLightClientFromDag*(node: BeaconNode) =
|
||||
if not node.config.lightClientEnable:
|
||||
if not node.config.syncLightClient:
|
||||
return
|
||||
if node.config.lightClientTrustedBlockRoot.isSome:
|
||||
if node.config.trustedBlockRoot.isSome:
|
||||
return
|
||||
|
||||
let
|
||||
|
|
|
@ -284,16 +284,16 @@ type
|
|||
desc: "Weak subjectivity checkpoint in the format block_root:epoch_number"
|
||||
name: "weak-subjectivity-checkpoint" .}: Option[Checkpoint]
|
||||
|
||||
lightClientEnable* {.
|
||||
syncLightClient* {.
|
||||
hidden
|
||||
desc: "BETA: Accelerate sync using light client."
|
||||
desc: "Accelerate sync using light client"
|
||||
defaultValue: false
|
||||
name: "light-client" .}: bool
|
||||
name: "sync-light-client" .}: bool
|
||||
|
||||
lightClientTrustedBlockRoot* {.
|
||||
trustedBlockRoot* {.
|
||||
hidden
|
||||
desc: "BETA: Recent trusted finalized block root to initialize light client from."
|
||||
name: "light-client-trusted-block-root" .}: Option[Eth2Digest]
|
||||
desc: "Recent trusted finalized block root to initialize light client from"
|
||||
name: "trusted-block-root" .}: Option[Eth2Digest]
|
||||
|
||||
finalizedCheckpointState* {.
|
||||
desc: "SSZ file specifying a recent finalized state"
|
||||
|
|
|
@ -740,7 +740,7 @@ type BeaconHead* = object
|
|||
safeExecutionPayloadHash*, finalizedExecutionPayloadHash*: Eth2Digest
|
||||
|
||||
proc getBeaconHead*(
|
||||
pool: var AttestationPool, headBlock: BlockRef): BeaconHead =
|
||||
pool: AttestationPool, headBlock: BlockRef): BeaconHead =
|
||||
let
|
||||
finalizedExecutionPayloadHash =
|
||||
pool.dag.loadExecutionBlockRoot(pool.dag.finalizedHead.blck)
|
||||
|
|
|
@ -56,6 +56,7 @@ type
|
|||
# Tracking last proposal forkchoiceUpdated payload information
|
||||
# ----------------------------------------------------------------
|
||||
forkchoiceUpdatedInfo*: Opt[ForkchoiceUpdatedInformation]
|
||||
optimisticHead: tuple[bid: BlockId, execution_block_hash: Eth2Digest]
|
||||
|
||||
# Initialization
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -117,6 +118,45 @@ from web3/engine_api_types import
|
|||
|
||||
func `$`(h: BlockHash): string = $h.asEth2Digest
|
||||
|
||||
func shouldSyncOptimistically*(
|
||||
optimisticSlot, dagSlot, wallSlot: Slot): bool =
|
||||
## Determine whether an optimistic execution block hash should be reported
|
||||
## to the EL client instead of the current head as determined by fork choice.
|
||||
|
||||
# Check whether optimistic head is sufficiently ahead of DAG
|
||||
const minProgress = 8 * SLOTS_PER_EPOCH # Set arbitrarily
|
||||
if optimisticSlot < dagSlot or optimisticSlot - dagSlot < minProgress:
|
||||
return false
|
||||
|
||||
# Check whether optimistic head has synced sufficiently close to wall slot
|
||||
const maxAge = 2 * SLOTS_PER_EPOCH # Set arbitrarily
|
||||
if optimisticSlot < max(wallSlot, maxAge.Slot) - maxAge:
|
||||
return false
|
||||
|
||||
true
|
||||
|
||||
func shouldSyncOptimistically*(self: ConsensusManager, wallSlot: Slot): bool =
|
||||
if self.eth1Monitor == nil:
|
||||
return false
|
||||
if self.optimisticHead.execution_block_hash.isZero:
|
||||
return false
|
||||
|
||||
shouldSyncOptimistically(
|
||||
optimisticSlot = self.optimisticHead.bid.slot,
|
||||
dagSlot = getStateField(self.dag.headState, slot),
|
||||
wallSlot = wallSlot)
|
||||
|
||||
func optimisticHead*(self: ConsensusManager): BlockId =
|
||||
self.optimisticHead.bid
|
||||
|
||||
func optimisticExecutionPayloadHash*(self: ConsensusManager): Eth2Digest =
|
||||
self.optimisticHead.execution_block_hash
|
||||
|
||||
func setOptimisticHead*(
|
||||
self: var ConsensusManager,
|
||||
bid: BlockId, execution_block_hash: Eth2Digest) =
|
||||
self.optimisticHead = (bid: bid, execution_block_hash: execution_block_hash)
|
||||
|
||||
proc runForkchoiceUpdated*(
|
||||
eth1Monitor: Eth1Monitor,
|
||||
headBlockRoot, safeBlockRoot, finalizedBlockRoot: Eth2Digest):
|
||||
|
@ -158,6 +198,12 @@ proc runForkchoiceUpdated*(
|
|||
err = err.msg
|
||||
return PayloadExecutionStatus.syncing
|
||||
|
||||
proc runForkchoiceUpdatedDiscardResult*(
|
||||
eth1Monitor: Eth1Monitor,
|
||||
headBlockRoot, safeBlockRoot, finalizedBlockRoot: Eth2Digest) {.async.} =
|
||||
discard await eth1Monitor.runForkchoiceUpdated(
|
||||
headBlockRoot, safeBlockRoot, finalizedBlockRoot)
|
||||
|
||||
proc updateExecutionClientHead(self: ref ConsensusManager, newHead: BeaconHead)
|
||||
{.async.} =
|
||||
if self.eth1Monitor.isNil:
|
||||
|
|
|
@ -422,7 +422,7 @@ proc get_head*(self: var ForkChoice,
|
|||
self.checkpoints.proposer_boost_root)
|
||||
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/fork_choice/safe-block.md#get_safe_beacon_block_root
|
||||
func get_safe_beacon_block_root*(self: var ForkChoice): Eth2Digest =
|
||||
func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest =
|
||||
# Use most recent justified block as a stopgap
|
||||
self.checkpoints.justified.checkpoint.root
|
||||
|
||||
|
|
|
@ -17,8 +17,9 @@ import
|
|||
../sszdump
|
||||
|
||||
from ../consensus_object_pools/consensus_manager import
|
||||
ConsensusManager, runForkchoiceUpdated, runProposalForkchoiceUpdated,
|
||||
updateHead, updateHeadWithExecution
|
||||
ConsensusManager, optimisticExecutionPayloadHash, runForkchoiceUpdated,
|
||||
runForkchoiceUpdatedDiscardResult, runProposalForkchoiceUpdated,
|
||||
shouldSyncOptimistically, updateHead, updateHeadWithExecution
|
||||
from ../beacon_clock import GetBeaconTimeFn, toFloatSeconds
|
||||
from ../consensus_object_pools/block_dag import BlockRef, root, slot
|
||||
from ../consensus_object_pools/block_pools_types import BlockError, EpochRef
|
||||
|
@ -294,25 +295,48 @@ proc storeBlock*(
|
|||
wallSlot.start_beacon_time)
|
||||
|
||||
if newHead.isOk:
|
||||
let headExecutionPayloadHash =
|
||||
self.consensusManager.dag.loadExecutionBlockRoot(newHead.get.blck)
|
||||
if headExecutionPayloadHash.isZero:
|
||||
# Blocks without execution payloads can't be optimistic.
|
||||
self.consensusManager[].updateHead(newHead.get.blck)
|
||||
elif not self.consensusManager.dag.is_optimistic newHead.get.blck.root:
|
||||
# Not `NOT_VALID`; either `VALID` or `INVALIDATED`, but latter wouldn't
|
||||
# be selected as head, so `VALID`. `forkchoiceUpdated` necessary for EL
|
||||
# client only.
|
||||
self.consensusManager[].updateHead(newHead.get.blck)
|
||||
asyncSpawn self.consensusManager.eth1Monitor.expectValidForkchoiceUpdated(
|
||||
headExecutionPayloadHash,
|
||||
newHead.get.safeExecutionPayloadHash,
|
||||
newHead.get.finalizedExecutionPayloadHash)
|
||||
template eth1Monitor(): auto = self.consensusManager.eth1Monitor
|
||||
if self.consensusManager[].shouldSyncOptimistically(wallSlot):
|
||||
# Optimistic head is far in the future; report it as head block to EL.
|
||||
|
||||
# TODO remove redundant fcU in case of proposal
|
||||
asyncSpawn self.consensusManager.runProposalForkchoiceUpdated()
|
||||
# Note that the specification allows an EL client to skip fcU processing
|
||||
# if an update to an ancestor is requested.
|
||||
# > Client software MAY skip an update of the forkchoice state and MUST
|
||||
# NOT begin a payload build process if `forkchoiceState.headBlockHash`
|
||||
# references an ancestor of the head of canonical chain.
|
||||
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#engine_forkchoiceupdatedv1
|
||||
#
|
||||
# However, in practice, an EL client may not have completed importing all
|
||||
# block headers, so may be unaware of a block's ancestor status.
|
||||
# Therefore, hopping back and forth between the optimistic head and the
|
||||
# chain DAG head does not work well in practice, e.g., Geth:
|
||||
# - "Beacon chain gapped" from DAG head to optimistic head,
|
||||
# - followed by "Beacon chain reorged" from optimistic head back to DAG.
|
||||
self.consensusManager[].updateHead(newHead.get.blck)
|
||||
asyncSpawn eth1Monitor.runForkchoiceUpdatedDiscardResult(
|
||||
headBlockRoot = self.consensusManager[].optimisticExecutionPayloadHash,
|
||||
safeBlockRoot = newHead.get.safeExecutionPayloadHash,
|
||||
finalizedBlockRoot = newHead.get.finalizedExecutionPayloadHash)
|
||||
else:
|
||||
asyncSpawn self.consensusManager.updateHeadWithExecution(newHead.get)
|
||||
let headExecutionPayloadHash =
|
||||
self.consensusManager.dag.loadExecutionBlockRoot(newHead.get.blck)
|
||||
if headExecutionPayloadHash.isZero:
|
||||
# Blocks without execution payloads can't be optimistic.
|
||||
self.consensusManager[].updateHead(newHead.get.blck)
|
||||
elif not self.consensusManager.dag.is_optimistic newHead.get.blck.root:
|
||||
# Not `NOT_VALID`; either `VALID` or `INVALIDATED`, but latter wouldn't
|
||||
# be selected as head, so `VALID`. `forkchoiceUpdated` necessary for EL
|
||||
# client only.
|
||||
self.consensusManager[].updateHead(newHead.get.blck)
|
||||
asyncSpawn eth1Monitor.expectValidForkchoiceUpdated(
|
||||
headBlockRoot = headExecutionPayloadHash,
|
||||
safeBlockRoot = newHead.get.safeExecutionPayloadHash,
|
||||
finalizedBlockRoot = newHead.get.finalizedExecutionPayloadHash)
|
||||
|
||||
# TODO remove redundant fcU in case of proposal
|
||||
asyncSpawn self.consensusManager.runProposalForkchoiceUpdated()
|
||||
else:
|
||||
asyncSpawn self.consensusManager.updateHeadWithExecution(newHead.get)
|
||||
else:
|
||||
warn "Head selection failed, using previous head",
|
||||
head = shortLog(self.consensusManager.dag.head), wallSlot
|
||||
|
|
|
@ -1285,13 +1285,21 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
|
|||
# above, this will be done just before the next slot starts
|
||||
await node.updateGossipStatus(slot + 1)
|
||||
|
||||
func syncStatus(node: BeaconNode): string =
|
||||
func syncStatus(node: BeaconNode, wallSlot: Slot): string =
|
||||
let optimistic_head = node.dag.is_optimistic(node.dag.head.root)
|
||||
if node.syncManager.inProgress:
|
||||
if optimistic_head:
|
||||
node.syncManager.syncStatus & "/opt"
|
||||
else:
|
||||
node.syncManager.syncStatus
|
||||
let
|
||||
optimisticSuffix =
|
||||
if optimistic_head:
|
||||
"/opt"
|
||||
else:
|
||||
""
|
||||
lightClientSuffix =
|
||||
if node.consensusManager[].shouldSyncOptimistically(wallSlot):
|
||||
" - lc: " & $shortLog(node.consensusManager[].optimisticHead)
|
||||
else:
|
||||
""
|
||||
node.syncManager.syncStatus & optimisticSuffix & lightClientSuffix
|
||||
elif node.backfiller.inProgress:
|
||||
"backfill: " & node.backfiller.syncStatus
|
||||
elif optimistic_head:
|
||||
|
@ -1318,7 +1326,7 @@ proc onSlotStart(node: BeaconNode, wallTime: BeaconTime,
|
|||
info "Slot start",
|
||||
slot = shortLog(wallSlot),
|
||||
epoch = shortLog(wallSlot.epoch),
|
||||
sync = node.syncStatus(),
|
||||
sync = node.syncStatus(wallSlot),
|
||||
peers = len(node.network.peerPool),
|
||||
head = shortLog(node.dag.head),
|
||||
finalized = shortLog(getStateField(
|
||||
|
@ -1751,7 +1759,7 @@ when not defined(windows):
|
|||
formatGwei(node.attachedValidatorBalanceTotal)
|
||||
|
||||
of "sync_status":
|
||||
node.syncStatus()
|
||||
node.syncStatus(node.currentSlot)
|
||||
else:
|
||||
# We ignore typos for now and just render the expression
|
||||
# as it was written. TODO: come up with a good way to show
|
||||
|
|
|
@ -81,7 +81,7 @@ programMain:
|
|||
if blck.message.is_execution_block:
|
||||
template payload(): auto = blck.message.body.execution_payload
|
||||
|
||||
if eth1Monitor != nil:
|
||||
if eth1Monitor != nil and not payload.block_hash.isZero:
|
||||
await eth1Monitor.ensureDataProvider()
|
||||
|
||||
# engine_newPayloadV1
|
||||
|
@ -93,7 +93,6 @@ programMain:
|
|||
safeBlockRoot = payload.block_hash, # stub value
|
||||
finalizedBlockRoot = ZERO_HASH)
|
||||
else: discard
|
||||
return
|
||||
optimisticProcessor = initOptimisticProcessor(
|
||||
getBeaconTime, optimisticHandler)
|
||||
|
||||
|
@ -139,7 +138,8 @@ programMain:
|
|||
lightClient.trustedBlockRoot = some config.trustedBlockRoot
|
||||
|
||||
# Full blocks gossip is required to portably drive an EL client:
|
||||
# - EL clients may not sync when only driven with `forkChoiceUpdated`
|
||||
# - EL clients may not sync when only driven with `forkChoiceUpdated`,
|
||||
# e.g., Geth: "Forkchoice requested unknown head"
|
||||
# - `newPayload` requires the full `ExecutionPayload` (most of block content)
|
||||
# - `ExecutionPayload` block root is not available in `BeaconBlockHeader`,
|
||||
# so won't be exchanged via light client gossip
|
||||
|
|
|
@ -35,6 +35,8 @@ The following options are available:
|
|||
--secrets-dir A directory containing validator keystore passwords.
|
||||
--wallets-dir A directory containing wallet files.
|
||||
--web3-url One or more execution layer Web3 provider URLs.
|
||||
--require-engine-api-in-bellatrix Require Nimbus to be configured with an Engine API end-point after the Bellatrix
|
||||
fork epoch [=true].
|
||||
--non-interactive Do not display interactive prompts. Quit on missing configuration.
|
||||
--netkey-file Source of network (secp256k1) private key file (random|<path>) [=random].
|
||||
--insecure-netkey-password Use pre-generated INSECURE password for network private key file [=false].
|
||||
|
|
|
@ -986,7 +986,7 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
|
|||
--keymanager-token-file="${DATA_DIR}/keymanager-token" \
|
||||
--rest-port="$(( BASE_REST_PORT + NUM_NODE ))" \
|
||||
--metrics-port="$(( BASE_METRICS_PORT + NUM_NODE ))" \
|
||||
--light-client=on \
|
||||
--sync-light-client=on \
|
||||
${EXTRA_ARGS} \
|
||||
&> "${DATA_DIR}/log${NUM_NODE}.txt" &
|
||||
|
||||
|
|
Loading…
Reference in New Issue