nimbus-eth2/beacon_chain/beacon_node_light_client.nim

# beacon_chain
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  chronicles, web3/engine_api_types,
  ./beacon_node

logScope: topics = "beacnde"

func shouldSyncOptimistically*(node: BeaconNode, wallSlot: Slot): bool =
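  ## Decide, from the light client's optimistic header slot, the DAG head
  ## slot and the wall slot, whether the node should sync optimistically
  ## based on light client data.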
  let optimisticHeader = node.lightClient.optimisticHeader
  withForkyHeader(optimisticHeader):
    when lcDataFork > LightClientDataFork.None:
      shouldSyncOptimistically(
        optimisticSlot = forkyHeader.beacon.slot,
        dagSlot = getStateField(node.dag.headState, slot),
        wallSlot = wallSlot)
    else:
      false

proc initLightClient*(
    node: BeaconNode,
    rng: ref HmacDrbgContext,
    cfg: RuntimeConfig,
    forkDigests: ref ForkDigests,
    getBeaconTime: GetBeaconTimeFn,
    genesis_validators_root: Eth2Digest) =
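  ## Create the light client and the optimistic block processor and attach
  ## them to `node`. Call `startLightClient` to begin syncing and
  ## `installLightClientMessageValidators` to hook up gossip validation.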
  template config(): auto = node.config

  # Creating a light client is not dependent on `syncLightClient`
  # because the light client module also handles gossip subscriptions
  # for broadcasting light client data as a server.
  let
    optimisticHandler = proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock):
        Future[void] {.async.} =
      debug "New LC optimistic block",
        opt = signedBlock.toBlockId(),
        dag = node.dag.head.bid,
        wallSlot = node.currentSlot

      withBlck(signedBlock):
        when consensusFork >= ConsensusFork.Bellatrix:
          if forkyBlck.message.is_execution_block:
            template blckPayload(): auto =
              forkyBlck.message.body.execution_payload

            if not blckPayload.block_hash.isZero:
              # engine_newPayloadV1
              discard await node.elManager.newExecutionPayload(
                forkyBlck.message)

              # Retain optimistic head for other `forkchoiceUpdated` callers.
              # May temporarily block `forkchoiceUpdatedV1` calls, e.g., Geth:
              # - Refuses `newPayload`: "Ignoring payload while snap syncing"
              # - Refuses `fcU`: "Forkchoice requested unknown head"
              # Once DAG sync catches up or as new optimistic heads are fetched
              # the situation recovers
              node.consensusManager[].setOptimisticHead(
                forkyBlck.toBlockId(), blckPayload.block_hash)

              # engine_forkchoiceUpdatedV1, V2 or V3, depending on the fork
              # of the payload (Bellatrix, Capella or Deneb)
              let beaconHead = node.attestationPool[].getBeaconHead(nil)
              template callForkchoiceUpdated(attributes: untyped) =
                discard await node.elManager.forkchoiceUpdated(
                  headBlockHash = blckPayload.block_hash,
                  safeBlockHash = beaconHead.safeExecutionPayloadHash,
                  finalizedBlockHash = beaconHead.finalizedExecutionPayloadHash,
                  payloadAttributes = none attributes)
              case node.dag.cfg.consensusForkAtEpoch(
                  forkyBlck.message.slot.epoch)
              of ConsensusFork.Deneb:
                callForkchoiceUpdated(PayloadAttributesV3)
              of ConsensusFork.Capella:
                # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/shanghai.md#specification-1
                # Consensus layer client MUST call this method instead of
                # `engine_forkchoiceUpdatedV1` under any of the following
                # conditions:
                # `headBlockHash` references a block which `timestamp` is
                # greater or equal to the Shanghai timestamp
                callForkchoiceUpdated(PayloadAttributesV2)
              of ConsensusFork.Bellatrix:
                callForkchoiceUpdated(PayloadAttributesV1)
              of ConsensusFork.Phase0, ConsensusFork.Altair:
                discard
        else: discard

    optimisticProcessor = initOptimisticProcessor(
      getBeaconTime, optimisticHandler)

    lightClient = createLightClient(
      node.network, rng, config, cfg, forkDigests, getBeaconTime,
      genesis_validators_root, LightClientFinalizationMode.Strict)

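  # The optimistic header callback is only wired up when light client sync is
  # enabled; otherwise a configured `trustedBlockRoot` has no effect and a
  # warning is logged below.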
  if config.syncLightClient:
    proc onOptimisticHeader(
        lightClient: LightClient,
        optimisticHeader: ForkedLightClientHeader) =
      withForkyHeader(optimisticHeader):
        when lcDataFork > LightClientDataFork.None:
          optimisticProcessor.setOptimisticHeader(forkyHeader.beacon)

    lightClient.onOptimisticHeader = onOptimisticHeader
    lightClient.trustedBlockRoot = config.trustedBlockRoot
  elif config.trustedBlockRoot.isSome:
    warn "Ignoring `trustedBlockRoot`, light client not enabled",
      syncLightClient = config.syncLightClient,
      trustedBlockRoot = config.trustedBlockRoot

  node.optimisticProcessor = optimisticProcessor
  node.lightClient = lightClient

proc startLightClient*(node: BeaconNode) =
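  ## Start the light client, if light client sync is enabled.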
  if not node.config.syncLightClient:
    return

  node.lightClient.start()

proc installLightClientMessageValidators*(node: BeaconNode) =
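  ## Install gossip message validators for the light client topics; which
  ## processor is used depends on whether this node serves and/or syncs
  ## light client data.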
  let eth2Processor =
    if node.config.lightClientDataServe:
      # Process gossip using both full node and light client
      node.processor
    elif node.config.syncLightClient:
      # Only process gossip using light client
      nil
    else:
      # Light client topics will never be subscribed to, no validators needed
      return

  node.lightClient.installMessageValidators(eth2Processor)

proc updateLightClientGossipStatus*(
    node: BeaconNode, slot: Slot, dagIsBehind: bool) =
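  ## Update the light client's gossip subscription status for `slot`; the
  ## DAG's readiness is only forwarded when this node serves light client data.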
  let isBehind =
    if node.config.lightClientDataServe:
      # Forward DAG's readiness to handle light client gossip
      dagIsBehind
    else:
      # Full node is not interested in gossip
      true

  node.lightClient.updateGossipStatus(slot, some isBehind)

proc updateLightClientFromDag*(node: BeaconNode) =
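  ## Advance the light client to the DAG's finalized head when light client
  ## sync is enabled, no `trustedBlockRoot` is configured, and the DAG is at
  ## least one sync committee period ahead of the light client.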
  if not node.config.syncLightClient:
    return
  if node.config.trustedBlockRoot.isSome:
    return

  let
    dagHead = node.dag.finalizedHead
    dagPeriod = dagHead.slot.sync_committee_period
  if dagHead.slot < node.dag.cfg.ALTAIR_FORK_EPOCH.start_slot:
    return

  let lcHeader = node.lightClient.finalizedHeader
  withForkyHeader(lcHeader):
    when lcDataFork > LightClientDataFork.None:
      if dagPeriod <= forkyHeader.beacon.slot.sync_committee_period:
        return

  let bdata = node.dag.getForkedBlock(dagHead.blck.bid).valueOr:
    return

  var header {.noinit.}: ForkedLightClientHeader
  withBlck(bdata):
    const lcDataFork = lcDataForkAtConsensusFork(consensusFork)
    when lcDataFork > LightClientDataFork.None:
      header = ForkedLightClientHeader(kind: lcDataFork)
      header.forky(lcDataFork) = forkyBlck.toLightClientHeader(lcDataFork)
    else: raiseAssert "Unreachable"
  let current_sync_committee = block:
    let tmpState = assignClone(node.dag.headState)
    node.dag.currentSyncCommitteeForPeriod(tmpState[], dagPeriod).valueOr:
      return
  node.lightClient.resetToFinalizedHeader(header, current_sync_committee)