remove TTD monitoring (#4486)
commit aea7a0c8b8
parent 9cceb1b4a0
@@ -94,6 +94,7 @@ type
     dynamicFeeRecipientsStore*: ref DynamicFeeRecipientsStore
     externalBuilderRegistrations*:
       Table[ValidatorPubKey, SignedValidatorRegistrationV1]
+    mergeAtEpoch*: Epoch

 const
   MaxEmptySlotCount* = uint64(10*60) div SECONDS_PER_SLOT
@@ -224,6 +224,12 @@ type
       desc: "The slashing DB flavour to use"
       name: "slashing-db-kind" .}: SlashingDbKind

+    mergeAtEpoch* {.
+      hidden
+      desc: "Debugging argument not for external use; may be removed at any time"
+      defaultValue: FAR_FUTURE_EPOCH
+      name: "merge-at-epoch-debug-internal" .}: uint64
+
     numThreads* {.
       defaultValue: 0,
       desc: "Number of worker threads (\"0\" = use as many threads as there are CPU cores available)"
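The option added above defaults to FAR_FUTURE_EPOCH, so merge behaviour is unchanged unless the hidden flag is supplied; a minimal, self-contained Nim sketch of that default-is-inert behaviour (the types here are simplified stand-ins, not the nimbus-eth2 definitions):

# Sketch with hypothetical types: with FAR_FUTURE_EPOCH as the default,
# the mergeAtEpoch gate never fires unless the flag is passed explicitly.
type Epoch = distinct uint64

const FAR_FUTURE_EPOCH = Epoch(not 0'u64)  # 2^64 - 1, matching the default above

proc `>=`(a, b: Epoch): bool {.borrow.}

proc mergeForced(currentEpoch, mergeAtEpoch: Epoch): bool =
  ## True only once the node has reached the configured override epoch;
  ## with the FAR_FUTURE_EPOCH default this is never true.
  currentEpoch >= mergeAtEpoch

assert not mergeForced(Epoch(100), FAR_FUTURE_EPOCH)
assert mergeForced(Epoch(100), Epoch(96))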
@@ -131,14 +131,11 @@ type
     eth1Progress: AsyncEvent

     exchangedConfiguration*: bool
-    terminalBlockHash*: Option[BlockHash]

     runFut: Future[void]
     stopFut: Future[void]
     getBeaconTime: GetBeaconTimeFn

-    ttdReachedField: bool
-
   Web3DataProvider* = object
     url: string
     web3: Web3
@@ -188,9 +185,6 @@ declareGauge eth1_finalized_deposits,
 declareGauge eth1_chain_len,
   "The length of the in-memory chain of Eth1 blocks"

-func ttdReached*(m: Eth1Monitor): bool =
-  m.ttdReachedField
-
 template cfg(m: Eth1Monitor): auto =
   m.depositsChain.cfg

@@ -1125,8 +1119,7 @@ proc init*(T: type Eth1Monitor,
            web3Urls: seq[string],
            eth1Network: Option[Eth1Network],
            forcePolling: bool,
-           jwtSecret: Option[seq[byte]],
-           ttdReached: bool): T =
+           jwtSecret: Option[seq[byte]]): T =
   doAssert web3Urls.len > 0
   var web3Urls = web3Urls
   for url in mitems(web3Urls):
@@ -1147,8 +1140,7 @@ proc init*(T: type Eth1Monitor,
     eth1Progress: newAsyncEvent(),
     forcePolling: forcePolling,
     jwtSecret: jwtSecret,
-    blocksPerLogsRequest: targetBlocksPerLogsRequest,
-    ttdReachedField: ttdReached)
+    blocksPerLogsRequest: targetBlocksPerLogsRequest)

 proc safeCancel(fut: var Future[void]) =
   if not fut.isNil and not fut.finished:
@@ -1346,51 +1338,6 @@ func init(T: type FullBlockId, blk: Eth1BlockHeader|BlockObject): T =
 func isNewLastBlock(m: Eth1Monitor, blk: Eth1BlockHeader|BlockObject): bool =
   m.latestEth1Block.isNone or blk.number.uint64 > m.latestEth1BlockNumber

-proc findTerminalBlock(provider: Web3DataProviderRef,
-                       ttd: Uint256): Future[BlockObject] {.async.} =
-  ## Find the first execution block with a difficulty higher than the
-  ## specified `ttd`.
-  var
-    cache = initTable[uint64, BlockObject]()
-    step = -0x4000'i64
-
-  proc next(x: BlockObject): Future[BlockObject] {.async.} =
-    ## Returns the next block that's `step` steps away.
-    let key = uint64(max(int64(x.number) + step, 1))
-    # Check if present in cache.
-    if key in cache:
-      return cache[key]
-    # Not cached, fetch.
-    let value = awaitWithRetries provider.getBlockByNumber(key)
-    cache[key] = value
-    return value
-
-  # Block A follows, B leads.
-  var a = awaitWithRetries(
-    provider.web3.provider.eth_getBlockByNumber("latest", false))
-
-  if a.number.uint64 == 0 and a.totalDifficulty >= ttd:
-    return a
-
-  var b = await next(a)
-
-  while true:
-    let one = a.totalDifficulty >= ttd
-    let two = b.totalDifficulty >= ttd
-    if one != two:
-      step = step div -2i64
-      if step == 0:
-        # Since we can't know in advance from which side the block is
-        # approached, one last check is needed to determine the proper
-        # terminal block.
-        if one: return a
-        else : return b
-    a = b
-    b = await next(b)
-
-  # This is unreachable.
-  doAssert(false)
-
 proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} =
   if m.state == Started:
     return
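For reference, a simplified, synchronous Nim sketch of the bisection the deleted findTerminalBlock performed: difficultyAt is a hypothetical stand-in for the cached eth_getBlockByNumber lookups in the original, and plain block numbers replace BlockObject values.

proc findTerminalNumber(difficultyAt: proc(n: uint64): uint64,
                        latest: uint64, ttd: uint64): uint64 =
  ## Probes the chain with a signed step that halves and reverses direction
  ## each time the two probes `a` and `b` land on opposite sides of the TTD
  ## boundary; terminates when the step shrinks to zero.
  var
    step = -0x4000'i64
    a = latest
  if a == 0 and difficultyAt(a) >= ttd:
    return a
  var b = uint64(max(int64(a) + step, 1))
  while true:
    let one = difficultyAt(a) >= ttd
    let two = difficultyAt(b) >= ttd
    if one != two:
      step = step div (-2'i64)
      if step == 0:
        return (if one: a else: b)
    a = b
    b = uint64(max(int64(b) + step, 1))

when isMainModule:
  # Toy chain where total difficulty grows by 10 per block: the first block
  # with difficulty >= 5_000_000 is block 500_000.
  let diffAt = proc(n: uint64): uint64 = n * 10
  assert findTerminalNumber(diffAt, latest = 1_000_000, ttd = 5_000_000) == 500_000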
@@ -1521,12 +1468,6 @@ proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} =

   debug "Starting Eth1 syncing", `from` = shortLog(m.depositsChain.blocks[^1])

-  let shouldCheckForMergeTransition = block:
-    const FAR_FUTURE_TOTAL_DIFFICULTY =
-      u256"115792089237316195423570985008687907853269984665640564039457584007913129638912"
-    (not m.ttdReachedField) and
-      (m.cfg.TERMINAL_TOTAL_DIFFICULTY != FAR_FUTURE_TOTAL_DIFFICULTY)
-
   var didPollOnce = false
   while true:
     if bnStatus == BeaconNodeStatus.Stopping:
@@ -1566,23 +1507,6 @@ proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} =
       doAssert m.latestEth1Block.isSome
       awaitWithRetries m.dataProvider.getBlockByHash(m.latestEth1Block.get.hash)

-    # TODO when a terminal block hash is configured in cfg.TERMINAL_BLOCK_HASH,
-    #      we should try to fetch that block from the EL - this facility is not
-    #      in use on any current network, but should be implemented for full
-    #      compliance
-    if m.terminalBlockHash.isNone and shouldCheckForMergeTransition:
-      let terminalBlock = await findTerminalBlock(m.dataProvider, m.cfg.TERMINAL_TOTAL_DIFFICULTY)
-      m.terminalBlockHash = some(terminalBlock.hash)
-      m.ttdReachedField = true
-
-      debug "startEth1Syncing: found merge terminal block",
-        currentEpoch = m.currentEpoch,
-        BELLATRIX_FORK_EPOCH = m.cfg.BELLATRIX_FORK_EPOCH,
-        totalDifficulty = $nextBlock.totalDifficulty,
-        ttd = $m.cfg.TERMINAL_TOTAL_DIFFICULTY,
-        terminalBlockHash = m.terminalBlockHash,
-        candidateBlockNumber = distinctBase(terminalBlock.number)
-
     if shouldProcessDeposits:
       if m.latestEth1BlockNumber <= m.cfg.ETH1_FOLLOW_DISTANCE:
         continue
@@ -324,12 +324,6 @@ proc getExecutionValidity(
     blck: bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock |
           eip4844.SignedBeaconBlock):
     Future[NewPayloadStatus] {.async.} =
-  # Eth1 syncing is asynchronous from this
-  # TODO self.consensusManager.eth1Monitor.ttdReached
-  # should gate this when it works more reliably
-  # TODO detect have-TTD-but-not-is_execution_block case, and where
-  # execution payload was non-zero when TTD detection more reliable
-
   if not blck.message.is_execution_block:
     return NewPayloadStatus.valid # vacuously

@@ -571,8 +571,7 @@ proc init*(T: type BeaconNode,
       config.web3Urls,
       eth1Network,
       config.web3ForcePolling,
-      optJwtSecret,
-      ttdReached = not dag.loadExecutionBlockRoot(dag.finalizedHead.blck).isZero)
+      optJwtSecret)

   if config.rpcEnabled.isSome:
     warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it."
@@ -674,7 +673,8 @@ proc init*(T: type BeaconNode,
     # Delay first call by that time to allow for EL syncing to begin; it can
     # otherwise generate an EL warning by claiming a zero merge block.
     Moment.now + chronos.seconds(60),
-    dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()))
+    dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()),
+    mergeAtEpoch: config.mergeAtEpoch.Epoch)

   node.initLightClient(
     rng, cfg, dag.forkDigests, getBeaconTime, dag.genesis_validators_root)
@@ -99,10 +99,7 @@ programMain:
         config.web3Urls,
         metadata.eth1Network,
         forcePolling = false,
-        rng[].loadJwtSecret(config, allowCreate = false),
-        # TTD is not relevant for the light client, so it's safe
-        # to assume that the TTD has been reached.
-        ttdReached = true)
+        rng[].loadJwtSecret(config, allowCreate = false))
       waitFor res.ensureDataProvider()
       res
     else:
@@ -387,10 +387,8 @@ proc getExecutionPayload[T](
     latestHead =
       if not executionBlockRoot.isZero:
         executionBlockRoot
-      elif node.eth1Monitor.terminalBlockHash.isSome:
-        node.eth1Monitor.terminalBlockHash.get.asEth2Digest
       else:
-        default(Eth2Digest)
+        (static(default(Eth2Digest)))
     latestSafe = beaconHead.safeExecutionPayloadHash
     latestFinalized = beaconHead.finalizedExecutionPayloadHash
     lastFcU = node.consensusManager.forkchoiceUpdatedInfo
@@ -472,10 +470,9 @@ proc makeBeaconBlockForHeadAndSlot*[EP](
       let fut = newFuture[Opt[EP]]("given-payload")
       fut.complete(execution_payload)
       fut
-    elif slot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH or
-        not (
-          is_merge_transition_complete(state[]) or
-          ((not node.eth1Monitor.isNil) and node.eth1Monitor.ttdReached)):
+    elif slot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH or not (
+        state[].is_merge_transition_complete or
+        slot.epoch >= node.mergeAtEpoch):
      let fut = newFuture[Opt[EP]]("empty-payload")
      # https://github.com/nim-lang/Nim/issues/19802
      fut.complete(Opt.some(default(EP)))
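The hunk above replaces the eth1Monitor TTD check with the mergeAtEpoch override; a small Nim illustration of the resulting payload choice, written as the negation of the `elif` condition, with simplified stand-in types rather than the nimbus-eth2 definitions:

type Epoch = uint64

proc wantsRealExecutionPayload(slotEpoch, bellatrixForkEpoch, mergeAtEpoch: Epoch,
                               mergeTransitionComplete: bool): bool =
  ## A real payload is requested only at or after the Bellatrix fork, and only
  ## once the state reports the merge transition as complete or the debug-only
  ## mergeAtEpoch override has been reached; otherwise an empty pre-merge
  ## payload is used.
  slotEpoch >= bellatrixForkEpoch and
    (mergeTransitionComplete or slotEpoch >= mergeAtEpoch)

assert not wantsRealExecutionPayload(10, 20, high(uint64), false)
assert wantsRealExecutionPayload(25, 20, high(uint64), true)
assert wantsRealExecutionPayload(25, 20, 24, false)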