Remove web3-mode, always keep web3 monitor enabled when given (#3042)

* Init some components fully before BeaconNode, per the component dependency graph
* remove `--web3-mode` option
* fixes https://github.com/status-im/nimbus-eth2/issues/2685
* reshuffle some beacon node init code
Jacek Sieka 2021-11-01 15:50:24 +01:00 committed by GitHub
parent 74f2350a2c
commit d6cd1cd46c
4 changed files with 81 additions and 106 deletions

View File

@@ -65,11 +65,6 @@ type
   Web3Cmd* {.pure.} = enum
     test = "Test a web3 provider"
 
-  Web3Mode* {.pure.} = enum
-    auto # Enabled only when validators are attached
-    enabled # Always enabled
-    disabled # Always disabled
-
   SlashingDbKind* {.pure.} = enum
     v1
     v2
@@ -119,12 +114,6 @@ type
       desc: "One or more Web3 provider URLs used for obtaining deposit contract data"
       name: "web3-url" }: seq[string]
 
-    web3Mode* {.
-      hidden
-      defaultValue: Web3Mode.auto
-      desc: "URL of the Web3 server to observe Eth1"
-      name: "web3-mode" }: Web3Mode
-
     nonInteractive* {.
      desc: "Do not display interative prompts. Quit on missing configuration"
      name: "non-interactive" }: bool

View File

@@ -1151,7 +1151,7 @@ proc start(m: Eth1Monitor, delayBeforeStart: Duration) =
     if runFut.failed:
       if runFut.error[] of CatchableError:
         if runFut == m.runFut:
-          error "Eth1 chain monitoring failure, restarting", err = runFut.error.msg
+          warn "Eth1 chain monitoring failure, restarting", err = runFut.error.msg
           m.state = Failed
       else:
         fatal "Fatal exception reached", err = runFut.error.msg

View File

@@ -217,13 +217,13 @@ const
   clientId* = "Nimbus beacon node " & fullVersionStr
   nodeMetadataFilename = "node-metadata.json"
 
-  NewPeerScore* = 200
+  NewPeerScore = 200
     ## Score which will be assigned to new connected Peer
-  PeerScoreLowLimit* = 0
+  PeerScoreLowLimit = 0
     ## Score after which peer will be kicked
-  PeerScoreHighLimit* = 1000
+  PeerScoreHighLimit = 1000
     ## Max value of peer's score
-  PeerScoreInvalidRequest* = -500
+  PeerScoreInvalidRequest = -500
     ## This peer is sending malformed or nonsensical data
 
   ConcurrentConnections = 20
@@ -1363,6 +1363,26 @@ proc new*(T: type Eth2Node, config: BeaconNodeConf, runtimeCfg: RuntimeConfig,
   switch.addConnEventHandler(peerHook, ConnEventKind.Connected)
   switch.addConnEventHandler(peerHook, ConnEventKind.Disconnected)
 
+  proc scoreCheck(peer: Peer): bool =
+    peer.score >= PeerScoreLowLimit
+
+  proc onDeletePeer(peer: Peer) =
+    if peer.connectionState notin {ConnectionState.Disconnecting,
+                                   ConnectionState.Disconnected}:
+      if peer.score < PeerScoreLowLimit:
+        debug "Peer was removed from PeerPool due to low score", peer = peer,
+              peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
+              score_high_limit = PeerScoreHighLimit
+        asyncSpawn(peer.disconnect(PeerScoreLow))
+      else:
+        debug "Peer was removed from PeerPool", peer = peer,
+              peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
+              score_high_limit = PeerScoreHighLimit
+        asyncSpawn(peer.disconnect(FaultOrError)) # Shouldn't actually happen!
+
+  node.peerPool.setScoreCheck(scoreCheck)
+  node.peerPool.setOnDeletePeer(onDeletePeer)
+
   node
 
 template publicKey*(node: Eth2Node): keys.PublicKey =
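
The one-line scoreCheck registered here is behaviorally identical to the if/else form deleted from the old startSyncManager further down; the try/except wrappers around disconnect are also gone, which suggests disconnect no longer needs shielding from Exception at these call sites:

  # Removed form (old startSyncManager):
  proc scoreCheck(peer: Peer): bool =
    if peer.score < PeerScoreLowLimit:
      false
    else:
      true

  # New form (registered above): the same predicate, stated directly.
  proc scoreCheck(peer: Peer): bool =
    peer.score >= PeerScoreLowLimit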

View File

@@ -79,6 +79,9 @@ template init(T: type RestServerRef, ip: ValidIpAddress, port: Port): T =
       reason = res.error()
     nil
   else:
+    notice "Starting REST HTTP server",
+      url = "http://" & $ip & ":" & $port & "/"
+
     res.get()
 
 # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
@@ -376,6 +379,15 @@ proc init*(T: type BeaconNode,
   info "Loading slashing protection database (v2)",
     path = config.validatorsDir()
 
+  func getLocalHeadSlot(): Slot =
+    dag.head.slot
+
+  proc getLocalWallSlot(): Slot =
+    beaconClock.now.slotOrZero
+
+  func getFirstSlotAtFinalizedEpoch(): Slot =
+    dag.finalizedHead.slot
+
   let
     slashingProtectionDB =
       SlashingProtectionDB.init(
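
Note that these getters now close over init-time locals (dag, beaconClock) rather than reading fields off a finished node object, which is what lets the sync manager in the next hunk be constructed before the BeaconNode itself — the dependency-graph point from the commit message:

  # Before (inside startSyncManager, after the node was fully built):
  func getLocalHeadSlot(): Slot =
    node.dag.head.slot

  # After (inside BeaconNode.init, capturing the local `dag`):
  func getLocalHeadSlot(): Slot =
    dag.head.slot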
@@ -393,6 +405,9 @@ proc init*(T: type BeaconNode,
       config.doppelgangerDetection,
       blockProcessor, dag, attestationPool, exitPool, validatorPool,
       syncCommitteeMsgPool, quarantine, rng, getBeaconTime, taskpool)
+    syncManager = newSyncManager[Peer, PeerID](
+      network.peerPool, getLocalHeadSlot, getLocalWallSlot,
+      getFirstSlotAtFinalizedEpoch, blockProcessor, chunkSize = 32)
 
   var node = BeaconNode(
     nickname: nickname,
@@ -402,22 +417,23 @@ proc init*(T: type BeaconNode,
     netKeys: netKeys,
     db: db,
     config: config,
+    attachedValidators: validatorPool,
     dag: dag,
-    gossipState: GossipState.Disconnected,
     quarantine: quarantine,
     attestationPool: attestationPool,
     syncCommitteeMsgPool: syncCommitteeMsgPool,
-    attachedValidators: validatorPool,
     exitPool: exitPool,
     eth1Monitor: eth1Monitor,
     rpcServer: rpcServer,
     restServer: restServer,
     eventBus: eventBus,
+    requestManager: RequestManager.init(network, blockProcessor),
+    syncManager: syncManager,
     actionTracker: ActionTracker.init(rng, config.subscribeAllSubnets),
     processor: processor,
     blockProcessor: blockProcessor,
     consensusManager: consensusManager,
-    requestManager: RequestManager.init(network, blockProcessor),
+    gossipState: GossipState.Disconnected,
     beaconClock: beaconClock,
     taskpool: taskpool,
     onAttestationSent: onAttestationSent,
@@ -938,47 +954,6 @@ proc runOnSecondLoop(node: BeaconNode) {.async.} =
     ticks_delay.set(sleepTime.nanoseconds.float / nanosecondsIn1s)
     trace "onSecond task completed", sleepTime, processingTime
 
-proc startSyncManager(node: BeaconNode) =
-  func getLocalHeadSlot(): Slot =
-    node.dag.head.slot
-
-  proc getLocalWallSlot(): Slot =
-    node.beaconClock.now.slotOrZero
-
-  func getFirstSlotAtFinalizedEpoch(): Slot =
-    node.dag.finalizedHead.slot
-
-  proc scoreCheck(peer: Peer): bool =
-    if peer.score < PeerScoreLowLimit:
-      false
-    else:
-      true
-
-  proc onDeletePeer(peer: Peer) =
-    if peer.connectionState notin {ConnectionState.Disconnecting,
-                                   ConnectionState.Disconnected}:
-      if peer.score < PeerScoreLowLimit:
-        debug "Peer was removed from PeerPool due to low score", peer = peer,
-              peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
-              score_high_limit = PeerScoreHighLimit
-        asyncSpawn(try: peer.disconnect(PeerScoreLow)
-                   except Exception as exc: raiseAssert exc.msg) # Shouldn't actually happen!
-      else:
-        debug "Peer was removed from PeerPool", peer = peer,
-              peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
-              score_high_limit = PeerScoreHighLimit
-        asyncSpawn(try: peer.disconnect(FaultOrError)
-                   except Exception as exc: raiseAssert exc.msg) # Shouldn't actually happen!
-
-  node.network.peerPool.setScoreCheck(scoreCheck)
-  node.network.peerPool.setOnDeletePeer(onDeletePeer)
-
-  node.syncManager = newSyncManager[Peer, PeerID](
-    node.network.peerPool, getLocalHeadSlot, getLocalWallSlot,
-    getFirstSlotAtFinalizedEpoch, node.blockProcessor, chunkSize = 32
-  )
-  node.syncManager.start()
-
 func connectedPeersCount(node: BeaconNode): int =
   len(node.network.peerPool)
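
Nothing deleted in this hunk is lost; each piece reappears elsewhere in the commit. A quick map, in comments only:

  # getLocalHeadSlot / getLocalWallSlot / getFirstSlotAtFinalizedEpoch ->
  #   recreated as closures in BeaconNode.init (hunk above)
  # scoreCheck / onDeletePeer -> registered in Eth2Node.new (eth2_network hunk)
  # newSyncManager(...)       -> constructed in BeaconNode.init, stored in node
  # node.syncManager.start()  -> now called from run() (hunk below)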
@@ -1110,33 +1085,28 @@ proc stop*(node: BeaconNode) =
   notice "Databases closed"
 
 proc run*(node: BeaconNode) {.raises: [Defect, CatchableError].} =
-  if bnStatus == BeaconNodeStatus.Starting:
-    # it might have been set to "Stopping" with Ctrl+C
-    bnStatus = BeaconNodeStatus.Running
+  bnStatus = BeaconNodeStatus.Running
 
-    if not(isNil(node.rpcServer)):
-      node.rpcServer.installRpcHandlers(node)
-      node.rpcServer.start()
+  if not(isNil(node.rpcServer)):
+    node.rpcServer.installRpcHandlers(node)
+    node.rpcServer.start()
 
-    if not(isNil(node.restServer)):
-      node.restServer.installRestHandlers(node)
-      node.restServer.start()
+  if not(isNil(node.restServer)):
+    node.restServer.installRestHandlers(node)
+    node.restServer.start()
 
-    node.installMessageValidators()
+  let
+    wallTime = node.beaconClock.now()
+    wallSlot = wallTime.slotOrZero()
 
-    let
-      wallTime = node.beaconClock.now()
-      wallSlot = wallTime.slotOrZero()
+  node.requestManager.start()
+  node.syncManager.start()
 
-    node.requestManager.start()
-    node.startSyncManager()
+  waitFor node.updateGossipStatus(wallSlot)
 
-    waitFor node.updateGossipStatus(wallSlot)
-
-    asyncSpawn runSlotLoop(node, wallTime, onSlotStart)
-    asyncSpawn runOnSecondLoop(node)
-    asyncSpawn runQueueProcessingLoop(node.blockProcessor)
+  asyncSpawn runSlotLoop(node, wallTime, onSlotStart)
+  asyncSpawn runOnSecondLoop(node)
+  asyncSpawn runQueueProcessingLoop(node.blockProcessor)
 
   ## Ctrl+C handling
   proc controlCHandler() {.noconv.} =
@@ -1173,6 +1143,8 @@ proc createPidFile(filename: string) {.raises: [Defect, IOError].} =
   addQuitProc proc {.noconv.} = discard io2.removeFile(gPidFile)
 
 proc initializeNetworking(node: BeaconNode) {.async.} =
+  node.installMessageValidators()
+
   info "Listening to incoming network requests"
   await node.network.startListening()
@@ -1181,10 +1153,6 @@ proc initializeNetworking(node: BeaconNode) {.async.} =
 
   await node.network.start()
 
-func shouldWeStartWeb3(node: BeaconNode): bool =
-  (node.config.web3Mode == Web3Mode.enabled) or
-  (node.config.web3Mode == Web3Mode.auto and node.attachedValidators[].count > 0)
-
 proc start(node: BeaconNode) {.raises: [Defect, CatchableError].} =
   let
     head = node.dag.head
@@ -1210,9 +1178,10 @@ proc start(node: BeaconNode) {.raises: [Defect, CatchableError].} =
 
   waitFor node.initializeNetworking()
 
-  # TODO this does not account for validators getting attached "later"
-  if node.eth1Monitor != nil and node.shouldWeStartWeb3:
+  if node.eth1Monitor != nil:
     node.eth1Monitor.start()
+  else:
+    notice "Running without execution chain monitor, block producation partially disabled"
 
   node.run()
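
Condensing the gating change (old pieces from the two hunks above, new piece from this one):

  # Before: start only in enabled mode, or in auto mode with validators
  # attached at startup -- the deleted TODO flagged that this missed
  # validators attached "later".
  if node.eth1Monitor != nil and node.shouldWeStartWeb3:
    node.eth1Monitor.start()

  # After: per the commit title, a monitor constructed from a given
  # --web3-url always starts; without one, the operator gets the notice.
  if node.eth1Monitor != nil:
    node.eth1Monitor.start()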
@@ -1502,24 +1471,6 @@ proc loadEth2Network(config: BeaconNodeConf): Eth2NetworkMetadata {.raises: [Def
     echo "Must specify network on non-mainnet node"
     quit 1
 
-proc loadBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext): BeaconNode {.
-    raises: [Defect, CatchableError].} =
-  let metadata = config.loadEth2Network()
-
-  # Updating the config based on the metadata certainly is not beautiful but it
-  # works
-  for node in metadata.bootstrapNodes:
-    config.bootstrapNodes.add node
-
-  BeaconNode.init(
-    metadata.cfg,
-    rng,
-    config,
-    metadata.depositContractDeployedAt,
-    metadata.eth1Network,
-    metadata.genesisData,
-    metadata.genesisDepositsSnapshot)
-
 proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext) {.raises: [Defect, CatchableError].} =
   info "Launching beacon node",
     version = fullVersionStr,
@@ -1543,7 +1494,22 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext) {.r
   # There are no managed event loops in here, to do a graceful shutdown, but
   # letting the default Ctrl+C handler exit is safe, since we only read from
   # the db.
-  let node = loadBeaconNode(config, rng)
+  let metadata = config.loadEth2Network()
+
+  # Updating the config based on the metadata certainly is not beautiful but it
+  # works
+  for node in metadata.bootstrapNodes:
+    config.bootstrapNodes.add node
+
+  let node = BeaconNode.init(
+    metadata.cfg,
+    rng,
+    config,
+    metadata.depositContractDeployedAt,
+    metadata.eth1Network,
+    metadata.genesisData,
+    metadata.genesisDepositsSnapshot)
+
   if bnStatus == BeaconNodeStatus.Stopping:
     return
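
The removed loadBeaconNode wrapper had exactly one caller, and it took `config` as `var` precisely because of the mutation below; inlining keeps that side effect visible at the call site:

  # metadata.bootstrapNodes is merged into the mutable config right before
  # BeaconNode.init, which (per the hunks above) now also builds the sync
  # manager and the other components up front.
  for node in metadata.bootstrapNodes:
    config.bootstrapNodes.add node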