Attempt to reduce the risk of dropped network connections during the loading of KeyStores
parent ff49932bb9
commit c773e10c1a
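In short: the node now opens its listening transports before loading validator keys, yields to the event loop between individual key loads, and only dials out to the network once all validators are attached. A minimal sketch of the yielding pattern used here, assuming chronos-style async (doSlowKeyLoad is a hypothetical stand-in for the blocking KeyStore work):

    import chronos

    proc doSlowKeyLoad(path: string) =
      # Hypothetical stand-in for blocking keystore decryption/parsing.
      discard

    proc loadKeysWithoutStarvingNetwork(paths: seq[string]) =
      for p in paths:
        doSlowKeyLoad(p)
        # Hand control back to the dispatcher for at least one tick so queued
        # network events (gossip, discovery, keep-alives) are still serviced
        # and peers are less likely to be dropped while keys load.
        waitFor sleepAsync(1)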
@@ -254,9 +254,6 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
 proc connectToNetwork(node: BeaconNode) {.async.} =
   await node.network.connectToNetwork()
 
-  let addressFile = node.config.dataDir / "beacon_node.address"
-  writeFile(addressFile, node.network.announcedENR.toURI)
-
 proc onAttestation(node: BeaconNode, attestation: Attestation) =
   # We received an attestation from the network but don't know much about it
   # yet - in particular, we haven't verified that it belongs to particular chain
@@ -538,7 +535,7 @@ proc runForwardSyncLoop(node: BeaconNode) {.async.} =
     result = node.blockPool.head.blck.slot
 
   proc getLocalWallSlot(): Slot {.gcsafe.} =
-    let epoch = node.beaconClock.now().toSlot().slot.compute_epoch_at_slot() +
+    let epoch = node.beaconClock.now().slotOrZero.compute_epoch_at_slot() +
                 1'u64
     result = epoch.compute_start_slot_at_epoch()
 
@@ -815,8 +812,6 @@ proc start(node: BeaconNode) =
   # actually need to make this part of normal application flow -
   # losing all connections might happen at any time and we should be
   # prepared to handle it.
-  waitFor node.connectToNetwork()
-
   let
     head = node.blockPool.head
     finalizedHead = node.blockPool.finalizedHead
@@ -837,12 +832,21 @@ proc start(node: BeaconNode) =
     cat = "init",
     pcs = "start_beacon_node"
 
-  let
-    bs = BlockSlot(blck: head.blck, slot: head.blck.slot)
+  node.network.startListening()
+  let addressFile = node.config.dataDir / "beacon_node.address"
+  writeFile(addressFile, node.network.announcedENR.toURI)
+
+  let bs = BlockSlot(blck: head.blck, slot: head.blck.slot)
 
   node.blockPool.withState(node.blockPool.tmpState, bs):
-    node.addLocalValidators(state)
+    for validatorKey in node.config.validatorKeys:
+      node.addLocalValidator state, validatorKey
+      # Allow some network events to be processed:
+      waitFor sleepAsync(1)
 
+  info "Local validators attached ", count = node.attachedValidators.count
+
+  waitFor node.network.connectToNetwork()
   node.run()
 
 func formatGwei(amount: uint64): string =
@@ -821,17 +821,19 @@ proc init*(T: type Eth2Node, conf: BeaconNodeConf, enrForkId: ENRForkID,
     if msg.protocolMounter != nil:
       msg.protocolMounter result
 
-  for i in 0 ..< ConcurrentConnections:
-    result.connWorkers.add(connectWorker(result))
-
 template publicKey*(node: Eth2Node): keys.PublicKey =
   node.discovery.privKey.toPublicKey.tryGet()
 
 template addKnownPeer*(node: Eth2Node, peer: enr.Record) =
   node.discovery.addNode peer
 
-proc start*(node: Eth2Node) {.async.} =
+proc startListening*(node: Eth2Node) =
   node.discovery.open()
+
+proc start*(node: Eth2Node) {.async.} =
+  for i in 0 ..< ConcurrentConnections:
+    node.connWorkers.add connectWorker(node)
+
   node.discovery.start()
   node.libp2pTransportLoops = await node.switch.start()
   node.discoveryLoop = node.runDiscoveryLoop()
@@ -53,12 +53,6 @@ proc addLocalValidator*(node: BeaconNode,
 
   node.attachedValidators.addLocalValidator(pubKey, privKey)
 
-proc addLocalValidators*(node: BeaconNode, state: BeaconState) =
-  for validatorKey in node.config.validatorKeys:
-    node.addLocalValidator state, validatorKey
-
-  info "Local validators attached ", count = node.attachedValidators.count
-
 func getAttachedValidator*(node: BeaconNode,
                            state: BeaconState,
                            idx: ValidatorIndex): AttachedValidator =