Fluffy state network now enabled by default and status logs improved (#2640)
* Enable the state network by default. Create status log loops for the state and beacon networks and for the portal node. Implement stop functions.
parent 2fe8cc4551
commit a9ad10cadc
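The diff applies the same pattern to each network: a detached async status-log loop that prints one line every 60 seconds, is launched from `start()`, and is cancelled from `stop()` behind an `isNil` guard. The sketch below (not part of the commit) condenses that pattern into a self-contained example; it assumes chronos and chronicles, and `DemoNetwork` with its single field is a placeholder for the real network types:

    import chronos, chronicles

    type DemoNetwork = ref object
      # Placeholder for BeaconNetwork / HistoryNetwork / StateNetwork / PortalNode.
      statusLogLoop: Future[void]

    proc statusLogLoop(n: DemoNetwork) {.async: (raises: []).} =
      try:
        while true:
          # The real loops log routing table size, radius and database size here.
          info "Demo network status"
          await sleepAsync(60.seconds)
      except CancelledError:
        trace "statusLogLoop canceled"

    proc start(n: DemoNetwork) =
      n.statusLogLoop = statusLogLoop(n)

    proc stop(n: DemoNetwork) =
      # Guarded cancellation, as in the stop procs below.
      if not n.statusLogLoop.isNil():
        n.statusLogLoop.cancelSoon()

    when isMainModule:
      let n = DemoNetwork()
      n.start()
      waitFor sleepAsync(1.seconds)
      n.stop()

`cancelSoon()` schedules the cancellation without awaiting it, which is why the `stop()` procs can stay synchronous.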
@@ -89,7 +89,7 @@ type
     portalSubnetworks* {.
       desc: "Select which networks (Portal sub-protocols) to enable",
-      defaultValue: {PortalSubnetwork.history},
+      defaultValue: {PortalSubnetwork.history, PortalSubnetwork.state},
       name: "portal-subnetworks"
     .}: set[PortalSubnetwork]
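`portal-subnetworks` is a set-valued option, so this change only alters which members are enabled out of the box; the flag can still be passed explicitly to run a different selection. A reduced illustration (not part of the commit, with the enum cut down to the two members visible in this hunk) of what the new default means:

    type PortalSubnetwork = enum
      history, state

    let oldDefault = {PortalSubnetwork.history}
    let newDefault = {PortalSubnetwork.history, PortalSubnetwork.state}

    doAssert PortalSubnetwork.state notin oldDefault   # state was opt-in before
    doAssert PortalSubnetwork.state in newDefault      # state now runs by default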
@@ -178,10 +178,13 @@ proc new*(
   )

 proc start*(lightClient: LightClient) =
-  notice "Starting beacon light client",
-    trusted_block_root = lightClient.trustedBlockRoot
+  info "Starting beacon light client", trusted_block_root = lightClient.trustedBlockRoot
   lightClient.manager.start()

+proc stop*(lightClient: LightClient) =
+  info "Stopping beacon light client"
+  discard lightClient.manager.stop()
+
 proc resetToFinalizedHeader*(
     lightClient: LightClient,
     header: ForkedLightClientHeader,
@@ -31,6 +31,7 @@ type BeaconNetwork* = ref object
   forkDigests*: ForkDigests
   trustedBlockRoot: Opt[Eth2Digest]
   processContentLoop: Future[void]
+  statusLogLoop: Future[void]

 func toContentIdHandler(contentKey: ContentKeyByteList): results.Opt[ContentId] =
   ok(toContentId(contentKey))
@@ -364,13 +365,29 @@ proc processContentLoop(n: BeaconNetwork) {.async: (raises: []).} =
   except CancelledError:
     trace "processContentLoop canceled"

+proc statusLogLoop(n: BeaconNetwork) {.async: (raises: []).} =
+  try:
+    while true:
+      info "Beacon network status",
+        routingTableNodes = n.portalProtocol.routingTable.len()
+
+      await sleepAsync(60.seconds)
+  except CancelledError:
+    trace "statusLogLoop canceled"
+
 proc start*(n: BeaconNetwork) =
   info "Starting Portal beacon chain network"

   n.portalProtocol.start()
   n.processContentLoop = processContentLoop(n)

 proc stop*(n: BeaconNetwork) =
+  info "Stopping Portal beacon chain network"
+
   n.portalProtocol.stop()

   if not n.processContentLoop.isNil:
     n.processContentLoop.cancelSoon()
+
+  if not n.statusLogLoop.isNil():
+    n.statusLogLoop.cancelSoon()
@@ -704,17 +704,7 @@ proc processContentLoop(n: HistoryNetwork) {.async: (raises: []).} =
 proc statusLogLoop(n: HistoryNetwork) {.async: (raises: []).} =
   try:
     while true:
-      # This is the data radius percentage compared to full storage. This will
-      # drop a lot when using the logbase2 scale, namely `/ 2` per 1 logaritmic
-      # radius drop.
-      # TODO: Get some float precision calculus?
-      let radiusPercentage =
-        n.portalProtocol.dataRadius() div (UInt256.high() div u256(100))
-
       info "History network status",
-        radiusPercentage = radiusPercentage.toString(10) & "%",
-        radius = n.portalProtocol.dataRadius().toHex(),
-        dbSize = $(n.contentDB.size() div 1000) & "kb",
         routingTableNodes = n.portalProtocol.routingTable.len()

       await sleepAsync(60.seconds)
@@ -725,6 +715,7 @@ proc start*(n: HistoryNetwork) =
   info "Starting Portal execution history network",
     protocolId = n.portalProtocol.protocolId,
     accumulatorRoot = hash_tree_root(n.accumulator)

   n.portalProtocol.start()
+
   n.processContentLoop = processContentLoop(n)
@@ -732,10 +723,12 @@ proc start*(n: HistoryNetwork) =
   pruneDeprecatedAccumulatorRecords(n.accumulator, n.contentDB)

 proc stop*(n: HistoryNetwork) =
+  info "Stopping Portal execution history network"
+
   n.portalProtocol.stop()

   if not n.processContentLoop.isNil:
     n.processContentLoop.cancelSoon()

-  if not n.processContentLoop.isNil:
+  if not n.statusLogLoop.isNil:
     n.statusLogLoop.cancelSoon()
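Note the guard fix in this `stop()`: the old code re-tested `processContentLoop` before cancelling `statusLogLoop`. A hypothetical helper (not in the commit) that would make this class of copy-paste mistake harder:

    import chronos

    proc cancelIfRunning(fut: Future[void]) =
      # Cancel a loop future only if it was ever started.
      if not fut.isNil():
        fut.cancelSoon()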
@@ -128,7 +128,6 @@ proc gossipOffer*(
   debug "Offered content gossipped successfully with peers", keyBytes, peers

 # Currently only used for testing to gossip an entire account trie proof
-# This may also be useful for the state network bridge
 proc recursiveGossipOffer*(
     p: PortalProtocol,
     srcNodeId: Opt[NodeId],
@@ -31,6 +31,7 @@ type StateNetwork* = ref object
   contentDB*: ContentDB
   contentQueue*: AsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])]
   processContentLoop: Future[void]
+  statusLogLoop: Future[void]
   historyNetwork: Opt[HistoryNetwork]
   validateStateIsCanonical: bool
@@ -221,15 +222,32 @@ proc processContentLoop(n: StateNetwork) {.async: (raises: []).} =
   except CancelledError:
     trace "processContentLoop canceled"

+proc statusLogLoop(n: StateNetwork) {.async: (raises: []).} =
+  try:
+    while true:
+      info "State network status",
+        routingTableNodes = n.portalProtocol.routingTable.len()
+
+      await sleepAsync(60.seconds)
+  except CancelledError:
+    trace "statusLogLoop canceled"
+
 proc start*(n: StateNetwork) =
   info "Starting Portal execution state network",
     protocolId = n.portalProtocol.protocolId

   n.portalProtocol.start()
+
   n.processContentLoop = processContentLoop(n)
+  n.statusLogLoop = statusLogLoop(n)

 proc stop*(n: StateNetwork) =
+  info "Stopping Portal execution state network"
+
   n.portalProtocol.stop()

-  if not n.processContentLoop.isNil:
+  if not n.processContentLoop.isNil():
     n.processContentLoop.cancelSoon()
+
+  if not n.statusLogLoop.isNil():
+    n.statusLogLoop.cancelSoon()
@@ -9,6 +9,7 @@

 import
   results,
+  chronos,
   eth/p2p/discoveryv5/protocol,
   beacon_chain/spec/forks,
   ./network_metadata,
@@ -39,6 +40,7 @@ type
     historyNetwork*: Opt[HistoryNetwork]
     stateNetwork*: Opt[StateNetwork]
     beaconLightClient*: Opt[LightClient]
+    statusLogLoop: Future[void]

 # Beacon light client application callbacks triggered when new finalized header
 # or optimistic header is available.
@@ -179,7 +181,27 @@ proc new*(
     beaconLightClient: beaconLightClient,
   )

+proc statusLogLoop(n: PortalNode) {.async: (raises: []).} =
+  try:
+    while true:
+      # This is the data radius percentage compared to full storage. This will
+      # drop a lot when using the logbase2 scale, namely `/ 2` per 1 logaritmic
+      # radius drop.
+      # TODO: Get some float precision calculus?
+      let radiusPercentage = n.contentDB.dataRadius div (UInt256.high() div u256(100))
+
+      info "Portal node status",
+        radiusPercentage = radiusPercentage.toString(10) & "%",
+        radius = n.contentDB.dataRadius.toHex(),
+        dbSize = $(n.contentDB.size() div 1000) & "kb"
+
+      await sleepAsync(60.seconds)
+  except CancelledError:
+    trace "statusLogLoop canceled"
+
 proc start*(n: PortalNode) =
+  debug "Starting Portal node"
+
   if n.beaconNetwork.isSome():
     n.beaconNetwork.value.start()
   if n.historyNetwork.isSome():
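The radius percentage that used to be computed per network is now logged once at the node level, using plain integer division over `UInt256`. A small worked example (not part of the commit), assuming the same stint primitives the loop above uses (`UInt256`, `u256`, `toString`): a radius covering half of the content-id space reports as 50%.

    import stint

    # percentage = radius div (max_uint256 div 100), as in the loop above
    let radius = UInt256.high() div u256(2)      # radius spanning ~half the id space
    let radiusPercentage = radius div (UInt256.high() div u256(100))

    echo radiusPercentage.toString(10) & "%"     # prints "50%"

Because everything stays in integer arithmetic the figure is only approximate, which is what the TODO about float precision in the comment refers to.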
@@ -189,3 +211,21 @@ proc start*(n: PortalNode) =

   if n.beaconLightClient.isSome():
     n.beaconLightClient.value.start()
+
+  n.statusLogLoop = statusLogLoop(n)
+
+proc stop*(n: PortalNode) =
+  debug "Stopping Portal node"
+
+  if n.beaconNetwork.isSome():
+    n.beaconNetwork.value.stop()
+  if n.historyNetwork.isSome():
+    n.historyNetwork.value.stop()
+  if n.stateNetwork.isSome():
+    n.stateNetwork.value.stop()
+
+  if n.beaconLightClient.isSome():
+    n.beaconLightClient.value.stop()
+
+  if not n.statusLogLoop.isNil:
+    n.statusLogLoop.cancelSoon()
@@ -298,7 +298,9 @@ proc runBackfillGossipBlockOffersLoop(
     for k, v in offersMap:
       try:
         let numPeers = await portalClient.portal_stateGossip(k.to0xHex(), v.to0xHex())
-        if numPeers == 0:
+        if numPeers > 0:
+          debug "Offer successfully gossipped to peers: ", numPeers, workerId
+        elif numPeers == 0:
           warn "Offer gossipped to no peers", workerId
           retryGossip = true
           break