less async optimistic sync (#3842)

* less async optimistic sync

* use asyncSpawn; adapt changes to message router
tersec 2022-07-07 16:57:52 +00:00 committed by GitHub
parent b00eac7a50
commit 1250c56e32
4 changed files with 31 additions and 30 deletions
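
The headline change is visible in the first file below: storeBlock no longer awaits the execution-layer head update; the update is handed to chronos' asyncSpawn and storeBlock itself becomes a plain synchronous proc. A minimal sketch of that difference, assuming the chronos library; the proc names (updateHead, storeBlockAwaiting, storeBlockSpawning) are hypothetical stand-ins, not the real ones:

import chronos

proc updateHead() {.async.} =
  # Stand-in for updateHeadWithExecution: simulate a slow round-trip to the
  # execution client.
  await sleepAsync(100.milliseconds)
  echo "head updated"

proc storeBlockAwaiting() {.async.} =
  # Previous behaviour: the block-processing path waits for the fcU round-trip.
  await updateHead()
  echo "storeBlock returns after the head update"

proc storeBlockSpawning() =
  # Behaviour after this commit: the head update is scheduled fire-and-forget
  # and storeBlock returns synchronously.
  asyncSpawn updateHead()
  echo "storeBlock returns immediately"

when isMainModule:
  waitFor storeBlockAwaiting()
  storeBlockSpawning()
  waitFor sleepAsync(200.milliseconds)  # give the spawned task time to finish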

View File

@@ -169,12 +169,11 @@ from ../consensus_object_pools/spec_cache import get_attesting_indices
 from ../spec/datatypes/phase0 import TrustedSignedBeaconBlock
 
 proc storeBlock*(
-    self: ref BlockProcessor,
+    self: var BlockProcessor,
     src: MsgSource, wallTime: BeaconTime,
     signedBlock: ForkySignedBeaconBlock, payloadValid: bool,
     queueTick: Moment = Moment.now(),
-    validationDur = Duration()):
-    Future[Result[BlockRef, BlockError]] {.async.} =
+    validationDur = Duration()): Result[BlockRef, BlockError] =
   ## storeBlock is the main entry point for unvalidated blocks - all untrusted
   ## blocks, regardless of origin, pass through here. When storing a block,
   ## we will add it to the dag and pass it to all block consumers that need
@@ -217,7 +216,7 @@ proc storeBlock*(
           trustedBlock.message.slot, trustedBlock.root,
           state.data.current_sync_committee.pubkeys.data[i])
 
-  self[].dumpBlock(signedBlock, blck)
+  self.dumpBlock(signedBlock, blck)
 
   # There can be a scenario where we receive a block we already received.
   # However this block was before the last finalized epoch and so its parent
@@ -260,11 +259,8 @@ proc storeBlock*(
   # called valid blocks have already been registered as verified. The head
   # can lag a slot behind wall clock, complicating detecting synced status
   # for validating, otherwise.
-  #
-  # TODO have a third version which is fire-and-forget for when it is merge
-  # but payloadValid is true, i.e. fcU is for EL's benefit, not CL. Current
-  # behavior adds unnecessary latency to CL event loop.
-  await self.consensusManager.updateHeadWithExecution(wallTime.slotOrZero)
+  asyncSpawn self.consensusManager.updateHeadWithExecution(
+    wallTime.slotOrZero)
 
   let
     updateHeadTick = Moment.now()
@@ -281,7 +277,7 @@ proc storeBlock*(
 
   for quarantined in self.consensusManager.quarantine[].pop(blck.get().root):
     # Process the blocks that had the newly accepted block as parent
-    self[].addBlock(MsgSource.gossip, quarantined)
+    self.addBlock(MsgSource.gossip, quarantined)
 
   return blck
 
@@ -324,8 +320,7 @@ proc addBlock*(
 # ------------------------------------------------------------------------------
 
 proc processBlock(
-    self: ref BlockProcessor, entry: BlockEntry, payloadValid: bool)
-    {.async.} =
+    self: var BlockProcessor, entry: BlockEntry, payloadValid: bool) =
   logScope:
     blockRoot = shortLog(entry.blck.root)
 
@@ -338,7 +333,7 @@ proc processBlock(
     quit 1
 
   let res = withBlck(entry.blck):
-    await self.storeBlock(
+    self.storeBlock(
      entry.src, wallTime, blck, payloadValid, entry.queueTick,
      entry.validationDur)
 
@@ -489,7 +484,7 @@ proc runQueueProcessingLoop*(self: ref BlockProcessor) {.async.} =
       else:
         if executionPayloadStatus == PayloadExecutionStatus.valid or
            self[].is_optimistic_candidate_block(blck.blck):
-          await self.processBlock(
+          self[].processBlock(
            blck, executionPayloadStatus == PayloadExecutionStatus.valid)
         else:
           debug "runQueueProcessingLoop: block cannot be optimistically imported",

View File

@@ -183,20 +183,24 @@ proc updateHeadWithExecution*(self: ref ConsensusManager, wallSlot: Slot)
   ## `pruneFinalized` must be called for pruning.
 
   # Grab the new head according to our latest attestation data
-  let newHead = self.attestationPool[].selectOptimisticHead(
-    wallSlot.start_beacon_time).valueOr:
-      warn "Head selection failed, using previous head",
-        head = shortLog(self.dag.head), wallSlot
-      return
+  try:
+    let newHead = self.attestationPool[].selectOptimisticHead(
+      wallSlot.start_beacon_time).valueOr:
+        warn "Head selection failed, using previous head",
+          head = shortLog(self.dag.head), wallSlot
+        return
 
-  # Ensure dag.updateHead has most current information
-  await self.updateExecutionClientHead(newHead)
+    # Ensure dag.updateHead has most current information
+    await self.updateExecutionClientHead(newHead)
 
-  # Store the new head in the chain DAG - this may cause epochs to be
-  # justified and finalized
-  self.dag.updateHead(newHead, self.quarantine[])
+    # Store the new head in the chain DAG - this may cause epochs to be
+    # justified and finalized
+    self.dag.updateHead(newHead, self.quarantine[])
 
-  self[].checkExpectedBlock()
+    self[].checkExpectedBlock()
+  except CatchableError as exc:
+    debug "updateHeadWithExecution error",
+      error = exc.msg
 
 proc pruneStateCachesAndForkChoice*(self: var ConsensusManager) =
   ## Prune unneeded and invalidated data after finalization
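
The try/except CatchableError added above is the usual companion to asyncSpawn: as far as chronos is concerned, a spawned future that completes with an unhandled exception is escalated to a defect, so a fire-and-forget proc should catch and log its own errors. A small sketch of the pattern, again with hypothetical proc names rather than the real ones:

import chronos

proc selectAndUpdateHead() {.async.} =
  # Stand-in for the body of updateHeadWithExecution; may raise.
  raise newException(ValueError, "head selection failed")

proc updateHeadWithExecution() {.async.} =
  # Pattern from this commit: a proc started with asyncSpawn handles its own
  # CatchableError instead of letting the spawned future fail.
  try:
    await selectAndUpdateHead()
  except CatchableError as exc:
    echo "updateHeadWithExecution error: ", exc.msg

when isMainModule:
  asyncSpawn updateHeadWithExecution()  # fire-and-forget, errors logged inside
  waitFor sleepAsync(10.milliseconds)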

View File

@@ -151,7 +151,7 @@ proc routeSignedBeaconBlock*(
       signature = shortLog(blck.signature), error = res.error()
 
   let
-    newBlockRef = await router[].blockProcessor.storeBlock(
+    newBlockRef = router[].blockProcessor[].storeBlock(
       MsgSource.api, sendTime, blck, true)
 
   # The boolean we return tells the caller whether the block was integrated
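
The extra [] here follows from the signature change: storeBlock now takes var BlockProcessor rather than ref BlockProcessor, so the router, which holds a ref, passes the dereferenced object. A tiny sketch with a simplified stand-in type:

type
  BlockProcessor = object
    stored: int

proc storeBlock(self: var BlockProcessor) =
  # Takes a mutable `var` view rather than a `ref`, mirroring the new signature.
  inc self.stored

when isMainModule:
  let processor = (ref BlockProcessor)()  # the caller still holds a ref
  processor[].storeBlock()                # explicit [] deref yields the var view
  doAssert processor[].stored == 1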

View File

@@ -47,8 +47,9 @@ suite "Block processor" & preset():
       validatorMonitor, getTimeFn, safeSlotsToImportOptimistically = 128)
 
   test "Reverse order block add & get" & preset():
-    let missing = waitFor processor.storeBlock(
-      MsgSource.gossip, b2.message.slot.start_beacon_time(), b2, payloadValid = true)
+    let missing = processor[].storeBlock(
+      MsgSource.gossip, b2.message.slot.start_beacon_time(), b2,
+      payloadValid = true)
 
     check: missing.error == BlockError.MissingParent
     check:
@@ -57,8 +58,9 @@ suite "Block processor" & preset():
      FetchRecord(root: b1.root) in quarantine[].checkMissing()
 
    let
-      status = waitFor processor.storeBlock(
-        MsgSource.gossip, b2.message.slot.start_beacon_time(), b1, payloadValid = true)
+      status = processor[].storeBlock(
+        MsgSource.gossip, b2.message.slot.start_beacon_time(), b1,
+        payloadValid = true)
      b1Get = dag.getBlockRef(b1.root)
 
    check: