Revert "include small dedup in block processor to handle blockByRoot blocks (#4814)" (#4840)

This reverts commit 8b3ffec0d5.

Syncing was broken with this: https://github.com/status-im/infra-nimbus/issues/132#issuecomment-1514465481
Author: tersec
Date: 2023-04-19 19:16:27 +00:00 (committed by GitHub)
Parent: 12d640b691
Commit: 2246a6ec95
1 changed file with 1 addition and 27 deletions


@@ -13,7 +13,6 @@ import
   ../spec/[signatures, signatures_batch],
   ../sszdump
 
-from std/deques import Deque, addLast, contains, initDeque, items, len, shrink
 from ../consensus_object_pools/consensus_manager import
   ConsensusManager, checkNextProposer, optimisticExecutionPayloadHash,
   runProposalForkchoiceUpdated, shouldSyncOptimistically, updateHead,
@@ -47,10 +46,6 @@ const
     ## syncing the finalized part of the chain
   PAYLOAD_PRE_WALL_SLOTS = SLOTS_PER_EPOCH * 2
     ## Number of slots from wall time that we start processing every payload
-  MAX_DEDUP_QUEUE_LEN = 16
-    ## Number of blocks, with FIFO discipline, against which to check queued
-    ## blocks before being processed to avoid spamming ELs. This should stay
-    ## small enough that even O(n) algorithms are reasonable.
 
 type
   BlobSidecars* = seq[ref BlobSidecar]
@@ -107,9 +102,6 @@ type
       ## The slot at which we sent a payload to the execution client the last
       ## time
 
-    dupBlckBuf: Deque[(Eth2Digest, ValidatorSig)]
-      # Small buffer to allow for filtering of duplicate blocks in block queue
-
   NewPayloadStatus {.pure.} = enum
     valid
     notValid
@@ -147,9 +139,7 @@ proc new*(T: type BlockProcessor,
     validatorMonitor: validatorMonitor,
     blobQuarantine: blobQuarantine,
     getBeaconTime: getBeaconTime,
-    verifier: BatchVerifier(rng: rng, taskpool: taskpool),
-    dupBlckBuf: initDeque[(Eth2Digest, ValidatorSig)](
-      initialSize = MAX_DEDUP_QUEUE_LEN)
+    verifier: BatchVerifier(rng: rng, taskpool: taskpool)
   )
 
 # Sync callbacks
@@ -689,19 +679,6 @@ proc addBlock*(
   except AsyncQueueFullError:
     raiseAssert "unbounded queue"
 
-# Dedup
-# ------------------------------------------------------------------------------
-
-func checkDuplicateBlocks(self: ref BlockProcessor, entry: BlockEntry): bool =
-  let key = (entry.blck.root, entry.blck.signature)
-  if self.dupBlckBuf.contains key:
-    return true
-  doAssert self.dupBlckBuf.len <= MAX_DEDUP_QUEUE_LEN
-  if self.dupBlckBuf.len >= MAX_DEDUP_QUEUE_LEN:
-    self.dupBlckBuf.shrink(fromFirst = 1)
-  self.dupBlckBuf.addLast key
-  false
-
 # Event Loop
 # ------------------------------------------------------------------------------
@@ -718,9 +695,6 @@ proc processBlock(
     error "Processing block before genesis, clock turned back?"
     quit 1
 
-  if self.checkDuplicateBlocks(entry):
-    return
-
   let res = withBlck(entry.blck):
     await self.storeBlock(
       entry.src, wallTime, blck, entry.blobs, entry.maybeFinalized,
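
For reference, the reverted change amounted to a small fixed-size FIFO buffer that filtered recently seen (root, signature) pairs out of the block queue before they reached the execution layer. The following self-contained Nim sketch only illustrates that mechanism; the identifiers used here (BlockKey, MaxDedupQueueLen, seenBefore) are illustrative and are not part of the nimbus-eth2 code, which keyed the buffer on (Eth2Digest, ValidatorSig).

  # Standalone sketch of the reverted FIFO dedup idea; names are illustrative,
  # not the project's API.
  from std/deques import Deque, addLast, contains, initDeque, len, shrink

  const MaxDedupQueueLen = 16  # small FIFO window so O(n) lookups stay cheap

  type BlockKey = tuple[root: string, signature: string]

  proc seenBefore(buf: var Deque[BlockKey], key: BlockKey): bool =
    ## True if `key` is already in the window; otherwise record it,
    ## evicting the oldest entry once the window is full.
    if buf.contains key:
      return true
    if buf.len >= MaxDedupQueueLen:
      buf.shrink(fromFirst = 1)  # drop the oldest entry (FIFO discipline)
    buf.addLast key
    false

  when isMainModule:
    var buf = initDeque[BlockKey](initialSize = MaxDedupQueueLen)
    doAssert not buf.seenBefore((root: "a", signature: "sig-a"))
    doAssert buf.seenBefore((root: "a", signature: "sig-a"))      # duplicate
    doAssert not buf.seenBefore((root: "b", signature: "sig-b"))  # new block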