support for enqueueing whichever of blob/data_column is activated
parent 07d33b3abd
commit 325bdfd4a2
@@ -431,7 +431,7 @@ proc enqueueBlock*(
   self.blockQueue.addLastNoWait(BlockEntry(
     blck: blck,
     blobs: blobs,
-    data_columns: data_columns,
+    data_columns: Opt.none(DataColumnSidecars),
     maybeFinalized: maybeFinalized,
     resfut: resfut, queueTick: Moment.now(),
     validationDur: validationDur,
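The queue entry above carries each sidecar kind as an independent optional. A minimal sketch of that either/or shape, using toy types and std/options in place of the real BlockEntry/Opt machinery (BlobSidecars, DataColumnSidecars and enqueueBlock here are illustrative stand-ins, not the nimbus-eth2 definitions):

# Toy stand-ins for the real BlockEntry/Opt types; only the
# either/or shape of the queue entry is the point here.
import std/[deques, options]

type
  BlobSidecars = seq[string]        # placeholder payload
  DataColumnSidecars = seq[string]  # placeholder payload
  BlockEntry = object
    blck: string
    blobs: Option[BlobSidecars]
    dataColumns: Option[DataColumnSidecars]

var blockQueue = initDeque[BlockEntry]()

proc enqueueBlock(blck: string,
                  blobs = none(BlobSidecars),
                  dataColumns = none(DataColumnSidecars)) =
  # Callers pass at most one payload kind as `some`, depending on
  # whether blobs or data columns are activated for the block's fork.
  doAssert not (blobs.isSome and dataColumns.isSome)
  blockQueue.addLast(BlockEntry(
    blck: blck, blobs: blobs, dataColumns: dataColumns))

enqueueBlock("blob-era block", blobs = some(@["blob0"]))
enqueueBlock("column-era block", dataColumns = some(@["col0", "col1"]))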
@@ -542,6 +542,11 @@ proc storeBlock(
         else:
           Opt.none BlobSidecars

+      if blobsOk:
+        debug "Loaded parent block from storage", parent_root
+        self[].enqueueBlock(
+          MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs, Opt.none(DataColumnSidecars))
+
       var columnsOk = true
       let data_columns =
         withBlck(parentBlck.get()):
@@ -556,10 +561,10 @@ proc storeBlock(
             Opt.some data_column_sidecars
           else:
             Opt.none DataColumnSidecars
-      if blobsOk and columnsOk:
+      if columnsOk:
         debug "Loaded parent block from storage", parent_root
         self[].enqueueBlock(
-          MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs, data_columns)
+          MsgSource.gossip, parentBlck.unsafeGet().asSigned(), Opt.none(BlobSidecars), data_columns)

       return handleVerifierError(parent.error())
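Taken together, the two hunks above replace the combined `blobsOk and columnsOk` gate with two independent legs: the parent is re-enqueued once for blobs and once for data columns, each time with the other kind explicitly absent. A sketch of that split gating, with illustrative names rather than the real storeBlock locals:

# Illustrative sketch (not the real storeBlock locals) of the split
# gating: each sidecar kind re-enqueues the parent on its own instead
# of behind one combined `blobsOk and columnsOk` gate.
import std/options

type
  BlobSidecars = seq[int]
  DataColumnSidecars = seq[int]

proc enqueueParent(blobs: Option[BlobSidecars],
                   columns: Option[DataColumnSidecars]) =
  echo "re-enqueue parent: blobs=", blobs.isSome, " columns=", columns.isSome

proc requeueParent(blobsOk, columnsOk: bool,
                   blobs: Option[BlobSidecars],
                   columns: Option[DataColumnSidecars]) =
  if blobsOk:    # blob leg: data columns explicitly absent
    enqueueParent(blobs, none(DataColumnSidecars))
  if columnsOk:  # column leg: blobs explicitly absent
    enqueueParent(none(BlobSidecars), columns)

requeueParent(true, false, some(@[1, 2]), none(DataColumnSidecars))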
@@ -835,16 +840,23 @@ proc storeBlock(
           blck = shortLog(forkyBlck),
           error = res.error()
         continue
-      if self.blobQuarantine[].hasBlobs(forkyBlck) and self.dataColumnQuarantine[].hasDataColumns(forkyBlck):
+      if self.blobQuarantine[].hasBlobs(forkyBlck):
         let blobs = self.blobQuarantine[].popBlobs(
           forkyBlck.root, forkyBlck)
-        let data_columns = self.dataColumnQuarantine[].popDataColumns(
-          forkyBlck.root, forkyBlck)
-        self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs), Opt.some(data_columns))
+        self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs), Opt.none(DataColumnSidecars))
       else:
         discard self.consensusManager.quarantine[].addBlobless(
           dag.finalizedHead.slot, forkyBlck)
+
+      if self.dataColumnQuarantine[].hasDataColumns(forkyBlck):
+        let data_columns = self.dataColumnQuarantine[].popDataColumns(
+          forkyBlck.root, forkyBlck)
+        self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.none(BlobSidecars), Opt.some(data_columns))
+      else:
+        discard self.consensusManager.quarantine[].addColumnless(
+          dag.finalizedHead.slot, forkyBlck)

       ok blck.value()

     # Enqueue
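The quarantine handling in this hunk follows the same pattern: a quarantined block whose blobs are on hand is enqueued blob-only, one whose columns are on hand is enqueued column-only, and whatever is missing parks the block in a blobless or columnless bucket for a later retry. A toy model of that split (bucket names mirror the diff's addBlobless/addColumnless, but everything here is a stand-in):

# Toy model of the quarantine split; all types and names are stand-ins.
import std/sets

type Root = string

var
  blobsArrived = initHashSet[Root]()    # roots whose blobs we hold
  columnsArrived = initHashSet[Root]()  # roots whose columns we hold
  blobless, columnless: seq[Root]       # parked blocks, per missing kind

proc processQuarantined(root: Root) =
  if root in blobsArrived:
    echo "enqueue ", root, ": blobs, no columns"
  else:
    blobless.add root
  if root in columnsArrived:
    echo "enqueue ", root, ": columns, no blobs"
  else:
    columnless.add root

blobsArrived.incl "0xabc"
processQuarantined "0xabc"  # blob leg enqueues; column leg parks it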
@@ -261,6 +261,14 @@ proc processSignedBeaconBlock*(
       else:
         Opt.none(BlobSidecars)

+  self.blockProcessor[].enqueueBlock(
+    src, ForkedSignedBeaconBlock.init(signedBlock),
+    blobs,
+    Opt.none(DataColumnSidecars),
+    maybeFinalized = maybeFinalized,
+    validationDur = nanoseconds(
+      (self.getCurrentBeaconTime() - wallTime).nanoseconds))
+
   let data_columns =
     when typeof(signedBlock).kind >= ConsensusFork.Deneb:
       if self.dataColumnQuarantine[].hasDataColumns(signedBlock):
@@ -274,7 +282,7 @@ proc processSignedBeaconBlock*(

   self.blockProcessor[].enqueueBlock(
     src, ForkedSignedBeaconBlock.init(signedBlock),
-    blobs,
+    Opt.none(BlobSidecars),
     data_columns,
     maybeFinalized = maybeFinalized,
     validationDur = nanoseconds(
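After these two hunks, the gossip path issues two enqueues instead of one: a blob-only enqueue added before the data-column computation, and the existing enqueue narrowed to columns-only. A sketch of that shape (proc names and types are illustrative, not the eth2_processor API):

# Sketch of the gossip-path shape after this change: two enqueues, one
# per sidecar kind, each with the other kind explicitly set to none.
import std/options

type
  BlobSidecars = seq[int]
  DataColumnSidecars = seq[int]

proc enqueue(blobs: Option[BlobSidecars],
             columns: Option[DataColumnSidecars]) =
  echo "queued: blobs=", blobs.isSome, " columns=", columns.isSome

proc processSignedBlock(blobs: Option[BlobSidecars],
                        columns: Option[DataColumnSidecars]) =
  enqueue(blobs, none(DataColumnSidecars))  # blob leg
  enqueue(none(BlobSidecars), columns)      # column leg

processSignedBlock(some(@[1]), none(DataColumnSidecars))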
@@ -736,38 +736,6 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
           req.item.updateScore(PeerScoreBadValues)
           break

-      var counter = 0
-      for blk, dc in sq.das_blocks(item):
-        res = await sq.blockVerifier(blk[], Opt.none(BlobSidecars), dc, maybeFinalized)
-        inc(counter)
-
-        if res.isOk():
-          goodBlock = some(blk[].slot)
-        else:
-          case res.error()
-          of VerifierError.MissingParent:
-            missingParentSlot = some(blk[].slot)
-            break
-          of VerifierError.Duplicate:
-            # Keep going, happens naturally
-            discard
-          of VerifierError.UnviableFork:
-            # Keep going so as to register other unviable blocks with the
-            # quarantine
-            if unviableBlock.isNone:
-              # Remember the first unviable block, so we can log it
-              unviableBlock = some((blk[].root, blk[].slot))
-          of VerifierError.Invalid:
-            hasInvalidBlock = true
-
-            let req = item.request
-            notice "Received invalid sequence of blocks", request = req,
-              blocks_count = len(item.data),
-              blocks_map = getShortMap(req, item.data)
-            req.item.updateScore(PeerScoreBadValues)
-            break

       # When errors happen while processing blocks, we retry the same request
       # with, hopefully, a different peer
       let retryRequest =
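With the das_blocks loop removed, the sync queue no longer runs a second, column-specific verification pass: every block goes through the one remaining blockVerifier call, whose optional arguments say which sidecars accompany it. A sketch of that single-pass shape, with stand-in signatures rather than the real SyncQueue API:

# Sketch of the single remaining verification pass in the sync queue;
# blockVerifier's signature here is a stand-in.
import std/options

type
  BlobSidecars = seq[int]
  DataColumnSidecars = seq[int]
  VerifierError = enum MissingParent, Duplicate, UnviableFork, Invalid

proc blockVerifier(blck: string,
                   blobs: Option[BlobSidecars],
                   columns: Option[DataColumnSidecars]):
                   Option[VerifierError] =
  none(VerifierError)  # toy verifier: everything passes

proc push(blocks: seq[string], blobs: Option[BlobSidecars]) =
  for blck in blocks:
    # one pass only: the separate das_blocks/data-column pass is gone
    let err = blockVerifier(blck, blobs, none(DataColumnSidecars))
    if err.isSome and err.get in {MissingParent, Invalid}:
      break  # these errors invalidate the rest of the batch

push(@["a", "b"], some(@[1, 2]))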