use hasEnoughDataColumns for DA
parent 986e1f5d83
commit e161f5e6f1
@@ -412,50 +412,20 @@ proc initFullNode(
       Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} =
     withBlck(signedBlock):
       when consensusFork >= ConsensusFork.Deneb:
-        let
-          localSubnetCount =
-            if supernode:
-              DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64
-            else:
-              CUSTODY_REQUIREMENT.uint64
-          localCustodyColumns = get_custody_columns(node.network.nodeId,
-            max(SAMPLES_PER_SLOT.uint64,
-              localSubnetCount))
-          accumulatedColumns = dataColumnQuarantine[].accumulateDataColumns(forkyBlck)
-        if accumulatedColumns.len == 0:
-          return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
-            Opt.none(BlobSidecars), Opt.none(DataColumnSidecars),
-            maybeFinalized = maybeFinalized)
-        elif supernode == true and accumulatedColumns.len <= localCustodyColumns.len div 2 :
+        if not dataColumnQuarantine[].hasEnoughDataColumns(forkyBlck):
           # We don't have all the data columns for this block, so we have
           # to put it in columnless quarantine.
           if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck):
-            return err(VerifierError.UnviableFork)
+            err(VerifierError.UnviableFork)
           else:
-            return err(VerifierError.MissingParent)
-        elif supernode == true and accumulatedColumns.len == localCustodyColumns.len:
-          let data_columns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, forkyBlck)
-          return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
-            Opt.none(BlobSidecars), Opt.some(data_columns),
-            maybeFinalized = maybeFinalized)
-        elif supernode == true and accumulatedColumns.len >= localCustodyColumns.len div 2:
-          let data_columns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, forkyBlck)
-          return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
-            Opt.none(BlobSidecars), Opt.some(data_columns),
-            maybeFinalized = maybeFinalized)
-        elif supernode == false and accumulatedColumns.len <= localCustodyColumns.len:
-          # We don't have all the data columns for this block, so we have
-          # to put it in columnless quarantine.
-          if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck):
-            return err(VerifierError.UnviableFork)
-          else:
-            return err(VerifierError.MissingParent)
+            err(VerifierError.MissingParent)
         else:
-          return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
-            Opt.none(BlobSidecars), Opt.none(DataColumnSidecars),
+          let data_columns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, forkyBlck)
+          await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
+            Opt.none(BlobSidecars), Opt.some(data_columns),
             maybeFinalized = maybeFinalized)
       else:
-        return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
+        await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
           Opt.none(BlobSidecars), Opt.none(DataColumnSidecars),
           maybeFinalized = maybeFinalized)
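The helper that replaces the inlined custody arithmetic above is not part of this diff. As a rough, hypothetical sketch of the check it stands for, based only on the thresholds visible in the removed lines (supernode versus regular custody counts), a quarantine-side hasEnoughDataColumns could look roughly like this; the type, field names and exact thresholds are illustrative, not the actual nimbus-eth2 implementation:

# Hypothetical, self-contained sketch - not the real quarantine module.
type
  DataColumnQuarantine = object
    supernode: bool              # assumed: quarantine knows the node's mode
    custodyColumns: seq[uint64]  # assumed: column indices this node custodies
    accumulated: seq[uint64]     # columns received so far for the block

proc hasEnoughDataColumns(q: DataColumnQuarantine): bool =
  ## Mirrors the shape of the removed checks: a supernode can reconstruct
  ## the full set from roughly half of its custody columns, while a regular
  ## node needs all of the columns it custodies.
  if q.supernode:
    q.accumulated.len >= q.custodyColumns.len div 2
  else:
    q.accumulated.len >= q.custodyColumns.len

Whatever the exact thresholds, the effect of the change is that this availability decision now lives in one quarantine helper instead of being repeated across the elif branches above.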
@@ -708,13 +708,9 @@ proc syncWorker[A, B](man: SyncManager[A, B], index: int) {.async: (raises: [Can
       await man.notInSyncEvent.wait()
       man.workers[index].status = SyncWorkerStatus.WaitingPeer
       peer = await man.pool.acquire()
-      if peer.remoteAgent == Eth2Agent.Prysm:
-        await man.syncStep(index, peer)
-        man.pool.release(peer)
-        peer = nil
-      else:
-        man.pool.release(peer)
-        peer = nil
+      await man.syncStep(index, peer)
+      man.pool.release(peer)
+      peer = nil
     finally:
       if not(isNil(peer)):
         man.pool.release(peer)
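With the Eth2Agent.Prysm special case gone, every acquired peer goes through the same sync step before being released. The peer = nil after release is what keeps the finally clause from releasing the same peer twice; below is a minimal, self-contained sketch of that discipline, where Peer and PeerPool are simplified stand-ins rather than the actual SyncManager/PeerPool API:

# Illustrative sketch of the acquire/step/release pattern with a finally
# guard; Peer and PeerPool are simplified stand-ins.
type
  Peer = ref object
    id: int
  PeerPool = object
    free: seq[Peer]

proc acquire(pool: var PeerPool): Peer =
  pool.free.pop()

proc release(pool: var PeerPool, peer: Peer) =
  pool.free.add(peer)

proc workerStep(pool: var PeerPool) =
  var peer: Peer = nil
  try:
    peer = pool.acquire()
    # ... perform the sync step with `peer`; it may raise ...
    pool.release(peer)
    peer = nil            # already released, nothing left for `finally`
  finally:
    if not isNil(peer):   # an exception skipped the normal release
      pool.release(peer)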
@@ -1 +1 @@
-Subproject commit 21cbe3a91a70811522554e89e6a791172cebfef2
+Subproject commit b5fb7b3a97d8977d969d786633f70c4094cd0eaf