Mirror of https://github.com/status-im/nimbus-eth2.git (synced 2025-01-20 03:20:32 +00:00)
data availability fix

Commit 1927366eaa, parent 44fac84a4f
@@ -69,6 +69,19 @@ func hasDataColumn*(
       return true
   false
 
+func accumulateDataColumns*(quarantine: DataColumnQuarantine,
+    blck: deneb.SignedBeaconBlock |
+    electra.SignedBeaconBlock): seq[ColumnIndex] =
+  # This method copies the DataColumns that were received via
+  # gossip and are accumulated to an array
+  var indices: seq[ColumnIndex]
+  for i in 0..<NUMBER_OF_COLUMNS:
+    let idx = ColumnIndex(i)
+    if quarantine.data_columns.hasKey(
+        (blck.root, idx)):
+      indices.add(idx)
+  indices
+
 func popDataColumns*(
     quarantine: var DataColumnQuarantine, digest: Eth2Digest,
     blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock):
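The new accumulateDataColumns walks every column index and collects the ones already sitting in the quarantine for the block's root. A standalone sketch of that accumulation pattern, using simplified stand-in types (a string block root and a plain Table) rather than the real DataColumnQuarantine definitions, looks like this:

import std/tables

const NUMBER_OF_COLUMNS = 128   # placeholder value, for illustration only

type
  ColumnIndex = uint64
  ToyQuarantine = object
    # (block root, column index) -> stored sidecar payload
    data_columns: Table[(string, ColumnIndex), string]

func accumulateDataColumns(q: ToyQuarantine, blockRoot: string): seq[ColumnIndex] =
  # Collect the indices of every column already held for this block root.
  var indices: seq[ColumnIndex]
  for i in 0..<NUMBER_OF_COLUMNS:
    let idx = ColumnIndex(i)
    if q.data_columns.hasKey((blockRoot, idx)):
      indices.add(idx)
  indices

when isMainModule:
  var q = ToyQuarantine(data_columns: initTable[(string, ColumnIndex), string]())
  q.data_columns[("blockA", ColumnIndex(3))] = "sidecar-3"
  q.data_columns[("blockA", ColumnIndex(7))] = "sidecar-7"
  echo accumulateDataColumns(q, "blockA")   # prints @[3, 7]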
@@ -96,10 +109,10 @@ func hasDataColumns*(quarantine: DataColumnQuarantine,
         quarantine.data_columns.hasKey(
         (blck.root, idx)):
       inc counter
-  if counter == max(SAMPLES_PER_SLOT, CUSTODY_REQUIREMENT) and
-     counter == NUMBER_OF_COLUMNS:
-    return true
-  false
+  if counter != max(SAMPLES_PER_SLOT, CUSTODY_REQUIREMENT) or
+     counter != NUMBER_OF_COLUMNS:
+    return false
+  true
 
 func dataColumnFetchRecord*(quarantine: DataColumnQuarantine,
     blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock): DataColumnFetchRecord =
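The rewritten check in hasDataColumns is the De Morgan complement of the old one: returning false as soon as either comparison fails is equivalent to returning true only when both hold. A small self-contained check, with the thresholds as plain parameters rather than the real spec constants, illustrates that the two shapes always agree:

func oldForm(counter, threshold, total: int): bool =
  # Original shape: succeed only when both comparisons hold.
  if counter == threshold and counter == total:
    return true
  false

func newForm(counter, threshold, total: int): bool =
  # Rewritten shape: bail out when either comparison fails.
  if counter != threshold or counter != total:
    return false
  true

when isMainModule:
  for counter in 0..4:
    for threshold in 0..4:
      for total in 0..4:
        doAssert oldForm(counter, threshold, total) == newForm(counter, threshold, total)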
@@ -370,7 +370,7 @@ proc initFullNode(
   func getFrontfillSlot(): Slot =
     max(dag.frontfill.get(BlockId()).slot, dag.horizon)
 
+  var supernode = node.config.subscribeAllSubnets
   let
     quarantine = newClone(
       Quarantine.init())
@@ -408,39 +408,33 @@ proc initFullNode(
       maybeFinalized: bool):
       Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} =
     withBlck(signedBlock):
-      # when consensusFork >= ConsensusFork.Deneb:
-      #   if not blobQuarantine[].hasBlobs(forkyBlck):
-      #     # We don't have all the blobs for this block, so we have
-      #     # to put it in blobless quarantine.
-      #     if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck):
-      #       err(VerifierError.UnviableFork)
-      #     else:
-      #       err(VerifierError.MissingParent)
-      #   else:
-      #     let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck)
-      #     await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
-      #       Opt.some(blobs), Opt.none(DataColumnSidecars),
-      #       maybeFinalized = maybeFinalized)
-      # else:
-      #   await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
-      #     Opt.none(BlobSidecars), Opt.none(DataColumnSidecars),
-      #     maybeFinalized = maybeFinalized)
-
       when consensusFork >= ConsensusFork.Deneb:
-        if not dataColumnQuarantine[].checkForInitialDcSidecars(forkyBlck):
+        let
+          localSubnetCount =
+            if supernode:
+              DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64
+            else:
+              CUSTODY_REQUIREMENT.uint64
+          localCustodyColumns = get_custody_columns(node.network.nodeId,
+                                                    max(SAMPLES_PER_SLOT.uint64,
+                                                    localSubnetCount))
+          accumulatedColumns = dataColumnQuarantine[].accumulateDataColumns(forkyBlck)
+
+        for ac in accumulatedColumns:
+          if ac notin localCustodyColumns:
           # We don't have all the data columns for this block, so we have
           # to put it in columnless quarantine.
           if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck):
-            err(VerifierError.UnviableFork)
-          else:
-            err(VerifierError.MissingParent)
+            return err(VerifierError.UnviableFork)
+          return err(VerifierError.MissingParent)
         else:
           let data_columns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, forkyBlck)
-          await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
+          return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
             Opt.none(BlobSidecars), Opt.some(data_columns),
             maybeFinalized = maybeFinalized)
       else:
-        await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
+        return await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
           Opt.none(BlobSidecars), Opt.none(DataColumnSidecars),
           maybeFinalized = maybeFinalized)
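The new gate in initFullNode compares the column indices accumulated from gossip against the node's locally custodied set: an accumulated index outside that set sends the block to the columnless quarantine, otherwise the columns are popped and the block is handed to the block processor. The sketch below reproduces only the shape of that decision; the plain sequences stand in for the results of accumulateDataColumns and get_custody_columns, and the string results stand in for the quarantine/addBlock calls.

type ColumnIndex = uint64

proc classifyBlock(accumulated, custody: seq[ColumnIndex]): string =
  # Mirror of the loop above: an accumulated index outside the local
  # custody set means the block is not yet processable here.
  for ac in accumulated:
    if ac notin custody:
      return "columnless quarantine"
  "pop data columns and add block"

when isMainModule:
  let custody: seq[ColumnIndex] = @[0'u64, 1, 2, 3]
  echo classifyBlock(@[1'u64, 2], custody)   # every index custodied
  echo classifyBlock(@[1'u64, 9], custody)   # index 9 is outside the custody set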
@@ -473,7 +467,6 @@ proc initFullNode(
       processor: processor,
       network: node.network)
 
-  var supernode = node.config.subscribeAllSubnets
   let
     syncManager = newSyncManager[Peer, PeerId](
       node.network.peerPool,