clean up critical path

Agnish Ghosh 2024-11-13 18:33:32 +07:00
parent e05cb51ffe
commit b6cf733f91
2 changed files with 34 additions and 15 deletions


@@ -401,18 +401,15 @@ proc processDataColumnSidecar*(
     let columnless = o.unsafeGet()
     withBlck(columnless):
       when consensusFork >= ConsensusFork.Deneb:
-        if self.dataColumnQuarantine[].gatherDataColumns(block_root).len ==
-            max(SAMPLES_PER_SLOT, CUSTODY_REQUIREMENT) and
-            self.dataColumnQuarantine[].hasMissingDataColumns(forkyBlck):
-          if self.dataColumnQuarantine[].supernode == false:
-            let columns =
-              self.dataColumnQuarantine[].gatherDataColumns(block_root).mapIt(it[])
-            for gdc in columns:
-              self.dataColumnQuarantine[].put(newClone(gdc))
-          self.blockProcessor[].enqueueBlock(
-            MsgSource.gossip, columnless,
-            Opt.none(BlobSidecars),
-            Opt.some(self.dataColumnQuarantine[].popDataColumns(block_root, forkyBlck)))
+        if self.dataColumnQuarantine[].supernode == false:
+          let columns =
+            self.dataColumnQuarantine[].gatherDataColumns(block_root).mapIt(it[])
+          for gdc in columns:
+            self.dataColumnQuarantine[].put(newClone(gdc))
+          self.blockProcessor[].enqueueBlock(
+            MsgSource.gossip, columnless,
+            Opt.none(BlobSidecars),
+            Opt.some(self.dataColumnQuarantine[].popDataColumns(block_root, forkyBlck)))
         elif self.dataColumnQuarantine[].hasEnoughDataColumns(forkyBlck):
           # let
           #   columns = self.dataColumnQuarantine[].gatherDataColumns(block_root)

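For readers unfamiliar with the quarantine calls in this hunk, the container follows a put/gather/pop shape: `gatherDataColumns` reads non-destructively, `popDataColumns` hands the columns off to the block processor. A minimal sketch with hypothetical, simplified types; the real `DataColumnQuarantine` in nimbus-eth2 also tracks custody and supernode status, and its `put` derives the key from the sidecar itself:

```nim
# Minimal sketch only; types and keys are simplified assumptions,
# not the actual nimbus-eth2 DataColumnQuarantine.
import std/tables

type
  ColumnIndex = uint64
  BlockRoot = string  # stands in for the real Eth2Digest
  DataColumnSidecar = object
    index: ColumnIndex
  DataColumnQuarantine = object
    columns: Table[(BlockRoot, ColumnIndex), ref DataColumnSidecar]

proc put(q: var DataColumnQuarantine, root: BlockRoot,
         col: ref DataColumnSidecar) =
  # Overwrites any column already held for (root, index).
  q.columns[(root, col.index)] = col

proc gatherDataColumns(q: DataColumnQuarantine,
                       root: BlockRoot): seq[ref DataColumnSidecar] =
  # Non-destructive read of every column held for a block root.
  for key, col in q.columns.pairs:
    if key[0] == root:
      result.add col

proc popDataColumns(q: var DataColumnQuarantine,
                    root: BlockRoot): seq[ref DataColumnSidecar] =
  # Destructive read: the caller takes ownership of the columns.
  result = q.gatherDataColumns(root)
  for col in result:
    q.columns.del((root, col.index))
```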

@@ -1510,7 +1510,7 @@ proc pruneDataColumns(node: BeaconNode, slot: Slot) =
       count = count + 1
   debug "pruned data columns", count, dataColumnPruneEpoch
 
-proc trySendingReconstructedColumns* (self: BeaconNode,
+proc tryReconstructingDataColumns* (self: BeaconNode,
     signed_block: deneb.TrustedSignedBeaconBlock |
                   electra.TrustedSignedBeaconBlock):
     Future[Result[seq[DataColumnSidecar], string]] {.async.} =
@@ -1545,7 +1545,29 @@ proc tryReconstructingDataColumns* (self: BeaconNode,
       data_column_sidecars.add data_column[]
       storedColumns.add data_column.index
 
-  ok(data_column_sidecars)
+  debug "Pre-stored columns", storedColumns
+  # If fewer than NUMBER_OF_COLUMNS div 2 columns are stored,
+  # reconstruction is not possible, and if all the data columns
+  # are already stored then there is no need to reconstruct at all.
+  if storedColumns.len >= NUMBER_OF_COLUMNS div 2 and
+      storedColumns.len != NUMBER_OF_COLUMNS:
+    # Recover blobs from saved data column sidecars
+    let recovered_cps = recover_cells_and_proofs(data_column_sidecars, signed_block)
+    if recovered_cps.isErr:
+      return err("Error recovering cells and proofs from data columns")
+    # Reconstruct data column sidecars from recovered blobs
+    let reconstructedDataColumns = get_data_column_sidecars(signed_block, recovered_cps.get)
+    debug "Reconstructed data columns", count = reconstructedDataColumns.len
+    for data_column in reconstructedDataColumns:
+      if data_column.index notin custodiedColumnIndices:
+        continue
+      finalisedDataColumns.add(data_column)
+  for fc in finalisedDataColumns:
+    db.putDataColumnSidecar(fc)
+    debug "Reconstructed data column written to database",
+      data_column = shortLog(fc)
+
+  ok(finalisedDataColumns)
 
 proc reconstructAndSendDataColumns*(node: BeaconNode) {.async.} =
   let
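The `NUMBER_OF_COLUMNS div 2` bound in the hunk above comes from PeerDAS' 1:2 erasure-coding extension: any 64 of the 128 columns are enough to recover the rest, and if all 128 are already present there is nothing left to reconstruct. The guard, sketched as a standalone predicate (the constant is the PeerDAS preset; the helper name is made up here):

```nim
const NUMBER_OF_COLUMNS = 128  # PeerDAS preset

func canReconstruct(storedColumns: seq[uint64]): bool =
  # Need at least half of the extended columns to recover cells,
  # and reconstruction is pointless once every column is stored.
  storedColumns.len >= NUMBER_OF_COLUMNS div 2 and
    storedColumns.len != NUMBER_OF_COLUMNS

assert not canReconstruct(newSeq[uint64](63))   # too few to recover
assert canReconstruct(newSeq[uint64](64))       # half is enough
assert not canReconstruct(newSeq[uint64](128))  # already complete
```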
@@ -1556,7 +1578,7 @@ proc reconstructAndSendDataColumns*(node: BeaconNode) {.async.} =
     withBlck(blck):
       when typeof(forkyBlck).kind >= ConsensusFork.Deneb:
         if node.config.subscribeAllSubnets:
-          let data_column_sidecars = await node.trySendingReconstructedColumns(forkyBlck)
+          let data_column_sidecars = await node.tryReconstructingDataColumns(forkyBlck)
           if not data_column_sidecars.isOk():
             return
           notice "Data Column Reconstructed and Saved Successfully"