fix database persistence issue

This commit is contained in:
Agnish Ghosh 2024-11-12 11:18:34 +07:00
parent f4502b5696
commit fae0079f94
No known key found for this signature in database
GPG Key ID: 7BDDA05D1B25E9F8
2 changed files with 10 additions and 33 deletions

View File

@ -274,7 +274,7 @@ proc processSignedBeaconBlock*(
let data_columns =
when typeof(signedBlock).kind >= ConsensusFork.Deneb:
if self.dataColumnQuarantine[].hasEnoughDataColumns(signedBlock):
if self.dataColumnQuarantine[].hasMissingDataColumns(signedBlock):
Opt.some(self.dataColumnQuarantine[].popDataColumns(signedBlock.root, signedBlock))
else:
discard self.quarantine[].addColumnless(self.dag.finalizedHead.slot,
@ -393,7 +393,6 @@ proc processDataColumnSidecar*(
debug "Data column validated, putting data column in quarantine"
self.dataColumnQuarantine[].put(newClone(dataColumnSidecar))
if self.dataColumnQuarantine[].supernode == false:
self.dag.db.putDataColumnSidecar(dataColumnSidecar)
debug "Validated column belongs to custody, attempting to persist",
data_column = shortLog(dataColumnSidecar)

View File

@ -413,6 +413,9 @@ proc initFullNode(
withBlck(signedBlock):
when consensusFork >= ConsensusFork.Deneb:
if not dataColumnQuarantine[].checkForInitialDcSidecars(forkyBlck):
# We effectively check whether there were blob transactions
# against this block: if we don't see all the blob KZG
# commitments, then no blobs were known.
# We don't have all the data columns for this block, so we have
# to put it in columnless quarantine.
if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck):
@ -1480,7 +1483,7 @@ proc pruneDataColumns(node: BeaconNode, slot: Slot) =
count = count + 1
debug "pruned data columns", count, dataColumnPruneEpoch
proc tryReconstructingDataColumns* (self: BeaconNode,
proc trySendingReconstructedColumns* (self: BeaconNode,
signed_block: deneb.TrustedSignedBeaconBlock |
electra.TrustedSignedBeaconBlock):
Future[Result[seq[DataColumnSidecar], string]] {.async.} =
@ -1515,32 +1518,7 @@ proc tryReconstructingDataColumns* (self: BeaconNode,
data_column_sidecars.add data_column[]
storedColumns.add data_column.index
debugEcho "Pre stored columns"
debugEcho storedColumns
# If the stored column count is less than NUMBER_OF_COLUMNS div 2,
# then reconstruction is not possible; and if all the data columns
# are already stored, then we do not need to reconstruct at all.
if not storedColumns.len < NUMBER_OF_COLUMNS div 2 and storedColumns.len != NUMBER_OF_COLUMNS:
# Recover blobs from saved data column sidecars
let recovered_cps = recover_cells_and_proofs(data_column_sidecars, signed_block)
if not recovered_cps.isOk:
return err("Error recovering cells and proofs from data columns")
# Reconstruct data column sidecars from recovered blobs
let reconstructedDataColumns = get_data_column_sidecars(signed_block, recovered_cps.get)
debugEcho "Reconstructed Data Columns len"
debugEcho reconstructedDataColumns.len
for data_column in reconstructedDataColumns:
if data_column.index notin custodiedColumnIndices:
continue
finalisedDataColumns.add(data_column)
for fc in finalisedDataColumns:
db.putDataColumnSidecar(fc)
debug "Reconstructed data column written to database",
data_column = shortLog(fc)
ok(finalisedDataColumns)
ok(data_column_sidecars)
proc reconstructAndSendDataColumns*(node: BeaconNode) {.async.} =
let
@ -1551,7 +1529,7 @@ proc reconstructAndSendDataColumns*(node: BeaconNode) {.async.} =
withBlck(blck):
when typeof(forkyBlck).kind >= ConsensusFork.Deneb:
if node.config.subscribeAllSubnets:
let data_column_sidecars = await node.tryReconstructingDataColumns(forkyBlck)
let data_column_sidecars = await node.trySendingReconstructedColumns(forkyBlck)
if not data_column_sidecars.isOk():
return
notice "Data Column Reconstructed and Saved Successfully"