From 2444b3190eccb4577e8eb25b0ffdfe6f5d23f278 Mon Sep 17 00:00:00 2001 From: Agnish Ghosh Date: Mon, 19 Aug 2024 20:08:01 +0530 Subject: [PATCH] init flag for csc, reduce debug messages --- beacon_chain/conf.nim | 5 +++++ beacon_chain/nimbus_beacon_node.nim | 11 +++-------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/beacon_chain/conf.nim b/beacon_chain/conf.nim index 5dd83c062..6c2d5bed8 100644 --- a/beacon_chain/conf.nim +++ b/beacon_chain/conf.nim @@ -557,6 +557,11 @@ type desc: "Maximum number of sync committee periods to retain light client data" name: "light-client-data-max-periods" .}: Option[uint64] + custodySubnetCount* {. + desc: "Number of custody subnets the BN wants to be subscribed to. " & + "Must be in the range: Custody Requirement to Max Data Column Subnet Count" + name: "custody-subnet-count" .}: Option[uint64] + inProcessValidators* {. desc: "Disable the push model (the beacon node tells a signing process with the private keys of the validators what to sign and when) and load the validators in the beacon node itself" defaultValue: true # the use of the nimbus_signing_process binary by default will be delayed until async I/O over stdin/stdout is developed for the child process. 
diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 94527d9b9..52ba77b77 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -1475,7 +1475,7 @@ proc pruneDataColumns(node: BeaconNode, slot: Slot) = proc tryReconstructingDataColumns* (self: BeaconNode, signed_block: deneb.TrustedSignedBeaconBlock | electra.TrustedSignedBeaconBlock): - Result[seq[DataColumnSidecar], string] = + Future[Result[seq[DataColumnSidecar], string]] {.async.} = # Checks whether the data columns can be reconstructed # or not from the recovery matrix @@ -1508,9 +1508,6 @@ proc tryReconstructingDataColumns* (self: BeaconNode, data_column_sidecars.add data_column[] storedColumns.add data_column.index - if columnsOk: - debug "Loaded data column for reconstruction" - # storedColumn number is less than the NUMBER_OF_COLUMNS # then reconstruction is not possible, and if all the data columns # are already stored then we do not need to reconstruct at all @@ -1520,8 +1517,6 @@ proc tryReconstructingDataColumns* (self: BeaconNode, let recovered_cps = recover_cells_and_proofs(data_column_sidecars, storedColumns.len, signed_block) if not recovered_cps.isOk: return err("Error recovering cells and proofs from data columns") - else: - debug "Computed Cells and Proofs successfully!" 
# Reconstruct data column sidecars from recovered blobs let reconstructedDataColumns = get_data_column_sidecars(signed_block, recovered_cps.get) @@ -1532,7 +1527,6 @@ proc tryReconstructingDataColumns* (self: BeaconNode, finalisedDataColumns.add(data_column) db.putDataColumnSidecar(data_column) - notice "Data Column Reconstructed and Saved Successfully" ok(finalisedDataColumns) @@ -1545,9 +1539,10 @@ proc reconstructAndSendDataColumns*(node: BeaconNode) {.async.} = withBlck(blck): when typeof(forkyBlck).kind < ConsensusFork.Deneb: return else: - let data_column_sidecars = node.tryReconstructingDataColumns(forkyBlck) + let data_column_sidecars = await node.tryReconstructingDataColumns(forkyBlck) if not data_column_sidecars.isOk(): return + notice "Data Column Reconstructed and Saved Successfully" let dc = data_column_sidecars.get var das_workers = newSeq[Future[SendResult]](len(dc)) for i in 0..