make column syncing simpler and avoid reconstruction

agnxsh committed 2024-11-30 19:37:54 +05:30
parent 5d10725737
commit b33ff8a0e3
1 changed file with 17 additions and 17 deletions


@@ -359,8 +359,8 @@ proc groupDataColumns*[T](req: SyncRequest[T],
     Result[seq[DataColumnSidecars], string] =
   var
     grouped = newSeq[DataColumnSidecars](len(blocks))
-    groupedAndReconstructed =
-      newSeq[DataColumnSidecars](len(blocks))
+    # groupedAndReconstructed =
+    #   newSeq[DataColumnSidecars](len(blocks))
     column_cursor = 0
   for block_idx, blck in blocks:
     withBlck(blck[]):
@@ -379,20 +379,20 @@ proc groupDataColumns*[T](req: SyncRequest[T],
           grouped[block_idx].add(data_column_sidecar)
           inc column_cursor
-  for block_idx, blck in blocks:
-    withBlck(blck[]):
-      when consensusFork >= ConsensusFork.Deneb:
-        template kzgs: untyped = forkyBlck.message.body.blob_kzg_commitments
-        if kzgs.len == 0:
-          continue
-        if grouped[block_idx].len >= (NUMBER_OF_COLUMNS div 2):
-          let
-            recovered_cps = recover_cells_and_proofs(grouped[block_idx].mapIt(it[]))
-            recovered_cols = get_data_column_sidecars(forkyBlck, recovered_cps.get)
-            refSeq = recovered_cols.mapIt(newClone it)
-          groupedAndReconstructed[block_idx].add(refSeq)
-        else:
-          groupedAndReconstructed[block_idx].add(grouped[block_idx])
+  # for block_idx, blck in blocks:
+  #   withBlck(blck[]):
+  #     when consensusFork >= ConsensusFork.Deneb:
+  #       template kzgs: untyped = forkyBlck.message.body.blob_kzg_commitments
+  #       if kzgs.len == 0:
+  #         continue
+  #       if grouped[block_idx].len >= (NUMBER_OF_COLUMNS div 2):
+  #         let
+  #           recovered_cps = recover_cells_and_proofs(grouped[block_idx].mapIt(it[]))
+  #           recovered_cols = get_data_column_sidecars(forkyBlck, recovered_cps.get)
+  #           refSeq = recovered_cols.mapIt(newClone it)
+  #         groupedAndReconstructed[block_idx].add(refSeq)
+  #       else:
+  #         groupedAndReconstructed[block_idx].add(grouped[block_idx])
   # if column_cursor != len(data_columns):
   #   # we reached end of blocks without consuming all data columns so either
@@ -400,7 +400,7 @@ proc groupDataColumns*[T](req: SyncRequest[T],
   #   # peer is sending us spurious data columns.
   #   Result[seq[DataColumnSidecars], string].err "invalid block or data column sequence"
   # else:
-  Result[seq[DataColumnSidecars], string].ok groupedAndReconstructed
+  Result[seq[DataColumnSidecars], string].ok grouped

 proc checkDataColumns(data_columns: seq[DataColumnSidecars]): Result[void, string] =
   for data_column_sidecars in data_columns:
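
Stripped of diff markers, the change leaves groupDataColumns with a single pass: walk the flat data_columns response with a cursor, appending each sidecar to the bucket of the block it belongs to, and return grouped as received, instead of running a second pass that rebuilt the full column set via recover_cells_and_proofs whenever at least NUMBER_OF_COLUMNS div 2 columns were present. Below is a self-contained sketch of that cursor-based grouping pattern; the stand-in types StubBlock/StubColumn and the slot-equality matching condition are simplifying assumptions, not the real ForkedSignedBeaconBlock/DataColumnSidecar logic:

```nim
import std/sequtils

type
  StubBlock = object
    slot: uint64
    commitments: int   # stands in for blob_kzg_commitments.len
  StubColumn = object
    slot: uint64

proc groupColumns(blocks: seq[StubBlock],
                  columns: seq[StubColumn]): seq[seq[StubColumn]] =
  ## Single grouping pass: one cursor over the flat response,
  ## one bucket per requested block.
  var
    grouped = newSeq[seq[StubColumn]](blocks.len)
    cursor = 0
  for blockIdx, blck in blocks:
    if blck.commitments == 0:
      continue   # blocks without commitments get no columns
    # Simplified matching: consume consecutive columns for this block's slot.
    while cursor < columns.len and columns[cursor].slot == blck.slot:
      grouped[blockIdx].add columns[cursor]
      inc cursor
  grouped

let
  blocks = @[
    StubBlock(slot: 1, commitments: 1),
    StubBlock(slot: 2, commitments: 0),
    StubBlock(slot: 3, commitments: 2)]
  response = @[StubColumn(slot: 1), StubColumn(slot: 3), StubColumn(slot: 3)]
echo groupColumns(blocks, response).mapIt(it.len)  # -> @[1, 0, 2]
```

The trade-off behind the commit title: skipping the reconstruction pass keeps the sync path simpler and cheaper per response, at the cost of returning only the columns the peer actually sent; recovering the rest, which the erasure coding makes possible once half the columns are available, is no longer attempted here.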