fix: reviews, pass1

Agnish Ghosh 2024-06-21 14:51:54 +05:30
parent f0cae305d6
commit e2afc583cb
No known key found for this signature in database
GPG Key ID: 7BDDA05D1B25E9F8
6 changed files with 17 additions and 11 deletions


@@ -254,7 +254,7 @@ func blobkey(root: Eth2Digest, index: BlobIndex) : array[40, byte] =
   ret

-func columnkey(root: Eth2Digest, index: ColumnIndex) : array[40, byte] =
+func columnkey(root: Eth2Digest, index: ColumnIndex): array[40, byte] =
   var ret: array[40, byte]
   ret[0..<8] = toBytes(index)
   ret[8..<40] = root.data
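The key layout packs the 8-byte column index ahead of the 32-byte root, so keys sharing an index share a common prefix. A stand-alone sketch of the same packing, with illustrative names, assuming toBytes yields the index's little-endian bytes:

func demoColumnKey(rootData: array[32, byte], index: uint64): array[40, byte] =
  # illustrative only, not the repository's code
  for i in 0 ..< 8:
    # little-endian index in bytes 0..7
    result[i] = byte((index shr (8 * i)) and 0xff'u64)
  # the 32-byte root occupies bytes 8..39
  result[8 ..< 40] = rootData

when isMainModule:
  var root: array[32, byte]
  root[0] = 0xab
  let key = demoColumnKey(root, 7)
  doAssert key[0] == 7 and key[8] == 0xab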


@@ -22,7 +22,8 @@ import
   ./el/el_manager,
   ./consensus_object_pools/[
     blockchain_dag, blob_quarantine, block_quarantine, consensus_manager,
-    data_column_quarantine, attestation_pool, sync_committee_msg_pool, validator_change_pool],
+    data_column_quarantine, attestation_pool, sync_committee_msg_pool,
+    validator_change_pool],
   ./spec/datatypes/[base, altair],
   ./spec/eth2_apis/dynamic_fee_recipients,
   ./sync/[sync_manager, request_manager],


@@ -40,11 +40,11 @@ func shortLog*(x: seq[DataColumnFetchRecord]): string =
 func put*(quarantine: var DataColumnQuarantine, dataColumnSidecar: ref DataColumnSidecar) =
   if quarantine.data_columns.lenu64 >= MaxDataColumns:
     # FIFO if full. For example, sync manager and request manager can race to
-    # put blobs in at the same time, so one gets blob insert -> block resolve
-    # -> blob insert sequence, which leaves garbage blobs.
+    # put data columns in at the same time, so one gets data column insert ->
+    # block resolve -> data column insert sequence, which leaves garbage data columns.
     #
     # This also therefore automatically garbage-collects otherwise valid garbage
-    # blobs which are correctly signed, point to either correct block roots or a
+    # data columns which are correctly signed, point to either correct block roots or a
     # block root which isn't ever seen, and then are for any reason simply never
     # used.
     var oldest_column_key: (Eth2Digest, ColumnIndex)
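The FIFO-if-full policy described in the comment fits in a few lines. A minimal sketch, assuming a table-backed store with a separate insertion-order queue (all names illustrative; the quarantine above tracks the oldest key differently):

import std/[tables, deques]

const maxColumns = 4  # stand-in for MaxDataColumns

type DemoQuarantine = object
  order: Deque[(string, uint64)]          # keys in insertion order
  data: Table[(string, uint64), string]   # (root, index) -> sidecar payload

proc put(q: var DemoQuarantine, root: string, index: uint64, col: string) =
  if q.data.len >= maxColumns:
    # FIFO if full: evict the oldest entry before inserting the new one
    let oldest = q.order.popFirst()
    q.data.del(oldest)
  q.order.addLast((root, index))
  q.data[(root, index)] = col

when isMainModule:
  var q: DemoQuarantine
  for i in 0'u64 ..< 6:
    q.put("root" & $i, i, "column")
  doAssert q.data.len == maxColumns  # the two oldest entries were evicted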


@@ -291,7 +291,7 @@ proc fetchDataColumnsFromNetwork(rman: RequestManager,
         discard await rman.blockVerifier(col, false)
     else:
-      debug "Data Columns by root request failed",
+      debug "Data columns by root request failed",
         peer = peer, columns = shortLog(colIdList), err = columns.error()
       peer.updateScore(PeerScoreNoValues)
@@ -472,7 +472,8 @@ proc getMissingDataColumns(rman: RequestManager): seq[DataColumnIdentifier] =
     wallTime = rman.getBeaconTime()
     wallSlot = wallTime.slotOrZero()
     delay = wallTime - wallSlot.start_beacon_time()
-    waitDur = TimeDiff(nanoseconds: DATA_COLUMN_GOSSIP_WAIT_TIME_NS)
+
+  const waitDur = TimeDiff(nanoseconds: DATA_COLUMN_GOSSIP_WAIT_TIME_NS)

   var
     fetches: seq[DataColumnIdentifier]
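Hoisting waitDur out of the `let` block into a `const` reflects that it is built entirely from a literal, so it can be evaluated once at compile time instead of on every call. A tiny sketch with illustrative stand-ins:

type TimeDiffDemo = object
  nanoseconds: int64

# built from literals only, so usable as a compile-time constant
const demoWaitDur = TimeDiffDemo(nanoseconds: 250_000_000)

doAssert demoWaitDur.nanoseconds == 250_000_000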
@@ -496,7 +497,7 @@ proc getMissingDataColumns(rman: RequestManager): seq[DataColumnIdentifier] =
           if id notin fetches:
             fetches.add(id)
         else:
-          # this is a programming error and it should occur
+          # this is a programming error and it should not occur
           warn "missing data column handler found columnless block with all data columns",
             blk = columnless.root,
             commitments=len(forkyBlck.message.body.blob_kzg_commitments)


@@ -80,7 +80,8 @@ type
   BeaconBlocksRes =
     NetRes[List[ref ForkedSignedBeaconBlock, Limit MAX_REQUEST_BLOCKS]]
   BlobSidecarsRes = NetRes[List[ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)]]
-  DataColumnSidecarsRes = NetRes[List[ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMNS)]]
+  DataColumnSidecarsRes =
+    NetRes[List[ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMNS)]]

 proc now*(sm: typedesc[SyncMoment], slots: uint64): SyncMoment {.inline.} =
   SyncMoment(stamp: now(chronos.Moment), slots: slots)


@@ -38,8 +38,8 @@ type
       slot: Slot

   BlockRootsList* = List[Eth2Digest, Limit MAX_REQUEST_BLOCKS]
-  BlobIdentifierList* = List[BlobIdentifier, Limit (MAX_REQUEST_BLOB_SIDECARS)]
-  DataColumnIdentifierList* = List[DataColumnIdentifier, Limit (MAX_REQUEST_DATA_COLUMNS)]
+  BlobIdentifierList* = List[BlobIdentifier, Limit MAX_REQUEST_BLOB_SIDECARS]
+  DataColumnIdentifierList* = List[DataColumnIdentifier, Limit MAX_REQUEST_DATA_COLUMNS]

 proc readChunkPayload*(
     conn: Connection, peer: Peer, MsgType: type (ref ForkedSignedBeaconBlock)):
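The identifier lists above carry their maximum length in the type via `Limit ...`; dropping the parentheses around the limit is purely cosmetic. An illustrative stand-in for such a length-bounded list (not the ssz_serialization API):

type BoundedList[T; maxLen: static int] = object
  items: seq[T]

proc tryAdd[T; maxLen: static int](l: var BoundedList[T, maxLen], x: T): bool =
  if l.items.len >= maxLen:
    return false   # reject items beyond the declared limit
  l.items.add(x)
  true

when isMainModule:
  var l: BoundedList[int, 2]
  doAssert l.tryAdd(1) and l.tryAdd(2) and not l.tryAdd(3)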
@@ -403,6 +403,9 @@ p2pProtocol BeaconSync(version = 1,
     if columnIds.len == 0:
       raise newException(InvalidInputsError, "No data columns requested")

+    if columnIds.lenu64 > MAX_REQUEST_DATA_COLUMNS:
+      raise newException(InvalidInputsError, "Exceeding data column request limit")
+
     let
       dag = peer.networkState.dag
       count = columnIds.len
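The added guard mirrors the existing empty-request check: validate the request size against the protocol limit before touching the DAG. A minimal sketch with illustrative names, a stdlib exception, and a made-up limit:

const maxRequestDataColumns = 128'u64  # stand-in for MAX_REQUEST_DATA_COLUMNS

proc validateColumnRequest(ids: seq[uint64]) =
  if ids.len == 0:
    raise newException(ValueError, "No data columns requested")
  if ids.len.uint64 > maxRequestDataColumns:
    raise newException(ValueError, "Exceeding data column request limit")

when isMainModule:
  validateColumnRequest(@[1'u64, 2, 3])  # within limits: no exception
  try:
    validateColumnRequest(@[])           # empty request: raises
  except ValueError:
    discard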