fix: reviews, pass1
This commit is contained in:
parent
f0cae305d6
commit
e2afc583cb
|
@ -22,7 +22,8 @@ import
|
|||
./el/el_manager,
|
||||
./consensus_object_pools/[
|
||||
blockchain_dag, blob_quarantine, block_quarantine, consensus_manager,
|
||||
data_column_quarantine, attestation_pool, sync_committee_msg_pool, validator_change_pool],
|
||||
data_column_quarantine, attestation_pool, sync_committee_msg_pool,
|
||||
validator_change_pool],
|
||||
./spec/datatypes/[base, altair],
|
||||
./spec/eth2_apis/dynamic_fee_recipients,
|
||||
./sync/[sync_manager, request_manager],
|
||||
|
|
|
@ -40,11 +40,11 @@ func shortLog*(x: seq[DataColumnFetchRecord]): string =
|
|||
func put*(quarantine: var DataColumnQuarantine, dataColumnSidecar: ref DataColumnSidecar) =
|
||||
if quarantine.data_columns.lenu64 >= MaxDataColumns:
|
||||
# FIFO if full. For example, sync manager and request manager can race to
|
||||
# put blobs in at the same time, so one gets blob insert -> block resolve
|
||||
# -> blob insert sequence, which leaves garbage blobs.
|
||||
# put data columns in at the same time, so one gets data column insert -> block resolve
|
||||
# -> data columns insert sequence, which leaves garbage data columns.
|
||||
#
|
||||
# This also therefore automatically garbage-collects otherwise valid garbage
|
||||
# blobs which are correctly signed, point to either correct block roots or a
|
||||
# data columns which are correctly signed, point to either correct block roots or a
|
||||
# block root which isn't ever seen, and then are for any reason simply never
|
||||
# used.
|
||||
var oldest_column_key: (Eth2Digest, ColumnIndex)
|
||||
|
|
|
@ -291,7 +291,7 @@ proc fetchDataColumnsFromNetwork(rman: RequestManager,
|
|||
discard await rman.blockVerifier(col, false)
|
||||
|
||||
else:
|
||||
debug "Data Columns by root request failed",
|
||||
debug "Data columns by root request failed",
|
||||
peer = peer, columns = shortLog(colIdList), err = columns.error()
|
||||
peer.updateScore(PeerScoreNoValues)
|
||||
|
||||
|
@ -472,7 +472,8 @@ proc getMissingDataColumns(rman: RequestManager): seq[DataColumnIdentifier] =
|
|||
wallTime = rman.getBeaconTime()
|
||||
wallSlot = wallTime.slotOrZero()
|
||||
delay = wallTime - wallSlot.start_beacon_time()
|
||||
waitDur = TimeDiff(nanoseconds: DATA_COLUMN_GOSSIP_WAIT_TIME_NS)
|
||||
|
||||
const waitDur = TimeDiff(nanoseconds: DATA_COLUMN_GOSSIP_WAIT_TIME_NS)
|
||||
|
||||
var
|
||||
fetches: seq[DataColumnIdentifier]
|
||||
|
@ -496,7 +497,7 @@ proc getMissingDataColumns(rman: RequestManager): seq[DataColumnIdentifier] =
|
|||
if id notin fetches:
|
||||
fetches.add(id)
|
||||
else:
|
||||
# this is a programming error and it should occur
|
||||
# this is a programming error and it should not occur
|
||||
warn "missing data column handler found columnless block with all data columns",
|
||||
blk = columnless.root,
|
||||
commitments=len(forkyBlck.message.body.blob_kzg_commitments)
|
||||
|
|
|
@ -80,7 +80,8 @@ type
|
|||
BeaconBlocksRes =
|
||||
NetRes[List[ref ForkedSignedBeaconBlock, Limit MAX_REQUEST_BLOCKS]]
|
||||
BlobSidecarsRes = NetRes[List[ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)]]
|
||||
DataColumnSidecarsRes = NetRes[List[ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMNS)]]
|
||||
DataColumnSidecarsRes =
|
||||
NetRes[List[ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMNS)]]
|
||||
|
||||
proc now*(sm: typedesc[SyncMoment], slots: uint64): SyncMoment {.inline.} =
|
||||
SyncMoment(stamp: now(chronos.Moment), slots: slots)
|
||||
|
|
|
@ -38,8 +38,8 @@ type
|
|||
slot: Slot
|
||||
|
||||
BlockRootsList* = List[Eth2Digest, Limit MAX_REQUEST_BLOCKS]
|
||||
BlobIdentifierList* = List[BlobIdentifier, Limit (MAX_REQUEST_BLOB_SIDECARS)]
|
||||
DataColumnIdentifierList* = List[DataColumnIdentifier, Limit (MAX_REQUEST_DATA_COLUMNS)]
|
||||
BlobIdentifierList* = List[BlobIdentifier, Limit MAX_REQUEST_BLOB_SIDECARS]
|
||||
DataColumnIdentifierList* = List[DataColumnIdentifier, Limit MAX_REQUEST_DATA_COLUMNS]
|
||||
|
||||
proc readChunkPayload*(
|
||||
conn: Connection, peer: Peer, MsgType: type (ref ForkedSignedBeaconBlock)):
|
||||
|
@ -403,6 +403,9 @@ p2pProtocol BeaconSync(version = 1,
|
|||
if columnIds.len == 0:
|
||||
raise newException(InvalidInputsError, "No data columns requested")
|
||||
|
||||
if columnIds.lenu64 > MAX_REQUEST_DATA_COLUMNS:
|
||||
raise newException(InvalidInputsError, "Exceeding data column request limit")
|
||||
|
||||
let
|
||||
dag = peer.networkState.dag
|
||||
count = columnIds.len
|
||||
|
|
Loading…
Reference in New Issue