save work push, build failing

This commit is contained in:
Agnish Ghosh 2024-06-15 00:23:47 +05:30
parent 02e5430468
commit 986a2bdcdc
No known key found for this signature in database
GPG Key ID: 7BDDA05D1B25E9F8
3 changed files with 160 additions and 0 deletions

View File

@ -213,6 +213,8 @@ const
"LC finality update unavailable"
LCOptUpdateUnavailable* =
"LC optimistic update unavailable"
DataColumnsOutOfRange* =
"Requested slot is out of data column window"
DeprecatedRemovalBeaconBlocksDebugStateV1* =
"v1/beacon/blocks/{block_id} and v1/debug/beacon/states/{state_id} " &
"endpoints were deprecated and replaced by v2: " &

View File

@ -80,6 +80,7 @@ type
BeaconBlocksRes =
NetRes[List[ref ForkedSignedBeaconBlock, Limit MAX_REQUEST_BLOCKS]]
BlobSidecarsRes = NetRes[List[ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)]]
DataColumnSidecarsRes = NetRes[List[ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMNS)]]
proc now*(sm: typedesc[SyncMoment], slots: uint64): SyncMoment {.inline.} =
  ## Snapshot sync progress: pairs the current wall-clock moment with the
  ## number of slots processed so far.
  let stamp = now(chronos.Moment)
  SyncMoment(stamp: stamp, slots: slots)

View File

@ -24,6 +24,8 @@ const
## Allow syncing ~64 blocks/sec (minus request costs)
blobResponseCost = allowedOpsPerSecondCost(1000)
## Multiple can exist per block, they are much smaller than blocks
dataColumnResponseCost = allowedOpsPerSecondCost(1000)
## 1 blob has an equivalent memory of 8 data columns
type
BeaconSyncNetworkState* {.final.} = ref object of RootObj
@ -369,6 +371,161 @@ p2pProtocol BeaconSync(version = 1,
debug "BlobSidecar range request done",
peer, startSlot, count = reqCount, found
proc dataColumnSidecarsByRoot(
    peer: Peer,
    columnIds: DataColumnIdentifierList,
    response: MultipleChunksResponse[
      ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMNS)])
    {.async, libp2pProtocol("data_column_sidecars_by_root", 1).} =
  ## Serve data column sidecars looked up by (block_root, column_index)
  ## pairs. Identifiers whose block or column is unknown are silently
  ## skipped; each sidecar found in the database is streamed back as a
  ## framed SSZ chunk.
  # TODO Semantically, this request should return a non-ref, but doing so
  #      runs into extreme inefficiency due to the compiler introducing
  #      hidden copies - in future nim versions with move support, this should
  #      be revisited
  # TODO This code is more complicated than it needs to be, since the type
  #      of the multiple chunks response is not actually used in this server
  #      implementation (it's used to derive the signature of the client
  #      function, not in the code below!)
  # TODO although you can't tell from this function definition, a magic
  #      client call that returns `seq[ref DataColumnSidecar]` will
  #      be generated by the libp2p macro - we guarantee that seq items
  #      are `not-nil` in the implementation
  # Fix: the original referenced undefined `blobIds` here (copy-paste from
  # the blob handler), which broke the build - the parameter is `columnIds`.
  trace "got data columns root request", peer, len = columnIds.len
  if columnIds.len == 0:
    raise newException(InvalidInputsError, "No data columns requested")
  let
    dag = peer.networkState.dag
    count = columnIds.len
  var
    found = 0
    bytes: seq[byte]
  for i in 0..<count:
    # Unknown block roots are skipped rather than treated as errors.
    let blockRef = dag.getBlockRef(columnIds[i].block_root).valueOr:
      continue
    let index = columnIds[i].index
    if dag.db.getDataColumnSidecarSZ(blockRef.bid.root, index, bytes):
      let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
        warn "Cannot read data column size, database corrupt?",
          bytes = bytes.len(), blck = shortLog(blockRef), columnindex = index
        continue
      # Charge the data-column quota declared above, not the blob quota the
      # original mistakenly reused.
      peer.awaitQuota(
        dataColumnResponseCost, "data_column_sidecars_by_root/1")
      peer.network.awaitQuota(
        dataColumnResponseCost, "data_column_sidecars_by_root/1")
      await response.writeBytesSZ(
        uncompressedLen, bytes,
        peer.network.forkDigestAtEpoch(blockRef.slot.epoch).data)
      inc found
  debug "Data column root request done",
    peer, roots = columnIds.len, count, found
# # https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#datacolumnsidecarsbyroot-v1
# proc dataColumnSidecarsByRoot(
# peer: Peer,
# columnIds: DataColumnIdentifierList,
# response: MultipleChunksResponse[
# ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMN_SIDECARS)])
# {.async, libp2pProtocol("data_column_sidecars_by_root", 1).} =
# trace "got data columns range request", peer, len = columnIds.len
# if columnIds.len == 0:
# raise newException(InvalidInputsError, "No data columns requested")
# let
# dag = peer.networkState.dag
# count = columnIds.len
# var
# found = 0
# bytes: seq[byte]
# for i in 0..<count:
# let blockRef = dag.getBlockRef(columnIds[i].block_root).valueOr:
# continue
# let index = columnIds[i].index
# if dag.db.getDataColumnSidecarSZ(blockRef.bid.root, index, bytes):
# let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
# warn "Cannot read data column size, database corrupt",
# bytes = bytes.len(), blck = shortLog(blockRef), columnindex = index
# continue
# peer.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_root/1")
# peer.network.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_root/1")
# await response.writeBytesSSZ(
# uncompressedLen, bytes,
# peer.network.forkDigestAtEpoch(blockRef.slot.epoch).data)
# inc found
# debug "Data Column root request done",
# peer, roots = columnIds.len, count, found
# # https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#datacolumnsidecarsbyrange-v1
# proc dataColumnSidecarByRange(
# peer: Peer,
# startSlot: Slot,
# reqCount: uint64,
# reqColumns: List[ColumnIndex, NUMBER_OF_COLUMNS],
# response: MultipleChunksResponse[
# ref DataColumnSidecar, Limit[MAX_REQUEST_DATA_COLUMN_SIDECARS]])
# {.async, libp2pProtocol("data_column_sidecars_by_range", 1).} =
# trace "got data columns range request", peer, startSlot,
# count = reqCount, columns = reqColumns
# if reqCount == 0 or reqColumns.len == 0:
# raise newException(InvalidInputsError, "Empty range requested")
# let
# dag = peer.networkState.dag
# epochBoundary =
# if dag.cfg.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS >= dag.head.slot.epoch:
# GENESIS_EPOCH
# else:
# dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
# if startSlot.epoch < epochBoundary:
# raise newException(ResourceUnavailableError, DataColumnsOutOfRange)
# var blockIds: array[int(MAX_REQUEST_DATA_COLUMNS), BlockId]
# let
# count = int min(reqCount, blockIds.lenu64)
# endIndex = count - 1
# startIndex =
# dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex))
# var
# found = 0
# bytes: seq[byte]
# for i in startIndex..endIndex:
# for j in 0..<MAX_REQUEST_DATA_COLUMNS:
# if dag.db.getDataColumnSidecarSZ(blockIds[i].root, ColumnIndex(j), bytes):
# if blockIds[i].slot.epoch >= dag.cfg.BELLATRIX_FORK_EPOCH and
# not dag.head.executionValid:
# continue
# let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
#                 warn "Cannot read data column sidecar size, database corrupt?",
# bytes = bytes.len(), blck = shortLog(blockIds[i])
# continue
# peer.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_range/1")
# peer.network.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_range/1")
# await response.writeBytesSSZ(
# uncompressedLen, bytes,
# peer.network.forkDigestAtEpoch(blockIds[i].slot.epoch).data)
# inc found
# else:
# break
# debug "DataColumnSidecar range request done",
# peer, startSlot, count = reqCount, columns = reqColumns, found
proc init*(T: type BeaconSync.NetworkState, dag: ChainDAGRef): T =
T(
dag: dag,