# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  chronicles, chronos, snappy, snappy/codec,
  ../spec/datatypes/[phase0, altair, bellatrix, capella, deneb, eip7594],
  ../spec/[helpers, forks, network],
  ".."/[beacon_clock],
  ../networking/eth2_network,
  ../consensus_object_pools/blockchain_dag,
  ../rpc/rest_constants

logScope:
  topics = "sync_proto"

const
  blockResponseCost = allowedOpsPerSecondCost(64)
    ## Allow syncing ~64 blocks/sec (minus request costs)
  blobResponseCost = allowedOpsPerSecondCost(1000)
    ## Multiple blobs can exist per block; they are much smaller than blocks
  dataColumnResponseCost = allowedOpsPerSecondCost(1000)
    ## One blob takes roughly as much memory as 8 data columns
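  # Note: these costs feed the `awaitQuota` calls in the request handlers
  # below. `allowedOpsPerSecondCost` (from eth2_network) is assumed to turn an
  # approximate operations-per-second budget into a token-bucket cost; the
  # numbers above are tuning knobs rather than spec constants.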

type
  BeaconSyncNetworkState* {.final.} = ref object of RootObj
    dag: ChainDAGRef
    cfg: RuntimeConfig
    genesisBlockRoot: Eth2Digest

  BlockRootSlot* = object
    blockRoot: Eth2Digest
    slot: Slot

  BlockRootsList* = List[Eth2Digest, Limit MAX_REQUEST_BLOCKS]
  BlobIdentifierList* = List[BlobIdentifier, Limit MAX_REQUEST_BLOB_SIDECARS]
  DataColumnIdentifierList* = List[DataColumnIdentifier, Limit MAX_REQUEST_DATA_COLUMNS]
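
# The `readChunkPayload` overloads below deserialize a single response chunk:
# each chunk is prefixed with 4 context bytes (a fork digest) that select the
# SSZ type used for decoding; unknown digests yield `InvalidContextBytes`.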

proc readChunkPayload*(
    conn: Connection, peer: Peer, MsgType: type (ref ForkedSignedBeaconBlock)):
    Future[NetRes[MsgType]] {.async: (raises: [CancelledError]).} =
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CancelledError as exc:
    raise exc
  except CatchableError:
    return neterr UnexpectedEOF

  static: doAssert ConsensusFork.high == ConsensusFork.Electra
  if contextBytes == peer.network.forkDigests.phase0:
    let res = await readChunkPayload(conn, peer, phase0.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.altair:
    let res = await readChunkPayload(conn, peer, altair.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.bellatrix:
    let res = await readChunkPayload(conn, peer, bellatrix.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.capella:
    let res = await readChunkPayload(conn, peer, capella.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.deneb:
    let res = await readChunkPayload(conn, peer, deneb.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.electra:
    let res = await readChunkPayload(conn, peer, electra.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  else:
    return neterr InvalidContextBytes
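
# The BlobSidecar reader below performs the same context-bytes check but
# decodes a single (non-forked) type and currently accepts only the deneb
# fork digest.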

proc readChunkPayload*(
    conn: Connection, peer: Peer, MsgType: type (ref BlobSidecar)):
    Future[NetRes[MsgType]] {.async: (raises: [CancelledError]).} =
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CancelledError as exc:
    raise exc
  except CatchableError:
    return neterr UnexpectedEOF

  if contextBytes == peer.network.forkDigests.deneb:
    let res = await readChunkPayload(conn, peer, BlobSidecar)
    if res.isOk:
      return ok newClone(res.get)
    else:
      return err(res.error)
  else:
    return neterr InvalidContextBytes
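
# The DataColumnSidecar (PeerDAS / EIP-7594) reader below mirrors the blob
# reader; note that it is likewise gated on the deneb fork digest in this
# branch.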

proc readChunkPayload*(
    conn: Connection, peer: Peer, MsgType: type (ref DataColumnSidecar)):
    Future[NetRes[MsgType]] {.async: (raises: [CancelledError]).} =
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CancelledError as exc:
    raise exc
  except CatchableError:
    return neterr UnexpectedEOF

  if contextBytes == peer.network.forkDigests.deneb:
    let res = await readChunkPayload(conn, peer, DataColumnSidecar)
    if res.isOk:
      return ok newClone(res.get)
    else:
      return err(res.error)
  else:
    return neterr InvalidContextBytes

{.pop.} # TODO fix p2p macro for raises

p2pProtocol BeaconSync(version = 1,
                       networkState = BeaconSyncNetworkState):
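  # Each handler below follows the same pattern: validate the request, walk
  # the DAG for matching blocks or sidecars, charge the per-peer and global
  # request quota, then stream each item as a snappy-framed SSZ chunk tagged
  # with the fork digest of its epoch.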
  proc beaconBlocksByRange_v2(
      peer: Peer,
      startSlot: Slot,
      reqCount: uint64,
      reqStep: uint64,
      response: MultipleChunksResponse[
        ref ForkedSignedBeaconBlock, Limit MAX_REQUEST_BLOCKS])
      {.async, libp2pProtocol("beacon_blocks_by_range", 2).} =
    # TODO Semantically, this request should return a non-ref, but doing so
    #      runs into extreme inefficiency due to the compiler introducing
    #      hidden copies - in future nim versions with move support, this should
    #      be revisited
    # TODO This code is more complicated than it needs to be, since the type
    #      of the multiple chunks response is not actually used in this server
    #      implementation (it's used to derive the signature of the client
    #      function, not in the code below!)
    # TODO although you can't tell from this function definition, a magic
    #      client call that returns `seq[ref ForkedSignedBeaconBlock]` will
    #      be generated by the libp2p macro - we guarantee that seq items
    #      are `not-nil` in the implementation
    # TODO reqStep is deprecated - future versions can remove support for
    #      values != 1: https://github.com/ethereum/consensus-specs/pull/2856
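
    # For orientation, a rough sketch of the macro-generated client side
    # (illustrative only, not normative):
    #   let blocks = await peer.beaconBlocksByRange_v2(startSlot, reqCount, 1'u64)
    #   # -> NetRes[seq[ref ForkedSignedBeaconBlock]]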

    trace "got range request", peer, startSlot,
      count = reqCount, step = reqStep
    if reqCount == 0 or reqStep == 0:
      raise newException(InvalidInputsError, "Empty range requested")

    var blocks: array[MAX_REQUEST_BLOCKS.int, BlockId]
    let
      dag = peer.networkState.dag
      # Limit number of blocks in response
      count = int min(reqCount, blocks.lenu64)
      endIndex = count - 1
      startIndex =
        dag.getBlockRange(startSlot, reqStep,
          blocks.toOpenArray(0, endIndex))

    var
      found = 0
      bytes: seq[byte]

    for i in startIndex..endIndex:
      if dag.getBlockSZ(blocks[i], bytes):
        # In general, there is not much intermediate time between post-merge
        # blocks all being optimistic and none of them being optimistic. The
        # EL catches up, tells the CL the head is verified, and that's it.
        if blocks[i].slot.epoch >= dag.cfg.BELLATRIX_FORK_EPOCH and
            not dag.head.executionValid:
          continue

        let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
          warn "Cannot read block size, database corrupt?",
            bytes = bytes.len(), blck = shortLog(blocks[i])
          continue

        # TODO extract from libp2pProtocol
        peer.awaitQuota(blockResponseCost, "beacon_blocks_by_range/2")
        peer.network.awaitQuota(blockResponseCost, "beacon_blocks_by_range/2")

        await response.writeBytesSZ(
          uncompressedLen, bytes,
          peer.network.forkDigestAtEpoch(blocks[i].slot.epoch).data)

        inc found

    debug "Block range request done",
      peer, startSlot, count, reqStep

  proc beaconBlocksByRoot_v2(
      peer: Peer,
      # Please note that the SSZ list here ensures that the
      # spec constant MAX_REQUEST_BLOCKS is enforced:
      blockRoots: BlockRootsList,
      response: MultipleChunksResponse[
        ref ForkedSignedBeaconBlock, Limit MAX_REQUEST_BLOCKS])
      {.async, libp2pProtocol("beacon_blocks_by_root", 2).} =
    # TODO Semantically, this request should return a non-ref, but doing so
    #      runs into extreme inefficiency due to the compiler introducing
    #      hidden copies - in future nim versions with move support, this should
    #      be revisited
    # TODO This code is more complicated than it needs to be, since the type
    #      of the multiple chunks response is not actually used in this server
    #      implementation (it's used to derive the signature of the client
    #      function, not in the code below!)
    # TODO although you can't tell from this function definition, a magic
    #      client call that returns `seq[ref ForkedSignedBeaconBlock]` will
    #      be generated by the libp2p macro - we guarantee that seq items
    #      are `not-nil` in the implementation
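
    # Unknown or pruned block roots are skipped silently (`getBlockRef`
    # failing simply moves on to the next requested root) rather than failing
    # the whole request.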

    if blockRoots.len == 0:
      raise newException(InvalidInputsError, "No blocks requested")

    let
      dag = peer.networkState.dag
      count = blockRoots.len

    var
      found = 0
      bytes: seq[byte]

    for i in 0..<count:
      let
        blockRef = dag.getBlockRef(blockRoots[i]).valueOr:
          continue

      if dag.getBlockSZ(blockRef.bid, bytes):
        # In general, there is not much intermediate time between post-merge
        # blocks all being optimistic and none of them being optimistic. The
        # EL catches up, tells the CL the head is verified, and that's it.
        if blockRef.slot.epoch >= dag.cfg.BELLATRIX_FORK_EPOCH and
            not dag.head.executionValid:
          continue

        let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
          warn "Cannot read block size, database corrupt?",
            bytes = bytes.len(), blck = shortLog(blockRef)
          continue

        # TODO extract from libp2pProtocol
        peer.awaitQuota(blockResponseCost, "beacon_blocks_by_root/2")
        peer.network.awaitQuota(blockResponseCost, "beacon_blocks_by_root/2")

        await response.writeBytesSZ(
          uncompressedLen, bytes,
          peer.network.forkDigestAtEpoch(blockRef.slot.epoch).data)

        inc found

    debug "Block root request done",
      peer, roots = blockRoots.len, count, found

  # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blobsidecarsbyroot-v1
  proc blobSidecarsByRoot(
      peer: Peer,
      blobIds: BlobIdentifierList,
      response: MultipleChunksResponse[
        ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)])
      {.async, libp2pProtocol("blob_sidecars_by_root", 1).} =
    # TODO Semantically, this request should return a non-ref, but doing so
    #      runs into extreme inefficiency due to the compiler introducing
    #      hidden copies - in future nim versions with move support, this should
    #      be revisited
    # TODO This code is more complicated than it needs to be, since the type
    #      of the multiple chunks response is not actually used in this server
    #      implementation (it's used to derive the signature of the client
    #      function, not in the code below!)
    # TODO although you can't tell from this function definition, a magic
    #      client call that returns `seq[ref BlobSidecar]` will
    #      be generated by the libp2p macro - we guarantee that seq items
    #      are `not-nil` in the implementation
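
    # Sidecars are served straight from the database as snappy-framed SSZ and
    # re-tagged with the fork digest of the owning block's epoch.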

    trace "got blobs root request", peer, len = blobIds.len
    if blobIds.len == 0:
      raise newException(InvalidInputsError, "No blobs requested")

    let
      dag = peer.networkState.dag
      count = blobIds.len

    var
      found = 0
      bytes: seq[byte]

    for i in 0..<count:
      let blockRef = dag.getBlockRef(blobIds[i].block_root).valueOr:
        continue
      let index = blobIds[i].index
      if dag.db.getBlobSidecarSZ(blockRef.bid.root, index, bytes):
        let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
          warn "Cannot read blob size, database corrupt?",
            bytes = bytes.len(), blck = shortLog(blockRef), blobindex = index
          continue

        peer.awaitQuota(blobResponseCost, "blob_sidecars_by_root/1")
        peer.network.awaitQuota(blobResponseCost, "blob_sidecars_by_root/1")

        await response.writeBytesSZ(
          uncompressedLen, bytes,
          peer.network.forkDigestAtEpoch(blockRef.slot.epoch).data)
        inc found

    debug "Blob root request done",
      peer, roots = blobIds.len, count, found

  # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blobsidecarsbyrange-v1
  proc blobSidecarsByRange(
      peer: Peer,
      startSlot: Slot,
      reqCount: uint64,
      response: MultipleChunksResponse[
        ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)])
      {.async, libp2pProtocol("blob_sidecars_by_range", 1).} =
    # TODO This code is more complicated than it needs to be, since the type
    #      of the multiple chunks response is not actually used in this server
    #      implementation (it's used to derive the signature of the client
    #      function, not in the code below!)
    # TODO although you can't tell from this function definition, a magic
    #      client call that returns `seq[ref BlobSidecar]` will
    #      be generated by the libp2p macro - we guarantee that seq items
    #      are `not-nil` in the implementation
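
    # Requests reaching back further than MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
    # from the current head are rejected below with ResourceUnavailableError,
    # matching the blob retention window.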

    trace "got blobs range request", peer, startSlot, count = reqCount
    if reqCount == 0:
      raise newException(InvalidInputsError, "Empty range requested")

    let
      dag = peer.networkState.dag
      epochBoundary =
        if dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS >= dag.head.slot.epoch:
          GENESIS_EPOCH
        else:
          dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS

    if startSlot.epoch < epochBoundary:
      raise newException(ResourceUnavailableError, BlobsOutOfRange)

    var blockIds: array[int(MAX_REQUEST_BLOB_SIDECARS), BlockId]
    let
      count = int min(reqCount, blockIds.lenu64)
      endIndex = count - 1
      startIndex =
        dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex))

    var
      found = 0
      bytes: seq[byte]

    for i in startIndex..endIndex:
      for j in 0..<MAX_BLOBS_PER_BLOCK:
        if dag.db.getBlobSidecarSZ(blockIds[i].root, BlobIndex(j), bytes):
          # In general, there is not much intermediate time between post-merge
          # blocks all being optimistic and none of them being optimistic. The
          # EL catches up, tells the CL the head is verified, and that's it.
          if blockIds[i].slot.epoch >= dag.cfg.BELLATRIX_FORK_EPOCH and
              not dag.head.executionValid:
            continue

          let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
            warn "Cannot read blobs sidecar size, database corrupt?",
              bytes = bytes.len(), blck = shortLog(blockIds[i])
            continue

          # TODO extract from libp2pProtocol
          peer.awaitQuota(blobResponseCost, "blobs_sidecars_by_range/1")
          peer.network.awaitQuota(blobResponseCost, "blobs_sidecars_by_range/1")

          await response.writeBytesSZ(
            uncompressedLen, bytes,
            peer.network.forkDigestAtEpoch(blockIds[i].slot.epoch).data)
          inc found
        else:
          break

    debug "BlobSidecar range request done",
      peer, startSlot, count = reqCount, found

  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/_features/eip7594/p2p-interface.md#datacolumnsidecarsbyroot-v1
  proc dataColumnSidecarsByRoot(
      peer: Peer,
      columnIds: DataColumnIdentifierList,
      response: MultipleChunksResponse[
        ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMNS)])
      {.async, libp2pProtocol("data_column_sidecars_by_root", 1).} =
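    # Mirrors blobSidecarsByRoot: each requested (block_root, column index)
    # pair is looked up in the database and streamed back if present.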

    trace "got data columns root request", peer, len = columnIds.len
    if columnIds.len == 0:
      raise newException(InvalidInputsError, "No data columns requested")

    if columnIds.lenu64 > MAX_REQUEST_DATA_COLUMNS:
      raise newException(InvalidInputsError, "Exceeding data column request limit")

    let
      dag = peer.networkState.dag
      count = columnIds.len

    var
      found = 0
      bytes: seq[byte]

    for i in 0..<count:
      let blockRef = dag.getBlockRef(columnIds[i].block_root).valueOr:
        continue
      let index = columnIds[i].index
      if dag.db.getDataColumnSidecarSZ(blockRef.bid.root, index, bytes):
        let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
          warn "Cannot read data column size, database corrupt?",
            bytes = bytes.len(), blck = shortLog(blockRef), columnIndex = index
          continue

        peer.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_root/1")
        peer.network.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_root/1")

        await response.writeBytesSZ(
          uncompressedLen, bytes,
          peer.network.forkDigestAtEpoch(blockRef.slot.epoch).data)
        inc found

        debug "Responded to Data Column Sidecar By Root",
          peer, blck = shortLog(blockRef), columnIndex = index

    debug "Data column root request done",
      peer, roots = columnIds.len, count, found

  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/_features/eip7594/p2p-interface.md#datacolumnsidecarsbyrange-v1
  proc dataColumnSidecarsByRange(
      peer: Peer,
      startSlot: Slot,
      reqCount: uint64,
      reqColumns: List[ColumnIndex, NUMBER_OF_COLUMNS],
      response: MultipleChunksResponse[
        ref DataColumnSidecar, Limit(MAX_REQUEST_DATA_COLUMNS)])
      {.async, libp2pProtocol("data_column_sidecars_by_range", 1).} =
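    # Responses are produced block by block in slot order, and within each
    # block only the requested columns are returned.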

    trace "got data columns range request", peer, startSlot,
      count = reqCount, columns = reqColumns

    if reqCount == 0 or reqColumns.len == 0:
      raise newException(InvalidInputsError, "Empty range requested")

    let
      dag = peer.networkState.dag
      # Using MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS for now, as cfg yaml
      # isn't properly configured in this branch yet.
      epochBoundary =
        if dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS >= dag.head.slot.epoch:
          GENESIS_EPOCH
        else:
          dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS

    if startSlot.epoch < epochBoundary:
      raise newException(ResourceUnavailableError, DataColumnsOutOfRange)

    var blockIds: array[int(MAX_REQUEST_DATA_COLUMNS), BlockId]
    let
      count = int min(reqCount, blockIds.lenu64)
      endIndex = count - 1
      startIndex =
        dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex))

    var
      found = 0
      bytes: seq[byte]

    for i in startIndex..endIndex:
      for k in reqColumns:
        if dag.db.getDataColumnSidecarSZ(blockIds[i].root, ColumnIndex(k), bytes):
          if blockIds[i].slot.epoch >= dag.cfg.BELLATRIX_FORK_EPOCH and
              not dag.head.executionValid:
            continue

          let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
            warn "Cannot read data column sidecar size, database corrupt?",
              bytes = bytes.len(), blck = shortLog(blockIds[i])
            continue

          peer.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_range/1")
          peer.network.awaitQuota(dataColumnResponseCost, "data_column_sidecars_by_range/1")

          await response.writeBytesSZ(
            uncompressedLen, bytes,
            peer.network.forkDigestAtEpoch(blockIds[i].slot.epoch).data)
          inc found

          var cols: seq[ColumnIndex]
          cols.add(k)
          debug "Responded to DataColumnSidecar range request",
            peer, blck = shortLog(blockIds[i]), columns = cols

    debug "DataColumnSidecar range request done",
      peer, startSlot, count = reqCount, columns = reqColumns, found

proc init*(T: type BeaconSync.NetworkState, dag: ChainDAGRef): T =
  T(
    dag: dag,
  )
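  # Only `dag` is populated here; `cfg` and `genesisBlockRoot` retain their
  # default (zero) values.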