# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  chronicles, chronos, snappy, snappy/codec,
  ../spec/datatypes/[phase0, altair, bellatrix, capella, deneb],
  ../spec/[helpers, forks, network],
  ".."/[beacon_clock],
  ../networking/eth2_network,
  ../consensus_object_pools/blockchain_dag,
  ../rpc/rest_constants

logScope:
  topics = "sync_proto"

const
  blockResponseCost = allowedOpsPerSecondCost(64)
    ## Allow syncing ~64 blocks/sec (minus request costs)
  blobResponseCost = allowedOpsPerSecondCost(1000)
    ## Multiple can exist per block, they are much smaller than blocks
  dataColumnResponseCost = allowedOpsPerSecondCost(8000)
    ## 8 data columns take the same memory as 1 blob approximately

type
  BeaconSyncNetworkState* {.final.} = ref object of RootObj
    dag: ChainDAGRef
    cfg: RuntimeConfig
    genesisBlockRoot: Eth2Digest

  BlockRootSlot* = object
    blockRoot: Eth2Digest
    slot: Slot

  BlockRootsList* = List[Eth2Digest, Limit MAX_REQUEST_BLOCKS]

  BlobIdentifierList* = List[BlobIdentifier, Limit (MAX_REQUEST_BLOB_SIDECARS)]

  DataColumnIdentifierList* =
    List[DataColumnIdentifier, Limit (MAX_REQUEST_DATA_COLUMN_SIDECARS)]

proc readChunkPayload*(
    conn: Connection, peer: Peer, MsgType: type (ref ForkedSignedBeaconBlock)):
    Future[NetRes[MsgType]] {.async: (raises: [CancelledError]).} =
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CatchableError:
    return neterr UnexpectedEOF
  let contextFork =
    peer.network.forkDigests[].consensusForkForDigest(contextBytes).valueOr:
      return neterr InvalidContextBytes

  withConsensusFork(contextFork):
    let res = await readChunkPayload(
      conn, peer, consensusFork.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)

proc readChunkPayload*(
    conn: Connection, peer: Peer, MsgType: type (ref BlobSidecar)):
    Future[NetRes[MsgType]] {.async: (raises: [CancelledError]).} =
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CatchableError:
    return neterr UnexpectedEOF
  let contextFork =
    peer.network.forkDigests[].consensusForkForDigest(contextBytes).valueOr:
      return neterr InvalidContextBytes

  withConsensusFork(contextFork):
    when consensusFork >= ConsensusFork.Deneb:
      let res = await readChunkPayload(conn, peer, BlobSidecar)
      if res.isOk:
        return ok newClone(res.get)
      else:
        return err(res.error)
    else:
      return neterr InvalidContextBytes

proc readChunkPayload*(
    conn: Connection, peer: Peer, MsgType: type (ref DataColumnSidecar)):
    Future[NetRes[MsgType]] {.async: (raises: [CancelledError]).} =
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CatchableError:
    return neterr UnexpectedEOF
  let contextFork =
    peer.network.forkDigests[].consensusForkForDigest(contextBytes).valueOr:
      return neterr InvalidContextBytes

  withConsensusFork(contextFork):
    when consensusFork >= ConsensusFork.Fulu:
      let res = await readChunkPayload(conn, peer, DataColumnSidecar)
      if res.isOk:
        return ok newClone(res.get)
      else:
        return err(res.error)
    else:
      return neterr InvalidContextBytes

{.pop.} # TODO fix p2p macro for raises

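# Helper templates shared by the versioned blob sidecar request handlers:
# the by-root variant serves the individually requested (block_root, index)
# pairs, while the by-range variant enforces the
# MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS retention window and resolves the
# requested slot range to block ids via ChainDAG.getBlockRange.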
template getBlobSidecarsByRoot(
    versionNumber: static string, peer: Peer, dag: ChainDAGRef,
    response: auto, blobIds: BlobIdentifierList) =
  trace "got v" & versionNumber & " blobs range request", peer,
    len = blobIds.len

  if blobIds.len == 0:
    raise newException(InvalidInputsError, "No blobs requested")

  let
    count = blobIds.len

  var
    found = 0
    bytes: seq[byte]

  for i in 0..<count:

template getBlobSidecarsByRange(
    versionNumber: static string, peer: Peer, dag: ChainDAGRef,
    response: auto, startSlot: Slot, reqCount: uint64,
    maxReqSidecars: static Limit) =
  let epochBoundary =
    if dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS >= dag.head.slot.epoch:
      GENESIS_EPOCH
    else:
      dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS

  if startSlot.epoch < epochBoundary:
    raise newException(ResourceUnavailableError, BlobsOutOfRange)

  var blockIds: array[int(maxReqSidecars), BlockId]
  let
    count = int min(reqCount, blockIds.lenu64)
    endIndex = count - 1
    startIndex =
      dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex))

  var
    found = 0
    bytes: seq[byte]

  for i in startIndex..endIndex:
    for j in 0..