# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/[tables, sets, macros],
  chronicles, chronos, snappy, snappy/codec,
  libp2p/switch,
  ../spec/datatypes/[phase0, altair, bellatrix, capella, deneb],
  ../spec/[helpers, forks, network],
  ".."/[beacon_clock],
  ../networking/eth2_network,
  ../consensus_object_pools/blockchain_dag,
  ../rpc/rest_constants

logScope:
  topics = "sync"

const
  blockResponseCost = allowedOpsPerSecondCost(64) # Allow syncing ~64 blocks/sec (minus request costs)

  lightClientBootstrapResponseCost = allowedOpsPerSecondCost(1)
    ## Only one bootstrap per peer should ever be needed - no need to allow more
  lightClientUpdateResponseCost = allowedOpsPerSecondCost(1000)
    ## Updates are tiny - we can allow lots of them
  lightClientFinalityUpdateResponseCost = allowedOpsPerSecondCost(100)
  lightClientOptimisticUpdateResponseCost = allowedOpsPerSecondCost(100)
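
# Note on the cost constants above: each response chunk in the handlers below
# is charged against per-peer (and, for blocks and blobs, network-wide) quota
# via `awaitQuota`, so these values bound how quickly a single peer can drain
# responses from us.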

type
  StatusMsg* = object
    forkDigest*: ForkDigest
    finalizedRoot*: Eth2Digest
    finalizedEpoch*: Epoch
    headRoot*: Eth2Digest
    headSlot*: Slot

  ValidatorSetDeltaFlags {.pure.} = enum
    Activation = 0
    Exit = 1

  ValidatorChangeLogEntry* = object
    case kind*: ValidatorSetDeltaFlags
    of Activation:
      pubkey: ValidatorPubKey
    else:
      index: uint32

  BeaconSyncNetworkState = ref object
    dag: ChainDAGRef
    cfg: RuntimeConfig
    forkDigests: ref ForkDigests
    genesisBlockRoot: Eth2Digest
    getBeaconTime: GetBeaconTimeFn

  BeaconSyncPeerState* = ref object
    statusLastTime*: chronos.Moment
    statusMsg*: StatusMsg

  BlockRootSlot* = object
    blockRoot: Eth2Digest
    slot: Slot

  BlockRootsList* = List[Eth2Digest, Limit MAX_REQUEST_BLOCKS]
  BlobIdentifierList* = List[BlobIdentifier, Limit MAX_REQUEST_BLOB_SIDECARS]
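
# The `Limit`s on the list types above mirror the spec request-size constants,
# so oversized by-root / by-identifier requests already fail SSZ decoding
# before they reach the request handlers.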

template readChunkPayload*(
    conn: Connection, peer: Peer, MsgType: type ForkySignedBeaconBlock):
    Future[NetRes[MsgType]] =
  readChunkPayload(conn, peer, MsgType)
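
# The `ref ForkedSignedBeaconBlock` overload below implements the fork-aware
# decoding path: it first reads the 4-byte fork digest (context bytes) that
# prefixes each chunk and then decodes the payload with the SSZ type of the
# matching fork.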

proc readChunkPayload*(
    conn: Connection, peer: Peer, MsgType: type (ref ForkedSignedBeaconBlock)):
    Future[NetRes[MsgType]] {.async.} =
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CatchableError:
    return neterr UnexpectedEOF

  if contextBytes == peer.network.forkDigests.phase0:
    let res = await readChunkPayload(conn, peer, phase0.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.altair:
    let res = await readChunkPayload(conn, peer, altair.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.bellatrix:
    let res = await readChunkPayload(conn, peer, bellatrix.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.capella:
    let res = await readChunkPayload(conn, peer, capella.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.deneb:
    let res = await readChunkPayload(conn, peer, deneb.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  else:
    return neterr InvalidContextBytes

proc readChunkPayload*(
    conn: Connection, peer: Peer, MsgType: type (ref BlobSidecar)):
    Future[NetRes[MsgType]] {.async.} =
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CatchableError:
    return neterr UnexpectedEOF

  if contextBytes == peer.network.forkDigests.deneb:
    let res = await readChunkPayload(conn, peer, BlobSidecar)
    if res.isOk:
      return ok newClone(res.get)
    else:
      return err(res.error)
  else:
    return neterr InvalidContextBytes

proc readChunkPayload*(
    conn: Connection, peer: Peer, MsgType: type SomeForkedLightClientObject):
    Future[NetRes[MsgType]] {.async.} =
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CatchableError:
    return neterr UnexpectedEOF
  let contextFork =
    peer.network.forkDigests[].consensusForkForDigest(contextBytes).valueOr:
      return neterr InvalidContextBytes

  withLcDataFork(lcDataForkAtConsensusFork(contextFork)):
    when lcDataFork > LightClientDataFork.None:
      let res = await eth2_network.readChunkPayload(
        conn, peer, MsgType.Forky(lcDataFork))
      if res.isOk:
        if contextFork !=
            peer.network.cfg.consensusForkAtEpoch(res.get.contextEpoch):
          return neterr InvalidContextBytes
        return ok MsgType.init(res.get)
      else:
        return err(res.error)
    else:
      return neterr InvalidContextBytes

func shortLog*(s: StatusMsg): auto =
  (
    forkDigest: s.forkDigest,
    finalizedRoot: shortLog(s.finalizedRoot),
    finalizedEpoch: shortLog(s.finalizedEpoch),
    headRoot: shortLog(s.headRoot),
    headSlot: shortLog(s.headSlot)
  )
chronicles.formatIt(StatusMsg): shortLog(it)

func disconnectReasonName(reason: uint64): string =
  # Nim doesn't support `uint64` in `case` statements, hence the if/elif chain
  if reason == uint64(ClientShutDown): "Client shutdown"
  elif reason == uint64(IrrelevantNetwork): "Irrelevant network"
  elif reason == uint64(FaultOrError): "Fault or error"
  else: "Disconnected (" & $reason & ")"

func forkDigestAtEpoch(state: BeaconSyncNetworkState,
                       epoch: Epoch): ForkDigest =
  state.forkDigests[].atEpoch(epoch, state.cfg)

proc getCurrentStatus(state: BeaconSyncNetworkState): StatusMsg =
  let
    dag = state.dag
    wallSlot = state.getBeaconTime().slotOrZero

  if dag != nil:
    StatusMsg(
      forkDigest: state.forkDigestAtEpoch(wallSlot.epoch),
      finalizedRoot: dag.finalizedHead.blck.root,
      finalizedEpoch: dag.finalizedHead.slot.epoch,
      headRoot: dag.head.root,
      headSlot: dag.head.slot)
  else:
    StatusMsg(
      forkDigest: state.forkDigestAtEpoch(wallSlot.epoch),
      finalizedRoot: state.genesisBlockRoot,
      finalizedEpoch: GENESIS_EPOCH,
      headRoot: state.genesisBlockRoot,
      headSlot: GENESIS_SLOT)

proc checkStatusMsg(state: BeaconSyncNetworkState, status: StatusMsg):
    Result[void, cstring] =
  let
    dag = state.dag
    wallSlot = (state.getBeaconTime() + MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero

  if status.finalizedEpoch > status.headSlot.epoch:
    # Can be equal during genesis or checkpoint start
    return err("finalized epoch newer than head")

  if status.headSlot > wallSlot:
    return err("head more recent than wall clock")

  if state.forkDigestAtEpoch(wallSlot.epoch) != status.forkDigest:
    return err("fork digests differ")

  if dag != nil:
    if status.finalizedEpoch <= dag.finalizedHead.slot.epoch:
      let blockId = dag.getBlockIdAtSlot(status.finalizedEpoch.start_slot())
      if blockId.isSome and
          (not status.finalizedRoot.isZero) and
          status.finalizedRoot != blockId.get().bid.root:
        return err("peer following different finality")
  else:
    if status.finalizedEpoch == GENESIS_EPOCH:
      if status.finalizedRoot != state.genesisBlockRoot:
        return err("peer following different finality")

  ok()
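
# Forward declarations - `handleStatus` and `setStatusMsg` are referenced from
# within the `p2pProtocol` block below but implemented after it.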

proc handleStatus(peer: Peer,
                  state: BeaconSyncNetworkState,
                  theirStatus: StatusMsg): Future[bool] {.gcsafe.}

proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) {.gcsafe.}

{.pop.} # TODO fix p2p macro for raises
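
# `p2pProtocol` is a macro from `eth2_network`: the procs declared inside it
# become req/resp server handlers, and matching client-side calls are
# generated from the same signatures (see the TODO notes about response types
# in the handlers below).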

p2pProtocol BeaconSync(version = 1,
                       networkState = BeaconSyncNetworkState,
                       peerState = BeaconSyncPeerState):

  onPeerConnected do (peer: Peer, incoming: bool) {.async.}:
    debug "Peer connected",
      peer, peerId = shortLog(peer.peerId), incoming
    # Per the eth2 protocol, whoever dials must send a status message when
    # connected for the first time, but because of how libp2p works, there may
    # be a race between incoming and outgoing connections and disconnects that
    # makes the incoming flag unreliable / obsolete by the time we get to
    # this point - instead of making assumptions, we'll just send a status
    # message redundantly.
    # TODO(zah)
    #      the spec does not prohibit sending the extra status message on
    #      incoming connections, but it should not be necessary - this would
    #      need a dedicated flow in libp2p that resolves the race conditions -
    #      this needs more thinking around the ordering of events and the
    #      given incoming flag
    let
      ourStatus = peer.networkState.getCurrentStatus()
      theirStatus = await peer.status(ourStatus, timeout = RESP_TIMEOUT_DUR)

    if theirStatus.isOk:
      discard await peer.handleStatus(peer.networkState, theirStatus.get())
    else:
      debug "Status response not received in time",
            peer, errorKind = theirStatus.error.kind
      await peer.disconnect(FaultOrError)

  proc status(peer: Peer,
              theirStatus: StatusMsg,
              response: SingleChunkResponse[StatusMsg])
    {.async, libp2pProtocol("status", 1, isRequired = true).} =
    let ourStatus = peer.networkState.getCurrentStatus()
    trace "Sending status message", peer = peer, status = ourStatus
    await response.send(ourStatus)
    discard await peer.handleStatus(peer.networkState, theirStatus)

  proc ping(peer: Peer, value: uint64): uint64
    {.libp2pProtocol("ping", 1, isRequired = true).} =
    return peer.network.metadata.seq_number

  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/p2p-interface.md#transitioning-from-v1-to-v2
  proc getMetaData(peer: Peer): uint64
    {.libp2pProtocol("metadata", 1, isRequired = true).} =
    raise newException(InvalidInputsError, "GetMetaData v1 unsupported")

  proc getMetadata_v2(peer: Peer): altair.MetaData
    {.libp2pProtocol("metadata", 2, isRequired = true).} =
    return peer.network.metadata
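
  # The block-serving handlers below stream blocks as the snappy-framed SSZ
  # bytes stored in the database (`getBlockSZ` + `writeBytesSZ`), prefixed
  # with the fork digest of the block's epoch, so no re-encoding is needed.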

  proc beaconBlocksByRange_v2(
      peer: Peer,
      startSlot: Slot,
      reqCount: uint64,
      reqStep: uint64,
      response: MultipleChunksResponse[
        ref ForkedSignedBeaconBlock, Limit MAX_REQUEST_BLOCKS])
      {.async, libp2pProtocol("beacon_blocks_by_range", 2).} =
    # TODO Semantically, this request should return a non-ref, but doing so
    #      runs into extreme inefficiency due to the compiler introducing
    #      hidden copies - in future nim versions with move support, this should
    #      be revisited
    # TODO This code is more complicated than it needs to be, since the type
    #      of the multiple chunks response is not actually used in this server
    #      implementation (it's used to derive the signature of the client
    #      function, not in the code below!)
    # TODO although you can't tell from this function definition, a magic
    #      client call that returns `seq[ref ForkedSignedBeaconBlock]` will
    #      be generated by the libp2p macro - we guarantee that seq items
    #      are `not-nil` in the implementation
    # TODO reqStep is deprecated - future versions can remove support for
    #      values != 1: https://github.com/ethereum/consensus-specs/pull/2856

    trace "got range request", peer, startSlot,
      count = reqCount, step = reqStep
    if reqCount == 0 or reqStep == 0:
      raise newException(InvalidInputsError, "Empty range requested")

    var blocks: array[MAX_REQUEST_BLOCKS.int, BlockId]
    let
      dag = peer.networkState.dag
      # Limit number of blocks in response
      count = int min(reqCount, blocks.lenu64)
      endIndex = count - 1
      startIndex =
        dag.getBlockRange(startSlot, reqStep,
                          blocks.toOpenArray(0, endIndex))

    var
      found = 0
      bytes: seq[byte]

    for i in startIndex..endIndex:
      if dag.getBlockSZ(blocks[i], bytes):
        # In general, there is not much intermediate time between post-merge
        # blocks all being optimistic and none of them being optimistic. The
        # EL catches up, tells the CL the head is verified, and that's it.
        if blocks[i].slot.epoch >= dag.cfg.BELLATRIX_FORK_EPOCH and
            not dag.head.executionValid:
          continue

        let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
          warn "Cannot read block size, database corrupt?",
            bytes = bytes.len(), blck = shortLog(blocks[i])
          continue

        # TODO extract from libp2pProtocol
        peer.awaitQuota(blockResponseCost, "beacon_blocks_by_range/2")
        peer.network.awaitQuota(blockResponseCost, "beacon_blocks_by_range/2")

        await response.writeBytesSZ(
          uncompressedLen, bytes,
          peer.networkState.forkDigestAtEpoch(blocks[i].slot.epoch).data)

        inc found

    debug "Block range request done",
      peer, startSlot, count, reqStep

  proc beaconBlocksByRoot_v2(
      peer: Peer,
      # Please note that the SSZ list here ensures that the
      # spec constant MAX_REQUEST_BLOCKS is enforced:
      blockRoots: BlockRootsList,
      response: MultipleChunksResponse[
        ref ForkedSignedBeaconBlock, Limit MAX_REQUEST_BLOCKS])
      {.async, libp2pProtocol("beacon_blocks_by_root", 2).} =
    # TODO Semantically, this request should return a non-ref, but doing so
    #      runs into extreme inefficiency due to the compiler introducing
    #      hidden copies - in future nim versions with move support, this should
    #      be revisited
    # TODO This code is more complicated than it needs to be, since the type
    #      of the multiple chunks response is not actually used in this server
    #      implementation (it's used to derive the signature of the client
    #      function, not in the code below!)
    # TODO although you can't tell from this function definition, a magic
    #      client call that returns `seq[ref ForkedSignedBeaconBlock]` will
    #      be generated by the libp2p macro - we guarantee that seq items
    #      are `not-nil` in the implementation
    if blockRoots.len == 0:
      raise newException(InvalidInputsError, "No blocks requested")

    let
      dag = peer.networkState.dag
      count = blockRoots.len

    var
      found = 0
      bytes: seq[byte]

    for i in 0..<count:
      let
        blockRef = dag.getBlockRef(blockRoots[i]).valueOr:
          continue

      if dag.getBlockSZ(blockRef.bid, bytes):
        # In general, there is not much intermediate time between post-merge
        # blocks all being optimistic and none of them being optimistic. The
        # EL catches up, tells the CL the head is verified, and that's it.
        if blockRef.slot.epoch >= dag.cfg.BELLATRIX_FORK_EPOCH and
            not dag.head.executionValid:
          continue

        let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
          warn "Cannot read block size, database corrupt?",
            bytes = bytes.len(), blck = shortLog(blockRef)
          continue

        # TODO extract from libp2pProtocol
        peer.awaitQuota(blockResponseCost, "beacon_blocks_by_root/2")
        peer.network.awaitQuota(blockResponseCost, "beacon_blocks_by_root/2")

        await response.writeBytesSZ(
          uncompressedLen, bytes,
          peer.networkState.forkDigestAtEpoch(blockRef.slot.epoch).data)

        inc found

    debug "Block root request done",
      peer, roots = blockRoots.len, count, found
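
  # The blob sidecar handlers below follow the same pattern as the block
  # handlers: stored snappy-framed SSZ is streamed back directly, and each
  # chunk is charged against the same `blockResponseCost` quota.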

  # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blobsidecarsbyroot-v1
  proc blobSidecarsByRoot(
      peer: Peer,
      blobIds: BlobIdentifierList,
      response: MultipleChunksResponse[
        ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)])
      {.async, libp2pProtocol("blob_sidecars_by_root", 1).} =
    # TODO Semantically, this request should return a non-ref, but doing so
    #      runs into extreme inefficiency due to the compiler introducing
    #      hidden copies - in future nim versions with move support, this should
    #      be revisited
    # TODO This code is more complicated than it needs to be, since the type
    #      of the multiple chunks response is not actually used in this server
    #      implementation (it's used to derive the signature of the client
    #      function, not in the code below!)
    # TODO although you can't tell from this function definition, a magic
    #      client call that returns `seq[ref BlobSidecar]` will
    #      be generated by the libp2p macro - we guarantee that seq items
    #      are `not-nil` in the implementation
    trace "got blobs root request", peer, len = blobIds.len
    if blobIds.len == 0:
      raise newException(InvalidInputsError, "No blobs requested")

    let
      dag = peer.networkState.dag
      count = blobIds.len

    var
      found = 0
      bytes: seq[byte]

    for i in 0..<count:
      let blockRef = dag.getBlockRef(blobIds[i].block_root).valueOr:
        continue
      let index = blobIds[i].index
      if dag.db.getBlobSidecarSZ(blockRef.bid.root, index, bytes):
        let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
          warn "Cannot read blob size, database corrupt?",
            bytes = bytes.len(), blck = shortLog(blockRef), blobindex = index
          continue

        peer.awaitQuota(blockResponseCost, "blob_sidecars_by_root/1")
        peer.network.awaitQuota(blockResponseCost, "blob_sidecars_by_root/1")

        await response.writeBytesSZ(
          uncompressedLen, bytes,
          peer.networkState.forkDigestAtEpoch(blockRef.slot.epoch).data)
        inc found

    debug "Blob root request done",
      peer, roots = blobIds.len, count, found

  # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blobsidecarsbyrange-v1
  proc blobSidecarsByRange(
      peer: Peer,
      startSlot: Slot,
      reqCount: uint64,
      response: MultipleChunksResponse[
        ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)])
      {.async, libp2pProtocol("blob_sidecars_by_range", 1).} =
    # TODO This code is more complicated than it needs to be, since the type
    #      of the multiple chunks response is not actually used in this server
    #      implementation (it's used to derive the signature of the client
    #      function, not in the code below!)
    # TODO although you can't tell from this function definition, a magic
    #      client call that returns `seq[ref BlobSidecar]` will
    #      be generated by the libp2p macro - we guarantee that seq items
    #      are `not-nil` in the implementation

    trace "got blobs range request", peer, startSlot, count = reqCount
    if reqCount == 0:
      raise newException(InvalidInputsError, "Empty range requested")

    let
      dag = peer.networkState.dag
      epochBoundary =
        if dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS >= dag.head.slot.epoch:
          GENESIS_EPOCH
        else:
          dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS

    if startSlot.epoch < epochBoundary:
      raise newException(ResourceUnavailableError, BlobsOutOfRange)

    var blockIds: array[int(MAX_REQUEST_BLOB_SIDECARS), BlockId]
    let
      count = int min(reqCount, blockIds.lenu64)
      endIndex = count - 1
      startIndex =
        dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex))

    var
      found = 0
      bytes: seq[byte]

    for i in startIndex..endIndex:
      for j in 0..<MAX_BLOBS_PER_BLOCK:
        if dag.db.getBlobSidecarSZ(blockIds[i].root, BlobIndex(j), bytes):
          # In general, there is not much intermediate time between post-merge
          # blocks all being optimistic and none of them being optimistic. The
          # EL catches up, tells the CL the head is verified, and that's it.
          if blockIds[i].slot.epoch >= dag.cfg.BELLATRIX_FORK_EPOCH and
              not dag.head.executionValid:
            continue

          let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
            warn "Cannot read blobs sidecar size, database corrupt?",
              bytes = bytes.len(), blck = shortLog(blockIds[i])
            continue

          # TODO extract from libp2pProtocol
          peer.awaitQuota(blockResponseCost, "blobs_sidecars_by_range/1")
          peer.network.awaitQuota(blockResponseCost, "blobs_sidecars_by_range/1")

          await response.writeBytesSZ(
            uncompressedLen, bytes,
            peer.networkState.forkDigestAtEpoch(blockIds[i].slot.epoch).data)
          inc found
        else:
          break

    debug "BlobSidecar range request done",
      peer, startSlot, count = reqCount, found
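
  # The light client handlers below are only reachable when light client data
  # serving is enabled (`dag.lcDataStore.serve`); each response is a single
  # SSZ object prefixed with the fork digest of its context epoch.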

  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap
  proc lightClientBootstrap(
      peer: Peer,
      blockRoot: Eth2Digest,
      response: SingleChunkResponse[ForkedLightClientBootstrap])
      {.async, libp2pProtocol("light_client_bootstrap", 1,
                              isLightClientRequest = true).} =
    trace "Received LC bootstrap request", peer, blockRoot
    let dag = peer.networkState.dag
    doAssert dag.lcDataStore.serve

    let bootstrap = dag.getLightClientBootstrap(blockRoot)
    withForkyBootstrap(bootstrap):
      when lcDataFork > LightClientDataFork.None:
        let
          contextEpoch = forkyBootstrap.contextEpoch
          contextBytes = peer.networkState.forkDigestAtEpoch(contextEpoch).data

        # TODO extract from libp2pProtocol
        peer.awaitQuota(
          lightClientBootstrapResponseCost,
          "light_client_bootstrap/1")
        await response.sendSSZ(forkyBootstrap, contextBytes)
      else:
        raise newException(ResourceUnavailableError, LCBootstrapUnavailable)

    debug "LC bootstrap request done", peer, blockRoot

  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
  proc lightClientUpdatesByRange(
      peer: Peer,
      startPeriod: SyncCommitteePeriod,
      reqCount: uint64,
      response: MultipleChunksResponse[
        ForkedLightClientUpdate, MAX_REQUEST_LIGHT_CLIENT_UPDATES])
      {.async, libp2pProtocol("light_client_updates_by_range", 1,
                              isLightClientRequest = true).} =
    trace "Received LC updates by range request", peer, startPeriod, reqCount
    let dag = peer.networkState.dag
    doAssert dag.lcDataStore.serve

    let
      headPeriod = dag.head.slot.sync_committee_period
      # Limit number of updates in response
      maxSupportedCount =
        if startPeriod > headPeriod:
          0'u64
        else:
          min(headPeriod + 1 - startPeriod, MAX_REQUEST_LIGHT_CLIENT_UPDATES)
      count = min(reqCount, maxSupportedCount)
      onePastPeriod = startPeriod + count

    var found = 0
    for period in startPeriod..<onePastPeriod:
      let update = dag.getLightClientUpdateForPeriod(period)
      withForkyUpdate(update):
        when lcDataFork > LightClientDataFork.None:
          let
            contextEpoch = forkyUpdate.contextEpoch
            contextBytes =
              peer.networkState.forkDigestAtEpoch(contextEpoch).data

          # TODO extract from libp2pProtocol
          peer.awaitQuota(
            lightClientUpdateResponseCost,
            "light_client_updates_by_range/1")
          await response.writeSSZ(forkyUpdate, contextBytes)
          inc found
        else:
          discard

    debug "LC updates by range request done", peer, startPeriod, count, found

  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
  proc lightClientFinalityUpdate(
      peer: Peer,
      response: SingleChunkResponse[ForkedLightClientFinalityUpdate])
      {.async, libp2pProtocol("light_client_finality_update", 1,
                              isLightClientRequest = true).} =
    trace "Received LC finality update request", peer
    let dag = peer.networkState.dag
    doAssert dag.lcDataStore.serve

    let finality_update = dag.getLightClientFinalityUpdate()
    withForkyFinalityUpdate(finality_update):
      when lcDataFork > LightClientDataFork.None:
        let
          contextEpoch = forkyFinalityUpdate.contextEpoch
          contextBytes = peer.networkState.forkDigestAtEpoch(contextEpoch).data

        # TODO extract from libp2pProtocol
        peer.awaitQuota(
          lightClientFinalityUpdateResponseCost,
          "light_client_finality_update/1")
        await response.sendSSZ(forkyFinalityUpdate, contextBytes)
      else:
        raise newException(ResourceUnavailableError, LCFinUpdateUnavailable)

    debug "LC finality update request done", peer

  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
  proc lightClientOptimisticUpdate(
      peer: Peer,
      response: SingleChunkResponse[ForkedLightClientOptimisticUpdate])
      {.async, libp2pProtocol("light_client_optimistic_update", 1,
                              isLightClientRequest = true).} =
    trace "Received LC optimistic update request", peer
    let dag = peer.networkState.dag
    doAssert dag.lcDataStore.serve

    let optimistic_update = dag.getLightClientOptimisticUpdate()
    withForkyOptimisticUpdate(optimistic_update):
      when lcDataFork > LightClientDataFork.None:
        let
          contextEpoch = forkyOptimisticUpdate.contextEpoch
          contextBytes = peer.networkState.forkDigestAtEpoch(contextEpoch).data

        # TODO extract from libp2pProtocol
        peer.awaitQuota(
          lightClientOptimisticUpdateResponseCost,
          "light_client_optimistic_update/1")
        await response.sendSSZ(forkyOptimisticUpdate, contextBytes)
      else:
        raise newException(ResourceUnavailableError, LCOptUpdateUnavailable)

    debug "LC optimistic update request done", peer

  proc goodbye(peer: Peer,
               reason: uint64)
    {.async, libp2pProtocol("goodbye", 1, isRequired = true).} =
    debug "Received Goodbye message", reason = disconnectReasonName(reason), peer

proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) =
  debug "Peer status", peer, statusMsg
  peer.state(BeaconSync).statusMsg = statusMsg
  peer.state(BeaconSync).statusLastTime = Moment.now()

proc handleStatus(peer: Peer,
                  state: BeaconSyncNetworkState,
                  theirStatus: StatusMsg): Future[bool] {.async, gcsafe.} =
  let
    res = checkStatusMsg(state, theirStatus)

  return if res.isErr():
    debug "Irrelevant peer", peer, theirStatus, err = res.error()
    await peer.disconnect(IrrelevantNetwork)
    false
  else:
    peer.setStatusMsg(theirStatus)

    if peer.connectionState == Connecting:
      # As soon as we get here it means that we passed the handshake
      # successfully, so we can add this peer to the PeerPool.
      await peer.handlePeer()
    true

proc updateStatus*(peer: Peer): Future[bool] {.async.} =
  ## Request `status` of remote peer ``peer``.
  let
    nstate = peer.networkState(BeaconSync)
    ourStatus = getCurrentStatus(nstate)

  let theirFut = awaitne peer.status(ourStatus, timeout = RESP_TIMEOUT_DUR)
  if theirFut.failed():
    return false
  else:
    let theirStatus = theirFut.read()
    if theirStatus.isOk:
      return await peer.handleStatus(nstate, theirStatus.get())
    else:
      return false

proc getHeadSlot*(peer: Peer): Slot =
  ## Returns head slot for specific peer ``peer``.
  peer.state(BeaconSync).statusMsg.headSlot

proc getFinalizedEpoch*(peer: Peer): Epoch =
  ## Returns finalized epoch for specific peer ``peer``.
  peer.state(BeaconSync).statusMsg.finalizedEpoch

proc initBeaconSync*(network: Eth2Node, dag: ChainDAGRef,
                     getBeaconTime: GetBeaconTimeFn) =
  var networkState = network.protocolState(BeaconSync)
  networkState.dag = dag
  networkState.cfg = dag.cfg
  networkState.forkDigests = dag.forkDigests
  networkState.genesisBlockRoot = dag.genesisBlockRoot
  networkState.getBeaconTime = getBeaconTime

proc initBeaconSync*(network: Eth2Node,
                     cfg: RuntimeConfig,
                     forkDigests: ref ForkDigests,
                     genesisBlockRoot: Eth2Digest,
                     getBeaconTime: GetBeaconTimeFn) =
  var networkState = network.protocolState(BeaconSync)
  networkState.dag = nil
  networkState.cfg = cfg
  networkState.forkDigests = forkDigests
  networkState.genesisBlockRoot = genesisBlockRoot
  networkState.getBeaconTime = getBeaconTime
|