# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  options, tables, sets, macros,
  chronicles, chronos, stew/ranges/bitranges, libp2p/switch,
  ../spec/datatypes/[phase0, altair],
  ../spec/[helpers, forks, network],
  ".."/[beacon_node_types, beacon_clock],
  ../networking/eth2_network,
  ../consensus_object_pools/blockchain_dag

logScope:
  topics = "sync"

const
  MAX_REQUEST_BLOCKS = 1024

  blockByRootLookupCost = allowedOpsPerSecondCost(50)
  blockResponseCost = allowedOpsPerSecondCost(100)
  blockByRangeLookupCost = allowedOpsPerSecondCost(20)

type
  StatusMsg* = object
    forkDigest*: ForkDigest
    finalizedRoot*: Eth2Digest
    finalizedEpoch*: Epoch
    headRoot*: Eth2Digest
    headSlot*: Slot

  ValidatorSetDeltaFlags {.pure.} = enum
    Activation = 0
    Exit = 1

  ValidatorChangeLogEntry* = object
    case kind*: ValidatorSetDeltaFlags
    of Activation:
      pubkey: ValidatorPubKey
    else:
      index: uint32

  BeaconSyncNetworkState* = ref object
    dag*: ChainDAGRef
    getBeaconTime*: GetBeaconTimeFn

  BeaconSyncPeerState* = ref object
    statusLastTime*: chronos.Moment
    statusMsg*: StatusMsg

  BlockRootSlot* = object
    blockRoot: Eth2Digest
    slot: Slot

  BlockRootsList* = List[Eth2Digest, Limit MAX_REQUEST_BLOCKS]

proc readChunkPayload*(conn: Connection, peer: Peer,
                       MsgType: type ForkedSignedBeaconBlock):
                       Future[NetRes[ForkedSignedBeaconBlock]] {.async.} =
  ## Read the fork-digest context bytes that precede the chunk payload and
  ## decode the payload as the `SignedBeaconBlock` of the matching fork.
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CatchableError as e:
    return neterr UnexpectedEOF

  if contextBytes == peer.network.forkDigests.phase0:
    let res = await readChunkPayload(conn, peer, phase0.SignedBeaconBlock)
    if res.isOk:
      return ok ForkedSignedBeaconBlock.init(res.get)
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.altair:
    let res = await readChunkPayload(conn, peer, altair.SignedBeaconBlock)
    if res.isOk:
      return ok ForkedSignedBeaconBlock.init(res.get)
    else:
      return err(res.error)
  else:
    return neterr InvalidContextBytes

proc sendResponseChunk*(response: UntypedResponse,
                        val: ForkedSignedBeaconBlock): Future[void] =
  ## Write a single response chunk, prefixing it with the fork digest that
  ## matches the fork of `val`.
  inc response.writtenChunks

  case val.kind
  of BeaconBlockFork.Phase0:
    response.stream.writeChunk(some ResponseCode.Success,
                               SSZ.encode(val.phase0Block),
                               response.peer.network.forkDigests.phase0.bytes)
  of BeaconBlockFork.Altair:
    response.stream.writeChunk(some ResponseCode.Success,
                               SSZ.encode(val.altairBlock),
                               response.peer.network.forkDigests.altair.bytes)
  of BeaconBlockFork.Merge:
    response.stream.writeChunk(some ResponseCode.Success,
                               SSZ.encode(val.mergeBlock),
                               response.peer.network.forkDigests.merge.bytes)

func shortLog*(s: StatusMsg): auto =
  (
    forkDigest: s.forkDigest,
    finalizedRoot: shortLog(s.finalizedRoot),
    finalizedEpoch: shortLog(s.finalizedEpoch),
    headRoot: shortLog(s.headRoot),
    headSlot: shortLog(s.headSlot)
  )
chronicles.formatIt(StatusMsg): shortLog(it)

func disconnectReasonName(reason: uint64): string =
  # haha, nim doesn't support uint64 in `case`!
  if reason == uint64(ClientShutDown): "Client shutdown"
  elif reason == uint64(IrrelevantNetwork): "Irrelevant network"
  elif reason == uint64(FaultOrError): "Fault or error"
  else: "Disconnected (" & $reason & ")"

proc getCurrentStatus*(state: BeaconSyncNetworkState): StatusMsg {.gcsafe.} =
  let
    dag = state.dag
    headBlock = dag.head
    wallTimeSlot = state.getBeaconTime().slotOrZero

  StatusMsg(
    forkDigest: state.dag.forkDigestAtEpoch(wallTimeSlot.epoch),
    finalizedRoot:
      getStateField(dag.headState.data, finalized_checkpoint).root,
    finalizedEpoch:
      getStateField(dag.headState.data, finalized_checkpoint).epoch,
    headRoot: headBlock.root,
    headSlot: headBlock.slot)

proc handleStatus(peer: Peer,
                  state: BeaconSyncNetworkState,
                  ourStatus: StatusMsg,
                  theirStatus: StatusMsg): Future[void] {.gcsafe.}

proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) {.gcsafe.}

{.pop.} # TODO fix p2p macro for raises

p2pProtocol BeaconSync(version = 1,
                       networkState = BeaconSyncNetworkState,
                       peerState = BeaconSyncPeerState):

  onPeerConnected do (peer: Peer, incoming: bool) {.async.}:
    debug "Peer connected",
      peer, peerInfo = shortLog(peer.info), incoming
    # Per the eth2 protocol, whoever dials must send a status message when
    # connected for the first time, but because of how libp2p works, there may
    # be a race between incoming and outgoing connections and disconnects that
    # makes the incoming flag unreliable / obsolete by the time we get to
    # this point - instead of making assumptions, we'll just send a status
    # message redundantly.
    # TODO(zah)
    #      the spec does not prohibit sending the extra status message on
    #      incoming connections, but it should not be necessary - this would
    #      need a dedicated flow in libp2p that resolves the race conditions -
    #      this needs more thinking around the ordering of events and the
    #      given incoming flag
    let
      ourStatus = peer.networkState.getCurrentStatus()
      theirStatus = await peer.status(ourStatus, timeout = RESP_TIMEOUT)

    if theirStatus.isOk:
      await peer.handleStatus(peer.networkState, ourStatus, theirStatus.get())
    else:
      debug "Status response not received in time",
            peer, errorKind = theirStatus.error.kind
      await peer.disconnect(FaultOrError)

  proc status(peer: Peer,
              theirStatus: StatusMsg,
              response: SingleChunkResponse[StatusMsg])
    {.async, libp2pProtocol("status", 1).} =
    let ourStatus = peer.networkState.getCurrentStatus()
    trace "Sending status message", peer = peer, status = ourStatus
    await response.send(ourStatus)
    await peer.handleStatus(peer.networkState, ourStatus, theirStatus)

  proc ping(peer: Peer, value: uint64): uint64
    {.libp2pProtocol("ping", 1).} =
    return peer.network.metadata.seq_number

  proc getMetaData(peer: Peer): phase0.MetaData
    {.libp2pProtocol("metadata", 1).} =
    return peer.network.phase0metadata

  proc getMetadata_v2(peer: Peer): altair.MetaData
    {.libp2pProtocol("metadata", 2).} =
    return peer.network.metadata
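
  # Each block-serving handler below first charges the peer's request quota
  # (`updateRequestQuota`): a fixed lookup cost plus `blockResponseCost` per
  # block that may be written, then waits for the quota to become
  # non-negative again (`awaitNonNegativeRequestQuota`) before streaming any
  # blocks, so a single peer cannot pull data faster than the allowed rate.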

  proc beaconBlocksByRange(
      peer: Peer,
      startSlot: Slot,
      reqCount: uint64,
      reqStep: uint64,
      response: MultipleChunksResponse[phase0.SignedBeaconBlock])
      {.async, libp2pProtocol("beacon_blocks_by_range", 1).} =
    trace "got range request", peer, startSlot,
                               count = reqCount, step = reqStep
    if reqCount > 0'u64 and reqStep > 0'u64:
      var blocks: array[MAX_REQUEST_BLOCKS, BlockRef]
      let
        dag = peer.networkState.dag
        # Limit number of blocks in response
        count = int min(reqCount, blocks.lenu64)

      let
        endIndex = count - 1
        startIndex =
          dag.getBlockRange(startSlot, reqStep,
                            blocks.toOpenArray(0, endIndex))

      peer.updateRequestQuota(
        blockByRangeLookupCost +
        max(0, endIndex - startIndex + 1).float * blockResponseCost)
      peer.awaitNonNegativeRequestQuota()

      for i in startIndex..endIndex:
        doAssert not blocks[i].isNil, "getBlockRange should return non-nil blocks only"
        trace "wrote response block",
          slot = blocks[i].slot, root = shortLog(blocks[i].root)
        let blk = dag.get(blocks[i]).data
        case blk.kind
        of BeaconBlockFork.Phase0:
          await response.write(blk.phase0Block.asSigned)
        of BeaconBlockFork.Altair, BeaconBlockFork.Merge:
          # Skipping all subsequent blocks should be OK because the spec says:
          # "Clients MAY limit the number of blocks in the response."
          # https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#beaconblocksbyrange
          #
          # Also, our response would be indistinguishable from a node
          # that has been synced exactly to the altair transition slot.
          break

      debug "Block range request done",
        peer, startSlot, count, reqStep, found = count - startIndex
    else:
      raise newException(InvalidInputsError, "Empty range requested")

  proc beaconBlocksByRoot(
      peer: Peer,
      # Please note that the SSZ list here ensures that the
      # spec constant MAX_REQUEST_BLOCKS is enforced:
      blockRoots: BlockRootsList,
      response: MultipleChunksResponse[phase0.SignedBeaconBlock])
      {.async, libp2pProtocol("beacon_blocks_by_root", 1).} =
    if blockRoots.len == 0:
      raise newException(InvalidInputsError, "No blocks requested")

    let
      dag = peer.networkState.dag
      count = blockRoots.len

    peer.updateRequestQuota(count.float * blockByRootLookupCost)
    peer.awaitNonNegativeRequestQuota()

    var found = 0
    for i in 0..<count:
      let blockRef = dag.getRef(blockRoots[i])
      if not isNil(blockRef):
        let blk = dag.get(blockRef).data
        case blk.kind
        of BeaconBlockFork.Phase0:
          await response.write(blk.phase0Block.asSigned)
          inc found
        of BeaconBlockFork.Altair, BeaconBlockFork.Merge:
          # Skipping this block should be OK because the spec says:
          # "Clients MAY limit the number of blocks in the response."
          # https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#beaconblocksbyroot
          continue

    peer.updateRequestQuota(found.float * blockResponseCost)

    debug "Block root request done",
      peer, roots = blockRoots.len, count, found

  proc beaconBlocksByRange_v2(
      peer: Peer,
      startSlot: Slot,
      reqCount: uint64,
      reqStep: uint64,
      response: MultipleChunksResponse[ForkedSignedBeaconBlock])
      {.async, libp2pProtocol("beacon_blocks_by_range", 2).} =
    trace "got range request", peer, startSlot,
                               count = reqCount, step = reqStep
    if reqCount > 0'u64 and reqStep > 0'u64:
      var blocks: array[MAX_REQUEST_BLOCKS, BlockRef]
      let
        dag = peer.networkState.dag
        # Limit number of blocks in response
        count = int min(reqCount, blocks.lenu64)

      let
        endIndex = count - 1
        startIndex =
          dag.getBlockRange(startSlot, reqStep,
                            blocks.toOpenArray(0, endIndex))

      peer.updateRequestQuota(
        blockByRangeLookupCost +
        max(0, endIndex - startIndex + 1).float * blockResponseCost)
      peer.awaitNonNegativeRequestQuota()

      for i in startIndex..endIndex:
        doAssert not blocks[i].isNil, "getBlockRange should return non-nil blocks only"
        trace "wrote response block",
          slot = blocks[i].slot, root = shortLog(blocks[i].root)
        let blk = dag.getForkedBlock(blocks[i])
        await response.write(blk.asSigned)

      debug "Block range request done",
        peer, startSlot, count, reqStep, found = count - startIndex
    else:
      raise newException(InvalidInputsError, "Empty range requested")

  proc beaconBlocksByRoot_v2(
      peer: Peer,
      # Please note that the SSZ list here ensures that the
      # spec constant MAX_REQUEST_BLOCKS is enforced:
      blockRoots: BlockRootsList,
      response: MultipleChunksResponse[ForkedSignedBeaconBlock])
      {.async, libp2pProtocol("beacon_blocks_by_root", 2).} =
    if blockRoots.len == 0:
      raise newException(InvalidInputsError, "No blocks requested")

    let
      dag = peer.networkState.dag
      count = blockRoots.len

    peer.updateRequestQuota(count.float * blockByRootLookupCost)
    peer.awaitNonNegativeRequestQuota()

    var found = 0
    for i in 0..<count:
      let blockRef = dag.getRef(blockRoots[i])
      if not isNil(blockRef):
        let blk = dag.getForkedBlock(blockRef)
        await response.write(blk.asSigned)
        inc found

    peer.updateRequestQuota(found.float * blockResponseCost)

    debug "Block root request done",
      peer, roots = blockRoots.len, count, found

  proc goodbye(peer: Peer, reason: uint64)
    {.async, libp2pProtocol("goodbye", 1).} =
    debug "Received Goodbye message", reason = disconnectReasonName(reason), peer

proc useSyncV2*(state: BeaconSyncNetworkState): bool =
  let wallTimeSlot = state.getBeaconTime().slotOrZero

  wallTimeSlot.epoch >= state.dag.cfg.ALTAIR_FORK_EPOCH

proc useSyncV2*(peer: Peer): bool =
  peer.networkState(BeaconSync).useSyncV2()

proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) =
  debug "Peer status", peer, statusMsg
  peer.state(BeaconSync).statusMsg = statusMsg
  peer.state(BeaconSync).statusLastTime = Moment.now()
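
# `updateStatus` re-runs the status exchange with an already-connected peer
# and caches the reply via `setStatusMsg`; `handleStatus` below compares a
# received status against ours, disconnecting peers on another fork and
# admitting matching peers to the peer pool.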
proc updateStatus*(peer: Peer): Future[bool] {.async.} =
  ## Request `status` of remote peer ``peer``.
  let
    nstate = peer.networkState(BeaconSync)
    ourStatus = getCurrentStatus(nstate)

  let theirFut = awaitne peer.status(ourStatus, timeout = RESP_TIMEOUT)
  if theirFut.failed():
    return false
  else:
    let theirStatus = theirFut.read()
    if theirStatus.isOk:
      peer.setStatusMsg(theirStatus.get)
      return true
    else:
      return false

proc getHeadSlot*(peer: Peer): Slot =
  ## Returns head slot for specific peer ``peer``.
  peer.state(BeaconSync).statusMsg.headSlot

proc handleStatus(peer: Peer,
                  state: BeaconSyncNetworkState,
                  ourStatus: StatusMsg,
                  theirStatus: StatusMsg) {.async, gcsafe.} =
  if theirStatus.forkDigest != ourStatus.forkDigest:
    debug "Irrelevant peer", peer, theirStatus, ourStatus
    await peer.disconnect(IrrelevantNetwork)
  else:
    peer.setStatusMsg(theirStatus)
    if peer.connectionState == Connecting:
      # As soon as we get here it means that we passed handshake successfully.
      # So we can add this peer to PeerPool.
      await peer.handlePeer()

proc initBeaconSync*(network: Eth2Node, dag: ChainDAGRef,
                     getBeaconTime: GetBeaconTimeFn) =
  var networkState = network.protocolState(BeaconSync)
  networkState.dag = dag
  networkState.getBeaconTime = getBeaconTime