# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  # Std lib
  std/[typetraits, os, sequtils, strutils, algorithm, math, tables, macrocache],

  # Status libs
  results,
  stew/[leb128, endians2, byteutils, io2, bitops2],
  stew/shims/macros,
  snappy,
  json_serialization, json_serialization/std/[net, sets, options],
  chronos, chronos/ratelimit, chronicles, metrics,
  libp2p/[switch, peerinfo, multiaddress, multicodec, crypto/crypto,
    crypto/secp, builders],
  libp2p/protocols/pubsub/[
    pubsub, gossipsub, rpc/message, rpc/messages, peertable, pubsubpeer],
  libp2p/stream/connection,
  eth/[keys, async_utils],
  eth/net/nat, eth/p2p/discoveryv5/[enr, node, random2],
  ".."/[version, conf, beacon_clock, conf_light_client],
  ../spec/datatypes/[phase0, altair, bellatrix],
  ../spec/[eth2_ssz_serialization, network, helpers, forks],
  ../validators/keystore_management,
  "."/[eth2_discovery, eth2_protocol_dsl, libp2p_json_serialization, peer_pool, peer_scores]

export
  tables, chronos, ratelimit, version, multiaddress, peerinfo, p2pProtocol,
  connection, libp2p_json_serialization, eth2_ssz_serialization, results,
  eth2_discovery, peer_pool, peer_scores

logScope:
  topics = "networking"

type
  NetKeyPair* = crypto.KeyPair
  PublicKey* = crypto.PublicKey
  PrivateKey* = crypto.PrivateKey

  ErrorMsg = List[byte, 256]
  SendResult* = Result[void, cstring]

  DirectPeers = Table[PeerId, seq[MultiAddress]]

  # TODO: This is here only to eradicate a compiler
  # warning about unused import (rpc/messages).
  GossipMsg = messages.Message

  SeenItem* = object
    peerId*: PeerId
    stamp*: chronos.Moment

  Eth2Node* = ref object of RootObj
    switch*: Switch
    pubsub*: GossipSub
    discovery*: Eth2DiscoveryProtocol
    discoveryEnabled*: bool
    wantedPeers*: int
    hardMaxPeers*: int
    peerPool*: PeerPool[Peer, PeerId]
    protocols: seq[ProtocolInfo]
      ## Protocols managed by the DSL and mounted on the switch
    protocolStates*: seq[RootRef]
    metadata*: altair.MetaData
    connectTimeout*: chronos.Duration
    seenThreshold*: chronos.Duration
    connQueue: AsyncQueue[PeerAddr]
    seenTable: Table[PeerId, SeenItem]
    connWorkers: seq[Future[void].Raising([CancelledError])]
    connTable: HashSet[PeerId]
    forkId*: ENRForkID
    discoveryForkId*: ENRForkID
    forkDigests*: ref ForkDigests
    rng*: ref HmacDrbgContext
    peers*: Table[PeerId, Peer]
    directPeers*: DirectPeers
    validTopics: HashSet[string]
    peerPingerHeartbeatFut: Future[void].Raising([CancelledError])
    peerTrimmerHeartbeatFut: Future[void].Raising([CancelledError])
    cfg: RuntimeConfig
    getBeaconTime: GetBeaconTimeFn

    quota: TokenBucket ## Global quota mainly for high-bandwidth stuff

  AverageThroughput* = object
    count*: uint64
    average*: float

  Peer* = ref object
    network*: Eth2Node
    peerId*: PeerId
    discoveryId*: Eth2DiscoveryId
    connectionState*: ConnectionState
    protocolStates*: seq[RootRef]
    netThroughput: AverageThroughput
    score*: int
    quota*: TokenBucket
    lastReqTime*: Moment
    connections*: int
    enr*: Opt[enr.Record]
    metadata*: Opt[altair.MetaData]
    failedMetadataRequests: int
    lastMetadataTime*: Moment
    direction*: PeerType
    disconnectedFut: Future[void]
    statistics*: SyncResponseStats

  PeerAddr* = object
    peerId*: PeerId
    addrs*: seq[MultiAddress]

  ConnectionState* = enum
    None,
    Connecting,
    Connected,
    Disconnecting,
    Disconnected

  UntypedResponse* = ref object
    peer*: Peer
    stream*: Connection
    writtenChunks*: int

  SingleChunkResponse*[MsgType] = distinct UntypedResponse
    ## Protocol requests using this type will produce request-making
    ## client-side procs that return `NetRes[MsgType]`

  MultipleChunksResponse*[MsgType; maxLen: static Limit] = distinct UntypedResponse
    ## Protocol requests using this type will produce request-making
    ## client-side procs that return `NetRes[List[MsgType, maxLen]]`.
    ## In the future, such procs will return an `InputStream[NetRes[MsgType]]`.

  MessageInfo* = object
    name*: string

    # Private fields:
    libp2pCodecName: string
    protocolMounter*: MounterProc

  ProtocolInfoObj* = object
    name*: string
    messages*: seq[MessageInfo]
    index*: int # the position of the protocol in the
                # ordered list of supported protocols

    # Private fields:
    peerStateInitializer*: PeerStateInitializer
    networkStateInitializer*: NetworkStateInitializer
    onPeerConnected*: OnPeerConnectedHandler
    onPeerDisconnected*: OnPeerDisconnectedHandler

  ProtocolInfo* = ptr ProtocolInfoObj

  ResponseCode* = enum
    Success
    InvalidRequest
    ServerError
    ResourceUnavailable

  PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe, raises: [].}
  NetworkStateInitializer* = proc(network: Eth2Node): RootRef {.gcsafe, raises: [].}
  OnPeerConnectedHandler* = proc(peer: Peer, incoming: bool): Future[void] {.async: (raises: [CancelledError]).}
  OnPeerDisconnectedHandler* = proc(peer: Peer): Future[void] {.async: (raises: [CancelledError]).}
  ThunkProc* = LPProtoHandler
  MounterProc* = proc(network: Eth2Node) {.gcsafe, raises: [].}
  MessageContentPrinter* = proc(msg: pointer): string {.gcsafe, raises: [].}

  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#goodbye
  DisconnectionReason* = enum
    # might see other values on the wire!
    ClientShutDown = 1
    IrrelevantNetwork = 2
    FaultOrError = 3
    # Clients MAY use reason codes above 128 to indicate alternative,
    # erroneous request-specific responses.
    PeerScoreLow = 237 # 79 * 3

  TransmissionError* = object of CatchableError

  Eth2NetworkingErrorKind* = enum
    # Potentially benign errors (network conditions)
    BrokenConnection
    ReceivedErrorResponse
    UnexpectedEOF
    PotentiallyExpectedEOF
    StreamOpenTimeout
    ReadResponseTimeout

    # Errors for which we descore heavily (protocol violations)
    InvalidResponseCode
    InvalidSnappyBytes
    InvalidSszBytes
    InvalidSizePrefix
    ZeroSizePrefix
    SizePrefixOverflow
    InvalidContextBytes
    ResponseChunkOverflow

    UnknownError

  Eth2NetworkingError = object
    case kind*: Eth2NetworkingErrorKind
    of ReceivedErrorResponse:
      responseCode*: ResponseCode
      errorMsg*: string
    else:
      discard

  InvalidInputsError* = object of CatchableError

  ResourceUnavailableError* = object of CatchableError

  NetRes*[T] = Result[T, Eth2NetworkingError]
    ## This is the type returned from all network requests

const
  clientId* = "Nimbus beacon node " & fullVersionStr

  requestPrefix = "/eth2/beacon_chain/req/"
  requestSuffix = "/ssz_snappy"

  ConcurrentConnections = 20
    ## Maximum number of active concurrent connection requests.

  SeenTableTimeTimeout =
    when not defined(local_testnet): 5.minutes else: 10.seconds
    ## Period of time a timed-out connection attempt stays in the seen table

  SeenTableTimeDeadPeer =
    when not defined(local_testnet): 5.minutes else: 10.seconds
    ## Period of time for dead peers.

  SeenTableTimeIrrelevantNetwork = 24.hours
    ## Period of time for `IrrelevantNetwork` error reason.
  SeenTableTimeClientShutDown = 10.minutes
    ## Period of time for `ClientShutDown` error reason.
  SeenTableTimeFaultOrError = 10.minutes
    ## Period of time for `FaultOrError` error reason.
  SeenTablePenaltyError = 60.minutes
    ## Period of time for peers which score below or equal to zero.
  SeenTableTimeReconnect = 1.minutes
    ## Minimal time between disconnection and reconnection attempt

  ProtocolViolations = {InvalidResponseCode..Eth2NetworkingErrorKind.high()}

template neterr*(kindParam: Eth2NetworkingErrorKind): auto =
  err(type(result), Eth2NetworkingError(kind: kindParam))

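# For illustration only: inside a proc whose return type is `NetRes[T]`,
# `neterr UnexpectedEOF` expands to
# `err(NetRes[T], Eth2NetworkingError(kind: UnexpectedEOF))`, i.e. an error
# result carrying just the error kind.
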
# Metrics for tracking attestation and beacon block loss
declareCounter nbc_gossip_messages_sent,
  "Number of gossip messages sent by this peer"

declareCounter nbc_gossip_messages_received,
  "Number of gossip messages received by this peer"

declareCounter nbc_gossip_failed_snappy,
  "Number of gossip messages that failed snappy decompression"

declareCounter nbc_gossip_failed_ssz,
  "Number of gossip messages that failed SSZ parsing"

declareCounter nbc_successful_dials,
  "Number of successfully dialed peers"

declareCounter nbc_failed_dials,
  "Number of dialing attempts that failed"

declareCounter nbc_timeout_dials,
  "Number of dialing attempts that exceeded timeout"

declareGauge nbc_peers,
  "Number of active libp2p peers"

declareCounter nbc_successful_discoveries,
  "Number of successful discoveries"

declareCounter nbc_failed_discoveries,
  "Number of failed discoveries"

declareCounter nbc_cycling_kicked_peers,
  "Number of peers kicked for peer cycling"

declareGauge nbc_gossipsub_low_fanout,
  "numbers of topics with low fanout"

declareGauge nbc_gossipsub_good_fanout,
  "numbers of topics with good fanout"

declareGauge nbc_gossipsub_healthy_fanout,
  "numbers of topics with dHigh fanout"

declareHistogram nbc_resolve_time,
  "Time(s) used while resolving peer information",
  buckets = [1.0, 5.0, 10.0, 20.0, 40.0, 60.0]

declareCounter nbc_reqresp_messages_sent,
  "Number of Req/Resp messages sent", labels = ["protocol"]

declareCounter nbc_reqresp_messages_received,
  "Number of Req/Resp messages received", labels = ["protocol"]

declareCounter nbc_reqresp_messages_failed,
  "Number of Req/Resp messages that failed decoding", labels = ["protocol"]

declareCounter nbc_reqresp_messages_throttled,
  "Number of Req/Resp messages that were throttled", labels = ["protocol"]

const
  libp2p_pki_schemes {.strdefine.} = ""

when libp2p_pki_schemes != "secp256k1":
  {.fatal: "Incorrect building process, please use -d:\"libp2p_pki_schemes=secp256k1\"".}

const
  NetworkInsecureKeyPassword = "INSECUREPASSWORD"

template libp2pProtocol*(name: string, version: int) {.pragma.}

func shortLog*(peer: Peer): string = shortLog(peer.peerId)
chronicles.formatIt(Peer): shortLog(it)
chronicles.formatIt(PublicKey): byteutils.toHex(it.getBytes().tryGet())

func shortProtocolId(protocolId: string): string =
  let
    start = if protocolId.startsWith(requestPrefix): requestPrefix.len else: 0
    ends = if protocolId.endsWith(requestSuffix):
      protocolId.high - requestSuffix.len
    else:
      protocolId.high
  protocolId[start..ends]

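# For illustration: with the constants above,
# shortProtocolId("/eth2/beacon_chain/req/status/1/ssz_snappy") == "status/1",
# which keeps metric labels compact.
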
proc openStream(node: Eth2Node,
                peer: Peer,
                protocolId: string): Future[NetRes[Connection]]
    {.async: (raises: [CancelledError]).} =
  # When dialing here, we do not provide addresses - all new connection
  # attempts are handled via `connect` which also takes into account
  # reconnection timeouts
  try:
    ok await dial(node.switch, peer.peerId, protocolId)
  except LPError as exc:
    debug "Dialing failed", exc = exc.msg
    neterr BrokenConnection
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    # TODO remove once libp2p supports `raises`
    debug "Unexpected error when opening stream", exc = exc.msg
    neterr UnknownError

proc init(T: type Peer, network: Eth2Node, peerId: PeerId): Peer {.gcsafe.}

proc getState*(peer: Peer, proto: ProtocolInfo): RootRef =
  doAssert peer.protocolStates[proto.index] != nil, $proto.index
  peer.protocolStates[proto.index]

template state*(peer: Peer, Protocol: type): untyped =
  ## Returns the state object of a particular protocol for a
  ## particular connection.
  mixin State
  bind getState
  type S = Protocol.State
  S(getState(peer, Protocol.protocolInfo))

proc getNetworkState*(node: Eth2Node, proto: ProtocolInfo): RootRef =
  doAssert node.protocolStates[proto.index] != nil, $proto.index
  node.protocolStates[proto.index]

template protocolState*(node: Eth2Node, Protocol: type): untyped =
  mixin NetworkState
  bind getNetworkState
  type S = Protocol.NetworkState
  S(getNetworkState(node, Protocol.protocolInfo))

proc initProtocolState*[T](state: T, x: Peer|Eth2Node)
    {.gcsafe, raises: [].} =
  discard

template networkState*(connection: Peer, Protocol: type): untyped =
  ## Returns the network state object of a particular protocol for a
  ## particular connection.
  protocolState(connection.network, Protocol)

func peerId*(node: Eth2Node): PeerId =
  node.switch.peerInfo.peerId

func nodeId*(node: Eth2Node): NodeId =
  # `secp256k1` keys are always stored inside PeerId.
  toNodeId(keys.PublicKey(node.switch.peerInfo.publicKey.skkey))

func enrRecord*(node: Eth2Node): Record =
  node.discovery.localNode.record

proc getPeer(node: Eth2Node, peerId: PeerId): Peer =
  node.peers.withValue(peerId, peer) do:
    return peer[]
  do:
    let peer = Peer.init(node, peerId)
    return node.peers.mgetOrPut(peerId, peer)

proc peerFromStream(network: Eth2Node, conn: Connection): Peer =
  result = network.getPeer(conn.peerId)
  result.peerId = conn.peerId

func getKey*(peer: Peer): PeerId {.inline.} =
  peer.peerId

proc getFuture(peer: Peer): Future[void] {.inline.} =
  if isNil(peer.disconnectedFut):
    peer.disconnectedFut = newFuture[void]("Peer.disconnectedFut")
  peer.disconnectedFut

func getScore*(a: Peer): int =
  ## Returns current score value for peer ``a``.
  a.score

func updateScore*(peer: Peer, score: int) {.inline.} =
  ## Update peer's ``peer`` score with value ``score``.
  peer.score = peer.score + score
  if peer.score > PeerScoreHighLimit:
    peer.score = PeerScoreHighLimit

func updateStats*(peer: Peer, index: SyncResponseKind,
                  value: uint64) {.inline.} =
  ## Update peer's ``peer`` specific ``index`` statistics with value ``value``.
  peer.statistics.update(index, value)

func getStats*(peer: Peer, index: SyncResponseKind): uint64 {.inline.} =
  ## Returns current statistics value for peer ``peer`` and index ``index``.
  peer.statistics.get(index)

func calcThroughput(dur: Duration, value: uint64): float =
  let secs = float(chronos.seconds(1).nanoseconds)
  if isZero(dur):
    0.0
  else:
    float(value) * (secs / float(dur.nanoseconds))

func updateNetThroughput(peer: Peer, dur: Duration,
                         bytesCount: uint64) {.inline.} =
  ## Update peer's ``peer`` network throughput.
  let bytesPerSecond = calcThroughput(dur, bytesCount)
  let a = peer.netThroughput.average
  let n = peer.netThroughput.count
  peer.netThroughput.average = a + (bytesPerSecond - a) / float(n + 1)
  inc(peer.netThroughput.count)

func netKbps*(peer: Peer): float {.inline.} =
  ## Returns current network throughput average value in Kbps for peer ``peer``.
  round(((peer.netThroughput.average / 1024) * 10_000) / 10_000)

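# Worked example (illustrative): `updateNetThroughput` maintains an incremental
# mean, avg' = avg + (sample - avg) / (n + 1). After samples of 100, 200 and
# 300 bytes/s the stored average is 100, then 150, then 200 - equivalent to the
# arithmetic mean of all samples without keeping them around.
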
# /!\ Must be exported to be seen by `peerCmp`
func `<`*(a, b: Peer): bool =
  ## Comparison function indicating `true` if peer `a` ranks worse than peer `b`
  if a.score != b.score:
    a.score < b.score
  elif a.netThroughput.average != b.netThroughput.average:
    a.netThroughput.average < b.netThroughput.average
  else:
    system.`<`(a, b)

const
  maxRequestQuota = 1000000
  maxGlobalQuota = 2 * maxRequestQuota
    ## Roughly, this means we allow 2 peers to sync from us at a time
  fullReplenishTime = 5.seconds

template awaitQuota*(peerParam: Peer, costParam: float, protocolIdParam: string) =
  let
    peer = peerParam
    cost = int(costParam)

  if not peer.quota.tryConsume(cost.int):
    let protocolId = protocolIdParam
    debug "Awaiting peer quota", peer, cost = cost, protocolId = protocolId
    nbc_reqresp_messages_throttled.inc(1, [protocolId])
    await peer.quota.consume(cost.int)

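# Usage sketch (illustrative): a request handler typically charges the per-peer
# token bucket before doing expensive work, e.g.
# `awaitQuota(peer, libp2pRequestCost, shortProtocolId(protocolId))` (with
# `libp2pRequestCost` defined further down) - if the bucket is empty, the
# handler simply waits until enough quota has replenished.
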
template awaitQuota*(
    networkParam: Eth2Node, costParam: float, protocolIdParam: string) =
  let
    network = networkParam
    cost = int(costParam)

  if not network.quota.tryConsume(cost.int):
    let protocolId = protocolIdParam
    debug "Awaiting network quota", peer, cost = cost, protocolId = protocolId
    nbc_reqresp_messages_throttled.inc(1, [protocolId])
    await network.quota.consume(cost.int)

func allowedOpsPerSecondCost*(n: int): float =
  const replenishRate = (maxRequestQuota / fullReplenishTime.nanoseconds.float)
  (replenishRate * 1000000000'f / n.float)

const
  libp2pRequestCost = allowedOpsPerSecondCost(8)
    ## Cost such that roughly 8 libp2p requests per peer per second are allowed

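# Illustrative arithmetic: with maxRequestQuota = 1_000_000 and
# fullReplenishTime = 5 s, the bucket refills at 200_000 quota/s, so
# allowedOpsPerSecondCost(8) = 200_000 / 8 = 25_000 - charging that per request
# sustains about 8 requests per peer per second once the bucket is drained.
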
proc isSeen(network: Eth2Node, peerId: PeerId): bool =
  ## Returns ``true`` if ``peerId`` present in SeenTable and time period is not
  ## yet expired.
  let currentTime = now(chronos.Moment)
  if peerId notin network.seenTable:
    false
  else:
    let item = try: network.seenTable[peerId]
               except KeyError: raiseAssert "checked with notin"
    if currentTime >= item.stamp:
      # Peer is in SeenTable, but the time period has expired.
      network.seenTable.del(peerId)
      false
    else:
      true

proc addSeen(network: Eth2Node, peerId: PeerId,
             period: chronos.Duration) =
  ## Adds peer with PeerId ``peerId`` to SeenTable and timeout ``period``.
  let item = SeenItem(peerId: peerId, stamp: now(chronos.Moment) + period)
  withValue(network.seenTable, peerId, entry) do:
    if entry.stamp < item.stamp:
      entry.stamp = item.stamp
  do:
    network.seenTable[peerId] = item

proc disconnect*(peer: Peer, reason: DisconnectionReason,
                 notifyOtherPeer = false) {.async: (raises: [CancelledError]).} =
  # Per the specification, we MAY send a disconnect reason to the other peer but
  # we currently don't - the fact that we're disconnecting is obvious and the
  # reason already known (wrong network is known from status message) or doesn't
  # greatly matter for the listening side (since it can't be trusted anyway)
  try:
    if peer.connectionState notin {Disconnecting, Disconnected}:
      peer.connectionState = Disconnecting
      # We add the peer to SeenTable before the actual disconnect to avoid races.
      let seenTime = case reason
        of ClientShutDown:
          SeenTableTimeClientShutDown
        of IrrelevantNetwork:
          SeenTableTimeIrrelevantNetwork
        of FaultOrError:
          SeenTableTimeFaultOrError
        of PeerScoreLow:
          SeenTablePenaltyError
      peer.network.addSeen(peer.peerId, seenTime)
      await peer.network.switch.disconnect(peer.peerId)
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    # switch.disconnect shouldn't raise
    warn "Unexpected error while disconnecting peer",
      peer = peer.peerId,
      reason = reason,
      exc = exc.msg

proc releasePeer(peer: Peer) =
  ## Checks for peer's score and disconnects peer if score is less than
  ## `PeerScoreLowLimit`.
  if peer.connectionState notin {ConnectionState.Disconnecting,
                                 ConnectionState.Disconnected}:
    if peer.score < PeerScoreLowLimit:
      debug "Peer was disconnected due to low score", peer = peer,
            peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
            score_high_limit = PeerScoreHighLimit
      asyncSpawn(peer.disconnect(PeerScoreLow))

proc getRequestProtoName(fn: NimNode): NimNode =
  # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes
  # (TODO: file as an issue)

  let pragmas = fn.pragma
  if pragmas.kind == nnkPragma and pragmas.len > 0:
    for pragma in pragmas:
      try:
        if pragma.len > 0 and $pragma[0] == "libp2pProtocol":
          let protoName = $(pragma[1])
          let protoVer = $(pragma[2].intVal)
          return newLit(requestPrefix & protoName & "/" & protoVer & requestSuffix)
      except Exception as exc: raiseAssert exc.msg # TODO https://github.com/nim-lang/Nim/issues/17454

  return newLit("")

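# For illustration: a request proc declared in the protocol DSL with
# `{.libp2pProtocol("status", 1).}` resolves here to the protocol ID literal
# "/eth2/beacon_chain/req/status/1/ssz_snappy".
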
proc add(s: var seq[byte], pos: var int, bytes: openArray[byte]) =
  s[pos..<pos+bytes.len] = bytes
  pos += bytes.len

proc writeChunkSZ(
    conn: Connection, responseCode: Opt[ResponseCode],
    uncompressedLen: uint64, payloadSZ: openArray[byte],
    contextBytes: openArray[byte] = []): Future[void] =
  let
    uncompressedLenBytes = toBytes(uncompressedLen, Leb128)

  var
    data = newSeqUninitialized[byte](
      ord(responseCode.isSome) + contextBytes.len + uncompressedLenBytes.len +
      payloadSZ.len)
    pos = 0

  if responseCode.isSome:
    data.add(pos, [byte responseCode.get])
  data.add(pos, contextBytes)
  data.add(pos, uncompressedLenBytes.toOpenArray())
  data.add(pos, payloadSZ)
  conn.write(data)

proc writeChunk(conn: Connection,
                responseCode: Opt[ResponseCode],
                payload: openArray[byte],
                contextBytes: openArray[byte] = []): Future[void] =
  let
    uncompressedLenBytes = toBytes(payload.lenu64, Leb128)
  var
    data = newSeqUninitialized[byte](
      ord(responseCode.isSome) + contextBytes.len + uncompressedLenBytes.len +
      snappy.maxCompressedLenFramed(payload.len).int)
    pos = 0

  if responseCode.isSome:
    data.add(pos, [byte responseCode.get])
  data.add(pos, contextBytes)
  data.add(pos, uncompressedLenBytes.toOpenArray())
  let
    pre = pos
    written = snappy.compressFramed(payload, data.toOpenArray(pos, data.high))
      .expect("compression shouldn't fail with correctly preallocated buffer")
  data.setLen(pre + written)

  conn.write(data)

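# On the wire, a chunk written by the procs above is laid out as (illustrative):
#
#   [response code: 1 byte, responses only]
#   [context bytes, e.g. fork digest, when the message type requires them]
#   [uncompressed payload length as an unsigned LEB128 varint]
#   [payload, framed snappy compressed]
#
# which matches the ssz_snappy req/resp encoding used across eth2 clients.
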
template errorMsgLit(x: static string): ErrorMsg =
  const val = ErrorMsg toBytes(x)
  val

func formatErrorMsg(msg: ErrorMsg): string =
  # ErrorMsg "usually" contains a human-readable string - we'll try to parse it
  # as ASCII and return hex if that fails
  for c in msg:
    if c < 32 or c > 127:
      return byteutils.toHex(asSeq(msg))

  string.fromBytes(asSeq(msg))

proc sendErrorResponse(peer: Peer,
                       conn: Connection,
                       responseCode: ResponseCode,
                       errMsg: ErrorMsg): Future[void] =
  debug "Error processing request",
    peer, responseCode, errMsg = formatErrorMsg(errMsg)
  conn.writeChunk(Opt.some responseCode, SSZ.encode(errMsg))

proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: seq[byte])
    {.async: (raises: [CancelledError]).} =
  # Notifications are sent as a best effort, i.e. errors are not reported back
  # to the caller
  let
    deadline = sleepAsync RESP_TIMEOUT_DUR
    streamRes = awaitWithTimeout(peer.network.openStream(peer, protocolId), deadline):
      debug "Timeout while opening stream for notification", peer, protocolId
      return

  let stream = streamRes.valueOr:
    debug "Could not open stream for notification",
      peer, protocolId, error = streamRes.error
    return

  try:
    await stream.writeChunk(Opt.none ResponseCode, requestBytes)
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    debug "Error while writing notification", peer, protocolId, exc = exc.msg
  finally:
    try:
      await noCancel stream.close()
    except CatchableError as exc:
      debug "Unexpected error while closing notification stream",
        peer, protocolId, exc = exc.msg

proc sendResponseChunkBytesSZ(
    response: UntypedResponse, uncompressedLen: uint64,
    payloadSZ: openArray[byte],
    contextBytes: openArray[byte] = []): Future[void] =
  inc response.writtenChunks
  response.stream.writeChunkSZ(
    Opt.some ResponseCode.Success, uncompressedLen, payloadSZ, contextBytes)

proc sendResponseChunkBytes(
    response: UntypedResponse, payload: openArray[byte],
    contextBytes: openArray[byte] = []): Future[void] =
  inc response.writtenChunks
  response.stream.writeChunk(Opt.some ResponseCode.Success, payload, contextBytes)

proc sendResponseChunk(
    response: UntypedResponse, val: auto,
    contextBytes: openArray[byte] = []): Future[void] =
  sendResponseChunkBytes(response, SSZ.encode(val), contextBytes)

template sendUserHandlerResultAsChunkImpl*(stream: Connection,
                                           handlerResultFut: Future): untyped =
  let handlerRes = await handlerResultFut
  writeChunk(stream, Opt.some ResponseCode.Success, SSZ.encode(handlerRes))

template sendUserHandlerResultAsChunkImpl*(stream: Connection,
                                           handlerResult: auto): untyped =
  writeChunk(stream, Opt.some ResponseCode.Success, SSZ.encode(handlerResult))

proc uncompressFramedStream(conn: Connection,
                            expectedSize: int): Future[Result[seq[byte], string]]
    {.async: (raises: [CancelledError]).} =
  var header: array[framingHeader.len, byte]
  try:
    await conn.readExactly(addr header[0], header.len)
  except LPStreamEOFError, LPStreamIncompleteError:
    return err "Unexpected EOF before snappy header"
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    return err "Unexpected error reading header: " & exc.msg

  if header != framingHeader:
    return err "Incorrect snappy header"

  static:
    doAssert maxCompressedFrameDataLen >= maxUncompressedFrameDataLen.uint64

  var
    frameData = newSeqUninitialized[byte](maxCompressedFrameDataLen + 4)
    output = newSeqUninitialized[byte](expectedSize)
    written = 0

  while written < expectedSize:
    var frameHeader: array[4, byte]
    try:
      await conn.readExactly(addr frameHeader[0], frameHeader.len)
    except LPStreamEOFError, LPStreamIncompleteError:
      return err "Snappy frame header missing"
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      return err "Unexpected error reading frame header: " & exc.msg

    let (id, dataLen) = decodeFrameHeader(frameHeader)

    if dataLen > frameData.len:
      # In theory, compressed frames could be bigger and still result in a
      # valid, small snappy frame, but this would mean they are not getting
      # compressed correctly
      return err "Snappy frame too big"

    if dataLen > 0:
      try:
        await conn.readExactly(addr frameData[0], dataLen)
      except LPStreamEOFError, LPStreamIncompleteError:
        return err "Incomplete snappy frame"
      except CancelledError as exc:
        raise exc
      except CatchableError as exc:
        return err "Unexpected error reading frame data: " & exc.msg

    if id == chunkCompressed:
      if dataLen < 6: # At least CRC + 2 bytes of frame data
        return err "Compressed snappy frame too small"

      let
        crc = uint32.fromBytesLE frameData.toOpenArray(0, 3)
        uncompressed =
          snappy.uncompress(
            frameData.toOpenArray(4, dataLen - 1),
            output.toOpenArray(written, output.high)).valueOr:
              return err "Failed to decompress content"

      if maskedCrc(
          output.toOpenArray(written, written + uncompressed-1)) != crc:
        return err "Snappy content CRC checksum failed"

      written += uncompressed

    elif id == chunkUncompressed:
      if dataLen < 5: # At least one byte of data
        return err "Uncompressed snappy frame too small"

      let uncompressed = dataLen - 4

      if uncompressed > maxUncompressedFrameDataLen.int:
        return err "Snappy frame size too large"

      if uncompressed > output.len - written:
        return err "Too much data"

      let crc = uint32.fromBytesLE frameData.toOpenArray(0, 3)
      if maskedCrc(frameData.toOpenArray(4, dataLen - 1)) != crc:
        return err "Snappy content CRC checksum failed"

      output[written..<written + uncompressed] =
        frameData.toOpenArray(4, dataLen-1)
      written += uncompressed

    elif id < 0x80:
      # Reserved unskippable chunks (chunk types 0x02-0x7f)
      # if we encounter this type of chunk, stop decoding
      # the spec says it is an error
      return err "Invalid snappy chunk type"

    else:
      # Reserved skippable chunks (chunk types 0x80-0xfe)
      # including STREAM_HEADER (0xff) should be skipped
      continue

  return ok output

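# Snappy framing recap (illustrative): after the stream identifier, each frame
# is a 4-byte header - 1 byte chunk type plus a 3-byte little-endian length -
# followed by a 4-byte masked CRC and the frame data. The loop above accepts
# compressed (0x00) and uncompressed (0x01) data chunks, rejects reserved
# unskippable types (< 0x80) and skips the skippable ones.
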
func chunkMaxSize[T](): uint32 =
  # compiler error on (T: type) syntax...
  when isFixedSize(T):
    uint32 fixedPortionSize(T)
  else:
    static: doAssert MAX_CHUNK_SIZE < high(uint32).uint64
    MAX_CHUNK_SIZE.uint32

from ../spec/datatypes/capella import SignedBeaconBlock
from ../spec/datatypes/deneb import SignedBeaconBlock

template gossipMaxSize(T: untyped): uint32 =
  const maxSize = static:
    when isFixedSize(T):
      fixedPortionSize(T).uint32
    elif T is bellatrix.SignedBeaconBlock or T is capella.SignedBeaconBlock or
        T is deneb.SignedBeaconBlock or T is electra.SignedBeaconBlock:
      GOSSIP_MAX_SIZE
    # TODO https://github.com/status-im/nim-ssz-serialization/issues/20 for
    # Attestation, AttesterSlashing, and SignedAggregateAndProof, which all
    # have lists bounded at MAX_VALIDATORS_PER_COMMITTEE (2048) items, thus
    # having max sizes significantly smaller than GOSSIP_MAX_SIZE.
    elif T is phase0.Attestation or T is phase0.AttesterSlashing or
        T is SignedAggregateAndProof or T is phase0.SignedBeaconBlock or
        T is altair.SignedBeaconBlock or T is SomeForkyLightClientObject:
      GOSSIP_MAX_SIZE
    else:
      {.fatal: "unknown type " & name(T).}
  static: doAssert maxSize <= GOSSIP_MAX_SIZE
  maxSize.uint32

proc readVarint2(conn: Connection): Future[NetRes[uint64]] {.
    async: (raises: [CancelledError]).} =
  try:
    ok await conn.readVarint()
  except LPStreamEOFError: #, LPStreamIncompleteError, InvalidVarintError
    # TODO compiler error - haha, uncaught exception
    # Error: unhandled exception: closureiters.nim(322, 17) `c[i].kind == nkType` [AssertionError]
    neterr UnexpectedEOF
  except LPStreamIncompleteError:
    neterr UnexpectedEOF
  except InvalidVarintError:
    neterr InvalidSizePrefix
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    debug "Unexpected error", exc = exc.msg
    neterr UnknownError

proc readChunkPayload*(conn: Connection, peer: Peer,
                       MsgType: type): Future[NetRes[MsgType]]
    {.async: (raises: [CancelledError]).} =
  let
    sm = now(chronos.Moment)
    size = ? await readVarint2(conn)

  const maxSize = chunkMaxSize[MsgType]()
  if size > maxSize:
    return neterr SizePrefixOverflow
  if size == 0:
    return neterr ZeroSizePrefix

  # The `size.int` conversion is safe because `size` is bounded to `MAX_CHUNK_SIZE`
  let
    dataRes = await conn.uncompressFramedStream(size.int)
    data = dataRes.valueOr:
      debug "Snappy decompression/read failed", msg = $dataRes.error, conn
      return neterr InvalidSnappyBytes

  # `10` is the maximum length of the size-prefix varint on the wire, so the
  # error introduced by using it as an estimate is insignificant.
  peer.updateNetThroughput(now(chronos.Moment) - sm,
                           uint64(10 + size))
  try:
    ok SSZ.decode(data, MsgType)
  except SerializationError:
    neterr InvalidSszBytes

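# Decode pipeline in short (illustrative): read the LEB128 size prefix, bound
# it by `chunkMaxSize`, snappy-decompress exactly that many bytes, then SSZ
# decode into `MsgType`; each failure maps onto a distinct
# `Eth2NetworkingErrorKind` so callers can score peers accordingly.
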
proc readResponseChunk(
    conn: Connection, peer: Peer, MsgType: typedesc):
    Future[NetRes[MsgType]] {.async: (raises: [CancelledError]).} =
  mixin readChunkPayload

  var responseCodeByte: byte
  try:
    await conn.readExactly(addr responseCodeByte, 1)
  except LPStreamEOFError, LPStreamIncompleteError:
    return neterr PotentiallyExpectedEOF
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    warn "Unexpected error", exc = exc.msg
    return neterr UnknownError

  static: assert ResponseCode.low.ord == 0
  if responseCodeByte > ResponseCode.high.byte:
    return neterr InvalidResponseCode

  let responseCode = ResponseCode responseCodeByte
  case responseCode:
  of InvalidRequest, ServerError, ResourceUnavailable:
    let
      errorMsg = ? await readChunkPayload(conn, peer, ErrorMsg)
      errorMsgStr = toPrettyString(errorMsg.asSeq)
    debug "Error response from peer", responseCode, errMsg = errorMsgStr
    return err Eth2NetworkingError(kind: ReceivedErrorResponse,
                                   responseCode: responseCode,
                                   errorMsg: errorMsgStr)
  of Success:
    discard

  return await readChunkPayload(conn, peer, MsgType)

proc readResponse(conn: Connection, peer: Peer,
                  MsgType: type, timeout: Duration): Future[NetRes[MsgType]]
    {.async: (raises: [CancelledError]).} =
  when MsgType is List:
    type E = MsgType.T
    var results: MsgType
    while true:
      # Because we interleave networking with response processing, it may
      # happen that reading all chunks takes longer than a strict deadline
      # timeout would allow, so we allow each chunk a new timeout instead.
      # The problem is exacerbated by the large number of round-trips to the
      # poll loop that each future along the way causes.
      trace "reading chunk", conn
      let nextFut = conn.readResponseChunk(peer, E)
      if not await nextFut.withTimeout(timeout):
        return neterr(ReadResponseTimeout)
      let nextRes = await nextFut
      if nextRes.isErr:
        if nextRes.error.kind == PotentiallyExpectedEOF:
          trace "EOF chunk", conn, err = nextRes.error

          return ok results
        trace "Error chunk", conn, err = nextRes.error

        return err nextRes.error
      else:
        trace "Got chunk", conn
        if not results.add nextRes.value:
          return neterr(ResponseChunkOverflow)
  else:
    let nextFut = conn.readResponseChunk(peer, MsgType)
    if not await nextFut.withTimeout(timeout):
      return neterr(ReadResponseTimeout)
    return await nextFut # Guaranteed to complete without waiting

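# For illustration: a request declared with a `MultipleChunksResponse` type,
# e.g. beaconBlocksByRange, ends up in the `List` branch above - chunks are
# accumulated until EOF or the list's `maxLen` is hit (ResponseChunkOverflow) -
# while single-response requests such as status or ping take the `else` branch
# and read exactly one chunk.
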
proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: seq[byte],
                     ResponseMsg: type,
                     timeout: Duration): Future[NetRes[ResponseMsg]]
    {.async: (raises: [CancelledError]).} =
  let
    deadline = sleepAsync timeout
    streamRes =
      awaitWithTimeout(peer.network.openStream(peer, protocolId), deadline):
        peer.updateScore(PeerScorePoorRequest)
        return neterr StreamOpenTimeout
    stream = streamRes.valueOr:
      if streamRes.error().kind in ProtocolViolations:
        peer.updateScore(PeerScoreInvalidRequest)
      else:
        peer.updateScore(PeerScorePoorRequest)
      return err streamRes.error()

  try:
    # Send the request
    # Some clients don't want a length sent for empty requests
    # So don't send anything on empty requests
    if requestBytes.len > 0:
      await stream.writeChunk(Opt.none ResponseCode, requestBytes)
    # Half-close the stream to mark the end of the request - if this is not
    # done, the other peer might never send us the response.
    await stream.close()

    nbc_reqresp_messages_sent.inc(1, [shortProtocolId(protocolId)])

    # Read the response
    let res = await readResponse(stream, peer, ResponseMsg, timeout)
    if res.isErr():
      if res.error().kind in ProtocolViolations:
        peer.updateScore(PeerScoreInvalidRequest)
      else:
        peer.updateScore(PeerScorePoorRequest)
    res
  except CancelledError as exc:
    raise exc
  except CatchableError:
    peer.updateScore(PeerScorePoorRequest)
    neterr BrokenConnection
  finally:
    try:
      await noCancel stream.closeWithEOF()
    except CatchableError as exc:
      debug "Unexpected error while closing stream",
        peer, protocolId, exc = exc.msg

proc init*(T: type MultipleChunksResponse, peer: Peer, conn: Connection): T =
  T(UntypedResponse(peer: peer, stream: conn))

proc init*[MsgType](T: type SingleChunkResponse[MsgType],
                    peer: Peer, conn: Connection): T =
  T(UntypedResponse(peer: peer, stream: conn))

template write*[M; maxLen: static Limit](
    r: MultipleChunksResponse[M, maxLen], val: M,
    contextBytes: openArray[byte] = []): untyped =
  mixin sendResponseChunk
  sendResponseChunk(UntypedResponse(r), val, contextBytes)

template writeSSZ*[M; maxLen: static Limit](
    r: MultipleChunksResponse[M, maxLen], val: auto,
    contextBytes: openArray[byte] = []): untyped =
  mixin sendResponseChunk
  sendResponseChunk(UntypedResponse(r), val, contextBytes)

template writeBytesSZ*(
    r: MultipleChunksResponse, uncompressedLen: uint64,
    bytes: openArray[byte], contextBytes: openArray[byte]): untyped =
  sendResponseChunkBytesSZ(UntypedResponse(r), uncompressedLen, bytes, contextBytes)

template send*[M](
    r: SingleChunkResponse[M], val: M,
    contextBytes: openArray[byte] = []): untyped =
  mixin sendResponseChunk
  doAssert UntypedResponse(r).writtenChunks == 0
  sendResponseChunk(UntypedResponse(r), val, contextBytes)

template sendSSZ*[M](
    r: SingleChunkResponse[M], val: auto,
    contextBytes: openArray[byte] = []): untyped =
  mixin sendResponseChunk
  doAssert UntypedResponse(r).writtenChunks == 0
  sendResponseChunk(UntypedResponse(r), val, contextBytes)

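# Usage sketch (illustrative): a server-side handler for a multi-chunk request
# streams each item with `await response.write(item, contextBytes)`, while a
# single-chunk handler calls `response.send(value)` exactly once - the
# `writtenChunks == 0` assert above guards against double sends.
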
2024-01-19 21:05:52 +00:00
|
|
|
proc performProtocolHandshakes(peer: Peer, incoming: bool) {.async: (raises: [CancelledError]).} =
|
2020-08-10 10:58:34 +00:00
|
|
|
# Loop down serially because it's easier to reason about the connection state
|
|
|
|
# when there are fewer async races, specially during setup
|
2024-01-13 09:54:24 +00:00
|
|
|
for protocol in peer.network.protocols:
|
2020-06-09 11:49:58 +00:00
|
|
|
if protocol.onPeerConnected != nil:
|
2020-08-10 10:58:34 +00:00
|
|
|
await protocol.onPeerConnected(peer, incoming)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
proc initProtocol(name: string,
|
|
|
|
peerInit: PeerStateInitializer,
|
2024-01-13 09:54:24 +00:00
|
|
|
networkInit: NetworkStateInitializer,
|
|
|
|
index: int): ProtocolInfoObj =
|
2021-02-22 16:17:48 +00:00
|
|
|
ProtocolInfoObj(
|
|
|
|
name: name,
|
|
|
|
messages: @[],
|
2024-01-13 09:54:24 +00:00
|
|
|
index: index,
|
2021-02-22 16:17:48 +00:00
|
|
|
peerStateInitializer: peerInit,
|
|
|
|
networkStateInitializer: networkInit)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
proc setEventHandlers(p: ProtocolInfo,
|
2020-06-09 11:49:58 +00:00
|
|
|
onPeerConnected: OnPeerConnectedHandler,
|
|
|
|
onPeerDisconnected: OnPeerDisconnectedHandler) =
|
|
|
|
p.onPeerConnected = onPeerConnected
|
|
|
|
p.onPeerDisconnected = onPeerDisconnected
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2021-07-19 12:25:11 +00:00
|
|
|
proc implementSendProcBody(sendProc: SendProc) =
|
2020-03-22 21:55:01 +00:00
|
|
|
let
|
|
|
|
msg = sendProc.msg
|
2020-05-23 22:24:47 +00:00
|
|
|
UntypedResponse = bindSym "UntypedResponse"
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
proc sendCallGenerator(peer, bytes: NimNode): NimNode =
|
|
|
|
if msg.kind != msgResponse:
|
|
|
|
let msgProto = getRequestProtoName(msg.procDef)
|
|
|
|
case msg.kind
|
|
|
|
of msgRequest:
|
2020-05-23 22:24:47 +00:00
|
|
|
let ResponseRecord = msg.response.recName
|
2020-03-22 21:55:01 +00:00
|
|
|
quote:
|
|
|
|
makeEth2Request(`peer`, `msgProto`, `bytes`,
|
2020-05-23 22:24:47 +00:00
|
|
|
`ResponseRecord`, `timeoutVar`)
|
2020-03-22 21:55:01 +00:00
|
|
|
else:
|
|
|
|
quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`)
|
|
|
|
else:
|
2020-05-23 22:24:47 +00:00
|
|
|
quote: sendResponseChunkBytes(`UntypedResponse`(`peer`), `bytes`)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
sendProc.useStandardBody(nil, nil, sendCallGenerator)
|
|
|
|
|
2020-05-06 22:24:55 +00:00
|
|
|
proc handleIncomingStream(network: Eth2Node,
|
|
|
|
conn: Connection,
|
2022-11-02 10:46:53 +00:00
|
|
|
protocolId: string,
|
2024-01-19 21:05:52 +00:00
|
|
|
MsgType: type) {.async: (raises: [CancelledError]).} =
|
2020-03-22 21:55:01 +00:00
|
|
|
mixin callUserHandler, RecType
|
2020-05-12 22:35:40 +00:00
|
|
|
|
|
|
|
type MsgRec = RecType(MsgType)
|
2020-05-15 12:41:00 +00:00
|
|
|
const msgName {.used.} = typetraits.name(MsgType)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
## Uncomment this to enable tracing on all incoming requests
|
|
|
|
## You can include `msgNameLit` in the condition to select
|
|
|
|
## more specific requests:
|
|
|
|
# when chronicles.runtimeFilteringEnabled:
|
|
|
|
# setLogLevel(LogLevel.TRACE)
|
|
|
|
# defer: setLogLevel(LogLevel.DEBUG)
|
2020-03-22 23:23:21 +00:00
|
|
|
# trace "incoming " & `msgNameLit` & " conn"
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-10-09 13:44:51 +00:00
|
|
|
let peer = peerFromStream(network, conn)
|
2020-03-22 21:55:01 +00:00
|
|
|
try:
|
2020-11-26 19:23:45 +00:00
|
|
|
case peer.connectionState
|
|
|
|
of Disconnecting, Disconnected, None:
|
|
|
|
# We got an incoming stream request while disconnected or disconnecting.
|
2020-11-29 13:43:41 +00:00
|
|
|
debug "Got incoming request from disconnected peer", peer = peer,
|
2020-11-26 19:23:45 +00:00
|
|
|
message = msgName
|
|
|
|
return
|
|
|
|
of Connecting:
|
|
|
|
# We got an incoming stream request while the handshake is not yet finished,
|
|
|
|
# TODO: We could check it here.
|
|
|
|
debug "Got incoming request from peer while in handshake", peer = peer,
|
|
|
|
msgName
|
|
|
|
of Connected:
|
|
|
|
# We got an incoming stream from a peer with a proper connection state.
|
|
|
|
debug "Got incoming request from peer", peer = peer, msgName
|
|
|
|
|
2020-05-21 13:21:29 +00:00
|
|
|
template returnInvalidRequest(msg: ErrorMsg) =
|
2020-10-09 13:44:51 +00:00
|
|
|
peer.updateScore(PeerScoreInvalidRequest)
|
2020-08-10 13:18:17 +00:00
|
|
|
await sendErrorResponse(peer, conn, InvalidRequest, msg)
|
2020-05-12 22:37:07 +00:00
|
|
|
return
|
2020-05-06 22:24:55 +00:00
|
|
|
|
2020-05-21 13:21:29 +00:00
|
|
|
template returnInvalidRequest(msg: string) =
|
|
|
|
returnInvalidRequest(ErrorMsg msg.toBytes)
|
|
|
|
|
2022-03-09 14:03:58 +00:00
|
|
|
template returnResourceUnavailable(msg: ErrorMsg) =
|
|
|
|
await sendErrorResponse(peer, conn, ResourceUnavailable, msg)
|
|
|
|
return
|
|
|
|
|
|
|
|
template returnResourceUnavailable(msg: string) =
|
|
|
|
returnResourceUnavailable(ErrorMsg msg.toBytes)
|
|
|
|
|
2022-11-02 10:46:53 +00:00
|
|
|
nbc_reqresp_messages_received.inc(1, [shortProtocolId(protocolId)])
|
|
|
|
|
2022-03-04 08:52:49 +00:00
|
|
|
const isEmptyMsg = when MsgRec is object:
|
|
|
|
# We need nested `when` statements here, because Nim doesn't properly
|
|
|
|
# apply boolean short-circuit logic at compile time and this causes
|
|
|
|
# `totalSerializedFields` to be applied to non-object types that it
|
|
|
|
# doesn't know how to support.
|
|
|
|
when totalSerializedFields(MsgRec) == 0: true
|
|
|
|
else: false
|
|
|
|
else:
|
|
|
|
false
|
|
|
|
|
2023-06-08 14:20:41 +00:00
|
|
|
let msg =
|
2020-05-15 17:56:34 +00:00
|
|
|
try:
|
2023-06-08 14:20:41 +00:00
|
|
|
when isEmptyMsg:
|
|
|
|
NetRes[MsgRec].ok default(MsgRec)
|
|
|
|
else:
|
2023-12-06 16:23:45 +00:00
|
|
|
# TODO(zah) The TTFB timeout is not implemented in LibP2P streams
|
|
|
|
# back-end
|
|
|
|
let deadline = sleepAsync RESP_TIMEOUT_DUR
|
|
|
|
|
2023-06-08 14:20:41 +00:00
|
|
|
awaitWithTimeout(
|
|
|
|
readChunkPayload(conn, peer, MsgRec), deadline):
|
|
|
|
# Timeout, e.g., cancellation due to fulfillment by different peer.
|
|
|
|
# Treat this similarly to `UnexpectedEOF`, `PotentiallyExpectedEOF`.
|
|
|
|
nbc_reqresp_messages_failed.inc(1, [shortProtocolId(protocolId)])
|
|
|
|
await sendErrorResponse(
|
|
|
|
peer, conn, InvalidRequest,
|
|
|
|
errorMsgLit "Request full data not sent in time")
|
|
|
|
return
|
2020-05-06 22:24:55 +00:00
|
|
|
|
2023-06-08 14:20:41 +00:00
|
|
|
finally:
|
|
|
|
# The request quota is shared between all requests - it represents the
|
|
|
|
# cost to perform a service on behalf of a client and is incurred
|
|
|
|
# regardless of whether the request succeeds or fails - we don't count waiting
|
|
|
|
# for this quota against timeouts so as not to prematurely disconnect
|
|
|
|
# clients that are on the edge - nonetheless, the client will count it.
|
|
|
|
|
|
|
|
# When a client exceeds their quota, they will be slowed down without
|
|
|
|
# notification - as long as they don't make parallel requests (which is
|
|
|
|
# limited by libp2p), this will naturally adapt them to the available
|
|
|
|
# quota.
|
|
|
|
|
|
|
|
# Note that the `msg` will be stored in memory while we wait for the
|
|
|
|
# quota to be available. The amount of such messages in memory is
|
|
|
|
# bounded by the libp2p limit of parallel streams
|
|
|
|
|
|
|
|
# This quota also applies to invalid requests thanks to the use of
|
|
|
|
# `finally`.
|
|
|
|
|
|
|
|
awaitQuota(peer, libp2pRequestCost, shortProtocolId(protocolId))
|
2020-05-12 22:35:40 +00:00
|
|
|
|
2020-05-12 22:37:07 +00:00
|
|
|
if msg.isErr:
|
2023-05-19 12:01:27 +00:00
|
|
|
if msg.error.kind in ProtocolViolations:
|
|
|
|
peer.updateScore(PeerScoreInvalidRequest)
|
|
|
|
else:
|
|
|
|
peer.updateScore(PeerScorePoorRequest)
|
|
|
|
|
2022-11-02 10:46:53 +00:00
|
|
|
nbc_reqresp_messages_failed.inc(1, [shortProtocolId(protocolId)])
|
2020-05-12 22:37:07 +00:00
|
|
|
let (responseCode, errMsg) = case msg.error.kind
|
|
|
|
of UnexpectedEOF, PotentiallyExpectedEOF:
|
2022-11-02 10:46:53 +00:00
|
|
|
nbc_reqresp_messages_failed.inc(1, [shortProtocolId(protocolId)])
|
2020-05-21 13:21:29 +00:00
|
|
|
(InvalidRequest, errorMsgLit "Incomplete request")
|
2020-05-12 22:35:40 +00:00
|
|
|
|
2021-07-07 09:09:47 +00:00
|
|
|
of InvalidContextBytes:
|
|
|
|
(ServerError, errorMsgLit "Unrecognized context bytes")
|
|
|
|
|
2020-05-12 22:37:07 +00:00
|
|
|
of InvalidSnappyBytes:
|
2020-05-21 13:21:29 +00:00
|
|
|
(InvalidRequest, errorMsgLit "Failed to decompress snappy payload")
|
2020-05-12 22:35:40 +00:00
|
|
|
|
2020-05-12 22:37:07 +00:00
|
|
|
of InvalidSszBytes:
|
2020-05-21 13:21:29 +00:00
|
|
|
(InvalidRequest, errorMsgLit "Failed to decode SSZ payload")
|
2020-05-12 22:37:07 +00:00
|
|
|
|
2023-05-19 12:01:27 +00:00
|
|
|
of InvalidSizePrefix:
|
|
|
|
(InvalidRequest, errorMsgLit "Invalid chunk size prefix")
|
|
|
|
|
2020-05-12 22:37:07 +00:00
|
|
|
of ZeroSizePrefix:
|
2020-05-21 13:21:29 +00:00
|
|
|
(InvalidRequest, errorMsgLit "The request chunk cannot have a size of zero")
|
2020-05-12 22:37:07 +00:00
|
|
|
|
|
|
|
of SizePrefixOverflow:
|
2020-05-21 13:21:29 +00:00
|
|
|
(InvalidRequest, errorMsgLit "The chunk size exceed the maximum allowed")
|
2020-05-12 22:37:07 +00:00
|
|
|
|
|
|
|
of InvalidResponseCode, ReceivedErrorResponse,
|
|
|
|
StreamOpenTimeout, ReadResponseTimeout:
|
|
|
|
# These shouldn't be possible in a request, because
|
|
|
|
# there are no response codes being read, no stream
|
|
|
|
# openings and no reading of responses:
|
2020-05-21 13:21:29 +00:00
|
|
|
(ServerError, errorMsgLit "Internal server error")
|
2020-05-12 22:37:07 +00:00
|
|
|
|
|
|
|
of BrokenConnection:
|
|
|
|
return
|
|
|
|
|
2022-10-27 16:51:43 +00:00
|
|
|
of ResponseChunkOverflow:
|
|
|
|
(InvalidRequest, errorMsgLit "Too many chunks in response")
|
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
of UnknownError:
|
|
|
|
(InvalidRequest, errorMsgLit "Unknown error while processing request")
|
|
|
|
|
2020-08-10 13:18:17 +00:00
|
|
|
await sendErrorResponse(peer, conn, responseCode, errMsg)
|
2020-05-12 22:37:07 +00:00
|
|
|
return
|
|
|
|
|
|
|
|
try:
|
2024-01-13 09:54:24 +00:00
|
|
|
# logReceivedMsg(peer, MsgType(msg.get))
|
2020-08-10 13:18:17 +00:00
|
|
|
await callUserHandler(MsgType, peer, conn, msg.get)
|
2024-01-19 21:05:52 +00:00
|
|
|
except InvalidInputsError as exc:
|
2022-11-02 10:46:53 +00:00
|
|
|
nbc_reqresp_messages_failed.inc(1, [shortProtocolId(protocolId)])
|
2024-01-19 21:05:52 +00:00
|
|
|
returnInvalidRequest exc.msg
|
|
|
|
except ResourceUnavailableError as exc:
|
|
|
|
returnResourceUnavailable exc.msg
|
|
|
|
except CatchableError as exc:
|
2022-11-02 10:46:53 +00:00
|
|
|
nbc_reqresp_messages_failed.inc(1, [shortProtocolId(protocolId)])
|
2024-01-19 21:05:52 +00:00
|
|
|
await sendErrorResponse(peer, conn, ServerError, ErrorMsg exc.msg.toBytes)
|
2020-05-06 22:24:55 +00:00
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
except CatchableError as exc:
|
2022-11-02 10:46:53 +00:00
|
|
|
nbc_reqresp_messages_failed.inc(1, [shortProtocolId(protocolId)])
|
2024-01-19 21:05:52 +00:00
|
|
|
debug "Error processing an incoming request", exc = exc.msg, msgName
|
2020-05-06 22:24:55 +00:00
|
|
|
|
2020-04-14 16:49:46 +00:00
|
|
|
finally:
|
2024-01-19 21:05:52 +00:00
|
|
|
try:
|
|
|
|
await noCancel conn.closeWithEOF()
|
|
|
|
except CatchableError as exc:
|
2024-01-24 16:23:12 +00:00
|
|
|
debug "Unexpected error while closing incoming connection", exc = exc.msg
|
2023-11-08 13:42:50 +00:00
|
|
|
releasePeer(peer)
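# The quota handling above goes through this module's `awaitQuota` helper and
# `TokenBucket` from chronos/ratelimit. As a rough, self-contained
# illustration of the idea only - every name and number below is invented for
# the example and is not this module's API - a simplified bucket that charges
# the cost up front and makes callers wait when the budget runs out could
# look like this:
when isMainModule:
  import chronos

  type ToyBucket = ref object
    tokens: int         # currently available budget
    capacity: int       # maximum budget
    replenish: Duration # time needed to restore one token

  proc consume(b: ToyBucket, cost: int) {.async: (raises: [CancelledError]).} =
    # Charge `cost` regardless of what the caller does afterwards; if the
    # bucket is empty, wait until enough budget has been replenished.
    while b.tokens < cost:
      await sleepAsync(b.replenish)
      b.tokens = min(b.capacity, b.tokens + 1)
    b.tokens -= cost

  let bucket = ToyBucket(tokens: 2, capacity: 5, replenish: 10.milliseconds)
  waitFor bucket.consume(3) # waits roughly 10 ms for the one missing token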
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-11-26 19:23:45 +00:00
|
|
|
proc toPeerAddr*(r: enr.TypedRecord,
|
2021-08-18 12:30:05 +00:00
|
|
|
proto: IpTransportProtocol): Result[PeerAddr, cstring] =
|
2020-08-02 19:27:36 +00:00
|
|
|
if not r.secp256k1.isSome:
|
|
|
|
return err("enr: no secp256k1 key in record")
|
|
|
|
|
|
|
|
let
|
|
|
|
pubKey = ? keys.PublicKey.fromRaw(r.secp256k1.get)
|
2022-04-08 16:22:49 +00:00
|
|
|
peerId = ? PeerId.init(crypto.PublicKey(
|
2020-08-02 19:27:36 +00:00
|
|
|
scheme: Secp256k1, skkey: secp.SkPublicKey(pubKey)))
|
|
|
|
|
|
|
|
var addrs = newSeq[MultiAddress]()
|
|
|
|
|
2020-11-26 19:23:45 +00:00
|
|
|
case proto
|
|
|
|
of tcpProtocol:
|
|
|
|
if r.ip.isSome and r.tcp.isSome:
|
2024-02-21 19:06:19 +00:00
|
|
|
let ip = IpAddress(
|
|
|
|
family: IpAddressFamily.IPv4,
|
|
|
|
address_v4: r.ip.get)
|
2020-11-26 08:05:23 +00:00
|
|
|
addrs.add MultiAddress.init(ip, tcpProtocol, Port r.tcp.get)
|
2020-11-26 19:23:45 +00:00
|
|
|
|
|
|
|
if r.ip6.isSome:
|
2024-02-21 19:06:19 +00:00
|
|
|
let ip = IpAddress(
|
|
|
|
family: IpAddressFamily.IPv6,
|
|
|
|
address_v6: r.ip6.get)
|
2020-11-26 19:23:45 +00:00
|
|
|
if r.tcp6.isSome:
|
|
|
|
addrs.add MultiAddress.init(ip, tcpProtocol, Port r.tcp6.get)
|
|
|
|
elif r.tcp.isSome:
|
|
|
|
addrs.add MultiAddress.init(ip, tcpProtocol, Port r.tcp.get)
|
|
|
|
else:
|
|
|
|
discard
|
|
|
|
|
|
|
|
of udpProtocol:
|
|
|
|
if r.ip.isSome and r.udp.isSome:
|
2024-02-21 19:06:19 +00:00
|
|
|
let ip = IpAddress(
|
|
|
|
family: IpAddressFamily.IPv4,
|
|
|
|
address_v4: r.ip.get)
|
2020-11-26 19:23:45 +00:00
|
|
|
addrs.add MultiAddress.init(ip, udpProtocol, Port r.udp.get)
|
|
|
|
|
|
|
|
if r.ip6.isSome:
|
2024-02-21 19:06:19 +00:00
|
|
|
let ip = IpAddress(
|
|
|
|
family: IpAddressFamily.IPv6,
|
|
|
|
address_v6: r.ip6.get)
|
2020-11-26 19:23:45 +00:00
|
|
|
if r.udp6.isSome:
|
|
|
|
addrs.add MultiAddress.init(ip, udpProtocol, Port r.udp6.get)
|
|
|
|
elif r.udp.isSome:
|
|
|
|
addrs.add MultiAddress.init(ip, udpProtocol, Port r.udp.get)
|
|
|
|
else:
|
|
|
|
discard
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-08-02 19:27:36 +00:00
|
|
|
if addrs.len == 0:
|
|
|
|
return err("enr: no addresses in record")
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-08-02 19:27:36 +00:00
|
|
|
ok(PeerAddr(peerId: peerId, addrs: addrs))
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-09-21 16:02:27 +00:00
|
|
|
proc checkPeer(node: Eth2Node, peerAddr: PeerAddr): bool =
|
2020-08-02 19:27:36 +00:00
|
|
|
logScope: peer = peerAddr.peerId
|
2020-09-21 16:02:27 +00:00
|
|
|
let peerId = peerAddr.peerId
|
|
|
|
if node.peerPool.hasPeer(peerId):
|
|
|
|
trace "Already connected"
|
|
|
|
false
|
|
|
|
else:
|
|
|
|
if node.isSeen(peerId):
|
|
|
|
trace "Recently connected"
|
|
|
|
false
|
|
|
|
else:
|
|
|
|
true
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
proc dialPeer(node: Eth2Node, peerAddr: PeerAddr, index = 0) {.async: (raises: [CancelledError]).} =
|
2020-09-21 16:02:27 +00:00
|
|
|
## Establish connection with remote peer identified by address ``peerAddr``.
|
|
|
|
logScope:
|
|
|
|
peer = peerAddr.peerId
|
|
|
|
index = index
|
2020-06-03 08:46:29 +00:00
|
|
|
|
2020-09-21 16:02:27 +00:00
|
|
|
if not(node.checkPeer(peerAddr)):
|
|
|
|
return
|
2020-07-23 20:51:56 +00:00
|
|
|
|
2020-09-21 16:02:27 +00:00
|
|
|
debug "Connecting to discovered peer"
|
|
|
|
var deadline = sleepAsync(node.connectTimeout)
|
2022-03-11 10:51:53 +00:00
|
|
|
var workfut = node.switch.connect(
|
|
|
|
peerAddr.peerId,
|
|
|
|
peerAddr.addrs,
|
|
|
|
forceDial = true
|
|
|
|
)
|
2020-06-03 08:46:29 +00:00
|
|
|
|
2020-09-21 16:02:27 +00:00
|
|
|
try:
|
|
|
|
# The `or` operation will only raise exceptions from `workfut`, because `deadline`
|
|
|
|
# cannot raise an exception.
|
|
|
|
await workfut or deadline
|
|
|
|
if workfut.finished():
|
|
|
|
if not deadline.finished():
|
2023-09-22 11:06:27 +00:00
|
|
|
deadline.cancelSoon()
|
2020-09-21 16:02:27 +00:00
|
|
|
inc nbc_successful_dials
|
2020-06-03 08:46:29 +00:00
|
|
|
else:
|
2020-09-21 16:02:27 +00:00
|
|
|
debug "Connection to remote peer timed out"
|
|
|
|
inc nbc_timeout_dials
|
|
|
|
node.addSeen(peerAddr.peerId, SeenTableTimeTimeout)
|
2020-11-17 18:03:29 +00:00
|
|
|
await cancelAndWait(workfut)
|
2020-09-21 16:02:27 +00:00
|
|
|
except CatchableError as exc:
|
|
|
|
debug "Connection to remote peer failed", msg = exc.msg
|
|
|
|
inc nbc_failed_dials
|
|
|
|
node.addSeen(peerAddr.peerId, SeenTableTimeDeadPeer)
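# A minimal standalone sketch of the `workfut or deadline` racing pattern used
# above, with two plain sleeps standing in for `switch.connect` and
# `connectTimeout`; `demoRace` and the durations are invented for the example.
when isMainModule:
  import chronos

  proc demoRace() {.async: (raises: [CancelledError]).} =
    let
      work = sleepAsync(50.milliseconds)     # stands in for the dial attempt
      deadline = sleepAsync(10.milliseconds) # stands in for connectTimeout
    await work or deadline
    if work.finished():
      deadline.cancelSoon()      # the work won the race; drop the timer
    else:
      await cancelAndWait(work)  # timed out; make sure the work future stops

  waitFor demoRace()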
|
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
proc connectWorker(node: Eth2Node, index: int) {.async: (raises: [CancelledError]).} =
|
2020-09-21 16:02:27 +00:00
|
|
|
debug "Connection worker started", index = index
|
|
|
|
while true:
|
|
|
|
# This loop will never produce HIGH CPU usage because it will wait
|
|
|
|
# and block until it obtains a new peer from the queue ``connQueue``.
|
|
|
|
let remotePeerAddr = await node.connQueue.popFirst()
|
2021-03-22 09:17:14 +00:00
|
|
|
# Previous worker dial might have hit the maximum peers.
|
|
|
|
# TODO: could clear the whole connTable and connQueue here also, best
|
|
|
|
# would be to have this event based coming from peer pool or libp2p.
|
2022-03-11 10:51:53 +00:00
|
|
|
|
|
|
|
if node.peerPool.len < node.hardMaxPeers:
|
2021-03-22 09:17:14 +00:00
|
|
|
await node.dialPeer(remotePeerAddr, index)
|
2020-09-21 16:02:27 +00:00
|
|
|
# Peer was added to `connTable` before adding it to `connQueue`, so we
|
|
|
|
# exclude the peer here after processing.
|
|
|
|
node.connTable.excl(remotePeerAddr.peerId)
|
|
|
|
|
2021-08-18 12:30:05 +00:00
|
|
|
proc toPeerAddr(node: Node): Result[PeerAddr, cstring] =
|
2020-09-21 16:02:27 +00:00
|
|
|
let nodeRecord = ? node.record.toTypedRecord()
|
2020-11-26 19:23:45 +00:00
|
|
|
let peerAddr = ? nodeRecord.toPeerAddr(tcpProtocol)
|
2020-09-21 16:02:27 +00:00
|
|
|
ok(peerAddr)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
proc trimConnections(node: Eth2Node, count: int) =
|
2021-08-23 10:29:50 +00:00
|
|
|
# Kill `count` peers, scoring them to remove the least useful ones
|
|
|
|
|
2022-04-08 16:22:49 +00:00
|
|
|
var scores = initOrderedTable[PeerId, int]()
|
2021-08-23 10:29:50 +00:00
|
|
|
|
|
|
|
# Take into account the stabilitySubnets
|
|
|
|
# During sync, only this will be used to score peers
|
2022-03-11 10:51:53 +00:00
|
|
|
# since gossipsub is not running yet
|
2021-08-23 10:29:50 +00:00
|
|
|
#
|
|
|
|
# A peer subscribed to all stabilitySubnets will
|
|
|
|
# have 640 points
|
2022-03-11 10:51:53 +00:00
|
|
|
var peersInGracePeriod = 0
|
2021-08-23 10:29:50 +00:00
|
|
|
for peer in node.peers.values:
|
|
|
|
if peer.connectionState != Connected: continue
|
2022-03-11 10:51:53 +00:00
|
|
|
|
|
|
|
# The metadata pinger is used as a grace period
|
|
|
|
if peer.metadata.isNone:
|
|
|
|
peersInGracePeriod.inc()
|
|
|
|
continue
|
2021-08-23 10:29:50 +00:00
|
|
|
|
|
|
|
let
|
|
|
|
stabilitySubnets = peer.metadata.get().attnets
|
|
|
|
stabilitySubnetsCount = stabilitySubnets.countOnes()
|
|
|
|
thisPeersScore = 10 * stabilitySubnetsCount
|
|
|
|
|
2021-10-21 11:01:29 +00:00
|
|
|
scores[peer.peerId] = thisPeersScore
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
|
|
|
|
# Safeguard: if we have too many peers in the grace
|
|
|
|
# period, don't kick anyone. Otherwise, they will be
|
|
|
|
# preferred over long-standing peers
|
|
|
|
if peersInGracePeriod > scores.len div 2:
|
|
|
|
return
|
|
|
|
|
2021-08-23 10:29:50 +00:00
|
|
|
# Split 1000 points between each topic's peers
|
2022-03-11 10:51:53 +00:00
|
|
|
# + 5000 points for each subscribed topic
|
2021-08-23 10:29:50 +00:00
|
|
|
# This gives priority to peers in topics with few peers
|
|
|
|
# For instance, a topic with `dHigh` peers will give 80 points to each peer
|
|
|
|
# Whereas a topic with `dLow` peers will give 250 points to each peer
|
2022-03-11 10:51:53 +00:00
|
|
|
#
|
|
|
|
# Then, use the average over all topics per peer, to avoid giving too many
|
|
|
|
# points to peers subscribed to many topics
|
|
|
|
|
2022-04-08 16:22:49 +00:00
|
|
|
var gossipScores = initTable[PeerId, tuple[sum: int, count: int]]()
|
2022-02-01 17:20:55 +00:00
|
|
|
for topic, _ in node.pubsub.gossipsub:
|
2021-08-23 10:29:50 +00:00
|
|
|
let
|
2022-02-01 17:20:55 +00:00
|
|
|
peersInMesh = node.pubsub.mesh.peers(topic)
|
|
|
|
peersSubbed = node.pubsub.gossipsub.peers(topic)
|
2022-03-11 10:51:53 +00:00
|
|
|
scorePerMeshPeer = 5_000 div max(peersInMesh, 1)
|
2022-02-01 17:20:55 +00:00
|
|
|
scorePerSubbedPeer = 1_000 div max(peersSubbed, 1)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
for peer in node.pubsub.gossipsub.getOrDefault(topic):
|
2021-08-23 10:29:50 +00:00
|
|
|
if peer.peerId notin scores: continue
|
2022-03-11 10:51:53 +00:00
|
|
|
let currentVal = gossipScores.getOrDefault(peer.peerId)
|
|
|
|
gossipScores[peer.peerId] = (
|
|
|
|
currentVal.sum + scorePerSubbedPeer,
|
|
|
|
currentVal.count + 1
|
|
|
|
)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
# Avoid global topics (>75% of peers), which would greatly reduce
|
|
|
|
# the average score for small peers
|
|
|
|
if peersSubbed > scores.len div 4 * 3: continue
|
|
|
|
|
|
|
|
for peer in node.pubsub.mesh.getOrDefault(topic):
|
2022-02-01 17:20:55 +00:00
|
|
|
if peer.peerId notin scores: continue
|
2022-03-11 10:51:53 +00:00
|
|
|
let currentVal = gossipScores.getOrDefault(peer.peerId)
|
|
|
|
gossipScores[peer.peerId] = (
|
|
|
|
currentVal.sum + scorePerMeshPeer,
|
|
|
|
currentVal.count + 1
|
|
|
|
)
|
|
|
|
|
2022-05-10 10:03:40 +00:00
|
|
|
for peerId, gScore in gossipScores:
|
2022-03-11 10:51:53 +00:00
|
|
|
scores[peerId] =
|
|
|
|
scores.getOrDefault(peerId) + (gScore.sum div gScore.count)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-04-08 16:22:49 +00:00
|
|
|
proc sortPerScore(a, b: (PeerId, int)): int =
|
2021-08-23 10:29:50 +00:00
|
|
|
system.cmp(a[1], b[1])
|
|
|
|
|
|
|
|
scores.sort(sortPerScore)
|
|
|
|
|
|
|
|
var toKick = count
|
|
|
|
|
|
|
|
for peerId in scores.keys:
|
2023-09-15 18:45:55 +00:00
|
|
|
if peerId in node.directPeers: continue
|
2021-09-28 07:58:03 +00:00
|
|
|
debug "kicking peer", peerId, score=scores[peerId]
|
2022-03-11 10:51:53 +00:00
|
|
|
asyncSpawn node.getPeer(peerId).disconnect(PeerScoreLow)
|
2021-08-23 10:29:50 +00:00
|
|
|
dec toKick
|
2021-09-28 07:58:03 +00:00
|
|
|
inc(nbc_cycling_kicked_peers)
|
2021-08-23 10:29:50 +00:00
|
|
|
if toKick <= 0: return
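# A small self-contained illustration of the averaging above: each topic hands
# out `1000 div subscribers` (and `5000 div mesh peers` for mesh membership),
# and a peer's gossip contribution is the mean over its topics, which favours
# peers serving sparsely populated topics. The peer names and subscriber
# counts below are invented for the example.
when isMainModule:
  import std/tables

  var gossip = initTable[string, tuple[sum, count: int]]()
  # peer "a" serves a 4-subscriber topic and a 50-subscriber topic,
  # peer "b" only the 50-subscriber one
  for (peer, subscribers) in [("a", 4), ("a", 50), ("b", 50)]:
    let cur = gossip.getOrDefault(peer)
    gossip[peer] = (cur.sum + 1_000 div subscribers, cur.count + 1)

  for peer, contrib in gossip:
    echo peer, ": ", contrib.sum div contrib.count
  # a: (250 + 20) div 2 = 135, b: 20 div 1 = 20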
|
|
|
|
|
2021-10-21 13:09:19 +00:00
|
|
|
proc getLowSubnets(node: Eth2Node, epoch: Epoch): (AttnetBits, SyncnetBits) =
|
2021-08-23 10:29:50 +00:00
|
|
|
# Returns the subnets required to have a healthy mesh
|
|
|
|
# The subnets are computed so as to, in order:
|
2022-01-04 13:37:04 +00:00
|
|
|
# - Have 0 subnet with < `dLow` peers from topic subscription
|
2021-08-23 10:29:50 +00:00
|
|
|
# - Have 0 subscribed subnet below `dLow`
|
|
|
|
# - Have 0 subscribed subnet below `dOut` outgoing peers
|
2022-01-04 13:37:04 +00:00
|
|
|
# - Have 0 subnet with < `dHigh` peers from topic subscription
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-02-01 17:20:55 +00:00
|
|
|
nbc_gossipsub_low_fanout.set(0)
|
|
|
|
nbc_gossipsub_good_fanout.set(0)
|
|
|
|
nbc_gossipsub_healthy_fanout.set(0)
|
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
template findLowSubnets(topicNameGenerator: untyped,
|
|
|
|
SubnetIdType: type,
|
|
|
|
totalSubnets: static int): auto =
|
|
|
|
var
|
|
|
|
lowOutgoingSubnets: BitArray[totalSubnets]
|
2022-01-04 13:37:04 +00:00
|
|
|
notHighOutgoingSubnets: BitArray[totalSubnets]
|
|
|
|
belowDSubnets: BitArray[totalSubnets]
|
2021-09-21 22:25:49 +00:00
|
|
|
belowDOutSubnets: BitArray[totalSubnets]
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
for subNetId in 0 ..< totalSubnets:
|
|
|
|
let topic =
|
2022-04-08 16:22:49 +00:00
|
|
|
topicNameGenerator(node.forkId.fork_digest, SubnetIdType(subNetId))
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-01-04 13:37:04 +00:00
|
|
|
if node.pubsub.gossipsub.peers(topic) < node.pubsub.parameters.dLow:
|
2021-09-21 22:25:49 +00:00
|
|
|
lowOutgoingSubnets.setBit(subNetId)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-01-04 13:37:04 +00:00
|
|
|
if node.pubsub.gossipsub.peers(topic) < node.pubsub.parameters.dHigh:
|
|
|
|
notHighOutgoingSubnets.setBit(subNetId)
|
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
# Not subscribed
|
|
|
|
if topic notin node.pubsub.mesh: continue
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
if node.pubsub.mesh.peers(topic) < node.pubsub.parameters.dLow:
|
2022-01-04 13:37:04 +00:00
|
|
|
belowDSubnets.setBit(subNetId)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-10-21 13:09:19 +00:00
|
|
|
let outPeers = node.pubsub.mesh.getOrDefault(topic).countIt(it.outbound)
|
|
|
|
if outPeers < node.pubsub.parameters.dOut:
|
2021-09-21 22:25:49 +00:00
|
|
|
belowDOutSubnets.setBit(subNetId)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-02-01 17:20:55 +00:00
|
|
|
nbc_gossipsub_low_fanout.inc(int64(lowOutgoingSubnets.countOnes()))
|
|
|
|
nbc_gossipsub_good_fanout.inc(int64(
|
|
|
|
notHighOutgoingSubnets.countOnes() -
|
|
|
|
lowOutgoingSubnets.countOnes()
|
|
|
|
))
|
|
|
|
nbc_gossipsub_healthy_fanout.inc(int64(
|
|
|
|
totalSubnets - notHighOutgoingSubnets.countOnes()))
|
|
|
|
|
2022-01-04 13:37:04 +00:00
|
|
|
if lowOutgoingSubnets.countOnes() > 0:
|
2021-09-21 22:25:49 +00:00
|
|
|
lowOutgoingSubnets
|
2022-01-04 13:37:04 +00:00
|
|
|
elif belowDSubnets.countOnes() > 0:
|
|
|
|
belowDSubnets
|
|
|
|
elif belowDOutSubnets.countOnes() > 0:
|
2021-09-21 22:25:49 +00:00
|
|
|
belowDOutSubnets
|
2022-01-04 13:37:04 +00:00
|
|
|
else:
|
|
|
|
notHighOutgoingSubnets
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
return (
|
2022-09-19 09:07:46 +00:00
|
|
|
findLowSubnets(getAttestationTopic, SubnetId, ATTESTATION_SUBNET_COUNT.int),
|
2021-10-04 13:08:47 +00:00
|
|
|
# We start looking one epoch before the transition in order to allow
|
|
|
|
# some time for the gossip meshes to get healthy:
|
|
|
|
if epoch + 1 >= node.cfg.ALTAIR_FORK_EPOCH:
|
2021-10-20 16:32:46 +00:00
|
|
|
findLowSubnets(getSyncCommitteeTopic, SyncSubcommitteeIndex, SYNC_COMMITTEE_SUBNET_COUNT)
|
2021-10-04 13:08:47 +00:00
|
|
|
else:
|
2021-10-21 13:09:19 +00:00
|
|
|
default(SyncnetBits)
|
2021-09-21 22:25:49 +00:00
|
|
|
)
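# Reduced illustration of the fallback order in `findLowSubnets` above: the
# first non-empty "needs attention" category wins, so discovery is pointed at
# the neediest subnets first and healthy subnets are only topped up last. The
# `seq[bool]` bitmaps, their contents and `pick` are invented stand-ins for
# the real BitArray values.
when isMainModule:
  import std/sequtils

  proc pick(lowOutgoing, belowD, belowDOut, notHigh: seq[bool]): seq[bool] =
    if lowOutgoing.anyIt(it): lowOutgoing
    elif belowD.anyIt(it): belowD
    elif belowDOut.anyIt(it): belowDOut
    else: notHigh

  doAssert pick(@[false, false], @[true, false], @[true, true],
                @[true, true]) == @[true, false]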
|
2021-03-24 10:48:53 +00:00
|
|
|
|
2022-06-15 08:14:47 +00:00
|
|
|
proc runDiscoveryLoop(node: Eth2Node) {.async.} =
|
2020-03-22 21:55:01 +00:00
|
|
|
debug "Starting discovery loop"
|
2020-09-21 16:02:27 +00:00
|
|
|
|
2020-03-22 21:55:01 +00:00
|
|
|
while true:
|
2021-08-23 10:29:50 +00:00
|
|
|
let
|
2021-10-04 13:08:47 +00:00
|
|
|
currentEpoch = node.getBeaconTime().slotOrZero.epoch
|
|
|
|
(wantedAttnets, wantedSyncnets) = node.getLowSubnets(currentEpoch)
|
2021-08-23 10:29:50 +00:00
|
|
|
wantedAttnetsCount = wantedAttnets.countOnes()
|
2021-09-21 22:25:49 +00:00
|
|
|
wantedSyncnetsCount = wantedSyncnets.countOnes()
|
2022-03-11 10:51:53 +00:00
|
|
|
outgoingPeers = node.peerPool.lenCurrent({PeerType.Outgoing})
|
|
|
|
targetOutgoingPeers = max(node.wantedPeers div 10, 3)
|
|
|
|
|
|
|
|
if wantedAttnetsCount > 0 or wantedSyncnetsCount > 0 or
|
|
|
|
outgoingPeers < targetOutgoingPeers:
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
let
|
|
|
|
minScore =
|
|
|
|
if wantedAttnetsCount > 0 or wantedSyncnetsCount > 0:
|
|
|
|
1
|
|
|
|
else:
|
|
|
|
0
|
|
|
|
discoveredNodes = await node.discovery.queryRandom(
|
|
|
|
node.discoveryForkId, wantedAttnets, wantedSyncnets, minScore)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-09-28 07:58:03 +00:00
|
|
|
let newPeers = block:
|
|
|
|
var np = newSeq[PeerAddr]()
|
|
|
|
for discNode in discoveredNodes:
|
|
|
|
let res = discNode.toPeerAddr()
|
|
|
|
if res.isErr():
|
|
|
|
debug "Failed to decode discovery's node address",
|
2022-04-08 16:22:49 +00:00
|
|
|
node = discNode, errMsg = res.error
|
2021-09-28 07:58:03 +00:00
|
|
|
continue
|
|
|
|
|
2021-01-14 07:58:13 +00:00
|
|
|
let peerAddr = res.get()
|
2021-09-28 07:58:03 +00:00
|
|
|
if node.checkPeer(peerAddr) and
|
|
|
|
peerAddr.peerId notin node.connTable:
|
|
|
|
np.add(peerAddr)
|
|
|
|
np
|
|
|
|
|
|
|
|
let
|
2022-03-11 10:51:53 +00:00
|
|
|
roomCurrent = node.hardMaxPeers - len(node.peerPool)
|
|
|
|
peersToKick = min(newPeers.len - roomCurrent, node.hardMaxPeers div 5)
|
2021-09-28 07:58:03 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
if peersToKick > 0 and newPeers.len > 0:
|
|
|
|
node.trimConnections(peersToKick)
|
2021-09-28 07:58:03 +00:00
|
|
|
|
|
|
|
for peerAddr in newPeers:
|
|
|
|
# We add the peer to the pending connections table here, but it is
|
|
|
|
# removed only in `connectWorker`.
|
|
|
|
node.connTable.incl(peerAddr.peerId)
|
|
|
|
await node.connQueue.addLast(peerAddr)
|
2021-01-14 07:58:13 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
debug "Discovery tick",
|
|
|
|
wanted_peers = node.wantedPeers,
|
|
|
|
current_peers = len(node.peerPool),
|
2021-01-14 07:58:13 +00:00
|
|
|
discovered_nodes = len(discoveredNodes),
|
2021-09-28 07:58:03 +00:00
|
|
|
new_peers = len(newPeers)
|
2021-01-14 07:58:13 +00:00
|
|
|
|
2021-09-28 07:58:03 +00:00
|
|
|
if len(newPeers) == 0:
|
2022-03-11 10:51:53 +00:00
|
|
|
let currentPeers = len(node.peerPool)
|
2021-01-14 07:58:13 +00:00
|
|
|
if currentPeers <= node.wantedPeers shr 2: # 25%
|
|
|
|
warn "Peer count low, no new peers discovered",
|
|
|
|
discovered_nodes = len(discoveredNodes), new_peers = newPeers,
|
|
|
|
current_peers = currentPeers, wanted_peers = node.wantedPeers
|
2021-01-18 13:13:26 +00:00
|
|
|
|
|
|
|
# Discovery `queryRandom` can have a synchronous fast path for example
|
|
|
|
# when no peers are in the routing table. Don't run it in a continuous loop.
|
2021-08-23 10:29:50 +00:00
|
|
|
#
|
|
|
|
# Also, give some time to dial the discovered nodes and update stats etc
|
|
|
|
await sleepAsync(5.seconds)
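# Worked example of the capacity arithmetic above (numbers invented): with
# hardMaxPeers = 100, 95 peers in the pool and 20 usable discovered peers,
# roomCurrent = 5 and peersToKick = min(20 - 5, 100 div 5) = 15, i.e. at most
# a fifth of the hard limit is trimmed per discovery tick to make room.
when isMainModule:
  let
    hardMaxPeers = 100
    currentPeers = 95
    newPeersLen = 20
    roomCurrent = hardMaxPeers - currentPeers
    peersToKick = min(newPeersLen - roomCurrent, hardMaxPeers div 5)
  doAssert roomCurrent == 5 and peersToKick == 15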
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-11-26 19:23:45 +00:00
|
|
|
proc resolvePeer(peer: Peer) =
|
|
|
|
# Resolve task which searches for the peer's public key and recovers its
|
|
|
|
# ENR using discovery5. We only resolve ENR for peers we know about to avoid
|
|
|
|
# querying the network - as of now, the ENR is not needed, except for
|
|
|
|
# debugging
|
2021-10-21 11:01:29 +00:00
|
|
|
logScope: peer = peer.peerId
|
2020-11-26 19:23:45 +00:00
|
|
|
let startTime = now(chronos.Moment)
|
|
|
|
let nodeId =
|
|
|
|
block:
|
|
|
|
var key: PublicKey
|
2022-04-08 16:22:49 +00:00
|
|
|
# `secp256k1` keys are always stored inside PeerId.
|
2021-10-21 11:01:29 +00:00
|
|
|
discard peer.peerId.extractPublicKey(key)
|
2020-11-26 19:23:45 +00:00
|
|
|
keys.PublicKey.fromRaw(key.skkey.getBytes()).get().toNodeId()
|
|
|
|
|
|
|
|
debug "Peer's ENR recovery task started", node_id = $nodeId
|
|
|
|
|
|
|
|
# This is "fast-path" for peers which was dialed. In this case discovery
|
|
|
|
# already has the most recent ENR information about this peer.
|
|
|
|
let gnode = peer.network.discovery.getNode(nodeId)
|
|
|
|
if gnode.isSome():
|
2024-01-13 09:54:24 +00:00
|
|
|
peer.enr = Opt.some(gnode.get().record)
|
2020-11-26 19:23:45 +00:00
|
|
|
inc(nbc_successful_discoveries)
|
|
|
|
let delay = now(chronos.Moment) - startTime
|
|
|
|
nbc_resolve_time.observe(delay.toFloatSeconds())
|
2021-01-21 17:42:57 +00:00
|
|
|
debug "Peer's ENR recovered", delay
|
2020-11-26 19:23:45 +00:00
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
proc handlePeer*(peer: Peer) {.async: (raises: [CancelledError]).} =
|
2020-11-26 19:23:45 +00:00
|
|
|
let res = peer.network.peerPool.addPeerNoWait(peer, peer.direction)
|
|
|
|
case res:
|
|
|
|
of PeerStatus.LowScoreError, PeerStatus.NoSpaceError:
|
|
|
|
# Peer has low score or we do not have enough space in PeerPool,
|
|
|
|
# we are going to disconnect it gracefully.
|
|
|
|
# Peer's state will be updated in the connection event.
|
|
|
|
debug "Peer has low score or there no space in PeerPool",
|
|
|
|
peer = peer, reason = res
|
|
|
|
await peer.disconnect(FaultOrError)
|
|
|
|
of PeerStatus.DeadPeerError:
|
|
|
|
# Peer's lifetime future is finished, so it's already dead;
|
|
|
|
# we do not need to perform a graceful disconnect.
|
|
|
|
# Peer's state will be updated in connection event.
|
|
|
|
discard
|
|
|
|
of PeerStatus.DuplicateError:
|
|
|
|
# Peer is already present in PeerPool, we can't perform disconnect,
|
|
|
|
# because in such case we could kill both connections (connection
|
|
|
|
# which is present in PeerPool and new one).
|
|
|
|
# This is a possible bug, because we could enter here only if
|
|
|
|
# `peer.connections == 1` - it means that the peer's lifetime is not
|
|
|
|
# tracked properly and we still have not received the `Disconnected` event.
|
|
|
|
debug "Peer is already present in PeerPool", peer = peer
|
|
|
|
of PeerStatus.Success:
|
|
|
|
# Peer was added to PeerPool.
|
|
|
|
peer.score = NewPeerScore
|
|
|
|
peer.connectionState = Connected
|
|
|
|
# We spawn task which will obtain ENR for this peer.
|
|
|
|
resolvePeer(peer)
|
|
|
|
debug "Peer successfully connected", peer = peer,
|
|
|
|
connections = peer.connections
|
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
proc onConnEvent(
|
|
|
|
node: Eth2Node, peerId: PeerId, event: ConnEvent) {.
|
|
|
|
async: (raises: [CancelledError]).} =
|
2020-08-10 10:58:34 +00:00
|
|
|
let peer = node.getPeer(peerId)
|
|
|
|
case event.kind
|
|
|
|
of ConnEventKind.Connected:
|
|
|
|
inc peer.connections
|
2020-11-26 19:23:45 +00:00
|
|
|
debug "Peer connection upgraded", peer = $peerId,
|
|
|
|
connections = peer.connections
|
2020-08-10 10:58:34 +00:00
|
|
|
if peer.connections == 1:
|
|
|
|
# Libp2p may connect multiple times to the same peer - using different
|
2020-08-08 20:52:02 +00:00
|
|
|
# transports for both incoming and outgoing. For now, we'll count our
|
2020-08-10 10:58:34 +00:00
|
|
|
# "fist" encounter with the peer as the true connection, leaving the
|
|
|
|
# other connections be - libp2p limits the number of concurrent
|
|
|
|
# connections to the same peer, and only one of these connections will be
|
|
|
|
# active. Nonetheless, this quirk will cause a number of odd behaviours:
|
|
|
|
# * For peer limits, we might miscount the incoming vs outgoing quota
|
|
|
|
# * Protocol handshakes are wonky: we'll not necessarily use the newly
|
|
|
|
# connected transport - instead we'll just pick a random one!
|
2020-11-26 19:23:45 +00:00
|
|
|
case peer.connectionState
|
|
|
|
of Disconnecting:
|
|
|
|
# We got a connection to a peer that we are currently disconnecting.
|
|
|
|
# Normally this does not happen, but if a peer is being disconnected
|
|
|
|
# while a concurrent (incoming for example) connection attempt happens,
|
|
|
|
# we might end up here
|
|
|
|
debug "Got connection attempt from peer that we are disconnecting",
|
|
|
|
peer = peerId
|
2024-01-19 21:05:52 +00:00
|
|
|
try:
|
|
|
|
await node.switch.disconnect(peerId)
|
|
|
|
except CancelledError as exc:
|
|
|
|
raise exc
|
|
|
|
except CatchableError as exc:
|
2024-01-24 16:23:12 +00:00
|
|
|
debug "Unexpected error while disconnecting peer", exc = exc.msg
|
2020-11-26 19:23:45 +00:00
|
|
|
return
|
|
|
|
of None:
|
|
|
|
# We have established a connection with the new peer.
|
|
|
|
peer.connectionState = Connecting
|
|
|
|
of Disconnected:
|
|
|
|
# We have established a connection with the peer that we have seen
|
|
|
|
# before - reusing the existing peer object is fine
|
|
|
|
peer.connectionState = Connecting
|
|
|
|
peer.score = 0 # Will be set to NewPeerScore after handshake
|
|
|
|
of Connecting, Connected:
|
|
|
|
# This means that we got a notification event from a peer to which we are
|
|
|
|
# already connected or connecting right now. If this situation happens,
|
|
|
|
# it indicates a bug on the `nim-libp2p` side.
|
|
|
|
warn "Got connection attempt from peer which we already connected",
|
|
|
|
peer = peerId
|
|
|
|
await peer.disconnect(FaultOrError)
|
|
|
|
return
|
|
|
|
|
|
|
|
# Store connection direction inside Peer object.
|
|
|
|
if event.incoming:
|
|
|
|
peer.direction = PeerType.Incoming
|
|
|
|
else:
|
|
|
|
peer.direction = PeerType.Outgoing
|
2020-08-08 20:52:02 +00:00
|
|
|
|
2020-11-26 08:05:23 +00:00
|
|
|
await performProtocolHandshakes(peer, event.incoming)
|
|
|
|
|
2020-08-10 10:58:34 +00:00
|
|
|
of ConnEventKind.Disconnected:
|
|
|
|
dec peer.connections
|
2020-11-26 19:23:45 +00:00
|
|
|
debug "Lost connection to peer", peer = peerId,
|
|
|
|
connections = peer.connections
|
|
|
|
|
2020-08-10 10:58:34 +00:00
|
|
|
if peer.connections == 0:
|
2020-11-26 19:23:45 +00:00
|
|
|
debug "Peer disconnected", peer = $peerId, connections = peer.connections
|
|
|
|
|
|
|
|
# Whatever caused disconnection, avoid connection spamming
|
|
|
|
node.addSeen(peerId, SeenTableTimeReconnect)
|
|
|
|
|
2020-08-10 10:58:34 +00:00
|
|
|
let fut = peer.disconnectedFut
|
2020-11-26 19:23:45 +00:00
|
|
|
if not(isNil(fut)):
|
2020-11-26 08:05:23 +00:00
|
|
|
fut.complete()
|
2020-11-26 19:23:45 +00:00
|
|
|
peer.disconnectedFut = nil
|
|
|
|
else:
|
|
|
|
# TODO (cheatfate): This could be removed when the bug is fixed inside
|
|
|
|
# `nim-libp2p`.
|
|
|
|
debug "Got new event while peer is already disconnected",
|
|
|
|
peer = peerId, peer_state = peer.connectionState
|
|
|
|
peer.connectionState = Disconnected
|
2020-08-10 10:58:34 +00:00
|
|
|
|
2022-06-15 08:14:47 +00:00
|
|
|
proc new(T: type Eth2Node,
|
|
|
|
config: BeaconNodeConf | LightClientConf, runtimeCfg: RuntimeConfig,
|
|
|
|
enrForkId: ENRForkID, discoveryForkId: ENRForkID,
|
|
|
|
forkDigests: ref ForkDigests, getBeaconTime: GetBeaconTimeFn,
|
|
|
|
switch: Switch, pubsub: GossipSub,
|
2023-11-10 15:58:48 +00:00
|
|
|
ip: Option[IpAddress], tcpPort, udpPort: Option[Port],
|
2022-06-15 08:14:47 +00:00
|
|
|
privKey: keys.PrivateKey, discovery: bool,
|
2023-09-15 18:45:55 +00:00
|
|
|
directPeers: DirectPeers,
|
2023-08-25 09:29:07 +00:00
|
|
|
rng: ref HmacDrbgContext): T {.raises: [CatchableError].} =
|
2020-08-12 14:16:59 +00:00
|
|
|
when not defined(local_testnet):
|
2021-02-22 16:17:48 +00:00
|
|
|
let
|
2022-05-31 10:45:37 +00:00
|
|
|
connectTimeout = chronos.minutes(1)
|
|
|
|
seenThreshold = chronos.minutes(5)
|
2020-08-12 14:16:59 +00:00
|
|
|
else:
|
2021-02-22 16:17:48 +00:00
|
|
|
let
|
2022-05-31 10:45:37 +00:00
|
|
|
connectTimeout = chronos.seconds(10)
|
|
|
|
seenThreshold = chronos.seconds(10)
|
2022-03-18 11:36:50 +00:00
|
|
|
type MetaData = altair.MetaData # Weird bug without this..
|
|
|
|
|
|
|
|
# Versions up to v22.3.0 would write an empty `MetaData` to
|
|
|
|
# `data-dir/node-metadata.json` which would then be reloaded on startup - don't
|
|
|
|
# write a file with this name or downgrades will break!
|
|
|
|
const metadata = MetaData()
|
2021-02-22 16:17:48 +00:00
|
|
|
|
|
|
|
let node = T(
|
|
|
|
switch: switch,
|
|
|
|
pubsub: pubsub,
|
|
|
|
wantedPeers: config.maxPeers,
|
2022-03-11 10:51:53 +00:00
|
|
|
hardMaxPeers: config.hardMaxPeers.get(config.maxPeers * 3 div 2), #*1.5
|
2021-08-19 10:45:31 +00:00
|
|
|
cfg: runtimeCfg,
|
2022-04-08 16:22:49 +00:00
|
|
|
peerPool: newPeerPool[Peer, PeerId](),
|
2021-02-22 16:17:48 +00:00
|
|
|
# It's important here to create the AsyncQueue with a limited size, otherwise
|
|
|
|
# it could produce HIGH CPU usage.
|
|
|
|
connQueue: newAsyncQueue[PeerAddr](ConcurrentConnections),
|
|
|
|
metadata: metadata,
|
|
|
|
forkId: enrForkId,
|
2021-09-29 11:06:16 +00:00
|
|
|
discoveryForkId: discoveryForkId,
|
2021-07-07 09:09:47 +00:00
|
|
|
forkDigests: forkDigests,
|
2021-08-19 10:45:31 +00:00
|
|
|
getBeaconTime: getBeaconTime,
|
2021-02-22 16:17:48 +00:00
|
|
|
discovery: Eth2DiscoveryProtocol.new(
|
|
|
|
config, ip, tcpPort, udpPort, privKey,
|
2021-08-10 06:19:13 +00:00
|
|
|
{
|
|
|
|
enrForkIdField: SSZ.encode(enrForkId),
|
|
|
|
enrAttestationSubnetsField: SSZ.encode(metadata.attnets)
|
|
|
|
},
|
2021-02-22 16:17:48 +00:00
|
|
|
rng),
|
|
|
|
discoveryEnabled: discovery,
|
|
|
|
rng: rng,
|
|
|
|
connectTimeout: connectTimeout,
|
2022-11-02 10:46:53 +00:00
|
|
|
seenThreshold: seenThreshold,
|
2023-09-15 18:45:55 +00:00
|
|
|
directPeers: directPeers,
|
2022-11-02 10:46:53 +00:00
|
|
|
quota: TokenBucket.new(maxGlobalQuota, fullReplenishTime)
|
2021-02-22 16:17:48 +00:00
|
|
|
)
|
|
|
|
|
2021-10-21 11:01:29 +00:00
|
|
|
proc peerHook(peerId: PeerId, event: ConnEvent): Future[void] {.gcsafe.} =
|
|
|
|
onConnEvent(node, peerId, event)
|
2020-08-10 10:58:34 +00:00
|
|
|
|
2021-10-04 18:42:34 +00:00
|
|
|
switch.addConnEventHandler(peerHook, ConnEventKind.Connected)
|
|
|
|
switch.addConnEventHandler(peerHook, ConnEventKind.Disconnected)
|
|
|
|
|
2021-11-01 14:50:24 +00:00
|
|
|
proc scoreCheck(peer: Peer): bool =
|
|
|
|
peer.score >= PeerScoreLowLimit
|
|
|
|
|
|
|
|
proc onDeletePeer(peer: Peer) =
|
2023-11-08 13:42:50 +00:00
|
|
|
peer.releasePeer()
|
2021-11-01 14:50:24 +00:00
|
|
|
|
|
|
|
node.peerPool.setScoreCheck(scoreCheck)
|
|
|
|
node.peerPool.setOnDeletePeer(onDeletePeer)
|
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
node
|
|
|
|
|
2024-01-13 09:54:24 +00:00
|
|
|
proc registerProtocol*(node: Eth2Node, Proto: type, state: Proto.NetworkState) =
|
|
|
|
# This convoluted registration process is a leftover from the shared p2p macro
|
|
|
|
# and should be refactored
|
|
|
|
let proto = Proto.protocolInfo()
|
|
|
|
node.protocols.add(proto)
|
|
|
|
node.protocolStates.setLen(max(proto.index + 1, node.protocolStates.len))
|
|
|
|
node.protocolStates[proto.index] = state
|
|
|
|
|
|
|
|
for msg in proto.messages:
|
|
|
|
if msg.protocolMounter != nil:
|
|
|
|
msg.protocolMounter node
|
|
|
|
|
2020-08-03 17:35:27 +00:00
|
|
|
proc startListening*(node: Eth2Node) {.async.} =
|
2020-08-24 11:52:06 +00:00
|
|
|
if node.discoveryEnabled:
|
2020-10-09 13:37:12 +00:00
|
|
|
try:
|
|
|
|
node.discovery.open()
|
2024-01-19 21:05:52 +00:00
|
|
|
except CatchableError as exc:
|
2023-01-12 17:58:42 +00:00
|
|
|
fatal "Failed to start discovery service. UDP port may be already in use",
|
2024-01-19 21:05:52 +00:00
|
|
|
exc = exc.msg
|
2020-10-09 13:37:12 +00:00
|
|
|
quit 1
|
|
|
|
|
|
|
|
try:
|
2021-12-17 11:39:24 +00:00
|
|
|
await node.switch.start()
|
2024-01-19 21:05:52 +00:00
|
|
|
except CatchableError as exc:
|
2023-01-12 17:58:42 +00:00
|
|
|
fatal "Failed to start LibP2P transport. TCP port may be already in use",
|
2024-01-19 21:05:52 +00:00
|
|
|
exc = exc.msg
|
2020-10-09 13:37:12 +00:00
|
|
|
quit 1
|
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
proc peerPingerHeartbeat(node: Eth2Node): Future[void] {.async: (raises: [CancelledError]).}
|
|
|
|
proc peerTrimmerHeartbeat(node: Eth2Node): Future[void] {.async: (raises: [CancelledError]).}
|
2020-09-16 10:00:11 +00:00
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
proc start*(node: Eth2Node) {.async: (raises: [CancelledError]).} =
|
2020-09-16 10:00:11 +00:00
|
|
|
proc onPeerCountChanged() =
|
2022-03-11 10:51:53 +00:00
|
|
|
trace "Number of peers has been changed", length = len(node.peerPool)
|
2020-09-16 10:00:11 +00:00
|
|
|
nbc_peers.set int64(len(node.peerPool))
|
|
|
|
|
|
|
|
node.peerPool.setPeerCounter(onPeerCountChanged)
|
|
|
|
|
2020-06-10 19:36:54 +00:00
|
|
|
for i in 0 ..< ConcurrentConnections:
|
2020-09-21 16:02:27 +00:00
|
|
|
node.connWorkers.add connectWorker(node, i)
|
2020-06-10 19:36:54 +00:00
|
|
|
|
2020-08-24 11:52:06 +00:00
|
|
|
if node.discoveryEnabled:
|
|
|
|
node.discovery.start()
|
|
|
|
traceAsyncErrors node.runDiscoveryLoop()
|
|
|
|
else:
|
2020-10-01 18:56:42 +00:00
|
|
|
notice "Discovery disabled; trying bootstrap nodes",
|
2020-08-24 11:52:06 +00:00
|
|
|
nodes = node.discovery.bootstrapRecords.len
|
|
|
|
for enr in node.discovery.bootstrapRecords:
|
|
|
|
let tr = enr.toTypedRecord()
|
|
|
|
if tr.isOk():
|
2020-11-26 19:23:45 +00:00
|
|
|
let pa = tr.get().toPeerAddr(tcpProtocol)
|
2020-08-24 11:52:06 +00:00
|
|
|
if pa.isOk():
|
|
|
|
await node.connQueue.addLast(pa.get())
|
2021-08-23 10:29:50 +00:00
|
|
|
node.peerPingerHeartbeatFut = node.peerPingerHeartbeat()
|
2022-03-11 10:51:53 +00:00
|
|
|
node.peerTrimmerHeartbeatFut = node.peerTrimmerHeartbeat()
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
proc stop*(node: Eth2Node) {.async: (raises: [CancelledError]).} =
|
2020-05-28 01:14:01 +00:00
|
|
|
# Ignore errors in futures, since we're shutting down (but log them on the
|
|
|
|
# TRACE level, if a timeout is reached).
|
2022-03-11 10:51:53 +00:00
|
|
|
var waitedFutures =
|
|
|
|
@[
|
|
|
|
node.switch.stop(),
|
|
|
|
node.peerPingerHeartbeat.cancelAndWait(),
|
|
|
|
node.peerTrimmerHeartbeatFut.cancelAndWait(),
|
|
|
|
]
|
|
|
|
|
|
|
|
if node.discoveryEnabled:
|
|
|
|
waitedFutures &= node.discovery.closeWait()
|
|
|
|
|
2020-05-28 01:14:01 +00:00
|
|
|
let
|
|
|
|
timeout = 5.seconds
|
|
|
|
completed = await withTimeout(allFutures(waitedFutures), timeout)
|
|
|
|
if not completed:
|
2020-08-20 16:30:47 +00:00
|
|
|
trace "Eth2Node.stop(): timeout reached", timeout,
|
|
|
|
futureErrors = waitedFutures.filterIt(it.error != nil).mapIt(it.error.msg)
|
2020-05-19 18:57:35 +00:00
|
|
|
|
2022-06-15 08:14:47 +00:00
|
|
|
proc init(T: type Peer, network: Eth2Node, peerId: PeerId): Peer =
|
2020-11-26 19:23:45 +00:00
|
|
|
let res = Peer(
|
2021-10-21 11:01:29 +00:00
|
|
|
peerId: peerId,
|
2020-11-26 19:23:45 +00:00
|
|
|
network: network,
|
|
|
|
connectionState: ConnectionState.None,
|
|
|
|
lastReqTime: now(chronos.Moment),
|
2021-08-23 10:29:50 +00:00
|
|
|
lastMetadataTime: now(chronos.Moment),
|
2022-11-02 10:46:53 +00:00
|
|
|
quota: TokenBucket.new(maxRequestQuota.int, fullReplenishTime)
|
2020-11-26 19:23:45 +00:00
|
|
|
)
|
2024-01-13 09:54:24 +00:00
|
|
|
res.protocolStates.setLen(network.protocolStates.len())
|
|
|
|
for proto in network.protocols:
|
2020-11-26 19:23:45 +00:00
|
|
|
if not(isNil(proto.peerStateInitializer)):
|
2024-01-13 09:54:24 +00:00
|
|
|
res.protocolStates[proto.index] = proto.peerStateInitializer(res)
|
2020-11-26 19:23:45 +00:00
|
|
|
res
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
proc registerMsg(protocol: ProtocolInfo,
|
|
|
|
name: string,
|
|
|
|
mounter: MounterProc,
|
2024-01-13 09:54:24 +00:00
|
|
|
libp2pCodecName: string) =
|
2020-03-22 21:55:01 +00:00
|
|
|
protocol.messages.add MessageInfo(name: name,
|
|
|
|
protocolMounter: mounter,
|
2024-01-13 09:54:24 +00:00
|
|
|
libp2pCodecName: libp2pCodecName)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
|
|
|
|
var
|
|
|
|
Format = ident "SSZ"
|
2020-03-22 23:23:21 +00:00
|
|
|
Connection = bindSym "Connection"
|
2020-03-22 21:55:01 +00:00
|
|
|
Peer = bindSym "Peer"
|
|
|
|
Eth2Node = bindSym "Eth2Node"
|
|
|
|
registerMsg = bindSym "registerMsg"
|
|
|
|
initProtocol = bindSym "initProtocol"
|
|
|
|
msgVar = ident "msg"
|
|
|
|
networkVar = ident "network"
|
|
|
|
callUserHandler = ident "callUserHandler"
|
2020-05-23 22:24:47 +00:00
|
|
|
MSG = ident "MSG"
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
new result
|
|
|
|
|
|
|
|
result.PeerType = Peer
|
|
|
|
result.NetworkType = Eth2Node
|
|
|
|
result.setEventHandlers = bindSym "setEventHandlers"
|
|
|
|
result.SerializationFormat = Format
|
2020-05-12 22:37:07 +00:00
|
|
|
result.RequestResultsWrapper = ident "NetRes"
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2024-01-13 09:54:24 +00:00
|
|
|
result.implementMsg = proc (msg: eth2_protocol_dsl.Message) =
|
2020-05-23 22:24:47 +00:00
|
|
|
if msg.kind == msgResponse:
|
|
|
|
return
|
|
|
|
|
2020-03-22 21:55:01 +00:00
|
|
|
let
|
|
|
|
protocol = msg.protocol
|
|
|
|
msgName = $msg.ident
|
|
|
|
msgNameLit = newLit msgName
|
|
|
|
MsgRecName = msg.recName
|
|
|
|
MsgStrongRecName = msg.strongRecName
|
|
|
|
codecNameLit = getRequestProtoName(msg.procDef)
|
2020-05-26 17:07:18 +00:00
|
|
|
protocolMounterName = ident(msgName & "Mounter")
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
##
|
|
|
|
## Implement the Thunk:
|
|
|
|
##
|
2020-03-22 23:23:21 +00:00
|
|
|
## The protocol handlers in nim-libp2p receive only a `Connection`
|
2020-03-22 21:55:01 +00:00
|
|
|
## parameter and there is no way to access the wider context (such
|
|
|
|
## as the current `Switch`). In our handlers, we may need to list all
|
|
|
|
## peers in the current network, so we must keep a reference to the
|
|
|
|
## network object in the closure environment of the installed handlers.
|
|
|
|
##
|
|
|
|
## For this reason, we define a `protocol mounter` proc that will
|
|
|
|
## initialize the network object by creating handlers bound to the
|
|
|
|
## specific network.
|
|
|
|
##
|
2020-05-26 17:07:18 +00:00
|
|
|
var userHandlerCall = newTree(nnkDiscardStmt)
|
|
|
|
|
2020-03-22 21:55:01 +00:00
|
|
|
if msg.userHandler != nil:
|
2020-05-26 17:07:18 +00:00
|
|
|
var OutputParamType = if msg.kind == msgRequest: msg.outputParamType
|
|
|
|
else: nil
|
2020-05-23 22:24:47 +00:00
|
|
|
|
|
|
|
if OutputParamType == nil:
|
2020-05-26 17:07:18 +00:00
|
|
|
userHandlerCall = msg.genUserHandlerCall(msgVar, [peerVar])
|
|
|
|
if msg.kind == msgRequest:
|
|
|
|
userHandlerCall = newCall(ident"sendUserHandlerResultAsChunkImpl",
|
|
|
|
streamVar,
|
|
|
|
userHandlerCall)
|
2020-05-23 22:24:47 +00:00
|
|
|
else:
|
|
|
|
if OutputParamType.kind == nnkVarTy:
|
|
|
|
OutputParamType = OutputParamType[0]
|
|
|
|
|
|
|
|
let isChunkStream = eqIdent(OutputParamType[0], "MultipleChunksResponse")
|
|
|
|
msg.response.recName = if isChunkStream:
|
2022-10-27 16:51:43 +00:00
|
|
|
newTree(nnkBracketExpr, ident"List", OutputParamType[1], OutputParamType[2])
|
2020-05-23 22:24:47 +00:00
|
|
|
else:
|
|
|
|
OutputParamType[1]
|
|
|
|
|
|
|
|
let responseVar = ident("response")
|
|
|
|
userHandlerCall = newStmtList(
|
|
|
|
newVarStmt(responseVar,
|
|
|
|
newCall(ident"init", OutputParamType,
|
2020-08-10 13:18:17 +00:00
|
|
|
peerVar, streamVar)),
|
2020-05-23 22:24:47 +00:00
|
|
|
msg.genUserHandlerCall(msgVar, [peerVar], outputParam = responseVar))
|
|
|
|
|
2020-05-26 17:07:18 +00:00
|
|
|
protocol.outRecvProcs.add quote do:
|
|
|
|
template `callUserHandler`(`MSG`: type `MsgStrongRecName`,
|
|
|
|
`peerVar`: `Peer`,
|
|
|
|
`streamVar`: `Connection`,
|
|
|
|
`msgVar`: `MsgRecName`): untyped =
|
|
|
|
`userHandlerCall`
|
|
|
|
|
2024-01-13 09:54:24 +00:00
|
|
|
proc `protocolMounterName`(`networkVar`: `Eth2Node`) {.raises: [].} =
|
2020-05-26 17:07:18 +00:00
|
|
|
proc snappyThunk(`streamVar`: `Connection`,
|
|
|
|
`protocolVar`: string): Future[void] {.gcsafe.} =
|
2022-11-02 10:46:53 +00:00
|
|
|
return handleIncomingStream(`networkVar`, `streamVar`, `protocolVar`,
|
2020-05-26 17:07:18 +00:00
|
|
|
`MsgStrongRecName`)
|
|
|
|
|
2024-01-13 09:54:24 +00:00
|
|
|
try:
|
|
|
|
mount `networkVar`.switch,
|
2024-03-21 16:53:59 +00:00
|
|
|
LPProtocol.new(
|
|
|
|
codecs = @[`codecNameLit`], handler = snappyThunk)
|
2024-01-13 09:54:24 +00:00
|
|
|
except LPError as exc:
|
|
|
|
# Failure here indicates that the mounting was done incorrectly which
|
|
|
|
# would be a programming error
|
|
|
|
raiseAssert exc.msg
|
2020-03-22 21:55:01 +00:00
|
|
|
##
|
|
|
|
## Implement Senders and Handshake
|
|
|
|
##
|
|
|
|
if msg.kind == msgHandshake:
|
|
|
|
macros.error "Handshake messages are not supported in LibP2P protocols"
|
|
|
|
else:
|
|
|
|
var sendProc = msg.createSendProc()
|
|
|
|
implementSendProcBody sendProc
|
|
|
|
|
|
|
|
protocol.outProcRegistrations.add(
|
|
|
|
newCall(registerMsg,
|
|
|
|
protocol.protocolInfoVar,
|
|
|
|
msgNameLit,
|
2020-05-26 17:07:18 +00:00
|
|
|
protocolMounterName,
|
2024-01-13 09:54:24 +00:00
|
|
|
codecNameLit))
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
result.implementProtocolInit = proc (p: P2PProtocol): NimNode =
|
2024-01-16 18:37:47 +00:00
|
|
|
# This `macrocache` counter gives each protocol its own integer index which
|
|
|
|
# is later used to index per-protocol, per-instance data kept in the peer and
|
|
|
|
# network - the counter is global across all modules / protocols of the
|
|
|
|
# application
|
|
|
|
let
|
|
|
|
id = CacheCounter"eth2_network_protocol_id"
|
|
|
|
tmp = id.value
|
|
|
|
id.inc(1)
|
|
|
|
|
|
|
|
newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit, newLit(tmp))
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2021-08-23 10:29:50 +00:00
|
|
|
# Must import here because of cyclicity
|
2024-01-13 09:54:24 +00:00
|
|
|
import ./peer_protocol
|
|
|
|
export peer_protocol
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
proc updatePeerMetadata(node: Eth2Node, peerId: PeerId) {.async: (raises: [CancelledError]).} =
|
2021-08-23 10:29:50 +00:00
|
|
|
trace "updating peer metadata", peerId
|
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
let
|
|
|
|
peer = node.getPeer(peerId)
|
|
|
|
newMetadataRes = await peer.getMetadata_v2()
|
|
|
|
newMetadata = newMetadataRes.valueOr:
|
|
|
|
debug "Failed to retrieve metadata from peer!", peerId, error = newMetadataRes.error
|
2023-02-13 11:02:20 +00:00
|
|
|
peer.failedMetadataRequests.inc()
|
|
|
|
return
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2024-01-13 09:54:24 +00:00
|
|
|
peer.metadata = Opt.some(newMetadata)
|
2022-02-01 17:20:55 +00:00
|
|
|
peer.failedMetadataRequests = 0
|
2021-08-23 10:29:50 +00:00
|
|
|
peer.lastMetadataTime = Moment.now()
|
|
|
|
|
2021-09-28 07:58:03 +00:00
|
|
|
const
|
|
|
|
# For Phase0, metadata changes only every 27+ hours
|
|
|
|
MetadataRequestFrequency = 30.minutes
|
2022-02-01 17:20:55 +00:00
|
|
|
MetadataRequestMaxFailures = 3
|
2021-09-28 07:58:03 +00:00
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
proc peerPingerHeartbeat(node: Eth2Node) {.async: (raises: [CancelledError]).} =
|
2021-08-23 10:29:50 +00:00
|
|
|
while true:
|
|
|
|
let heartbeatStart_m = Moment.now()
|
|
|
|
var updateFutures: seq[Future[void]]
|
|
|
|
|
|
|
|
for peer in node.peers.values:
|
|
|
|
if peer.connectionState != Connected: continue
|
|
|
|
|
|
|
|
if peer.metadata.isNone or
|
2024-01-19 21:05:52 +00:00
|
|
|
heartbeatStart_m - peer.lastMetadataTime > MetadataRequestFrequency:
|
2021-10-21 11:01:29 +00:00
|
|
|
updateFutures.add(node.updatePeerMetadata(peer.peerId))
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-02-01 17:20:55 +00:00
|
|
|
await allFutures(updateFutures)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2024-01-22 16:34:54 +00:00
|
|
|
reset(updateFutures)
|
|
|
|
|
2021-08-23 10:29:50 +00:00
|
|
|
for peer in node.peers.values:
|
|
|
|
if peer.connectionState != Connected: continue
|
|
|
|
|
2022-02-01 17:20:55 +00:00
|
|
|
if peer.failedMetadataRequests > MetadataRequestMaxFailures:
|
2021-09-28 07:58:03 +00:00
|
|
|
debug "no metadata from peer, kicking it", peer
|
2024-01-22 16:34:54 +00:00
|
|
|
updateFutures.add(peer.disconnect(PeerScoreLow))
|
|
|
|
|
|
|
|
await allFutures(updateFutures)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-09-28 07:58:03 +00:00
|
|
|
await sleepAsync(5.seconds)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2024-01-19 21:05:52 +00:00
|
|
|
proc peerTrimmerHeartbeat(node: Eth2Node) {.async: (raises: [CancelledError]).} =
|
|
|
|
# Disconnect peers in excess of the (soft) max peer count
|
2022-03-11 10:51:53 +00:00
|
|
|
while true:
|
2024-01-19 21:05:52 +00:00
|
|
|
# Only count Connected peers (to avoid counting Disconnecting ones)
|
|
|
|
let
|
|
|
|
connectedPeers = node.peers.values.countIt(
|
|
|
|
it.connectionState == Connected)
|
|
|
|
excessPeers = connectedPeers - node.wantedPeers
|
2022-03-11 10:51:53 +00:00
|
|
|
|
|
|
|
if excessPeers > 0:
|
2024-01-19 21:05:52 +00:00
|
|
|
# Let chronos take back control between trimmings
|
2022-03-11 10:51:53 +00:00
|
|
|
node.trimConnections(1)
|
|
|
|
|
|
|
|
await sleepAsync(1.seconds div max(1, excessPeers))
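# Worked example of the trimming cadence above (the peer count is invented):
# trimming a single peer per iteration and sleeping `1.seconds div
# excessPeers` sheds e.g. 10 excess peers over roughly one second instead of
# all at once, letting chronos and the peer-pool callbacks run between
# disconnects.
when isMainModule:
  import chronos
  doAssert 1.seconds div max(1, 10) == 100.milliseconds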
|
|
|
|
|
2020-03-22 20:54:47 +00:00
|
|
|
func asEthKey*(key: PrivateKey): keys.PrivateKey =
|
2020-04-17 13:29:49 +00:00
|
|
|
keys.PrivateKey(key.skkey)
|
2019-11-03 23:02:27 +00:00
|
|
|
|
2020-03-22 20:54:47 +00:00
|
|
|
template tcpEndPoint(address, port): auto =
|
2020-06-05 15:08:50 +00:00
|
|
|
MultiAddress.init(address, tcpProtocol, port)
|
2020-03-22 20:54:47 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
func initNetKeys(privKey: PrivateKey): NetKeyPair =
|
|
|
|
let pubKey = privKey.getPublicKey().expect("working public key from random")
|
2022-05-31 10:45:37 +00:00
|
|
|
NetKeyPair(seckey: privKey, pubkey: pubKey)
|
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
proc getRandomNetKeys*(rng: var HmacDrbgContext): NetKeyPair =
|
|
|
|
let privKey = PrivateKey.random(Secp256k1, rng).valueOr:
|
|
|
|
fatal "Could not generate random network key file"
|
|
|
|
quit QuitFailure
|
|
|
|
initNetKeys(privKey)
|
|
|
|
|
2023-02-23 02:10:07 +00:00
|
|
|
proc getPersistentNetKeys*(
|
2022-07-13 21:26:16 +00:00
|
|
|
rng: var HmacDrbgContext,
|
|
|
|
dataDir, netKeyFile: string,
|
|
|
|
netKeyInsecurePassword: bool,
|
|
|
|
allowLoadExisting: bool): NetKeyPair =
|
|
|
|
if netKeyFile == "random":
|
|
|
|
let
|
|
|
|
keys = rng.getRandomNetKeys()
|
|
|
|
pres = PeerId.init(keys.pubkey).valueOr:
|
2022-11-20 07:20:23 +00:00
|
|
|
fatal "Could not obtain PeerId from network key", error
|
2020-11-16 14:39:00 +00:00
|
|
|
quit QuitFailure
|
2022-07-13 21:26:16 +00:00
|
|
|
info "Generating new networking key",
|
|
|
|
network_public_key = keys.pubkey, network_peer_id = $pres
|
|
|
|
keys
|
|
|
|
else:
|
|
|
|
let
|
|
|
|
# Insecure password used only for automated testing.
|
|
|
|
insecurePassword =
|
|
|
|
if netKeyInsecurePassword:
|
2024-01-13 09:54:24 +00:00
|
|
|
Opt.some(NetworkInsecureKeyPassword)
|
2020-08-19 13:12:10 +00:00
|
|
|
else:
|
2024-01-13 09:54:24 +00:00
|
|
|
Opt.none(string)
|
2020-08-19 13:12:10 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
keyPath =
|
|
|
|
if isAbsolute(netKeyFile):
|
|
|
|
netKeyFile
|
|
|
|
else:
|
|
|
|
dataDir / netKeyFile
|
|
|
|
logScope: key_path = keyPath
|
2020-08-25 10:16:31 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
if fileAccessible(keyPath, {AccessFlags.Find}) and allowLoadExisting:
|
|
|
|
info "Network key storage is present, unlocking"
|
2020-08-25 10:16:31 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
let
|
|
|
|
privKey = loadNetKeystore(keyPath, insecurePassword).valueOr:
|
2020-08-24 16:06:41 +00:00
|
|
|
fatal "Could not load network key file"
|
2020-08-19 13:12:10 +00:00
|
|
|
quit QuitFailure
|
2022-07-13 21:26:16 +00:00
|
|
|
keys = initNetKeys(privKey)
|
|
|
|
info "Network key storage was successfully unlocked",
|
|
|
|
network_public_key = keys.pubkey
|
|
|
|
keys
|
|
|
|
else:
|
|
|
|
if allowLoadExisting:
|
2020-08-24 16:06:41 +00:00
|
|
|
info "Network key storage is missing, creating a new one",
|
2022-07-13 21:26:16 +00:00
|
|
|
key_path = keyPath
|
|
|
|
let
|
|
|
|
keys = rng.getRandomNetKeys()
|
|
|
|
sres = saveNetKeystore(rng, keyPath, keys.seckey, insecurePassword)
|
|
|
|
if sres.isErr():
|
|
|
|
fatal "Could not create network key file"
|
|
|
|
quit QuitFailure
|
2020-08-25 10:16:31 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
info "New network key storage was created",
|
|
|
|
network_public_key = keys.pubkey
|
|
|
|
keys
|
2020-08-19 13:12:10 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
proc getPersistentNetKeys*(
|
|
|
|
rng: var HmacDrbgContext, config: BeaconNodeConf): NetKeyPair =
|
|
|
|
case config.cmd
|
|
|
|
of BNStartUpCmd.noCommand, BNStartUpCmd.record:
|
|
|
|
rng.getPersistentNetKeys(
|
|
|
|
string(config.dataDir), config.netKeyFile, config.netKeyInsecurePassword,
|
|
|
|
allowLoadExisting = true)
|
2020-08-19 13:12:10 +00:00
|
|
|
else:
|
2022-07-13 21:26:16 +00:00
|
|
|
rng.getRandomNetKeys()

func gossipId(
    data: openArray[byte], phase0Prefix, topic: string): seq[byte] =
  # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#topics-and-messages
  # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#topics-and-messages
  const MESSAGE_DOMAIN_VALID_SNAPPY = [0x01'u8, 0x00, 0x00, 0x00]
  let messageDigest = withEth2Hash:
    h.update(MESSAGE_DOMAIN_VALID_SNAPPY)

    if not topic.startsWith(phase0Prefix):
      # everything >= altair
      h.update topic.len.uint64.toBytesLE
      h.update topic

    h.update data

  messageDigest.data[0..19]
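
# For reference, the id computed above follows the consensus p2p spec: for
# altair-and-later topics it is
#   SHA256(MESSAGE_DOMAIN_VALID_SNAPPY ++ uint64_le(len(topic)) ++ topic ++
#          snappy_decompressed_message_data)[0:20]
# while phase0-prefixed topics hash only the domain and the decompressed data.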

proc newBeaconSwitch(config: BeaconNodeConf | LightClientConf,
                     seckey: PrivateKey, address: MultiAddress,
                     rng: ref HmacDrbgContext): Switch {.raises: [CatchableError].} =
  var sb =
    if config.enableYamux:
      SwitchBuilder.new().withYamux()
    else:
      SwitchBuilder.new()
  # Order of multiplexers matters, the first will be default

  sb
    .withPrivateKey(seckey)
    .withAddress(address)
    .withRng(rng)
    .withNoise()
    .withMplex(chronos.minutes(5), chronos.minutes(5))
    .withMaxConnections(config.maxPeers)
    .withAgentVersion(config.agentString)
    .withTcpTransport({ServerFlags.ReuseAddr})
    .build()

proc createEth2Node*(rng: ref HmacDrbgContext,
                     config: BeaconNodeConf | LightClientConf,
                     netKeys: NetKeyPair,
                     cfg: RuntimeConfig,
                     forkDigests: ref ForkDigests,
                     getBeaconTime: GetBeaconTimeFn,
                     genesis_validators_root: Eth2Digest): Eth2Node
                    {.raises: [CatchableError].} =
  let
    enrForkId = getENRForkID(
      cfg, getBeaconTime().slotOrZero.epoch, genesis_validators_root)

    discoveryForkId = getDiscoveryForkID(
      cfg, getBeaconTime().slotOrZero.epoch, genesis_validators_root)

    listenAddress =
      if config.listenAddress.isSome():
        config.listenAddress.get()
      else:
        getAutoAddress(Port(0)).toIpAddress()

    (extIp, extTcpPort, extUdpPort) =
      setupAddress(config.nat, listenAddress, config.tcpPort,
                   config.udpPort, clientId)

    directPeers = block:
      var res: DirectPeers
      for s in config.directPeers:
        let (peerId, address) =
          if s.startsWith("enr:"):
            let
              typedEnr = parseBootstrapAddress(s).get().toTypedRecord().get()
              peerAddress = toPeerAddr(typedEnr, tcpProtocol).get()
            (peerAddress.peerId, peerAddress.addrs[0])
          elif s.startsWith("/"):
            parseFullAddress(s).tryGet()
          else:
            fatal "Direct peer address should start with / (multiaddress) or enr:", conf = s
            quit QuitFailure
        res.mgetOrPut(peerId, @[]).add(address)
        info "Adding privileged direct peer", peerId, address
      res
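
    # Illustrative only (hypothetical values): each entry in
    # `config.directPeers` is either a full multiaddress such as
    # "/ip4/192.0.2.10/tcp/9000/p2p/16Uiu2HA..." or an "enr:..." record; both
    # forms resolve to a (PeerId, MultiAddress) pair that gossipsub treats as
    # a privileged direct peer.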
    hostAddress = tcpEndPoint(listenAddress, config.tcpPort)
    announcedAddresses =
      if extIp.isNone() or extTcpPort.isNone(): @[]
      else: @[tcpEndPoint(extIp.get(), extTcpPort.get())]

  debug "Initializing networking", hostAddress,
    network_public_key = netKeys.pubkey,
    announcedAddresses

  # TODO nim-libp2p still doesn't have support for announcing addresses
  # that are different from the host address (this is relevant when we
  # are running behind a NAT).
  var switch = newBeaconSwitch(config, netKeys.seckey, hostAddress, rng)

  let phase0Prefix = "/eth2/" & $forkDigests.phase0

  func msgIdProvider(m: messages.Message): Result[seq[byte], ValidationResult] =
    try:
      # This doesn't have to be a tight bound, just enough to avoid denial of
      # service attacks.
      let decoded = snappy.decode(m.data, static(GOSSIP_MAX_SIZE.uint32))
      ok(gossipId(decoded, phase0Prefix, m.topic))
    except CatchableError:
      err(ValidationResult.Reject)

  let
    params = GossipSubParams.init(
      pruneBackoff = chronos.minutes(1),
      unsubscribeBackoff = chronos.seconds(10),
      floodPublish = true,
      gossipFactor = 0.05,
      d = 8,
      dLow = 6,
      dHigh = 12,
      dScore = 6,
      dOut = 6 div 2, # less than dlow and no more than dlow/2
      dLazy = 6,
      heartbeatInterval = chronos.milliseconds(700),
      historyLength = 6,
      historyGossip = 3,
      fanoutTTL = chronos.seconds(60),
      # 2 epochs matching maximum valid attestation lifetime
      seenTTL = chronos.seconds(int(SECONDS_PER_SLOT * SLOTS_PER_EPOCH * 2)),
      gossipThreshold = -4000,
      publishThreshold = -8000,
      graylistThreshold = -16000, # also disconnect threshold
      opportunisticGraftThreshold = 0,
      decayInterval = chronos.seconds(12),
      decayToZero = 0.01,
      retainScore = chronos.seconds(385),
      appSpecificWeight = 0.0,
      ipColocationFactorWeight = -53.75,
      ipColocationFactorThreshold = 3.0,
      behaviourPenaltyWeight = -15.9,
      behaviourPenaltyDecay = 0.986,
      disconnectBadPeers = true,
      directPeers = directPeers,
      bandwidthEstimatebps = config.bandwidthEstimate.get(100_000_000)
    )
    pubsub = GossipSub.init(
      switch = switch,
      msgIdProvider = msgIdProvider,
      # We process messages in the validator, so we don't need data callbacks
      triggerSelf = false,
      sign = false,
      verifySignature = false,
      anonymize = true,
      maxMessageSize = static(GOSSIP_MAX_SIZE.int),
      parameters = params)

  switch.mount(pubsub)

  let node = Eth2Node.new(
    config, cfg, enrForkId, discoveryForkId, forkDigests, getBeaconTime,
    switch, pubsub, extIp, extTcpPort, extUdpPort, netKeys.seckey.asEthKey,
    discovery = config.discv5Enabled, directPeers, rng = rng)

  node.pubsub.subscriptionValidator =
    proc(topic: string): bool {.gcsafe, raises: [].} =
      topic in node.validTopics

  node

func announcedENR*(node: Eth2Node): enr.Record =
  doAssert node.discovery != nil, "The Eth2Node must be initialized"
  node.discovery.localNode.record

func shortForm*(id: NetKeyPair): string =
  $PeerId.init(id.pubkey)

proc subscribe*(
    node: Eth2Node, topic: string, topicParams: TopicParams,
    enableTopicMetrics: bool = false) =
  if enableTopicMetrics:
    node.pubsub.knownTopics.incl(topic)

  node.pubsub.topicParams[topic] = topicParams

  # Passing in `nil` because we do all message processing in the validator
  node.pubsub.subscribe(topic, nil)

proc newValidationResultFuture(v: ValidationResult): Future[ValidationResult]
    {.async: (raises: [CancelledError], raw: true).} =
  let res = newFuture[ValidationResult]("eth2_network.execValidator")
  res.complete(v)
  res

proc addValidator*[MsgType](node: Eth2Node,
                            topic: string,
                            msgValidator: proc(msg: MsgType):
                              ValidationResult {.gcsafe, raises: [].} ) =
  # Message validators run when subscriptions are enabled - they validate the
  # data and return an indication of whether the message should be broadcast
  # or not - validation is `async` but implemented without the macro because
  # this is a performance hotspot.
  proc execValidator(topic: string, message: GossipMsg):
      Future[ValidationResult] {.raises: [].} =
    inc nbc_gossip_messages_received
    trace "Validating incoming gossip message", len = message.data.len, topic

    var decompressed = snappy.decode(message.data, gossipMaxSize(MsgType))
    let res = if decompressed.len > 0:
      try:
        let decoded = SSZ.decode(decompressed, MsgType)
        decompressed = newSeq[byte](0) # release memory before validating
        msgValidator(decoded) # doesn't raise!
      except SerializationError as e:
        inc nbc_gossip_failed_ssz
        debug "Error decoding gossip",
          topic, len = message.data.len, decompressed = decompressed.len,
          error = e.msg
        ValidationResult.Reject
    else: # snappy returns empty seq on failed decompression
      inc nbc_gossip_failed_snappy
      debug "Error decompressing gossip", topic, len = message.data.len
      ValidationResult.Reject

    newValidationResultFuture(res)

  node.validTopics.incl topic # Only allow subscription to validated topics
  node.pubsub.addValidator(topic, execValidator)
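
# Note on intended use (a sketch; `MyGossipType` is a placeholder): topics are
# registered through `addValidator`/`addAsyncValidator`, which also records
# them in `node.validTopics` - the set consulted by the `subscriptionValidator`
# installed in `createEth2Node` - before being subscribed, e.g.
#   node.addValidator(topic, proc(msg: MyGossipType): ValidationResult = ...)
#   node.subscribe(topic, TopicParams.init())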

proc addAsyncValidator*[MsgType](node: Eth2Node,
                                 topic: string,
                                 msgValidator: proc(msg: MsgType):
                                   Future[ValidationResult] {.async: (raises: [CancelledError]).} ) =
  proc execValidator(topic: string, message: GossipMsg):
      Future[ValidationResult] {.async: (raw: true).} =
    inc nbc_gossip_messages_received
    trace "Validating incoming gossip message", len = message.data.len, topic

    var decompressed = snappy.decode(message.data, gossipMaxSize(MsgType))
    if decompressed.len > 0:
      try:
        let decoded = SSZ.decode(decompressed, MsgType)
        decompressed = newSeq[byte](0) # release memory before validating
        msgValidator(decoded) # doesn't raise!
      except SerializationError as e:
        inc nbc_gossip_failed_ssz
        debug "Error decoding gossip",
          topic, len = message.data.len, decompressed = decompressed.len,
          error = e.msg
        newValidationResultFuture(ValidationResult.Reject)
    else: # snappy returns empty seq on failed decompression
      inc nbc_gossip_failed_snappy
      debug "Error decompressing gossip", topic, len = message.data.len
      newValidationResultFuture(ValidationResult.Reject)

  node.validTopics.incl topic # Only allow subscription to validated topics

  node.pubsub.addValidator(topic, execValidator)

proc unsubscribe*(node: Eth2Node, topic: string) =
  node.pubsub.unsubscribeAll(topic)

proc gossipEncode(msg: auto): seq[byte] =
  let uncompressed = SSZ.encode(msg)
  # This function is only for messages we create. A message this large amounts
  # to an internal logic error.
  doAssert uncompressed.lenu64 <= GOSSIP_MAX_SIZE

  snappy.encode(uncompressed)

proc broadcast(node: Eth2Node, topic: string, msg: seq[byte]):
    Future[SendResult] {.async: (raises: [CancelledError]).} =
  let peers =
    try:
      await node.pubsub.publish(topic, msg)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      debug "Unexpected error during broadcast", exc = exc.msg
      return err("Broadcast failed")

  # TODO remove workaround for sync committee BN/VC log spam
  if peers > 0 or find(topic, "sync_committee_") != -1:
    inc nbc_gossip_messages_sent
    ok()
  else:
    # Increments libp2p_gossipsub_failed_publish metric
    err("No peers on libp2p topic")

proc broadcast(node: Eth2Node, topic: string, msg: auto):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  # Avoid {.async.} copies of message while broadcasting
  broadcast(node, topic, gossipEncode(msg))
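
# Note: the `SendResult` produced above is best-effort - "No peers on libp2p
# topic" only records that the local mesh had nobody to publish to, and nothing
# here retries. The exported `broadcast*` wrappers below simply forward this
# result to their callers.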

proc subscribeAttestationSubnets*(
    node: Eth2Node, subnets: AttnetBits, forkDigest: ForkDigest) =
  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestations-and-aggregation
  # Nimbus won't score attestation subnets for now; we just rely on block and
  # aggregate which are more stable and reliable

  for subnet_id, enabled in subnets:
    if enabled:
      node.subscribe(getAttestationTopic(
        forkDigest, SubnetId(subnet_id)), TopicParams.init())

proc unsubscribeAttestationSubnets*(
    node: Eth2Node, subnets: AttnetBits, forkDigest: ForkDigest) =
  # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#attestations-and-aggregation
  # Nimbus won't score attestation subnets for now; we just rely on block and
  # aggregate which are more stable and reliable

  for subnet_id, enabled in subnets:
    if enabled:
      node.unsubscribe(getAttestationTopic(forkDigest, SubnetId(subnet_id)))

proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#metadata
  if node.metadata.attnets == attnets:
    return

  node.metadata.seq_number += 1
  node.metadata.attnets = attnets

  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription
  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
  let res = node.discovery.updateRecord({
    enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets)
  })
  if res.isErr():
    # This should not occur in this scenario as the private key would always
    # be the correct one and the ENR will not increase in size.
    warn "Failed to update the ENR attnets field", error = res.error
  else:
    debug "Stability subnets changed; updated ENR attnets", attnets

proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) =
  # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/validator.md#sync-committee-subnet-stability
  if node.metadata.syncnets == syncnets:
    return

  node.metadata.seq_number += 1
  node.metadata.syncnets = syncnets

  let res = node.discovery.updateRecord({
    enrSyncSubnetsField: SSZ.encode(node.metadata.syncnets)
  })
  if res.isErr():
    # This should not occur in this scenario as the private key would always
    # be the correct one and the ENR will not increase in size.
    warn "Failed to update the ENR syncnets field", error = res.error
  else:
    debug "Sync committees changed; updated ENR syncnets", syncnets

proc updateForkId(node: Eth2Node, value: ENRForkID) =
  node.forkId = value
  let res = node.discovery.updateRecord({enrForkIdField: SSZ.encode value})
  if res.isErr():
    # This should not occur in this scenario as the private key would always
    # be the correct one and the ENR will not increase in size.
    warn "Failed to update the ENR fork id", value, error = res.error
  else:
    debug "ENR fork id changed", value

proc updateForkId*(node: Eth2Node, epoch: Epoch, genesis_validators_root: Eth2Digest) =
  node.updateForkId(getENRForkID(node.cfg, epoch, genesis_validators_root))
  node.discoveryForkId = getDiscoveryForkID(node.cfg, epoch, genesis_validators_root)

func forkDigestAtEpoch*(node: Eth2Node, epoch: Epoch): ForkDigest =
  node.forkDigests[].atEpoch(epoch, node.cfg)

proc getWallEpoch(node: Eth2Node): Epoch =
  node.getBeaconTime().slotOrZero.epoch

proc broadcastAttestation*(
    node: Eth2Node, subnet_id: SubnetId, attestation: phase0.Attestation):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  # Regardless of the contents of the attestation,
  # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/p2p-interface.md#transitioning-the-gossip
  # implies that pre-fork, messages using post-fork digests might be
  # ignored, whilst post-fork, there is effectively a seen_ttl-based
  # timer unsubscription point that means no new pre-fork-forkdigest
  # should be sent.
  let
    forkPrefix = node.forkDigestAtEpoch(node.getWallEpoch)
    topic = getAttestationTopic(forkPrefix, subnet_id)
  node.broadcast(topic, attestation)

proc broadcastVoluntaryExit*(
    node: Eth2Node, exit: SignedVoluntaryExit):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getVoluntaryExitsTopic(node.forkDigestAtEpoch(node.getWallEpoch))
  node.broadcast(topic, exit)

proc broadcastAttesterSlashing*(
    node: Eth2Node, slashing: phase0.AttesterSlashing):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getAttesterSlashingsTopic(
    node.forkDigestAtEpoch(node.getWallEpoch))
  node.broadcast(topic, slashing)

proc broadcastProposerSlashing*(
    node: Eth2Node, slashing: ProposerSlashing):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getProposerSlashingsTopic(
    node.forkDigestAtEpoch(node.getWallEpoch))
  node.broadcast(topic, slashing)

proc broadcastBlsToExecutionChange*(
    node: Eth2Node, bls_to_execution_change: SignedBLSToExecutionChange):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getBlsToExecutionChangeTopic(
    node.forkDigestAtEpoch(node.getWallEpoch))
  node.broadcast(topic, bls_to_execution_change)

proc broadcastAggregateAndProof*(
    node: Eth2Node, proof: SignedAggregateAndProof):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getAggregateAndProofsTopic(
    node.forkDigestAtEpoch(node.getWallEpoch))
  node.broadcast(topic, proof)

proc broadcastBeaconBlock*(
    node: Eth2Node, blck: phase0.SignedBeaconBlock):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getBeaconBlocksTopic(node.forkDigests.phase0)
  node.broadcast(topic, blck)

proc broadcastBeaconBlock*(
    node: Eth2Node, blck: altair.SignedBeaconBlock):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getBeaconBlocksTopic(node.forkDigests.altair)
  node.broadcast(topic, blck)

proc broadcastBeaconBlock*(
    node: Eth2Node, blck: bellatrix.SignedBeaconBlock):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getBeaconBlocksTopic(node.forkDigests.bellatrix)
  node.broadcast(topic, blck)

proc broadcastBeaconBlock*(
    node: Eth2Node, blck: capella.SignedBeaconBlock):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getBeaconBlocksTopic(node.forkDigests.capella)
  node.broadcast(topic, blck)

proc broadcastBeaconBlock*(
    node: Eth2Node, blck: deneb.SignedBeaconBlock):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getBeaconBlocksTopic(node.forkDigests.deneb)
  node.broadcast(topic, blck)

proc broadcastBeaconBlock*(
    node: Eth2Node, blck: electra.SignedBeaconBlock):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getBeaconBlocksTopic(node.forkDigests.electra)
  node.broadcast(topic, blck)
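
# Unlike the attestation/exit/slashing broadcasts above, which derive their
# topic from the wall-clock epoch via `forkDigestAtEpoch`, each
# `broadcastBeaconBlock` overload is typed per fork and publishes on that
# fork's own digest, so a block is only ever sent on the topic matching its
# fork.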

proc broadcastBlobSidecar*(
    node: Eth2Node, subnet_id: BlobId, blob: deneb.BlobSidecar):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let
    forkPrefix = node.forkDigestAtEpoch(node.getWallEpoch)
    topic = getBlobSidecarTopic(forkPrefix, subnet_id)
  node.broadcast(topic, blob)

proc broadcastSyncCommitteeMessage*(
    node: Eth2Node, msg: SyncCommitteeMessage,
    subcommitteeIdx: SyncSubcommitteeIndex):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getSyncCommitteeTopic(
    node.forkDigestAtEpoch(node.getWallEpoch), subcommitteeIdx)
  node.broadcast(topic, msg)

proc broadcastSignedContributionAndProof*(
    node: Eth2Node, msg: SignedContributionAndProof):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getSyncCommitteeContributionAndProofTopic(
    node.forkDigestAtEpoch(node.getWallEpoch))
  node.broadcast(topic, msg)

proc broadcastLightClientFinalityUpdate*(
    node: Eth2Node, msg: ForkyLightClientFinalityUpdate):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getLightClientFinalityUpdateTopic(
    node.forkDigestAtEpoch(msg.contextEpoch))
  node.broadcast(topic, msg)

proc broadcastLightClientOptimisticUpdate*(
    node: Eth2Node, msg: ForkyLightClientOptimisticUpdate):
    Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
  let topic = getLightClientOptimisticUpdateTopic(
    node.forkDigestAtEpoch(msg.contextEpoch))
  node.broadcast(topic, msg)