# beacon_chain
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import
  # Std lib
  std/[typetraits, os, sequtils, strutils, algorithm, math, tables],

  # Status libs
  stew/[leb128, endians2, results, byteutils, io2, bitops2],
  stew/shims/net as stewNet,
  stew/shims/[macros],
  faststreams/[inputs, outputs, buffers], snappy, snappy/faststreams,
  json_serialization, json_serialization/std/[net, sets, options],
  chronos, chronicles, metrics,
  libp2p/[switch, peerinfo, multiaddress, multicodec, crypto/crypto,
    crypto/secp, builders],
  libp2p/protocols/pubsub/[
    pubsub, gossipsub, rpc/message, rpc/messages, peertable, pubsubpeer],
  libp2p/stream/connection,
  eth/[keys, async_utils], eth/p2p/p2p_protocol_dsl,
  eth/net/nat, eth/p2p/discoveryv5/[enr, node, random2],
  ".."/[version, conf, beacon_clock, conf_light_client],
  ../spec/datatypes/[phase0, altair, bellatrix],
  ../spec/[eth2_ssz_serialization, network, helpers, forks],
  ../validators/keystore_management,
  "."/[eth2_discovery, libp2p_json_serialization, peer_pool, peer_scores]

export
  tables, chronos, version, multiaddress, peerinfo, p2pProtocol, connection,
  libp2p_json_serialization, eth2_ssz_serialization, results, eth2_discovery,
  peer_pool, peer_scores

logScope:
  topics = "networking"

type
  NetKeyPair* = crypto.KeyPair
  PublicKey* = crypto.PublicKey
  PrivateKey* = crypto.PrivateKey

  Bytes = seq[byte]
  ErrorMsg = List[byte, 256]
  SendResult* = Result[void, cstring]

  # TODO: This is here only to eradicate a compiler
  # warning about unused import (rpc/messages).
  GossipMsg = messages.Message

  SeenItem* = object
    peerId*: PeerId
    stamp*: chronos.Moment

  Eth2Node* = ref object of RootObj
    switch*: Switch
    pubsub*: GossipSub
    discovery*: Eth2DiscoveryProtocol
    discoveryEnabled*: bool
    wantedPeers*: int
    hardMaxPeers*: int
    peerPool*: PeerPool[Peer, PeerId]
    protocolStates*: seq[RootRef]
    metadata*: altair.MetaData
    connectTimeout*: chronos.Duration
    seenThreshold*: chronos.Duration
    connQueue: AsyncQueue[PeerAddr]
    seenTable: Table[PeerId, SeenItem]
    connWorkers: seq[Future[void]]
    connTable: HashSet[PeerId]
    forkId*: ENRForkID
    discoveryForkId*: ENRForkID
    forkDigests*: ref ForkDigests
    rng*: ref HmacDrbgContext
    peers*: Table[PeerId, Peer]
    validTopics: HashSet[string]
    peerPingerHeartbeatFut: Future[void]
    peerTrimmerHeartbeatFut: Future[void]
    cfg: RuntimeConfig
    getBeaconTime: GetBeaconTimeFn

  EthereumNode = Eth2Node # needed for the definitions in p2p_backends_helpers

  AverageThroughput* = object
    count*: uint64
    average*: float

  Peer* = ref object
    network*: Eth2Node
    peerId*: PeerId
    discoveryId*: Eth2DiscoveryId
    connectionState*: ConnectionState
    protocolStates*: seq[RootRef]
    netThroughput: AverageThroughput
    score*: int
    requestQuota*: float
    lastReqTime*: Moment
    connections*: int
    enr*: Option[enr.Record]
    metadata*: Option[altair.MetaData]
    failedMetadataRequests: int
    lastMetadataTime*: Moment
    direction*: PeerType
    disconnectedFut: Future[void]

  PeerAddr* = object
    peerId*: PeerId
    addrs*: seq[MultiAddress]

  ConnectionState* = enum
    None,
    Connecting,
    Connected,
    Disconnecting,
    Disconnected

  UntypedResponse* = ref object
    peer*: Peer
    stream*: Connection
    writtenChunks*: int

  SingleChunkResponse*[MsgType] = distinct UntypedResponse
    ## Protocol requests using this type will produce request-making
    ## client-side procs that return `NetRes[MsgType]`

  MultipleChunksResponse*[MsgType] = distinct UntypedResponse
    ## Protocol requests using this type will produce request-making
    ## client-side procs that return `NetRes[seq[MsgType]]`.
    ## In the future, such procs will return an `InputStream[NetRes[MsgType]]`.

  MessageInfo* = object
    name*: string

    # Private fields:
    libp2pCodecName: string
    protocolMounter*: MounterProc
    isRequired, isLightClientRequest: bool

  ProtocolInfoObj* = object
    name*: string
    messages*: seq[MessageInfo]
    index*: int # the position of the protocol in the
                # ordered list of supported protocols

    # Private fields:
    peerStateInitializer*: PeerStateInitializer
    networkStateInitializer*: NetworkStateInitializer
    onPeerConnected*: OnPeerConnectedHandler
    onPeerDisconnected*: OnPeerDisconnectedHandler

  ProtocolInfo* = ptr ProtocolInfoObj

  ResponseCode* = enum
    Success
    InvalidRequest
    ServerError
    ResourceUnavailable

  PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe, raises: [Defect].}
  NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe, raises: [Defect].}
  OnPeerConnectedHandler* = proc(peer: Peer, incoming: bool): Future[void] {.gcsafe, raises: [Defect].}
  OnPeerDisconnectedHandler* = proc(peer: Peer): Future[void] {.gcsafe, raises: [Defect].}
  ThunkProc* = LPProtoHandler
  MounterProc* = proc(network: Eth2Node) {.gcsafe, raises: [Defect, CatchableError].}
  MessageContentPrinter* = proc(msg: pointer): string {.gcsafe, raises: [Defect].}

  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.2/specs/phase0/p2p-interface.md#goodbye
  DisconnectionReason* = enum
    # might see other values on the wire!
    ClientShutDown = 1
    IrrelevantNetwork = 2
    FaultOrError = 3
    # Clients MAY use reason codes above 128 to indicate alternative,
    # erroneous request-specific responses.
    PeerScoreLow = 237 # 79 * 3

  PeerDisconnected* = object of CatchableError
    reason*: DisconnectionReason

  TransmissionError* = object of CatchableError

  Eth2NetworkingErrorKind* = enum
    BrokenConnection
    ReceivedErrorResponse
    UnexpectedEOF
    PotentiallyExpectedEOF
    InvalidResponseCode
    InvalidSnappyBytes
    InvalidSszBytes
    StreamOpenTimeout
    ReadResponseTimeout
    ZeroSizePrefix
    SizePrefixOverflow
    InvalidContextBytes

  Eth2NetworkingError = object
    case kind*: Eth2NetworkingErrorKind
    of ReceivedErrorResponse:
      responseCode: ResponseCode
      errorMsg: string
    else:
      discard

  InvalidInputsError* = object of CatchableError

  ResourceUnavailableError* = object of CatchableError

  NetRes*[T] = Result[T, Eth2NetworkingError]
    ## This is the type returned from all network requests

func phase0metadata*(node: Eth2Node): phase0.MetaData =
  phase0.MetaData(
    seq_number: node.metadata.seq_number,
    attnets: node.metadata.attnets)

func toAltairMetadata(phase0: phase0.MetaData): altair.MetaData =
  altair.MetaData(
    seq_number: phase0.seq_number,
    attnets: phase0.attnets)

const
  clientId* = "Nimbus beacon node " & fullVersionStr

  ConcurrentConnections = 20
    ## Maximum number of active concurrent connection requests.

  SeenTableTimeTimeout =
    when not defined(local_testnet): 5.minutes else: 10.seconds
    ## Period of time a peer stays in the seen table after a connection timeout.

  SeenTableTimeDeadPeer =
    when not defined(local_testnet): 5.minutes else: 10.seconds
    ## Period of time for dead peers.

  SeenTableTimeIrrelevantNetwork = 24.hours
    ## Period of time for the `IrrelevantNetwork` disconnection reason.

  SeenTableTimeClientShutDown = 10.minutes
    ## Period of time for the `ClientShutDown` disconnection reason.

  SeenTableTimeFaultOrError = 10.minutes
    ## Period of time for the `FaultOrError` disconnection reason.

  SeenTablePenaltyError = 60.minutes
    ## Period of time for peers which score below or equal to zero.

  SeenTableTimeReconnect = 1.minutes
    ## Minimal time between disconnection and reconnection attempt.

template neterr*(kindParam: Eth2NetworkingErrorKind): auto =
  err(type(result), Eth2NetworkingError(kind: kindParam))

# Metrics for tracking attestation and beacon block loss
declareCounter nbc_gossip_messages_sent,
  "Number of gossip messages sent by this peer"

declareCounter nbc_gossip_messages_received,
  "Number of gossip messages received by this peer"

declareCounter nbc_gossip_failed_snappy,
  "Number of gossip messages that failed snappy decompression"

declareCounter nbc_gossip_failed_ssz,
  "Number of gossip messages that failed SSZ parsing"

declareCounter nbc_successful_dials,
  "Number of successfully dialed peers"

declareCounter nbc_failed_dials,
  "Number of dialing attempts that failed"

declareCounter nbc_timeout_dials,
  "Number of dialing attempts that exceeded timeout"

declareGauge nbc_peers,
  "Number of active libp2p peers"

declareCounter nbc_successful_discoveries,
  "Number of successful discoveries"

declareCounter nbc_failed_discoveries,
  "Number of failed discoveries"

declareCounter nbc_cycling_kicked_peers,
  "Number of peers kicked for peer cycling"

declareGauge nbc_gossipsub_low_fanout,
  "numbers of topics with low fanout"

declareGauge nbc_gossipsub_good_fanout,
  "numbers of topics with good fanout"

declareGauge nbc_gossipsub_healthy_fanout,
  "numbers of topics with dHigh fanout"

declareHistogram nbc_resolve_time,
  "Time(s) used while resolving peer information",
  buckets = [1.0, 5.0, 10.0, 20.0, 40.0, 60.0]

const
  libp2p_pki_schemes {.strdefine.} = ""

when libp2p_pki_schemes != "secp256k1":
  {.fatal: "Incorrect building process, please use -d:\"libp2p_pki_schemes=secp256k1\"".}

const
  NetworkInsecureKeyPassword = "INSECUREPASSWORD"

template libp2pProtocol*(name: string, version: int,
                         isRequired = false,
                         isLightClientRequest = false) {.pragma.}
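
# This pragma has no implementation of its own: it only tags request procs
# declared inside a `p2pProtocol` block. Its arguments are read back at
# compile time by `getRequestProtoName`, `isRequiredProto` and
# `isLightClientRequestProto` below to derive the libp2p protocol id and the
# per-message flags stored in `MessageInfo`.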

func shortLog*(peer: Peer): string = shortLog(peer.peerId)
chronicles.formatIt(Peer): shortLog(it)
chronicles.formatIt(PublicKey): byteutils.toHex(it.getBytes().tryGet())

proc openStream(node: Eth2Node,
                peer: Peer,
                protocolId: string): Future[Connection] {.async.} =
  # When dialling here, we do not provide addresses - all new connection
  # attempts are handled via `connect` which also takes into account
  # reconnection timeouts
  let
    protocolId = protocolId & "ssz_snappy"
    conn = await dial(
      node.switch, peer.peerId, protocolId)

  return conn

proc init(T: type Peer, network: Eth2Node, peerId: PeerId): Peer {.gcsafe.}

func peerId*(node: Eth2Node): PeerId =
  node.switch.peerInfo.peerId

func enrRecord*(node: Eth2Node): Record =
  node.discovery.localNode.record

proc getPeer(node: Eth2Node, peerId: PeerId): Peer =
  node.peers.withValue(peerId, peer) do:
    return peer[]
  do:
    let peer = Peer.init(node, peerId)
    return node.peers.mgetOrPut(peerId, peer)

proc peerFromStream(network: Eth2Node, conn: Connection): Peer =
  result = network.getPeer(conn.peerId)
  result.peerId = conn.peerId

func getKey*(peer: Peer): PeerId {.inline.} =
  peer.peerId

proc getFuture(peer: Peer): Future[void] {.inline.} =
  if isNil(peer.disconnectedFut):
    peer.disconnectedFut = newFuture[void]("Peer.disconnectedFut")
  peer.disconnectedFut

func getScore*(a: Peer): int =
  ## Returns current score value for peer ``peer``.
  a.score

func updateScore*(peer: Peer, score: int) {.inline.} =
  ## Update peer's ``peer`` score with value ``score``.
  peer.score = peer.score + score
  if peer.score > PeerScoreHighLimit:
    peer.score = PeerScoreHighLimit

func calcThroughput(dur: Duration, value: uint64): float =
  let secs = float(chronos.seconds(1).nanoseconds)
  if isZero(dur):
    0.0
  else:
    float(value) * (secs / float(dur.nanoseconds))

func updateNetThroughput(peer: Peer, dur: Duration,
                         bytesCount: uint64) {.inline.} =
  ## Update peer's ``peer`` network throughput.
  let bytesPerSecond = calcThroughput(dur, bytesCount)
  let a = peer.netThroughput.average
  let n = peer.netThroughput.count
  peer.netThroughput.average = a + (bytesPerSecond - a) / float(n + 1)
  inc(peer.netThroughput.count)
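
# The update above is the standard incremental (cumulative moving) average:
# avg_new = avg_old + (sample - avg_old) / (n + 1), so every bytes-per-second
# sample recorded since the peer was created carries equal weight.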

func netKbps*(peer: Peer): float {.inline.} =
  ## Returns current network throughput average value in Kbps for peer ``peer``.
  round(((peer.netThroughput.average / 1024) * 10_000) / 10_000)

func `<`(a, b: Peer): bool =
  ## Comparison function, which first checks peer's scores, and if the peers'
  ## score is equal it compares peers' network throughput.
  if a.score < b.score:
    true
  elif a.score == b.score:
    (a.netThroughput.average < b.netThroughput.average)
  else:
    false

const
  maxRequestQuota = 1000000.0
  fullReplenishTime = 5.seconds
  replenishRate = (maxRequestQuota / fullReplenishTime.nanoseconds.float)

proc updateRequestQuota*(peer: Peer, reqCost: float) =
  let
    currentTime = now(chronos.Moment)
    nanosSinceLastReq = nanoseconds(currentTime - peer.lastReqTime)
    replenishedQuota = peer.requestQuota + nanosSinceLastReq.float * replenishRate

  peer.lastReqTime = currentTime
  peer.requestQuota = min(replenishedQuota, maxRequestQuota) - reqCost

template awaitNonNegativeRequestQuota*(peer: Peer) =
  let quota = peer.requestQuota
  if quota < 0:
    await sleepAsync(nanoseconds(int((-quota) / replenishRate)))

func allowedOpsPerSecondCost*(n: int): float =
  (replenishRate * 1000000000'f / n.float)
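
# The procs above implement a simple per-peer token bucket: the quota refills
# at replenishRate = 1_000_000 / 5e9 = 0.0002 units per nanosecond (the full
# bucket is restored in 5 seconds) and is capped at maxRequestQuota. As a
# worked example, allowedOpsPerSecondCost(5) = 0.0002 * 1e9 / 5 = 40_000, so
# charging that cost per request lets a peer sustain roughly 5 such requests
# per second before awaitNonNegativeRequestQuota starts delaying it.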

proc isSeen(network: Eth2Node, peerId: PeerId): bool =
  ## Returns ``true`` if ``peerId`` is present in the SeenTable and its time
  ## period has not yet expired.
  let currentTime = now(chronos.Moment)
  if peerId notin network.seenTable:
    false
  else:
    let item = try: network.seenTable[peerId]
               except KeyError: raiseAssert "checked with notin"
    if currentTime >= item.stamp:
      # Peer is in SeenTable, but the time period has expired.
      network.seenTable.del(peerId)
      false
    else:
      true

proc addSeen(network: Eth2Node, peerId: PeerId,
             period: chronos.Duration) =
  ## Adds peer with PeerId ``peerId`` to the SeenTable with timeout ``period``.
  let item = SeenItem(peerId: peerId, stamp: now(chronos.Moment) + period)
  withValue(network.seenTable, peerId, entry) do:
    if entry.stamp < item.stamp:
      entry.stamp = item.stamp
  do:
    network.seenTable[peerId] = item
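
# For a peer that is already in the table, addSeen only ever extends the
# existing expiry (the stamp is updated only when the new one is later), so a
# long ban such as SeenTableTimeIrrelevantNetwork cannot be shortened by a
# subsequent, milder disconnection reason.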

proc disconnect*(peer: Peer, reason: DisconnectionReason,
                 notifyOtherPeer = false) {.async.} =
  # TODO(zah): How should we notify the other peer?
  try:
    if peer.connectionState notin {Disconnecting, Disconnected}:
      peer.connectionState = Disconnecting
      # We add the peer to the SeenTable before the actual disconnect to
      # avoid races.
      let seenTime = case reason
        of ClientShutDown:
          SeenTableTimeClientShutDown
        of IrrelevantNetwork:
          SeenTableTimeIrrelevantNetwork
        of FaultOrError:
          SeenTableTimeFaultOrError
        of PeerScoreLow:
          SeenTablePenaltyError
      peer.network.addSeen(peer.peerId, seenTime)
      await peer.network.switch.disconnect(peer.peerId)
  except CatchableError:
    # We do not care about exceptions in disconnection procedure.
    trace "Exception while disconnecting peer", peer = peer.peerId,
          reason = reason

include eth/p2p/p2p_backends_helpers
include eth/p2p/p2p_tracing

proc getRequestProtoName(fn: NimNode): NimNode =
  # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes
  # (TODO: file as an issue)

  let pragmas = fn.pragma
  if pragmas.kind == nnkPragma and pragmas.len > 0:
    for pragma in pragmas:
      try:
        if pragma.len > 0 and $pragma[0] == "libp2pProtocol":
          let protoName = $(pragma[1])
          let protoVer = $(pragma[2].intVal)
          return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/")
      except Exception as exc: raiseAssert exc.msg # TODO https://github.com/nim-lang/Nim/issues/17454

  return newLit("")

proc isRequiredProto(fn: NimNode): NimNode =
  # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes
  # (TODO: file as an issue)

  let pragmas = fn.pragma
  if pragmas.kind == nnkPragma and pragmas.len > 0:
    for pragma in pragmas:
      try:
        if pragma.len > 0 and $pragma[0] == "libp2pProtocol":
          if pragma.len <= 3:
            return newLit(false)
          for i in 3 ..< pragma.len:
            let param = pragma[i]
            case param.kind
            of nnkExprEqExpr:
              if $param[0] == "isRequired":
                if $param[1] == "true":
                  return newLit(true)
                if $param[1] == "false":
                  return newLit(false)
                raiseAssert "Unexpected value: " & $param
              if $param[0] != "isLightClientRequest":
                raiseAssert "Unexpected param: " & $param
            of nnkIdent:
              if i == 3:
                return newLit(param.boolVal)
            else: raiseAssert "Unexpected kind: " & param.kind.repr
          return newLit(false)
      except Exception as exc: raiseAssert exc.msg # TODO https://github.com/nim-lang/Nim/issues/17454

  return newLit(false)

proc isLightClientRequestProto(fn: NimNode): NimNode =
  # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes
  # (TODO: file as an issue)

  let pragmas = fn.pragma
  if pragmas.kind == nnkPragma and pragmas.len > 0:
    for pragma in pragmas:
      try:
        if pragma.len > 0 and $pragma[0] == "libp2pProtocol":
          if pragma.len <= 3:
            return newLit(false)
          for i in 3 ..< pragma.len:
            let param = pragma[i]
            case param.kind
            of nnkExprEqExpr:
              if $param[0] == "isLightClientRequest":
                if $param[1] == "true":
                  return newLit(true)
                if $param[1] == "false":
                  return newLit(false)
                raiseAssert "Unexpected value: " & $param
              if $param[0] != "isRequired":
                raiseAssert "Unexpected param: " & $param
            of nnkIdent:
              if i == 4:
                return newLit(param.boolVal)
            else: raiseAssert "Unexpected kind: " & param.kind.repr
          return newLit(false)
      except Exception as exc: raiseAssert exc.msg # TODO https://github.com/nim-lang/Nim/issues/17454

  return newLit(false)

proc writeChunkSZ(
    conn: Connection, responseCode: Option[ResponseCode],
    uncompressedLen: uint64, payloadSZ: openArray[byte],
    contextBytes: openArray[byte] = []): Future[void] =
  # max 10 bytes varint length + 1 byte response code + data
  const numOverheadBytes = sizeof(byte) + Leb128.maxLen(typeof(uncompressedLen))
  var output = memoryOutput(payloadSZ.len + contextBytes.len + numOverheadBytes)
  try:
    if responseCode.isSome:
      output.write byte(responseCode.get)

    if contextBytes.len > 0:
      output.write contextBytes

    output.write toBytes(uncompressedLen, Leb128).toOpenArray()
    output.write payloadSZ
  except IOError as exc:
    raiseAssert exc.msg # memoryOutput shouldn't raise

  conn.write(output.getOutput)

proc writeChunk(conn: Connection,
                responseCode: Option[ResponseCode],
                payload: openArray[byte],
                contextBytes: openArray[byte] = []): Future[void] =
  var output = memoryOutput()

  try:
    if responseCode.isSome:
      output.write byte(responseCode.get)

    if contextBytes.len > 0:
      output.write contextBytes

    output.write toBytes(payload.lenu64, Leb128).toOpenArray()

    compressFramed(payload, output)
  except IOError as exc:
    raiseAssert exc.msg # memoryOutput shouldn't raise
  conn.write(output.getOutput)
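
# Both writers produce the same on-wire chunk layout:
#   [response code, 1 byte - responses only]
#   [context bytes, if any - e.g. a fork digest for forked response types]
#   [uncompressed payload length as a LEB128 varint]
#   [snappy-framed payload]
# writeChunk compresses `payload` itself, whereas writeChunkSZ is handed data
# that is already snappy-framed (`payloadSZ`) together with its uncompressed
# length.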

template errorMsgLit(x: static string): ErrorMsg =
  const val = ErrorMsg toBytes(x)
  val

func formatErrorMsg(msg: ErrorMsg): string =
  # ErrorMsg "usually" contains a human-readable string - we'll try to parse it
  # as ASCII and return hex if that fails
  for c in msg:
    if c < 32 or c > 127:
      return byteutils.toHex(asSeq(msg))

  string.fromBytes(asSeq(msg))

proc sendErrorResponse(peer: Peer,
                       conn: Connection,
                       responseCode: ResponseCode,
                       errMsg: ErrorMsg): Future[void] =
  debug "Error processing request",
    peer, responseCode, errMsg = formatErrorMsg(errMsg)
  conn.writeChunk(some responseCode, SSZ.encode(errMsg))

proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.async.} =
  var
    deadline = sleepAsync RESP_TIMEOUT
    streamFut = peer.network.openStream(peer, protocolId)

  await streamFut or deadline

  if not streamFut.finished:
    await streamFut.cancelAndWait()
    raise newException(TransmissionError, "Failed to open LibP2P stream")

  let stream = streamFut.read
  try:
    await stream.writeChunk(none ResponseCode, requestBytes)
  finally:
    await stream.close()

proc sendResponseChunkBytesSZ(
    response: UntypedResponse, uncompressedLen: uint64,
    payloadSZ: openArray[byte],
    contextBytes: openArray[byte] = []): Future[void] =
  inc response.writtenChunks
  response.stream.writeChunkSZ(
    some Success, uncompressedLen, payloadSZ, contextBytes)

proc sendResponseChunkBytes(
    response: UntypedResponse, payload: openArray[byte],
    contextBytes: openArray[byte] = []): Future[void] =
  inc response.writtenChunks
  response.stream.writeChunk(some Success, payload, contextBytes)

proc sendResponseChunk(
    response: UntypedResponse, val: auto,
    contextBytes: openArray[byte] = []): Future[void] =
  sendResponseChunkBytes(response, SSZ.encode(val), contextBytes)

template sendUserHandlerResultAsChunkImpl*(stream: Connection,
                                           handlerResultFut: Future): untyped =
  let handlerRes = await handlerResultFut
  writeChunk(stream, some Success, SSZ.encode(handlerRes))

template sendUserHandlerResultAsChunkImpl*(stream: Connection,
                                           handlerResult: auto): untyped =
  writeChunk(stream, some Success, SSZ.encode(handlerResult))

proc uncompressFramedStream(conn: Connection,
                            expectedSize: int): Future[Result[seq[byte], cstring]]
                            {.async.} =
  var header: array[framingHeader.len, byte]
  try:
    await conn.readExactly(addr header[0], header.len)
  except LPStreamEOFError, LPStreamIncompleteError:
    return err "Unexpected EOF before snappy header"

  if header != framingHeader:
    return err "Incorrect snappy header"

  static:
    doAssert maxCompressedFrameDataLen >= maxUncompressedFrameDataLen.uint64

  var
    frameData = newSeq[byte](maxCompressedFrameDataLen + 4)
    output = newSeqUninitialized[byte](expectedSize)
    written = 0

  while written < expectedSize:
    var frameHeader: array[4, byte]
    try:
      await conn.readExactly(addr frameHeader[0], frameHeader.len)
    except LPStreamEOFError, LPStreamIncompleteError:
      return err "Snappy frame header missing"

    let (id, dataLen) = decodeFrameHeader(frameHeader)

    if dataLen > frameData.len:
      # In theory, compressed frames could be bigger and still result in a
      # valid, small snappy frame, but this would mean they are not getting
      # compressed correctly
      return err "Snappy frame too big"

    if dataLen > 0:
      try:
        await conn.readExactly(addr frameData[0], dataLen)
      except LPStreamEOFError, LPStreamIncompleteError:
        return err "Incomplete snappy frame"

    if id == chunkCompressed:
      if dataLen < 6: # At least CRC + 2 bytes of frame data
        return err "Compressed snappy frame too small"

      let
        crc = uint32.fromBytesLE frameData.toOpenArray(0, 3)
        uncompressed =
          snappy.uncompress(
            frameData.toOpenArray(4, dataLen - 1),
            output.toOpenArray(written, output.high)).valueOr:
              return err "Failed to decompress content"

      if maskedCrc(
          output.toOpenArray(written, written + uncompressed-1)) != crc:
        return err "Snappy content CRC checksum failed"

      written += uncompressed

    elif id == chunkUncompressed:
      if dataLen < 5: # At least one byte of data
        return err "Uncompressed snappy frame too small"

      let uncompressed = dataLen - 4

      if uncompressed > maxUncompressedFrameDataLen.int:
        return err "Snappy frame size too large"

      if uncompressed > output.len - written:
        return err "Too much data"

      let crc = uint32.fromBytesLE frameData.toOpenArray(0, 3)
      if maskedCrc(frameData.toOpenArray(4, dataLen - 1)) != crc:
        return err "Snappy content CRC checksum failed"

      output[written..<written + uncompressed] =
        frameData.toOpenArray(4, dataLen-1)
      written += uncompressed

    elif id < 0x80:
      # Reserved unskippable chunks (chunk types 0x02-0x7f)
      # if we encounter this type of chunk, stop decoding
      # the spec says it is an error
      return err "Invalid snappy chunk type"

    else:
      # Reserved skippable chunks (chunk types 0x80-0xfe)
      # including STREAM_HEADER (0xff) should be skipped
      continue

  return ok output
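
# `expectedSize` comes from the varint length prefix read by
# `readChunkPayload`; the loop above keeps reading snappy frames until exactly
# that many uncompressed bytes have been produced, and a frame that would
# overflow the advertised size is rejected ("Too much data" or a failed
# in-place decompression).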

proc readChunkPayload*(conn: Connection, peer: Peer,
                       maxChunkSize: uint32,
                       MsgType: type): Future[NetRes[MsgType]] {.async.} =
  let sm = now(chronos.Moment)
  let size =
    try: await conn.readVarint()
    except LPStreamEOFError: #, LPStreamIncompleteError, InvalidVarintError
      # TODO compiler error - haha, uncaught exception
      # Error: unhandled exception: closureiters.nim(322, 17) `c[i].kind == nkType` [AssertionError]
      return neterr UnexpectedEOF
    except LPStreamIncompleteError:
      return neterr UnexpectedEOF
    except InvalidVarintError:
      return neterr UnexpectedEOF

  if size > maxChunkSize:
    return neterr SizePrefixOverflow
  if size == 0:
    return neterr ZeroSizePrefix

  # The `size.int` conversion is safe because `size` is bounded to `MAX_CHUNK_SIZE`
  let data = await conn.uncompressFramedStream(size.int)
  if data.isOk:
    # `10` accounts for the maximum length of the varint size prefix on the
    # wire, so the error it introduces in the throughput estimate is
    # negligible.
    peer.updateNetThroughput(now(chronos.Moment) - sm,
                             uint64(10 + size))
    return ok SSZ.decode(data.get(), MsgType)
  else:
    debug "Snappy decompression/read failed", msg = $data.error, conn
    return neterr InvalidSnappyBytes

proc readResponseChunk(conn: Connection, peer: Peer, maxChunkSize: uint32,
                       MsgType: typedesc): Future[NetRes[MsgType]] {.async.} =
  mixin readChunkPayload

  try:
    var responseCodeByte: byte
    try:
      await conn.readExactly(addr responseCodeByte, 1)
    except LPStreamEOFError, LPStreamIncompleteError:
      return neterr PotentiallyExpectedEOF

    static: assert ResponseCode.low.ord == 0
    if responseCodeByte > ResponseCode.high.byte:
      return neterr InvalidResponseCode

    let responseCode = ResponseCode responseCodeByte
    case responseCode:
    of InvalidRequest, ServerError, ResourceUnavailable:
      let
        errorMsgChunk = await readChunkPayload(
          conn, peer, maxChunkSize, ErrorMsg)
        errorMsg = if errorMsgChunk.isOk: errorMsgChunk.value
                   else: return err(errorMsgChunk.error)
        errorMsgStr = toPrettyString(errorMsg.asSeq)
      debug "Error response from peer", responseCode, errMsg = errorMsgStr
      return err Eth2NetworkingError(kind: ReceivedErrorResponse,
                                     responseCode: responseCode,
                                     errorMsg: errorMsgStr)
    of Success:
      discard

    return await readChunkPayload(conn, peer, maxChunkSize, MsgType)

  except LPStreamEOFError, LPStreamIncompleteError:
    return neterr UnexpectedEOF

proc readResponse(conn: Connection, peer: Peer, maxChunkSize: uint32,
                  MsgType: type, timeout: Duration): Future[NetRes[MsgType]] {.async.} =
  when MsgType is seq:
    type E = ElemType(MsgType)
    var results: MsgType
    while true:
      # Because we interleave networking with response processing, it may
      # happen that reading all chunks takes longer than a strict deadline
      # timeout would allow, so we allow each chunk a new timeout instead.
      # The problem is exacerbated by the large number of round-trips to the
      # poll loop that each future along the way causes.
      trace "reading chunk", conn
      let nextFut = conn.readResponseChunk(peer, maxChunkSize, E)
      if not await nextFut.withTimeout(timeout):
        return neterr(ReadResponseTimeout)
      let nextRes = nextFut.read()
      if nextRes.isErr:
        if nextRes.error.kind == PotentiallyExpectedEOF:
          trace "EOF chunk", conn, err = nextRes.error
          return ok results
        trace "Error chunk", conn, err = nextRes.error
        return err nextRes.error
      else:
        trace "Got chunk", conn
        results.add nextRes.value
  else:
    let nextFut = conn.readResponseChunk(peer, maxChunkSize, MsgType)
    if not await nextFut.withTimeout(timeout):
      return neterr(ReadResponseTimeout)
    return nextFut.read()

func maxChunkSize*(t: typedesc[bellatrix.SignedBeaconBlock]): uint32 =
  MAX_CHUNK_SIZE_BELLATRIX

func maxChunkSize*(t: typedesc): uint32 =
  MAX_CHUNK_SIZE
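
# Dispatching maxChunkSize on the message type lets bellatrix blocks, which
# embed an execution payload and can therefore be much larger than other
# messages, use the raised MAX_CHUNK_SIZE_BELLATRIX limit while every other
# request/response type keeps the default MAX_CHUNK_SIZE.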

proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes,
                     ResponseMsg: type,
                     timeout: Duration): Future[NetRes[ResponseMsg]]
                    {.async.} =
  var deadline = sleepAsync timeout

  let stream = awaitWithTimeout(peer.network.openStream(peer, protocolId),
                                deadline): return neterr StreamOpenTimeout
  try:
    # Send the request
    # Some clients don't want a length sent for empty requests
    # So don't send anything on empty requests
    if requestBytes.len > 0:
      await stream.writeChunk(none ResponseCode, requestBytes)
    # Half-close the stream to mark the end of the request - if this is not
    # done, the other peer might never send us the response.
    await stream.close()

    # Read the response
    return await readResponse(
      stream, peer, maxChunkSize(ResponseMsg), ResponseMsg, timeout)
  finally:
    await stream.closeWithEOF()
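
# The client-side flow is thus: open the stream within `timeout`, write the
# single request chunk (skipped entirely for empty requests), half-close to
# signal the end of the request, then read one or more response chunks, each
# with its own `timeout`, before finally closing the stream with EOF.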

proc init*[MsgType](T: type MultipleChunksResponse[MsgType],
                    peer: Peer, conn: Connection): T =
  T(UntypedResponse(peer: peer, stream: conn))

proc init*[MsgType](T: type SingleChunkResponse[MsgType],
                    peer: Peer, conn: Connection): T =
  T(UntypedResponse(peer: peer, stream: conn))

template write*[M](
    r: MultipleChunksResponse[M], val: M,
    contextBytes: openArray[byte] = []): untyped =
  mixin sendResponseChunk
  sendResponseChunk(UntypedResponse(r), val, contextBytes)

template writeBytesSZ*[M](
    r: MultipleChunksResponse[M], uncompressedLen: uint64,
    bytes: openArray[byte], contextBytes: openArray[byte]): untyped =
  sendResponseChunkBytesSZ(UntypedResponse(r), uncompressedLen, bytes, contextBytes)

template send*[M](
    r: SingleChunkResponse[M], val: M,
    contextBytes: openArray[byte] = []): untyped =
  mixin sendResponseChunk
  doAssert UntypedResponse(r).writtenChunks == 0
  sendResponseChunk(UntypedResponse(r), val, contextBytes)
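
# Rough usage sketch (a hypothetical handler; the surrounding names are
# illustrative only): inside a `p2pProtocol` request handler, a multi-chunk
# response streams each item with `write`, while a single-chunk response is
# sent exactly once with `send` - the `doAssert` above enforces single use:
#
#   for blck in blocksToServe:
#     await response.write(blck)          # MultipleChunksResponse[M]
#
#   await response.send(nodeMetadata)     # SingleChunkResponse[M]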

proc performProtocolHandshakes(peer: Peer, incoming: bool) {.async.} =
  # Loop down serially because it's easier to reason about the connection state
  # when there are fewer async races, especially during setup
  for protocol in allProtocols:
    if protocol.onPeerConnected != nil:
      await protocol.onPeerConnected(peer, incoming)

proc initProtocol(name: string,
                  peerInit: PeerStateInitializer,
                  networkInit: NetworkStateInitializer): ProtocolInfoObj =
  ProtocolInfoObj(
    name: name,
    messages: @[],
    peerStateInitializer: peerInit,
    networkStateInitializer: networkInit)

proc registerProtocol(protocol: ProtocolInfo) =
  # TODO: This can be done at compile-time in the future
  let pos = lowerBound(gProtocols, protocol)
  gProtocols.insert(protocol, pos)
  for i in 0 ..< gProtocols.len:
    gProtocols[i].index = i

proc setEventHandlers(p: ProtocolInfo,
                      onPeerConnected: OnPeerConnectedHandler,
                      onPeerDisconnected: OnPeerDisconnectedHandler) =
  p.onPeerConnected = onPeerConnected
  p.onPeerDisconnected = onPeerDisconnected

proc implementSendProcBody(sendProc: SendProc) =
  let
    msg = sendProc.msg
    UntypedResponse = bindSym "UntypedResponse"

  proc sendCallGenerator(peer, bytes: NimNode): NimNode =
    if msg.kind != msgResponse:
      let msgProto = getRequestProtoName(msg.procDef)
      case msg.kind
      of msgRequest:
        let ResponseRecord = msg.response.recName
        quote:
          makeEth2Request(`peer`, `msgProto`, `bytes`,
                          `ResponseRecord`, `timeoutVar`)
      else:
        quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`)
    else:
      quote: sendResponseChunkBytes(`UntypedResponse`(`peer`), `bytes`)

  sendProc.useStandardBody(nil, nil, sendCallGenerator)

proc handleIncomingStream(network: Eth2Node,
                          conn: Connection,
                          MsgType: type) {.async.} =
  mixin callUserHandler, RecType

  type MsgRec = RecType(MsgType)
  const msgName {.used.} = typetraits.name(MsgType)

  ## Uncomment this to enable tracing on all incoming requests
  ## You can include `msgNameLit` in the condition to select
  ## more specific requests:
  # when chronicles.runtimeFilteringEnabled:
  #   setLogLevel(LogLevel.TRACE)
  #   defer: setLogLevel(LogLevel.DEBUG)
  #   trace "incoming " & `msgNameLit` & " conn"

  let peer = peerFromStream(network, conn)
  try:
    case peer.connectionState
    of Disconnecting, Disconnected, None:
      # We got an incoming stream request while disconnected or disconnecting.
      debug "Got incoming request from disconnected peer", peer = peer,
            message = msgName
      await conn.closeWithEOF()
      return
    of Connecting:
      # We got an incoming stream request while the handshake is not yet
      # finished. TODO: We could check it here.
      debug "Got incoming request from peer while in handshake", peer = peer,
            msgName
    of Connected:
      # We got an incoming stream from a peer with a proper connection state.
      debug "Got incoming request from peer", peer = peer, msgName

    template returnInvalidRequest(msg: ErrorMsg) =
      peer.updateScore(PeerScoreInvalidRequest)
      await sendErrorResponse(peer, conn, InvalidRequest, msg)
      return

    template returnInvalidRequest(msg: string) =
      returnInvalidRequest(ErrorMsg msg.toBytes)

    template returnResourceUnavailable(msg: ErrorMsg) =
      await sendErrorResponse(peer, conn, ResourceUnavailable, msg)
      return

    template returnResourceUnavailable(msg: string) =
      returnResourceUnavailable(ErrorMsg msg.toBytes)

    # TODO(zah) The TTFB timeout is not implemented in LibP2P streams back-end
    let deadline = sleepAsync RESP_TIMEOUT

    const isEmptyMsg = when MsgRec is object:
      # We need nested `when` statements here, because Nim doesn't properly
      # apply boolean short-circuit logic at compile time and this causes
      # `totalSerializedFields` to be applied to non-object types that it
      # doesn't know how to support.
      when totalSerializedFields(MsgRec) == 0: true
      else: false
    else:
      false

    let msg = when isEmptyMsg:
      NetRes[MsgRec].ok default(MsgRec)
    else:
      try:
        awaitWithTimeout(
          readChunkPayload(conn, peer, maxChunkSize(MsgRec), MsgRec), deadline):
            # Timeout, e.g., cancellation due to fulfillment by different peer.
            # Treat this similarly to `UnexpectedEOF`, `PotentiallyExpectedEOF`.
            await sendErrorResponse(
              peer, conn, InvalidRequest,
              errorMsgLit "Request full data not sent in time")
            return

      except SerializationError as err:
        returnInvalidRequest err.formatMsg("msg")

      except SnappyError as err:
        returnInvalidRequest err.msg

    if msg.isErr:
      let (responseCode, errMsg) = case msg.error.kind
        of UnexpectedEOF, PotentiallyExpectedEOF:
          (InvalidRequest, errorMsgLit "Incomplete request")

        of InvalidContextBytes:
          (ServerError, errorMsgLit "Unrecognized context bytes")

        of InvalidSnappyBytes:
          (InvalidRequest, errorMsgLit "Failed to decompress snappy payload")

        of InvalidSszBytes:
          (InvalidRequest, errorMsgLit "Failed to decode SSZ payload")

        of ZeroSizePrefix:
          (InvalidRequest, errorMsgLit "The request chunk cannot have a size of zero")

        of SizePrefixOverflow:
          (InvalidRequest, errorMsgLit "The chunk size exceed the maximum allowed")

        of InvalidResponseCode, ReceivedErrorResponse,
           StreamOpenTimeout, ReadResponseTimeout:
          # These shouldn't be possible in a request, because
          # there are no response codes being read, no stream
          # openings and no reading of responses:
          (ServerError, errorMsgLit "Internal server error")

        of BrokenConnection:
          return

      await sendErrorResponse(peer, conn, responseCode, errMsg)
      return

    try:
      logReceivedMsg(peer, MsgType(msg.get))
      await callUserHandler(MsgType, peer, conn, msg.get)
    except InvalidInputsError as err:
      returnInvalidRequest err.msg
    except ResourceUnavailableError as err:
      returnResourceUnavailable err.msg
    except CatchableError as err:
      await sendErrorResponse(peer, conn, ServerError,
                              ErrorMsg err.msg.toBytes)

  except CatchableError as err:
    debug "Error processing an incoming request", err = err.msg, msgName

  finally:
    await conn.closeWithEOF()
    discard network.peerPool.checkPeerScore(peer)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-11-26 19:23:45 +00:00
|
|
|
proc toPeerAddr*(r: enr.TypedRecord,
|
2021-08-18 12:30:05 +00:00
|
|
|
proto: IpTransportProtocol): Result[PeerAddr, cstring] =
|
2020-08-02 19:27:36 +00:00
|
|
|
if not r.secp256k1.isSome:
|
|
|
|
return err("enr: no secp256k1 key in record")
|
|
|
|
|
|
|
|
let
|
|
|
|
pubKey = ? keys.PublicKey.fromRaw(r.secp256k1.get)
|
2022-04-08 16:22:49 +00:00
|
|
|
peerId = ? PeerId.init(crypto.PublicKey(
|
2020-08-02 19:27:36 +00:00
|
|
|
scheme: Secp256k1, skkey: secp.SkPublicKey(pubKey)))
|
|
|
|
|
|
|
|
var addrs = newSeq[MultiAddress]()
|
|
|
|
|
2020-11-26 19:23:45 +00:00
|
|
|
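# An ENR can advertise both IPv4 and IPv6 endpoints; collect every usable
# multiaddress for the requested transport, preferring the dedicated IPv6
# port and falling back to the shared one when only that is present.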
case proto
|
|
|
|
of tcpProtocol:
|
|
|
|
if r.ip.isSome and r.tcp.isSome:
|
|
|
|
let ip = ipv4(r.ip.get)
|
2020-11-26 08:05:23 +00:00
|
|
|
addrs.add MultiAddress.init(ip, tcpProtocol, Port r.tcp.get)
|
2020-11-26 19:23:45 +00:00
|
|
|
|
|
|
|
if r.ip6.isSome:
|
|
|
|
let ip = ipv6(r.ip6.get)
|
|
|
|
if r.tcp6.isSome:
|
|
|
|
addrs.add MultiAddress.init(ip, tcpProtocol, Port r.tcp6.get)
|
|
|
|
elif r.tcp.isSome:
|
|
|
|
addrs.add MultiAddress.init(ip, tcpProtocol, Port r.tcp.get)
|
|
|
|
else:
|
|
|
|
discard
|
|
|
|
|
|
|
|
of udpProtocol:
|
|
|
|
if r.ip.isSome and r.udp.isSome:
|
|
|
|
let ip = ipv4(r.ip.get)
|
|
|
|
addrs.add MultiAddress.init(ip, udpProtocol, Port r.udp.get)
|
|
|
|
|
|
|
|
if r.ip6.isSome:
|
|
|
|
let ip = ipv6(r.ip6.get)
|
|
|
|
if r.udp6.isSome:
|
|
|
|
addrs.add MultiAddress.init(ip, udpProtocol, Port r.udp6.get)
|
|
|
|
elif r.udp.isSome:
|
|
|
|
addrs.add MultiAddress.init(ip, udpProtocol, Port r.udp.get)
|
|
|
|
else:
|
|
|
|
discard
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-08-02 19:27:36 +00:00
|
|
|
if addrs.len == 0:
|
|
|
|
return err("enr: no addresses in record")
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-08-02 19:27:36 +00:00
|
|
|
ok(PeerAddr(peerId: peerId, addrs: addrs))
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-09-21 16:02:27 +00:00
|
|
|
proc checkPeer(node: Eth2Node, peerAddr: PeerAddr): bool =
|
2020-08-02 19:27:36 +00:00
|
|
|
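## Returns ``true`` when it makes sense to dial ``peerAddr``: the peer is
## neither already in the peer pool nor in the recently-seen table.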
logScope: peer = peerAddr.peerId
|
2020-09-21 16:02:27 +00:00
|
|
|
let peerId = peerAddr.peerId
|
|
|
|
if node.peerPool.hasPeer(peerId):
|
|
|
|
trace "Already connected"
|
|
|
|
false
|
|
|
|
else:
|
|
|
|
if node.isSeen(peerId):
|
|
|
|
trace "Recently connected"
|
|
|
|
false
|
|
|
|
else:
|
|
|
|
true
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2022-06-15 08:14:47 +00:00
|
|
|
proc dialPeer(node: Eth2Node, peerAddr: PeerAddr, index = 0) {.async.} =
|
2020-09-21 16:02:27 +00:00
|
|
|
## Establish a connection with the remote peer identified by ``peerAddr``.
|
|
|
|
logScope:
|
|
|
|
peer = peerAddr.peerId
|
|
|
|
index = index
|
2020-06-03 08:46:29 +00:00
|
|
|
|
2020-09-21 16:02:27 +00:00
|
|
|
if not(node.checkPeer(peerAddr)):
|
|
|
|
return
|
2020-07-23 20:51:56 +00:00
|
|
|
|
2020-09-21 16:02:27 +00:00
|
|
|
debug "Connecting to discovered peer"
|
|
|
|
var deadline = sleepAsync(node.connectTimeout)
|
2022-03-11 10:51:53 +00:00
|
|
|
var workfut = node.switch.connect(
|
|
|
|
peerAddr.peerId,
|
|
|
|
peerAddr.addrs,
|
|
|
|
forceDial = true
|
|
|
|
)
|
2020-06-03 08:46:29 +00:00
|
|
|
|
2020-09-21 16:02:27 +00:00
|
|
|
try:
|
|
|
|
# The `or` operation will only raise an exception from `workfut`, because `deadline`
|
|
|
|
# cannot raise an exception.
|
|
|
|
await workfut or deadline
|
|
|
|
if workfut.finished():
|
|
|
|
if not deadline.finished():
|
|
|
|
deadline.cancel()
|
|
|
|
inc nbc_successful_dials
|
2020-06-03 08:46:29 +00:00
|
|
|
else:
|
2020-09-21 16:02:27 +00:00
|
|
|
debug "Connection to remote peer timed out"
|
|
|
|
inc nbc_timeout_dials
|
|
|
|
node.addSeen(peerAddr.peerId, SeenTableTimeTimeout)
|
2020-11-17 18:03:29 +00:00
|
|
|
await cancelAndWait(workfut)
|
2020-09-21 16:02:27 +00:00
|
|
|
except CatchableError as exc:
|
|
|
|
debug "Connection to remote peer failed", msg = exc.msg
|
|
|
|
inc nbc_failed_dials
|
|
|
|
node.addSeen(peerAddr.peerId, SeenTableTimeDeadPeer)
|
|
|
|
|
|
|
|
proc connectWorker(node: Eth2Node, index: int) {.async.} =
|
|
|
|
debug "Connection worker started", index = index
|
|
|
|
while true:
|
|
|
|
# This loop will never produce HIGH CPU usage because it will wait
|
|
|
|
# and block until it obtains a new peer from the queue ``connQueue``.
|
|
|
|
let remotePeerAddr = await node.connQueue.popFirst()
|
2021-03-22 09:17:14 +00:00
|
|
|
# Previous worker dial might have hit the maximum peers.
|
|
|
|
# TODO: could clear the whole connTable and connQueue here also, best
|
|
|
|
# would be to have this event based coming from peer pool or libp2p.
|
2022-03-11 10:51:53 +00:00
|
|
|
|
|
|
|
if node.peerPool.len < node.hardMaxPeers:
|
2021-03-22 09:17:14 +00:00
|
|
|
await node.dialPeer(remotePeerAddr, index)
|
2020-09-21 16:02:27 +00:00
|
|
|
# Peer was added to `connTable` before adding it to `connQueue`, so we
|
|
|
|
# exclude the peer here after processing.
|
|
|
|
node.connTable.excl(remotePeerAddr.peerId)
|
|
|
|
|
2021-08-18 12:30:05 +00:00
|
|
|
proc toPeerAddr(node: Node): Result[PeerAddr, cstring] =
|
2020-09-21 16:02:27 +00:00
|
|
|
let nodeRecord = ? node.record.toTypedRecord()
|
2020-11-26 19:23:45 +00:00
|
|
|
let peerAddr = ? nodeRecord.toPeerAddr(tcpProtocol)
|
2020-09-21 16:02:27 +00:00
|
|
|
ok(peerAddr)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2022-06-15 08:14:47 +00:00
|
|
|
func isCompatibleForkId*(discoveryForkId: ENRForkID, peerForkId: ENRForkID): bool =
|
2021-09-29 11:06:16 +00:00
|
|
|
if discoveryForkId.fork_digest == peerForkId.fork_digest:
|
|
|
|
if discoveryForkId.next_fork_version < peerForkId.next_fork_version:
|
|
|
|
# Peer knows about a fork and we don't
|
|
|
|
true
|
|
|
|
elif discoveryForkId.next_fork_version == peerForkId.next_fork_version:
|
|
|
|
# We should have the same next_fork_epoch
|
|
|
|
discoveryForkId.next_fork_epoch == peerForkId.next_fork_epoch
|
|
|
|
|
|
|
|
else:
|
|
|
|
# Our next fork version is bigger than the peer's one
|
|
|
|
false
|
|
|
|
else:
|
|
|
|
# Wrong fork digest
|
|
|
|
false
|
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
proc queryRandom*(
|
|
|
|
d: Eth2DiscoveryProtocol,
|
|
|
|
forkId: ENRForkID,
|
2021-10-21 13:09:19 +00:00
|
|
|
wantedAttnets: AttnetBits,
|
2022-03-11 10:51:53 +00:00
|
|
|
wantedSyncnets: SyncnetBits,
|
|
|
|
minScore: int): Future[seq[Node]] {.async.} =
|
2021-08-23 10:29:50 +00:00
|
|
|
## Perform a discovery query for a random target
|
2021-03-24 10:48:53 +00:00
|
|
|
## (forkId) and matching at least one of the attestation subnets.
|
|
|
|
|
2021-09-29 11:06:16 +00:00
|
|
|
let nodes = await d.queryRandom()
|
2021-08-23 10:29:50 +00:00
|
|
|
|
|
|
|
var filtered: seq[(int, Node)]
|
2021-03-24 10:48:53 +00:00
|
|
|
for n in nodes:
|
2021-09-21 22:25:49 +00:00
|
|
|
var score: int = 0
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-09-29 11:06:16 +00:00
|
|
|
let eth2FieldBytes = n.record.tryGet(enrForkIdField, seq[byte])
|
|
|
|
if eth2FieldBytes.isNone():
|
|
|
|
continue
|
|
|
|
let peerForkId =
|
|
|
|
try:
|
|
|
|
SSZ.decode(eth2FieldBytes.get(), ENRForkID)
|
|
|
|
except SszError as e:
|
|
|
|
debug "Could not decode the eth2 field of peer",
|
|
|
|
peer = n.record.toURI(), exception = e.name, msg = e.msg
|
|
|
|
continue
|
|
|
|
|
|
|
|
if not forkId.isCompatibleForkId(peerForkId):
|
|
|
|
continue
|
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
let attnetsBytes = n.record.tryGet(enrAttestationSubnetsField, seq[byte])
|
|
|
|
if attnetsBytes.isSome():
|
2021-08-23 10:29:50 +00:00
|
|
|
let attnetsNode =
|
|
|
|
try:
|
2021-10-21 13:09:19 +00:00
|
|
|
SSZ.decode(attnetsBytes.get(), AttnetBits)
|
2021-08-23 10:29:50 +00:00
|
|
|
except SszError as e:
|
2021-09-21 22:25:49 +00:00
|
|
|
debug "Could not decode the attnets ERN bitfield of peer",
|
2021-08-23 10:29:50 +00:00
|
|
|
peer = n.record.toURI(), exception = e.name, msg = e.msg
|
|
|
|
continue
|
|
|
|
|
|
|
|
for i in 0..<ATTESTATION_SUBNET_COUNT:
|
2021-09-21 22:25:49 +00:00
|
|
|
if wantedAttnets[i] and attnetsNode[i]:
|
|
|
|
score += 1
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
let syncnetsBytes = n.record.tryGet(enrSyncSubnetsField, seq[byte])
|
|
|
|
if syncnetsBytes.isSome():
|
|
|
|
let syncnetsNode =
|
|
|
|
try:
|
2021-10-21 13:09:19 +00:00
|
|
|
SSZ.decode(syncnetsBytes.get(), SyncnetBits)
|
2021-09-21 22:25:49 +00:00
|
|
|
except SszError as e:
|
|
|
|
debug "Could not decode the syncnets ENR bitfield of peer",
|
|
|
|
peer = n.record.toURI(), exception = e.name, msg = e.msg
|
|
|
|
continue
|
|
|
|
|
2022-01-08 23:28:49 +00:00
|
|
|
for i in SyncSubcommitteeIndex:
|
2021-09-21 22:25:49 +00:00
|
|
|
if wantedSyncnets[i] and syncnetsNode[i]:
|
|
|
|
score += 10 # connecting to the right syncnet is urgent
|
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
if score >= minScore:
|
2021-09-21 22:25:49 +00:00
|
|
|
filtered.add((score, n))
|
2021-08-23 10:29:50 +00:00
|
|
|
|
|
|
|
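# Shuffle before sorting by descending score so that nodes with equal scores
# are returned in random order.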
d.rng[].shuffle(filtered)
|
2021-09-22 11:19:25 +00:00
|
|
|
return filtered.sortedByIt(-it[0]).mapIt(it[1])
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
proc trimConnections(node: Eth2Node, count: int) =
|
2021-08-23 10:29:50 +00:00
|
|
|
# Kill `count` peers, scoring them to remove the least useful ones
|
|
|
|
|
2022-04-08 16:22:49 +00:00
|
|
|
var scores = initOrderedTable[PeerId, int]()
|
2021-08-23 10:29:50 +00:00
|
|
|
|
|
|
|
# Take into account the stabilitySubnets
|
|
|
|
# During sync, only this will be used to score peers
|
2022-03-11 10:51:53 +00:00
|
|
|
# since gossipsub is not running yet
|
2021-08-23 10:29:50 +00:00
|
|
|
#
|
|
|
|
# A peer subscribed to all stabilitySubnets will
|
|
|
|
# have 640 points
|
2022-03-11 10:51:53 +00:00
|
|
|
var peersInGracePeriod = 0
|
2021-08-23 10:29:50 +00:00
|
|
|
for peer in node.peers.values:
|
|
|
|
if peer.connectionState != Connected: continue
|
2022-03-11 10:51:53 +00:00
|
|
|
|
|
|
|
# Metadata pinger is used as grace period
|
|
|
|
if peer.metadata.isNone:
|
|
|
|
peersInGracePeriod.inc()
|
|
|
|
continue
|
2021-08-23 10:29:50 +00:00
|
|
|
|
|
|
|
let
|
|
|
|
stabilitySubnets = peer.metadata.get().attnets
|
|
|
|
stabilitySubnetsCount = stabilitySubnets.countOnes()
|
|
|
|
thisPeersScore = 10 * stabilitySubnetsCount
|
|
|
|
|
2021-10-21 11:01:29 +00:00
|
|
|
scores[peer.peerId] = thisPeersScore
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
|
|
|
|
# Safeguard: if we have too many peers in the grace
|
|
|
|
# period, don't kick anyone. Otherwise, they will be
|
|
|
|
# preferred over long-standing peers
|
|
|
|
if peersInGracePeriod > scores.len div 2:
|
|
|
|
return
|
|
|
|
|
2021-08-23 10:29:50 +00:00
|
|
|
# Split 1 000 points among each topic's subscribed peers
|
2022-03-11 10:51:53 +00:00
|
|
|
# + 5 000 points among each topic's mesh peers
|
2021-08-23 10:29:50 +00:00
|
|
|
# This gives priority to peers in topics with few peers
|
|
|
|
# For instance, a topic with `dHigh` peers will give 80 points to each peer
|
|
|
|
# Whereas a topic with `dLow` peers will give 250 points to each peer
|
2022-03-11 10:51:53 +00:00
|
|
|
#
|
|
|
|
# Then, use the average across all topics per peer, to avoid giving too many
|
|
|
|
# points to peers present in many topics
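# For reference (assuming the GossipSubParams set further below, dLow = 6 and
# dHigh = 12): a topic's subscription score works out to roughly 1000 div 12 ≈ 83
# points per peer at dHigh and 1000 div 6 ≈ 166 at dLow, while mesh membership
# adds between 5000 div 12 ≈ 416 and 5000 div 6 ≈ 833 points per peer.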
|
|
|
|
|
2022-04-08 16:22:49 +00:00
|
|
|
var gossipScores = initTable[PeerId, tuple[sum: int, count: int]]()
|
2022-02-01 17:20:55 +00:00
|
|
|
for topic, _ in node.pubsub.gossipsub:
|
2021-08-23 10:29:50 +00:00
|
|
|
let
|
2022-02-01 17:20:55 +00:00
|
|
|
peersInMesh = node.pubsub.mesh.peers(topic)
|
|
|
|
peersSubbed = node.pubsub.gossipsub.peers(topic)
|
2022-03-11 10:51:53 +00:00
|
|
|
scorePerMeshPeer = 5_000 div max(peersInMesh, 1)
|
2022-02-01 17:20:55 +00:00
|
|
|
scorePerSubbedPeer = 1_000 div max(peersSubbed, 1)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
for peer in node.pubsub.gossipsub.getOrDefault(topic):
|
2021-08-23 10:29:50 +00:00
|
|
|
if peer.peerId notin scores: continue
|
2022-03-11 10:51:53 +00:00
|
|
|
let currentVal = gossipScores.getOrDefault(peer.peerId)
|
|
|
|
gossipScores[peer.peerId] = (
|
|
|
|
currentVal.sum + scorePerSubbedPeer,
|
|
|
|
currentVal.count + 1
|
|
|
|
)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
# Avoid global topics (>75% of peers), which would greatly reduce
|
|
|
|
# the average score for small peers
|
|
|
|
if peersSubbed > scores.len div 4 * 3: continue
|
|
|
|
|
|
|
|
for peer in node.pubsub.mesh.getOrDefault(topic):
|
2022-02-01 17:20:55 +00:00
|
|
|
if peer.peerId notin scores: continue
|
2022-03-11 10:51:53 +00:00
|
|
|
let currentVal = gossipScores.getOrDefault(peer.peerId)
|
|
|
|
gossipScores[peer.peerId] = (
|
|
|
|
currentVal.sum + scorePerMeshPeer,
|
|
|
|
currentVal.count + 1
|
|
|
|
)
|
|
|
|
|
2022-05-10 10:03:40 +00:00
|
|
|
for peerId, gScore in gossipScores:
|
2022-03-11 10:51:53 +00:00
|
|
|
scores[peerId] =
|
|
|
|
scores.getOrDefault(peerId) + (gScore.sum div gScore.count)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-04-08 16:22:49 +00:00
|
|
|
proc sortPerScore(a, b: (PeerId, int)): int =
|
2021-08-23 10:29:50 +00:00
|
|
|
system.cmp(a[1], b[1])
|
|
|
|
|
|
|
|
scores.sort(sortPerScore)
|
|
|
|
|
|
|
|
var toKick = count
|
|
|
|
|
|
|
|
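# `scores` is an OrderedTable sorted ascending by score above, so this loop
# kicks the lowest-scoring peers first.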
for peerId in scores.keys:
|
2021-09-28 07:58:03 +00:00
|
|
|
debug "kicking peer", peerId, score=scores[peerId]
|
2022-03-11 10:51:53 +00:00
|
|
|
asyncSpawn node.getPeer(peerId).disconnect(PeerScoreLow)
|
2021-08-23 10:29:50 +00:00
|
|
|
dec toKick
|
2021-09-28 07:58:03 +00:00
|
|
|
inc(nbc_cycling_kicked_peers)
|
2021-08-23 10:29:50 +00:00
|
|
|
if toKick <= 0: return
|
|
|
|
|
2021-10-21 13:09:19 +00:00
|
|
|
proc getLowSubnets(node: Eth2Node, epoch: Epoch): (AttnetBits, SyncnetBits) =
|
2021-08-23 10:29:50 +00:00
|
|
|
# Returns the subnets required to have a healthy mesh
|
|
|
|
# The subnets are computed to, in order:
|
2022-01-04 13:37:04 +00:00
|
|
|
# - Have 0 subnet with < `dLow` peers from topic subscription
|
2021-08-23 10:29:50 +00:00
|
|
|
# - Have 0 subscribed subnet below `dLow`
|
|
|
|
# - Have 0 subscribed subnet below `dOut` outgoing peers
|
2022-01-04 13:37:04 +00:00
|
|
|
# - Have 0 subnet with < `dHigh` peers from topic subscription
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-02-01 17:20:55 +00:00
|
|
|
nbc_gossipsub_low_fanout.set(0)
|
|
|
|
nbc_gossipsub_good_fanout.set(0)
|
|
|
|
nbc_gossipsub_healthy_fanout.set(0)
|
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
template findLowSubnets(topicNameGenerator: untyped,
|
|
|
|
SubnetIdType: type,
|
|
|
|
totalSubnets: static int): auto =
|
|
|
|
var
|
|
|
|
lowOutgoingSubnets: BitArray[totalSubnets]
|
2022-01-04 13:37:04 +00:00
|
|
|
notHighOutgoingSubnets: BitArray[totalSubnets]
|
|
|
|
belowDSubnets: BitArray[totalSubnets]
|
2021-09-21 22:25:49 +00:00
|
|
|
belowDOutSubnets: BitArray[totalSubnets]
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
for subNetId in 0 ..< totalSubnets:
|
|
|
|
let topic =
|
2022-04-08 16:22:49 +00:00
|
|
|
topicNameGenerator(node.forkId.fork_digest, SubnetIdType(subNetId))
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-01-04 13:37:04 +00:00
|
|
|
if node.pubsub.gossipsub.peers(topic) < node.pubsub.parameters.dLow:
|
2021-09-21 22:25:49 +00:00
|
|
|
lowOutgoingSubnets.setBit(subNetId)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-01-04 13:37:04 +00:00
|
|
|
if node.pubsub.gossipsub.peers(topic) < node.pubsub.parameters.dHigh:
|
|
|
|
notHighOutgoingSubnets.setBit(subNetId)
|
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
# Not subscribed
|
|
|
|
if topic notin node.pubsub.mesh: continue
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
if node.pubsub.mesh.peers(topic) < node.pubsub.parameters.dLow:
|
2022-01-04 13:37:04 +00:00
|
|
|
belowDSubnets.setBit(subNetId)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-10-21 13:09:19 +00:00
|
|
|
let outPeers = node.pubsub.mesh.getOrDefault(topic).countIt(it.outbound)
|
|
|
|
if outPeers < node.pubsub.parameters.dOut:
|
2021-09-21 22:25:49 +00:00
|
|
|
belowDOutSubnets.setBit(subNetId)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-02-01 17:20:55 +00:00
|
|
|
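# Fanout health metrics: `low` counts subnets with fewer than dLow peers on
# the topic, `good` those with between dLow and dHigh, and `healthy` those
# with at least dHigh.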
nbc_gossipsub_low_fanout.inc(int64(lowOutgoingSubnets.countOnes()))
|
|
|
|
nbc_gossipsub_good_fanout.inc(int64(
|
|
|
|
notHighOutgoingSubnets.countOnes() -
|
|
|
|
lowOutgoingSubnets.countOnes()
|
|
|
|
))
|
|
|
|
nbc_gossipsub_healthy_fanout.inc(int64(
|
|
|
|
totalSubnets - notHighOutgoingSubnets.countOnes()))
|
|
|
|
|
2022-01-04 13:37:04 +00:00
|
|
|
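# Return the highest-priority non-empty set: subnets with too few subscribed
# peers first, then subscribed subnets whose mesh is below dLow, then those
# short on outgoing mesh peers, and otherwise the subnets that have not yet
# reached dHigh.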
if lowOutgoingSubnets.countOnes() > 0:
|
2021-09-21 22:25:49 +00:00
|
|
|
lowOutgoingSubnets
|
2022-01-04 13:37:04 +00:00
|
|
|
elif belowDSubnets.countOnes() > 0:
|
|
|
|
belowDSubnets
|
|
|
|
elif belowDOutSubnets.countOnes() > 0:
|
2021-09-21 22:25:49 +00:00
|
|
|
belowDOutSubnets
|
2022-01-04 13:37:04 +00:00
|
|
|
else:
|
|
|
|
notHighOutgoingSubnets
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
return (
|
|
|
|
findLowSubnets(getAttestationTopic, SubnetId, ATTESTATION_SUBNET_COUNT),
|
2021-10-04 13:08:47 +00:00
|
|
|
# We start looking one epoch before the transition in order to allow
|
|
|
|
# some time for the gossip meshes to get healthy:
|
|
|
|
if epoch + 1 >= node.cfg.ALTAIR_FORK_EPOCH:
|
2021-10-20 16:32:46 +00:00
|
|
|
findLowSubnets(getSyncCommitteeTopic, SyncSubcommitteeIndex, SYNC_COMMITTEE_SUBNET_COUNT)
|
2021-10-04 13:08:47 +00:00
|
|
|
else:
|
2021-10-21 13:09:19 +00:00
|
|
|
default(SyncnetBits)
|
2021-09-21 22:25:49 +00:00
|
|
|
)
|
2021-03-24 10:48:53 +00:00
|
|
|
|
2022-06-15 08:14:47 +00:00
|
|
|
proc runDiscoveryLoop(node: Eth2Node) {.async.} =
|
2020-03-22 21:55:01 +00:00
|
|
|
debug "Starting discovery loop"
|
2020-09-21 16:02:27 +00:00
|
|
|
|
2020-03-22 21:55:01 +00:00
|
|
|
while true:
|
2021-08-23 10:29:50 +00:00
|
|
|
let
|
2021-10-04 13:08:47 +00:00
|
|
|
currentEpoch = node.getBeaconTime().slotOrZero.epoch
|
|
|
|
(wantedAttnets, wantedSyncnets) = node.getLowSubnets(currentEpoch)
|
2021-08-23 10:29:50 +00:00
|
|
|
wantedAttnetsCount = wantedAttnets.countOnes()
|
2021-09-21 22:25:49 +00:00
|
|
|
wantedSyncnetsCount = wantedSyncnets.countOnes()
|
2022-03-11 10:51:53 +00:00
|
|
|
outgoingPeers = node.peerPool.lenCurrent({PeerType.Outgoing})
|
|
|
|
targetOutgoingPeers = max(node.wantedPeers div 10, 3)
|
|
|
|
|
|
|
|
if wantedAttnetsCount > 0 or wantedSyncnetsCount > 0 or
|
|
|
|
outgoingPeers < targetOutgoingPeers:
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
let
|
|
|
|
minScore =
|
|
|
|
if wantedAttnetsCount > 0 or wantedSyncnetsCount > 0:
|
|
|
|
1
|
|
|
|
else:
|
|
|
|
0
|
|
|
|
discoveredNodes = await node.discovery.queryRandom(
|
|
|
|
node.discoveryForkId, wantedAttnets, wantedSyncnets, minScore)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2021-09-28 07:58:03 +00:00
|
|
|
let newPeers = block:
|
|
|
|
var np = newSeq[PeerAddr]()
|
|
|
|
for discNode in discoveredNodes:
|
|
|
|
let res = discNode.toPeerAddr()
|
|
|
|
if res.isErr():
|
|
|
|
debug "Failed to decode discovery's node address",
|
2022-04-08 16:22:49 +00:00
|
|
|
node = discNode, errMsg = res.error
|
2021-09-28 07:58:03 +00:00
|
|
|
continue
|
|
|
|
|
2021-01-14 07:58:13 +00:00
|
|
|
let peerAddr = res.get()
|
2021-09-28 07:58:03 +00:00
|
|
|
if node.checkPeer(peerAddr) and
|
|
|
|
peerAddr.peerId notin node.connTable:
|
|
|
|
np.add(peerAddr)
|
|
|
|
np
|
|
|
|
|
|
|
|
let
|
2022-03-11 10:51:53 +00:00
|
|
|
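# Trim just enough existing connections to make room for the newly discovered
# peers, but never more than a fifth of `hardMaxPeers` per discovery tick.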
roomCurrent = node.hardMaxPeers - len(node.peerPool)
|
|
|
|
peersToKick = min(newPeers.len - roomCurrent, node.hardMaxPeers div 5)
|
2021-09-28 07:58:03 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
if peersToKick > 0 and newPeers.len > 0:
|
|
|
|
node.trimConnections(peersToKick)
|
2021-09-28 07:58:03 +00:00
|
|
|
|
|
|
|
for peerAddr in newPeers:
|
|
|
|
# We add the peer to the pending connections table here; it will
|
|
|
|
# be removed only in `connectWorker`.
|
|
|
|
node.connTable.incl(peerAddr.peerId)
|
|
|
|
await node.connQueue.addLast(peerAddr)
|
2021-01-14 07:58:13 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
debug "Discovery tick",
|
|
|
|
wanted_peers = node.wantedPeers,
|
|
|
|
current_peers = len(node.peerPool),
|
2021-01-14 07:58:13 +00:00
|
|
|
discovered_nodes = len(discoveredNodes),
|
2021-09-28 07:58:03 +00:00
|
|
|
new_peers = len(newPeers)
|
2021-01-14 07:58:13 +00:00
|
|
|
|
2021-09-28 07:58:03 +00:00
|
|
|
if len(newPeers) == 0:
|
2022-03-11 10:51:53 +00:00
|
|
|
let currentPeers = len(node.peerPool)
|
2021-01-14 07:58:13 +00:00
|
|
|
if currentPeers <= node.wantedPeers shr 2: # 25%
|
|
|
|
warn "Peer count low, no new peers discovered",
|
|
|
|
discovered_nodes = len(discoveredNodes), new_peers = newPeers,
|
|
|
|
current_peers = currentPeers, wanted_peers = node.wantedPeers
|
2021-01-18 13:13:26 +00:00
|
|
|
|
|
|
|
# Discovery `queryRandom` can have a synchronous fast path for example
|
|
|
|
# when no peers are in the routing table. Don't run it in a continuous loop.
|
2021-08-23 10:29:50 +00:00
|
|
|
#
|
|
|
|
# Also, give some time to dial the discovered nodes and update stats etc
|
|
|
|
await sleepAsync(5.seconds)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-11-26 19:23:45 +00:00
|
|
|
proc resolvePeer(peer: Peer) =
|
|
|
|
# Resolve task which performs searching of peer's public key and recovery of
|
|
|
|
# ENR using discovery5. We only resolve ENR for peers we know about to avoid
|
|
|
|
# querying the network - as of now, the ENR is not needed, except for
|
|
|
|
# debugging
|
2021-10-21 11:01:29 +00:00
|
|
|
logScope: peer = peer.peerId
|
2020-11-26 19:23:45 +00:00
|
|
|
let startTime = now(chronos.Moment)
|
|
|
|
let nodeId =
|
|
|
|
block:
|
|
|
|
var key: PublicKey
|
2022-04-08 16:22:49 +00:00
|
|
|
# `secp256k1` keys are always stored inside PeerId.
|
2021-10-21 11:01:29 +00:00
|
|
|
discard peer.peerId.extractPublicKey(key)
|
2020-11-26 19:23:45 +00:00
|
|
|
keys.PublicKey.fromRaw(key.skkey.getBytes()).get().toNodeId()
|
|
|
|
|
|
|
|
debug "Peer's ENR recovery task started", node_id = $nodeId
|
|
|
|
|
|
|
|
# This is "fast-path" for peers which was dialed. In this case discovery
|
|
|
|
# already has most recent ENR information about this peer.
|
|
|
|
let gnode = peer.network.discovery.getNode(nodeId)
|
|
|
|
if gnode.isSome():
|
|
|
|
peer.enr = some(gnode.get().record)
|
|
|
|
inc(nbc_successful_discoveries)
|
|
|
|
let delay = now(chronos.Moment) - startTime
|
|
|
|
nbc_resolve_time.observe(delay.toFloatSeconds())
|
2021-01-21 17:42:57 +00:00
|
|
|
debug "Peer's ENR recovered", delay
|
2020-11-26 19:23:45 +00:00
|
|
|
|
|
|
|
proc handlePeer*(peer: Peer) {.async.} =
|
|
|
|
let res = peer.network.peerPool.addPeerNoWait(peer, peer.direction)
|
|
|
|
case res:
|
|
|
|
of PeerStatus.LowScoreError, PeerStatus.NoSpaceError:
|
|
|
|
# Peer has low score or we do not have enough space in PeerPool,
|
|
|
|
# we are going to disconnect it gracefully.
|
|
|
|
# Peer's state will be updated in the connection event.
|
|
|
|
debug "Peer has low score or there no space in PeerPool",
|
|
|
|
peer = peer, reason = res
|
|
|
|
await peer.disconnect(FaultOrError)
|
|
|
|
of PeerStatus.DeadPeerError:
|
|
|
|
# Peer's lifetime future is finished, so it's already dead,
|
|
|
|
# we do not need to perform a graceful disconnect.
|
|
|
|
# Peer's state will be updated in connection event.
|
|
|
|
discard
|
|
|
|
of PeerStatus.DuplicateError:
|
|
|
|
# Peer is already present in PeerPool, we can't perform disconnect,
|
|
|
|
# because in such case we could kill both connections (connection
|
|
|
|
# which is present in PeerPool and new one).
|
|
|
|
# This is a possible bug, because we could enter here only if the number
|
|
|
|
# of `peer.connections` is 1; it means that the Peer's lifetime is not
|
|
|
|
# tracked properly and we still have not received the `Disconnected` event.
|
|
|
|
debug "Peer is already present in PeerPool", peer = peer
|
|
|
|
of PeerStatus.Success:
|
|
|
|
# Peer was added to PeerPool.
|
|
|
|
peer.score = NewPeerScore
|
|
|
|
peer.connectionState = Connected
|
|
|
|
# We spawn task which will obtain ENR for this peer.
|
|
|
|
resolvePeer(peer)
|
|
|
|
debug "Peer successfully connected", peer = peer,
|
|
|
|
connections = peer.connections
|
|
|
|
|
2022-04-08 16:22:49 +00:00
|
|
|
proc onConnEvent(node: Eth2Node, peerId: PeerId, event: ConnEvent) {.async.} =
|
2020-08-10 10:58:34 +00:00
|
|
|
let peer = node.getPeer(peerId)
|
|
|
|
case event.kind
|
|
|
|
of ConnEventKind.Connected:
|
|
|
|
inc peer.connections
|
2020-11-26 19:23:45 +00:00
|
|
|
debug "Peer connection upgraded", peer = $peerId,
|
|
|
|
connections = peer.connections
|
2020-08-10 10:58:34 +00:00
|
|
|
if peer.connections == 1:
|
|
|
|
# Libp2p may connect multiple times to the same peer - using different
|
2020-08-08 20:52:02 +00:00
|
|
|
# transports for both incoming and outgoing. For now, we'll count our
|
2020-08-10 10:58:34 +00:00
|
|
|
# "fist" encounter with the peer as the true connection, leaving the
|
|
|
|
# other connections be - libp2p limits the number of concurrent
|
|
|
|
# connections to the same peer, and only one of these connections will be
|
|
|
|
# active. Nonetheless, this quirk will cause a number of odd behaviours:
|
|
|
|
# * For peer limits, we might miscount the incoming vs outgoing quota
|
|
|
|
# * Protocol handshakes are wonky: we'll not necessarily use the newly
|
|
|
|
# connected transport - instead we'll just pick a random one!
|
2020-11-26 19:23:45 +00:00
|
|
|
case peer.connectionState
|
|
|
|
of Disconnecting:
|
|
|
|
# We got connection with peer which we currently disconnecting.
|
|
|
|
# Normally this does not happen, but if a peer is being disconnected
|
|
|
|
# while a concurrent (incoming for example) connection attempt happens,
|
|
|
|
# we might end up here
|
|
|
|
debug "Got connection attempt from peer that we are disconnecting",
|
|
|
|
peer = peerId
|
|
|
|
await node.switch.disconnect(peerId)
|
|
|
|
return
|
|
|
|
of None:
|
|
|
|
# We have established a connection with the new peer.
|
|
|
|
peer.connectionState = Connecting
|
|
|
|
of Disconnected:
|
|
|
|
# We have established a connection with the peer that we have seen
|
|
|
|
# before - reusing the existing peer object is fine
|
|
|
|
peer.connectionState = Connecting
|
|
|
|
peer.score = 0 # Will be set to NewPeerScore after handshake
|
|
|
|
of Connecting, Connected:
|
|
|
|
# This means that we got a notification event from a peer to which we are already
|
|
|
|
# connected or connecting right now. If this situation happens,
|
|
|
|
# it means there is a bug on the `nim-libp2p` side.
|
|
|
|
warn "Got connection attempt from peer which we already connected",
|
|
|
|
peer = peerId
|
|
|
|
await peer.disconnect(FaultOrError)
|
|
|
|
return
|
|
|
|
|
|
|
|
# Store connection direction inside Peer object.
|
|
|
|
if event.incoming:
|
|
|
|
peer.direction = PeerType.Incoming
|
|
|
|
else:
|
|
|
|
peer.direction = PeerType.Outgoing
|
2020-08-08 20:52:02 +00:00
|
|
|
|
2020-11-26 08:05:23 +00:00
|
|
|
await performProtocolHandshakes(peer, event.incoming)
|
|
|
|
|
2020-08-10 10:58:34 +00:00
|
|
|
of ConnEventKind.Disconnected:
|
|
|
|
dec peer.connections
|
2020-11-26 19:23:45 +00:00
|
|
|
debug "Lost connection to peer", peer = peerId,
|
|
|
|
connections = peer.connections
|
|
|
|
|
2020-08-10 10:58:34 +00:00
|
|
|
if peer.connections == 0:
|
2020-11-26 19:23:45 +00:00
|
|
|
debug "Peer disconnected", peer = $peerId, connections = peer.connections
|
|
|
|
|
|
|
|
# Whatever caused disconnection, avoid connection spamming
|
|
|
|
node.addSeen(peerId, SeenTableTimeReconnect)
|
|
|
|
|
2020-08-10 10:58:34 +00:00
|
|
|
let fut = peer.disconnectedFut
|
2020-11-26 19:23:45 +00:00
|
|
|
if not(isNil(fut)):
|
2020-11-26 08:05:23 +00:00
|
|
|
fut.complete()
|
2020-11-26 19:23:45 +00:00
|
|
|
peer.disconnectedFut = nil
|
|
|
|
else:
|
|
|
|
# TODO (cheatfate): This could be removed when the bug is fixed inside
|
|
|
|
# `nim-libp2p`.
|
|
|
|
debug "Got new event while peer is already disconnected",
|
|
|
|
peer = peerId, peer_state = peer.connectionState
|
|
|
|
peer.connectionState = Disconnected
|
2020-08-10 10:58:34 +00:00
|
|
|
|
2022-06-15 08:14:47 +00:00
|
|
|
proc new(T: type Eth2Node,
|
|
|
|
config: BeaconNodeConf | LightClientConf, runtimeCfg: RuntimeConfig,
|
|
|
|
enrForkId: ENRForkID, discoveryForkId: ENRForkID,
|
|
|
|
forkDigests: ref ForkDigests, getBeaconTime: GetBeaconTimeFn,
|
|
|
|
switch: Switch, pubsub: GossipSub,
|
|
|
|
ip: Option[ValidIpAddress], tcpPort, udpPort: Option[Port],
|
|
|
|
privKey: keys.PrivateKey, discovery: bool,
|
2022-06-21 08:29:16 +00:00
|
|
|
rng: ref HmacDrbgContext): T {.raises: [Defect, CatchableError].} =
|
2020-08-12 14:16:59 +00:00
|
|
|
when not defined(local_testnet):
|
2021-02-22 16:17:48 +00:00
|
|
|
let
|
2022-05-31 10:45:37 +00:00
|
|
|
connectTimeout = chronos.minutes(1)
|
|
|
|
seenThreshold = chronos.minutes(5)
|
2020-08-12 14:16:59 +00:00
|
|
|
else:
|
2021-02-22 16:17:48 +00:00
|
|
|
let
|
2022-05-31 10:45:37 +00:00
|
|
|
connectTimeout = chronos.seconds(10)
|
|
|
|
seenThreshold = chronos.seconds(10)
|
2022-03-18 11:36:50 +00:00
|
|
|
type MetaData = altair.MetaData # Weird bug without this..
|
|
|
|
|
|
|
|
# Versions up to v22.3.0 would write an empty `MetaData` to
|
|
|
|
# `data-dir/node-metadata.json`, which would then be reloaded on startup - don't
|
|
|
|
# write a file with this name or downgrades will break!
|
|
|
|
const metadata = MetaData()
|
2021-02-22 16:17:48 +00:00
|
|
|
|
|
|
|
let node = T(
|
|
|
|
switch: switch,
|
|
|
|
pubsub: pubsub,
|
|
|
|
wantedPeers: config.maxPeers,
|
2022-03-11 10:51:53 +00:00
|
|
|
hardMaxPeers: config.hardMaxPeers.get(config.maxPeers * 3 div 2), #*1.5
|
2021-08-19 10:45:31 +00:00
|
|
|
cfg: runtimeCfg,
|
2022-04-08 16:22:49 +00:00
|
|
|
peerPool: newPeerPool[Peer, PeerId](),
|
2021-02-22 16:17:48 +00:00
|
|
|
# It's important here to create the AsyncQueue with a limited size, otherwise
|
|
|
|
# it could produce HIGH cpu usage.
|
|
|
|
connQueue: newAsyncQueue[PeerAddr](ConcurrentConnections),
|
|
|
|
metadata: metadata,
|
|
|
|
forkId: enrForkId,
|
2021-09-29 11:06:16 +00:00
|
|
|
discoveryForkId: discoveryForkId,
|
2021-07-07 09:09:47 +00:00
|
|
|
forkDigests: forkDigests,
|
2021-08-19 10:45:31 +00:00
|
|
|
getBeaconTime: getBeaconTime,
|
2021-02-22 16:17:48 +00:00
|
|
|
discovery: Eth2DiscoveryProtocol.new(
|
|
|
|
config, ip, tcpPort, udpPort, privKey,
|
2021-08-10 06:19:13 +00:00
|
|
|
{
|
|
|
|
enrForkIdField: SSZ.encode(enrForkId),
|
|
|
|
enrAttestationSubnetsField: SSZ.encode(metadata.attnets)
|
|
|
|
},
|
2021-02-22 16:17:48 +00:00
|
|
|
rng),
|
|
|
|
discoveryEnabled: discovery,
|
|
|
|
rng: rng,
|
|
|
|
connectTimeout: connectTimeout,
|
2021-08-23 10:29:50 +00:00
|
|
|
seenThreshold: seenThreshold
|
2021-02-22 16:17:48 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
newSeq node.protocolStates, allProtocols.len
|
2020-03-22 21:55:01 +00:00
|
|
|
for proto in allProtocols:
|
|
|
|
if proto.networkStateInitializer != nil:
|
2021-02-22 16:17:48 +00:00
|
|
|
node.protocolStates[proto.index] = proto.networkStateInitializer(node)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
for msg in proto.messages:
|
2022-05-31 10:45:37 +00:00
|
|
|
when config is BeaconNodeConf:
|
2022-07-29 08:45:39 +00:00
|
|
|
if msg.isLightClientRequest and not config.lightClientDataServe:
|
2022-05-31 10:45:37 +00:00
|
|
|
continue
|
|
|
|
elif config is LightClientConf:
|
|
|
|
if not msg.isRequired:
|
|
|
|
continue
|
2020-03-22 21:55:01 +00:00
|
|
|
if msg.protocolMounter != nil:
|
2021-02-22 16:17:48 +00:00
|
|
|
msg.protocolMounter node
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2021-10-21 11:01:29 +00:00
|
|
|
proc peerHook(peerId: PeerId, event: ConnEvent): Future[void] {.gcsafe.} =
|
|
|
|
onConnEvent(node, peerId, event)
|
2020-08-10 10:58:34 +00:00
|
|
|
|
2021-10-04 18:42:34 +00:00
|
|
|
switch.addConnEventHandler(peerHook, ConnEventKind.Connected)
|
|
|
|
switch.addConnEventHandler(peerHook, ConnEventKind.Disconnected)
|
|
|
|
|
2021-11-01 14:50:24 +00:00
|
|
|
proc scoreCheck(peer: Peer): bool =
|
|
|
|
peer.score >= PeerScoreLowLimit
|
|
|
|
|
|
|
|
proc onDeletePeer(peer: Peer) =
|
|
|
|
if peer.connectionState notin {ConnectionState.Disconnecting,
|
|
|
|
ConnectionState.Disconnected}:
|
|
|
|
if peer.score < PeerScoreLowLimit:
|
|
|
|
debug "Peer was removed from PeerPool due to low score", peer = peer,
|
|
|
|
peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
|
|
|
|
score_high_limit = PeerScoreHighLimit
|
|
|
|
asyncSpawn(peer.disconnect(PeerScoreLow))
|
|
|
|
else:
|
|
|
|
debug "Peer was removed from PeerPool", peer = peer,
|
|
|
|
peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
|
|
|
|
score_high_limit = PeerScoreHighLimit
|
|
|
|
asyncSpawn(peer.disconnect(FaultOrError)) # Shouldn't actually happen!
|
|
|
|
|
|
|
|
node.peerPool.setScoreCheck(scoreCheck)
|
|
|
|
node.peerPool.setOnDeletePeer(onDeletePeer)
|
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
node
|
|
|
|
|
2022-06-15 08:14:47 +00:00
|
|
|
template publicKey(node: Eth2Node): keys.PublicKey =
|
2020-06-22 19:40:19 +00:00
|
|
|
node.discovery.privKey.toPublicKey
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-08-03 17:35:27 +00:00
|
|
|
proc startListening*(node: Eth2Node) {.async.} =
|
2020-08-24 11:52:06 +00:00
|
|
|
if node.discoveryEnabled:
|
2020-10-09 13:37:12 +00:00
|
|
|
try:
|
|
|
|
node.discovery.open()
|
2020-11-17 10:14:53 +00:00
|
|
|
except CatchableError:
|
2020-10-09 13:37:12 +00:00
|
|
|
fatal "Failed to start discovery service. UDP port may be already in use"
|
|
|
|
quit 1
|
|
|
|
|
|
|
|
try:
|
2021-12-17 11:39:24 +00:00
|
|
|
await node.switch.start()
|
2020-10-09 13:37:12 +00:00
|
|
|
except CatchableError:
|
|
|
|
fatal "Failed to start LibP2P transport. TCP port may be already in use"
|
|
|
|
quit 1
|
|
|
|
|
2021-08-23 10:29:50 +00:00
|
|
|
proc peerPingerHeartbeat(node: Eth2Node): Future[void] {.gcsafe.}
|
2022-03-11 10:51:53 +00:00
|
|
|
proc peerTrimmerHeartbeat(node: Eth2Node): Future[void] {.gcsafe.}
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2020-06-10 19:36:54 +00:00
|
|
|
proc start*(node: Eth2Node) {.async.} =
|
2020-09-16 10:00:11 +00:00
|
|
|
|
|
|
|
proc onPeerCountChanged() =
|
2022-03-11 10:51:53 +00:00
|
|
|
trace "Number of peers has been changed", length = len(node.peerPool)
|
2020-09-16 10:00:11 +00:00
|
|
|
nbc_peers.set int64(len(node.peerPool))
|
|
|
|
|
|
|
|
node.peerPool.setPeerCounter(onPeerCountChanged)
|
|
|
|
|
2020-06-10 19:36:54 +00:00
|
|
|
for i in 0 ..< ConcurrentConnections:
|
2020-09-21 16:02:27 +00:00
|
|
|
node.connWorkers.add connectWorker(node, i)
|
2020-06-10 19:36:54 +00:00
|
|
|
|
2020-08-24 11:52:06 +00:00
|
|
|
if node.discoveryEnabled:
|
|
|
|
node.discovery.start()
|
|
|
|
traceAsyncErrors node.runDiscoveryLoop()
|
|
|
|
else:
|
2020-10-01 18:56:42 +00:00
|
|
|
notice "Discovery disabled; trying bootstrap nodes",
|
2020-08-24 11:52:06 +00:00
|
|
|
nodes = node.discovery.bootstrapRecords.len
|
|
|
|
for enr in node.discovery.bootstrapRecords:
|
|
|
|
let tr = enr.toTypedRecord()
|
|
|
|
if tr.isOk():
|
2020-11-26 19:23:45 +00:00
|
|
|
let pa = tr.get().toPeerAddr(tcpProtocol)
|
2020-08-24 11:52:06 +00:00
|
|
|
if pa.isOk():
|
|
|
|
await node.connQueue.addLast(pa.get())
|
2021-08-23 10:29:50 +00:00
|
|
|
node.peerPingerHeartbeatFut = node.peerPingerHeartbeat()
|
2022-03-11 10:51:53 +00:00
|
|
|
node.peerTrimmerHeartbeatFut = node.peerTrimmerHeartbeat()
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-05-19 18:57:35 +00:00
|
|
|
proc stop*(node: Eth2Node) {.async.} =
|
2020-05-28 01:14:01 +00:00
|
|
|
# Ignore errors in futures, since we're shutting down (but log them on the
|
|
|
|
# TRACE level, if a timeout is reached).
|
2022-03-11 10:51:53 +00:00
|
|
|
var waitedFutures =
|
|
|
|
@[
|
|
|
|
node.switch.stop(),
|
|
|
|
node.peerPingerHeartbeatFut.cancelAndWait(),
|
|
|
|
node.peerTrimmerHeartbeatFut.cancelAndWait(),
|
|
|
|
]
|
|
|
|
|
|
|
|
if node.discoveryEnabled:
|
|
|
|
waitedFutures &= node.discovery.closeWait()
|
|
|
|
|
2020-05-28 01:14:01 +00:00
|
|
|
let
|
|
|
|
timeout = 5.seconds
|
|
|
|
completed = await withTimeout(allFutures(waitedFutures), timeout)
|
|
|
|
if not completed:
|
2020-08-20 16:30:47 +00:00
|
|
|
trace "Eth2Node.stop(): timeout reached", timeout,
|
|
|
|
futureErrors = waitedFutures.filterIt(it.error != nil).mapIt(it.error.msg)
|
2020-05-19 18:57:35 +00:00
|
|
|
|
2022-06-15 08:14:47 +00:00
|
|
|
proc init(T: type Peer, network: Eth2Node, peerId: PeerId): Peer =
|
2020-11-26 19:23:45 +00:00
|
|
|
let res = Peer(
|
2021-10-21 11:01:29 +00:00
|
|
|
peerId: peerId,
|
2020-11-26 19:23:45 +00:00
|
|
|
network: network,
|
|
|
|
connectionState: ConnectionState.None,
|
|
|
|
lastReqTime: now(chronos.Moment),
|
2021-08-23 10:29:50 +00:00
|
|
|
lastMetadataTime: now(chronos.Moment),
|
2020-11-26 19:23:45 +00:00
|
|
|
protocolStates: newSeq[RootRef](len(allProtocols))
|
|
|
|
)
|
|
|
|
for i in 0 ..< len(allProtocols):
|
2020-03-22 21:55:01 +00:00
|
|
|
let proto = allProtocols[i]
|
2020-11-26 19:23:45 +00:00
|
|
|
if not(isNil(proto.peerStateInitializer)):
|
|
|
|
res.protocolStates[i] = proto.peerStateInitializer(res)
|
|
|
|
res
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
proc registerMsg(protocol: ProtocolInfo,
|
|
|
|
name: string,
|
|
|
|
mounter: MounterProc,
|
2022-03-17 14:09:18 +00:00
|
|
|
libp2pCodecName: string,
|
2022-05-31 10:45:37 +00:00
|
|
|
isRequired, isLightClientRequest: bool) =
|
2020-03-22 21:55:01 +00:00
|
|
|
protocol.messages.add MessageInfo(name: name,
|
|
|
|
protocolMounter: mounter,
|
2022-03-17 14:09:18 +00:00
|
|
|
libp2pCodecName: libp2pCodecName,
|
2022-05-31 10:45:37 +00:00
|
|
|
isRequired: isRequired,
|
2022-03-17 14:09:18 +00:00
|
|
|
isLightClientRequest: isLightClientRequest)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
|
|
|
|
var
|
|
|
|
Format = ident "SSZ"
|
2020-05-12 22:37:07 +00:00
|
|
|
Bool = bindSym "bool"
|
2020-03-22 23:23:21 +00:00
|
|
|
Connection = bindSym "Connection"
|
2020-03-22 21:55:01 +00:00
|
|
|
Peer = bindSym "Peer"
|
|
|
|
Eth2Node = bindSym "Eth2Node"
|
|
|
|
registerMsg = bindSym "registerMsg"
|
|
|
|
initProtocol = bindSym "initProtocol"
|
|
|
|
msgVar = ident "msg"
|
|
|
|
networkVar = ident "network"
|
|
|
|
callUserHandler = ident "callUserHandler"
|
2020-05-23 22:24:47 +00:00
|
|
|
MSG = ident "MSG"
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
p.useRequestIds = false
|
|
|
|
p.useSingleRecordInlining = true
|
|
|
|
|
|
|
|
new result
|
|
|
|
|
|
|
|
result.PeerType = Peer
|
|
|
|
result.NetworkType = Eth2Node
|
|
|
|
result.registerProtocol = bindSym "registerProtocol"
|
|
|
|
result.setEventHandlers = bindSym "setEventHandlers"
|
|
|
|
result.SerializationFormat = Format
|
2020-05-12 22:37:07 +00:00
|
|
|
result.RequestResultsWrapper = ident "NetRes"
|
2020-03-22 21:55:01 +00:00
|
|
|
|
2020-03-31 18:39:02 +00:00
|
|
|
result.implementMsg = proc (msg: p2p_protocol_dsl.Message) =
|
2020-05-23 22:24:47 +00:00
|
|
|
if msg.kind == msgResponse:
|
|
|
|
return
|
|
|
|
|
2020-03-22 21:55:01 +00:00
|
|
|
let
|
|
|
|
protocol = msg.protocol
|
|
|
|
msgName = $msg.ident
|
|
|
|
msgNameLit = newLit msgName
|
|
|
|
MsgRecName = msg.recName
|
|
|
|
MsgStrongRecName = msg.strongRecName
|
|
|
|
codecNameLit = getRequestProtoName(msg.procDef)
|
2022-05-31 10:45:37 +00:00
|
|
|
isRequiredLit = isRequiredProto(msg.procDef)
|
2022-03-17 14:09:18 +00:00
|
|
|
isLightClientRequestLit = isLightClientRequestProto(msg.procDef)
|
2020-05-26 17:07:18 +00:00
|
|
|
protocolMounterName = ident(msgName & "Mounter")
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
##
|
|
|
|
## Implement the Thunk:
|
|
|
|
##
|
2020-03-22 23:23:21 +00:00
|
|
|
## The protocol handlers in nim-libp2p receive only a `Connection`
|
2020-03-22 21:55:01 +00:00
|
|
|
## parameter and there is no way to access the wider context (such
|
|
|
|
## as the current `Switch`). In our handlers, we may need to list all
|
|
|
|
## peers in the current network, so we must keep a reference to the
|
|
|
|
## network object in the closure environment of the installed handlers.
|
|
|
|
##
|
|
|
|
## For this reason, we define a `protocol mounter` proc that will
|
|
|
|
## initialize the network object by creating handlers bound to the
|
|
|
|
## specific network.
|
|
|
|
##
|
2020-05-26 17:07:18 +00:00
|
|
|
var userHandlerCall = newTree(nnkDiscardStmt)
|
|
|
|
|
2020-03-22 21:55:01 +00:00
|
|
|
if msg.userHandler != nil:
|
2020-05-26 17:07:18 +00:00
|
|
|
var OutputParamType = if msg.kind == msgRequest: msg.outputParamType
|
|
|
|
else: nil
|
2020-05-23 22:24:47 +00:00
|
|
|
|
|
|
|
if OutputParamType == nil:
|
2020-05-26 17:07:18 +00:00
|
|
|
userHandlerCall = msg.genUserHandlerCall(msgVar, [peerVar])
|
|
|
|
if msg.kind == msgRequest:
|
|
|
|
userHandlerCall = newCall(ident"sendUserHandlerResultAsChunkImpl",
|
|
|
|
streamVar,
|
|
|
|
userHandlerCall)
|
2020-05-23 22:24:47 +00:00
|
|
|
else:
|
|
|
|
if OutputParamType.kind == nnkVarTy:
|
|
|
|
OutputParamType = OutputParamType[0]
|
|
|
|
|
|
|
|
let isChunkStream = eqIdent(OutputParamType[0], "MultipleChunksResponse")
|
|
|
|
msg.response.recName = if isChunkStream:
|
|
|
|
newTree(nnkBracketExpr, ident"seq", OutputParamType[1])
|
|
|
|
else:
|
|
|
|
OutputParamType[1]
|
|
|
|
|
|
|
|
let responseVar = ident("response")
|
|
|
|
userHandlerCall = newStmtList(
|
|
|
|
newVarStmt(responseVar,
|
|
|
|
newCall(ident"init", OutputParamType,
|
2020-08-10 13:18:17 +00:00
|
|
|
peerVar, streamVar)),
|
2020-05-23 22:24:47 +00:00
|
|
|
msg.genUserHandlerCall(msgVar, [peerVar], outputParam = responseVar))
|
|
|
|
|
2020-05-26 17:07:18 +00:00
|
|
|
protocol.outRecvProcs.add quote do:
|
|
|
|
template `callUserHandler`(`MSG`: type `MsgStrongRecName`,
|
|
|
|
`peerVar`: `Peer`,
|
|
|
|
`streamVar`: `Connection`,
|
|
|
|
`msgVar`: `MsgRecName`): untyped =
|
|
|
|
`userHandlerCall`
|
|
|
|
|
|
|
|
proc `protocolMounterName`(`networkVar`: `Eth2Node`) =
|
|
|
|
proc snappyThunk(`streamVar`: `Connection`,
|
|
|
|
`protocolVar`: string): Future[void] {.gcsafe.} =
|
2020-08-10 13:18:17 +00:00
|
|
|
return handleIncomingStream(`networkVar`, `streamVar`,
|
2020-05-26 17:07:18 +00:00
|
|
|
`MsgStrongRecName`)
|
|
|
|
|
|
|
|
mount `networkVar`.switch,
|
2020-09-22 17:34:34 +00:00
|
|
|
LPProtocol(codecs: @[`codecNameLit` & "ssz_snappy"],
|
2020-05-26 17:07:18 +00:00
|
|
|
handler: snappyThunk)
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
##
|
|
|
|
## Implement Senders and Handshake
|
|
|
|
##
|
|
|
|
if msg.kind == msgHandshake:
|
|
|
|
macros.error "Handshake messages are not supported in LibP2P protocols"
|
|
|
|
else:
|
|
|
|
var sendProc = msg.createSendProc()
|
|
|
|
implementSendProcBody sendProc
|
|
|
|
|
|
|
|
protocol.outProcRegistrations.add(
|
|
|
|
newCall(registerMsg,
|
|
|
|
protocol.protocolInfoVar,
|
|
|
|
msgNameLit,
|
2020-05-26 17:07:18 +00:00
|
|
|
protocolMounterName,
|
2022-03-17 14:09:18 +00:00
|
|
|
codecNameLit,
|
2022-05-31 10:45:37 +00:00
|
|
|
isRequiredLit,
|
2022-03-17 14:09:18 +00:00
|
|
|
isLightClientRequestLit))
|
2020-03-22 21:55:01 +00:00
|
|
|
|
|
|
|
result.implementProtocolInit = proc (p: P2PProtocol): NimNode =
|
|
|
|
return newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit)
|
|
|
|
|
2021-08-23 10:29:50 +00:00
|
|
|
# Must import here because of cyclicity
|
|
|
|
import ../sync/sync_protocol
|
2022-05-31 10:45:37 +00:00
|
|
|
export sync_protocol
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-04-08 16:22:49 +00:00
|
|
|
proc updatePeerMetadata(node: Eth2Node, peerId: PeerId) {.async.} =
|
2021-08-23 10:29:50 +00:00
|
|
|
trace "updating peer metadata", peerId
|
|
|
|
|
|
|
|
var peer = node.getPeer(peerId)
|
|
|
|
|
2021-09-17 05:56:30 +00:00
|
|
|
# getMetaData can fail with an exception
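# Try the altair `getMetadata_v2` request first and fall back to the phase0
# `getMetaData` for peers that do not support it; failed attempts bump
# `failedMetadataRequests`, which the pinger heartbeat uses to eventually kick
# unresponsive peers.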
|
|
|
|
let newMetadata =
|
|
|
|
try:
|
2022-02-01 17:20:55 +00:00
|
|
|
tryGet(await peer.getMetadata_v2())
|
2021-09-17 05:56:30 +00:00
|
|
|
except CatchableError:
|
2022-02-01 17:20:55 +00:00
|
|
|
let metadataV1 =
|
|
|
|
try: tryGet(await peer.getMetaData())
|
2021-09-17 05:56:30 +00:00
|
|
|
except CatchableError as exc:
|
|
|
|
debug "Failed to retrieve metadata from peer!", peerId, msg=exc.msg
|
2022-02-01 17:20:55 +00:00
|
|
|
peer.failedMetadataRequests.inc()
|
2021-09-17 05:56:30 +00:00
|
|
|
return
|
|
|
|
|
2022-02-01 17:20:55 +00:00
|
|
|
toAltairMetadata(metadataV1)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
|
|
|
peer.metadata = some(newMetadata)
|
2022-02-01 17:20:55 +00:00
|
|
|
peer.failedMetadataRequests = 0
|
2021-08-23 10:29:50 +00:00
|
|
|
peer.lastMetadataTime = Moment.now()
|
|
|
|
|
2021-09-28 07:58:03 +00:00
|
|
|
const
|
|
|
|
# For Phase0, metadata changes every 27+ hours
|
|
|
|
MetadataRequestFrequency = 30.minutes
|
2022-02-01 17:20:55 +00:00
|
|
|
MetadataRequestMaxFailures = 3
|
2021-09-28 07:58:03 +00:00
|
|
|
|
2021-08-23 10:29:50 +00:00
|
|
|
proc peerPingerHeartbeat(node: Eth2Node) {.async.} =
|
|
|
|
while true:
|
|
|
|
let heartbeatStart_m = Moment.now()
|
|
|
|
var updateFutures: seq[Future[void]]
|
|
|
|
|
|
|
|
for peer in node.peers.values:
|
|
|
|
if peer.connectionState != Connected: continue
|
|
|
|
|
|
|
|
if peer.metadata.isNone or
|
2021-09-28 07:58:03 +00:00
|
|
|
heartbeatStart_m - peer.lastMetadataTime > MetadataRequestFrequency:
|
2021-10-21 11:01:29 +00:00
|
|
|
updateFutures.add(node.updatePeerMetadata(peer.peerId))
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-02-01 17:20:55 +00:00
|
|
|
await allFutures(updateFutures)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
|
|
|
for peer in node.peers.values:
|
|
|
|
if peer.connectionState != Connected: continue
|
|
|
|
|
2022-02-01 17:20:55 +00:00
|
|
|
if peer.failedMetadataRequests > MetadataRequestMaxFailures:
|
2021-09-28 07:58:03 +00:00
|
|
|
debug "no metadata from peer, kicking it", peer
|
2021-08-23 10:29:50 +00:00
|
|
|
asyncSpawn peer.disconnect(PeerScoreLow)
|
|
|
|
|
2021-09-28 07:58:03 +00:00
|
|
|
await sleepAsync(5.seconds)
|
2021-08-23 10:29:50 +00:00
|
|
|
|
2022-03-11 10:51:53 +00:00
|
|
|
proc peerTrimmerHeartbeat(node: Eth2Node) {.async.} =
|
|
|
|
while true:
|
|
|
|
# Peer trimmer
|
|
|
|
|
|
|
|
# Only count Connected peers
|
|
|
|
# (to avoid counting Disconnecting ones)
|
|
|
|
var connectedPeers = 0
|
|
|
|
for peer in node.peers.values:
|
|
|
|
if peer.connectionState == Connected:
|
|
|
|
inc connectedPeers
|
|
|
|
|
|
|
|
let excessPeers = connectedPeers - node.wantedPeers
|
|
|
|
if excessPeers > 0:
|
|
|
|
# Let chronos take back control after every kick
|
|
|
|
node.trimConnections(1)
|
|
|
|
|
|
|
|
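# The sleep shrinks as the number of excess peers grows, so trimming speeds up
# when far above `wantedPeers` while still yielding to other tasks between kicks.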
await sleepAsync(1.seconds div max(1, excessPeers))
|
|
|
|
|
2020-03-22 20:54:47 +00:00
|
|
|
func asEthKey*(key: PrivateKey): keys.PrivateKey =
|
2020-04-17 13:29:49 +00:00
|
|
|
keys.PrivateKey(key.skkey)
|
2019-11-03 23:02:27 +00:00
|
|
|
|
2022-06-15 08:14:47 +00:00
|
|
|
proc initAddress(T: type MultiAddress, str: string): T =
|
2020-03-22 20:54:47 +00:00
|
|
|
let address = MultiAddress.init(str)
|
|
|
|
if IPFS.match(address) and matchPartial(multiaddress.TCP, address):
|
|
|
|
result = address
|
|
|
|
else:
|
|
|
|
raise newException(MultiAddressError,
|
|
|
|
"Invalid bootstrap node multi-address")
|
|
|
|
|
|
|
|
template tcpEndPoint(address, port): auto =
|
2020-06-05 15:08:50 +00:00
|
|
|
MultiAddress.init(address, tcpProtocol, port)
|
2020-03-22 20:54:47 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
func initNetKeys(privKey: PrivateKey): NetKeyPair =
|
|
|
|
let pubKey = privKey.getPublicKey().expect("working public key from random")
|
2022-05-31 10:45:37 +00:00
|
|
|
NetKeyPair(seckey: privKey, pubkey: pubKey)
|
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
proc getRandomNetKeys*(rng: var HmacDrbgContext): NetKeyPair =
|
|
|
|
let privKey = PrivateKey.random(Secp256k1, rng).valueOr:
|
|
|
|
fatal "Could not generate random network key file"
|
|
|
|
quit QuitFailure
|
|
|
|
initNetKeys(privKey)
|
|
|
|
|
|
|
|
proc getPersistentNetKeys(
|
|
|
|
rng: var HmacDrbgContext,
|
|
|
|
dataDir, netKeyFile: string,
|
|
|
|
netKeyInsecurePassword: bool,
|
|
|
|
allowLoadExisting: bool): NetKeyPair =
|
|
|
|
if netKeyFile == "random":
|
|
|
|
let
|
|
|
|
keys = rng.getRandomNetKeys()
|
|
|
|
pres = PeerId.init(keys.pubkey).valueOr:
|
2022-04-08 16:22:49 +00:00
|
|
|
fatal "Could not obtain PeerId from network key"
|
2020-11-16 14:39:00 +00:00
|
|
|
quit QuitFailure
|
2022-07-13 21:26:16 +00:00
|
|
|
info "Generating new networking key",
|
|
|
|
network_public_key = keys.pubkey, network_peer_id = $pres
|
|
|
|
keys
|
|
|
|
else:
|
|
|
|
let
|
|
|
|
# Insecure password used only for automated testing.
|
|
|
|
insecurePassword =
|
|
|
|
if netKeyInsecurePassword:
|
|
|
|
some(NetworkInsecureKeyPassword)
|
2020-08-19 13:12:10 +00:00
|
|
|
else:
|
2022-07-13 21:26:16 +00:00
|
|
|
none[string]()
|
2020-08-19 13:12:10 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
keyPath =
|
|
|
|
if isAbsolute(netKeyFile):
|
|
|
|
netKeyFile
|
|
|
|
else:
|
|
|
|
dataDir / netKeyFile
|
|
|
|
logScope: key_path = keyPath
|
2020-08-25 10:16:31 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
if fileAccessible(keyPath, {AccessFlags.Find}) and allowLoadExisting:
|
|
|
|
info "Network key storage is present, unlocking"
|
2020-08-25 10:16:31 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
let
|
|
|
|
privKey = loadNetKeystore(keyPath, insecurePassword).valueOr:
|
2020-08-24 16:06:41 +00:00
|
|
|
fatal "Could not load network key file"
|
2020-08-19 13:12:10 +00:00
|
|
|
quit QuitFailure
|
2022-07-13 21:26:16 +00:00
|
|
|
keys = initNetKeys(privKey)
|
|
|
|
info "Network key storage was successfully unlocked",
|
|
|
|
network_public_key = keys.pubkey
|
|
|
|
keys
|
|
|
|
else:
|
|
|
|
if allowLoadExisting:
|
2020-08-24 16:06:41 +00:00
|
|
|
info "Network key storage is missing, creating a new one",
|
2022-07-13 21:26:16 +00:00
|
|
|
key_path = keyPath
|
|
|
|
let
|
|
|
|
keys = rng.getRandomNetKeys()
|
|
|
|
sres = saveNetKeystore(rng, keyPath, keys.seckey, insecurePassword)
|
|
|
|
if sres.isErr():
|
|
|
|
fatal "Could not create network key file"
|
|
|
|
quit QuitFailure
|
2020-08-25 10:16:31 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
info "New network key storage was created",
|
|
|
|
network_public_key = keys.pubkey
|
|
|
|
keys
|
2020-08-19 13:12:10 +00:00
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
proc getPersistentNetKeys*(
|
|
|
|
rng: var HmacDrbgContext, config: BeaconNodeConf): NetKeyPair =
|
|
|
|
case config.cmd
|
|
|
|
of BNStartUpCmd.noCommand, BNStartUpCmd.record:
|
|
|
|
rng.getPersistentNetKeys(
|
|
|
|
string(config.dataDir), config.netKeyFile, config.netKeyInsecurePassword,
|
|
|
|
allowLoadExisting = true)
|
2020-08-24 16:06:41 +00:00
|
|
|
|
2022-05-31 10:45:37 +00:00
|
|
|
of BNStartUpCmd.createTestnet:
|
2021-02-22 16:17:48 +00:00
|
|
|
if config.netKeyFile == "random":
|
2020-08-25 12:49:05 +00:00
|
|
|
fatal "Could not create testnet using `random` network key"
|
|
|
|
quit QuitFailure
|
|
|
|
|
2022-07-13 21:26:16 +00:00
|
|
|
rng.getPersistentNetKeys(
|
|
|
|
string(config.dataDir), config.netKeyFile, config.netKeyInsecurePassword,
|
|
|
|
allowLoadExisting = false)
|
2020-08-19 13:12:10 +00:00
|
|
|
else:
|
2022-07-13 21:26:16 +00:00
|
|
|
rng.getRandomNetKeys()
|
2020-03-22 20:54:47 +00:00
|
|
|
|
2021-10-21 13:09:19 +00:00
|
|
|
func gossipId(
|
2022-03-09 10:30:31 +00:00
|
|
|
data: openArray[byte], altairPrefix, topic: string): seq[byte] =
|
2022-08-17 11:33:19 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.2/specs/phase0/p2p-interface.md#topics-and-messages
|
2022-08-20 16:03:32 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/altair/p2p-interface.md#topics-and-messages
|
2020-11-12 10:45:28 +00:00
|
|
|
const
|
|
|
|
MESSAGE_DOMAIN_INVALID_SNAPPY = [0x00'u8, 0x00, 0x00, 0x00]
|
|
|
|
MESSAGE_DOMAIN_VALID_SNAPPY = [0x01'u8, 0x00, 0x00, 0x00]
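# Only the valid-snappy domain is used below: `msgIdProvider` (defined in
# `createEth2Node`) rejects messages whose snappy payload cannot be decoded,
# so no message id is derived from undecodable data in this code path.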
|
2020-10-20 12:31:20 +00:00
|
|
|
let messageDigest = withEth2Hash:
|
2022-03-09 10:30:31 +00:00
|
|
|
h.update(MESSAGE_DOMAIN_VALID_SNAPPY)
|
2021-10-21 13:09:19 +00:00
|
|
|
|
|
|
|
if topic.startsWith(altairPrefix):
|
2021-06-25 00:07:46 +00:00
|
|
|
h.update topic.len.uint64.toBytesLE
|
|
|
|
h.update topic
|
2020-10-20 08:54:11 +00:00
|
|
|
|
2021-10-21 13:09:19 +00:00
|
|
|
h.update data
|
2021-07-07 09:09:47 +00:00
|
|
|
|
2021-10-21 13:09:19 +00:00
|
|
|
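# Per the spec links above, the message id is the first 20 bytes of the
# SHA-256 digest.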
messageDigest.data[0..19]
|
2020-06-28 20:06:50 +00:00
|
|
|
|
2022-06-15 08:14:47 +00:00
|
|
|
proc newBeaconSwitch(config: BeaconNodeConf | LightClientConf,
|
|
|
|
seckey: PrivateKey, address: MultiAddress,
|
2022-06-21 08:29:16 +00:00
|
|
|
rng: ref HmacDrbgContext): Switch {.raises: [Defect, CatchableError].} =
|
2021-10-04 18:42:34 +00:00
|
|
|
SwitchBuilder
|
|
|
|
.new()
|
|
|
|
.withPrivateKey(seckey)
|
|
|
|
.withAddress(address)
|
|
|
|
.withRng(rng)
|
|
|
|
.withNoise()
|
2022-05-31 10:45:37 +00:00
|
|
|
.withMplex(chronos.minutes(5), chronos.minutes(5))
|
2021-10-04 18:42:34 +00:00
|
|
|
.withMaxConnections(config.maxPeers)
|
|
|
|
.withAgentVersion(config.agentString)
|
|
|
|
.withTcpTransport({ServerFlags.ReuseAddr})
|
|
|
|
.build()
|
2020-11-28 07:00:36 +00:00
|
|
|
|
2022-01-03 16:20:15 +00:00
|
|
|
func maxGossipMaxSize(): auto {.compileTime.} =
|
2022-01-18 16:31:05 +00:00
|
|
|
max(GOSSIP_MAX_SIZE, GOSSIP_MAX_SIZE_BELLATRIX)
|
2022-01-03 16:20:15 +00:00
|
|
|
|
|
|
|
template gossipMaxSize(T: untyped): uint32 =
|
|
|
|
const maxSize = static:
|
|
|
|
when isFixedSize(T):
|
|
|
|
fixedPortionSize(T)
|
2022-01-12 14:50:30 +00:00
|
|
|
elif T is bellatrix.SignedBeaconBlock:
|
2022-01-18 16:31:05 +00:00
|
|
|
GOSSIP_MAX_SIZE_BELLATRIX
|
2022-01-03 16:20:15 +00:00
|
|
|
# TODO https://github.com/status-im/nim-ssz-serialization/issues/20 for
|
|
|
|
# Attestation, AttesterSlashing, and SignedAggregateAndProof, which all
|
|
|
|
# have lists bounded at MAX_VALIDATORS_PER_COMMITTEE (2048) items, thus
|
|
|
|
# having max sizes significantly smaller than GOSSIP_MAX_SIZE.
|
|
|
|
elif T is Attestation or T is AttesterSlashing or
|
|
|
|
T is SignedAggregateAndProof or T is phase0.SignedBeaconBlock or
|
|
|
|
T is altair.SignedBeaconBlock:
|
|
|
|
GOSSIP_MAX_SIZE
|
|
|
|
else:
|
2022-05-05 05:45:35 +00:00
|
|
|
{.fatal: "unknown type " & name(T).}
|
2022-01-03 16:20:15 +00:00
|
|
|
static: doAssert maxSize <= maxGossipMaxSize()
|
|
|
|
maxSize.uint32
|
|
|
|
|
2022-06-21 08:29:16 +00:00
|
|
|
proc createEth2Node*(rng: ref HmacDrbgContext,
|
2022-05-31 10:45:37 +00:00
|
|
|
config: BeaconNodeConf | LightClientConf,
|
2021-03-19 02:22:45 +00:00
|
|
|
netKeys: NetKeyPair,
|
Implement split preset/config support (#2710)
* Implement split preset/config support
This is the initial bulk refactor to introduce runtime config values in
a number of places, somewhat replacing the existing mechanism of loading
network metadata.
It still needs more work, this is the initial refactor that introduces
runtime configuration in some of the places that need it.
The PR changes the way presets and constants work, to match the spec. In
particular, a "preset" now refers to the compile-time configuration
while a "cfg" or "RuntimeConfig" is the dynamic part.
A single binary can support either mainnet or minimal, but not both.
Support for other presets has been removed completely (can be readded,
in case there's need).
There's a number of outstanding tasks:
* `SECONDS_PER_SLOT` still needs fixing
* loading custom runtime configs needs redoing
* checking constants against YAML file
* yeerongpilly support
`build/nimbus_beacon_node --network=yeerongpilly --discv5:no --log-level=DEBUG`
* load fork epoch from config
* fix fork digest sent in status
* nicer error string for request failures
* fix tools
* one more
* fixup
* fixup
* fixup
* use "standard" network definition folder in local testnet
Files are loaded from their standard locations, including genesis etc,
to conform to the format used in the `eth2-networks` repo.
* fix launch scripts, allow unknown config values
* fix base config of rest test
* cleanups
* bundle mainnet config using common loader
* fix spec links and names
* only include supported preset in binary
* drop yeerongpilly, add altair-devnet-0, support boot_enr.yaml
2021-07-12 13:01:38 +00:00
|
|
|
cfg: RuntimeConfig,
|
2021-11-05 07:34:34 +00:00
|
|
|
forkDigests: ref ForkDigests,
|
2021-08-19 10:45:31 +00:00
|
|
|
getBeaconTime: GetBeaconTimeFn,
|
2022-04-08 16:22:49 +00:00
|
|
|
genesis_validators_root: Eth2Digest): Eth2Node
|
2021-07-07 09:09:47 +00:00
|
|
|
{.raises: [Defect, CatchableError].} =
|
2021-08-10 20:46:35 +00:00
|
|
|
  let
    enrForkId = getENRForkID(
      cfg, getBeaconTime().slotOrZero.epoch, genesis_validators_root)
    discoveryForkId = getDiscoveryForkID(
      cfg, getBeaconTime().slotOrZero.epoch, genesis_validators_root)
    (extIp, extTcpPort, extUdpPort) = try: setupAddress(
      config.nat, config.listenAddress, config.tcpPort, config.udpPort, clientId)
    except CatchableError as exc: raise exc
    except Exception as exc: raiseAssert exc.msg
    hostAddress = tcpEndPoint(config.listenAddress, config.tcpPort)
    announcedAddresses = if extIp.isNone() or extTcpPort.isNone(): @[]
                         else: @[tcpEndPoint(extIp.get(), extTcpPort.get())]

  debug "Initializing networking", hostAddress,
        network_public_key = netKeys.pubkey,
        announcedAddresses

  # TODO nim-libp2p still doesn't have support for announcing addresses
  # that are different from the host address (this is relevant when we
  # are running behind a NAT).
  var switch = newBeaconSwitch(config, netKeys.seckey, hostAddress, rng)

  let altairPrefix = "/eth2/" & $forkDigests.altair

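  # The message id is computed by `gossipId` from the snappy-decompressed
  # payload; the altair prefix is passed along so `gossipId` can pick the
  # fork-appropriate scheme. Payloads that fail to decompress within the size
  # bound are rejected outright.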
  func msgIdProvider(m: messages.Message): Result[seq[byte], ValidationResult] =
    template topic: untyped =
      if m.topicIDs.len > 0: m.topicIDs[0] else: ""

    try:
      # This doesn't have to be a tight bound, just enough to avoid denial of
      # service attacks.
      let decoded = snappy.decode(m.data, maxGossipMaxSize())
      ok(gossipId(decoded, altairPrefix, topic))
    except CatchableError:
      err(ValidationResult.Reject)

  let
    params = GossipSubParams(
      explicit: true,
      pruneBackoff: chronos.minutes(1),
      unsubscribeBackoff: chronos.seconds(10),
      floodPublish: true,
      gossipFactor: 0.05,
      d: 8,
      dLow: 6,
      dHigh: 12,
      dScore: 6,
      dOut: 6 div 2, # less than dlow and no more than dlow/2
      dLazy: 6,
      heartbeatInterval: chronos.milliseconds(700),
      historyLength: 6,
      historyGossip: 3,
      fanoutTTL: chronos.seconds(60),
      seenTTL: chronos.seconds(385),
      gossipThreshold: -4000,
      publishThreshold: -8000,
      graylistThreshold: -16000, # also disconnect threshold
      opportunisticGraftThreshold: 0,
      decayInterval: chronos.seconds(12),
      decayToZero: 0.01,
      retainScore: chronos.seconds(385),
      appSpecificWeight: 0.0,
      ipColocationFactorWeight: -53.75,
      ipColocationFactorThreshold: 3.0,
      behaviourPenaltyWeight: -15.9,
      behaviourPenaltyDecay: 0.986,
      disconnectBadPeers: true,
      directPeers:
        block:
          var res = initTable[PeerId, seq[MultiAddress]]()
          if config.directPeers.len > 0:
            for s in config.directPeers:
              let
                maddress = MultiAddress.init(s).tryGet()
                mpeerId = maddress[multiCodec("p2p")].tryGet()
                peerId = PeerId.init(mpeerId.protoAddress().tryGet()).tryGet()
              res.mgetOrPut(peerId, @[]).add(maddress)
              info "Adding privileged direct peer", peerId, address = maddress
          res
    )

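    # All validation happens in the application-level validators registered
    # via `addValidator`/`addAsyncValidator`, so libp2p-level signing and
    # signature verification are disabled and messages are anonymized.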
    pubsub = GossipSub.init(
      switch = switch,
      msgIdProvider = msgIdProvider,
      # We process messages in the validator, so we don't need data callbacks
      triggerSelf = false,
      sign = false,
      verifySignature = false,
      anonymize = true,
      maxMessageSize = maxGossipMaxSize(),
      parameters = params)

  switch.mount(pubsub)

  let node = Eth2Node.new(
    config, cfg, enrForkId, discoveryForkId, forkDigests, getBeaconTime, switch, pubsub, extIp,
    extTcpPort, extUdpPort, netKeys.seckey.asEthKey,
    discovery = config.discv5Enabled, rng = rng)

  node.pubsub.subscriptionValidator =
    proc(topic: string): bool {.gcsafe, raises: [Defect].} =
      topic in node.validTopics

  node

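# A minimal call-site sketch (assuming `rng`, `config`, `netKeys`, `cfg`,
# `forkDigests`, `getBeaconTime` and `genesis_validators_root` are prepared
# elsewhere during node startup):
#
#   let node = createEth2Node(
#     rng, config, netKeys, cfg, forkDigests, getBeaconTime,
#     genesis_validators_root)
#
# followed by the start/listen calls defined elsewhere in this module.
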
func announcedENR*(node: Eth2Node): enr.Record =
  doAssert node.discovery != nil, "The Eth2Node must be initialized"
  node.discovery.localNode.record

func shortForm*(id: NetKeyPair): string =
  $PeerId.init(id.pubkey)

proc subscribe*(
    node: Eth2Node, topic: string, topicParams: TopicParams,
    enableTopicMetrics: bool = false) =
  if enableTopicMetrics:
    node.pubsub.knownTopics.incl(topic)

  node.pubsub.topicParams[topic] = topicParams

  # Passing in `nil` because we do all message processing in the validator
  node.pubsub.subscribe(topic, nil)

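# Returns an already-completed future so that the synchronous validators
# registered below can be exposed through libp2p's future-based validator
# interface without an `{.async.}` wrapper.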
proc newValidationResultFuture(v: ValidationResult): Future[ValidationResult] =
  let res = newFuture[ValidationResult]("eth2_network.execValidator")
  res.complete(v)
  res

proc addValidator*[MsgType](node: Eth2Node,
                            topic: string,
                            msgValidator: proc(msg: MsgType):
                              ValidationResult {.gcsafe, raises: [Defect].} ) =
  # Message validators run when subscriptions are enabled - they validate the
  # data and return an indication of whether the message should be broadcast
  # or not - validation is `async` but implemented without the macro because
  # this is a performance hotspot.
  proc execValidator(topic: string, message: GossipMsg):
      Future[ValidationResult] {.raises: [Defect].} =
    inc nbc_gossip_messages_received
    trace "Validating incoming gossip message", len = message.data.len, topic

    var decompressed = snappy.decode(message.data, gossipMaxSize(MsgType))
    let res = if decompressed.len > 0:
      try:
        let decoded = SSZ.decode(decompressed, MsgType)
        decompressed = newSeq[byte](0) # release memory before validating
        msgValidator(decoded) # doesn't raise!
      except SszError as e:
        inc nbc_gossip_failed_ssz
        debug "Error decoding gossip",
          topic, len = message.data.len, decompressed = decompressed.len,
          error = e.msg
        ValidationResult.Reject
    else: # snappy returns empty seq on failed decompression
      inc nbc_gossip_failed_snappy
      debug "Error decompressing gossip", topic, len = message.data.len
      ValidationResult.Reject

    newValidationResultFuture(res)

  node.validTopics.incl topic # Only allow subscription to validated topics
  node.pubsub.addValidator(topic, execValidator)

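# Same decompress-then-SSZ-decode pipeline as `addValidator`, but here the
# caller's validator is itself asynchronous, so its future is returned
# directly instead of being wrapped via `newValidationResultFuture`.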
proc addAsyncValidator*[MsgType](node: Eth2Node,
                                 topic: string,
                                 msgValidator: proc(msg: MsgType):
                                   Future[ValidationResult] {.gcsafe, raises: [Defect].} ) =
  proc execValidator(topic: string, message: GossipMsg):
      Future[ValidationResult] {.raises: [Defect].} =
    inc nbc_gossip_messages_received
    trace "Validating incoming gossip message", len = message.data.len, topic

    var decompressed = snappy.decode(message.data, gossipMaxSize(MsgType))
    if decompressed.len > 0:
      try:
        let decoded = SSZ.decode(decompressed, MsgType)
        decompressed = newSeq[byte](0) # release memory before validating
        msgValidator(decoded) # doesn't raise!
      except SszError as e:
        inc nbc_gossip_failed_ssz
        debug "Error decoding gossip",
          topic, len = message.data.len, decompressed = decompressed.len,
          error = e.msg
        newValidationResultFuture(ValidationResult.Reject)
    else: # snappy returns empty seq on failed decompression
      inc nbc_gossip_failed_snappy
      debug "Error decompressing gossip", topic, len = message.data.len
      newValidationResultFuture(ValidationResult.Reject)

  node.validTopics.incl topic # Only allow subscription to validated topics

  node.pubsub.addValidator(topic, execValidator)

proc unsubscribe*(node: Eth2Node, topic: string) =
  node.pubsub.unsubscribeAll(topic)

proc gossipEncode(msg: auto): seq[byte] =
  let uncompressed = SSZ.encode(msg)
  # This function is only for messages we create. A message this large amounts
  # to an internal logic error.
  doAssert uncompressed.len <= maxGossipMaxSize()

  snappy.encode(uncompressed)

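# Publishes an already-encoded payload and reports, via `Result`, whether the
# message reached at least one peer; the typed overload below handles the
# SSZ + snappy framing through `gossipEncode`.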
proc broadcast(node: Eth2Node, topic: string, msg: seq[byte]):
    Future[Result[void, cstring]] {.async.} =
  let peers = await node.pubsub.publish(topic, msg)

  # TODO remove workaround for sync committee BN/VC log spam
  if peers > 0 or find(topic, "sync_committee_") != -1:
    inc nbc_gossip_messages_sent
    return ok()
  else:
    # Increments libp2p_gossipsub_failed_publish metric
    return err("No peers on libp2p topic")

proc broadcast(node: Eth2Node, topic: string, msg: auto):
    Future[Result[void, cstring]] =
  # Avoid {.async.} copies of message while broadcasting
  broadcast(node, topic, gossipEncode(msg))

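# Sketch of how a caller might toggle attestation subnets (assuming `node`
# and the current `forkDigest` are in scope, and that `AttnetBits` supports
# `[]=` as a bit array):
#
#   var subnets: AttnetBits
#   subnets[1] = true
#   subnets[7] = true
#   node.subscribeAttestationSubnets(subnets, forkDigest)
#   # ... and later, to leave the same subnets:
#   node.unsubscribeAttestationSubnets(subnets, forkDigest)
#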
proc subscribeAttestationSubnets*(
    node: Eth2Node, subnets: AttnetBits, forkDigest: ForkDigest) =
  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.2/specs/phase0/p2p-interface.md#attestations-and-aggregation
  # Nimbus won't score attestation subnets for now; we just rely on block and
  # aggregate topics, which are more stable and reliable

  for subnet_id, enabled in subnets:
    if enabled:
      node.subscribe(getAttestationTopic(
        forkDigest, SubnetId(subnet_id)), TopicParams.init()) # don't score attestation subnets for now

proc unsubscribeAttestationSubnets*(
    node: Eth2Node, subnets: AttnetBits, forkDigest: ForkDigest) =
  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.2/specs/phase0/p2p-interface.md#attestations-and-aggregation
  # Nimbus won't score attestation subnets for now; we just rely on block and
  # aggregate topics, which are more stable and reliable

  for subnet_id, enabled in subnets:
    if enabled:
      node.unsubscribe(getAttestationTopic(forkDigest, SubnetId(subnet_id)))

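# The metadata updaters below bump the local MetaData `seq_number` on every
# change and mirror the new bitfields into the node's ENR, so that discovery
# peers can see the updated subnet participation.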
proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.2/specs/phase0/p2p-interface.md#metadata
  if node.metadata.attnets == attnets:
    return

  node.metadata.seq_number += 1
  node.metadata.attnets = attnets

  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.2/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
  let res = node.discovery.updateRecord({
    enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets)
  })
  if res.isErr():
    # This should not occur in this scenario as the private key would always
    # be the correct one and the ENR will not increase in size.
    warn "Failed to update the ENR attnets field", error = res.error
  else:
    debug "Stability subnets changed; updated ENR attnets", attnets

proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) =
  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/altair/validator.md#sync-committee-subnet-stability
  if node.metadata.syncnets == syncnets:
    return

  node.metadata.seq_number += 1
  node.metadata.syncnets = syncnets

  let res = node.discovery.updateRecord({
    enrSyncSubnetsField: SSZ.encode(node.metadata.syncnets)
  })
  if res.isErr():
    # This should not occur in this scenario as the private key would always
    # be the correct one and the ENR will not increase in size.
    warn "Failed to update the ENR syncnets field", error = res.error
  else:
    debug "Sync committees changed; updated ENR syncnets", syncnets

proc updateForkId(node: Eth2Node, value: ENRForkID) =
  node.forkId = value
  let res = node.discovery.updateRecord({enrForkIdField: SSZ.encode value})
  if res.isErr():
    # This should not occur in this scenario as the private key would always
    # be the correct one and the ENR will not increase in size.
    warn "Failed to update the ENR fork id", value, error = res.error
  else:
    debug "ENR fork id changed", value

proc updateForkId*(node: Eth2Node, epoch: Epoch, genesis_validators_root: Eth2Digest) =
  node.updateForkId(getENRForkID(node.cfg, epoch, genesis_validators_root))
  node.discoveryForkId = getDiscoveryForkID(node.cfg, epoch, genesis_validators_root)

func forkDigestAtEpoch(node: Eth2Node, epoch: Epoch): ForkDigest =
  node.forkDigests[].atEpoch(epoch, node.cfg)

proc getWallEpoch(node: Eth2Node): Epoch =
  node.getBeaconTime().slotOrZero.epoch

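# The broadcast helpers below pick their gossip topic from a fork digest:
# most use the current wall-clock epoch, the block overloads use the digest
# matching the block's own fork, and the light client updates use the epoch
# of the attested header.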
proc broadcastAttestation*(
    node: Eth2Node, subnet_id: SubnetId, attestation: Attestation):
    Future[SendResult] =
  # Regardless of the contents of the attestation,
  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/altair/p2p-interface.md#transitioning-the-gossip
  # implies that pre-fork, messages using post-fork digests might be
  # ignored, whilst post-fork, there is effectively a seen_ttl-based
  # timer unsubscription point that means no new pre-fork-forkdigest
  # should be sent.
  let
    forkPrefix = node.forkDigestAtEpoch(node.getWallEpoch)
    topic = getAttestationTopic(forkPrefix, subnet_id)
  node.broadcast(topic, attestation)

proc broadcastVoluntaryExit*(
    node: Eth2Node, exit: SignedVoluntaryExit): Future[SendResult] =
  let topic = getVoluntaryExitsTopic(node.forkDigestAtEpoch(node.getWallEpoch))
  node.broadcast(topic, exit)

proc broadcastAttesterSlashing*(
    node: Eth2Node, slashing: AttesterSlashing): Future[SendResult] =
  let topic = getAttesterSlashingsTopic(
    node.forkDigestAtEpoch(node.getWallEpoch))
  node.broadcast(topic, slashing)

proc broadcastProposerSlashing*(
    node: Eth2Node, slashing: ProposerSlashing): Future[SendResult] =
  let topic = getProposerSlashingsTopic(
    node.forkDigestAtEpoch(node.getWallEpoch))
  node.broadcast(topic, slashing)

proc broadcastAggregateAndProof*(
    node: Eth2Node, proof: SignedAggregateAndProof): Future[SendResult] =
  let topic = getAggregateAndProofsTopic(
    node.forkDigestAtEpoch(node.getWallEpoch))
  node.broadcast(topic, proof)

proc broadcastBeaconBlock*(
    node: Eth2Node, blck: phase0.SignedBeaconBlock): Future[SendResult] =
  let topic = getBeaconBlocksTopic(node.forkDigests.phase0)
  node.broadcast(topic, blck)

proc broadcastBeaconBlock*(
    node: Eth2Node, blck: altair.SignedBeaconBlock): Future[SendResult] =
  let topic = getBeaconBlocksTopic(node.forkDigests.altair)
  node.broadcast(topic, blck)

proc broadcastBeaconBlock*(
    node: Eth2Node, blck: bellatrix.SignedBeaconBlock): Future[SendResult] =
  let topic = getBeaconBlocksTopic(node.forkDigests.bellatrix)
  node.broadcast(topic, blck)

proc broadcastBeaconBlock*(
    node: Eth2Node, forked: ForkedSignedBeaconBlock): Future[SendResult] =
  withBlck(forked): node.broadcastBeaconBlock(blck)

proc broadcastSyncCommitteeMessage*(
    node: Eth2Node, msg: SyncCommitteeMessage,
    subcommitteeIdx: SyncSubcommitteeIndex): Future[SendResult] =
  let topic = getSyncCommitteeTopic(
    node.forkDigestAtEpoch(node.getWallEpoch), subcommitteeIdx)
  node.broadcast(topic, msg)

proc broadcastSignedContributionAndProof*(
    node: Eth2Node, msg: SignedContributionAndProof): Future[SendResult] =
  let topic = getSyncCommitteeContributionAndProofTopic(
    node.forkDigestAtEpoch(node.getWallEpoch))
  node.broadcast(topic, msg)

proc broadcastLightClientFinalityUpdate*(
    node: Eth2Node, msg: altair.LightClientFinalityUpdate):
    Future[SendResult] =
  let topic = getLightClientFinalityUpdateTopic(
    node.forkDigestAtEpoch(msg.attested_header.slot.epoch))
  node.broadcast(topic, msg)

proc broadcastLightClientOptimisticUpdate*(
    node: Eth2Node, msg: altair.LightClientOptimisticUpdate):
    Future[SendResult] =
  let topic = getLightClientOptimisticUpdateTopic(
    node.forkDigestAtEpoch(msg.attested_header.slot.epoch))
  node.broadcast(topic, msg)