# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  # Std lib
  std/[typetraits, sequtils, os, algorithm, math, sets, strutils],
  std/options as stdOptions,

  # Status libs
  stew/[leb128, base58, endians2, results, byteutils, io2], bearssl,
  stew/shims/net as stewNet,
  stew/shims/[macros, tables],
  faststreams/[inputs, outputs, buffers], snappy, snappy/framing,
  json_serialization, json_serialization/std/[net, options],
  chronos, chronicles, metrics,
  libp2p/[switch, peerinfo, multicodec,
          multiaddress, crypto/crypto, crypto/secp,
          protocols/identify, protocols/protocol,
          builders],
  libp2p/muxers/muxer, libp2p/muxers/mplex/mplex,
  libp2p/transports/[transport, tcptransport],
  libp2p/protocols/secure/[secure, noise],
  libp2p/protocols/pubsub/[pubsub, gossipsub, rpc/message, rpc/messages],
  libp2p/transports/tcptransport,
  libp2p/stream/connection,
  libp2p/utils/semaphore,
  eth/[keys, async_utils], eth/p2p/p2p_protocol_dsl,
  eth/net/nat, eth/p2p/discoveryv5/[enr, node, random2],
  ".."/[
    version, conf,
    ssz/ssz_serialization, beacon_clock],
  ../spec/datatypes/[phase0, altair],
  ../spec/[digest, network, helpers, forks],
  ../validators/keystore_management,
  ./eth2_discovery, ./peer_pool, ./libp2p_json_serialization

when chronicles.enabledLogLevel == LogLevel.TRACE:
  import std/sequtils

export
  version, multiaddress, peer_pool, peerinfo, p2pProtocol, connection,
  libp2p_json_serialization, ssz_serialization, results, eth2_discovery

logScope:
  topics = "networking"

type
  NetKeyPair* = crypto.KeyPair
  PublicKey* = crypto.PublicKey
  PrivateKey* = crypto.PrivateKey

  Bytes = seq[byte]
  ErrorMsg = List[byte, 256]

  # TODO: This is here only to eradicate a compiler
  # warning about unused import (rpc/messages).
  GossipMsg = messages.Message

  SeenItem* = object
    peerId*: PeerID
    stamp*: chronos.Moment

  Eth2Node* = ref object of RootObj
    switch*: Switch
    pubsub*: GossipSub
    discovery*: Eth2DiscoveryProtocol
    discoveryEnabled*: bool
    wantedPeers*: int
    peerPool*: PeerPool[Peer, PeerID]
    protocolStates*: seq[RootRef]
    libp2pTransportLoops*: seq[Future[void]]
    metadata*: altair.MetaData
    connectTimeout*: chronos.Duration
    seenThreshold*: chronos.Duration
    connQueue: AsyncQueue[PeerAddr]
    seenTable: Table[PeerID, SeenItem]
    connWorkers: seq[Future[void]]
    connTable: HashSet[PeerID]
    forkId*: ENRForkID
    forkDigests*: ForkDigestsRef
    rng*: ref BrHmacDrbgContext
    peers*: Table[PeerID, Peer]
    validTopics: HashSet[string]

  EthereumNode = Eth2Node # needed for the definitions in p2p_backends_helpers

  AverageThroughput* = object
    count*: uint64
    average*: float

  Peer* = ref object
    network*: Eth2Node
    info*: PeerInfo
    discoveryId*: Eth2DiscoveryId
    connectionState*: ConnectionState
    protocolStates*: seq[RootRef]
    netThroughput: AverageThroughput
    score*: int
    requestQuota*: float
    lastReqTime*: Moment
    connections*: int
    enr*: Option[enr.Record]
    direction*: PeerType
    disconnectedFut: Future[void]

  PeerAddr* = object
    peerId*: PeerID
    addrs*: seq[MultiAddress]

  ConnectionState* = enum
    None,
    Connecting,
    Connected,
    Disconnecting,
    Disconnected

  UntypedResponse* = ref object
    peer*: Peer
    stream*: Connection
    writtenChunks*: int

  SingleChunkResponse*[MsgType] = distinct UntypedResponse
    ## Protocol requests using this type will produce request-making
    ## client-side procs that return `NetRes[MsgType]`

  MultipleChunksResponse*[MsgType] = distinct UntypedResponse
    ## Protocol requests using this type will produce request-making
    ## client-side procs that return `NetRes[seq[MsgType]]`.
    ## In the future, such procs will return an `InputStream[NetRes[MsgType]]`.

  MessageInfo* = object
    name*: string

    # Private fields:
    libp2pCodecName: string
    protocolMounter*: MounterProc

  ProtocolInfoObj* = object
    name*: string
    messages*: seq[MessageInfo]
    index*: int # the position of the protocol in the
                # ordered list of supported protocols

    # Private fields:
    peerStateInitializer*: PeerStateInitializer
    networkStateInitializer*: NetworkStateInitializer
    onPeerConnected*: OnPeerConnectedHandler
    onPeerDisconnected*: OnPeerDisconnectedHandler

  ProtocolInfo* = ptr ProtocolInfoObj

  ResponseCode* = enum
    Success
    InvalidRequest
    ServerError

  PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe, raises: [Defect].}
  NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe, raises: [Defect].}
  OnPeerConnectedHandler* = proc(peer: Peer, incoming: bool): Future[void] {.gcsafe.}
  OnPeerDisconnectedHandler* = proc(peer: Peer): Future[void] {.gcsafe, raises: [Defect].}
  ThunkProc* = LPProtoHandler
  MounterProc* = proc(network: Eth2Node) {.gcsafe, raises: [Defect, CatchableError].}
  MessageContentPrinter* = proc(msg: pointer): string {.gcsafe, raises: [Defect].}

  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#goodbye
  DisconnectionReason* = enum
    # might see other values on the wire!
    ClientShutDown = 1
    IrrelevantNetwork = 2
    FaultOrError = 3
    # Clients MAY use reason codes above 128 to indicate alternative,
    # erroneous request-specific responses.
    PeerScoreLow = 237 # 79 * 3

  PeerDisconnected* = object of CatchableError
    reason*: DisconnectionReason

  TransmissionError* = object of CatchableError

  Eth2NetworkingErrorKind* = enum
    BrokenConnection
    ReceivedErrorResponse
    UnexpectedEOF
    PotentiallyExpectedEOF
    InvalidResponseCode
    InvalidSnappyBytes
    InvalidSszBytes
    StreamOpenTimeout
    ReadResponseTimeout
    ZeroSizePrefix
    SizePrefixOverflow
    InvalidContextBytes

  Eth2NetworkingError = object
    case kind*: Eth2NetworkingErrorKind
    of ReceivedErrorResponse:
      responseCode: ResponseCode
      errorMsg: string
    else:
      discard

  InvalidInputsError* = object of CatchableError

  NetRes*[T] = Result[T, Eth2NetworkingError]
    ## This is the type returned from all network requests

func phase0metadata*(node: Eth2Node): phase0.MetaData =
  phase0.MetaData(
    seq_number: node.metadata.seq_number,
    attnets: node.metadata.attnets)
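
# Note: the node stores its metadata in the altair form; altair's MetaData
# extends the phase0 one (it adds a `syncnets` field), so the phase0 view above
# is obtained by simply copying `seq_number` and `attnets`.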

const
  clientId* = "Nimbus beacon node " & fullVersionStr
  nodeMetadataFilename = "node-metadata.json"

  NewPeerScore* = 200
    ## Score assigned to a newly connected peer
  PeerScoreLowLimit* = 0
    ## Score after which the peer will be kicked
  PeerScoreHighLimit* = 1000
    ## Maximum value of a peer's score
  PeerScoreInvalidRequest* = -500
    ## This peer is sending malformed or nonsensical data

  ConcurrentConnections = 10
    ## Maximum number of active concurrent connection requests.

  SeenTableTimeTimeout =
    when not defined(local_testnet): 5.minutes else: 10.seconds
    ## Period of time for timed-out connections.
  SeenTableTimeDeadPeer =
    when not defined(local_testnet): 5.minutes else: 10.seconds
    ## Period of time for dead peers.
  SeenTableTimeIrrelevantNetwork = 24.hours
    ## Period of time for the `IrrelevantNetwork` error reason.
  SeenTableTimeClientShutDown = 10.minutes
    ## Period of time for the `ClientShutDown` error reason.
  SeenTableTimeFaultOrError = 10.minutes
    ## Period of time for the `FaultOrError` error reason.
  SeenTablePenaltyError = 60.minutes
    ## Period of time for peers whose score is below or equal to zero.
  SeenTableTimeReconnect = 1.minutes
    ## Minimal time between a disconnection and a reconnection attempt.

template neterr*(kindParam: Eth2NetworkingErrorKind): auto =
  err(type(result), Eth2NetworkingError(kind: kindParam))

# Metrics for tracking attestation and beacon block loss
declareCounter nbc_gossip_messages_sent,
  "Number of gossip messages sent by this peer"

declareCounter nbc_gossip_messages_received,
  "Number of gossip messages received by this peer"

declareCounter nbc_successful_dials,
  "Number of successfully dialed peers"

declareCounter nbc_failed_dials,
  "Number of dialing attempts that failed"

declareCounter nbc_timeout_dials,
  "Number of dialing attempts that exceeded timeout"

declareGauge nbc_peers,
  "Number of active libp2p peers"

declareCounter nbc_successful_discoveries,
  "Number of successful discoveries"

declareCounter nbc_failed_discoveries,
  "Number of failed discoveries"

const delayBuckets = [1.0, 5.0, 10.0, 20.0, 40.0, 60.0]

declareHistogram nbc_resolve_time,
  "Time(s) used while resolving peer information",
  buckets = delayBuckets

const
  snappy_implementation {.strdefine.} = "libp2p"

const useNativeSnappy = when snappy_implementation == "native": true
                        elif snappy_implementation == "libp2p": false
                        else: {.fatal: "Please set snappy_implementation to either 'libp2p' or 'native'".}
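
# `snappy_implementation` is a compile-time string define, so the faststreams
# based codepath can be selected at build time, e.g. with something like
# `-d:snappy_implementation=native` passed to the Nim compiler; the default
# keeps the libp2p streams back-end.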

const
  libp2p_pki_schemes {.strdefine.} = ""

when libp2p_pki_schemes != "secp256k1":
  {.fatal: "Incorrect building process, please use -d:\"libp2p_pki_schemes=secp256k1\"".}

const
  NetworkInsecureKeyPassword = "INSECUREPASSWORD"

template libp2pProtocol*(name: string, version: int) {.pragma.}

func shortLog*(peer: Peer): string = shortLog(peer.info.peerId)
chronicles.formatIt(Peer): shortLog(it)
chronicles.formatIt(PublicKey): byteutils.toHex(it.getBytes().tryGet())

template remote*(peer: Peer): untyped =
  peer.info.peerId

proc openStream(node: Eth2Node,
                peer: Peer,
                protocolId: string): Future[Connection] {.async.} =
  # When dialling here, we do not provide addresses - all new connection
  # attempts are handled via `connect` which also takes into account
  # reconnection timeouts
  let
    protocolId = protocolId & "ssz_snappy"
    conn = await dial(
      node.switch, peer.info.peerId, protocolId)

  # libp2p may replace peerinfo ref sometimes, so make sure we have a recent
  # one
  if conn.peerInfo != nil:
    peer.info = conn.peerInfo

  return conn

proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer {.gcsafe.}

func peerId*(node: Eth2Node): PeerID =
  node.switch.peerInfo.peerId

func enrRecord*(node: Eth2Node): Record =
  node.discovery.localNode.record

proc getPeer*(node: Eth2Node, peerId: PeerID): Peer =
  node.peers.withValue(peerId, peer) do:
    return peer[]
  do:
    let peer = Peer.init(node, PeerInfo.init(peerId))
    return node.peers.mgetOrPut(peerId, peer)

proc peerFromStream(network: Eth2Node, conn: Connection): Peer =
  result = network.getPeer(conn.peerInfo.peerId)
  result.info = conn.peerInfo

proc getKey*(peer: Peer): PeerID {.inline.} =
  peer.info.peerId

proc getFuture*(peer: Peer): Future[void] {.inline.} =
  if isNil(peer.disconnectedFut):
    peer.disconnectedFut = newFuture[void]("Peer.disconnectedFut")
  peer.disconnectedFut

proc getScore*(a: Peer): int =
  ## Returns current score value for peer ``peer``.
  a.score

proc updateScore*(peer: Peer, score: int) {.inline.} =
  ## Update peer's ``peer`` score with value ``score``.
  peer.score = peer.score + score
  if peer.score > PeerScoreHighLimit:
    peer.score = PeerScoreHighLimit

proc calcThroughput(dur: Duration, value: uint64): float =
  let secs = float(chronos.seconds(1).nanoseconds)
  if isZero(dur):
    0.0
  else:
    float(value) * (secs / float(dur.nanoseconds))

proc updateNetThroughput*(peer: Peer, dur: Duration,
                          bytesCount: uint64) {.inline.} =
  ## Update peer's ``peer`` network throughput.
  let bytesPerSecond = calcThroughput(dur, bytesCount)
  let a = peer.netThroughput.average
  let n = peer.netThroughput.count
  peer.netThroughput.average = a + (bytesPerSecond - a) / float(n + 1)
  inc(peer.netThroughput.count)
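
# The update above keeps an incremental arithmetic mean over all samples
# without storing them: given n previous samples and a new sample x,
# avg' = avg + (x - avg) / (n + 1). For example, after measuring 100, 200 and
# 300 bytes/s, the stored average is 200.0.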

proc netBps*(peer: Peer): float {.inline.} =
  ## Returns current network throughput average value in Bps for peer ``peer``.
  round((peer.netThroughput.average * 10_000) / 10_000)

proc netKbps*(peer: Peer): float {.inline.} =
  ## Returns current network throughput average value in Kbps for peer ``peer``.
  round(((peer.netThroughput.average / 1024) * 10_000) / 10_000)

proc netMbps*(peer: Peer): float {.inline.} =
  ## Returns current network throughput average value in Mbps for peer ``peer``.
  round(((peer.netThroughput.average / (1024 * 1024)) * 10_000) / 10_000)

proc `<`*(a, b: Peer): bool =
  ## Comparison function, which first checks peer's scores, and if the peers'
  ## score is equal it compares peers' network throughput.
  if a.score < b.score:
    true
  elif a.score == b.score:
    (a.netThroughput.average < b.netThroughput.average)
  else:
    false

const
  maxRequestQuota = 1000000.0
  fullReplenishTime = 5.seconds
  replenishRate = (maxRequestQuota / fullReplenishTime.nanoseconds.float)

proc updateRequestQuota*(peer: Peer, reqCost: float) =
  let
    currentTime = now(chronos.Moment)
    nanosSinceLastReq = nanoseconds(currentTime - peer.lastReqTime)
    replenishedQuota = peer.requestQuota + nanosSinceLastReq.float * replenishRate

  peer.lastReqTime = currentTime
  peer.requestQuota = min(replenishedQuota, maxRequestQuota) - reqCost

template awaitNonNegativeRequestQuota*(peer: Peer) =
  let quota = peer.requestQuota
  if quota < 0:
    await sleepAsync(nanoseconds(int((-quota) / replenishRate)))

func allowedOpsPerSecondCost*(n: int): float =
  (replenishRate * 1000000000'f / n.float)
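
# Taken together, these procs form a simple token-bucket rate limiter:
# `requestQuota` refills linearly at `replenishRate` units per nanosecond, so an
# empty bucket is full again after `fullReplenishTime` (5 seconds), and each
# request subtracts its cost in `updateRequestQuota`. A worked example with the
# constants above: allowedOpsPerSecondCost(100) = (1_000_000 / 5e9) * 1e9 / 100
# = 2000.0, i.e. charging 2000 quota units per call lets a peer sustain roughly
# 100 such calls per second before `awaitNonNegativeRequestQuota` starts
# delaying it.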

proc isSeen*(network: Eth2Node, peerId: PeerID): bool =
  ## Returns ``true`` if ``peerId`` is present in the SeenTable and the time
  ## period has not yet expired.
  let currentTime = now(chronos.Moment)
  if peerId notin network.seenTable:
    false
  else:
    let item = try: network.seenTable[peerId]
    except KeyError: raiseAssert "checked with notin"
    if currentTime >= item.stamp:
      # Peer is in SeenTable, but the time period has expired.
      network.seenTable.del(peerId)
      false
    else:
      true

proc addSeen*(network: Eth2Node, peerId: PeerID,
              period: chronos.Duration) =
  ## Adds the peer with PeerID ``peerId`` to the SeenTable with timeout
  ## ``period``.
  let item = SeenItem(peerId: peerId, stamp: now(chronos.Moment) + period)
  withValue(network.seenTable, peerId, entry) do:
    if entry.stamp < item.stamp:
      entry.stamp = item.stamp
  do:
    network.seenTable[peerId] = item
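
# Entries in `seenTable` act as a per-peer reconnection backoff: `dialPeer` and
# the discovery loop consult `isSeen` (via `checkPeer`) before dialing, so a
# peer added here is not contacted again until its `stamp` expires (see the
# SeenTableTime* constants for the periods used per disconnect reason).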

proc disconnect*(peer: Peer, reason: DisconnectionReason,
                 notifyOtherPeer = false) {.async.} =
  # TODO(zah): How should we notify the other peer?
  try:
    if peer.connectionState notin {Disconnecting, Disconnected}:
      peer.connectionState = Disconnecting
      # We add the peer to the SeenTable before the actual disconnect to avoid
      # races.
      let seenTime = case reason
        of ClientShutDown:
          SeenTableTimeClientShutDown
        of IrrelevantNetwork:
          SeenTableTimeIrrelevantNetwork
        of FaultOrError:
          SeenTableTimeFaultOrError
        of PeerScoreLow:
          SeenTablePenaltyError
      peer.network.addSeen(peer.info.peerId, seenTime)
      await peer.network.switch.disconnect(peer.info.peerId)
  except CatchableError:
    # We do not care about exceptions in the disconnection procedure.
    trace "Exception while disconnecting peer", peer = peer.info.peerId,
                                                reason = reason

include eth/p2p/p2p_backends_helpers
include eth/p2p/p2p_tracing

proc getRequestProtoName(fn: NimNode): NimNode =
  # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes
  # (TODO: file as an issue)

  let pragmas = fn.pragma
  if pragmas.kind == nnkPragma and pragmas.len > 0:
    for pragma in pragmas:
      try:
        if pragma.len > 0 and $pragma[0] == "libp2pProtocol":
          let protoName = $(pragma[1])
          let protoVer = $(pragma[2].intVal)
          return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/")
      except Exception as exc: raiseAssert exc.msg # TODO https://github.com/nim-lang/Nim/issues/17454

  return newLit("")
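
# As a concrete example of the mapping above, a handler annotated with e.g.
# `libp2pProtocol("ping", 1)` resolves to the request protocol id
# "/eth2/beacon_chain/req/ping/1/"; `openStream` then appends the "ssz_snappy"
# codec suffix when dialing.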

proc writeChunk*(conn: Connection,
                 responseCode: Option[ResponseCode],
                 payload: Bytes,
                 contextBytes: openarray[byte] = []): Future[void] =
  var output = memoryOutput()

  try:
    if responseCode.isSome:
      output.write byte(responseCode.get)

    if contextBytes.len > 0:
      output.write contextBytes

    output.write toBytes(payload.lenu64, Leb128).toOpenArray()

    framingFormatCompress(output, payload)
  except IOError as exc:
    raiseAssert exc.msg # memoryOutput shouldn't raise
  try:
    conn.write(output.getOutput)
  except Exception as exc: # TODO fix libp2p
    raiseAssert exc.msg
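
# The chunk written above is laid out as:
#   [response code byte - responses only]
#   [context bytes, if any]
#   [LEB128/varint length of the uncompressed payload]
#   [payload, compressed with Snappy framing]
# which is the ssz_snappy req/resp chunk encoding used on the wire.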

template errorMsgLit(x: static string): ErrorMsg =
  const val = ErrorMsg toBytes(x)
  val

func formatErrorMsg(msg: ErrorMsg): string =
  # ErrorMsg "usually" contains a human-readable string - we'll try to parse it
  # as ASCII and return hex if that fails
  for c in msg:
    if c < 32 or c > 127:
      return byteutils.toHex(asSeq(msg))

  string.fromBytes(asSeq(msg))

proc sendErrorResponse(peer: Peer,
                       conn: Connection,
                       responseCode: ResponseCode,
                       errMsg: ErrorMsg): Future[void] =
  debug "Error processing request",
    peer, responseCode, errMsg = formatErrorMsg(errMsg)
  conn.writeChunk(some responseCode, SSZ.encode(errMsg))

proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.async.} =
  var
    deadline = sleepAsync RESP_TIMEOUT
    streamFut = peer.network.openStream(peer, protocolId)

  await streamFut or deadline

  if not streamFut.finished:
    await streamFut.cancelAndWait()
    raise newException(TransmissionError, "Failed to open LibP2P stream")

  let stream = streamFut.read
  try:
    await stream.writeChunk(none ResponseCode, requestBytes)
  finally:
    await stream.close()

proc sendResponseChunkBytes(response: UntypedResponse, payload: Bytes): Future[void] =
  inc response.writtenChunks
  response.stream.writeChunk(some Success, payload)

proc sendResponseChunk*(response: UntypedResponse, val: auto): Future[void] =
  inc response.writtenChunks
  response.stream.writeChunk(some Success, SSZ.encode(val))

template sendUserHandlerResultAsChunkImpl*(stream: Connection,
                                           handlerResultFut: Future): untyped =
  let handlerRes = await handlerResultFut
  writeChunk(stream, some Success, SSZ.encode(handlerRes))

template sendUserHandlerResultAsChunkImpl*(stream: Connection,
                                           handlerResult: auto): untyped =
  writeChunk(stream, some Success, SSZ.encode(handlerResult))

when useNativeSnappy:
  include faststreams_backend
else:
  include libp2p_streams_backend

proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes,
                     ResponseMsg: type,
                     timeout: Duration): Future[NetRes[ResponseMsg]]
                    {.async.} =
  var deadline = sleepAsync timeout

  let stream = awaitWithTimeout(peer.network.openStream(peer, protocolId),
                                deadline): return neterr StreamOpenTimeout
  try:
    # Send the request
    await stream.writeChunk(none ResponseCode, requestBytes)
    # Half-close the stream to mark the end of the request - if this is not
    # done, the other peer might never send us the response.
    await stream.close()

    # Read the response
    return
      await readResponse(when useNativeSnappy: libp2pInput(stream) else: stream,
                         peer, ResponseMsg, timeout)
  finally:
    await stream.closeWithEOF()

proc init*[MsgType](T: type MultipleChunksResponse[MsgType],
                    peer: Peer, conn: Connection): T =
  T(UntypedResponse(peer: peer, stream: conn))

proc init*[MsgType](T: type SingleChunkResponse[MsgType],
                    peer: Peer, conn: Connection): T =
  T(UntypedResponse(peer: peer, stream: conn))

template write*[M](r: MultipleChunksResponse[M], val: M): untyped =
  mixin sendResponseChunk
  sendResponseChunk(UntypedResponse(r), val)

template send*[M](r: SingleChunkResponse[M], val: M): untyped =
  mixin sendResponseChunk
  doAssert UntypedResponse(r).writtenChunks == 0
  sendResponseChunk(UntypedResponse(r), val)

proc performProtocolHandshakes*(peer: Peer, incoming: bool) {.async.} =
  # Loop down serially because it's easier to reason about the connection state
  # when there are fewer async races, especially during setup
  for protocol in allProtocols:
    if protocol.onPeerConnected != nil:
      await protocol.onPeerConnected(peer, incoming)

proc initProtocol(name: string,
                  peerInit: PeerStateInitializer,
                  networkInit: NetworkStateInitializer): ProtocolInfoObj =
  ProtocolInfoObj(
    name: name,
    messages: @[],
    peerStateInitializer: peerInit,
    networkStateInitializer: networkInit)

proc registerProtocol(protocol: ProtocolInfo) =
  # TODO: This can be done at compile-time in the future
  let pos = lowerBound(gProtocols, protocol)
  gProtocols.insert(protocol, pos)
  for i in 0 ..< gProtocols.len:
    gProtocols[i].index = i

proc setEventHandlers(p: ProtocolInfo,
                      onPeerConnected: OnPeerConnectedHandler,
                      onPeerDisconnected: OnPeerDisconnectedHandler) =
  p.onPeerConnected = onPeerConnected
  p.onPeerDisconnected = onPeerDisconnected

proc implementSendProcBody(sendProc: SendProc) =
  let
    msg = sendProc.msg
    UntypedResponse = bindSym "UntypedResponse"

  proc sendCallGenerator(peer, bytes: NimNode): NimNode =
    if msg.kind != msgResponse:
      let msgProto = getRequestProtoName(msg.procDef)
      case msg.kind
      of msgRequest:
        let ResponseRecord = msg.response.recName
        quote:
          makeEth2Request(`peer`, `msgProto`, `bytes`,
                          `ResponseRecord`, `timeoutVar`)
      else:
        quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`)
    else:
      quote: sendResponseChunkBytes(`UntypedResponse`(`peer`), `bytes`)

  sendProc.useStandardBody(nil, nil, sendCallGenerator)

proc handleIncomingStream(network: Eth2Node,
                          conn: Connection,
                          MsgType: type) {.async.} =
  mixin callUserHandler, RecType

  type MsgRec = RecType(MsgType)
  const msgName {.used.} = typetraits.name(MsgType)

  ## Uncomment this to enable tracing on all incoming requests
  ## You can include `msgNameLit` in the condition to select
  ## more specific requests:
  # when chronicles.runtimeFilteringEnabled:
  #   setLogLevel(LogLevel.TRACE)
  #   defer: setLogLevel(LogLevel.DEBUG)
  #   trace "incoming " & `msgNameLit` & " conn"

  let peer = peerFromStream(network, conn)
  try:
    case peer.connectionState
    of Disconnecting, Disconnected, None:
      # We got an incoming stream request while disconnected or disconnecting.
      debug "Got incoming request from disconnected peer", peer = peer,
            message = msgName
      await conn.closeWithEOF()
      return
    of Connecting:
      # We got an incoming stream request while the handshake is not yet
      # finished. TODO: We could check it here.
      debug "Got incoming request from peer while in handshake", peer = peer,
            msgName
    of Connected:
      # We got an incoming stream from a peer with a proper connection state.
      debug "Got incoming request from peer", peer = peer, msgName

    template returnInvalidRequest(msg: ErrorMsg) =
      peer.updateScore(PeerScoreInvalidRequest)
      await sendErrorResponse(peer, conn, InvalidRequest, msg)
      return

    template returnInvalidRequest(msg: string) =
      returnInvalidRequest(ErrorMsg msg.toBytes)

    let s = when useNativeSnappy:
      let fs = libp2pInput(conn)

      if fs.timeoutToNextByte(TTFB_TIMEOUT):
        returnInvalidRequest(errorMsgLit "Request first byte not sent in time")

      fs
    else:
      # TODO(zah) The TTFB timeout is not implemented in LibP2P streams back-end
      conn

    let deadline = sleepAsync RESP_TIMEOUT

    let msg = if sizeof(MsgRec) > 0:
      try:
        awaitWithTimeout(readChunkPayload(s, peer, MsgRec), deadline):
          returnInvalidRequest(errorMsgLit "Request full data not sent in time")

      except SerializationError as err:
        returnInvalidRequest err.formatMsg("msg")

      except SnappyError as err:
        returnInvalidRequest err.msg
    else:
      NetRes[MsgRec].ok default(MsgRec)

    if msg.isErr:
      let (responseCode, errMsg) = case msg.error.kind
        of UnexpectedEOF, PotentiallyExpectedEOF:
          (InvalidRequest, errorMsgLit "Incomplete request")

        of InvalidContextBytes:
          (ServerError, errorMsgLit "Unrecognized context bytes")

        of InvalidSnappyBytes:
          (InvalidRequest, errorMsgLit "Failed to decompress snappy payload")

        of InvalidSszBytes:
          (InvalidRequest, errorMsgLit "Failed to decode SSZ payload")

        of ZeroSizePrefix:
          (InvalidRequest, errorMsgLit "The request chunk cannot have a size of zero")

        of SizePrefixOverflow:
          (InvalidRequest, errorMsgLit "The chunk size exceeds the maximum allowed")

        of InvalidResponseCode, ReceivedErrorResponse,
           StreamOpenTimeout, ReadResponseTimeout:
          # These shouldn't be possible in a request, because
          # there are no response codes being read, no stream
          # openings and no reading of responses:
          (ServerError, errorMsgLit "Internal server error")

        of BrokenConnection:
          return

      await sendErrorResponse(peer, conn, responseCode, errMsg)
      return

    try:
      logReceivedMsg(peer, MsgType(msg.get))
      await callUserHandler(MsgType, peer, conn, msg.get)
    except InvalidInputsError as err:
      returnInvalidRequest err.msg
      await sendErrorResponse(peer, conn, ServerError,
                              ErrorMsg err.msg.toBytes)
    except CatchableError as err:
      await sendErrorResponse(peer, conn, ServerError,
                              ErrorMsg err.msg.toBytes)

  except CatchableError as err:
    debug "Error processing an incoming request", err = err.msg, msgName

  finally:
    await conn.closeWithEOF()
    discard network.peerPool.checkPeerScore(peer)

proc toPeerAddr*(r: enr.TypedRecord,
                 proto: IpTransportProtocol): Result[PeerAddr, cstring] {.
     raises: [Defect].} =

  if not r.secp256k1.isSome:
    return err("enr: no secp256k1 key in record")

  let
    pubKey = ? keys.PublicKey.fromRaw(r.secp256k1.get)
    peerId = ? PeerID.init(crypto.PublicKey(
      scheme: Secp256k1, skkey: secp.SkPublicKey(pubKey)))

  var addrs = newSeq[MultiAddress]()

  case proto
  of tcpProtocol:
    if r.ip.isSome and r.tcp.isSome:
      let ip = ipv4(r.ip.get)
      addrs.add MultiAddress.init(ip, tcpProtocol, Port r.tcp.get)

    if r.ip6.isSome:
      let ip = ipv6(r.ip6.get)
      if r.tcp6.isSome:
        addrs.add MultiAddress.init(ip, tcpProtocol, Port r.tcp6.get)
      elif r.tcp.isSome:
        addrs.add MultiAddress.init(ip, tcpProtocol, Port r.tcp.get)
      else:
        discard

  of udpProtocol:
    if r.ip.isSome and r.udp.isSome:
      let ip = ipv4(r.ip.get)
      addrs.add MultiAddress.init(ip, udpProtocol, Port r.udp.get)

    if r.ip6.isSome:
      let ip = ipv6(r.ip6.get)
      if r.udp6.isSome:
        addrs.add MultiAddress.init(ip, udpProtocol, Port r.udp6.get)
      elif r.udp.isSome:
        addrs.add MultiAddress.init(ip, udpProtocol, Port r.udp.get)
      else:
        discard

  if addrs.len == 0:
    return err("enr: no addresses in record")

  ok(PeerAddr(peerId: peerId, addrs: addrs))

proc checkPeer(node: Eth2Node, peerAddr: PeerAddr): bool =
  logScope: peer = peerAddr.peerId
  let peerId = peerAddr.peerId
  if node.peerPool.hasPeer(peerId):
    trace "Already connected"
    false
  else:
    if node.isSeen(peerId):
      trace "Recently connected"
      false
    else:
      true

proc dialPeer*(node: Eth2Node, peerAddr: PeerAddr, index = 0) {.async.} =
  ## Establish connection with remote peer identified by address ``peerAddr``.
  logScope:
    peer = peerAddr.peerId
    index = index

  if not(node.checkPeer(peerAddr)):
    return

  debug "Connecting to discovered peer"
  var deadline = sleepAsync(node.connectTimeout)
  var workfut = node.switch.connect(peerAddr.peerId, peerAddr.addrs)

  try:
    # The `or` operation will only raise an exception from `workfut`, because
    # `deadline` cannot raise an exception.
    await workfut or deadline
    if workfut.finished():
      if not deadline.finished():
        deadline.cancel()
      inc nbc_successful_dials
    else:
      debug "Connection to remote peer timed out"
      inc nbc_timeout_dials
      node.addSeen(peerAddr.peerId, SeenTableTimeTimeout)
      await cancelAndWait(workfut)
  except CatchableError as exc:
    debug "Connection to remote peer failed", msg = exc.msg
    inc nbc_failed_dials
    node.addSeen(peerAddr.peerId, SeenTableTimeDeadPeer)

proc connectWorker(node: Eth2Node, index: int) {.async.} =
  debug "Connection worker started", index = index
  while true:
    # This loop will never produce high CPU usage because it waits and blocks
    # until it obtains a new peer from the ``connQueue`` queue.
    let remotePeerAddr = await node.connQueue.popFirst()
    # Previous worker dial might have hit the maximum peers.
    # TODO: could clear the whole connTable and connQueue here also, best
    # would be to have this event based coming from peer pool or libp2p.
    if node.switch.connManager.outSema.count > 0:
      await node.dialPeer(remotePeerAddr, index)
    # Peer was added to `connTable` before adding it to `connQueue`, so we
    # exclude the peer here after processing.
    node.connTable.excl(remotePeerAddr.peerId)

proc toPeerAddr(node: Node): Result[PeerAddr, cstring] {.raises: [Defect].} =
  let nodeRecord = ? node.record.toTypedRecord()
  let peerAddr = ? nodeRecord.toPeerAddr(tcpProtocol)
  ok(peerAddr)

proc queryRandom*(d: Eth2DiscoveryProtocol, forkId: ENRForkID,
                  attnets: BitArray[ATTESTATION_SUBNET_COUNT]):
                  Future[seq[PeerAddr]] {.async, raises: [Defect].} =
  ## Perform a discovery query for a random target matching the eth2 field
  ## (forkId) and matching at least one of the attestation subnets.
  let nodes = await d.queryRandom()
  let sszForkId = SSZ.encode(forkId)

  var filtered: seq[PeerAddr]
  for n in nodes:
    if n.record.contains((enrForkIdField, sszForkId)):
      let res = n.record.tryGet(enrAttestationSubnetsField, seq[byte])

      if res.isSome():
        let attnetsNode =
          try:
            SSZ.decode(res.get(), BitArray[ATTESTATION_SUBNET_COUNT])
          except SszError as e:
            debug "Could not decode attestation subnet bitfield of peer",
              peer = n.record.toURI(), exception = e.name, msg = e.msg
            continue

        for i in 0..<attnetsNode.bytes.len:
          if (attnets.bytes[i] and attnetsNode.bytes[i]) > 0:
            # we have at least one subnet match
            let peerAddr = n.toPeerAddr()
            if peerAddr.isOk():
              filtered.add(peerAddr.get())
            break

  return filtered
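
# The byte-wise AND above treats both bitfields as raw bytes: the peer is kept
# as soon as any byte of its advertised attnets overlaps the subnets we are
# looking for, i.e. the two nodes share at least one attestation subnet.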

proc runDiscoveryLoop*(node: Eth2Node) {.async.} =
  debug "Starting discovery loop"

  while true:
    if node.switch.connManager.outSema.count > 0:
      let forkId = (enrForkIdField, SSZ.encode(node.forkId))
      var discoveredNodes = await node.discovery.queryRandom(forkId)
      var newPeers = 0
      for discNode in discoveredNodes:
        let res = discNode.toPeerAddr()
        if res.isOk():
          let peerAddr = res.get()
          # Waiting for an empty space in PeerPool.
          while true:
            if node.peerPool.lenSpace({PeerType.Outgoing}) == 0:
              await node.peerPool.waitForEmptySpace(PeerType.Outgoing)
            else:
              break
          # Check if the peer is present in the SeenTable or PeerPool.
          if node.checkPeer(peerAddr):
            if peerAddr.peerId notin node.connTable:
              # We add the peer to the pending connections table here; it is
              # removed only in `connectWorker`.
              node.connTable.incl(peerAddr.peerId)
              await node.connQueue.addLast(peerAddr)
              inc(newPeers)
        else:
          debug "Failed to decode discovery's node address",
                node = discNode, errMsg = res.error

      debug "Discovery tick", wanted_peers = node.wantedPeers,
            space = node.peerPool.shortLogSpace(),
            acquired = node.peerPool.shortLogAcquired(),
            available = node.peerPool.shortLogAvailable(),
            current = node.peerPool.shortLogCurrent(),
            length = len(node.peerPool),
            discovered_nodes = len(discoveredNodes),
            new_peers = newPeers

      if newPeers == 0:
        let currentPeers = node.peerPool.lenCurrent()
        if currentPeers <= node.wantedPeers shr 2: # 25%
          warn "Peer count low, no new peers discovered",
               discovered_nodes = len(discoveredNodes), new_peers = newPeers,
               current_peers = currentPeers, wanted_peers = node.wantedPeers

    # Discovery `queryRandom` can have a synchronous fast path, for example
    # when no peers are in the routing table. Don't run it in a continuous loop.
    await sleepAsync(1.seconds)

proc getPersistentNetMetadata*(config: BeaconNodeConf): altair.MetaData
                              {.raises: [Defect, IOError, SerializationError].} =
  let metadataPath = config.dataDir / nodeMetadataFilename
  if not fileExists(metadataPath):
    var res: altair.MetaData
    for i in 0 ..< ATTESTATION_SUBNET_COUNT:
      # TODO:
      # Persistent (stability) subnets should be stored with their expiration
      # epochs. For now, indicate that we participate in no persistent subnets.
      res.attnets[i] = false
    Json.saveFile(metadataPath, res)
    res
  else:
    Json.loadFile(metadataPath, altair.MetaData)

proc resolvePeer(peer: Peer) =
  # Resolve task which looks up the peer's public key and recovers its ENR
  # using discovery v5. We only resolve the ENR for peers we know about to
  # avoid querying the network - as of now, the ENR is not needed, except for
  # debugging.
  logScope: peer = peer.info.peerId
  let startTime = now(chronos.Moment)
  let nodeId =
    block:
      var key: PublicKey
      # `secp256k1` keys are always stored inside PeerID.
      discard peer.info.peerId.extractPublicKey(key)
      keys.PublicKey.fromRaw(key.skkey.getBytes()).get().toNodeId()

  debug "Peer's ENR recovery task started", node_id = $nodeId

  # This is a "fast path" for peers which were dialed. In this case discovery
  # already has the most recent ENR information about this peer.
  let gnode = peer.network.discovery.getNode(nodeId)
  if gnode.isSome():
    peer.enr = some(gnode.get().record)
    inc(nbc_successful_discoveries)
    let delay = now(chronos.Moment) - startTime
    nbc_resolve_time.observe(delay.toFloatSeconds())
    debug "Peer's ENR recovered", delay

proc handlePeer*(peer: Peer) {.async.} =
  let res = peer.network.peerPool.addPeerNoWait(peer, peer.direction)
  case res:
  of PeerStatus.LowScoreError, PeerStatus.NoSpaceError:
    # Peer has a low score or we do not have enough space in PeerPool,
    # so we are going to disconnect it gracefully.
    # Peer's state will be updated in the connection event.
    debug "Peer has low score or there is no space in PeerPool",
          peer = peer, reason = res
    await peer.disconnect(FaultOrError)
  of PeerStatus.DeadPeerError:
    # Peer's lifetime future is finished, so it's already dead;
    # we do not need to perform a graceful disconnect.
    # Peer's state will be updated in the connection event.
    discard
  of PeerStatus.DuplicateError:
    # Peer is already present in PeerPool, so we can't perform a disconnect,
    # because in that case we could kill both connections (the connection
    # which is present in PeerPool and the new one).
    # This is a possible bug, because we can only enter here if
    # `peer.connections == 1`, which means that the Peer's lifetime is not
    # tracked properly and we still have not received the `Disconnected` event.
    debug "Peer is already present in PeerPool", peer = peer
  of PeerStatus.Success:
    # Peer was added to PeerPool.
    peer.score = NewPeerScore
    peer.connectionState = Connected
    # We spawn a task which will obtain the ENR for this peer.
    resolvePeer(peer)
    debug "Peer successfully connected", peer = peer,
          connections = peer.connections

proc onConnEvent(node: Eth2Node, peerId: PeerID, event: ConnEvent) {.async.} =
  let peer = node.getPeer(peerId)
  case event.kind
  of ConnEventKind.Connected:
    inc peer.connections
    debug "Peer connection upgraded", peer = $peerId,
          connections = peer.connections
    if peer.connections == 1:
      # Libp2p may connect multiple times to the same peer - using different
      # transports for both incoming and outgoing. For now, we'll count our
      # "first" encounter with the peer as the true connection, leaving the
      # other connections be - libp2p limits the number of concurrent
      # connections to the same peer, and only one of these connections will be
      # active. Nonetheless, this quirk will cause a number of odd behaviours:
      # * For peer limits, we might miscount the incoming vs outgoing quota
      # * Protocol handshakes are wonky: we'll not necessarily use the newly
      #   connected transport - instead we'll just pick a random one!
      case peer.connectionState
      of Disconnecting:
        # We got a connection from a peer that we are currently disconnecting.
        # Normally this does not happen, but if a peer is being disconnected
        # while a concurrent (incoming, for example) connection attempt happens,
        # we might end up here.
        debug "Got connection attempt from peer that we are disconnecting",
              peer = peerId
        await node.switch.disconnect(peerId)
        return
      of None:
        # We have established a connection with the new peer.
        peer.connectionState = Connecting
      of Disconnected:
        # We have established a connection with a peer that we have seen
        # before - reusing the existing peer object is fine.
        peer.connectionState = Connecting
        peer.score = 0 # Will be set to NewPeerScore after handshake
      of Connecting, Connected:
        # This means that we got a notification event for a peer to which we
        # are already connected or connecting. If this happens, it indicates a
        # bug on the `nim-libp2p` side.
        warn "Got connection attempt from peer which we already connected",
             peer = peerId
        await peer.disconnect(FaultOrError)
        return

      # Store connection direction inside Peer object.
      if event.incoming:
        peer.direction = PeerType.Incoming
      else:
        peer.direction = PeerType.Outgoing

      await performProtocolHandshakes(peer, event.incoming)

  of ConnEventKind.Disconnected:
    dec peer.connections
    debug "Lost connection to peer", peer = peerId,
          connections = peer.connections

    if peer.connections == 0:
      debug "Peer disconnected", peer = $peerId, connections = peer.connections

      # Whatever caused disconnection, avoid connection spamming
      node.addSeen(peerId, SeenTableTimeReconnect)

      let fut = peer.disconnectedFut
      if not(isNil(fut)):
        fut.complete()
        peer.disconnectedFut = nil
      else:
        # TODO (cheatfate): This could be removed when the bug is fixed inside
        # `nim-libp2p`.
        debug "Got new event while peer is already disconnected",
              peer = peerId, peer_state = peer.connectionState
      peer.connectionState = Disconnected
2021-07-07 09:09:47 +00:00
|
|
|
proc new*(T: type Eth2Node, config: BeaconNodeConf,
          enrForkId: ENRForkID, forkDigests: ForkDigestsRef,
          switch: Switch, pubsub: GossipSub, ip: Option[ValidIpAddress],
          tcpPort, udpPort: Option[Port], privKey: keys.PrivateKey, discovery: bool,
          rng: ref BrHmacDrbgContext): T {.raises: [Defect, CatchableError].} =
  let
    metadata = getPersistentNetMetadata(config)
  when not defined(local_testnet):
    let
      connectTimeout = 1.minutes
      seenThreshold = 5.minutes
  else:
    let
      connectTimeout = 10.seconds
      seenThreshold = 10.seconds

  let node = T(
    switch: switch,
    pubsub: pubsub,
    wantedPeers: config.maxPeers,
    peerPool: newPeerPool[Peer, PeerID](maxPeers = config.maxPeers),
    # It's important here to create the AsyncQueue with a limited size;
    # otherwise it could produce high CPU usage.
    connQueue: newAsyncQueue[PeerAddr](ConcurrentConnections),
    # TODO: The persistent net metadata should only be used in the case of
    # reusing the previous netkey.
    metadata: metadata,
    forkId: enrForkId,
    forkDigests: forkDigests,
    discovery: Eth2DiscoveryProtocol.new(
      config, ip, tcpPort, udpPort, privKey,
      {
        enrForkIdField: SSZ.encode(enrForkId),
        enrAttestationSubnetsField: SSZ.encode(metadata.attnets)
      },
      rng),
    discoveryEnabled: discovery,
    rng: rng,
    connectTimeout: connectTimeout,
    seenThreshold: seenThreshold,
  )

  newSeq node.protocolStates, allProtocols.len
  for proto in allProtocols:
    if proto.networkStateInitializer != nil:
      node.protocolStates[proto.index] = proto.networkStateInitializer(node)

    for msg in proto.messages:
      if msg.protocolMounter != nil:
        msg.protocolMounter node

  proc peerHook(peerInfo: PeerInfo, event: ConnEvent): Future[void] {.gcsafe.} =
    onConnEvent(node, peerInfo.peerId, event)

  try:
    switch.addConnEventHandler(peerHook, ConnEventKind.Connected)
    switch.addConnEventHandler(peerHook, ConnEventKind.Disconnected)
  except Exception as exc: # TODO fix libp2p, shouldn't happen
    raiseAssert exc.msg

  node

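# Illustrative sketch (not part of the node logic; assumes only the chronos
# `AsyncQueue` API): the bounded-queue pattern used for `connQueue` above -
# a queue capped at `ConcurrentConnections` plus a fixed set of workers -
# provides backpressure instead of unbounded buffering. The names below are
# examples only.
when isMainModule:
  proc demoWorker(q: AsyncQueue[int], id: int) {.async.} =
    while true:
      # Suspends until an item is available, keeping the worker count fixed.
      let item = await q.popFirst()
      echo "worker ", id, " processing ", item

  proc demoProducer(q: AsyncQueue[int]) {.async.} =
    for i in 0 ..< 16:
      # Suspends when the queue is full, providing natural backpressure.
      await q.addLast(i)
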
template publicKey*(node: Eth2Node): keys.PublicKey =
  node.discovery.privKey.toPublicKey

proc startListening*(node: Eth2Node) {.async.} =
  if node.discoveryEnabled:
    try:
      node.discovery.open()
    except CatchableError:
      fatal "Failed to start discovery service. UDP port may be already in use"
      quit 1

  try:
    node.libp2pTransportLoops = await node.switch.start()
  except CatchableError:
    fatal "Failed to start LibP2P transport. TCP port may be already in use"
    quit 1

  await node.pubsub.start()

proc start*(node: Eth2Node) {.async.} =

  proc onPeerCountChanged() =
    trace "Number of peers has been changed",
          space = node.peerPool.shortLogSpace(),
          acquired = node.peerPool.shortLogAcquired(),
          available = node.peerPool.shortLogAvailable(),
          current = node.peerPool.shortLogCurrent(),
          length = len(node.peerPool)
    nbc_peers.set int64(len(node.peerPool))

  node.peerPool.setPeerCounter(onPeerCountChanged)

  for i in 0 ..< ConcurrentConnections:
    node.connWorkers.add connectWorker(node, i)

  if node.discoveryEnabled:
    node.discovery.start()
    traceAsyncErrors node.runDiscoveryLoop()
  else:
    notice "Discovery disabled; trying bootstrap nodes",
      nodes = node.discovery.bootstrapRecords.len
    for enr in node.discovery.bootstrapRecords:
      let tr = enr.toTypedRecord()
      if tr.isOk():
        let pa = tr.get().toPeerAddr(tcpProtocol)
        if pa.isOk():
          await node.connQueue.addLast(pa.get())

proc stop*(node: Eth2Node) {.async.} =
  # Ignore errors in futures, since we're shutting down (but log them on the
  # TRACE level, if a timeout is reached).
  let
    waitedFutures = if node.discoveryEnabled:
                      @[node.discovery.closeWait(), node.switch.stop()]
                    else:
                      @[node.switch.stop()]
    timeout = 5.seconds
    completed = await withTimeout(allFutures(waitedFutures), timeout)
  if not completed:
    trace "Eth2Node.stop(): timeout reached", timeout,
      futureErrors = waitedFutures.filterIt(it.error != nil).mapIt(it.error.msg)

proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer =
  let res = Peer(
    info: info,
    network: network,
    connectionState: ConnectionState.None,
    lastReqTime: now(chronos.Moment),
    protocolStates: newSeq[RootRef](len(allProtocols))
  )
  for i in 0 ..< len(allProtocols):
    let proto = allProtocols[i]
    if not(isNil(proto.peerStateInitializer)):
      res.protocolStates[i] = proto.peerStateInitializer(res)
  res

proc registerMsg(protocol: ProtocolInfo,
                 name: string,
                 mounter: MounterProc,
                 libp2pCodecName: string) =
  protocol.messages.add MessageInfo(name: name,
                                    protocolMounter: mounter,
                                    libp2pCodecName: libp2pCodecName)

proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
  var
    Format = ident "SSZ"
    Bool = bindSym "bool"
    Connection = bindSym "Connection"
    Peer = bindSym "Peer"
    Eth2Node = bindSym "Eth2Node"
    registerMsg = bindSym "registerMsg"
    initProtocol = bindSym "initProtocol"
    msgVar = ident "msg"
    networkVar = ident "network"
    callUserHandler = ident "callUserHandler"
    MSG = ident "MSG"

  p.useRequestIds = false
  p.useSingleRecordInlining = true

  new result

  result.PeerType = Peer
  result.NetworkType = Eth2Node
  result.registerProtocol = bindSym "registerProtocol"
  result.setEventHandlers = bindSym "setEventHandlers"
  result.SerializationFormat = Format
  result.RequestResultsWrapper = ident "NetRes"

  result.implementMsg = proc (msg: p2p_protocol_dsl.Message) =
    if msg.kind == msgResponse:
      return

    let
      protocol = msg.protocol
      msgName = $msg.ident
      msgNameLit = newLit msgName
      MsgRecName = msg.recName
      MsgStrongRecName = msg.strongRecName
      codecNameLit = getRequestProtoName(msg.procDef)
      protocolMounterName = ident(msgName & "Mounter")

    ##
    ## Implement the Thunk:
    ##
    ## The protocol handlers in nim-libp2p receive only a `Connection`
    ## parameter and there is no way to access the wider context (such
    ## as the current `Switch`). In our handlers, we may need to list all
    ## peers in the current network, so we must keep a reference to the
    ## network object in the closure environment of the installed handlers.
    ##
    ## For this reason, we define a `protocol mounter` proc that will
    ## initialize the network object by creating handlers bound to the
    ## specific network.
    ##
    var userHandlerCall = newTree(nnkDiscardStmt)

    if msg.userHandler != nil:
      var OutputParamType = if msg.kind == msgRequest: msg.outputParamType
                            else: nil

      if OutputParamType == nil:
        userHandlerCall = msg.genUserHandlerCall(msgVar, [peerVar])
        if msg.kind == msgRequest:
          userHandlerCall = newCall(ident"sendUserHandlerResultAsChunkImpl",
                                    streamVar,
                                    userHandlerCall)
      else:
        if OutputParamType.kind == nnkVarTy:
          OutputParamType = OutputParamType[0]

        let isChunkStream = eqIdent(OutputParamType[0], "MultipleChunksResponse")
        msg.response.recName = if isChunkStream:
          newTree(nnkBracketExpr, ident"seq", OutputParamType[1])
        else:
          OutputParamType[1]

        let responseVar = ident("response")
        userHandlerCall = newStmtList(
          newVarStmt(responseVar,
                     newCall(ident"init", OutputParamType,
                             peerVar, streamVar)),
          msg.genUserHandlerCall(msgVar, [peerVar], outputParam = responseVar))

    protocol.outRecvProcs.add quote do:
      template `callUserHandler`(`MSG`: type `MsgStrongRecName`,
                                 `peerVar`: `Peer`,
                                 `streamVar`: `Connection`,
                                 `msgVar`: `MsgRecName`): untyped =
        `userHandlerCall`

      proc `protocolMounterName`(`networkVar`: `Eth2Node`) =
        proc snappyThunk(`streamVar`: `Connection`,
                         `protocolVar`: string): Future[void] {.gcsafe.} =
          return handleIncomingStream(`networkVar`, `streamVar`,
                                      `MsgStrongRecName`)

        mount `networkVar`.switch,
              LPProtocol(codecs: @[`codecNameLit` & "ssz_snappy"],
                         handler: snappyThunk)

    ##
    ## Implement Senders and Handshake
    ##
    if msg.kind == msgHandshake:
      macros.error "Handshake messages are not supported in LibP2P protocols"
    else:
      var sendProc = msg.createSendProc()
      implementSendProcBody sendProc

    protocol.outProcRegistrations.add(
      newCall(registerMsg,
              protocol.protocolInfoVar,
              msgNameLit,
              protocolMounterName,
              codecNameLit))

  result.implementProtocolInit = proc (p: P2PProtocol): NimNode =
    return newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit)

func asLibp2pKey*(key: keys.PublicKey): PublicKey =
  PublicKey(scheme: Secp256k1, skkey: secp.SkPublicKey(key))

func asEthKey*(key: PrivateKey): keys.PrivateKey =
  keys.PrivateKey(key.skkey)

proc initAddress*(T: type MultiAddress, str: string): T =
  let address = MultiAddress.init(str)
  if IPFS.match(address) and matchPartial(multiaddress.TCP, address):
    result = address
  else:
    raise newException(MultiAddressError,
                       "Invalid bootstrap node multi-address")

template tcpEndPoint(address, port): auto =
  MultiAddress.init(address, tcpProtocol, port)

proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
                           config: BeaconNodeConf): NetKeyPair =
  case config.cmd
  of noCommand, record:
    if config.netKeyFile == "random":
      let res = PrivateKey.random(Secp256k1, rng)
      if res.isErr():
        fatal "Could not generate random network key file"
        quit QuitFailure
      let
        privKey = res.get()
        pubKey = privKey.getKey().expect("working public key from random")
        pres = PeerID.init(pubKey)
      if pres.isErr():
        fatal "Could not obtain PeerID from network key"
        quit QuitFailure
      info "Generating new networking key", network_public_key = pubKey,
                                            network_peer_id = $pres.get()
      NetKeyPair(seckey: privKey, pubkey: pubKey)
    else:
      let keyPath =
        if isAbsolute(config.netKeyFile):
          config.netKeyFile
        else:
          config.dataDir / config.netKeyFile

      if fileAccessible(keyPath, {AccessFlags.Find}):
        info "Network key storage is present, unlocking", key_path = keyPath

        # Insecure password used only for automated testing.
        let insecurePassword =
          if config.netKeyInsecurePassword:
            some(NetworkInsecureKeyPassword)
          else:
            none[string]()

        let res = loadNetKeystore(keyPath, insecurePassword)
        if res.isNone():
          fatal "Could not load network key file"
          quit QuitFailure
        let
          privKey = res.get()
          pubKey = privKey.getKey().expect("working public key from file")
        info "Network key storage was successfully unlocked",
             key_path = keyPath, network_public_key = pubKey
        NetKeyPair(seckey: privKey, pubkey: pubKey)
      else:
        info "Network key storage is missing, creating a new one",
             key_path = keyPath
        let rres = PrivateKey.random(Secp256k1, rng)
        if rres.isErr():
          fatal "Could not generate random network key file"
          quit QuitFailure

        let
          privKey = rres.get()
          pubKey = privKey.getKey().expect("working public key from random")

        # Insecure password used only for automated testing.
        let insecurePassword =
          if config.netKeyInsecurePassword:
            some(NetworkInsecureKeyPassword)
          else:
            none[string]()

        let sres = saveNetKeystore(rng, keyPath, privKey, insecurePassword)
        if sres.isErr():
          fatal "Could not create network key file", key_path = keyPath
          quit QuitFailure

        info "New network key storage was created", key_path = keyPath,
             network_public_key = pubKey
        NetKeyPair(seckey: privKey, pubkey: pubKey)

  of createTestnet:
    if config.netKeyFile == "random":
      fatal "Could not create testnet using `random` network key"
      quit QuitFailure

    let keyPath =
      if isAbsolute(config.netKeyFile):
        config.netKeyFile
      else:
        config.dataDir / config.netKeyFile

    let rres = PrivateKey.random(Secp256k1, rng)
    if rres.isErr():
      fatal "Could not generate random network key file"
      quit QuitFailure

    let
      privKey = rres.get()
      pubKey = privKey.getKey().expect("working public key from random")

    # Insecure password used only for automated testing.
    let insecurePassword =
      if config.netKeyInsecurePassword:
        some(NetworkInsecureKeyPassword)
      else:
        none[string]()

    let sres = saveNetKeystore(rng, keyPath, privKey, insecurePassword)
    if sres.isErr():
      fatal "Could not create network key file", key_path = keyPath
      quit QuitFailure

    info "New network key storage was created", key_path = keyPath,
         network_public_key = pubKey

    NetKeyPair(seckey: privKey, pubkey: pubKey)
  else:
    let res = PrivateKey.random(Secp256k1, rng)
    if res.isErr():
      fatal "Could not generate random network key file"
      quit QuitFailure

    let
      privKey = res.get()
      pubKey = privKey.getKey().expect("working public key from random")
    NetKeyPair(seckey: privKey, pubkey: pubKey)

func gossipId(data: openArray[byte], topic: string, valid: bool): seq[byte] =
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#topics-and-messages
  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.8/specs/altair/p2p-interface.md#topics-and-messages
  const
    MESSAGE_DOMAIN_INVALID_SNAPPY = [0x00'u8, 0x00, 0x00, 0x00]
    MESSAGE_DOMAIN_VALID_SNAPPY = [0x01'u8, 0x00, 0x00, 0x00]
  let messageDigest = withEth2Hash:
    h.update(
      if valid: MESSAGE_DOMAIN_VALID_SNAPPY else: MESSAGE_DOMAIN_INVALID_SNAPPY)
    if topic.len > 0: # Altair topic: the topic itself is mixed into the id
      h.update topic.len.uint64.toBytesLE
      h.update topic
    h.update data

  return messageDigest.data[0..19].toSeq()

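# Worked example (informative, following the code above): for a message that
# decompressed successfully on an Altair topic `t` with payload `d`, the id is
#   SHA256(0x01000000 ++ uint64_le(len(t)) ++ t ++ d)[0..19]
# while a message that failed snappy decompression uses the 0x00000000 domain,
# and on phase0 topics (empty `topic` argument) the topic is not mixed in:
#   SHA256(0x00000000 ++ d)[0..19]
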
func isAltairTopic(topic: string, altairPrefix: string): bool =
  const prefixLen = "/eth2/".len

  if topic.len <= altairPrefix.len + prefixLen:
    false
  else:
    for ind, ch in altairPrefix:
      if ch != topic[ind + prefixLen]: return false
    true

func getAltairTopic(m: messages.Message, altairPrefix: string): string =
  # TODO Return a lent string here to avoid the string copy
  let topic = if m.topicIDs.len > 0: m.topicIDs[0] else: ""
  if isAltairTopic(topic, altairPrefix):
    topic
  else:
    ""

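# Illustrative usage (informative; assumes the usual "/eth2/<fork_digest>/<name>/ssz"
# topic shape): `isAltairTopic` only compares the fork-digest part right after
# the "/eth2/" prefix against `altairPrefix`, e.g. with altairPrefix = "01020304":
#   isAltairTopic("/eth2/01020304/beacon_block/ssz", "01020304") --> true
#   isAltairTopic("/eth2/aabbccdd/beacon_block/ssz", "01020304") --> false
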
proc newBeaconSwitch*(config: BeaconNodeConf, seckey: PrivateKey,
                      address: MultiAddress,
                      rng: ref BrHmacDrbgContext): Switch {.raises: [Defect, CatchableError].} =
  try:
    SwitchBuilder
      .new()
      .withPrivateKey(seckey)
      .withAddress(address)
      .withRng(rng)
      .withNoise()
      .withMplex(5.minutes, 5.minutes)
      .withMaxConnections(config.maxPeers)
      .withAgentVersion(config.agentString)
      .withTcpTransport({ServerFlags.ReuseAddr})
      .build()
  except CatchableError as exc: raise exc
  except Exception as exc: # TODO fix libp2p
    if exc is Defect: raise (ref Defect)exc
    raiseAssert exc.msg

proc createEth2Node*(rng: ref BrHmacDrbgContext,
                     config: BeaconNodeConf,
                     netKeys: NetKeyPair,
                     cfg: RuntimeConfig,
                     forkDigests: ForkDigestsRef,
                     wallEpoch: Epoch,
                     genesisValidatorsRoot: Eth2Digest): Eth2Node
                    {.raises: [Defect, CatchableError].} =
  let
    enrForkId = getENRForkID(cfg, wallEpoch, genesisValidatorsRoot)

    (extIp, extTcpPort, extUdpPort) = try: setupAddress(
      config.nat, config.listenAddress, config.tcpPort, config.udpPort, clientId)
    except CatchableError as exc: raise exc
    except Exception as exc: raiseAssert exc.msg

    hostAddress = tcpEndPoint(config.listenAddress, config.tcpPort)
    announcedAddresses = if extIp.isNone() or extTcpPort.isNone(): @[]
                         else: @[tcpEndPoint(extIp.get(), extTcpPort.get())]

  debug "Initializing networking", hostAddress,
        network_public_key = netKeys.pubkey,
        announcedAddresses

  # TODO nim-libp2p still doesn't have support for announcing addresses
  # that are different from the host address (this is relevant when we
  # are running behind a NAT).
  var switch = newBeaconSwitch(config, netKeys.seckey, hostAddress, rng)

  func msgIdProvider(m: messages.Message): seq[byte] =
    let topic = getAltairTopic(m, forkDigests.altairTopicPrefix)
    try:
      let decoded = snappy.decode(m.data, GOSSIP_MAX_SIZE)
      gossipId(decoded, topic, true)
    except CatchableError:
      gossipId(m.data, topic, false)

  let
    params = GossipSubParams(
      explicit: true,
      pruneBackoff: 1.minutes,
      floodPublish: true,
      gossipFactor: 0.05,
      d: 8,
      dLow: 6,
      dHigh: 12,
      dScore: 6,
      dOut: 6 div 2, # less than dLow and no more than dLow/2
      dLazy: 6,
      heartbeatInterval: 700.milliseconds,
      historyLength: 6,
      historyGossip: 3,
      fanoutTTL: 60.seconds,
      seenTTL: 385.seconds,
      gossipThreshold: -4000,
      publishThreshold: -8000,
      graylistThreshold: -16000, # also disconnect threshold
      opportunisticGraftThreshold: 0,
      decayInterval: 12.seconds,
      decayToZero: 0.01,
      retainScore: 385.seconds,
      appSpecificWeight: 0.0,
      ipColocationFactorWeight: -53.75,
      ipColocationFactorThreshold: 3.0,
      behaviourPenaltyWeight: -15.9,
      behaviourPenaltyDecay: 0.986,
      disconnectBadPeers: true,
      directPeers:
        block:
          var res = initTable[PeerId, seq[MultiAddress]]()
          if config.directPeers.len > 0:
            for s in config.directPeers:
              let
                maddress = MultiAddress.init(s).tryGet()
                mpeerId = maddress[multiCodec("p2p")].tryGet()
                peerId = PeerID.init(mpeerId.protoAddress().tryGet()).tryGet()
              res.mgetOrPut(peerId, @[]).add(maddress)
              info "Adding privileged direct peer", peerId, address = maddress
          res
    )
    pubsub = try: GossipSub.init(
        switch = switch,
        msgIdProvider = msgIdProvider,
        triggerSelf = true,
        sign = false,
        verifySignature = false,
        anonymize = true,
        parameters = params)
      except CatchableError as exc: raise exc
      except Exception as exc: raiseAssert exc.msg # TODO fix libp2p
  switch.mount(pubsub)

  Eth2Node.new(config, enrForkId,
               forkDigests,
               switch, pubsub,
               extIp, extTcpPort, extUdpPort,
               netKeys.seckey.asEthKey,
               discovery = config.discv5Enabled,
               rng = rng)

proc announcedENR*(node: Eth2Node): enr.Record =
  doAssert node.discovery != nil, "The Eth2Node must be initialized"
  node.discovery.localNode.record

proc shortForm*(id: NetKeyPair): string =
  $PeerID.init(id.pubkey)

proc subscribe*(
    node: Eth2Node, topic: string, topicParams: TopicParams,
    enableTopicMetrics: bool = false) {.raises: [Defect, CatchableError].} =
  proc dummyMsgHandler(topic: string, data: seq[byte]): Future[void] =
    # Avoid closure environment with `{.async.}`
    var res = newFuture[void]("eth2_network.dummyMsgHandler")
    res.complete()
    res

  let
    topicName = topic & "_snappy"

  if enableTopicMetrics:
    node.pubsub.knownTopics.incl(topicName)

  node.pubsub.topicParams[topicName] = topicParams
  try:
    node.pubsub.subscribe(topicName, dummyMsgHandler)
  except CatchableError as exc: raise exc # TODO fix libp2p
  except Exception as exc: raiseAssert exc.msg

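# Informative note: every pubsub topic used on the wire carries a "_snappy"
# suffix, appended here and in `unsubscribe`/`broadcast` below. Assuming the
# usual topic shape, a call such as
#   node.subscribe("/eth2/01020304/beacon_block/ssz", TopicParams.init())
# subscribes the underlying gossipsub instance to
#   "/eth2/01020304/beacon_block/ssz_snappy"
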
proc setValidTopics*(node: Eth2Node, topics: openArray[string]) =
  let topicsSnappy = topics.mapIt(it & "_snappy")
  node.validTopics = topicsSnappy.toHashSet()
  # There is a window of time where the switch is already open before the
  # valid topics are known, so we need this lazy update for now.
  node.pubsub.subscriptionValidator =
    proc(topic: string): bool {.gcsafe, raises: [Defect].} =
      topic in node.validTopics

proc newValidationResultFuture(v: ValidationResult): Future[ValidationResult] =
  let res = newFuture[ValidationResult]("eth2_network.execValidator")
  res.complete(v)
  res

proc addValidator*[MsgType](node: Eth2Node,
                            topic: string,
                            msgValidator: proc(msg: MsgType):
                            ValidationResult {.gcsafe, raises: [Defect].} ) =
  # Validate messages as soon as subscribed
  proc execValidator(
      topic: string, message: GossipMsg): Future[ValidationResult] {.raises: [Defect].} =
    inc nbc_gossip_messages_received
    trace "Validating incoming gossip message",
      len = message.data.len, topic

    let res =
      try:
        var decompressed = snappy.decode(message.data, GOSSIP_MAX_SIZE)
        if decompressed.len > 0:
          let decoded = SSZ.decode(decompressed, MsgType)
          decompressed = newSeq[byte](0) # release memory before validating
          msgValidator(decoded)
        else:
          debug "Empty gossip data after decompression",
            topic, len = message.data.len
          ValidationResult.Ignore
      except CatchableError as err:
        debug "Gossip validation error",
          msg = err.msg, topic, len = message.data.len
        ValidationResult.Ignore
    return newValidationResultFuture(res)

  try:
    node.pubsub.addValidator(topic & "_snappy", execValidator)
  except Exception as exc: raiseAssert exc.msg # TODO fix libp2p

proc addAsyncValidator*[MsgType](node: Eth2Node,
                                 topic: string,
                                 msgValidator: proc(msg: MsgType):
                                 Future[ValidationResult] {.gcsafe, raises: [Defect].} ) =
  proc execValidator(
      topic: string, message: GossipMsg): Future[ValidationResult] {.raises: [Defect].} =
    inc nbc_gossip_messages_received
    trace "Validating incoming gossip message",
      len = message.data.len, topic

    let res =
      try:
        var decompressed = snappy.decode(message.data, GOSSIP_MAX_SIZE)
        if decompressed.len > 0:
          let decoded = SSZ.decode(decompressed, MsgType)
          decompressed = newSeq[byte](0) # release memory before validating
          return msgValidator(decoded) # Reuses future from msgValidator
        else:
          debug "Empty gossip data after decompression",
            topic, len = message.data.len
          ValidationResult.Ignore
      except CatchableError as err:
        debug "Gossip validation error",
          msg = err.msg, topic, len = message.data.len
        ValidationResult.Ignore
    return newValidationResultFuture(res)

  try:
    node.pubsub.addValidator(topic & "_snappy", execValidator)
  except Exception as exc: raiseAssert exc.msg # TODO fix libp2p

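# Illustrative usage sketch (the topic variable and helper procs below are
# placeholders, not part of this module): validators are registered per decoded
# message type; the wrappers above handle snappy decompression and SSZ decoding
# before invoking them.
#   node.addValidator(blockTopic) do (msg: phase0.SignedBeaconBlock) -> ValidationResult:
#     if cheapCheck(msg): ValidationResult.Accept else: ValidationResult.Reject
#   node.addAsyncValidator(blockTopic) do (msg: phase0.SignedBeaconBlock) -> Future[ValidationResult]:
#     expensiveAsyncCheck(msg)  # hypothetical helper returning a Future
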
proc unsubscribe*(node: Eth2Node, topic: string) {.raises: [Defect, CatchableError].} =
  try:
    node.pubsub.unsubscribeAll(topic & "_snappy")
  except CatchableError as exc: raise exc
  except Exception as exc: raiseAssert exc.msg # TODO fix libp2p

proc traceMessage(fut: FutureBase, msgId: seq[byte]) =
  fut.addCallback do (arg: pointer):
    if not(fut.failed):
      trace "Outgoing pubsub message sent", msgId = byteutils.toHex(msgId)
    elif fut.error != nil:
      debug "Gossip message not sent",
        msgId = byteutils.toHex(msgId), err = fut.error.msg
    else:
      debug "Unexpected future state for gossip",
        msgId = byteutils.toHex(msgId), state = fut.state

proc broadcast*(node: Eth2Node, topic: string, msg: auto) =
  try:
    let
      uncompressed = SSZ.encode(msg)
      compressed = try: snappy.encode(uncompressed)
      except InputTooLarge:
        raiseAssert "More than 4gb? not likely.."

    # This is only for messages we create. A message this large amounts to an
    # internal logic error.
    doAssert uncompressed.len <= GOSSIP_MAX_SIZE
    inc nbc_gossip_messages_sent

    var futSnappy = try: node.pubsub.publish(topic & "_snappy", compressed)
    except Exception as exc:
      raiseAssert exc.msg # TODO fix libp2p
    traceMessage(futSnappy, gossipId(uncompressed, topic & "_snappy", true))
  except IOError as exc:
    raiseAssert exc.msg # TODO in-memory compression shouldn't fail

proc subscribeAttestationSubnets*(node: Eth2Node, subnets: BitArray[ATTESTATION_SUBNET_COUNT],
                                  forkDigest: ForkDigest)
                                 {.raises: [Defect, CatchableError].} =
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attestations-and-aggregation
  # Nimbus won't score attestation subnets for now; we just rely on the block
  # and aggregate topics, which are more stable and reliable.

  for subnet_id, enabled in subnets:
    if enabled:
      node.subscribe(getAttestationTopic(
        forkDigest, SubnetId(subnet_id)), TopicParams.init()) # don't score attestation subnets for now

proc unsubscribeAttestationSubnets*(node: Eth2Node, subnets: BitArray[ATTESTATION_SUBNET_COUNT],
                                    forkDigest: ForkDigest)
                                   {.raises: [Defect, CatchableError].} =
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attestations-and-aggregation
  # Nimbus won't score attestation subnets for now; we just rely on the block
  # and aggregate topics, which are more stable and reliable.

  for subnet_id, enabled in subnets:
    if enabled:
      node.unsubscribe(getAttestationTopic(forkDigest, SubnetId(subnet_id)))

proc updateStabilitySubnetMetadata*(
    node: Eth2Node, attnets: BitArray[ATTESTATION_SUBNET_COUNT]) =
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#metadata
  node.metadata.seq_number += 1
  node.metadata.attnets = attnets

  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
  let res = node.discovery.updateRecord({
    enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets)
  })
  if res.isErr():
    # This should not occur in this scenario as the private key would always
    # be the correct one and the ENR will not increase in size.
    warn "Failed to update the ENR attnets field", error = res.error
  else:
    debug "Stability subnets changed; updated ENR attnets", attnets

proc updateSyncnetsMetadata*(
    node: Eth2Node, syncnets: BitArray[altair.SYNC_COMMITTEE_SUBNET_COUNT]) =
  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.8/specs/altair/validator.md#sync-committee-subnet-stability
  node.metadata.seq_number += 1
  node.metadata.syncnets = syncnets

  let res = node.discovery.updateRecord({
    enrSyncSubnetsField: SSZ.encode(node.metadata.syncnets)
  })
  if res.isErr():
    # This should not occur in this scenario as the private key would always
    # be the correct one and the ENR will not increase in size.
    warn "Failed to update the ENR syncnets field", error = res.error
  else:
    debug "Sync committees changed; updated ENR syncnets", syncnets

proc updateForkId*(node: Eth2Node, value: ENRForkID) =
  node.forkId = value
  let res = node.discovery.updateRecord({enrForkIdField: SSZ.encode value})
  if res.isErr():
    # This should not occur in this scenario as the private key would always
    # be the correct one and the ENR will not increase in size.
    warn "Failed to update the ENR fork id", value, error = res.error
  else:
    debug "ENR fork id changed", value

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
func getStabilitySubnetLength*(node: Eth2Node): uint64 =
  EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION +
    node.rng[].rand(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION.int).uint64

func getRandomSubnetId*(node: Eth2Node): SubnetId =
  node.rng[].rand(ATTESTATION_SUBNET_COUNT - 1).SubnetId