2019-10-01 13:52:28 +00:00
|
|
|
# beacon_chain
|
2020-02-21 12:16:58 +00:00
|
|
|
# Copyright (c) 2018-2020 Status Research & Development GmbH
|
2019-10-01 13:52:28 +00:00
|
|
|
# Licensed and distributed under either of
|
2019-11-25 15:30:02 +00:00
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
2019-10-01 13:52:28 +00:00
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
2020-09-11 17:46:48 +00:00
|
|
|
import std/[sequtils, strutils, os, tables, options]
|
2020-05-09 14:18:58 +00:00
|
|
|
import confutils, chronicles, chronos
|
2020-07-01 11:41:40 +00:00
|
|
|
import libp2p/[switch, standard_setup, multiaddress, multicodec, peerinfo]
|
2020-05-09 14:18:58 +00:00
|
|
|
import libp2p/crypto/crypto as lcrypto
|
2020-05-20 04:57:39 +00:00
|
|
|
import libp2p/crypto/secp as lsecp
|
2020-08-08 20:52:02 +00:00
|
|
|
import libp2p/protocols/pubsub/[pubsub, gossipsub]
|
2020-05-09 14:18:58 +00:00
|
|
|
import eth/p2p/discoveryv5/enr as enr
|
2020-09-11 17:46:48 +00:00
|
|
|
import eth/p2p/discoveryv5/[protocol, node]
|
|
|
|
import eth/keys as ethkeys
|
2020-05-09 14:18:58 +00:00
|
|
|
import stew/[results, objects]
|
2019-10-01 13:52:28 +00:00
|
|
|
import stew/byteutils as bu
|
2020-06-05 17:06:24 +00:00
|
|
|
import stew/shims/net
|
2020-05-09 14:18:58 +00:00
|
|
|
import nimcrypto/[hash, keccak]
|
|
|
|
import secp256k1 as s
|
|
|
|
import stint
|
|
|
|
import snappy
|
2021-03-02 14:02:10 +00:00
|
|
|
import ../beacon_chain/spec/[crypto, datatypes, network, digest]
|
|
|
|
import ../beacon_chain/ssz/ssz_serialization
|
2019-10-01 13:52:28 +00:00
|
|
|
|
|
|
|
const
  # Tool identity and semantic version; assembled into the ident/header
  # strings printed at startup and used as the libp2p agent string.
  InspectorName* = "Beacon-Chain Network Inspector"

  InspectorMajor*: int = 0
  InspectorMinor*: int = 0
  InspectorPatch*: int = 3
  InspectorVersion* = $InspectorMajor & "." & $InspectorMinor & "." &
                      $InspectorPatch
  # "Inspector/<version> (<cpu>/<os>)", e.g. "Inspector/0.0.3 (amd64/linux)".
  InspectorIdent* = "Inspector/$1 ($2/$3)" % [InspectorVersion,
                                              hostCPU, hostOS]
  InspectorCopyright* = "Copyright(C) 2020" &
                        " Status Research & Development GmbH"
  # Multi-line banner printed on startup.
  InspectorHeader* = InspectorName & ", Version " & InspectorVersion &
                     " [" & hostOS & ": " & hostCPU & "]\r\n" &
                     InspectorCopyright & "\r\n"

  # MultiAddress patterns used to classify bootstrap addresses:
  # DiscoveryV5 nodes advertise UDP + p2p, Eth2 libp2p nodes TCP + p2p.
  DISCV5BN* = mapAnd(UDP, mapEq("p2p"))
  ETH2BN* = mapAnd(TCP, mapEq("p2p"))
2019-10-01 13:52:28 +00:00
|
|
|
type
  DiscoveryProtocol* = protocol.Protocol

  # The raw "eth2"/"attnets" ENR field values advertised by a beacon node.
  ENRFieldPair* = object
    eth2: seq[byte]
    attnets: seq[byte]

  # SSZ-encoded payload of the "eth2" ENR field (spec type ENRForkID).
  ENRForkID* = object
    fork_digest*: ForkDigest
    next_fork_version*: Version
    next_fork_epoch*: Epoch

  # Gossip topic families the inspector can subscribe to.
  TopicFilter* {.pure.} = enum
    Blocks, Attestations, Exits, ProposerSlashing, AttesterSlashings

  BootstrapKind* {.pure.} = enum
    Enr, MultiAddr

  StartUpCommand* {.pure.} = enum
    noCommand

  # A bootstrap node address: either an ENR record or a raw multiaddress.
  BootstrapAddress* = object
    case kind*: BootstrapKind
    of BootstrapKind.Enr:
      addressRec: enr.Record
    of BootstrapKind.MultiAddr:
      addressMa: MultiAddress

  # Command-line configuration, parsed by confutils from the pragmas below.
  InspectorConf* = object
    logLevel* {.
      defaultValue: LogLevel.TRACE
      desc: "Sets the inspector's verbosity log level"
      abbr: "v"
      name: "verbosity" }: LogLevel

    fullPeerId* {.
      defaultValue: false
      desc: "Sets the inspector full PeerID output"
      abbr: "p"
      name: "fullpeerid" }: bool

    floodSub* {.
      defaultValue: true
      desc: "Sets inspector engine to FloodSub"
      abbr: "f"
      name: "floodsub" }: bool

    gossipSub* {.
      defaultValue: false
      desc: "Sets inspector engine to GossipSub"
      abbr: "g"
      name: "gossipsub" }: bool

    forkDigest* {.
      defaultValue: "",
      desc: "Sets the fork-digest value used to construct all topic names"
      name: "forkdigest"}: string

    signFlag* {.
      defaultValue: false
      # Fixed help text: "Sets the inspector's to send/verify" was ungrammatical.
      desc: "Sets the inspector to send/verify signatures in pubsub messages"
      abbr: "s"
      name: "sign" }: bool

    topics* {.
      # Fixed abbreviation brackets: "[ps]roposer"/"[as]ttester" were garbled.
      desc: "Sets monitored topics, where `*` - all, " &
            "[a]ttestations, [b]locks, [e]xits, " &
            "[ps] proposer slashings, [as] attester slashings"
      abbr: "t"
      name: "topics" }: seq[string]

    customTopics* {.
      desc: "Sets custom monitored topics"
      abbr: "c"
      name: "custom" }: seq[string]

    bootstrapFile* {.
      defaultValue: ""
      # Fixed typo in the help string: "delimeted" -> "delimited".
      desc: "Specifies file which holds bootstrap nodes multiaddresses " &
            "delimited by CRLF"
      abbr: "l"
      name: "bootfile" }: string

    bootstrapNodes* {.
      desc: "Specifies one or more bootstrap nodes" &
            " to use when connecting to the network"
      abbr: "b"
      name: "bootnodes" }: seq[string]

    decode* {.
      desc: "Try to decode message using SSZ"
      abbr: "d"
      defaultValue: false }: bool

    discoveryPort* {.
      desc: "DiscoveryV5 UDP port number"
      defaultValue: 9000 }: int

    ethPort* {.
      desc: "Ethereum2 TCP port number",
      defaultValue: 9000 }: int

    bindAddress* {.
      desc: "Bind Discovery to MultiAddress",
      defaultValue: "/ip4/0.0.0.0".}: string

    maxPeers* {.
      desc: "Maximum number of peers",
      defaultValue: 100.}: int

    noDiscovery* {.
      desc: "Disable discovery",
      defaultValue: false.}: bool

  # Result with a string error, used by the PeerInfo constructors below.
  StrRes[T] = Result[T, string]
|
2020-11-20 10:00:22 +00:00
|
|
|
func `==`(a, b: ENRFieldPair): bool =
  ## Two field pairs compare equal when their ``eth2`` payloads match;
  ## ``attnets`` is not compared (callers only warn about eth2 drift).
  a.eth2 == b.eth2
|
2020-06-30 09:17:49 +00:00
|
|
|
func hasTCP(a: PeerInfo): bool =
  ## Whether the peer advertises at least one TCP-based multiaddress.
  anyIt(a.addrs, TCP.match(it))
|
|
|
|
|
2020-06-30 09:17:49 +00:00
|
|
|
func toNodeId(a: PeerID): Option[NodeId] =
  ## Derive a discv5 NodeId from a libp2p PeerID.
  ## Only succeeds for secp256k1 peers whose PeerID embeds the public key;
  ## returns `none` in every other case.
  var buffer: array[64, byte]
  if a.hasPublicKey():
    var pubkey: lcrypto.PublicKey
    if extractPublicKey(a, pubkey):
      if pubkey.scheme == PKScheme.Secp256k1:
        # Raw uncompressed secp256k1 key is 65 bytes (0x04 prefix + X||Y);
        # skip the prefix byte and keccak-hash the 64 coordinate bytes,
        # matching discv5's node-id derivation.
        let tmp = s.SkPublicKey(pubkey.skkey).toRaw()
        copyMem(addr buffer[0], unsafeAddr tmp[1], 64)
        result = some(readUintBE[256](keccak256.digest(buffer).data))
|
|
|
|
|
|
|
|
chronicles.formatIt PeerInfo: it.shortLog
chronicles.formatIt seq[PeerInfo]:
  # Render a peer list as "[a, b, c]" using each peer's short log form.
  var parts = newSeq[string]()
  for pinfo in it.items():
    parts.add($pinfo.shortLog())
  "[" & parts.join(", ") & "]"
|
|
|
|
|
|
|
|
func getTopics(forkDigest: ForkDigest,
               filter: TopicFilter): seq[string] =
  ## Expand a topic filter into concrete gossip topic names for the given
  ## fork digest. Only the snappy-compressed topic variants are returned.
  const suffix = "_snappy"
  case filter
  of TopicFilter.Blocks:
    @[getBeaconBlocksTopic(forkDigest) & suffix]
  of TopicFilter.Exits:
    @[getVoluntaryExitsTopic(forkDigest) & suffix]
  of TopicFilter.ProposerSlashing:
    @[getProposerSlashingsTopic(forkDigest) & suffix]
  of TopicFilter.AttesterSlashings:
    @[getAttesterSlashingsTopic(forkDigest) & suffix]
  of TopicFilter.Attestations:
    # One topic per attestation subnet.
    mapIt(0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64,
          getAttestationTopic(forkDigest, it) & suffix)
|
2019-10-01 13:52:28 +00:00
|
|
|
|
|
|
|
proc loadBootFile(name: string): seq[string] =
  ## Read bootstrap node addresses from `name`, one entry per line.
  ## Returns an empty sequence when the file cannot be read.
  try:
    result = readFile(name).splitLines()
  except CatchableError:
    # Was a bare `except:` which would also swallow Defects; a missing or
    # unreadable file is non-fatal — the caller falls back to CLI bootnodes.
    discard
|
|
|
|
|
2020-06-30 09:17:49 +00:00
|
|
|
func unpackYmlLine(line: string): string =
  ## Extract the quoted value from a YAML list entry such as `- "enr:..."`.
  ## Any line that does not match that exact shape is returned unchanged.
  let chunks = line.strip().split({'"'})
  if len(chunks) == 3 and chunks[0].startsWith("-") and len(chunks[2]) == 0:
    chunks[1]
  else:
    line
|
|
|
|
|
|
|
|
proc getBootstrapAddress(bootnode: string): Option[BootstrapAddress] =
  ## Parse one bootstrap entry: either an "enr:..." URI or a multiaddress
  ## matching ETH2BN/DISCV5BN. YAML list entries (`- "..."`) are unpacked
  ## first. Returns `none` — after logging a warning — for anything else.
  var rec: enr.Record
  try:
    var stripped = bootnode.strip()
    if stripped.startsWith("-"):
      # Entry copied out of a YAML bootstrap list.
      stripped = unpackYmlLine(stripped)
    if len(stripped) > 0:
      if stripped.startsWith("enr:"):
        if fromURI(rec, EnrUri(stripped)):
          let res = BootstrapAddress(kind: BootstrapKind.Enr, addressRec: rec)
          return some(res)
        else:
          warn "Incorrect or empty ENR bootstrap address", address = stripped
      else:
        let maRes = MultiAddress.init(stripped)
        # Silently give up (no warning) when the string is not a multiaddress.
        let ma = if maRes.isOk: maRes.get
                 else: return
        if ETH2BN.match(ma) or DISCV5BN.match(ma):
          let res = BootstrapAddress(kind: BootstrapKind.MultiAddr,
                                     addressMa: ma)
          return some(res)
        else:
          warn "Incorrect MultiAddress bootstrap address", address = stripped
  except CatchableError as exc:
    warn "Incorrect bootstrap address", address = bootnode, errMsg = exc.msg
|
|
|
|
|
2020-06-30 09:17:49 +00:00
|
|
|
func tryGetForkDigest(bootnode: enr.Record): Option[ForkDigest] =
  ## Decode the record's "eth2" field as an SSZ `ENRForkID` and return its
  ## fork digest; `none` when the field is absent or cannot be decoded.
  let sszForkData = bootnode.tryGet("eth2", seq[byte])
  if sszForkData.isSome():
    try:
      let forkId = SSZ.decode(sszForkData.get(), ENRForkID)
      result = some(forkId.fork_digest)
    except CatchableError:
      discard
|
|
|
|
|
2020-06-30 09:17:49 +00:00
|
|
|
func tryGetFieldPairs(bootnode: enr.Record): Option[ENRFieldPair] =
  ## Return the record's ("eth2", "attnets") fields when both are present.
  let
    sszEth2 = bootnode.tryGet("eth2", seq[byte])
    sszAttnets = bootnode.tryGet("attnets", seq[byte])
  if sszEth2.isSome() and sszAttnets.isSome():
    result = some(ENRFieldPair(eth2: sszEth2.get(),
                               attnets: sszAttnets.get()))
|
|
|
|
|
2020-06-30 09:17:49 +00:00
|
|
|
func tryGetForkDigest(hexdigest: string): Option[ForkDigest] =
  ## Parse a 4-byte fork digest from its hex representation;
  ## `none` for an empty or malformed string.
  if len(hexdigest) > 0:
    var digest: ForkDigest
    try:
      hexToByteArray(hexdigest, array[4, byte](digest))
      result = some(digest)
    except CatchableError:
      discard
|
|
|
|
|
2020-06-30 09:17:49 +00:00
|
|
|
func tryGetMultiAddress(address: string): Option[MultiAddress] =
  ## Parse `address` and return it only when it is a bare IPv4/IPv6
  ## multiaddress; `none` for anything else (including parse failures).
  let parsed = MultiAddress.init(address)
  if parsed.isErr():
    return
  let ma = parsed.get()
  if IP4.match(ma) or IP6.match(ma):
    result = some(ma)
|
2020-05-09 14:18:58 +00:00
|
|
|
|
|
|
|
proc loadBootstrapNodes(conf: InspectorConf): seq[BootstrapAddress] =
  ## Collect bootstrap addresses from the optional bootstrap file and the
  ## command line, in that order. Entries that fail to parse are skipped
  ## (getBootstrapAddress logs the warning).
  result = newSeq[BootstrapAddress]()

  var candidates = newSeq[string]()
  if len(conf.bootstrapFile) > 0:
    info "Loading bootstrap nodes from file", filename = conf.bootstrapFile
    candidates.add(loadBootFile(conf.bootstrapFile))
  candidates.add(conf.bootstrapNodes)

  # Single parsing loop replaces the previously duplicated file/CLI loops.
  for nodeString in candidates:
    let res = getBootstrapAddress(nodeString)
    if res.isSome():
      result.add(res.get())
|
|
|
|
|
2020-11-20 10:00:22 +00:00
|
|
|
proc init(p: typedesc[PeerInfo],
          maddr: MultiAddress): StrRes[PeerInfo] =
  ## Initialize PeerInfo using address which includes PeerID.
  ## Expects an IPFS-style address (`/ip../tcp|udp../p2p/<peerid>`): the
  ## third component carries the peer id, the first two the transport.
  ## Any failing `?` step propagates the string error to the caller.
  if IPFS.match(maddr):
    let peerid = ? protoAddress(? maddr[2])
    result = ok(PeerInfo.init(
      ? (PeerID.init(peerid).mapErr(proc (v: cstring): string = $v)),
      [(? maddr[0]) & (? maddr[1])]))
|
2020-05-09 14:18:58 +00:00
|
|
|
|
2020-11-20 10:00:22 +00:00
|
|
|
proc init(p: typedesc[PeerInfo],
          enraddr: enr.Record): StrRes[PeerInfo] =
  ## Build a PeerInfo from an ENR record: derive the PeerID from the
  ## record's secp256k1 key and collect every advertised ip/tcp and
  ## ip/udp address combination.
  ## NOTE(review): when the record lacks a secp256k1 key or it cannot be
  ## parsed, `result` is left default-initialized — confirm callers treat
  ## that as an error result.
  var trec: enr.TypedRecord
  try:
    let trecOpt = enraddr.toTypedRecord()
    if trecOpt.isOk():
      trec = trecOpt.get()
      if trec.secp256k1.isSome():
        let skpubkey = ethkeys.PublicKey.fromRaw(trec.secp256k1.get())
        if skpubkey.isOk():
          let peerid = PeerID.init(
            PublicKey(scheme: Secp256k1,
                      skkey: lsecp.SkPublicKey(skpubkey.get())))
          var mas = newSeq[MultiAddress]()
          # Each branch adds one transport address when both the IP and
          # the corresponding port field are present in the record.
          if trec.ip.isSome() and trec.tcp.isSome():
            let ma = (? MultiAddress.init(multiCodec("ip4"), trec.ip.get())) &
                     (? MultiAddress.init(multiCodec("tcp"), trec.tcp.get()))
            mas.add(ma)
          if trec.ip6.isSome() and trec.tcp6.isSome():
            let ma = (? MultiAddress.init(multiCodec("ip6"), trec.ip6.get())) &
                     (? MultiAddress.init(multiCodec("tcp"), trec.tcp6.get()))
            mas.add(ma)
          if trec.ip.isSome() and trec.udp.isSome():
            let ma = (? MultiAddress.init(multiCodec("ip4"), trec.ip.get())) &
                     (? MultiAddress.init(multiCodec("udp"), trec.udp.get()))
            mas.add(ma)
          if trec.ip6.isSome() and trec.udp6.isSome():
            let ma = (? MultiAddress.init(multiCodec("ip6"), trec.ip6.get())) &
                     (? MultiAddress.init(multiCodec("udp"), trec.udp6.get()))
            mas.add(ma)
          # NOTE(review): `peerid.tryGet()` raises when PeerID.init failed;
          # the CatchableError handler below turns that into a warning.
          result = ok PeerInfo.init(peerid.tryGet(), mas)
  except CatchableError as exc:
    warn "Error", errMsg = exc.msg, record = enraddr.toUri()
|
|
|
|
|
|
|
|
proc connectToNetwork(switch: Switch, nodes: seq[PeerInfo],
                      timeout: Duration): Future[seq[PeerInfo]] {.async.} =
  ## Dial all `nodes` concurrently and wait up to `timeout` for the dials
  ## to complete. Returns the subset of peers that connected successfully;
  ## unfinished dials are cancelled and counted as timed out.
  var pending = newSeq[Future[void]]()
  var res = newSeq[PeerInfo]()
  var timed, succeed, failed: int

  for pinfo in nodes:
    pending.add(switch.connect(pinfo.peerId, pinfo.addrs))

  debug "Connecting to peers", count = $len(pending), peers = nodes

  if len(pending) > 0:
    # Race the whole batch against a single timeout timer.
    var timer = sleepAsync(timeout)
    discard await one(timer, allFutures(pending))
    for i in 0 ..< len(pending):
      let fut = pending[i]
      if fut.finished():
        if fut.failed():
          inc(failed)
          warn "Unable to connect to node", address = nodes[i],
               errMsg = fut.readError().msg
        else:
          inc(succeed)
          info "Connected to node", address = nodes[i]
          res.add(nodes[i])
      else:
        # Dial still in flight when the timer fired: cancel it.
        inc(timed)
        fut.cancel()
        warn "Connection to node timed out", address = nodes[i]

  debug "Connection statistics", succeed = succeed, failed = failed,
        timeout = timed, count = $len(pending)

  result = res
|
|
|
|
|
2020-11-20 10:00:22 +00:00
|
|
|
proc connectLoop(switch: Switch,
                 peerQueue: AsyncQueue[PeerInfo],
                 peerTable: TableRef[PeerID, PeerInfo],
                 timeout: Duration): Future[void] {.async.} =
  ## Endless worker: drain newly-discovered peers from `peerQueue` in
  ## batches, dial each batch via connectToNetwork, and record successful
  ## connections in `peerTable`.
  var addresses = newSeq[PeerInfo]()
  trace "Starting connection loop", queue_size = len(peerQueue),
        table_size = len(peerTable),
        timeout = timeout
  while true:
    if len(addresses) > 0:
      addresses.setLen(0)
    # Block for the first peer, then greedily take whatever else is queued
    # so one dial batch covers the whole backlog.
    let ma = await peerQueue.popFirst()
    addresses.add(ma)
    while not(peerQueue.empty()):
      addresses.add(peerQueue.popFirstNoWait())
    trace "Got new peers", count = len(addresses)
    var infos = await switch.connectToNetwork(addresses, timeout)
    for item in infos:
      peerTable[item.peerId] = item
|
|
|
|
|
2020-11-20 10:00:22 +00:00
|
|
|
func toIpAddress(ma: MultiAddress): Option[ValidIpAddress] =
  ## Convert an ip4/ip6 multiaddress into a ValidIpAddress; `none` for any
  ## other protocol or when the address bytes cannot be extracted.
  if IP4.match(ma):
    let raw = ma.protoAddress()
    if raw.isOk():
      result = some(ipv4 toArray(4, raw.get()))
  elif IP6.match(ma):
    let raw = ma.protoAddress()
    if raw.isOk():
      result = some(ipv6 toArray(16, raw.get()))
|
2020-05-09 14:18:58 +00:00
|
|
|
|
|
|
|
proc bootstrapDiscovery(conf: InspectorConf,
                        host: MultiAddress,
                        privkey: lcrypto.PrivateKey,
                        bootnodes: seq[enr.Record],
                        enrFields: Option[ENRFieldPair]): DiscoveryProtocol =
  ## Create, open and start a discv5 instance bound to the UDP/TCP ports
  ## from `conf`, seeded with `bootnodes`. When `enrFields` is given, the
  ## same eth2/attnets pairs are advertised in our own ENR so real beacon
  ## nodes will talk to us.
  var pk = ethkeys.PrivateKey(privkey.skkey)
  let udpPort = Port(conf.discoveryPort)
  let tcpPort = Port(conf.ethPort)
  # Deliberately shadows the MultiAddress parameter with its
  # Option[ValidIpAddress] form for the newProtocol call.
  let host = host.toIpAddress()
  if enrFields.isSome():
    let fields = enrFields.get()
    let pairs = {"eth2": fields.eth2, "attnets": fields.attnets}
    result = newProtocol(pk, host, some(tcpPort), some(udpPort), pairs,
                         bootnodes, bindPort = udpPort)
  else:
    result = newProtocol(pk, host, some(tcpPort), some(udpPort), [],
                         bootnodes, bindPort = udpPort)
  result.open()
  result.start()
|
|
|
|
|
|
|
|
proc logEnrAddress(address: string) =
  ## Parse an "enr:" URI and log every field of interest — transport
  ## addresses, secp256k1 key, and the decoded eth2/attnets payloads.
  ## Purely informational; malformed records are logged as such.
  var
    rec: enr.Record
    trec: enr.TypedRecord
    eth2fork_digest, eth2next_fork_version, eth2next_fork_epoch: string
    attnets: string

  if fromURI(rec, EnrUri(address)):
    var eth2Data = rec.tryGet("eth2", seq[byte])
    var attnData = rec.tryGet("attnets", seq[byte])
    var optrec = rec.toTypedRecord()

    if optrec.isOk():
      trec = optrec.get()

      # Decode the eth2 field as an SSZ ENRForkID; "Error"/"None" markers
      # are used in the log output when decoding fails or the field is absent.
      if eth2Data.isSome():
        try:
          var forkid = SSZ.decode(eth2Data.get(), ENRForkID)
          eth2fork_digest = $forkid.fork_digest
          eth2next_fork_version = $forkid.next_fork_version
          eth2next_fork_epoch = strutils.toHex(uint64(forkid.next_fork_epoch))
        except CatchableError:
          eth2fork_digest = "Error"
          eth2next_fork_version = "Error"
          eth2next_fork_epoch = "Error"
      else:
        eth2fork_digest = "None"
        eth2next_fork_version = "None"
        eth2next_fork_epoch = "None"

      # NOTE(review): unlike the eth2 decode above, this SSZ.decode is not
      # wrapped in try/except — a malformed attnets field would propagate.
      if attnData.isSome():
        var attn = SSZ.decode(attnData.get(), List[byte, 9999999]) # TODO: what's the limit on that list?
        attnets = bu.toHex(attn.asSeq)
      else:
        attnets = "None"

      # NOTE(review): "fileds" typo in the log label below — fixing it would
      # change the emitted log output, so it is left untouched here.
      info "ENR bootstrap address fileds",
           enr_uri = address,
           enr_id = trec.id,
           secp256k1 = if trec.secp256k1.isSome():
               bu.toHex(trec.secp256k1.get())
             else:
               "None",
           ip4 = if trec.ip.isSome():
               $MultiAddress.init(multiCodec("ip4"), trec.ip.get())
             else:
               "None",
           ip6 = if trec.ip6.isSome():
               $MultiAddress.init(multiCodec("ip6"), trec.ip6.get())
             else:
               "None",
           tcp = if trec.tcp.isSome(): $trec.tcp.get() else: "None",
           udp = if trec.udp.isSome(): $trec.udp.get() else: "None",
           tcp6 = if trec.tcp6.isSome(): $trec.tcp6.get() else: "None",
           udp6 = if trec.udp6.isSome(): $trec.udp6.get() else: "None",
           eth2_fork_digest = eth2fork_digest,
           eth2_next_fork_version = eth2next_fork_version,
           eth2_next_fork_epoch = eth2next_fork_epoch,
           eth2_attnets = attnets
    else:
      info "ENR bootstrap address is wrong or incomplete", enr_uri = address
  else:
    info "ENR bootstrap address is wrong or incomplete", enr_uri = address
|
|
|
|
|
2020-11-20 10:00:22 +00:00
|
|
|
func init(p: typedesc[PeerInfo],
          enruri: EnrUri): Option[PeerInfo] =
  ## Parse an ENR URI, log its contents, and build a PeerInfo from it.
  # NOTE(review): declared to return Option[PeerInfo], yet
  # `PeerInfo.init(rec)` (the enr.Record overload above) returns
  # StrRes[PeerInfo]; also calls the logging proc `logEnrAddress` from a
  # `func` — confirm this overload is actually compiled/used.
  var rec: enr.Record
  if fromURI(rec, enruri):
    logEnrAddress(rec.toUri())
    result = PeerInfo.init(rec)
|
|
|
|
|
|
|
|
proc pubsubLogger(conf: InspectorConf, switch: Switch,
                  resolveQueue: AsyncQueue[PeerID], topic: string,
                  data: seq[byte]): Future[void] {.async.} =
  ## Gossipsub message handler: log every received message as hex and,
  ## when `--decode` is enabled, snappy-decompress it and attempt to
  ## SSZ-decode it according to its topic suffix.
  info "Received pubsub message", size = len(data),
       topic = topic,
       message = bu.toHex(data)
  var buffer: seq[byte]
  if conf.decode:
    if topic.endsWith("_snappy"):
      try:
        buffer = snappy.decode(data, GOSSIP_MAX_SIZE)
      except CatchableError as exc:
        warn "Unable to decompress message", errMsg = exc.msg
    else:
      buffer = data

    # Pick the SSZ type from the topic name; unknown topics fall through
    # without logging a decoded payload.
    try:
      if topic.endsWith(topicBeaconBlocksSuffix & "_snappy"):
        info "SignedBeaconBlock", msg = SSZ.decode(buffer, SignedBeaconBlock)
      elif topic.endsWith("_snappy") and topic.contains("/beacon_attestation_"):
        info "Attestation", msg = SSZ.decode(buffer, Attestation)
      elif topic.endsWith(topicVoluntaryExitsSuffix & "_snappy"):
        info "SignedVoluntaryExit", msg = SSZ.decode(buffer,
                                                    SignedVoluntaryExit)
      elif topic.endsWith(topicProposerSlashingsSuffix & "_snappy"):
        info "ProposerSlashing", msg = SSZ.decode(buffer, ProposerSlashing)
      elif topic.endsWith(topicAttesterSlashingsSuffix & "_snappy"):
        info "AttesterSlashing", msg = SSZ.decode(buffer, AttesterSlashing)
      elif topic.endsWith(topicAggregateAndProofsSuffix & "_snappy"):
        info "AggregateAndProof", msg = SSZ.decode(buffer, AggregateAndProof)

    except CatchableError as exc:
      info "Unable to decode message", errMsg = exc.msg
|
|
|
|
|
|
|
|
proc resolveLoop(conf: InspectorConf,
                 discovery: DiscoveryProtocol,
                 switch: Switch,
                 peerQueue: AsyncQueue[PeerID],
                 peers: TableRef[PeerID, PeerInfo]) {.async.} =
  ## Endless worker: take PeerIDs from `peerQueue`, map them to discv5
  ## NodeIds, resolve their ENR via discovery, and store the resulting
  ## PeerInfo in `peers`. PeerIDs that cannot be mapped (non-secp256k1)
  ## are silently dropped.
  debug "Starting resolution loop"
  while true:
    let peerId = await peerQueue.popFirst()
    let idOpt = peerId.toNodeId()
    if idOpt.isSome():
      try:
        let nodeOpt = await discovery.resolve(idOpt.get())
        if nodeOpt.isSome():
          let peerOpt = PeerInfo.init(nodeOpt.get().record)
          if peerOpt.isOk():
            let peer = peerOpt.get()
            trace "Peer resolved", peer_id = peerId,
                  node_id = idOpt.get(),
                  peer_info = peer
            peers[peerId] = peer
          else:
            warn "Peer's record is invalid", peer_id = peerId,
                 node_id = idOpt.get(),
                 peer_record = nodeOpt.get().record
        else:
          trace "Node resolution returns empty answer", peer_id = peerId,
                node_id = idOpt.get()

      except CatchableError as exc:
        warn "Node address resolution failed", errMsg = exc.msg,
             peer_id = peerId,
             node_id = idOpt.get()
|
|
|
|
|
|
|
|
proc discoveryLoop(conf: InspectorConf,
                   discovery: DiscoveryProtocol,
                   switch: Switch,
                   connQueue: AsyncQueue[PeerInfo],
                   peers: TableRef[PeerID, PeerInfo]) {.async.} =
  ## Endless worker: once per second ask discv5 for random nodes (capped
  ## so the total stays near `conf.maxPeers`) and enqueue every new
  ## TCP-capable peer onto `connQueue` for the connect loop.
  debug "Starting discovery loop"
  let wantedPeers = conf.maxPeers
  while true:
    try:
      let discoveredPeers = discovery.randomNodes(wantedPeers - len(peers))
      for peer in discoveredPeers:
        let pinfoOpt = PeerInfo.init(peer.record)
        if pinfoOpt.isOk():
          let pinfo = pinfoOpt.get()
          if pinfo.hasTCP():
            # Skip peers we are already connected to.
            if not switch.isConnected(pinfo.peerId):
              debug "Discovered new peer", peer = pinfo,
                    peers_count = len(peers)
              await connQueue.addLast(pinfo)
          else:
            debug "Found discovery only peer", peer = pinfo

    except CatchableError as exc:
      debug "Error in discovery", errMsg = exc.msg

    await sleepAsync(1.seconds)
|
|
|
|
|
|
|
|
proc run(conf: InspectorConf) {.async.} =
|
|
|
|
var
|
|
|
|
topics: set[TopicFilter] = {}
|
|
|
|
forkDigest: Option[ForkDigest]
|
|
|
|
enrFields: Option[ENRFieldPair]
|
|
|
|
|
|
|
|
var pubsubPeers = newTable[PeerID, PeerInfo]()
|
|
|
|
var resolveQueue = newAsyncQueue[PeerID](10)
|
|
|
|
var connectQueue = newAsyncQueue[PeerInfo](10)
|
2020-07-07 15:51:02 +00:00
|
|
|
let rng = lcrypto.newRng()
|
2020-05-09 14:18:58 +00:00
|
|
|
|
|
|
|
let bootnodes = loadBootstrapNodes(conf)
|
|
|
|
if len(bootnodes) == 0:
|
|
|
|
error "Not enough bootnodes to establish connection with network"
|
|
|
|
quit(1)
|
|
|
|
|
|
|
|
var eth2bootnodes = newSeq[PeerInfo]()
|
|
|
|
var disc5bootnodes = newSeq[enr.Record]()
|
|
|
|
|
|
|
|
for item in bootnodes:
|
|
|
|
if item.kind == BootstrapKind.Enr:
|
|
|
|
logEnrAddress(item.addressRec.toUri())
|
|
|
|
|
|
|
|
let pinfoOpt = PeerInfo.init(item.addressRec)
|
2020-06-05 17:06:24 +00:00
|
|
|
if pinfoOpt.isOk():
|
2020-05-09 14:18:58 +00:00
|
|
|
let pinfo = pinfoOpt.get()
|
|
|
|
for ma in pinfo.addrs:
|
|
|
|
if TCP.match(ma):
|
|
|
|
eth2bootnodes.add(pinfo)
|
|
|
|
break
|
|
|
|
for ma in pinfo.addrs:
|
|
|
|
if UDP.match(ma):
|
|
|
|
disc5bootnodes.add(item.addressRec)
|
|
|
|
break
|
|
|
|
|
|
|
|
let forkOpt = tryGetForkDigest(item.addressRec)
|
|
|
|
if forkOpt.isSome():
|
|
|
|
if forkDigest.isSome():
|
|
|
|
if forkDigest.get() != forkOpt.get():
|
|
|
|
warn "Bootstrap node address has different forkDigest",
|
|
|
|
address = item.addressRec.toUri(),
|
2020-05-11 18:08:52 +00:00
|
|
|
address_fork_digest = $(forkOpt.get()),
|
|
|
|
stored_fork_digest = $(forkDigest.get())
|
2020-05-09 14:18:58 +00:00
|
|
|
else:
|
|
|
|
forkDigest = forkOpt
|
|
|
|
|
|
|
|
let enrFieldsOpt = tryGetFieldPairs(item.addressRec)
|
|
|
|
if enrFieldsOpt.isSome():
|
|
|
|
if enrFields.isSome():
|
|
|
|
if enrFields.get() != enrFieldsOpt.get():
|
|
|
|
warn "Bootstrap node address has different eth2 values",
|
|
|
|
address = item.addressRec.toUri(),
|
|
|
|
eth2_field_stored = bu.toHex(enrFields.get().eth2),
|
|
|
|
eth2_field_address = bu.toHex(enrFieldsOpt.get().eth2)
|
|
|
|
else:
|
|
|
|
enrFields = enrFieldsOpt
|
|
|
|
|
|
|
|
elif item.kind == BootstrapKind.MultiAddr:
|
|
|
|
if ETH2BN.match(item.addressMa):
|
|
|
|
eth2bootnodes.add(PeerInfo.init(item.addressMa).get())
|
|
|
|
|
|
|
|
if len(eth2bootnodes) == 0:
|
|
|
|
error "Not enough Ethereum2 bootnodes to establish connection with network"
|
|
|
|
quit(1)
|
|
|
|
|
|
|
|
if len(disc5bootnodes) == 0:
|
|
|
|
warn "Not enough DiscoveryV5 bootnodes, discovery will be disabled"
|
|
|
|
|
|
|
|
# Resolve the fork digest to use for topic names: an explicit CLI value
# takes priority over the digest recovered from bootstrap-node ENRs.
var argForkDigest = tryGetForkDigest(conf.forkDigest)

if forkDigest.isNone():
  if argForkDigest.isNone():
    # Neither source provided a digest — we cannot build topic names.
    error "forkDigest argument and bootstrap forkDigest are missing"
    quit(1)
  else:
    forkDigest = argForkDigest
else:
  if argForkDigest.isSome():
    # BUG FIX: this previously compared `forkDigest.isSome() !=
    # argForkDigest.isSome()`, which is always false in this branch
    # (both are provably Some here), so the mismatch warning was dead
    # code. Compare the actual digest values instead.
    if forkDigest.get() != argForkDigest.get():
      warn "forkDigest argument value is different, using argument value",
           argument_fork_digest = argForkDigest.get(),
           bootstrap_fork_digest = forkDigest.get()
    # The CLI-supplied digest always wins.
    forkDigest = argForkDigest
|
2020-07-07 15:51:02 +00:00
|
|
|
# Generate an ephemeral secp256k1 identity key for this inspector session.
let seckey = lcrypto.PrivateKey.random(PKScheme.Secp256k1, rng[]).tryGet()
# let pubkey = seckey.getKey()

# Parse and validate the bind address supplied on the command line.
let hostAddress = tryGetMultiAddress(conf.bindAddress)
if hostAddress.isNone():
  error "Bind address is incorrect MultiAddress", address = conf.bindAddress
  quit(1)

# Bring up the libp2p switch and attach a gossipsub router to it.
# Message signing and signature verification are disabled — the inspector
# only observes traffic and does not need to authenticate as a publisher.
let switch = newStandardSwitch(some(seckey), hostAddress.get(), rng = rng)
let pubsub = GossipSub.init(
  switch = switch,
  triggerSelf = true, sign = false,
  verifySignature = false).PubSub
switch.mount(pubsub)
|
2019-10-01 13:52:28 +00:00
|
|
|
|
|
|
|
# Translate the `--topics` selection into the internal `TopicFilter` set.
# "*" (or an empty selection) subscribes to every known topic family;
# unrecognised entries are silently ignored.
if len(conf.topics) > 0:
  for item in conf.topics:
    case item.toLowerAscii()
    of "*":
      topics.incl({TopicFilter.Blocks, TopicFilter.Attestations,
                   TopicFilter.Exits, TopicFilter.ProposerSlashing,
                   TopicFilter.AttesterSlashings})
      # Everything is selected already — no point scanning further items.
      break
    of "a":
      topics.incl(TopicFilter.Attestations)
    of "b":
      topics.incl(TopicFilter.Blocks)
    of "e":
      topics.incl(TopicFilter.Exits)
    of "ps":
      topics.incl(TopicFilter.ProposerSlashing)
    of "as":
      topics.incl(TopicFilter.AttesterSlashings)
    else:
      discard
else:
  topics.incl({TopicFilter.Blocks, TopicFilter.Attestations,
               TopicFilter.Exits, TopicFilter.ProposerSlashing,
               TopicFilter.AttesterSlashings})
|
2019-10-01 13:52:28 +00:00
|
|
|
|
2020-05-09 14:18:58 +00:00
|
|
|
# Adapter matching the pubsub topic-handler signature: forwards every
# received message — together with the captured configuration, switch and
# resolve queue — to `pubsubLogger`.
proc pubsubTrampoline(topic: string,
                      data: seq[byte]): Future[void] {.gcsafe.} =
  pubsubLogger(conf, switch, resolveQueue, topic, data)
|
2019-10-01 13:52:28 +00:00
|
|
|
|
2020-08-08 20:52:02 +00:00
|
|
|
# Start networking; the futures returned by `switch.start()` are not
# tracked further.
discard await switch.start()
await pubsub.start()

# Subscribe to every selected beacon-chain topic (names derived from the
# fork digest) plus any custom topics from the command line.
# NOTE(review): `forkDigest.get()` relies on the digest having been
# resolved earlier in this procedure (we quit otherwise), so it is Some
# at this point.
var topicFilters = newSeq[string]()
try:
  for filter in topics:
    for topic in getTopics(forkDigest.get(), filter):
      pubsub.subscribe(topic, pubsubTrampoline)
      topicFilters.add(topic)
      trace "Subscribed to topic", topic = topic
  for filter in conf.customTopics:
    pubsub.subscribe(filter, pubsubTrampoline)
    topicFilters.add(filter)
    trace "Subscribed to custom topic", topic = filter
except CatchableError as exc:
  # Without subscriptions the inspector is useless — bail out.
  error "Could not subscribe to topics", errMsg = exc.msg
  quit(1)
|
|
|
|
|
2020-05-09 14:18:58 +00:00
|
|
|
info InspectorIdent & " starting", topic_filters = topicFilters,
                                   eth2_bootnodes = eth2bootnodes,
                                   disc5_bootnodes = disc5bootnodes

# Background task that dials peers queued on `connectQueue`.
asyncCheck connectLoop(switch, connectQueue,
                       pubsubPeers, 10.seconds)

# Seed the connect queue with the statically known bootnodes.
for node in eth2bootnodes:
  await connectQueue.addLast(node)

# DiscoveryV5 is only bootstrapped when UDP-capable bootnodes exist;
# `proto` is scoped to this branch, so both loops below depend on it.
if len(disc5bootnodes) > 0:
  var proto = bootstrapDiscovery(conf, hostAddress.get(), seckey,
                                 disc5bootnodes, enrFields)
  if not(conf.noDiscovery):
    asyncCheck discoveryLoop(conf, proto, switch, connectQueue,
                             pubsubPeers)

  # The resolve loop runs even when the discovery loop is disabled.
  asyncCheck resolveLoop(conf, proto, switch, resolveQueue,
                         pubsubPeers)

# We are not going to exit from this procedure
var emptyFut = newFuture[void]()
await emptyFut
|
2019-10-01 13:52:28 +00:00
|
|
|
|
|
|
|
when isMainModule:
  # Entry point: print the banner, parse command-line options via
  # confutils, then drive the inspector — `run` awaits forever, so
  # `waitFor` only returns on quit/exception.
  echo InspectorHeader
  var conf = InspectorConf.load(version = InspectorVersion)
  waitFor run(conf)
|