nim-libp2p auto bump (#2840)
* auto-bump nim-libp2p
* Remove peer info for other peers

  Not definitive, just to test libp2p's unstable branch

* finish up "Remove peer info for other peers"
* getKey -> getPublicKey
* bump libp2p
* libp2p bump

Co-authored-by: = <Menduist@users.noreply.github.com>
Co-authored-by: Tanguy <tanguy@status.im>
parent 15ce2de3f0
commit 29b26f3f6b
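
In summary: `Peer` objects previously held a libp2p `PeerInfo` reference, which libp2p could replace behind the node's back; they now store only the stable `PeerId` and consult the libp2p peerstore for everything else, and nim-libp2p's `getKey` accessor is renamed `getPublicKey`. A minimal sketch of the shape change, using illustrative stand-in types rather than the real nimbus-eth2 declarations:

# Illustrative stand-ins only, not the real declarations.
type
  PeerId = distinct string          # stable identity, cheap to copy
  PeerInfo = ref object             # mutable, owned by libp2p
    peerId: PeerId
    agentVersion: string

  OldPeer = ref object              # before: whole PeerInfo kept per peer
    info: PeerInfo                  #   identity read as peer.info.peerId

  NewPeer = ref object              # after: only the identifier is kept
    peerId: PeerId                  #   identity read as peer.peerId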

@@ -92,7 +92,7 @@ type
   Peer* = ref object
     network*: Eth2Node
-    info*: PeerInfo
+    peerId*: PeerId
     discoveryId*: Eth2DiscoveryId
     connectionState*: ConnectionState
     protocolStates*: seq[RootRef]

@@ -309,12 +309,12 @@ const
 template libp2pProtocol*(name: string, version: int) {.pragma.}

-func shortLog*(peer: Peer): string = shortLog(peer.info.peerId)
+func shortLog*(peer: Peer): string = shortLog(peer.peerId)
 chronicles.formatIt(Peer): shortLog(it)
 chronicles.formatIt(PublicKey): byteutils.toHex(it.getBytes().tryGet())

 template remote*(peer: Peer): untyped =
-  peer.info.peerId
+  peer.peerId

 proc openStream(node: Eth2Node,
                 peer: Peer,

@@ -325,16 +325,11 @@ proc openStream(node: Eth2Node,
   let
     protocolId = protocolId & "ssz_snappy"
     conn = await dial(
-      node.switch, peer.info.peerId, protocolId)
-
-  # libp2p may replace peerinfo ref sometimes, so make sure we have a recent
-  # one
-  if conn.peerInfo != nil:
-    peer.info = conn.peerInfo
+      node.switch, peer.peerId, protocolId)

   return conn

-proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer {.gcsafe.}
+proc init*(T: type Peer, network: Eth2Node, peerId: PeerId): Peer {.gcsafe.}

 func peerId*(node: Eth2Node): PeerID =
   node.switch.peerInfo.peerId

@@ -346,15 +341,15 @@ proc getPeer*(node: Eth2Node, peerId: PeerID): Peer =
   node.peers.withValue(peerId, peer) do:
     return peer[]
   do:
-    let peer = Peer.init(node, PeerInfo.init(peerId))
+    let peer = Peer.init(node, peerId)
     return node.peers.mgetOrPut(peerId, peer)

 proc peerFromStream(network: Eth2Node, conn: Connection): Peer =
-  result = network.getPeer(conn.peerInfo.peerId)
-  result.info = conn.peerInfo
+  result = network.getPeer(conn.peerId)
+  result.peerId = conn.peerId

 proc getKey*(peer: Peer): PeerID {.inline.} =
-  peer.info.peerId
+  peer.peerId

 proc getFuture*(peer: Peer): Future[void] {.inline.} =
   if isNil(peer.disconnectedFut):
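
For readers unfamiliar with the pattern in `getPeer` above: it is the standard get-or-create idiom over Nim's `std/tables`. A self-contained sketch, with a hypothetical string-keyed table standing in for `node.peers`:

import std/tables

type Peer = ref object
  id: string

var peers: Table[string, Peer]

proc getPeer(id: string): Peer =
  # Return the existing entry, or create and register a new one.
  peers.withValue(id, existing) do:
    return existing[]
  do:
    return peers.mgetOrPut(id, Peer(id: id))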

@@ -473,11 +468,11 @@ proc disconnect*(peer: Peer, reason: DisconnectionReason,
         SeenTableTimeFaultOrError
       of PeerScoreLow:
         SeenTablePenaltyError
-    peer.network.addSeen(peer.info.peerId, seenTime)
-    await peer.network.switch.disconnect(peer.info.peerId)
+    peer.network.addSeen(peer.peerId, seenTime)
+    await peer.network.switch.disconnect(peer.peerId)
   except CatchableError:
     # We do not care about exceptions in disconnection procedure.
-    trace "Exception while disconnecting peer", peer = peer.info.peerId,
+    trace "Exception while disconnecting peer", peer = peer.peerId,
       reason = reason

 include eth/p2p/p2p_backends_helpers

@@ -997,7 +992,7 @@ proc trimConnections(node: Eth2Node, count: int) {.async.} =
       stabilitySubnetsCount = stabilitySubnets.countOnes()
       thisPeersScore = 10 * stabilitySubnetsCount

-    scores[peer.info.peerId] = thisPeersScore
+    scores[peer.peerId] = thisPeersScore

   # Split a 1000 points for each topic's peers
   # This gives priority to peers in topics with few peers

@@ -1181,13 +1176,13 @@ proc resolvePeer(peer: Peer) =
   # ENR using discovery5. We only resolve ENR for peers we know about to avoid
   # querying the network - as of now, the ENR is not needed, except for
   # debugging
-  logScope: peer = peer.info.peerId
+  logScope: peer = peer.peerId
   let startTime = now(chronos.Moment)
   let nodeId =
     block:
       var key: PublicKey
       # `secp256k1` keys are always stored inside PeerID.
-      discard peer.info.peerId.extractPublicKey(key)
+      discard peer.peerId.extractPublicKey(key)
       keys.PublicKey.fromRaw(key.skkey.getBytes()).get().toNodeId()

   debug "Peer's ENR recovery task started", node_id = $nodeId

@@ -1364,8 +1359,8 @@ proc new*(T: type Eth2Node, config: BeaconNodeConf, runtimeCfg: RuntimeConfig,
       msg.protocolMounter node

-  proc peerHook(peerInfo: PeerInfo, event: ConnEvent): Future[void] {.gcsafe.} =
-    onConnEvent(node, peerInfo.peerId, event)
+  proc peerHook(peerId: PeerId, event: ConnEvent): Future[void] {.gcsafe.} =
+    onConnEvent(node, peerId, event)

   switch.addConnEventHandler(peerHook, ConnEventKind.Connected)
   switch.addConnEventHandler(peerHook, ConnEventKind.Disconnected)
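
The hook registered with `addConnEventHandler` now receives the bare `PeerId` instead of a `PeerInfo`. A hedged sketch of wiring such a handler to a `Switch` (it assumes the umbrella `libp2p` import re-exports `Switch`, `PeerId`, `ConnEvent` and `ConnEventKind`, and uses a placeholder body instead of nimbus-eth2's `onConnEvent`):

import chronos
import libp2p

proc peerHook(peerId: PeerId, event: ConnEvent): Future[void] {.async.} =
  # Placeholder reaction; nimbus-eth2 forwards this to onConnEvent.
  echo "connection event for peer ", $peerId

proc attachHooks(sw: Switch) =
  # Same handler for both event kinds, as registered in the diff above.
  sw.addConnEventHandler(peerHook, ConnEventKind.Connected)
  sw.addConnEventHandler(peerHook, ConnEventKind.Disconnected)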

@@ -1437,9 +1432,9 @@ proc stop*(node: Eth2Node) {.async.} =
     trace "Eth2Node.stop(): timeout reached", timeout,
       futureErrors = waitedFutures.filterIt(it.error != nil).mapIt(it.error.msg)

-proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer =
+proc init*(T: type Peer, network: Eth2Node, peerId: PeerId): Peer =
   let res = Peer(
-    info: info,
+    peerId: peerId,
     network: network,
     connectionState: ConnectionState.None,
     lastReqTime: now(chronos.Moment),

@@ -1620,7 +1615,7 @@ proc peerPingerHeartbeat(node: Eth2Node) {.async.} =
     if peer.metadata.isNone or
        heartbeatStart_m - peer.lastMetadataTime > MetadataRequestFrequency:
-      updateFutures.add(node.updatePeerMetadata(peer.info.peerId))
+      updateFutures.add(node.updatePeerMetadata(peer.peerId))

   discard await allFinished(updateFutures)

@@ -1666,7 +1661,7 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
     quit QuitFailure
   let
     privKey = res.get()
-    pubKey = privKey.getKey().expect("working public key from random")
+    pubKey = privKey.getPublicKey().expect("working public key from random")
     pres = PeerID.init(pubKey)
   if pres.isErr():
     fatal "Could not obtain PeerID from network key"

@@ -1697,7 +1692,7 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
     quit QuitFailure
   let
     privKey = res.get()
-    pubKey = privKey.getKey().expect("working public key from file")
+    pubKey = privKey.getPublicKey().expect("working public key from file")
   info "Network key storage was successfully unlocked",
     key_path = keyPath, network_public_key = pubKey
   NetKeyPair(seckey: privKey, pubkey: pubKey)

@@ -1711,7 +1706,7 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
   let
     privKey = rres.get()
-    pubKey = privKey.getKey().expect("working public key from random")
+    pubKey = privKey.getPublicKey().expect("working public key from random")

   # Insecure password used only for automated testing.
   let insecurePassword =

@@ -1747,7 +1742,7 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
   let
     privKey = rres.get()
-    pubKey = privKey.getKey().expect("working public key from random")
+    pubKey = privKey.getPublicKey().expect("working public key from random")

   # Insecure password used only for automated testing.
   let insecurePassword =

@@ -1773,7 +1768,7 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
   let
     privKey = res.get()
-    pubKey = privKey.getKey().expect("working public key from random")
+    pubKey = privKey.getPublicKey().expect("working public key from random")
   NetKeyPair(seckey: privKey, pubkey: pubKey)

 func gossipId(data: openArray[byte], topic: string, valid: bool): seq[byte] =
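
The same `getKey` -> `getPublicKey` rename repeats in every branch of `getPersistentNetKeys` above and in `createNetKeystore` below. A hedged, self-contained sketch of the renamed nim-libp2p calls, from a random key to a `PeerID` (`PrivateKey.random` and `PeerID.init` are the calls this file already uses; `demoKeys` itself is illustrative):

import bearssl
import libp2p/crypto/crypto, libp2p/peerid

proc demoKeys(rng: var BrHmacDrbgContext): PeerID =
  let
    privKey = PrivateKey.random(Secp256k1, rng).expect("random key")
    pubKey = privKey.getPublicKey().expect("public key")  # was: getKey()
  PeerID.init(pubKey).expect("peer id from public key")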

@@ -8,7 +8,7 @@ import
   std/[sequtils],
   stew/results,
   chronicles,
-  libp2p/[multiaddress, multicodec],
+  libp2p/[multiaddress, multicodec, peerstore],
   libp2p/protocols/pubsub/pubsubpeer,
   ./rest_utils,
   ../eth1/eth1_monitor,

@@ -71,13 +71,13 @@ type
     peerId*: PeerID
     connected*: bool

-proc toInfo(info: RestPeerInfoTuple): RestPeerInfo =
+proc toInfo(node: BeaconNode, peerId: PeerId): RestPeerInfo =
   RestPeerInfo(
-    peerId: info.peerId,
-    addrs: info.addrs,
-    protocols: info.protocols,
-    protoVersion: info.protoVersion,
-    agentVersion: info.agentVersion
+    peerId: $peerId,
+    addrs: node.network.switch.peerStore.addressBook.get(peerId).toSeq().mapIt($it),
+    protocols: node.network.switch.peerStore.protoBook.get(peerId).toSeq(),
+    protoVersion: node.network.switch.peerStore.protoVersionBook.get(peerId),
+    agentVersion: node.network.switch.peerStore.agentBook.get(peerId)
   )

 proc toNode(v: PubSubPeer, backoff: Moment): RestPubSubPeer =
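
Rather than copying fields out of a `PeerInfo` snapshot, the handler now queries the libp2p peerstore books by `PeerId`. A sketch of that lookup pattern in isolation (assuming a connected `Switch` named `sw`; the `*Book.get` calls are the ones introduced above, and `peerSummary` is illustrative):

import std/[sequtils, sets]
import libp2p/[switch, peerstore, peerid]

proc peerSummary(sw: Switch, peerId: PeerId): string =
  let
    addrs = sw.peerStore.addressBook.get(peerId).toSeq().mapIt($it)
    agent = sw.peerStore.agentBook.get(peerId)
    protos = sw.peerStore.protoBook.get(peerId).toSeq()
  # One line per peer: identity plus whatever the peerstore has recorded.
  $peerId & " agent=" & agent & " addrs=" & $addrs & " protos=" & $protos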

@@ -151,7 +151,7 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
     for id, peer in node.network.peerPool:
       res.add(
         RestSimplePeer(
-          info: shortLog(peer.info).toInfo(),
+          info: toInfo(node, id),
           connectionState: $peer.connectionState,
           score: peer.score
         )

@@ -1,8 +1,9 @@
 import
+  std/[sequtils],
   stew/results,
   chronicles,
   eth/p2p/discoveryv5/enr,
-  libp2p/[multiaddress, multicodec],
+  libp2p/[multiaddress, multicodec, peerstore],
   nimcrypto/utils as ncrutils,
   ../version, ../beacon_node, ../sync/sync_manager,
   ../networking/[eth2_network, peer_pool],

@@ -85,12 +86,13 @@ proc toString(direction: PeerType): string =
   of PeerType.Outgoing:
     "outbound"

-proc getLastSeenAddress(info: PeerInfo): string =
+proc getLastSeenAddress(node: BeaconNode, id: PeerId): string =
   # TODO (cheatfate): We need to provide filter here, which will be able to
   # filter such multiaddresses like `/ip4/0.0.0.0` or local addresses or
   # addresses with peer ids.
-  if len(info.addrs) > 0:
-    $info.addrs[len(info.addrs) - 1]
+  let addrs = node.network.switch.peerStore.addressBook.get(id).toSeq()
+  if len(addrs) > 0:
+    $addrs[len(addrs) - 1]
   else:
     ""

 proc getDiscoveryAddresses(node: BeaconNode): Option[seq[string]] =
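
One subtlety: the address book appears to be set-backed (hence the `toSeq()` conversion above), so "last" is whichever element iteration yields last, not necessarily the most recently seen address. A hedged sketch of the same selection (hypothetical `sw: Switch`):

import std/[sequtils, sets]
import libp2p/[switch, peerstore, peerid]

proc lastSeenAddress(sw: Switch, id: PeerId): string =
  # Set iteration order is unspecified, so this picks *an* address.
  let addrs = sw.peerStore.addressBook.get(id).toSeq()
  if addrs.len > 0: $addrs[^1] else: ""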

@@ -187,17 +189,17 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
       dres.get()

     var res: seq[RpcNodePeer]
-    for item in node.network.peers.values():
-      if (item.connectionState in connectionMask) and
-         (item.direction in directionMask):
+    for peer in node.network.peers.values():
+      if (peer.connectionState in connectionMask) and
+         (peer.direction in directionMask):
         let peer = (
-          peer_id: $item.info.peerId,
-          enr: if item.enr.isSome(): item.enr.get().toUri() else: "",
-          last_seen_p2p_address: item.info.getLastSeenAddress(),
-          state: item.connectionState.toString(),
-          direction: item.direction.toString(),
-          agent: item.info.agentVersion, # Fields `agent` and `proto` are not
-          proto: item.info.protoVersion # part of specification.
+          peer_id: $peer.peerId,
+          enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
+          last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
+          state: peer.connectionState.toString(),
+          direction: peer.direction.toString(),
+          agent: node.network.switch.peerStore.agentBook.get(peer.peerId), # Fields `agent` and `proto` are not
+          proto: node.network.switch.peerStore.protoVersionBook.get(peer.peerId) # part of specification
         )
         res.add(peer)
     return RestApiResponse.jsonResponseWMeta(res, (count: uint64(len(res))))

@@ -233,13 +235,13 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
       res
     return RestApiResponse.jsonResponse(
       (
-        peer_id: $peer.info.peerId,
+        peer_id: $peer.peerId,
         enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
-        last_seen_p2p_address: peer.info.getLastSeenAddress(),
+        last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
         state: peer.connectionState.toString(),
         direction: peer.direction.toString(),
-        agent: peer.info.agentVersion, # Fields `agent` and `proto` are not
-        proto: peer.info.protoVersion # part of specification
+        agent: node.network.switch.peerStore.agentBook.get(peer.peerId), # Fields `agent` and `proto` are not
+        proto: node.network.switch.peerStore.protoVersionBook.get(peer.peerId) # part of specification
       )
     )
|
@ -74,7 +74,7 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
for id, peer in node.network.peerPool:
|
||||
peers.add(
|
||||
%(
|
||||
info: shortLog(peer.info),
|
||||
id: shortLog(peer.peerId),
|
||||
connectionState: $peer.connectionState,
|
||||
score: peer.score,
|
||||
)
|
||||
|
|
|

@@ -7,11 +7,11 @@
 {.push raises: [Defect].}

-import std/options,
+import std/[options, sequtils],
   chronicles,
   json_rpc/servers/httpserver,
   eth/p2p/discoveryv5/enr,
-  libp2p/[multiaddress, multicodec],
+  libp2p/[multiaddress, multicodec, peerstore],
   nimcrypto/utils as ncrutils,
   ../beacon_node, ../version,
   ../networking/[eth2_network, peer_pool],

@@ -109,12 +109,13 @@ proc toString(direction: PeerType): string =
   of PeerType.Outgoing:
     "outbound"

-proc getLastSeenAddress(info: PeerInfo): string =
+proc getLastSeenAddress(node: BeaconNode, id: PeerId): string =
   # TODO (cheatfate): We need to provide filter here, which will be able to
   # filter such multiaddresses like `/ip4/0.0.0.0` or local addresses or
   # addresses with peer ids.
-  if len(info.addrs) > 0:
-    $info.addrs[len(info.addrs) - 1]
+  let addrs = node.network.switch.peerStore.addressBook.get(id).toSeq()
+  if len(addrs) > 0:
+    $addrs[len(addrs) - 1]
   else:
     ""

@@ -187,18 +188,18 @@ proc installNodeApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
       raise newException(CatchableError, "Incorrect direction parameter")
     let states = rstates.get()
     let dirs = rdirs.get()
-    for item in node.network.peers.values():
-      if (item.connectionState in states) and (item.direction in dirs):
-        let peer = (
-          peer_id: $item.info.peerId,
-          enr: if item.enr.isSome(): item.enr.get().toUri() else: "",
-          last_seen_p2p_address: item.info.getLastSeenAddress(),
-          state: item.connectionState.toString(),
-          direction: item.direction.toString(),
-          agent: item.info.agentVersion, # Fields `agent` and `proto` are not
-          proto: item.info.protoVersion # part of specification.
+    for peer in node.network.peers.values():
+      if (peer.connectionState in states) and (peer.direction in dirs):
+        let resPeer = (
+          peer_id: $peer.peerId,
+          enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
+          last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
+          state: peer.connectionState.toString(),
+          direction: peer.direction.toString(),
+          agent: node.network.switch.peerStore.agentBook.get(peer.peerId), # Fields `agent` and `proto` are not
+          proto: node.network.switch.peerStore.protoVersionBook.get(peer.peerId) # part of specification
         )
-        res.add(peer)
+        res.add(resPeer)
     return res

   rpcServer.rpc("get_v1_node_peer_count") do () -> RpcNodePeerCount:

@@ -229,13 +230,13 @@ proc installNodeApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
      raise newException(CatchableError, "Peer not found")

    return (
-      peer_id: $peer.info.peerId,
+      peer_id: $peer.peerId,
      enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
-      last_seen_p2p_address: peer.info.getLastSeenAddress(),
+      last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
      state: peer.connectionState.toString(),
      direction: peer.direction.toString(),
-      agent: peer.info.agentVersion, # Fields `agent` and `proto` are not part
-      proto: peer.info.protoVersion # of specification
+      agent: node.network.switch.peerStore.agentBook.get(peer.peerId), # Fields `agent` and `proto` are not
+      proto: node.network.switch.peerStore.protoVersionBook.get(peer.peerId) # part of specification
    )

  rpcServer.rpc("get_v1_node_version") do () -> JsonNode:
|
@ -693,7 +693,7 @@ proc createNetKeystore*(kdfKind: KdfKind,
|
|||
let
|
||||
secret = privKey.getBytes().get()
|
||||
cryptoField = createCryptoField(kdfKind, rng, secret, password, salt, iv)
|
||||
pubKey = privKey.getKey().get()
|
||||
pubKey = privKey.getPublicKey().get()
|
||||
uuid = uuidGenerate().expect("Random bytes should be available")
|
||||
|
||||
NetKeystore(
|
||||
|
|
|
@ -153,7 +153,7 @@ p2pProtocol BeaconSync(version = 1,
|
|||
|
||||
onPeerConnected do (peer: Peer, incoming: bool) {.async.}:
|
||||
debug "Peer connected",
|
||||
peer, peerInfo = shortLog(peer.info), incoming
|
||||
peer, peerId = shortLog(peer.peerId), incoming
|
||||
# Per the eth2 protocol, whoever dials must send a status message when
|
||||
# connected for the first time, but because of how libp2p works, there may
|
||||
# be a race between incoming and outgoing connections and disconnects that
|
||||
|
|
|
@ -1 +1 @@
|
|||
Subproject commit cb94baf9c4081a78a889957c93baa16aff3511de
|
||||
Subproject commit 3669b90ceb6d1687e91cf3f87138cb90803f9446
|