nim-libp2p auto bump (#2840)

* auto-bump nim-libp2p

* Remove peer info for other peers

Not definitive, just to test libp2p's unstable branch

* Finish up "Remove peer info for other peers"

* getKey -> getPublicKey

* bump libp2p

* libp2p bump

Co-authored-by: = <Menduist@users.noreply.github.com>
Co-authored-by: Tanguy <tanguy@status.im>
nbc-bump-bot[bot] 2021-10-21 13:01:29 +02:00 committed by GitHub
parent 15ce2de3f0
commit 29b26f3f6b
8 changed files with 78 additions and 80 deletions
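
Taken together, the hunks below replace the cached per-peer `PeerInfo` ref with a bare `PeerId` and follow nim-libp2p's `getKey` -> `getPublicKey` rename. A minimal sketch of the renamed key API, assuming the nim-libp2p revision pulled in by this bump (variable names are illustrative, not part of the diff):

```nim
# Sketch only: derive a PeerId from a freshly generated secp256k1 network key,
# using the getPublicKey() name introduced by this bump (previously getKey()).
import stew/results, libp2p/crypto/crypto, libp2p/peerid

let
  rng = newRng()                    # HMAC-DRBG rng helper exported by libp2p
  privKey = PrivateKey.random(Secp256k1, rng[]).expect("random private key")
  pubKey = privKey.getPublicKey().expect("working public key from random")
  peerId = PeerID.init(pubKey).expect("PeerID from network key")

echo peerId                         # the only per-peer identity kept on Peer
```

The same rename shows up in `createNetKeystore` and in every `getPersistentNetKeys` branch further down.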


@@ -92,7 +92,7 @@ type
   Peer* = ref object
     network*: Eth2Node
-    info*: PeerInfo
+    peerId*: PeerId
     discoveryId*: Eth2DiscoveryId
     connectionState*: ConnectionState
     protocolStates*: seq[RootRef]
@@ -309,12 +309,12 @@ const
 template libp2pProtocol*(name: string, version: int) {.pragma.}
-func shortLog*(peer: Peer): string = shortLog(peer.info.peerId)
+func shortLog*(peer: Peer): string = shortLog(peer.peerId)
 chronicles.formatIt(Peer): shortLog(it)
 chronicles.formatIt(PublicKey): byteutils.toHex(it.getBytes().tryGet())
 template remote*(peer: Peer): untyped =
-  peer.info.peerId
+  peer.peerId
 proc openStream(node: Eth2Node,
                 peer: Peer,
@@ -325,16 +325,11 @@ proc openStream(node: Eth2Node,
   let
     protocolId = protocolId & "ssz_snappy"
     conn = await dial(
-      node.switch, peer.info.peerId, protocolId)
+      node.switch, peer.peerId, protocolId)
-  # libp2p may replace peerinfo ref sometimes, so make sure we have a recent
-  # one
-  if conn.peerInfo != nil:
-    peer.info = conn.peerInfo
   return conn
-proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer {.gcsafe.}
+proc init*(T: type Peer, network: Eth2Node, peerId: PeerId): Peer {.gcsafe.}
 func peerId*(node: Eth2Node): PeerID =
   node.switch.peerInfo.peerId
@@ -346,15 +341,15 @@ proc getPeer*(node: Eth2Node, peerId: PeerID): Peer =
   node.peers.withValue(peerId, peer) do:
     return peer[]
   do:
-    let peer = Peer.init(node, PeerInfo.init(peerId))
+    let peer = Peer.init(node, peerId)
     return node.peers.mgetOrPut(peerId, peer)
 proc peerFromStream(network: Eth2Node, conn: Connection): Peer =
-  result = network.getPeer(conn.peerInfo.peerId)
-  result.info = conn.peerInfo
+  result = network.getPeer(conn.peerId)
+  result.peerId = conn.peerId
 proc getKey*(peer: Peer): PeerID {.inline.} =
-  peer.info.peerId
+  peer.peerId
 proc getFuture*(peer: Peer): Future[void] {.inline.} =
   if isNil(peer.disconnectedFut):
@@ -473,11 +468,11 @@ proc disconnect*(peer: Peer, reason: DisconnectionReason,
           SeenTableTimeFaultOrError
         of PeerScoreLow:
           SeenTablePenaltyError
-      peer.network.addSeen(peer.info.peerId, seenTime)
-      await peer.network.switch.disconnect(peer.info.peerId)
+      peer.network.addSeen(peer.peerId, seenTime)
+      await peer.network.switch.disconnect(peer.peerId)
   except CatchableError:
     # We do not care about exceptions in disconnection procedure.
-    trace "Exception while disconnecting peer", peer = peer.info.peerId,
+    trace "Exception while disconnecting peer", peer = peer.peerId,
           reason = reason
 include eth/p2p/p2p_backends_helpers
@@ -997,7 +992,7 @@ proc trimConnections(node: Eth2Node, count: int) {.async.} =
       stabilitySubnetsCount = stabilitySubnets.countOnes()
       thisPeersScore = 10 * stabilitySubnetsCount
-    scores[peer.info.peerId] = thisPeersScore
+    scores[peer.peerId] = thisPeersScore
   # Split a 1000 points for each topic's peers
   # This gives priority to peers in topics with few peers
@@ -1181,13 +1176,13 @@ proc resolvePeer(peer: Peer) =
   # ENR using discovery5. We only resolve ENR for peers we know about to avoid
   # querying the network - as of now, the ENR is not needed, except for
   # debuggging
-  logScope: peer = peer.info.peerId
+  logScope: peer = peer.peerId
   let startTime = now(chronos.Moment)
   let nodeId =
     block:
       var key: PublicKey
       # `secp256k1` keys are always stored inside PeerID.
-      discard peer.info.peerId.extractPublicKey(key)
+      discard peer.peerId.extractPublicKey(key)
       keys.PublicKey.fromRaw(key.skkey.getBytes()).get().toNodeId()
   debug "Peer's ENR recovery task started", node_id = $nodeId
@@ -1364,8 +1359,8 @@ proc new*(T: type Eth2Node, config: BeaconNodeConf, runtimeCfg: RuntimeConfig,
       msg.protocolMounter node
-  proc peerHook(peerInfo: PeerInfo, event: ConnEvent): Future[void] {.gcsafe.} =
-    onConnEvent(node, peerInfo.peerId, event)
+  proc peerHook(peerId: PeerId, event: ConnEvent): Future[void] {.gcsafe.} =
+    onConnEvent(node, peerId, event)
   switch.addConnEventHandler(peerHook, ConnEventKind.Connected)
   switch.addConnEventHandler(peerHook, ConnEventKind.Disconnected)
@@ -1437,9 +1432,9 @@ proc stop*(node: Eth2Node) {.async.} =
     trace "Eth2Node.stop(): timeout reached", timeout,
       futureErrors = waitedFutures.filterIt(it.error != nil).mapIt(it.error.msg)
-proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer =
+proc init*(T: type Peer, network: Eth2Node, peerId: PeerId): Peer =
   let res = Peer(
-    info: info,
+    peerId: peerId,
     network: network,
     connectionState: ConnectionState.None,
    lastReqTime: now(chronos.Moment),
@@ -1620,7 +1615,7 @@ proc peerPingerHeartbeat(node: Eth2Node) {.async.} =
       if peer.metadata.isNone or
           heartbeatStart_m - peer.lastMetadataTime > MetadataRequestFrequency:
-        updateFutures.add(node.updatePeerMetadata(peer.info.peerId))
+        updateFutures.add(node.updatePeerMetadata(peer.peerId))
     discard await allFinished(updateFutures)
@@ -1666,7 +1661,7 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
         quit QuitFailure
       let
         privKey = res.get()
-        pubKey = privKey.getKey().expect("working public key from random")
+        pubKey = privKey.getPublicKey().expect("working public key from random")
         pres = PeerID.init(pubKey)
       if pres.isErr():
        fatal "Could not obtain PeerID from network key"
@@ -1697,7 +1692,7 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
        quit QuitFailure
      let
        privKey = res.get()
-       pubKey = privKey.getKey().expect("working public key from file")
+       pubKey = privKey.getPublicKey().expect("working public key from file")
      info "Network key storage was successfully unlocked",
           key_path = keyPath, network_public_key = pubKey
      NetKeyPair(seckey: privKey, pubkey: pubKey)
@@ -1711,7 +1706,7 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
      let
        privKey = rres.get()
-       pubKey = privKey.getKey().expect("working public key from random")
+       pubKey = privKey.getPublicKey().expect("working public key from random")
      # Insecure password used only for automated testing.
      let insecurePassword =
@@ -1747,7 +1742,7 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
      let
        privKey = rres.get()
-       pubKey = privKey.getKey().expect("working public key from random")
+       pubKey = privKey.getPublicKey().expect("working public key from random")
      # Insecure password used only for automated testing.
      let insecurePassword =
@@ -1773,7 +1768,7 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
      let
        privKey = res.get()
-       pubKey = privKey.getKey().expect("working public key from random")
+       pubKey = privKey.getPublicKey().expect("working public key from random")
      NetKeyPair(seckey: privKey, pubkey: pubKey)
 func gossipId(data: openArray[byte], topic: string, valid: bool): seq[byte] =
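
The net effect in this file is that `Peer` objects and the `node.peers` table are keyed purely by `PeerId`, so there is no longer a `PeerInfo` ref to keep in sync with libp2p (hence the removed `conn.peerInfo` refresh above). A simplified, self-contained sketch of that lookup pattern, with a hypothetical `DemoPeer` standing in for the real `Peer` object:

```nim
# Simplified sketch (not the real Eth2Node types): peers are tracked in a table
# keyed by PeerId alone, mirroring getPeer()/Peer.init() in the hunks above.
import std/tables, libp2p/peerid

type
  DemoPeer = ref object     # hypothetical stand-in for nbc's much larger Peer
    peerId: PeerId
    score: int

proc getOrCreate(peers: var Table[PeerId, DemoPeer], peerId: PeerId): DemoPeer =
  # Look the peer up by PeerId, creating a fresh record on first sight;
  # there is no PeerInfo ref to cache or refresh anymore.
  peers.withValue(peerId, existing) do:
    return existing[]
  do:
    return peers.mgetOrPut(peerId, DemoPeer(peerId: peerId))
```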


@@ -8,7 +8,7 @@ import
   std/[sequtils],
   stew/results,
   chronicles,
-  libp2p/[multiaddress, multicodec],
+  libp2p/[multiaddress, multicodec, peerstore],
   libp2p/protocols/pubsub/pubsubpeer,
   ./rest_utils,
   ../eth1/eth1_monitor,
@@ -71,13 +71,13 @@ type
     peerId*: PeerID
     connected*: bool
-proc toInfo(info: RestPeerInfoTuple): RestPeerInfo =
+proc toInfo(node: BeaconNode, peerId: PeerId): RestPeerInfo =
   RestPeerInfo(
-    peerId: info.peerId,
-    addrs: info.addrs,
-    protocols: info.protocols,
-    protoVersion: info.protoVersion,
-    agentVersion: info.agentVersion
+    peerId: $peerId,
+    addrs: node.network.switch.peerStore.addressBook.get(peerId).toSeq().mapIt($it),
+    protocols: node.network.switch.peerStore.protoBook.get(peerId).toSeq(),
+    protoVersion: node.network.switch.peerStore.protoVersionBook.get(peerId),
+    agentVersion: node.network.switch.peerStore.agentBook.get(peerId)
   )
 proc toNode(v: PubSubPeer, backoff: Moment): RestPubSubPeer =
@@ -151,7 +151,7 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
       for id, peer in node.network.peerPool:
         res.add(
           RestSimplePeer(
-            info: shortLog(peer.info).toInfo(),
+            info: toInfo(node, id),
             connectionState: $peer.connectionState,
             score: peer.score
           )
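
Per-peer metadata that used to live on `PeerInfo` (addresses, protocols, agent and protocol version) is now read from the switch's `PeerStore` books, as in `toInfo` above. A small sketch of that lookup, assuming the peerstore API used in this diff; `describePeer` is a hypothetical helper, not part of the change:

```nim
# Sketch: read the PeerStore "books" the handlers use, keyed by PeerId.
import std/[sequtils, sets], libp2p/[peerid, peerstore]

proc describePeer(peerStore: PeerStore, peerId: PeerId): string =
  # Each book is a keyed store; get() returns the recorded values for a peer.
  let
    addrs = peerStore.addressBook.get(peerId).toSeq().mapIt($it)
    protos = peerStore.protoBook.get(peerId).toSeq()
    agent = peerStore.agentBook.get(peerId)
    protoVersion = peerStore.protoVersionBook.get(peerId)
  $peerId & " agent=" & agent & " protoVersion=" & protoVersion &
    " addrs=" & $addrs & " protocols=" & $protos
```

The same books back `getLastSeenAddress` and the `agent`/`proto` fields in the node API handlers in the following files.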


@@ -1,8 +1,9 @@
 import
+  std/[sequtils],
   stew/results,
   chronicles,
   eth/p2p/discoveryv5/enr,
-  libp2p/[multiaddress, multicodec],
+  libp2p/[multiaddress, multicodec, peerstore],
   nimcrypto/utils as ncrutils,
   ../version, ../beacon_node, ../sync/sync_manager,
   ../networking/[eth2_network, peer_pool],
@@ -85,12 +86,13 @@ proc toString(direction: PeerType): string =
   of PeerType.Outgoing:
     "outbound"
-proc getLastSeenAddress(info: PeerInfo): string =
+proc getLastSeenAddress(node: BeaconNode, id: PeerId): string =
   # TODO (cheatfate): We need to provide filter here, which will be able to
   # filter such multiaddresses like `/ip4/0.0.0.0` or local addresses or
   # addresses with peer ids.
-  if len(info.addrs) > 0:
-    $info.addrs[len(info.addrs) - 1]
+  let addrs = node.network.switch.peerStore.addressBook.get(id).toSeq()
+  if len(addrs) > 0:
+    $addrs[len(addrs) - 1]
   else:
     ""
 proc getDiscoveryAddresses(node: BeaconNode): Option[seq[string]] =
@@ -187,17 +189,17 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
         dres.get()
     var res: seq[RpcNodePeer]
-    for item in node.network.peers.values():
-      if (item.connectionState in connectionMask) and
-         (item.direction in directionMask):
+    for peer in node.network.peers.values():
+      if (peer.connectionState in connectionMask) and
+         (peer.direction in directionMask):
         let peer = (
-          peer_id: $item.info.peerId,
-          enr: if item.enr.isSome(): item.enr.get().toUri() else: "",
-          last_seen_p2p_address: item.info.getLastSeenAddress(),
-          state: item.connectionState.toString(),
-          direction: item.direction.toString(),
-          agent: item.info.agentVersion, # Fields `agent` and `proto` are not
-          proto: item.info.protoVersion # part of specification.
+          peer_id: $peer.peerId,
+          enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
+          last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
+          state: peer.connectionState.toString(),
+          direction: peer.direction.toString(),
+          agent: node.network.switch.peerStore.agentBook.get(peer.peerId), # Fields `agent` and `proto` are not
+          proto: node.network.switch.peerStore.protoVersionBook.get(peer.peerId) # part of specification
         )
         res.add(peer)
     return RestApiResponse.jsonResponseWMeta(res, (count: uint64(len(res))))
@@ -233,13 +235,13 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
         res
     return RestApiResponse.jsonResponse(
       (
-        peer_id: $peer.info.peerId,
+        peer_id: $peer.peerId,
         enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
-        last_seen_p2p_address: peer.info.getLastSeenAddress(),
+        last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
         state: peer.connectionState.toString(),
         direction: peer.direction.toString(),
-        agent: peer.info.agentVersion, # Fields `agent` and `proto` are not
-        proto: peer.info.protoVersion # part of specification
+        agent: node.network.switch.peerStore.agentBook.get(peer.peerId), # Fields `agent` and `proto` are not
+        proto: node.network.switch.peerStore.protoVersionBook.get(peer.peerId) # part of specification
       )
     )


@@ -74,7 +74,7 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
     for id, peer in node.network.peerPool:
       peers.add(
         %(
-          info: shortLog(peer.info),
+          id: shortLog(peer.peerId),
           connectionState: $peer.connectionState,
           score: peer.score,
         )


@@ -7,11 +7,11 @@
 {.push raises: [Defect].}
-import std/options,
+import std/[options, sequtils],
   chronicles,
   json_rpc/servers/httpserver,
   eth/p2p/discoveryv5/enr,
-  libp2p/[multiaddress, multicodec],
+  libp2p/[multiaddress, multicodec, peerstore],
   nimcrypto/utils as ncrutils,
   ../beacon_node, ../version,
   ../networking/[eth2_network, peer_pool],
@@ -109,12 +109,13 @@ proc toString(direction: PeerType): string =
   of PeerType.Outgoing:
     "outbound"
-proc getLastSeenAddress(info: PeerInfo): string =
+proc getLastSeenAddress(node: BeaconNode, id: PeerId): string =
   # TODO (cheatfate): We need to provide filter here, which will be able to
   # filter such multiaddresses like `/ip4/0.0.0.0` or local addresses or
   # addresses with peer ids.
-  if len(info.addrs) > 0:
-    $info.addrs[len(info.addrs) - 1]
+  let addrs = node.network.switch.peerStore.addressBook.get(id).toSeq()
+  if len(addrs) > 0:
+    $addrs[len(addrs) - 1]
   else:
     ""
@@ -187,18 +188,18 @@ proc installNodeApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
       raise newException(CatchableError, "Incorrect direction parameter")
     let states = rstates.get()
     let dirs = rdirs.get()
-    for item in node.network.peers.values():
-      if (item.connectionState in states) and (item.direction in dirs):
-        let peer = (
-          peer_id: $item.info.peerId,
-          enr: if item.enr.isSome(): item.enr.get().toUri() else: "",
-          last_seen_p2p_address: item.info.getLastSeenAddress(),
-          state: item.connectionState.toString(),
-          direction: item.direction.toString(),
-          agent: item.info.agentVersion, # Fields `agent` and `proto` are not
-          proto: item.info.protoVersion # part of specification.
+    for peer in node.network.peers.values():
+      if (peer.connectionState in states) and (peer.direction in dirs):
+        let resPeer = (
+          peer_id: $peer.peerId,
+          enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
+          last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
+          state: peer.connectionState.toString(),
+          direction: peer.direction.toString(),
+          agent: node.network.switch.peerStore.agentBook.get(peer.peerId), # Fields `agent` and `proto` are not
+          proto: node.network.switch.peerStore.protoVersionBook.get(peer.peerId) # part of specification
         )
-        res.add(peer)
+        res.add(resPeer)
     return res
   rpcServer.rpc("get_v1_node_peer_count") do () -> RpcNodePeerCount:
@@ -229,13 +230,13 @@ proc installNodeApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
       raise newException(CatchableError, "Peer not found")
     return (
-      peer_id: $peer.info.peerId,
+      peer_id: $peer.peerId,
       enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
-      last_seen_p2p_address: peer.info.getLastSeenAddress(),
+      last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
       state: peer.connectionState.toString(),
      direction: peer.direction.toString(),
-      agent: peer.info.agentVersion, # Fields `agent` and `proto` are not part
-      proto: peer.info.protoVersion # of specification
+      agent: node.network.switch.peerStore.agentBook.get(peer.peerId), # Fields `agent` and `proto` are not
+      proto: node.network.switch.peerStore.protoVersionBook.get(peer.peerId) # part of specification
     )
   rpcServer.rpc("get_v1_node_version") do () -> JsonNode:


@@ -693,7 +693,7 @@ proc createNetKeystore*(kdfKind: KdfKind,
   let
     secret = privKey.getBytes().get()
     cryptoField = createCryptoField(kdfKind, rng, secret, password, salt, iv)
-    pubKey = privKey.getKey().get()
+    pubKey = privKey.getPublicKey().get()
     uuid = uuidGenerate().expect("Random bytes should be available")
   NetKeystore(


@@ -153,7 +153,7 @@ p2pProtocol BeaconSync(version = 1,
   onPeerConnected do (peer: Peer, incoming: bool) {.async.}:
     debug "Peer connected",
-      peer, peerInfo = shortLog(peer.info), incoming
+      peer, peerId = shortLog(peer.peerId), incoming
     # Per the eth2 protocol, whoever dials must send a status message when
     # connected for the first time, but because of how libp2p works, there may
     # be a race between incoming and outgoing connections and disconnects that

vendor/nim-libp2p (vendored submodule)
@@ -1 +1 @@
-Subproject commit cb94baf9c4081a78a889957c93baa16aff3511de
+Subproject commit 3669b90ceb6d1687e91cf3f87138cb90803f9446