mirror of https://github.com/waku-org/nwaku.git
fix: prevent IP 0.0.0.0 from being published and update peers with empty ENR data (#1982)
This commit is contained in:
parent 645b034367
commit 47ae19c104
@@ -5,7 +5,8 @@ import
   ./waku_core/test_namespaced_topics,
   ./waku_core/test_time,
   ./waku_core/test_message_digest,
-  ./waku_core/test_peers
+  ./waku_core/test_peers,
+  ./waku_core/test_published_address
 
 
 # Waku archive test suite
@@ -0,0 +1,27 @@
+{.used.}
+
+import
+  stew/shims/net as stewNet,
+  std/strutils,
+  testutils/unittests
+import
+  ../testlib/wakucore,
+  ../testlib/wakunode
+
+suite "Waku Core - Published Address":
+
+  test "Test IP 0.0.0.0":
+    let
+      node = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init(
+        "0.0.0.0"), Port(0))
+
+    check:
+      ($node.announcedAddresses).contains("127.0.0.1")
+
+  test "Test custom IP":
+    let
+      node = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init(
+        "8.8.8.8"), Port(0))
+
+    check:
+      ($node.announcedAddresses).contains("8.8.8.8")
@@ -31,10 +31,10 @@ procSuite "Waku v2 JSON-RPC API - Admin":
   asyncTest "connect to ad-hoc peers":
     # Create a couple of nodes
     let
-      node1 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(60600))
-      node2 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(60602))
+      node1 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(60600))
+      node2 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(60602))
       peerInfo2 = node2.switch.peerInfo
-      node3 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(60604))
+      node3 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(60604))
       peerInfo3 = node3.switch.peerInfo
 
     await allFutures([node1.start(), node2.start(), node3.start()])
@@ -90,7 +90,7 @@ procSuite "Waku v2 JSON-RPC API - Admin":
 
   asyncTest "get managed peer information":
     # Create 3 nodes and start them with relay
-    let nodes = toSeq(0..<3).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(60220+it*2)))
+    let nodes = toSeq(0..<3).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(60220+it*2)))
     await allFutures(nodes.mapIt(it.start()))
     await allFutures(nodes.mapIt(it.mountRelay()))
 
@@ -4,7 +4,7 @@ else:
   {.push raises: [].}
 
 import
-  std/[options, sequtils],
+  std/[options, sequtils, strutils],
   stew/results,
   stew/shims/net,
   libp2p/multiaddress
@@ -53,6 +53,11 @@ template wsFlag(wssEnabled: bool): MultiAddress =
   else: MultiAddress.init("/ws").tryGet()
 
 
+proc formatListenAddress(inputMultiAdd: MultiAddress): MultiAddress =
+  let inputStr = $inputMultiAdd
+  # If MultiAddress contains "0.0.0.0", replace it for "127.0.0.1"
+  return MultiAddress.init(inputStr.replace("0.0.0.0", "127.0.0.1")).get()
+
 proc init*(T: type NetConfig,
            bindIp: ValidIpAddress,
            bindPort: Port,
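The new formatListenAddress proc rewrites the textual form of the bind multiaddress before it is announced. A minimal sketch of the same substitution, using only std/strutils on an example multiaddress string (the address and port below are illustrative, not taken from the commit):

import std/strutils

let bound = "/ip4/0.0.0.0/tcp/60000"                  # wildcard bind address
let announced = bound.replace("0.0.0.0", "127.0.0.1") # what would be published instead
echo announced                                        # -> /ip4/127.0.0.1/tcp/60000

In the commit itself the rewritten string is parsed back through MultiAddress.init before being added to announcedAddresses, so the node never publishes the unroutable 0.0.0.0 address.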
@@ -111,7 +116,7 @@ proc init*(T: type NetConfig,
   if hostExtAddress.isSome():
     announcedAddresses.add(hostExtAddress.get())
   else:
-    announcedAddresses.add(hostAddress) # We always have at least a bind address for the host
+    announcedAddresses.add(formatListenAddress(hostAddress)) # We always have at least a bind address for the host
 
   # External multiaddrs that the operator may have configured
   if extMultiAddrs.len > 0:
@@ -121,8 +121,9 @@ proc addPeer*(pm: PeerManager, remotePeerInfo: RemotePeerInfo, origin = UnknownO
   discard remotePeerInfo.peerId.extractPublicKey(publicKey)
 
   if pm.peerStore[AddressBook][remotePeerInfo.peerId] == remotePeerInfo.addrs and
-     pm.peerStore[KeyBook][remotePeerInfo.peerId] == publicKey:
-    # Peer already managed
+     pm.peerStore[KeyBook][remotePeerInfo.peerId] == publicKey and
+     pm.peerStore[ENRBook][remotePeerInfo.peerId].raw.len > 0:
+    # Peer already managed and ENR info is already saved
     return
 
   trace "Adding peer to manager", peerId = remotePeerInfo.peerId, addresses = remotePeerInfo.addrs
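The added ENRBook condition means a peer whose stored ENR is empty is no longer treated as fully managed, so addPeer runs again and can fill in the record. A small stand-alone sketch of that emptiness check (the StoredEnr type and hasEnr helper are hypothetical, for illustration only; the commit checks raw.len on the ENR stored in the peer store directly):

type
  StoredEnr = object
    raw: seq[byte]   # serialized ENR payload; empty when nothing was saved

proc hasEnr(e: StoredEnr): bool =
  # mirrors the pm.peerStore[ENRBook][peerId].raw.len > 0 condition in the diff
  e.raw.len > 0

assert not hasEnr(StoredEnr(raw: @[]))             # empty ENR data -> peer gets updated
assert hasEnr(StoredEnr(raw: @[byte 0xf8, 0x84]))  # ENR already saved -> peer is skipped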