{.used.}

import
  std/[options, sequtils],
  stew/shims/net as stewNet,
  testutils/unittests,
  chronicles,
  chronos,
  eth/keys,
  libp2p/crypto/crypto,
  json_rpc/[rpcserver, rpcclient]
import
  ../../../waku/v2/node/peer_manager,
  ../../../waku/v2/node/waku_node,
  ../../../waku/v2/node/jsonrpc/admin/handlers as admin_api,
  ../../../waku/v2/node/jsonrpc/admin/client as admin_api_client,
  ../../../waku/v2/protocol/waku_relay,
  ../../../waku/v2/protocol/waku_archive,
  ../../../waku/v2/protocol/waku_archive/driver/queue_driver,
  ../../../waku/v2/protocol/waku_store,
  ../../../waku/v2/protocol/waku_filter,
  ../../../waku/v2/utils/peers,
  ../testlib/waku2


procSuite "Waku v2 JSON-RPC API - Admin":
  let
    bindIp = ValidIpAddress.init("0.0.0.0")
asyncTest "connect to ad-hoc peers":
|
|
|
|
|
# Create a couple of nodes
|
|
|
|
|
let
|
2023-02-13 10:43:49 +00:00
|
|
|
|
node1 = WakuNode.new(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(60600))
|
|
|
|
|
node2 = WakuNode.new(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(60602))
|
2023-02-10 09:43:16 +00:00
|
|
|
|
peerInfo2 = node2.switch.peerInfo
|
2023-02-13 10:43:49 +00:00
|
|
|
|
node3 = WakuNode.new(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(60604))
|
2023-02-10 09:43:16 +00:00
|
|
|
|
peerInfo3 = node3.switch.peerInfo
|
|
|
|
|
|
|
|
|
|

    await allFutures([node1.start(), node2.start(), node3.start()])

    await node1.mountRelay()
    await node2.mountRelay()
    await node3.mountRelay()

    # RPC server setup
    let
      rpcPort = Port(8551)
      ta = initTAddress(bindIp, rpcPort)
      server = newRpcHttpServer([ta])

    installAdminApiHandlers(node1, server)
    server.start()

    let client = newRpcHttpClient()
    await client.connect("127.0.0.1", rpcPort, false)

    # Connect to nodes 2 and 3 using the Admin API
    let postRes = await client.post_waku_v2_admin_v1_peers(@[constructMultiaddrStr(peerInfo2),
                                                             constructMultiaddrStr(peerInfo3)])

    check:
      postRes

    # Verify that newly connected peers are being managed
    let getRes = await client.get_waku_v2_admin_v1_peers()

    check:
      getRes.len == 2
      # Check peer 2
      getRes.anyIt(it.protocol == WakuRelayCodec and
                   it.multiaddr == constructMultiaddrStr(peerInfo2))
      # Check peer 3
      getRes.anyIt(it.protocol == WakuRelayCodec and
                   it.multiaddr == constructMultiaddrStr(peerInfo3))

    # Verify that an exception is raised if we can't connect to the peer
    let nonExistentPeer = "/ip4/0.0.0.0/tcp/10000/p2p/16Uiu2HAm6HZZr7aToTvEBPpiys4UxajCTU97zj5v7RNR2gbniy1D"
    expect(ValueError):
      discard await client.post_waku_v2_admin_v1_peers(@[nonExistentPeer])
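
    # A malformed multiaddr should be rejected as well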
    let malformedPeer = "/malformed/peer"
    expect(ValueError):
      discard await client.post_waku_v2_admin_v1_peers(@[malformedPeer])

    await server.stop()
    await server.closeWait()

    await allFutures([node1.stop(), node2.stop(), node3.stop()])
asyncTest "get managed peer information":
|
|
|
|
|
# Create 3 nodes and start them with relay
|
2023-02-13 10:43:49 +00:00
|
|
|
|
let nodes = toSeq(0..<3).mapIt(WakuNode.new(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(60220+it*2)))
|
2023-02-10 09:43:16 +00:00
|
|
|
|
await allFutures(nodes.mapIt(it.start()))
|
|
|
|
|
await allFutures(nodes.mapIt(it.mountRelay()))
|
|
|
|
|
|
|
|
|
|
# Dial nodes 2 and 3 from node1
|
|
|
|
|
await nodes[0].connectToNodes(@[constructMultiaddrStr(nodes[1].peerInfo)])
|
|
|
|
|
await nodes[0].connectToNodes(@[constructMultiaddrStr(nodes[2].peerInfo)])
|
|
|
|
|
|
|
|
|
|
# RPC server setup
|
|
|
|
|
let
|
|
|
|
|
rpcPort = Port(8552)
|
|
|
|
|
ta = initTAddress(bindIp, rpcPort)
|
|
|
|
|
server = newRpcHttpServer([ta])
|
|
|
|
|
|
|
|
|
|
installAdminApiHandlers(nodes[0], server)
|
|
|
|
|
server.start()
|
|
|
|
|
|
|
|
|
|
let client = newRpcHttpClient()
|
|
|
|
|
await client.connect("127.0.0.1", rpcPort, false)
|
|
|
|
|
|
|
|
|
|
let response = await client.get_waku_v2_admin_v1_peers()
|
|
|
|
|
|
|
|
|
|
check:
|
|
|
|
|
response.len == 2
|
|
|
|
|
# Check peer 2
|
|
|
|
|
response.anyIt(it.protocol == WakuRelayCodec and
|
|
|
|
|
it.multiaddr == constructMultiaddrStr(nodes[1].peerInfo))
|
|
|
|
|
# Check peer 3
|
|
|
|
|
response.anyIt(it.protocol == WakuRelayCodec and
|
|
|
|
|
it.multiaddr == constructMultiaddrStr(nodes[2].peerInfo))
|
|
|
|
|
|
|
|
|
|

    # Artificially remove the addresses from the peer store's address book
    nodes[0].peerManager.peerStore[AddressBook][nodes[1].peerInfo.peerId] = @[]
    nodes[0].peerManager.peerStore[AddressBook][nodes[2].peerInfo.peerId] = @[]

    # Verify that the returned addresses are empty
    let responseEmptyAdd = await client.get_waku_v2_admin_v1_peers()
    check:
      responseEmptyAdd[0].multiaddr == ""
      responseEmptyAdd[1].multiaddr == ""

    await server.stop()
    await server.closeWait()

    await allFutures(nodes.mapIt(it.stop()))
asyncTest "get unmanaged peer information":
|
2023-02-13 10:43:49 +00:00
|
|
|
|
let node = WakuNode.new(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(60523))
|
2023-02-10 09:43:16 +00:00
|
|
|
|
|
|
|
|
|
await node.start()
|
|
|
|
|
|
|
|
|
|
# RPC server setup
|
|
|
|
|
let
|
|
|
|
|
rpcPort = Port(8553)
|
|
|
|
|
ta = initTAddress(bindIp, rpcPort)
|
|
|
|
|
server = newRpcHttpServer([ta])
|
|
|
|
|
|
|
|
|
|
installAdminApiHandlers(node, server)
|
|
|
|
|
server.start()
|
|
|
|
|
|
|
|
|
|
let client = newRpcHttpClient()
|
|
|
|
|
await client.connect("127.0.0.1", rpcPort, false)
|
|
|
|
|
|
|
|
|
|
await node.mountFilter()
|
|
|
|
|
await node.mountFilterClient()
|
|
|
|
|
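    # In-memory queue driver backing the message archive for the store protocol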
    let driver: ArchiveDriver = QueueDriver.new()
    node.mountArchive(some(driver), none(MessageValidator), none(RetentionPolicy))
    await node.mountStore()
    node.mountStoreClient()

    # Create and set some peers
    let
      locationAddr = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()
      filterPeer = PeerInfo.new(generateEcdsaKey(), @[locationAddr])
      storePeer = PeerInfo.new(generateEcdsaKey(), @[locationAddr])
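
    # Register them as service peers for the filter and store protocols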
    node.peerManager.addServicePeer(filterPeer.toRemotePeerInfo(), WakuFilterCodec)
    node.peerManager.addServicePeer(storePeer.toRemotePeerInfo(), WakuStoreCodec)

    # Mock that we connected in the past so Identify has populated the proto book
    node.peerManager.peerStore[ProtoBook][filterPeer.peerId] = @[WakuFilterCodec]
    node.peerManager.peerStore[ProtoBook][storePeer.peerId] = @[WakuStoreCodec]

    let response = await client.get_waku_v2_admin_v1_peers()

    ## Then
    check:
      response.len == 2
      # Check filter peer
      (response.filterIt(it.protocol == WakuFilterCodec)[0]).multiaddr == constructMultiaddrStr(filterPeer)
      # Check store peer
      (response.filterIt(it.protocol == WakuStoreCodec)[0]).multiaddr == constructMultiaddrStr(storePeer)

    ## Cleanup
    await server.stop()
    await server.closeWait()

    await node.stop()