Use json rpc client to run tests on portal testnet (#899)

Tests have also been added and adjusted. The original ones were actually
failing, but this went unnoticed due to a mistake in the port passed to the
curl command.
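
For illustration, a minimal sketch, not taken from this commit, of the kind of
routing-table check that is now done from Nim over JSON-RPC instead of via curl;
the import path, address and port are assumptions based on the local testnet
defaults:

import
  std/sequtils,
  chronos,
  ./fluffy/rpc/portal_rpc_client  # hypothetical path to the new client module

proc checkBootstrapRoutingTable(nodeCount: int) {.async.} =
  # Connect to the JSON-RPC endpoint of the bootstrap (first) node.
  let client = newRpcHttpClient()
  await client.connect("127.0.0.1", Port(7000), false)

  # Flatten the routing table buckets and verify that the bootstrap node
  # has seen all other nodes of the local testnet.
  let info = await client.discv5_routingTableInfo()
  var start: seq[NodeId]
  let nodes = foldl(info.buckets, a & b, start)
  doAssert nodes.len == nodeCount - 1

when isMainModule:
  waitFor checkBootstrapRoutingTable(17)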
Kim De Mey 2021-12-03 09:51:25 +01:00 committed by GitHub
parent ed04aad203
commit 63b0945b52
10 changed files with 159 additions and 37 deletions

View File

@@ -156,6 +156,10 @@ fluffy-test: | build deps
fluffy-tools: | build deps
	$(ENV_SCRIPT) nim portalcli $(NIM_PARAMS) nimbus.nims

# Build fluffy test_portal_testnet
fluffy-test-portal-testnet: | build deps
	$(ENV_SCRIPT) nim test_portal_testnet $(NIM_PARAMS) nimbus.nims

# usual cleaning
clean: | clean-common
	rm -rf build/{nimbus,fluffy,$(TOOLS_CSV),all_tests,test_rpc,all_fluffy_tests,portalcli}

View File

@@ -0,0 +1,16 @@
# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  std/os,
  json_rpc/rpcclient,
  ./rpc_types, rpc_discovery_api # for the PongResponse

export rpcclient, rpc_types

createRpcSigs(RpcClient, currentSourcePath.parentDir / "rpc_calls" / "rpc_discovery_calls.nim")
createRpcSigs(RpcClient, currentSourcePath.parentDir / "rpc_calls" / "rpc_portal_calls.nim")

View File

@@ -1,3 +1,15 @@
## Discovery v5 json-rpc calls
proc discv5_nodeInfo(): NodeInfo
proc discv5_routingTableInfo(): RoutingTableInfo
proc discv5_nodeInfo(): NodeInfo
proc discv5_updateNodeInfo(kvPairs: seq[(string, string)]): RoutingTableInfo
proc discv5_setEnr(enr: Record): bool
proc discv5_getEnr(nodeId: NodeId): Record
proc discv5_deleteEnr(nodeId: NodeId): bool
proc discv5_lookupEnr(nodeId: NodeId): Record
proc discv5_ping(nodeId: Record): PongResponse
proc discv5_findNodes(nodeId: Record, distances: seq[uint16]): seq[Record]
proc discv5_talk(nodeId: Record, protocol, payload: string): string
proc discv5_recursiveFindNodes(): seq[Record]

View File

@@ -1,7 +1,9 @@
## Portal State Network json-rpc calls
proc portal_state_nodeInfo(): NodeInfo
proc portal_state_routingTableInfo(): RoutingTableInfo
proc portal_state_recursiveFindNodes(): seq[Record]
## Portal History Network json-rpc calls
proc portal_history_nodeInfo(): NodeInfo
proc portal_history_routingTableInfo(): RoutingTableInfo
proc portal_history_recursiveFindNodes(): seq[Record]

View File

@@ -16,7 +16,7 @@ import
export rpc_types # tasty sandwich
type
PongResponse = object
PongResponse* = object
enrSeq: uint64
recipientIP: string
recipientPort: uint16
@@ -114,8 +114,8 @@ proc installDiscoveryApiHandlers*(rpcServer: RpcServer|RpcProxy,
return talkresp.get().toHex()
rpcServer.rpc("discv5_recursiveFindNodes") do() -> seq[Record]:
# TODO: Not according to the specification currently. Should do a lookup
# here instead of query, and the node_id is a parameter to be passed.
# But in that case it would be very similar to discv5_lookupEnr.
let discovered = await d.queryRandom()
# TODO: Not according to the specification currently as the node_id is a
# parameter to be passed, but in that case it would be very similar to
# discv5_lookupEnr.
let discovered = await d.lookup(NodeId.random(d.rng[]))
return discovered.map(proc(n: Node): Record = n.record)

View File

@@ -12,16 +12,16 @@ import
stew/results,
eth/p2p/discoveryv5/[routing_table, enr, node]
export jsonmarshal, enr, routing_table
export jsonmarshal, routing_table, enr, node
type
NodeInfo* = object
nodeId: NodeId
nodeENR: Record
nodeId*: NodeId
nodeENR*: Record
RoutingTableInfo* = object
localKey: NodeId
buckets: seq[seq[NodeId]]
localKey*: NodeId
buckets*: seq[seq[NodeId]]
proc getNodeInfo*(r: RoutingTable): NodeInfo =
NodeInfo(nodeId: r.localNode.id, nodeENR: r.localNode.record)

View File

@@ -183,7 +183,9 @@ fi
# Build the binaries
BINARIES="fluffy"
$MAKE -j ${NPROC} LOG_LEVEL=TRACE ${BINARIES} NIMFLAGS="-d:chronicles_colors=off -d:chronicles_sinks=textlines" #V=2
TEST_BINARIES="fluffy-test-portal-testnet"
$MAKE -j ${NPROC} LOG_LEVEL=TRACE ${BINARIES} NIMFLAGS="-d:chronicles_colors=off -d:chronicles_sinks=textlines"
$MAKE -j ${NPROC} LOG_LEVEL=INFO ${TEST_BINARIES} NIMFLAGS="-d:chronicles_sinks=textlines"
# Kill child processes on Ctrl-C/SIGTERM/exit, passing the PID of this shell
# instance as the parent and the target process name as a pattern to the
@@ -304,35 +306,16 @@ if [[ "$BG_JOBS" != "$NUM_JOBS" ]]; then
exit 1
fi
# TODO: Move this to a separate script or create nim process that is rpc client
# once things get more complicated
check_nodes() {
  echo "Checking routing table of all nodes."
  for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
    if [[ ${NUM_NODE} == ${BOOTSTRAP_NODE} ]]; then
      RPC_PORT="$(( BASE_RPC_PORT + NUM_NODE ))"
      ROUTING_TABLE_NODES=$(curl -s -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":"id","method":"discv5_routingTableInfo","params":[]}' http://localhost:${RPC_PORT} | jq '.result.buckets' | jq 'flatten' | jq '. | length')
      if [[ $ROUTING_TABLE_NODES != $(( NUM_NODES - 1 )) ]]; then
        echo "Check for node ${NUM_NODE} failed."
        return 1
      fi
    else
      curl -s -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":"id","method":"discv5_recursiveFindNodes","params":[]}' http://localhost:${RPC_PORT} &>/dev/null
      ROUTING_TABLE_NODES=$(curl -s -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":"id","method":"discv5_routingTableInfo","params":[]}' http://localhost:${RPC_PORT} | jq '.result.buckets' | jq 'flatten' | jq '. | length')
      if [[ $ROUTING_TABLE_NODES != $(( NUM_NODES - 1 )) ]]; then
        echo "Check for node ${NUM_NODE} failed."
        return 1
      fi
    fi
  done
}
# launch htop and run until `TIMEOUT_DURATION` or check the nodes and quit.
if [[ "$USE_HTOP" == "1" ]]; then
htop -p "$PIDS"
cleanup
else
check_nodes
# Need to let the network settle a bit, as currently at start-up discv5 and
# the Portal networks all send messages at once to the same nodes, causing
# messages to drop while handshakes are going on.
sleep 5
./build/test_portal_testnet --node-count:${NUM_NODES}
FAILED=$?
if [[ "$FAILED" != "0" ]]; then
dump_logs

View File

@@ -0,0 +1,102 @@
# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
  std/sequtils,
  unittest2, testutils, confutils, chronos,
  eth/p2p/discoveryv5/random2, eth/keys,
  ../rpc/portal_rpc_client

type
  PortalTestnetConf* = object
    nodeCount* {.
      defaultValue: 17
      desc: "Number of nodes to test"
      name: "node-count" .}: int

    rpcAddress* {.
      desc: "Listening address of the JSON-RPC service for all nodes"
      defaultValue: "127.0.0.1"
      name: "rpc-address" .}: string

    baseRpcPort* {.
      defaultValue: 7000
      desc: "Port of the JSON-RPC service of the bootstrap (first) node"
      name: "base-rpc-port" .}: uint16

proc connectToRpcServers(config: PortalTestnetConf):
    Future[seq[RpcClient]] {.async.} =
  var clients: seq[RpcClient]
  for i in 0..<config.nodeCount:
    let client = newRpcHttpClient()
    await client.connect(
      config.rpcAddress, Port(config.baseRpcPort + uint16(i)), false)
    clients.add(client)

  return clients

# We are kind of abusing the unittest2 here to run json rpc tests against other
# processes. Needs to be compiled with `-d:unittest2DisableParamFiltering` or
# the confutils cli will not work.
procSuite "Portal testnet tests":
  let config = PortalTestnetConf.load()
  let rng = newRng()

  asyncTest "Discv5 - RoutingTableInfo at start":
    let clients = await connectToRpcServers(config)

    for i, client in clients:
      let routingTableInfo = await client.discv5_routingTableInfo()
      var start: seq[NodeId]
      let nodes = foldl(routingTableInfo.buckets, a & b, start)
      if i == 0: # bootstrap node has all nodes (however not all verified)
        check nodes.len == config.nodeCount - 1
      else: # Other nodes will have bootstrap node at this point, and maybe more
        check nodes.len > 0

  asyncTest "Discv5 - Random node lookup from each node":
    let clients = await connectToRpcServers(config)

    for client in clients:
      # We need to run a recursive lookup for each node to kick-off the network
      discard await client.discv5_recursiveFindNodes()

    for client in clients:
      # grab a random json-rpc client and take its `NodeInfo`
      let randomClient = sample(rng[], clients)
      let nodeInfo = await randomClient.discv5_nodeInfo()

      var enr: Record
      try:
        enr = await client.discv5_lookupEnr(nodeInfo.nodeId)
      except ValueError as e:
        echo e.msg
      check enr == nodeInfo.nodeENR

  asyncTest "Portal State - RoutingTableInfo at start":
    let clients = await connectToRpcServers(config)

    for i, client in clients:
      let routingTableInfo = await client.portal_state_routingTableInfo()
      var start: seq[NodeId]
      let nodes = foldl(routingTableInfo.buckets, a & b, start)
      if i == 0: # bootstrap node has all nodes (however not all verified)
        check nodes.len == config.nodeCount - 1
      else: # Other nodes will have bootstrap node at this point, and maybe more
        check nodes.len > 0

  asyncTest "Portal History - RoutingTableInfo at start":
    let clients = await connectToRpcServers(config)

    for i, client in clients:
      let routingTableInfo = await client.portal_history_routingTableInfo()
      var start: seq[NodeId]
      let nodes = foldl(routingTableInfo.buckets, a & b, start)
      if i == 0: # bootstrap node has all nodes (however not all verified)
        check nodes.len == config.nodeCount - 1
      else: # Other nodes will have bootstrap node at this point, and maybe more
        check nodes.len > 0

View File

@@ -57,6 +57,9 @@ task fluffy, "Build fluffy":
task portalcli, "Build portalcli":
  buildBinary "portalcli", "fluffy/tools/", "-d:chronicles_log_level=TRACE -d:chronosStrictException"

task test_portal_testnet, "Build test_portal_testnet":
  buildBinary "test_portal_testnet", "fluffy/scripts/", "-d:chronicles_log_level=DEBUG -d:chronosStrictException -d:unittest2DisableParamFiltering"

task testfluffy, "Run fluffy tests":
  # Need the nimbus_db_backend in state network tests as we need a Hexary to
  # start from, even though it only uses the MemoryDb.

vendor/nim-eth vendored

@@ -1 +1 @@
Subproject commit 84f755d792538e160d97467490878c4166aa20a0
Subproject commit 6e21b32f0d0569b6bfc527eecd6f9ff65452f271