2021-03-29 19:17:48 +00:00
|
|
|
{.used.}
|
|
|
|
|
2021-03-24 10:48:53 +00:00
|
|
|
import
|
2021-12-22 12:37:31 +00:00
|
|
|
testutils/unittests,
|
2021-03-24 10:48:53 +00:00
|
|
|
chronos, stew/shims/net, eth/keys, eth/p2p/discoveryv5/enr,
|
2021-06-21 08:35:24 +00:00
|
|
|
../beacon_chain/spec/datatypes/base,
|
2021-08-10 20:46:35 +00:00
|
|
|
../beacon_chain/spec/network,
|
2021-03-24 10:48:53 +00:00
|
|
|
../beacon_chain/networking/[eth2_network, eth2_discovery],
|
|
|
|
./testutil
|
|
|
|
|
2021-08-18 13:58:43 +00:00
|
|
|
proc new(T: type Eth2DiscoveryProtocol,
         pk: keys.PrivateKey,
         enrIp: Option[ValidIpAddress], enrTcpPort, enrUdpPort: Option[Port],
         bindPort: Port, bindIp: ValidIpAddress,
         enrFields: openArray[(string, seq[byte])] = [],
         rng: ref BrHmacDrbgContext):
         T {.raises: [CatchableError, Defect].} =
  ## Convenience constructor for the tests: forwards straight to discv5's
  ## `newProtocol`, advertising `enrIp`/`enrTcpPort`/`enrUdpPort` in the ENR
  ## while binding the UDP socket to `bindIp`:`bindPort`.
  return newProtocol(
    pk, enrIp, enrTcpPort, enrUdpPort, enrFields,
    bindPort = bindPort, bindIp = bindIp, rng = rng)
|
|
|
|
|
|
|
|
proc generateNode(rng: ref BrHmacDrbgContext, port: Port,
    enrFields: openArray[(string, seq[byte])] = []): Eth2DiscoveryProtocol =
  ## Spin up a discovery node on 127.0.0.1:`port` with a fresh random key.
  ## The same port is advertised as both TCP and UDP port in the ENR, and any
  ## extra `enrFields` (e.g. fork id / attnets) are included verbatim.
  let loopback = ValidIpAddress.init("127.0.0.1")
  Eth2DiscoveryProtocol.new(
    keys.PrivateKey.random(rng[]),
    some(loopback), some(port), some(port),
    port, loopback, enrFields,
    rng = rng)
|
|
|
|
|
2021-09-21 22:25:49 +00:00
|
|
|
# TODO: Add tests with a syncnets preference
# Empty syncnet bitfield: every queryRandom call below expresses "no syncnet
# preference", so only attnets influence which peers are accepted.
const noSyncnetsPreference = SyncnetBits()
|
2021-09-21 22:25:49 +00:00
|
|
|
|
2021-12-22 12:37:31 +00:00
|
|
|
procSuite "Eth2 specific discovery tests":
  let
    rng = keys.newRng()
    # Fork id advertised by the test nodes and used in every query below, so
    # fork compatibility never filters peers out — only attnets matter here.
    enrForkId = ENRForkID(
      fork_digest: ForkDigest([byte 0, 1, 2, 3]),
      next_fork_version: Version([byte 0, 0, 0, 0]),
      next_fork_epoch: Epoch(0))

  asyncTest "Subnet query":
    # node2 advertises attnet 34; a query selecting {34, 42} must find it.
    var attnets: AttnetBits
    attnets.setBit(34)

    let
      node1 = generateNode(rng, Port(5000))
      node2 = generateNode(rng, Port(5001), {
        enrForkIdField: SSZ.encode(enrForkId),
        enrAttestationSubnetsField: SSZ.encode(attnets)
      })

    node1.open()
    node2.open()

    # ping in one direction to add node2 to routing table of node1
    check (await node2.ping(node1.localNode)).isOk()

    var attnetsSelected: AttnetBits
    attnetsSelected.setBit(42)
    attnetsSelected.setBit(34)

    # One overlapping bit (34) is enough for node2 to match the query.
    let discovered = await node1.queryRandom(
      enrForkId, attnetsSelected, noSyncnetsPreference, 1)
    check discovered.len == 1

    await node1.closeWait()
    await node2.closeWait()

  asyncTest "Invalid attnets field":
    # node2's attnets ENR field has the wrong SSZ length (half the subnet
    # count), so it must be rejected during the query even though bit 15
    # overlaps with the selection.
    var invalidAttnets: BitArray[ATTESTATION_SUBNET_COUNT div 2]
    invalidAttnets.setBit(15)
    # TODO: This doesn't fail actually.
    # var invalidAttnets2: BitArray[ATTESTATION_SUBNET_COUNT * 2]
    # invalidAttnets2.setBit(15)
    var attnets: AttnetBits
    attnets.setBit(15)

    let
      node1 = generateNode(rng, Port(5000))
      node2 = generateNode(rng, Port(5001), {
        enrForkIdField: SSZ.encode(enrForkId),
        enrAttestationSubnetsField: SSZ.encode(invalidAttnets)
      })
      node3 = generateNode(rng, Port(5002), {
        enrForkIdField: SSZ.encode(enrForkId),
        enrAttestationSubnetsField: SSZ.encode(attnets)
      })

    node1.open()
    node2.open()
    node3.open()

    # ping in one direction to add node2 and node3 to node1's routing table
    check (await node2.ping(node1.localNode)).isOk()
    check (await node3.ping(node1.localNode)).isOk()

    var attnetsSelected: AttnetBits
    attnetsSelected.setBit(15)
    attnetsSelected.setBit(42)

    # Only node3 (valid attnets field, bit 15 set) should be discovered.
    let discovered = await node1.queryRandom(
      enrForkId, attnetsSelected, noSyncnetsPreference, 1)
    check discovered.len == 1

    await node1.closeWait()
    await node2.closeWait()
    await node3.closeWait()

  asyncTest "Subnet query after ENR update":
    # node2 initially advertises only attnet 1, which does not match the
    # selection {2}; after node2 updates its ENR to include bit 2 and node1
    # refreshes its copy of the record, the query must succeed.
    var attnets: AttnetBits
    attnets.setBit(1)

    let
      node1 = generateNode(rng, Port(5000))
      node2 = generateNode(rng, Port(5001), {
        enrForkIdField: SSZ.encode(enrForkId),
        enrAttestationSubnetsField: SSZ.encode(attnets)
      })

    node1.open()
    node2.open()

    check (await node2.ping(node1.localNode)).isOk()

    var attnetsSelected: AttnetBits
    attnetsSelected.setBit(2)

    block:
      # No overlap yet: nothing should be discovered.
      let discovered = await node1.queryRandom(
        enrForkId, attnetsSelected, noSyncnetsPreference, 1)
      check discovered.len == 0

    block:
      attnets.setBit(2)
      check node2.updateRecord({
        enrForkIdField: SSZ.encode(enrForkId),
        enrAttestationSubnetsField: SSZ.encode(attnets)
      }).isOk()

      # Fetch node2's updated ENR (distance 0 = the node's own record) and
      # re-add it so node1's routing table sees the new attnets.
      let nodes = await node1.findNode(node2.localNode, @[0'u16])
      check nodes.isOk() and nodes[].len > 0
      discard node1.addNode(nodes[][0])

      let discovered = await node1.queryRandom(
        enrForkId, attnetsSelected, noSyncnetsPreference, 1)
      check discovered.len == 1

    await node1.closeWait()
    await node2.closeWait()
|
2021-09-29 11:06:16 +00:00
|
|
|
|
|
|
|
suite "Fork id compatibility test":
  # Local builder to avoid repeating near-identical ENRForkID literals.
  func makeForkId(digest, version: array[4, byte];
                  epoch: uint64): ENRForkID =
    ENRForkID(
      fork_digest: ForkDigest(digest),
      next_fork_version: Version(version),
      next_fork_epoch: Epoch(epoch))

  test "Digest check":
    # Differing fork digests are never compatible.
    check false == isCompatibleForkId(
      makeForkId([byte 0, 1, 2, 3], [byte 0, 0, 0, 0], 0),
      makeForkId([byte 9, 9, 9, 9], [byte 0, 0, 0, 0], 0))

    # Identical fork ids are compatible.
    check true == isCompatibleForkId(
      makeForkId([byte 0, 1, 2, 3], [byte 0, 0, 0, 0], 0),
      makeForkId([byte 0, 1, 2, 3], [byte 0, 0, 0, 0], 0))

  test "Fork check":
    # Future fork should work
    check true == isCompatibleForkId(
      makeForkId([byte 0, 1, 2, 3], [byte 0, 0, 0, 0], 0),
      makeForkId([byte 0, 1, 2, 3], [byte 2, 2, 2, 2], 2))

    # Past fork should fail
    check false == isCompatibleForkId(
      makeForkId([byte 0, 1, 2, 3], [byte 0, 0, 0, 1], 0),
      makeForkId([byte 0, 1, 2, 3], [byte 0, 0, 0, 0], 0))

  test "Next fork epoch check":
    # Same fork should check next_fork_epoch
    check false == isCompatibleForkId(
      makeForkId([byte 0, 1, 2, 3], [byte 0, 0, 0, 0], 0),
      makeForkId([byte 0, 1, 2, 3], [byte 0, 0, 0, 0], 2))
|