mirror of https://github.com/status-im/nim-eth.git

commit 2d7b3440f2 (parent 6c85a48b4c)
make bitsPerHop configurable + add routing table tests
@@ -50,7 +50,8 @@ proc runP2pTests() =
       "test_protocol_handlers",
       "test_enr",
       "test_discoveryv5",
-      "test_discv5_encoding"
+      "test_discv5_encoding",
+      "test_routing_table"
     ]:
     runTest("tests/p2p/" & filename)
@@ -9,6 +9,14 @@ type
   RoutingTable* = object
     thisNode: Node
     buckets: seq[KBucket]
+    bitsPerHop: int ## This value indicates how many bits (at minimum) you get
+    ## closer to finding your target per query. Practically, it also tells you
+    ## how often your "not in range" branch will split off. Setting this to 1
+    ## is the basic, non-accelerated version, which will never split off the
+    ## not-in-range branch and which will result in log base 2 of n hops per
+    ## lookup. Setting it higher will increase the amount of splitting on a
+    ## not-in-range branch (thus holding more nodes with better keyspace
+    ## coverage), improving this to log base 2^b of n hops per lookup.

   KBucket = ref object
     istart, iend: NodeId ## Range of NodeIds this KBucket covers. This is not a
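To put the doc comment in numbers, here is a minimal sketch (not part of this commit; `expectedHops` is a hypothetical helper) of the relation it describes, log base (2^b) of n = log2(n) / b:

import math

# Expected lookup hops for a network of n nodes at a given bitsPerHop b,
# using the log base (2^b) of n relation stated in the doc comment above.
proc expectedHops(n: float, b: int): float =
  log2(n) / b.float

echo expectedHops(65536.0, 1) # ~16 hops with the non-accelerated table
echo expectedHops(65536.0, 8) # ~2 hops with the default bitsPerHop = 8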
@@ -27,7 +35,6 @@ type
 const
   BUCKET_SIZE* = 16
   REPLACEMENT_CACHE_SIZE* = 8
-  BITS_PER_HOP = 8
   ID_SIZE = 256

 proc distanceTo(n: Node, id: NodeId): UInt256 =
@@ -165,9 +172,10 @@ proc computeSharedPrefixBits(nodes: openarray[Node]): int =

   doAssert(false, "Unable to calculate number of shared prefix bits")

-proc init*(r: var RoutingTable, thisNode: Node) {.inline.} =
+proc init*(r: var RoutingTable, thisNode: Node, bitsPerHop = 8) {.inline.} =
   r.thisNode = thisNode
   r.buckets = @[newKBucket(0.u256, high(Uint256))]
+  r.bitsPerHop = bitsPerHop
   randomize() # for later `randomNodes` selection

 proc splitBucket(r: var RoutingTable, index: int) =
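A hedged usage sketch of the widened `init` signature; the imports and the `generateNode` helper mirror the new tests added later in this commit:

import eth/p2p/discoveryv5/routing_table
import ./discv5_test_helper # provides generateNode, added in this commit

let node = generateNode()
var table: RoutingTable
table.init(node)    # old behaviour preserved: bitsPerHop defaults to 8
table.init(node, 1) # non-accelerated variant exercised by the new tests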
@@ -189,13 +197,14 @@ proc addNode*(r: var RoutingTable, n: Node): Node =
   let bucket = r.bucketForNode(n.id)
   let evictionCandidate = bucket.add(n)
   if not evictionCandidate.isNil:
-    # Split if the bucket has the local node in its range or if the depth is not congruent
-    # to 0 mod BITS_PER_HOP
+    # Split if the bucket has the local node in its range or if the depth is not
+    # congruent to 0 mod `bitsPerHop`
     let depth = computeSharedPrefixBits(bucket.nodes)
     # TODO: Shouldn't the adding to replacement cache be done only if the bucket
     # doesn't get split?
-    if bucket.inRange(r.thisNode) or (depth mod BITS_PER_HOP != 0 and depth != ID_SIZE):
+    if bucket.inRange(r.thisNode) or
+        (depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
       r.splitBucket(r.buckets.find(bucket))
       return r.addNode(n) # retry
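The split predicate above can be read in isolation; here is a small runnable model of it (an illustration only: `shouldSplit` and its parameters are stand-ins for the real fields):

# A bucket splits when it covers our own id, or when the shared-prefix
# depth of its nodes is not a multiple of bitsPerHop (and not ID_SIZE).
proc shouldSplit(coversSelf: bool, depth, bitsPerHop, idSize: int): bool =
  coversSelf or (depth mod bitsPerHop != 0 and depth != idSize)

assert shouldSplit(false, 5, 2, 256)     # off-range, depth 5 not aligned -> split
assert not shouldSplit(false, 4, 2, 256) # off-range, depth aligned -> no split
assert shouldSplit(true, 4, 2, 256)      # bucket covering our own id always splits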
@@ -0,0 +1,55 @@
+import
+  testutils/unittests, stew/shims/net, nimcrypto,
+  eth/[keys, rlp, trie/db],
+  eth/p2p/discoveryv5/[discovery_db, enr, node, types, routing_table, encoding],
+  eth/p2p/discoveryv5/protocol as discv5_protocol
+
+
+proc localAddress*(port: int): Address =
+  Address(ip: ValidIpAddress.init("127.0.0.1"), port: Port(port))
+
+proc initDiscoveryNode*(privKey: PrivateKey, address: Address,
+    bootstrapRecords: openarray[Record] = [],
+    localEnrFields: openarray[FieldPair] = []):
+    discv5_protocol.Protocol =
+  var db = DiscoveryDB.init(newMemoryDB())
+  result = newProtocol(privKey, db,
+    some(address.ip),
+    address.port, address.port,
+    bootstrapRecords = bootstrapRecords,
+    localEnrFields = localEnrFields)
+
+  result.open()
+
+proc nodeIdInNodes*(id: NodeId, nodes: openarray[Node]): bool =
+  for n in nodes:
+    if id == n.id: return true
+
+# Create a random packet with a specific nodeid each time
+proc randomPacket*(tag: PacketTag): seq[byte] =
+  var
+    authTag: AuthTag
+    msg: array[44, byte]
+
+  check randomBytes(authTag) == authTag.len
+  check randomBytes(msg) == msg.len
+  result.add(tag)
+  result.add(rlp.encode(authTag))
+  result.add(msg)
+
+proc generateNode*(privKey = PrivateKey.random()[], port: int = 20302,
+    localEnrFields: openarray[FieldPair] = []): Node =
+  let port = Port(port)
+  let enr = enr.Record.init(1, privKey, some(ValidIpAddress.init("127.0.0.1")),
+    port, port, localEnrFields).expect("Properly initialized private key")
+  result = newNode(enr).expect("Properly initialized node")
+
+proc nodeAtDistance*(n: Node, d: uint32): Node =
+  while true:
+    let node = generateNode()
+    if logDist(n.id, node.id) == d:
+      return node
+
+proc nodesAtDistance*(n: Node, d: uint32, amount: int): seq[Node] =
+  for i in 0..<amount:
+    result.add(nodeAtDistance(n, d))
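For readers unfamiliar with `logDist` as used by `nodeAtDistance` above: it is the logarithmic (Kademlia XOR) distance, i.e. the bit length of `a xor b`. A uint64 illustration follows (the real routine operates on 256-bit NodeIds; this sketch is only an analogy):

proc logDist64(a, b: uint64): uint32 =
  # bit length of the xor: position of the highest differing bit, 1-based
  var d = a xor b
  while d != 0:
    inc result
    d = d shr 1

assert logDist64(0b1000, 0b1011) == 2 # highest differing bit is bit 1 -> distance 2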
@@ -30,18 +30,6 @@ proc setupTestNode*(capabilities: varargs[ProtocolInfo, `protocolInfo`]): Ethere
   for capability in capabilities:
     result.addCapability capability

-template asyncTest*(name, body: untyped) =
-  test name:
-    proc scenario {.async.} = body
-    waitFor scenario()
-
-template procSuite*(name, body: untyped) =
-  proc suitePayload =
-    suite name:
-      body
-
-  suitePayload()
-
 proc packData*(payload: openArray[byte], pk: PrivateKey): seq[byte] =
   let
     payloadSeq = @payload
|
@ -1,60 +1,11 @@
|
||||||
import
|
import
|
||||||
unittest, chronos, sequtils, chronicles, tables, stint, nimcrypto,
|
chronos, chronicles, tables, stint, nimcrypto, testutils/unittests,
|
||||||
stew/shims/net, eth/[keys, rlp], eth/trie/db,
|
stew/shims/net, eth/keys,
|
||||||
eth/p2p/discoveryv5/[discovery_db, enr, node, types, routing_table, encoding],
|
eth/p2p/discoveryv5/[enr, node, types, routing_table, encoding],
|
||||||
eth/p2p/discoveryv5/protocol as discv5_protocol,
|
eth/p2p/discoveryv5/protocol as discv5_protocol,
|
||||||
./p2p_test_helper
|
./discv5_test_helper
|
||||||
|
|
||||||
proc localAddress*(port: int): Address =
|
procSuite "Discovery v5 Tests":
|
||||||
Address(ip: ValidIpAddress.init("127.0.0.1"), port: Port(port))
|
|
||||||
|
|
||||||
proc initDiscoveryNode*(privKey: PrivateKey, address: Address,
|
|
||||||
bootstrapRecords: openarray[Record] = [],
|
|
||||||
localEnrFields: openarray[FieldPair] = []):
|
|
||||||
discv5_protocol.Protocol =
|
|
||||||
var db = DiscoveryDB.init(newMemoryDB())
|
|
||||||
result = newProtocol(privKey, db,
|
|
||||||
some(address.ip),
|
|
||||||
address.port, address.port,
|
|
||||||
bootstrapRecords = bootstrapRecords,
|
|
||||||
localEnrFields = localEnrFields)
|
|
||||||
|
|
||||||
result.open()
|
|
||||||
|
|
||||||
proc nodeIdInNodes(id: NodeId, nodes: openarray[Node]): bool =
|
|
||||||
for n in nodes:
|
|
||||||
if id == n.id: return true
|
|
||||||
|
|
||||||
# Creating a random packet with specific nodeid each time
|
|
||||||
proc randomPacket(tag: PacketTag): seq[byte] =
|
|
||||||
var
|
|
||||||
authTag: AuthTag
|
|
||||||
msg: array[44, byte]
|
|
||||||
|
|
||||||
check randomBytes(authTag) == authTag.len
|
|
||||||
check randomBytes(msg) == msg.len
|
|
||||||
result.add(tag)
|
|
||||||
result.add(rlp.encode(authTag))
|
|
||||||
result.add(msg)
|
|
||||||
|
|
||||||
proc generateNode(privKey = PrivateKey.random()[], port: int = 20302,
|
|
||||||
localEnrFields: openarray[FieldPair] = []): Node =
|
|
||||||
let port = Port(port)
|
|
||||||
let enr = enr.Record.init(1, privKey, some(ValidIpAddress.init("127.0.0.1")),
|
|
||||||
port, port, localEnrFields).expect("Properly intialized private key")
|
|
||||||
result = newNode(enr).expect("Properly initialized node")
|
|
||||||
|
|
||||||
proc nodeAtDistance(n: Node, d: uint32): Node =
|
|
||||||
while true:
|
|
||||||
let node = generateNode()
|
|
||||||
if logDist(n.id, node.id) == d:
|
|
||||||
return node
|
|
||||||
|
|
||||||
proc nodesAtDistance(n: Node, d: uint32, amount: int): seq[Node] =
|
|
||||||
for i in 0..<amount:
|
|
||||||
result.add(nodeAtDistance(n, d))
|
|
||||||
|
|
||||||
suite "Discovery v5 Tests":
|
|
||||||
asyncTest "GetNode":
|
asyncTest "GetNode":
|
||||||
# TODO: This could be tested in just a routing table only context
|
# TODO: This could be tested in just a routing table only context
|
||||||
let
|
let
|
||||||
|
|
|
@@ -8,7 +8,8 @@
 # MIT license (LICENSE-MIT)

 import
-  unittest, tables, chronos, eth/p2p,
+  tables, chronos, testutils/unittests,
+  eth/p2p,
   ./p2p_test_helper

 type
@@ -0,0 +1,61 @@
+import
+  unittest, stew/shims/net, stint,
+  eth/keys, eth/p2p/discoveryv5/[routing_table, node],
+  ./discv5_test_helper
+
+suite "Routing Table Tests":
+  test "Bucket splitting in range branch b=1":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 1 -> split only the branch in range of own id
+    table.init(node, 1)
+
+    for j in 0..5'u32:
+      for i in 0..<BUCKET_SIZE:
+        check table.addNode(node.nodeAtDistance(256-j)) == nil
+      check table.addNode(node.nodeAtDistance(256-j)) != nil
+
+  test "Bucket splitting off range branch b=1":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 1 -> split only the branch in range of own id
+    table.init(node, 1)
+
+    # Add 16 nodes at distance 256
+    for i in 0..<BUCKET_SIZE:
+      check table.addNode(node.nodeAtDistance(256)) == nil
+
+    # This should split the bucket into the distance 256 branch and the
+    # distance <=255 branch, but not add the node, as the distance 256 bucket
+    # is already full and b=1 will not allow it to split any further.
+    check table.addNode(node.nodeAtDistance(256)) != nil
+
+    # This add should be allowed as it is on the branch where the own node's
+    # id belongs.
+    check table.addNode(node.nodeAtDistance(255)) == nil
+
+  test "Bucket splitting off range branch b=2":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 2, allowing the not-in-range branch to split once.
+    table.init(node, 2)
+
+    # Add 16 nodes at distance 256
+    for i in 0..<BUCKET_SIZE:
+      check table.addNode(node.nodeAtDistance(256)) == nil
+
+    # Add another 32 nodes to make the not-in-range branch split and to be
+    # sure both buckets are full.
+    # TODO: Could improve by adding specific nodes for one of the buckets.
+    for i in 0..<BUCKET_SIZE*2:
+      discard table.addNode(node.nodeAtDistance(256))
+
+    # Adding another node should fail as both buckets should be full and not
+    # be allowed to split another time.
+    check table.addNode(node.nodeAtDistance(256)) != nil
+    # This add should be allowed as it is on the branch where the own node's
+    # id belongs.
+    check table.addNode(node.nodeAtDistance(255)) == nil
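A back-of-envelope check of the b=2 test above (an interpretation of the test's own comments, not text from the commit):

const bucketSize = 16  # mirrors BUCKET_SIZE
let offRangeSplits = 1 # bitsPerHop = 2 allows the off-range branch one split
let capacity = bucketSize * (1 + offRangeSplits)
assert capacity == 32  # hence the BUCKET_SIZE*2 fill loop before the final
                       # distance-256 add is expected to fail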
@@ -8,8 +8,8 @@
 # MIT license (LICENSE-MIT)

 import
-  sequtils, options, unittest, tables, chronos, eth/[keys, p2p],
-  eth/p2p/rlpx_protocols/whisper_protocol, eth/p2p/peer_pool,
+  sequtils, options, tables, chronos, testutils/unittests,
+  eth/[keys, p2p], eth/p2p/rlpx_protocols/whisper_protocol, eth/p2p/peer_pool,
   ./p2p_test_helper

 proc resetMessageQueues(nodes: varargs[EthereumNode]) =
@@ -8,7 +8,7 @@
 # MIT license (LICENSE-MIT)

 import
-  sequtils, strformat, options, unittest,
+  sequtils, strformat, options, testutils/unittests,
   chronicles, chronos, eth/[rlp, keys, p2p],
   eth/p2p/mock_peers