From 2d7b3440f2842ca1d434024cc9f46375decca378 Mon Sep 17 00:00:00 2001
From: kdeme
Date: Wed, 17 Jun 2020 13:51:30 +0200
Subject: [PATCH] make bitsPerHop configurable + add routing table tests

---
 eth.nimble                            |  3 +-
 eth/p2p/discoveryv5/routing_table.nim | 19 ++++---
 tests/p2p/discv5_test_helper.nim      | 55 ++++++++++++++++++++++++
 tests/p2p/p2p_test_helper.nim         | 12 ------
 tests/p2p/test_discoveryv5.nim        | 59 +++-----------------
 tests/p2p/test_protocol_handlers.nim  |  3 +-
 tests/p2p/test_routing_table.nim      | 61 +++++++++++++++++++++++++++
 tests/p2p/test_shh_connect.nim        |  4 +-
 tests/p2p/tserver.nim                 |  2 +-
 9 files changed, 142 insertions(+), 76 deletions(-)
 create mode 100644 tests/p2p/discv5_test_helper.nim
 create mode 100644 tests/p2p/test_routing_table.nim

diff --git a/eth.nimble b/eth.nimble
index 5caf7a1..b9ecb13 100644
--- a/eth.nimble
+++ b/eth.nimble
@@ -50,7 +50,8 @@ proc runP2pTests() =
     "test_protocol_handlers",
     "test_enr",
     "test_discoveryv5",
-    "test_discv5_encoding"
+    "test_discv5_encoding",
+    "test_routing_table"
   ]:
     runTest("tests/p2p/" & filename)

diff --git a/eth/p2p/discoveryv5/routing_table.nim b/eth/p2p/discoveryv5/routing_table.nim
index 7a71337..1016e19 100644
--- a/eth/p2p/discoveryv5/routing_table.nim
+++ b/eth/p2p/discoveryv5/routing_table.nim
@@ -9,6 +9,14 @@ type
   RoutingTable* = object
     thisNode: Node
     buckets: seq[KBucket]
+    bitsPerHop: int ## This value indicates how many bits (at minimum) you get
+    ## closer to finding your target per query. Practically, it also tells you
+    ## how often the "not in range" branch will split off. Setting this to 1
+    ## is the basic, non-accelerated version, which never splits off the
+    ## not-in-range branch and results in log base 2 of n hops per lookup.
+    ## Setting it higher increases the amount of splitting on a not-in-range
+    ## branch (thus holding more nodes with better keyspace coverage), which
+    ## results in an improved log base 2^b of n hops per lookup.

   KBucket = ref object
     istart, iend: NodeId ## Range of NodeIds this KBucket covers. This is not a
@@ -27,7 +35,6 @@ type
 const
   BUCKET_SIZE* = 16
   REPLACEMENT_CACHE_SIZE* = 8
-  BITS_PER_HOP = 8
   ID_SIZE = 256

 proc distanceTo(n: Node, id: NodeId): UInt256 =
@@ -165,9 +172,10 @@ proc computeSharedPrefixBits(nodes: openarray[Node]): int =

   doAssert(false, "Unable to calculate number of shared prefix bits")

-proc init*(r: var RoutingTable, thisNode: Node) {.inline.} =
+proc init*(r: var RoutingTable, thisNode: Node, bitsPerHop = 8) {.inline.} =
   r.thisNode = thisNode
   r.buckets = @[newKBucket(0.u256, high(Uint256))]
+  r.bitsPerHop = bitsPerHop
   randomize() # for later `randomNodes` selection

 proc splitBucket(r: var RoutingTable, index: int) =
@@ -189,13 +197,14 @@ proc addNode*(r: var RoutingTable, n: Node): Node =
   let bucket = r.bucketForNode(n.id)
   let evictionCandidate = bucket.add(n)
   if not evictionCandidate.isNil:
-    # Split if the bucket has the local node in its range or if the depth is not congruent
-    # to 0 mod BITS_PER_HOP
+    # Split if the bucket has the local node in its range or if the depth is not
+    # congruent to 0 mod `bitsPerHop`
     let depth = computeSharedPrefixBits(bucket.nodes)
     # TODO: Shouldn't the adding to replacement cache be done only if the bucket
     # doesn't get split?
-    if bucket.inRange(r.thisNode) or (depth mod BITS_PER_HOP != 0 and depth != ID_SIZE):
+    if bucket.inRange(r.thisNode) or
+        (depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
       r.splitBucket(r.buckets.find(bucket))
       return r.addNode(n) # retry
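The acceleration described in the new `bitsPerHop` doc comment can be
sanity-checked with a little arithmetic: if every query gains at least b bits
of proximity toward a target in an n-node network, a lookup needs roughly
log2(n) / b queries. Below is a small standalone Nim sketch (illustration
only, not part of this patch) that prints the expected hop counts for a few
network sizes and `bitsPerHop` values:

  import math, strformat

  # Expected lookup hops: gaining at least `b` bits of proximity per
  # query over an `n`-node keyspace needs about log2(n) / b queries.
  proc expectedHops(n, b: int): float =
    log2(n.float) / b.float

  when isMainModule:
    for n in [1_000, 100_000, 10_000_000]:
      for b in [1, 5, 8]:
        echo &"n={n}, bitsPerHop={b}: ~{expectedHops(n, b):.1f} hops"

With the previous hard-coded value of 8, a ten-million-node network is covered
in about 3 hops, versus roughly 23 hops in the non-accelerated b = 1 case;
that difference is exactly what the doc comment's log base 2^b claim amounts to.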
diff --git a/tests/p2p/discv5_test_helper.nim b/tests/p2p/discv5_test_helper.nim
new file mode 100644
index 0000000..9cb2978
--- /dev/null
+++ b/tests/p2p/discv5_test_helper.nim
@@ -0,0 +1,55 @@
+import
+  testutils/unittests, stew/shims/net, nimcrypto,
+  eth/[keys, rlp, trie/db],
+  eth/p2p/discoveryv5/[discovery_db, enr, node, types, routing_table, encoding],
+  eth/p2p/discoveryv5/protocol as discv5_protocol
+
+
+proc localAddress*(port: int): Address =
+  Address(ip: ValidIpAddress.init("127.0.0.1"), port: Port(port))
+
+proc initDiscoveryNode*(privKey: PrivateKey, address: Address,
+                        bootstrapRecords: openarray[Record] = [],
+                        localEnrFields: openarray[FieldPair] = []):
+                        discv5_protocol.Protocol =
+  var db = DiscoveryDB.init(newMemoryDB())
+  result = newProtocol(privKey, db,
+                       some(address.ip),
+                       address.port, address.port,
+                       bootstrapRecords = bootstrapRecords,
+                       localEnrFields = localEnrFields)
+
+  result.open()
+
+proc nodeIdInNodes*(id: NodeId, nodes: openarray[Node]): bool =
+  for n in nodes:
+    if id == n.id: return true
+
+# Create a random packet with a specific node id each time
+proc randomPacket*(tag: PacketTag): seq[byte] =
+  var
+    authTag: AuthTag
+    msg: array[44, byte]
+
+  check randomBytes(authTag) == authTag.len
+  check randomBytes(msg) == msg.len
+  result.add(tag)
+  result.add(rlp.encode(authTag))
+  result.add(msg)
+
+proc generateNode*(privKey = PrivateKey.random()[], port: int = 20302,
+                   localEnrFields: openarray[FieldPair] = []): Node =
+  let port = Port(port)
+  let enr = enr.Record.init(1, privKey, some(ValidIpAddress.init("127.0.0.1")),
+    port, port, localEnrFields).expect("Properly initialized private key")
+  result = newNode(enr).expect("Properly initialized node")
+
+proc nodeAtDistance*(n: Node, d: uint32): Node =
+  while true:
+    let node = generateNode()
+    if logDist(n.id, node.id) == d:
+      return node
+
+proc nodesAtDistance*(n: Node, d: uint32, amount: int): seq[Node] =
+  for i in 0..<amount:
+    result.add(nodeAtDistance(n, d))

...

diff --git a/tests/p2p/test_routing_table.nim b/tests/p2p/test_routing_table.nim
new file mode 100644
--- /dev/null
+++ b/tests/p2p/test_routing_table.nim
@@ -0,0 +1,61 @@
...
+    # bitsPerHop of 1 -> Split only the branch in range of own id
+    table.init(node, 1)
+
+    for j in 0..5'u32:
+      for i in 0..
...
+    # bitsPerHop of 1 -> Split only the branch in range of own id
+    table.init(node, 1)
+
+    # Add 16 nodes, distance 256
+    for i in 0..
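The visible fragments of test_routing_table.nim show the pattern the new tests
rely on: initialize the table with `bitsPerHop = 1`, so only the bucket
covering the local node's own id ever splits, then generate peers at a chosen
logarithmic distance with the new `nodeAtDistance` helper. The following is a
sketch of that pattern against this patch's API (the structure and assertions
here are illustrative, not the committed test file):

  import eth/p2p/discoveryv5/[routing_table, node]
  import ./discv5_test_helper  # generateNode, nodeAtDistance

  let local = generateNode()
  var table: RoutingTable

  # bitsPerHop = 1: the not-in-range branch never splits, so any bucket
  # not covering `local`'s id caps out at BUCKET_SIZE (16) nodes.
  table.init(local, 1)

  # The first 16 nodes at log-distance 256 are accepted: addNode returns nil.
  for i in 0..<BUCKET_SIZE:
    doAssert table.addNode(nodeAtDistance(local, 256)) == nil

  # A 17th node forces the initial full-range bucket to split once (it
  # contains `local`), but the distance-256 half is then full and not in
  # range, so the retried add returns an eviction candidate instead of nil.
  doAssert table.addNode(nodeAtDistance(local, 256)) != nil

Making `bitsPerHop` an `init` parameter rather than the old module-level
BITS_PER_HOP constant is what lets the tests pin down the splitting behavior
at b = 1 while production callers keep the accelerated default of 8.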