From 9acdca795bec190932d9e1a0846aba77ae301e3a Mon Sep 17 00:00:00 2001
From: Ben Bierens <39762930+benbierens@users.noreply.github.com>
Date: Mon, 23 Sep 2024 15:49:08 +0200
Subject: [PATCH] routing table logging update (#97)

* Clear logs for adding and removing of nodes; adds a routingtable log topic
  for filtering.
* Makes node ID shortening consistent with other short-id formats
* Removes a redundant else block
* Fixes dependencies
---
 codexdht.nimble                               |  4 +-
 codexdht/private/eth/p2p/discoveryv5/node.nim |  2 +-
 .../eth/p2p/discoveryv5/routing_table.nim     | 42 ++++++++++---------
 3 files changed, 26 insertions(+), 22 deletions(-)

diff --git a/codexdht.nimble b/codexdht.nimble
index 6e3fe3e..0f89769 100644
--- a/codexdht.nimble
+++ b/codexdht.nimble
@@ -12,12 +12,12 @@ requires "protobuf_serialization" # >= 0.2.0 & < 0.3.0
 requires "nimcrypto >= 0.5.4"
 requires "bearssl == 0.2.5"
 requires "chronicles >= 0.10.2 & < 0.11.0"
-requires "chronos#dc3847e4d6733dfc3811454c2a9c384b87343e26"
+requires "chronos >= 4.0.3 & < 4.1.0"
 requires "libp2p == 1.5.0"
 requires "metrics"
 requires "stew#head"
 requires "stint"
-requires "https://github.com/codex-storage/nim-datastore >= 0.1.0 & < 0.2.0"
+requires "https://github.com/codex-storage/nim-datastore >= 0.1.1 & < 0.2.0"
 requires "questionable"
 
 task testAll, "Run all test suites":
diff --git a/codexdht/private/eth/p2p/discoveryv5/node.nim b/codexdht/private/eth/p2p/discoveryv5/node.nim
index 0f08aec..7b62dff 100644
--- a/codexdht/private/eth/p2p/discoveryv5/node.nim
+++ b/codexdht/private/eth/p2p/discoveryv5/node.nim
@@ -135,7 +135,7 @@ func shortLog*(id: NodeId): string =
     result = sid
   else:
     result = newStringOfCap(10)
-    for i in 0..<2:
+    for i in 0..<3:
       result.add(sid[i])
     result.add("*")
     for i in (len(sid) - 6)..sid.high:
diff --git a/codexdht/private/eth/p2p/discoveryv5/routing_table.nim b/codexdht/private/eth/p2p/discoveryv5/routing_table.nim
index 60ec69c..a890863 100644
--- a/codexdht/private/eth/p2p/discoveryv5/routing_table.nim
+++ b/codexdht/private/eth/p2p/discoveryv5/routing_table.nim
@@ -17,6 +17,9 @@ export options
 declarePublicGauge routing_table_nodes,
   "Discovery routing table nodes", labels = ["state"]
 
+logScope:
+  topics = "discv5 routingtable"
+
 type
   DistanceProc* = proc(a, b: NodeId): NodeId {.raises: [Defect], gcsafe, noSideEffect.}
   LogDistanceProc* = proc(a, b: NodeId): uint16 {.raises: [Defect], gcsafe, noSideEffect.}
@@ -317,15 +320,12 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
       # gets moved to the tail.
       if k.replacementCache[nodeIdx].address.get().ip != n.address.get().ip:
         if not ipLimitInc(r, k, n):
-          trace "replace: ip limit reached"
           return IpLimitReached
         ipLimitDec(r, k, k.replacementCache[nodeIdx])
       k.replacementCache.delete(nodeIdx)
       k.replacementCache.add(n)
-      trace "replace: already existed"
       return ReplacementExisting
   elif not ipLimitInc(r, k, n):
-    trace "replace: ip limit reached (2)"
     return IpLimitReached
   else:
     doAssert(k.replacementCache.len <= REPLACEMENT_CACHE_SIZE)
@@ -336,7 +336,7 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
       k.replacementCache.delete(0)
 
     k.replacementCache.add(n)
-    trace "replace: added"
+    debug "Node added to replacement cache", n
     return ReplacementAdded
 
 proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
@@ -403,21 +403,22 @@ proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
       return IpLimitReached
 
     bucket.add(n)
-  else:
-    # Bucket must be full, but lets see if it should be split the bucket.
+    debug "Node added to routing table", n
+    return Added
 
-    # Calculate the prefix shared by all nodes in the bucket's range, not the
-    # ones actually in the bucket.
-    let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
-    # Split if the bucket has the local node in its range or if the depth is not
-    # congruent to 0 mod `bitsPerHop`
-    if bucket.inRange(r.localNode) or
-        (depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
-      r.splitBucket(r.buckets.find(bucket))
-      return r.addNode(n) # retry adding
-    else:
-      # When bucket doesn't get split the node is added to the replacement cache
-      return r.addReplacement(bucket, n)
+  # Bucket must be full, but lets see if it should be split the bucket.
+  # Calculate the prefix shared by all nodes in the bucket's range, not the
+  # ones actually in the bucket.
+  let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
+  # Split if the bucket has the local node in its range or if the depth is not
+  # congruent to 0 mod `bitsPerHop`
+  if bucket.inRange(r.localNode) or
+      (depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
+    r.splitBucket(r.buckets.find(bucket))
+    return r.addNode(n) # retry adding
+
+  # When bucket doesn't get split the node is added to the replacement cache
+  return r.addReplacement(bucket, n)
 
 proc removeNode*(r: var RoutingTable, n: Node) =
   ## Remove the node `n` from the routing table.
@@ -433,12 +434,15 @@ proc replaceNode*(r: var RoutingTable, n: Node) =
   # revalidation as you don't want to try pinging that node all the time.
   let b = r.bucketForNode(n.id)
   if b.remove(n):
+    debug "Node removed from routing table", n
     ipLimitDec(r, b, n)
 
     if b.replacementCache.len > 0:
       # Nodes in the replacement cache are already included in the ip limits.
-      b.add(b.replacementCache[high(b.replacementCache)])
+      let rn = b.replacementCache[high(b.replacementCache)]
+      b.add(rn)
       b.replacementCache.delete(high(b.replacementCache))
+      debug "Node added to routing table from replacement cache", node=rn
 
 proc getNode*(r: RoutingTable, id: NodeId): Option[Node] =
   ## Get the `Node` with `id` as `NodeId` from the routing table.
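
Following the patch, a minimal standalone sketch of the id-shortening format
that the node.nim hunk produces: three leading characters, a "*", and the six
trailing characters (previously only two leading characters). The demo proc and
the digits below are illustrative only, not code from the patch.

proc shortIdDemo(sid: string): string =
  ## Mirrors the patched shortLog logic on a plain string.
  if sid.len <= 10:
    return sid
  result = newStringOfCap(10)
  for i in 0 ..< 3:            # was `0 ..< 2` before this patch
    result.add(sid[i])
  result.add("*")
  for i in (sid.len - 6) .. sid.high:
    result.add(sid[i])

when isMainModule:
  echo shortIdDemo("8452763190463542718996700328563491203017")  # -> 845*203017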
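
The logScope added to routing_table.nim tags every log line from that module
with the "discv5 routingtable" topics, which is what makes the new add/remove
messages filterable. A small standalone sketch of the same chronicles pattern;
the proc and values are illustrative, and the compile-time filtering define is
an assumption about chronicles, not something this patch adds.

import chronicles

logScope:
  topics = "routingtable"

proc demoLog() =
  # Same shape as the new log lines in routing_table.nim.
  debug "Node added to routing table", node = "845*203017"

when isMainModule:
  demoLog()
  # Assumption: chronicles can restrict output to this topic at compile time,
  # e.g. building with -d:chronicles_enabled_topics=routingtable.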