Mirror of https://github.com/logos-storage/logos-storage-nim-dht.git (synced 2026-01-03 22:13:06 +00:00)
routing table logging update (#97)
* Clear logs for adding and removing of nodes, with a routingtable log topic for filtering.
* Makes node ID shortening consistent with other short-id formats.
* Removes a redundant else block.
* Fixes dependencies.
Parent: 5f38fd9570
Commit: 9acdca795b
@@ -12,12 +12,12 @@ requires "protobuf_serialization" # >= 0.2.0 & < 0.3.0
 requires "nimcrypto >= 0.5.4"
 requires "bearssl == 0.2.5"
 requires "chronicles >= 0.10.2 & < 0.11.0"
-requires "chronos#dc3847e4d6733dfc3811454c2a9c384b87343e26"
+requires "chronos >= 4.0.3 & < 4.1.0"
 requires "libp2p == 1.5.0"
 requires "metrics"
 requires "stew#head"
 requires "stint"
-requires "https://github.com/codex-storage/nim-datastore >= 0.1.0 & < 0.2.0"
+requires "https://github.com/codex-storage/nim-datastore >= 0.1.1 & < 0.2.0"
 requires "questionable"
 
 task testAll, "Run all test suites":
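Note on the dependency change above: in a .nimble file, a "#" suffix pins one exact git revision, while ">="/"<" bounds accept any tagged release in the range. The two chronos lines from the hunk, side by side:

# Old: pinned to a single chronos commit.
requires "chronos#dc3847e4d6733dfc3811454c2a9c384b87343e26"
# New: any chronos release in the 4.0.x line from 4.0.3 onward.
requires "chronos >= 4.0.3 & < 4.1.0"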
@@ -135,7 +135,7 @@ func shortLog*(id: NodeId): string =
     result = sid
   else:
     result = newStringOfCap(10)
-    for i in 0..<2:
+    for i in 0..<3:
       result.add(sid[i])
     result.add("*")
     for i in (len(sid) - 6)..sid.high:
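For illustration, a minimal self-contained sketch of the new shortening on a plain hex string (shortHex and its length guard are stand-ins; the real cutoff lies outside this hunk). Three leading characters plus "*" plus six trailing characters fill the 10-character capacity exactly, which the old 0..<2 loop left one character short:

proc shortHex(sid: string): string =
  if sid.len <= 10:
    result = sid                  # short ids are shown in full
  else:
    result = newStringOfCap(10)   # 3 prefix + "*" + 6 suffix = 10 chars
    for i in 0..<3:
      result.add(sid[i])
    result.add("*")
    for i in (len(sid) - 6)..sid.high:
      result.add(sid[i])

when isMainModule:
  echo shortHex("e2f7cb6d23371d61f8a36656b9dbb0d1")  # -> e2f*dbb0d1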
@@ -17,6 +17,9 @@ export options
 declarePublicGauge routing_table_nodes,
   "Discovery routing table nodes", labels = ["state"]
 
+logScope:
+  topics = "discv5 routingtable"
+
 type
   DistanceProc* = proc(a, b: NodeId): NodeId {.raises: [Defect], gcsafe, noSideEffect.}
   LogDistanceProc* = proc(a, b: NodeId): uint16 {.raises: [Defect], gcsafe, noSideEffect.}
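The new logScope tags every record from this module with the topics "discv5" and "routingtable", which chronicles can use for compile-time filtering. A sketch of how a consumer might exploit that (flag names per the chronicles README; the standalone module here is illustrative):

# Build with only routingtable records enabled, or silence them:
#   nim c -d:chronicles_enabled_topics=routingtable app.nim
#   nim c -d:chronicles_disabled_topics=routingtable app.nim
import chronicles

logScope:
  topics = "discv5 routingtable"

when isMainModule:
  debug "Node added to routing table"  # carries topics="discv5 routingtable"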
@@ -317,15 +320,12 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
     # gets moved to the tail.
     if k.replacementCache[nodeIdx].address.get().ip != n.address.get().ip:
       if not ipLimitInc(r, k, n):
-        trace "replace: ip limit reached"
         return IpLimitReached
       ipLimitDec(r, k, k.replacementCache[nodeIdx])
     k.replacementCache.delete(nodeIdx)
     k.replacementCache.add(n)
-    trace "replace: already existed"
     return ReplacementExisting
   elif not ipLimitInc(r, k, n):
-    trace "replace: ip limit reached (2)"
     return IpLimitReached
   else:
     doAssert(k.replacementCache.len <= REPLACEMENT_CACHE_SIZE)
@@ -336,7 +336,7 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
       k.replacementCache.delete(0)
 
     k.replacementCache.add(n)
-    trace "replace: added"
+    debug "Node added to replacement cache", n
     return ReplacementAdded
 
 proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
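The new debug statement passes the node value itself (", n"), so chronicles renders it with whatever formatter the module registers for Node. A minimal sketch of that mechanism with a stand-in Node type (hypothetical definitions, not the repo's):

import chronicles

type Node = object
  id: string

func shortLog(n: Node): string =
  # Stand-in for the repo's shortLog shown earlier.
  n.id[0 ..< 3] & "*"

# Teach chronicles how to print Node values:
chronicles.formatIt(Node): shortLog(it)

when isMainModule:
  let n = Node(id: "e2f7cb6d")
  debug "Node added to replacement cache", n  # logged as n=e2f*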
@@ -403,21 +403,22 @@ proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
         return IpLimitReached
 
       bucket.add(n)
-    else:
-      # Bucket must be full, but lets see if it should be split the bucket.
+      debug "Node added to routing table", n
+      return Added
 
-      # Calculate the prefix shared by all nodes in the bucket's range, not the
-      # ones actually in the bucket.
-      let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
-      # Split if the bucket has the local node in its range or if the depth is not
-      # congruent to 0 mod `bitsPerHop`
-      if bucket.inRange(r.localNode) or
-          (depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
-        r.splitBucket(r.buckets.find(bucket))
-        return r.addNode(n) # retry adding
-      else:
-        # When bucket doesn't get split the node is added to the replacement cache
-        return r.addReplacement(bucket, n)
+    # Bucket must be full, but lets see if it should be split the bucket.
+    # Calculate the prefix shared by all nodes in the bucket's range, not the
+    # ones actually in the bucket.
+    let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
+    # Split if the bucket has the local node in its range or if the depth is not
+    # congruent to 0 mod `bitsPerHop`
+    if bucket.inRange(r.localNode) or
+        (depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
+      r.splitBucket(r.buckets.find(bucket))
+      return r.addNode(n) # retry adding
+
+    # When bucket doesn't get split the node is added to the replacement cache
+    return r.addReplacement(bucket, n)
 
 proc removeNode*(r: var RoutingTable, n: Node) =
   ## Remove the node `n` from the routing table.
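The split decision hinges on computeSharedPrefixBits over the bucket's boundary ids: the longer the prefix that istart and iend share, the deeper the bucket sits in the tree. A toy illustration with uint64 stand-ins for the 256-bit NodeId (sharedPrefixBits is a hypothetical helper, not the library routine):

import std/bitops

func sharedPrefixBits(a, b: uint64): int =
  # Number of leading bits on which a and b agree; 64 when equal.
  if a == b: 64
  else: countLeadingZeroBits(a xor b)

when isMainModule:
  # A bucket covering one quarter of the id space has boundaries
  # 0b0100... and 0b0111...; they share 2 leading bits, so depth = 2.
  echo sharedPrefixBits(0x4000000000000000'u64, 0x7FFFFFFFFFFFFFFF'u64)  # 2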
@@ -433,12 +434,15 @@ proc replaceNode*(r: var RoutingTable, n: Node) =
   # revalidation as you don't want to try pinging that node all the time.
   let b = r.bucketForNode(n.id)
   if b.remove(n):
+    debug "Node removed from routing table", n
     ipLimitDec(r, b, n)
 
     if b.replacementCache.len > 0:
       # Nodes in the replacement cache are already included in the ip limits.
-      b.add(b.replacementCache[high(b.replacementCache)])
+      let rn = b.replacementCache[high(b.replacementCache)]
+      b.add(rn)
       b.replacementCache.delete(high(b.replacementCache))
+      debug "Node added to routing table from replacement cache", node=rn
 
 proc getNode*(r: RoutingTable, id: NodeId): Option[Node] =
   ## Get the `Node` with `id` as `NodeId` from the routing table.
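A small chronicles detail in this hunk: a bare expression (", n") yields a log property named after the expression, while "node=rn" names the property explicitly. Both forms in two lines (illustrative values):

import chronicles

when isMainModule:
  let n = 42
  debug "property naming", n         # emitted as n=42
  debug "property naming", node = n  # emitted as node=42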