Mirror of https://github.com/logos-storage/logos-storage-nim-dht.git (synced 2026-01-07 16:03:09 +00:00)
routing table logging update (#97)
* Clear logs for adding and removing of nodes. routingtable log topic for filtering.
* Makes node ID shortening consistent with other short-id formats
* redundant else block
* fixes dependencies
parent 5f38fd9570
commit 9acdca795b
@@ -12,12 +12,12 @@ requires "protobuf_serialization" # >= 0.2.0 & < 0.3.0
 requires "nimcrypto >= 0.5.4"
 requires "bearssl == 0.2.5"
 requires "chronicles >= 0.10.2 & < 0.11.0"
-requires "chronos#dc3847e4d6733dfc3811454c2a9c384b87343e26"
+requires "chronos >= 4.0.3 & < 4.1.0"
 requires "libp2p == 1.5.0"
 requires "metrics"
 requires "stew#head"
 requires "stint"
-requires "https://github.com/codex-storage/nim-datastore >= 0.1.0 & < 0.2.0"
+requires "https://github.com/codex-storage/nim-datastore >= 0.1.1 & < 0.2.0"
 requires "questionable"
 
 task testAll, "Run all test suites":
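The chronos dependency moves from a commit-hash pin to a semantic-version range, and nim-datastore's lower bound is bumped to 0.1.1. For reference, the two Nimble pinning styles look like this (a hedged sketch; "somepkg" and the versions are placeholders):

requires "somepkg#dc3847e"              # pin to one exact commit
requires "somepkg >= 4.0.3 & < 4.1.0"   # accept any release in the range

A version range lets nimble pick up compatible patch releases, while a commit pin freezes the dependency entirely.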
@@ -135,7 +135,7 @@ func shortLog*(id: NodeId): string =
     result = sid
   else:
     result = newStringOfCap(10)
-    for i in 0..<2:
+    for i in 0..<3:
       result.add(sid[i])
     result.add("*")
     for i in (len(sid) - 6)..sid.high:
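The loop bound change from 2 to 3 means a shortened node ID now keeps its first three characters instead of two, matching the other short-id formats mentioned in the commit message. A standalone sketch of the resulting scheme (assuming `sid` holds the id rendered as a string; the `<= 10` cutoff and the name `shortId` are illustrative):

func shortId(sid: string): string =
  ## Abbreviate a long id as "abc*def456".
  if sid.len <= 10:
    result = sid                      # already short, keep as-is
  else:
    result = newStringOfCap(10)
    for i in 0 ..< 3:                 # first three characters
      result.add(sid[i])
    result.add("*")                   # elision marker
    for i in (len(sid) - 6) .. sid.high:
      result.add(sid[i])              # last six characters

echo shortId("7f3a9c0d2e4b6a8c1f5d7e9b3a2c4d6e")  # -> 7f3*2c4d6e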
@@ -17,6 +17,9 @@ export options
 declarePublicGauge routing_table_nodes,
   "Discovery routing table nodes", labels = ["state"]
 
+logScope:
+  topics = "discv5 routingtable"
+
 type
   DistanceProc* = proc(a, b: NodeId): NodeId {.raises: [Defect], gcsafe, noSideEffect.}
   LogDistanceProc* = proc(a, b: NodeId): uint16 {.raises: [Defect], gcsafe, noSideEffect.}
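The new logScope attaches a "discv5 routingtable" topic to every Chronicles log statement in the module, which is what enables the per-topic filtering mentioned in the commit message. A minimal illustration of the mechanism (the surrounding proc is hypothetical):

import chronicles

logScope:
  topics = "discv5 routingtable"      # inherited by all log statements below

proc demo() =
  # Emitted with topics="discv5 routingtable"; a consumer can then keep or
  # drop records by topic, e.g. via the chronicles_enabled_topics define.
  debug "Node added to routing table"

demo()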
@@ -317,15 +320,12 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
       # gets moved to the tail.
       if k.replacementCache[nodeIdx].address.get().ip != n.address.get().ip:
         if not ipLimitInc(r, k, n):
-          trace "replace: ip limit reached"
           return IpLimitReached
         ipLimitDec(r, k, k.replacementCache[nodeIdx])
       k.replacementCache.delete(nodeIdx)
       k.replacementCache.add(n)
-      trace "replace: already existed"
       return ReplacementExisting
   elif not ipLimitInc(r, k, n):
-    trace "replace: ip limit reached (2)"
     return IpLimitReached
   else:
     doAssert(k.replacementCache.len <= REPLACEMENT_CACHE_SIZE)
@@ -336,7 +336,7 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
       k.replacementCache.delete(0)
 
     k.replacementCache.add(n)
-    trace "replace: added"
+    debug "Node added to replacement cache", n
     return ReplacementAdded
 
 proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
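The two hunks above drop the terse trace lines in favour of a single structured debug on the success path. The surrounding logic is a bounded FIFO: a node already present moves to the tail, and the oldest entry is evicted once REPLACEMENT_CACHE_SIZE is reached. A self-contained sketch of that policy, with the IP-limit accounting from the real code omitted (int stands in for Node; the bound is illustrative):

const ReplacementCacheSize = 8        # stand-in for REPLACEMENT_CACHE_SIZE

proc addReplacementSketch(cache: var seq[int], n: int) =
  let idx = cache.find(n)
  if idx >= 0:
    cache.delete(idx)                 # already present: move to the tail
  elif cache.len == ReplacementCacheSize:
    cache.delete(0)                   # full: evict the oldest entry
  cache.add(n)

var c: seq[int]
for x in [1, 2, 3, 2]:
  c.addReplacementSketch(x)
echo c                                # -> @[1, 3, 2]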
@@ -403,21 +403,22 @@ proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
       return IpLimitReached
 
     bucket.add(n)
-  else:
-    # Bucket must be full, but lets see if it should be split the bucket.
+    debug "Node added to routing table", n
+    return Added
 
-    # Calculate the prefix shared by all nodes in the bucket's range, not the
-    # ones actually in the bucket.
-    let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
-    # Split if the bucket has the local node in its range or if the depth is not
-    # congruent to 0 mod `bitsPerHop`
-    if bucket.inRange(r.localNode) or
-        (depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
-      r.splitBucket(r.buckets.find(bucket))
-      return r.addNode(n) # retry adding
-    else:
-      # When bucket doesn't get split the node is added to the replacement cache
-      return r.addReplacement(bucket, n)
+  # Bucket must be full, but lets see if it should be split the bucket.
+  # Calculate the prefix shared by all nodes in the bucket's range, not the
+  # ones actually in the bucket.
+  let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
+  # Split if the bucket has the local node in its range or if the depth is not
+  # congruent to 0 mod `bitsPerHop`
+  if bucket.inRange(r.localNode) or
+      (depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
+    r.splitBucket(r.buckets.find(bucket))
+    return r.addNode(n) # retry adding
+
+  # When bucket doesn't get split the node is added to the replacement cache
+  return r.addReplacement(bucket, n)
 
 proc removeNode*(r: var RoutingTable, n: Node) =
   ## Remove the node `n` from the routing table.
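This hunk is the "redundant else block" from the commit message: once the free-slot branch ends in an explicit `return Added`, the `else:` wrapping the split-or-replace path can go and its body is dedented one level. Schematically (the NodeStatus values are the ones visible in this diff, the real enum may define more; `freeSlot` is a stand-in for the bucket-capacity check):

type NodeStatus = enum
  Added, ReplacementAdded, ReplacementExisting, IpLimitReached

proc addSketch(freeSlot: bool): NodeStatus =
  if freeSlot:
    return Added                      # early return on the happy path...
  # ...so no `else:` is needed around the fallback logic below
  ReplacementAdded

echo addSketch(true), " ", addSketch(false)   # -> Added ReplacementAdded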
@@ -433,12 +434,15 @@ proc replaceNode*(r: var RoutingTable, n: Node) =
   # revalidation as you don't want to try pinging that node all the time.
   let b = r.bucketForNode(n.id)
   if b.remove(n):
+    debug "Node removed from routing table", n
     ipLimitDec(r, b, n)
 
     if b.replacementCache.len > 0:
       # Nodes in the replacement cache are already included in the ip limits.
-      b.add(b.replacementCache[high(b.replacementCache)])
+      let rn = b.replacementCache[high(b.replacementCache)]
+      b.add(rn)
       b.replacementCache.delete(high(b.replacementCache))
+      debug "Node added to routing table from replacement cache", node=rn
 
 proc getNode*(r: RoutingTable, id: NodeId): Option[Node] =
   ## Get the `Node` with `id` as `NodeId` from the routing table.
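The new log statements pass the node as a structured property rather than baking it into the message text: a bare identifier such as `n` is logged under its own name, while `node=rn` binds an explicit key. A small Chronicles illustration (the string stands in for a Node value, whose log rendering is assumed):

import chronicles

let n = "node:7f3*2c4d6e"             # stand-in for a Node value
debug "Node removed from routing table", n            # logged under key "n"
debug "Node added to routing table from replacement cache", node = n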