mirror of https://github.com/status-im/nim-eth.git
Merge pull request #247 from status-im/routing-table
Routing table work
commit 0d591c6423
@@ -50,7 +50,8 @@ proc runP2pTests() =
       "test_protocol_handlers",
       "test_enr",
       "test_discoveryv5",
-      "test_discv5_encoding"
+      "test_discv5_encoding",
+      "test_routing_table"
     ]:
     runTest("tests/p2p/" & filename)

@@ -666,7 +666,7 @@ proc revalidateNode*(d: Protocol, n: Node)
     # peers in the DHT
     if n.record notin d.bootstrapRecords:
       trace "Revalidation of node failed, removing node", record = n.record
-      d.routingTable.removeNode(n)
+      d.routingTable.replaceNode(n)
       # Remove shared secrets when removing the node from routing table.
       # This might be too direct, so we could keep these longer. But better
       # would be to simply not remove the nodes immediately but only after x

@@ -3,22 +3,40 @@ import
   stint, chronicles,
   node

+export options
+
 {.push raises: [Defect].}

 type
   RoutingTable* = object
     thisNode: Node
     buckets: seq[KBucket]
+    bitsPerHop: int ## This value indicates how many bits (at minimum) you get
+    ## closer to finding your target per query. Practically, it also tells you
+    ## how often your "not in range" branch will split off. Setting this to 1
+    ## is the basic, non-accelerated version, which will never split off the
+    ## not-in-range branch and which will result in log base 2 of n hops per
+    ## lookup. Setting it higher will increase the amount of splitting on a
+    ## not-in-range branch (thus holding more nodes with better keyspace
+    ## coverage) and will result in an improvement to log base 2^b of n hops
+    ## per lookup.

   KBucket = ref object
-    istart, iend: NodeId
-    nodes: seq[Node]
-    replacementCache: seq[Node]
-    lastUpdated: float # epochTime
+    istart, iend: NodeId ## Range of NodeIds this KBucket covers. This is not a
+    ## simple logarithmic distance as buckets can be split over a prefix that
+    ## does not cover the `thisNode` id.
+    nodes: seq[Node] ## Node entries of the KBucket. Sorted according to last
+    ## time seen. The first entry (head) is considered the most recently seen
+    ## node and the last entry (tail) the least recently seen node. Here "seen"
+    ## means a successful request-response; this may also not have occurred yet.
+    replacementCache: seq[Node] ## Nodes that could not be added to the `nodes`
+    ## seq as it is full and has no stale nodes. This is practically a small
+    ## LRU cache.
+    lastUpdated: float ## epochTime of last update to `nodes` in the KBucket.

 const
   BUCKET_SIZE* = 16
-  BITS_PER_HOP = 8
+  REPLACEMENT_CACHE_SIZE* = 8
   ID_SIZE = 256

 proc distanceTo(n: Node, id: NodeId): UInt256 =

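The `bitsPerHop` comment above becomes concrete with a quick back-of-the-envelope calculation: if every query gains at least b bits towards the target, a lookup in a network of n nodes needs roughly log2(n) / b hops. A small self-contained Nim sketch, independent of this module and with purely illustrative numbers:

import math

# Expected lookup hops when each query gains `bitsPerHop` bits towards the
# target id: log base 2^b of n, i.e. log2(n) / b.
proc expectedHops(n: float, bitsPerHop: int): float =
  log2(n) / float(bitsPerHop)

when isMainModule:
  echo expectedHops(1_000_000.0, 1) # ~19.9 hops: the plain, non-accelerated table
  echo expectedHops(1_000_000.0, 8) # ~2.5 hops: the default bitsPerHop of 8
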
@@ -59,30 +77,56 @@ proc nodesByDistanceTo(k: KBucket, id: NodeId): seq[Node] =
   sortedByIt(k.nodes, it.distanceTo(id))

 proc len(k: KBucket): int {.inline.} = k.nodes.len
-proc head(k: KBucket): Node {.inline.} = k.nodes[0]
+proc tail(k: KBucket): Node {.inline.} = k.nodes[high(k.nodes)]

 proc add(k: KBucket, n: Node): Node =
   ## Try to add the given node to this bucket.
-  ## If the node is already present, it is moved to the tail of the list, and we return nil.
-  ## If the node is not already present and the bucket has fewer than k entries, it is inserted
-  ## at the tail of the list, and we return nil.
-  ## If the bucket is full, we add the node to the bucket's replacement cache and return the
-  ## node at the head of the list (i.e. the least recently seen), which should be evicted if it
-  ## fails to respond to a ping.
+  ##
+  ## If the node is already present, nothing is done, as the node should only
+  ## be moved in case of a new successful request-response.
+  ##
+  ## If the node is not already present and the bucket has fewer than k entries,
+  ## it is inserted as the last entry of the bucket (least recently seen node),
+  ## and nil is returned.
+  ##
+  ## If the bucket is full, the node at the last entry of the bucket (least
+  ## recently seen), which should be evicted if it fails to respond to a ping,
+  ## is returned.
+  ##
+  ## The reasoning here is that nodes will, for a big part, be added from
+  ## lookups, which do not necessarily return nodes that are (still) reachable.
+  ## So, more trust is put in our own ordering and new additions are added
+  ## as least recently seen (in fact they have not been seen yet from this
+  ## node's perspective).
+  ## However, in discovery v5 a node can be added after an incoming request,
+  ## and considering the handshake that needs to be done, it is likely that
+  ## this node is reachable. An additional `addSeen` proc could be created
+  ## for this.
   k.lastUpdated = epochTime()
   let nodeIdx = k.nodes.find(n)
   if nodeIdx != -1:
-    k.nodes.delete(nodeIdx)
-    k.nodes.add(n)
+    return nil
   elif k.len < BUCKET_SIZE:
     k.nodes.add(n)
-  else:
-    k.replacementCache.add(n)
-    return k.head
-  return nil
+    return nil
+  else:
+    return k.tail
+
+proc addReplacement(k: KBucket, n: Node) =
+  ## Add the node to the tail of the replacement cache of the KBucket.
+  ##
+  ## If the replacement cache is full, the oldest (first entry) node will be
+  ## removed. If the node is already in the replacement cache, it will be moved
+  ## to the tail.
+  let nodeIdx = k.replacementCache.find(n)
+  if nodeIdx != -1:
+    k.replacementCache.delete(nodeIdx)
+    k.replacementCache.add(n)
+  else:
+    doAssert(k.replacementCache.len <= REPLACEMENT_CACHE_SIZE)
+    if k.replacementCache.len == REPLACEMENT_CACHE_SIZE:
+      k.replacementCache.delete(0)
+    k.replacementCache.add(n)

 proc removeNode(k: KBucket, n: Node) =
   let i = k.nodes.find(n)

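The `addReplacement` proc above is in effect a tiny LRU list capped at REPLACEMENT_CACHE_SIZE. The same policy, sketched self-contained on a plain seq of ints (the names here are illustrative and not part of the module):

const CacheSize = 8 # stands in for REPLACEMENT_CACHE_SIZE

proc addToCache(cache: var seq[int], item: int) =
  # Keep `item` as the most recently seen entry (tail); evict the oldest entry
  # (head) when the cache is full, mirroring `addReplacement`.
  let idx = cache.find(item)
  if idx != -1:
    cache.delete(idx)       # already known: just move it to the tail
  elif cache.len == CacheSize:
    cache.delete(0)         # full: drop the least recently seen entry
  cache.add(item)

when isMainModule:
  var cache: seq[int]
  for i in 1 .. 10:
    cache.addToCache(i)
  cache.addToCache(5)
  doAssert cache == @[3, 4, 6, 7, 8, 9, 10, 5]
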
@@ -105,23 +149,21 @@ proc inRange(k: KBucket, n: Node): bool {.inline.} =

 proc contains(k: KBucket, n: Node): bool = n in k.nodes

-proc binaryGetBucketForNode(buckets: openarray[KBucket],
-    id: NodeId): KBucket {.inline.} =
-  ## Given a list of ordered buckets, returns the bucket for a given node.
+proc binaryGetBucketForNode*(buckets: openarray[KBucket],
+    id: NodeId): KBucket =
+  ## Given a list of ordered buckets, returns the bucket for a given `NodeId`.
+  ## Returns nil if no bucket in range for the given `id` is found.
   let bucketPos = lowerBound(buckets, id) do(a: KBucket, b: NodeId) -> int:
     cmp(a.iend, b)
-  # Prevents edge cases where bisect_left returns an out of range index
+  # Prevent cases where `lowerBound` returns an out of range index, e.g. for an
+  # empty openarray, or when the id is out of range for all buckets in the
+  # openarray.
   if bucketPos < buckets.len:
     let bucket = buckets[bucketPos]
     if bucket.istart <= id and id <= bucket.iend:
       result = bucket

-  # TODO: Is this really an error that should occur? Feels a lot like a work-
-  # around to another problem. Set to Defect for now.
-  if result.isNil:
-    raise (ref Defect)(msg: "No bucket found for node with id " & $id)
-
-proc computeSharedPrefixBits(nodes: openarray[Node]): int =
+proc computeSharedPrefixBits(nodes: openarray[NodeId]): int =
   ## Count the number of prefix bits shared by all nodes.
   if nodes.len < 2:
     return ID_SIZE

@@ -131,18 +173,20 @@ proc computeSharedPrefixBits(nodes: openarray[Node]): int =

   for i in 1 .. ID_SIZE:
     mask = mask or (one shl (ID_SIZE - i))
-    let reference = nodes[0].id and mask
+    let reference = nodes[0] and mask
     for j in 1 .. nodes.high:
-      if (nodes[j].id and mask) != reference: return i - 1
+      if (nodes[j] and mask) != reference: return i - 1

   for n in nodes:
-    echo n.id.toHex()
+    echo n.toHex()

+  # Reaching this would mean that all node ids are equal
   doAssert(false, "Unable to calculate number of shared prefix bits")

-proc init*(r: var RoutingTable, thisNode: Node) {.inline.} =
+proc init*(r: var RoutingTable, thisNode: Node, bitsPerHop = 8) {.inline.} =
   r.thisNode = thisNode
   r.buckets = @[newKBucket(0.u256, high(Uint256))]
+  r.bitsPerHop = bitsPerHop
   randomize() # for later `randomNodes` selection

 proc splitBucket(r: var RoutingTable, index: int) =

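With the switch to `openarray[NodeId]`, computeSharedPrefixBits is later fed `@[bucket.istart, bucket.iend]` to derive a bucket's depth from its range. The idea, illustrated self-contained on 8-bit values instead of 256-bit ids (names and values here are illustrative only):

# 8-bit stand-in for the 256-bit NodeId case: count leading bits shared by all values.
proc sharedPrefixBits(values: openarray[uint8]): int =
  const idSize = 8
  for i in 1 .. idSize:
    let mask = 0xFF'u8 shl (idSize - i)   # keep only the top i bits
    let reference = values[0] and mask
    for v in values:
      if (v and mask) != reference: return i - 1
  return idSize

when isMainModule:
  # 0b0100_0000 and 0b0111_1111 agree on their first two bits only, so a bucket
  # covering that range has depth 2.
  doAssert sharedPrefixBits([0b0100_0000'u8, 0b0111_1111'u8]) == 2
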
@@ -152,31 +196,58 @@ proc splitBucket(r: var RoutingTable, index: int) =
   r.buckets.insert(b, index + 1)

 proc bucketForNode(r: RoutingTable, id: NodeId): KBucket =
-  binaryGetBucketForNode(r.buckets, id)
+  result = binaryGetBucketForNode(r.buckets, id)
+  doAssert(not result.isNil(),
+    "Routing table should always cover the full id space")

 proc removeNode*(r: var RoutingTable, n: Node) =
+  ## Remove the node `n` from the routing table.
   r.bucketForNode(n.id).removeNode(n)

 proc addNode*(r: var RoutingTable, n: Node): Node =
+  ## Try to add the node to the routing table.
+  ##
+  ## First, an attempt is made to add the node to the bucket in its range.
+  ## If this fails, the bucket is split if it is eligible for splitting.
+  ## If so, a new attempt is made to add the node. If not, the node is
+  ## added to the replacement cache.
   if n == r.thisNode:
     # warn "Trying to add ourselves to the routing table", node = n
     return
   let bucket = r.bucketForNode(n.id)
   let evictionCandidate = bucket.add(n)
   if not evictionCandidate.isNil:
-    # Split if the bucket has the local node in its range or if the depth is not congruent
-    # to 0 mod BITS_PER_HOP
-    let depth = computeSharedPrefixBits(bucket.nodes)
-    # TODO: Shouldn't the adding to replacement cache be done only if the bucket
-    # doesn't get split?
-    if bucket.inRange(r.thisNode) or (depth mod BITS_PER_HOP != 0 and depth != ID_SIZE):
+    # Split if the bucket has the local node in its range or if the depth is not
+    # congruent to 0 mod `bitsPerHop`
+    #
+    # Calculate the prefix shared by all nodes in the bucket's range, not the
+    # ones actually in the bucket.
+    let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
+    if bucket.inRange(r.thisNode) or
+        (depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
       r.splitBucket(r.buckets.find(bucket))
       return r.addNode(n) # retry adding
+    else:
+      # When the bucket doesn't get split, the node is added to the replacement cache
+      bucket.addReplacement(n)

-    # Nothing added, ping evictionCandidate
+    # Nothing added, return evictionCandidate
     return evictionCandidate

+proc replaceNode*(r: var RoutingTable, n: Node) =
+  ## Replace node `n` with the last entry in the replacement cache. If there are
+  ## no entries in the replacement cache, node `n` will simply be removed.
+  # TODO: The Kademlia paper recommends not removing nodes here if there are no
+  # replacements. However, that would require a bit more complexity in the
+  # revalidation as you don't want to try pinging that node all the time.
+  let b = r.bucketForNode(n.id)
+  let idx = b.nodes.find(n)
+  if idx != -1:
+    b.nodes.delete(idx)
+    if b.replacementCache.len > 0:
+      b.nodes.add(b.replacementCache[high(b.replacementCache)])
+      b.replacementCache.delete(high(b.replacementCache))

 proc getNode*(r: RoutingTable, id: NodeId): Option[Node] =
   let b = r.bucketForNode(id)
   for n in b.nodes:

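A hedged usage sketch of the write API this hunk completes (addNode, replaceNode, and the returned eviction candidate). It assumes the modules from this PR are on the import path and borrows `generateNode` from the new `discv5_test_helper` added further down, purely to have Node values to insert; the flow itself mirrors what revalidateNode and the tests below do:

import eth/p2p/discoveryv5/[routing_table, node]
import ./discv5_test_helper   # test-only helper added in this PR

var table: RoutingTable
let ourNode = generateNode()
table.init(ourNode)           # bitsPerHop defaults to 8

let peer = generateNode()
let evictionCandidate = table.addNode(peer)
if evictionCandidate.isNil:
  discard # peer was stored (possibly after a bucket split and a retried add)
else:
  # The bucket was full: peer went to the replacement cache and the least
  # recently seen node came back. The caller should ping it and, on failure,
  # drop it and promote a replacement cache entry in its place:
  table.replaceNode(evictionCandidate)
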
@@ -222,7 +293,7 @@ proc neighboursAtDistance*(r: RoutingTable, distance: uint32,
 proc len*(r: RoutingTable): int =
   for b in r.buckets: result += b.len

-proc moveRight[T](arr: var openarray[T], a, b: int) {.inline.} =
+proc moveRight[T](arr: var openarray[T], a, b: int) =
   ## In `arr` move elements in range [a, b] right by 1.
   var t: T
   shallowCopy(t, arr[b + 1])

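moveRight documents a right shift of [a, b] by one, and from the surrounding lines the element at arr[b + 1] wraps around to position a, i.e. a right rotation of arr[a .. b+1]. setJustSeen in the next hunk relies on exactly this to move a node to the head of its bucket. A self-contained illustration with plain assignments instead of shallowCopy:

proc moveRightDemo[T](arr: var seq[T], a, b: int) =
  # Shift arr[a..b] one place to the right; arr[b + 1] ends up at position a.
  let wrapped = arr[b + 1]
  for i in countdown(b, a):
    arr[i + 1] = arr[i]
  arr[a] = wrapped

when isMainModule:
  var nodes = @["d", "a", "b", "c"]
  # What setJustSeen effectively does for a node found at index 3:
  nodes.moveRightDemo(0, 2)
  doAssert nodes == @["c", "d", "a", "b"]
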
@@ -231,19 +302,23 @@ proc moveRight[T](arr: var openarray[T], a, b: int) {.inline.} =
   shallowCopy(arr[a], t)

 proc setJustSeen*(r: RoutingTable, n: Node) =
-  # Move `n` to front of its bucket
+  ## Move `n` to the head (most recently seen) of its bucket.
+  ## If `n` is not in the routing table, do nothing.
   let b = r.bucketForNode(n.id)
   let idx = b.nodes.find(n)
-  doAssert(idx >= 0)
+  if idx >= 0:
     if idx != 0:
       b.nodes.moveRight(0, idx - 1)
-      b.nodes[0] = n
     b.lastUpdated = epochTime()

 proc nodeToRevalidate*(r: RoutingTable): Node =
+  ## Return a node to revalidate. The least recently seen node from a random
+  ## bucket is selected.
   var buckets = r.buckets
   shuffle(buckets)
-  # TODO: Should we prioritize less-recently-updated buckets instead?
+  # TODO: Should we prioritize less-recently-updated buckets instead? Could use
+  # `lastUpdated` for this, but it would probably make more sense to only update
+  # that value on revalidation then, and rename it to `lastValidated`.
   for b in buckets:
     if b.len > 0:
       return b.nodes[^1]

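nodeToRevalidate, setJustSeen and replaceNode are meant to be driven together by the protocol's revalidation loop; the revalidateNode hunk at the top of this diff shows only the failure branch. A hedged sketch of that flow, with the ping round-trip abstracted as a caller-supplied proc (revalidatePass and ping are illustrative names, not part of this module):

import eth/p2p/discoveryv5/[routing_table, node]

proc revalidatePass(table: var RoutingTable, ping: proc(n: Node): bool) =
  # One revalidation step: `ping` should return true when a pong arrived in time.
  let n = table.nodeToRevalidate()  # least recently seen node of a random bucket
  if n.isNil:                       # table is empty, nothing to revalidate
    return
  if ping(n):
    table.setJustSeen(n)            # successful request-response: move to the head
  else:
    table.replaceNode(n)            # failed: drop it and promote a replacement entry
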
@@ -260,10 +335,16 @@ proc randomNodes*(r: RoutingTable, maxAmount: int,
   result = newSeqOfCap[Node](maxAmount)
   var seen = initHashSet[Node]()

-  # This is a rather inneficient way of randomizing nodes from all buckets, but even if we
+  # This is a rather inefficient way of randomizing nodes from all buckets, but even if we
   # iterate over all nodes in the routing table, the time it takes would still be
   # insignificant compared to the time it takes for the network roundtrips when connecting
   # to nodes.
+  # However, "time it takes" might not be relevant, as there might be no point
+  # in providing more `randomNodes` as the routing table might not have anything
+  # new to provide. And there is no way for the calling code to know this. So
+  # while it will take less total time compared to e.g. an (async)
+  # randomLookup, the time might be wasted as all nodes have possibly been seen
+  # already.
   while len(seen) < maxAmount:
     # TODO: Is it important to get a better random source for these sample calls?
     let bucket = sample(r.buckets)

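The comment block above describes the randomNodes approach: keep drawing from random buckets until maxAmount distinct nodes have been collected, tracked in a HashSet. A self-contained sketch of that sampling pattern on ints (illustrative only; the real proc draws a random bucket first and has to cope with the table holding fewer nodes than requested):

import random, sets

proc sampleDistinct(population: seq[int], maxAmount: int): seq[int] =
  var seen = initHashSet[int]()
  while seen.len < min(maxAmount, population.len):
    let item = sample(population)   # may repeat, hence the inefficiency noted above
    if item notin seen:
      seen.incl(item)
      result.add(item)

when isMainModule:
  randomize()
  echo sampleDistinct(@[1, 2, 3, 4, 5, 6, 7, 8], 3)
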
@@ -0,0 +1,55 @@
+import
+  testutils/unittests, stew/shims/net, nimcrypto,
+  eth/[keys, rlp, trie/db],
+  eth/p2p/discoveryv5/[discovery_db, enr, node, types, routing_table, encoding],
+  eth/p2p/discoveryv5/protocol as discv5_protocol
+
+proc localAddress*(port: int): Address =
+  Address(ip: ValidIpAddress.init("127.0.0.1"), port: Port(port))
+
+proc initDiscoveryNode*(privKey: PrivateKey, address: Address,
+    bootstrapRecords: openarray[Record] = [],
+    localEnrFields: openarray[FieldPair] = []):
+    discv5_protocol.Protocol =
+  var db = DiscoveryDB.init(newMemoryDB())
+  result = newProtocol(privKey, db,
+    some(address.ip),
+    address.port, address.port,
+    bootstrapRecords = bootstrapRecords,
+    localEnrFields = localEnrFields)
+
+  result.open()
+
+proc nodeIdInNodes*(id: NodeId, nodes: openarray[Node]): bool =
+  for n in nodes:
+    if id == n.id: return true
+
+# Create a random packet with a specific nodeid each time
+proc randomPacket*(tag: PacketTag): seq[byte] =
+  var
+    authTag: AuthTag
+    msg: array[44, byte]
+
+  check randomBytes(authTag) == authTag.len
+  check randomBytes(msg) == msg.len
+  result.add(tag)
+  result.add(rlp.encode(authTag))
+  result.add(msg)
+
+proc generateNode*(privKey = PrivateKey.random()[], port: int = 20302,
+    localEnrFields: openarray[FieldPair] = []): Node =
+  let port = Port(port)
+  let enr = enr.Record.init(1, privKey, some(ValidIpAddress.init("127.0.0.1")),
+    port, port, localEnrFields).expect("Properly initialized private key")
+  result = newNode(enr).expect("Properly initialized node")
+
+proc nodeAtDistance*(n: Node, d: uint32): Node =
+  while true:
+    let node = generateNode()
+    if logDist(n.id, node.id) == d:
+      return node
+
+proc nodesAtDistance*(n: Node, d: uint32, amount: int): seq[Node] =
+  for i in 0..<amount:
+    result.add(nodeAtDistance(n, d))

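A brief, hedged example of the new helpers in use (it assumes this helper module's imports, with logDist coming from the node module and BUCKET_SIZE from routing_table):

let base = generateNode()
let far = base.nodeAtDistance(256)              # brute-force search for a matching id
doAssert logDist(base.id, far.id) == 256
let bucketFill = base.nodesAtDistance(256, BUCKET_SIZE)
doAssert bucketFill.len == BUCKET_SIZE
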
@@ -30,18 +30,6 @@ proc setupTestNode*(capabilities: varargs[ProtocolInfo, `protocolInfo`]): Ethere
   for capability in capabilities:
     result.addCapability capability

-template asyncTest*(name, body: untyped) =
-  test name:
-    proc scenario {.async.} = body
-    waitFor scenario()
-
-template procSuite*(name, body: untyped) =
-  proc suitePayload =
-    suite name:
-      body
-
-  suitePayload()
-
 proc packData*(payload: openArray[byte], pk: PrivateKey): seq[byte] =
   let
     payloadSeq = @payload

@@ -1,60 +1,11 @@
 import
-  unittest, chronos, sequtils, chronicles, tables, stint, nimcrypto,
-  stew/shims/net, eth/[keys, rlp], eth/trie/db,
-  eth/p2p/discoveryv5/[discovery_db, enr, node, types, routing_table, encoding],
+  chronos, chronicles, tables, stint, nimcrypto, testutils/unittests,
+  stew/shims/net, eth/keys,
+  eth/p2p/discoveryv5/[enr, node, types, routing_table, encoding],
   eth/p2p/discoveryv5/protocol as discv5_protocol,
-  ./p2p_test_helper
+  ./discv5_test_helper

-proc localAddress*(port: int): Address =
-  Address(ip: ValidIpAddress.init("127.0.0.1"), port: Port(port))
-
-proc initDiscoveryNode*(privKey: PrivateKey, address: Address,
-    bootstrapRecords: openarray[Record] = [],
-    localEnrFields: openarray[FieldPair] = []):
-    discv5_protocol.Protocol =
-  var db = DiscoveryDB.init(newMemoryDB())
-  result = newProtocol(privKey, db,
-    some(address.ip),
-    address.port, address.port,
-    bootstrapRecords = bootstrapRecords,
-    localEnrFields = localEnrFields)
-
-  result.open()
-
-proc nodeIdInNodes(id: NodeId, nodes: openarray[Node]): bool =
-  for n in nodes:
-    if id == n.id: return true
-
-# Creating a random packet with specific nodeid each time
-proc randomPacket(tag: PacketTag): seq[byte] =
-  var
-    authTag: AuthTag
-    msg: array[44, byte]
-
-  check randomBytes(authTag) == authTag.len
-  check randomBytes(msg) == msg.len
-  result.add(tag)
-  result.add(rlp.encode(authTag))
-  result.add(msg)
-
-proc generateNode(privKey = PrivateKey.random()[], port: int = 20302,
-    localEnrFields: openarray[FieldPair] = []): Node =
-  let port = Port(port)
-  let enr = enr.Record.init(1, privKey, some(ValidIpAddress.init("127.0.0.1")),
-    port, port, localEnrFields).expect("Properly intialized private key")
-  result = newNode(enr).expect("Properly initialized node")
-
-proc nodeAtDistance(n: Node, d: uint32): Node =
-  while true:
-    let node = generateNode()
-    if logDist(n.id, node.id) == d:
-      return node
-
-proc nodesAtDistance(n: Node, d: uint32, amount: int): seq[Node] =
-  for i in 0..<amount:
-    result.add(nodeAtDistance(n, d))
-
-suite "Discovery v5 Tests":
+procSuite "Discovery v5 Tests":
   asyncTest "GetNode":
     # TODO: This could be tested in just a routing table only context
     let

@@ -8,7 +8,8 @@
 # MIT license (LICENSE-MIT)

 import
-  unittest, tables, chronos, eth/p2p,
+  tables, chronos, testutils/unittests,
+  eth/p2p,
   ./p2p_test_helper

 type

@@ -0,0 +1,254 @@
+import
+  unittest,
+  eth/p2p/discoveryv5/[routing_table, node],
+  ./discv5_test_helper
+
+suite "Routing Table Tests":
+  test "Bucket splitting in range branch b=1":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 1 -> Split only the branch in range of own id
+    table.init(node, 1)
+
+    for j in 0..5'u32:
+      for i in 0..<BUCKET_SIZE:
+        check table.addNode(node.nodeAtDistance(256-j)) == nil
+      check table.addNode(node.nodeAtDistance(256-j)) != nil
+
+  test "Bucket splitting off range branch b=1":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 1 -> Split only the branch in range of own id
+    table.init(node, 1)
+
+    # Add 16 nodes, distance 256
+    for i in 0..<BUCKET_SIZE:
+      check table.addNode(node.nodeAtDistance(256)) == nil
+
+    # This should split the bucket in the distance 256 branch and the distance
+    # <=255 branch. But it should not add the node, as the distance 256 bucket
+    # is already full and b=1 will not allow it to split any further.
+    check table.addNode(node.nodeAtDistance(256)) != nil
+
+    # This add should be allowed as it is on the branch where the own node's id
+    # belongs to.
+    check table.addNode(node.nodeAtDistance(255)) == nil
+
+  test "Bucket splitting off range branch b=2":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 2, allow not in range branch to split once (2 buckets).
+    table.init(node, 2)
+
+    # Add 16 nodes, distance 256 from `node`, but all with 2 bits shared prefix
+    # among themselves.
+    let firstNode = node.nodeAtDistance(256)
+    check table.addNode(firstNode) == nil
+    for n in 1..<BUCKET_SIZE:
+      check table.addNode(firstNode.nodeAtDistance(254)) == nil
+
+    # Add 16 more nodes with only 1 bit shared prefix with the previous 16. This
+    # should cause the initial bucket to split and fill the second bucket
+    # with the 16 new entries.
+    for n in 0..<BUCKET_SIZE:
+      check table.addNode(firstNode.nodeAtDistance(255)) == nil
+
+    # Adding another should fail as both buckets will be full and not be
+    # allowed to split another time.
+    check table.addNode(node.nodeAtDistance(256)) != nil
+    # And also when targeting one of the two specific buckets.
+    check table.addNode(firstNode.nodeAtDistance(255)) != nil
+    check table.addNode(firstNode.nodeAtDistance(254)) != nil
+    # This add should be allowed as it is on the branch where the own node's id
+    # belongs to.
+    check table.addNode(node.nodeAtDistance(255)) == nil
+
+  test "Replacement cache":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 1 -> Split only the branch in range of own id
+    table.init(node, 1)
+
+    # create a full bucket
+    let bucketNodes = node.nodesAtDistance(256, BUCKET_SIZE)
+    for n in bucketNodes:
+      check table.addNode(n) == nil
+
+    # create a full replacement cache
+    let replacementNodes = node.nodesAtDistance(256, REPLACEMENT_CACHE_SIZE)
+    for n in replacementNodes:
+      check table.addNode(n) != nil
+
+    # Add one more node to replacement (would drop first one)
+    let lastNode = node.nodeAtDistance(256)
+    check table.addNode(lastNode) != nil
+
+    # This should replace the last node in the bucket with the last one of
+    # the replacement cache.
+    table.replaceNode(table.nodeToRevalidate())
+    block:
+      # Should return the last node of the replacement cache successfully.
+      let result = table.getNode(lastNode.id)
+      check:
+        result.isSome()
+        result.get() == lastNode
+    block:
+      # This node should be removed
+      check (table.getNode(bucketNodes[bucketNodes.high].id)).isNone()
+
+  test "Empty bucket":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 1 -> Split only the branch in range of own id
+    table.init(node, 1)
+
+    check table.nodeToRevalidate().isNil()
+
+    # try to replace a non-existing node
+    table.replaceNode(generateNode())
+    check table.len == 0
+
+    let addedNode = generateNode()
+    check table.addNode(addedNode) == nil
+    check table.len == 1
+
+    # try to replace a non-existing node
+    table.replaceNode(generateNode())
+    check table.len == 1
+
+    table.replaceNode(addedNode)
+    check table.len == 0
+
+  test "Empty replacement cache":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 1 -> Split only the branch in range of own id
+    table.init(node, 1)
+
+    # create a full bucket TODO: no need to store bucketNodes
+    let bucketNodes = node.nodesAtDistance(256, BUCKET_SIZE)
+    for n in bucketNodes:
+      check table.addNode(n) == nil
+
+    table.replaceNode(table.nodeToRevalidate())
+    # This node should still be removed
+    check (table.getNode(bucketNodes[bucketNodes.high].id)).isNone()
+
+  test "Double add":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 1 -> Split only the branch in range of own id
+    table.init(node, 1)
+
+    let doubleNode = node.nodeAtDistance(256)
+    # Try to add the node twice
+    check table.addNode(doubleNode) == nil
+    check table.addNode(doubleNode) == nil
+
+    for n in 0..<BUCKET_SIZE-1:
+      check table.addNode(node.nodeAtDistance(256)) == nil
+
+    check table.addNode(node.nodeAtDistance(256)) != nil
+    # Check when adding again once the bucket is full
+    check table.addNode(doubleNode) == nil
+
+    # Test if its order is preserved; there is one node in the replacement cache,
+    # which is why we run `BUCKET_SIZE` times.
+    for n in 0..<BUCKET_SIZE:
+      table.replaceNode(table.nodeToRevalidate())
+
+    let result = table.getNode(doubleNode.id)
+    check:
+      result.isSome()
+      result.get() == doubleNode
+      table.len == 1
+
+  test "Double replacement add":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 1 -> Split only the branch in range of own id
+    table.init(node, 1)
+
+    # create a full bucket
+    let bucketNodes = node.nodesAtDistance(256, BUCKET_SIZE)
+    for n in bucketNodes:
+      check table.addNode(n) == nil
+
+    # create a full replacement cache
+    let replacementNodes = node.nodesAtDistance(256, REPLACEMENT_CACHE_SIZE)
+    for n in replacementNodes:
+      check table.addNode(n) != nil
+
+    check table.addNode(replacementNodes[0]) != nil
+
+    table.replaceNode(table.nodeToRevalidate())
+    block:
+      # Should return the last node of the replacement cache successfully.
+      let result = table.getNode(replacementNodes[0].id)
+      check:
+        result.isSome()
+        result.get() == replacementNodes[0]
+    block:
+      # This node should be removed
+      check (table.getNode(bucketNodes[bucketNodes.high].id)).isNone()
+
+  test "Just seen":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 1 -> Split only the branch in range of own id
+    table.init(node, 1)
+
+    # create a full bucket
+    let bucketNodes = node.nodesAtDistance(256, BUCKET_SIZE)
+    for n in bucketNodes:
+      check table.addNode(n) == nil
+
+    # swap seen order
+    for n in bucketNodes:
+      table.setJustSeen(n)
+
+    for n in bucketNodes:
+      table.replaceNode(table.nodeToRevalidate())
+      check (table.getNode(n.id)).isNone()
+
+  test "Just seen replacement":
+    let node = generateNode()
+    var table: RoutingTable
+
+    # bitsPerHop = 1 -> Split only the branch in range of own id
+    table.init(node, 1)
+
+    # create a full bucket
+    let bucketNodes = node.nodesAtDistance(256, BUCKET_SIZE)
+    for n in bucketNodes:
+      check table.addNode(n) == nil
+
+    # create a full replacement cache
+    let replacementNodes = node.nodesAtDistance(256, REPLACEMENT_CACHE_SIZE)
+    for n in replacementNodes:
+      check table.addNode(n) != nil
+
+    for i in countdown(replacementNodes.high, 0):
+      table.replaceNode(table.nodeToRevalidate())
+      table.setJustSeen(replacementNodes[i])
+
+    for n in replacementNodes:
+      let result = table.getNode(n.id)
+      check:
+        result.isSome()
+        result.get() == n
+
+    for i in 0..<int(BUCKET_SIZE/2):
+      let result = table.getNode(bucketNodes[i].id)
+      check:
+        result.isSome()
+        result.get() == bucketNodes[i]

@@ -8,8 +8,8 @@
 # MIT license (LICENSE-MIT)

 import
-  sequtils, options, unittest, tables, chronos, eth/[keys, p2p],
-  eth/p2p/rlpx_protocols/whisper_protocol, eth/p2p/peer_pool,
+  sequtils, options, tables, chronos, testutils/unittests,
+  eth/[keys, p2p], eth/p2p/rlpx_protocols/whisper_protocol, eth/p2p/peer_pool,
   ./p2p_test_helper

 proc resetMessageQueues(nodes: varargs[EthereumNode]) =

@@ -8,7 +8,7 @@
 # MIT license (LICENSE-MIT)

 import
-  sequtils, strformat, options, unittest,
+  sequtils, strformat, options, testutils/unittests,
   chronicles, chronos, eth/[rlp, keys, p2p],
   eth/p2p/mock_peers