{.used.}

import
  unittest2,
  ../../eth/keys, ../../eth/p2p/discoveryv5/[routing_table, node, enr],
  ./discv5_test_helper
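# Alternative distance functions used to exercise the routing table's pluggable
# DistanceCalculator in the tests below: customDistance is the absolute
# difference of two ids, customLogDistance reduces that difference modulo
# uint8.high, and customIdAdDist returns an id at the given custom distance.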
func customDistance*(a, b: NodeId): UInt256 =
  if a >= b:
    a - b
  else:
    b - a

func customLogDistance*(a, b: NodeId): uint16 =
  let distance = customDistance(a, b)
  let modulo = distance mod (u256(uint8.high))
  cast[uint16](modulo)

func customIdAdDist*(id: NodeId, dist: uint16): NodeId =
  id + u256(dist)

suite "Routing Table Tests":
  let rng = newRng()

  # Used for testing. Could also at runtime check whether the address is the
  # loopback address, as these are only allowed to be added when coming from
  # another loopback node. However, that check is done in the protocol code and
  # is thus independent of routing_table.
  let ipLimits = TableIpLimits(tableIpLimit: 200,
    bucketIpLimit: BUCKET_SIZE + REPLACEMENT_CACHE_SIZE + 1)
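  # Bundles the custom distance functions so individual tests can pass them to
  # RoutingTable.init via the distanceCalculator parameter.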
  let customDistanceCalculator = DistanceCalculator(
    calculateDistance: customDistance,
    calculateLogDistance: customLogDistance,
    calculateIdAtDistance: customIdAdDist)
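  # Adding the table's own node must be refused and reported as LocalNode.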
test "Add local node":
|
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, ipLimits, rng = rng)
|
2020-11-26 17:20:15 +00:00
|
|
|
|
|
|
|
check table.addNode(node) == LocalNode
|
|
|
|
|
2020-06-17 11:51:30 +00:00
|
|
|
test "Bucket splitting in range branch b=1":
|
2020-07-07 08:56:26 +00:00
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2020-06-17 11:51:30 +00:00
|
|
|
# bitsPerHop = 1 -> Split only the branch in range of own id
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, ipLimits, rng = rng)
|
2020-06-17 11:51:30 +00:00
|
|
|
|
|
|
|
for j in 0..5'u32:
|
|
|
|
for i in 0..<BUCKET_SIZE:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(node.nodeAtDistance(rng[], 256-j)) == Added
|
|
|
|
check table.addNode(node.nodeAtDistance(rng[], 256-j)) == ReplacementAdded
|
2020-06-17 11:51:30 +00:00
|
|
|
|
|
|
|
test "Bucket splitting off range branch b=1":
|
2020-07-07 08:56:26 +00:00
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2020-06-17 11:51:30 +00:00
|
|
|
# bitsPerHop = 1 -> Split only the branch in range of own id
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, ipLimits, rng = rng)
|
2020-06-17 11:51:30 +00:00
|
|
|
|
|
|
|
# Add 16 nodes, distance 256
|
|
|
|
for i in 0..<BUCKET_SIZE:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(node.nodeAtDistance(rng[], 256)) == Added
|
2020-06-17 11:51:30 +00:00
|
|
|
|
|
|
|
# This should split the bucket in the distance 256 branch, and the distance
|
|
|
|
# <=255 branch. But not add the node, as distance 256 bucket is already full
|
|
|
|
# and b=1 will not allow it to spit any further
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(node.nodeAtDistance(rng[], 256)) == ReplacementAdded
|
2020-06-17 11:51:30 +00:00
|
|
|
|
|
|
|
# This add should be allowed as it is on the branch where the own node's id
|
|
|
|
# id belongs to.
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(node.nodeAtDistance(rng[], 255)) == Added
|
2020-06-17 11:51:30 +00:00
|
|
|
|
|
|
|
test "Bucket splitting off range branch b=2":
|
2020-07-07 08:56:26 +00:00
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2020-06-22 14:46:58 +00:00
|
|
|
# bitsPerHop = 2, allow not in range branch to split once (2 buckets).
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 2, ipLimits, rng = rng)
|
2020-06-17 11:51:30 +00:00
|
|
|
|
2020-06-22 14:46:58 +00:00
|
|
|
# Add 16 nodes, distance 256 from `node`, but all with 2 bits shared prefix
|
|
|
|
# among themselves.
|
2020-07-07 08:56:26 +00:00
|
|
|
let firstNode = node.nodeAtDistance(rng[], 256)
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(firstNode) == Added
|
2020-06-22 14:46:58 +00:00
|
|
|
for n in 1..<BUCKET_SIZE:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(firstNode.nodeAtDistance(rng[], 254)) == Added
|
2020-06-17 11:51:30 +00:00
|
|
|
|
2020-06-22 14:46:58 +00:00
|
|
|
# Add 16 more nodes with only 1 bit shared prefix with previous 16. This
|
|
|
|
# should cause the initial bucket to split and and fill the second bucket
|
|
|
|
# with the 16 new entries.
|
2020-06-23 14:11:58 +00:00
|
|
|
for n in 0..<BUCKET_SIZE:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(firstNode.nodeAtDistance(rng[], 255)) == Added
|
2020-06-17 11:51:30 +00:00
|
|
|
|
2020-06-22 14:46:58 +00:00
|
|
|
# Adding another should fail as both buckets will be full and not be
|
|
|
|
# allowed to split another time.
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(node.nodeAtDistance(rng[], 256)) == ReplacementAdded
|
2022-11-16 16:44:00 +00:00
|
|
|
# And also when targeting one of the two specific buckets.
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(firstNode.nodeAtDistance(rng[], 255)) == ReplacementAdded
|
|
|
|
check table.addNode(firstNode.nodeAtDistance(rng[], 254)) == ReplacementAdded
|
2020-06-17 11:51:30 +00:00
|
|
|
# This add should be allowed as it is on the branch where the own node's id
|
|
|
|
# id belongs to.
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(node.nodeAtDistance(rng[], 255)) == Added
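  # Once a bucket is full, additional peers land in its replacement cache and
  # get swapped in when a bucket entry is replaced via replaceNode.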
test "Replacement cache":
|
2020-07-07 08:56:26 +00:00
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2020-06-23 14:11:58 +00:00
|
|
|
# bitsPerHop = 1 -> Split only the branch in range of own id
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, ipLimits, rng = rng)
|
2020-06-23 14:11:58 +00:00
|
|
|
|
|
|
|
# create a full bucket
|
2020-07-07 08:56:26 +00:00
|
|
|
let bucketNodes = node.nodesAtDistance(rng[], 256, BUCKET_SIZE)
|
2020-06-23 14:11:58 +00:00
|
|
|
for n in bucketNodes:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(n) == Added
|
2020-06-23 14:11:58 +00:00
|
|
|
|
|
|
|
# create a full replacement cache
|
2020-07-07 08:56:26 +00:00
|
|
|
let replacementNodes = node.nodesAtDistance(rng[], 256, REPLACEMENT_CACHE_SIZE)
|
2020-06-23 14:11:58 +00:00
|
|
|
for n in replacementNodes:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(n) == ReplacementAdded
|
2020-06-23 14:11:58 +00:00
|
|
|
|
|
|
|
# Add one more node to replacement (would drop first one)
|
2020-07-07 08:56:26 +00:00
|
|
|
let lastNode = node.nodeAtDistance(rng[], 256)
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(lastNode) == ReplacementAdded
|
2020-06-23 14:11:58 +00:00
|
|
|
|
|
|
|
# This should replace the last node in the bucket, with the last one of
|
|
|
|
# the replacement cache.
|
|
|
|
table.replaceNode(table.nodeToRevalidate())
|
|
|
|
block:
|
|
|
|
# Should return the last node of the replacement cache successfully.
|
2022-07-09 08:55:15 +00:00
|
|
|
let res = table.getNode(lastNode.id)
|
2020-06-23 14:11:58 +00:00
|
|
|
check:
|
2022-07-09 08:55:15 +00:00
|
|
|
res.isSome()
|
|
|
|
res.get() == lastNode
|
2020-06-23 14:11:58 +00:00
|
|
|
block:
|
|
|
|
# This node should be removed
|
|
|
|
check (table.getNode(bucketNodes[bucketNodes.high].id)).isNone()
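  # On an empty table there is no node to revalidate, and replacing an unknown
  # node is a no-op.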
test "Empty bucket":
|
2020-07-07 08:56:26 +00:00
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2020-06-23 15:54:12 +00:00
|
|
|
# bitsPerHop = 1 -> Split only the branch in range of own id
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, ipLimits, rng = rng)
|
2020-06-23 15:54:12 +00:00
|
|
|
|
|
|
|
check table.nodeToRevalidate().isNil()
|
|
|
|
|
|
|
|
# try to replace not existing node
|
2020-07-07 08:56:26 +00:00
|
|
|
table.replaceNode(generateNode(PrivateKey.random(rng[])))
|
2020-06-23 15:54:12 +00:00
|
|
|
check table.len == 0
|
|
|
|
|
2020-07-07 08:56:26 +00:00
|
|
|
let addedNode = generateNode(PrivateKey.random(rng[]))
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(addedNode) == Added
|
2020-06-23 15:54:12 +00:00
|
|
|
check table.len == 1
|
|
|
|
|
|
|
|
# try to replace not existing node
|
2020-07-07 08:56:26 +00:00
|
|
|
table.replaceNode(generateNode(PrivateKey.random(rng[])))
|
2020-06-23 15:54:12 +00:00
|
|
|
check table.len == 1
|
|
|
|
|
|
|
|
table.replaceNode(addedNode)
|
|
|
|
check table.len == 0
|
|
|
|
|
2020-06-23 14:11:58 +00:00
|
|
|
test "Empty replacement cache":
|
2020-07-07 08:56:26 +00:00
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2020-06-23 14:11:58 +00:00
|
|
|
# bitsPerHop = 1 -> Split only the branch in range of own id
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, ipLimits, rng = rng)
|
2020-06-23 14:11:58 +00:00
|
|
|
|
|
|
|
# create a full bucket TODO: no need to store bucketNodes
|
2020-07-07 08:56:26 +00:00
|
|
|
let bucketNodes = node.nodesAtDistance(rng[], 256, BUCKET_SIZE)
|
2020-06-23 14:11:58 +00:00
|
|
|
for n in bucketNodes:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(n) == Added
|
2020-06-23 14:11:58 +00:00
|
|
|
|
|
|
|
table.replaceNode(table.nodeToRevalidate())
|
|
|
|
# This node should still be removed
|
|
|
|
check (table.getNode(bucketNodes[bucketNodes.high].id)).isNone()
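  # Re-adding a node that is already in a bucket must report Existing and must
  # not create a duplicate entry.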
test "Double add":
|
2020-07-07 08:56:26 +00:00
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2020-06-23 15:54:12 +00:00
|
|
|
# bitsPerHop = 1 -> Split only the branch in range of own id
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, ipLimits, rng = rng)
|
2020-06-23 15:54:12 +00:00
|
|
|
|
2020-07-07 08:56:26 +00:00
|
|
|
let doubleNode = node.nodeAtDistance(rng[], 256)
|
2020-06-23 15:54:12 +00:00
|
|
|
# Try to add the node twice
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(doubleNode) == Added
|
|
|
|
check table.addNode(doubleNode) == Existing
|
2020-06-23 15:54:12 +00:00
|
|
|
|
|
|
|
for n in 0..<BUCKET_SIZE-1:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(node.nodeAtDistance(rng[], 256)) == Added
|
2020-06-23 15:54:12 +00:00
|
|
|
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(node.nodeAtDistance(rng[], 256)) == ReplacementAdded
|
2020-06-23 15:54:12 +00:00
|
|
|
# Check when adding again once the bucket is full
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(doubleNode) == Existing
|
2020-06-23 15:54:12 +00:00
|
|
|
|
|
|
|
# Test if its order is preserved, there is one node in replacement cache
|
|
|
|
# which is why we run `BUCKET_SIZE` times.
|
|
|
|
for n in 0..<BUCKET_SIZE:
|
|
|
|
table.replaceNode(table.nodeToRevalidate())
|
|
|
|
|
2022-07-09 08:55:15 +00:00
|
|
|
let res = table.getNode(doubleNode.id)
|
2020-06-23 15:54:12 +00:00
|
|
|
check:
|
2022-07-09 08:55:15 +00:00
|
|
|
res.isSome()
|
|
|
|
res.get() == doubleNode
|
2020-06-23 15:54:12 +00:00
|
|
|
table.len == 1
|
|
|
|
|
|
|
|
test "Double replacement add":
|
2020-07-07 08:56:26 +00:00
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2020-06-23 14:11:58 +00:00
|
|
|
# bitsPerHop = 1 -> Split only the branch in range of own id
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, ipLimits, rng = rng)
|
2020-06-23 14:11:58 +00:00
|
|
|
|
|
|
|
# create a full bucket
|
2020-07-07 08:56:26 +00:00
|
|
|
let bucketNodes = node.nodesAtDistance(rng[], 256, BUCKET_SIZE)
|
2020-06-23 14:11:58 +00:00
|
|
|
for n in bucketNodes:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(n) == Added
|
2020-06-23 14:11:58 +00:00
|
|
|
|
|
|
|
# create a full replacement cache
|
2020-07-07 08:56:26 +00:00
|
|
|
let replacementNodes = node.nodesAtDistance(rng[], 256, REPLACEMENT_CACHE_SIZE)
|
2020-06-23 14:11:58 +00:00
|
|
|
for n in replacementNodes:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(n) == ReplacementAdded
|
2020-06-23 14:11:58 +00:00
|
|
|
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(replacementNodes[0]) == ReplacementExisting
|
2020-06-23 14:11:58 +00:00
|
|
|
|
|
|
|
table.replaceNode(table.nodeToRevalidate())
|
|
|
|
block:
|
|
|
|
# Should return the last node of the replacement cache successfully.
|
2022-07-09 08:55:15 +00:00
|
|
|
let res = table.getNode(replacementNodes[0].id)
|
2020-06-23 14:11:58 +00:00
|
|
|
check:
|
2022-07-09 08:55:15 +00:00
|
|
|
res.isSome()
|
|
|
|
res.get() == replacementNodes[0]
|
2020-06-23 14:11:58 +00:00
|
|
|
block:
|
|
|
|
# This node should be removed
|
|
|
|
check (table.getNode(bucketNodes[bucketNodes.high].id)).isNone()
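  # setJustSeen marks a node as most recently seen; nodeToRevalidate returns the
  # least recently seen entry, so nodes get evicted in the order they were seen.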
test "Just seen":
|
2020-07-07 08:56:26 +00:00
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2020-06-23 15:54:12 +00:00
|
|
|
# bitsPerHop = 1 -> Split only the branch in range of own id
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, ipLimits, rng = rng)
|
2020-06-23 15:54:12 +00:00
|
|
|
|
|
|
|
# create a full bucket
|
2020-07-07 08:56:26 +00:00
|
|
|
let bucketNodes = node.nodesAtDistance(rng[], 256, BUCKET_SIZE)
|
2020-06-23 15:54:12 +00:00
|
|
|
for n in bucketNodes:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(n) == Added
|
2020-06-23 15:54:12 +00:00
|
|
|
|
|
|
|
# swap seen order
|
|
|
|
for n in bucketNodes:
|
|
|
|
table.setJustSeen(n)
|
|
|
|
|
|
|
|
for n in bucketNodes:
|
|
|
|
table.replaceNode(table.nodeToRevalidate())
|
|
|
|
check (table.getNode(n.id)).isNone()
|
|
|
|
|
|
|
|
test "Just seen replacement":
|
2020-07-07 08:56:26 +00:00
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2020-06-23 15:54:12 +00:00
|
|
|
# bitsPerHop = 1 -> Split only the branch in range of own id
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, ipLimits, rng = rng)
|
2020-06-23 15:54:12 +00:00
|
|
|
|
|
|
|
# create a full bucket
|
2020-07-07 08:56:26 +00:00
|
|
|
let bucketNodes = node.nodesAtDistance(rng[], 256, BUCKET_SIZE)
|
2020-06-23 15:54:12 +00:00
|
|
|
for n in bucketNodes:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(n) == Added
|
2020-06-23 15:54:12 +00:00
|
|
|
|
|
|
|
# create a full replacement cache
|
2020-07-07 08:56:26 +00:00
|
|
|
let replacementNodes = node.nodesAtDistance(rng[], 256, REPLACEMENT_CACHE_SIZE)
|
2020-06-23 15:54:12 +00:00
|
|
|
for n in replacementNodes:
|
2020-11-26 17:20:15 +00:00
|
|
|
check table.addNode(n) == ReplacementAdded
|
2020-06-23 15:54:12 +00:00
|
|
|
|
|
|
|
for i in countdown(replacementNodes.high, 0):
|
|
|
|
table.replaceNode(table.nodeToRevalidate())
|
|
|
|
table.setJustSeen(replacementNodes[i])
|
|
|
|
|
|
|
|
for n in replacementNodes:
|
2022-07-09 08:55:15 +00:00
|
|
|
let res = table.getNode(n.id)
|
2020-06-23 15:54:12 +00:00
|
|
|
check:
|
2022-07-09 08:55:15 +00:00
|
|
|
res.isSome()
|
|
|
|
res.get() == n
|
2020-06-23 15:54:12 +00:00
|
|
|
|
|
|
|
for i in 0..<int(BUCKET_SIZE/2):
|
2022-07-09 08:55:15 +00:00
|
|
|
let res = table.getNode(bucketNodes[i].id)
|
2020-06-23 15:54:12 +00:00
|
|
|
check:
|
2022-07-09 08:55:15 +00:00
|
|
|
res.isSome()
|
|
|
|
res.get() == bucketNodes[i]
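  # The tests below use DefaultTableIpLimits, which caps how many nodes sharing
  # an IP address may occupy a single bucket and the table as a whole.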
test "Ip limits on bucket":
|
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
|
|
|
# bitsPerHop = 1 -> Split only the branch in range of own id
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, DefaultTableIpLimits, rng = rng)
|
2020-11-26 17:20:15 +00:00
|
|
|
|
|
|
|
block: # First bucket
|
|
|
|
let sameIpNodes = node.nodesAtDistance(rng[], 256,
|
|
|
|
int(DefaultTableIpLimits.bucketIpLimit))
|
|
|
|
for n in sameIpNodes:
|
|
|
|
check table.addNode(n) == Added
|
|
|
|
|
|
|
|
# Try to add a node, which should fail due to ip bucket limit
|
|
|
|
let anotherSameIpNode = node.nodeAtDistance(rng[], 256)
|
|
|
|
check table.addNode(anotherSameIpNode) == IpLimitReached
|
|
|
|
|
|
|
|
# Remove one and try add again
|
|
|
|
table.replaceNode(table.nodeToRevalidate())
|
|
|
|
check table.addNode(anotherSameIpNode) == Added
|
|
|
|
|
|
|
|
# Further fill the bucket with nodes with different ip.
|
|
|
|
let diffIpNodes = node.nodesAtDistanceUniqueIp(rng[], 256,
|
|
|
|
int(BUCKET_SIZE - DefaultTableIpLimits.bucketIpLimit),
|
2023-11-10 06:28:21 +00:00
|
|
|
parseIpAddress("192.168.0.1"))
|
2020-11-26 17:20:15 +00:00
|
|
|
for n in diffIpNodes:
|
|
|
|
check table.addNode(n) == Added
|
|
|
|
|
|
|
|
block: # Second bucket
|
|
|
|
# Try to add another node with the same IP, but different distance.
|
|
|
|
# This should split the bucket and add it.
|
|
|
|
let anotherSameIpNode = node.nodeAtDistance(rng[], 255)
|
|
|
|
check table.addNode(anotherSameIpNode) == Added
|
|
|
|
|
|
|
|
# Add more nodes with different ip and distance 255 to get in the new bucket
|
|
|
|
let diffIpNodes = node.nodesAtDistanceUniqueIp(rng[], 255,
|
|
|
|
int(BUCKET_SIZE - DefaultTableIpLimits.bucketIpLimit - 1),
|
2023-11-10 06:28:21 +00:00
|
|
|
parseIpAddress("192.168.1.1"))
|
2020-11-26 17:20:15 +00:00
|
|
|
for n in diffIpNodes:
|
|
|
|
check table.addNode(n) == Added
|
|
|
|
|
|
|
|
let sameIpNodes = node.nodesAtDistance(rng[], 255,
|
|
|
|
int(DefaultTableIpLimits.bucketIpLimit - 1))
|
|
|
|
for n in sameIpNodes:
|
|
|
|
check table.addNode(n) == Added
|
|
|
|
|
|
|
|
# Adding in another one should fail again
|
|
|
|
let anotherSameIpNode2 = node.nodeAtDistance(rng[], 255)
|
|
|
|
check table.addNode(anotherSameIpNode2) == IpLimitReached

  test "Ip limits on routing table":
    let node = generateNode(PrivateKey.random(rng[]))
    # bitsPerHop = 1 -> Split only the branch in range of own id
    var table = RoutingTable.init(node, 1, DefaultTableIpLimits, rng = rng)

    let amount = uint32(DefaultTableIpLimits.tableIpLimit div
      DefaultTableIpLimits.bucketIpLimit)
    # Fill `amount` of buckets, each with 14 nodes with different ips and 2
    # with equal ones.
    for j in 0..<amount:
      let nodes = node.nodesAtDistanceUniqueIp(rng[], 256 - j,
        int(BUCKET_SIZE - DefaultTableIpLimits.bucketIpLimit),
        parseIpAddress("192.168.0.1"))
      for n in nodes:
        check table.addNode(n) == Added

      let sameIpNodes = node.nodesAtDistance(rng[], 256 - j,
        int(DefaultTableIpLimits.bucketIpLimit))
      for n in sameIpNodes:
        check table.addNode(n) == Added

    # Add a node with a different IP, should work and split a bucket once more.
    let anotherDiffIpNode = node.nodeAtDistance(rng[], 256 - amount,
      parseIpAddress("192.168.1.1"))
    check table.addNode(anotherDiffIpNode) == Added

    let amountLeft = int(DefaultTableIpLimits.tableIpLimit mod
      DefaultTableIpLimits.bucketIpLimit)

    let sameIpNodes = node.nodesAtDistance(rng[], 256 - amount, amountLeft)
    for n in sameIpNodes:
      check table.addNode(n) == Added

    # Add a node with the same ip to this fresh bucket, should fail because the
    # total ip limit of the routing table is reached.
    let anotherSameIpNode = node.nodeAtDistance(rng[], 256 - amount)
    check table.addNode(anotherSameIpNode) == IpLimitReached

  test "Ip limits on replacement cache":
    let node = generateNode(PrivateKey.random(rng[]))
    var table = RoutingTable.init(node, 1, DefaultTableIpLimits, rng = rng)

    let diffIpNodes = node.nodesAtDistanceUniqueIp(rng[], 256,
      int(BUCKET_SIZE - DefaultTableIpLimits.bucketIpLimit + 1),
      parseIpAddress("192.168.0.1"))
    for n in diffIpNodes:
      check table.addNode(n) == Added

    let sameIpNodes = node.nodesAtDistance(rng[], 256,
      int(DefaultTableIpLimits.bucketIpLimit - 1))
    for n in sameIpNodes:
      check table.addNode(n) == Added

    let anotherSameIpNode1 = node.nodeAtDistance(rng[], 256)
    check table.addNode(anotherSameIpNode1) == ReplacementAdded

    let anotherSameIpNode2 = node.nodeAtDistance(rng[], 256)
    check table.addNode(anotherSameIpNode2) == IpLimitReached

    block: # Replace node to see if the first one becomes available
      table.replaceNode(table.nodeToRevalidate())
      let res = table.getNode(anotherSameIpNode1.id)
      check:
        res.isSome()
        res.get() == anotherSameIpNode1

        table.getNode(anotherSameIpNode2.id).isNone()

    block: # Replace again to see if the first one never becomes available
      table.replaceNode(table.nodeToRevalidate())
      check:
        table.getNode(anotherSameIpNode1.id).isNone()
        table.getNode(anotherSameIpNode2.id).isNone()

  test "Ip limits on replacement cache: deletion":
    let node = generateNode(PrivateKey.random(rng[]))
    var table = RoutingTable.init(node, 1, DefaultTableIpLimits, rng = rng)

    block: # Fill bucket
      let sameIpNodes = node.nodesAtDistance(rng[], 256,
        int(DefaultTableIpLimits.bucketIpLimit - 1))
      for n in sameIpNodes:
        check table.addNode(n) == Added

      let diffIpNodes = node.nodesAtDistanceUniqueIp(rng[], 256,
        int(BUCKET_SIZE - DefaultTableIpLimits.bucketIpLimit + 1),
        parseIpAddress("192.168.0.1"))
      for n in diffIpNodes:
        check table.addNode(n) == Added

    block: # Fill bucket replacement cache
      let sameIpNode = node.nodeAtDistance(rng[], 256)
      check table.addNode(sameIpNode) == ReplacementAdded

      let diffIpNodes = node.nodesAtDistanceUniqueIp(rng[], 256,
        int(REPLACEMENT_CACHE_SIZE - 1),
        parseIpAddress("192.168.1.1"))
      for n in diffIpNodes:
        check table.addNode(n) == ReplacementAdded

    # Try to add a node to the replacement cache, but the ip limit is reached
    let sameIpNode = node.nodeAtDistance(rng[], 256)
    check table.addNode(sameIpNode) == IpLimitReached

    # Add one with a different ip, to remove the first one
    let diffIpNode = node.nodeAtDistance(rng[], 256,
      parseIpAddress("192.168.2.1"))
    check table.addNode(diffIpNode) == ReplacementAdded

    # Now the add should work
    check table.addNode(sameIpNode) == ReplacementAdded

  test "Ip limits on replacement cache: double add":
    let node = generateNode(PrivateKey.random(rng[]))
    var table = RoutingTable.init(node, 1, DefaultTableIpLimits, rng = rng)

    # Fill bucket
    let diffIpNodes = node.nodesAtDistanceUniqueIp(rng[], 256, BUCKET_SIZE,
      parseIpAddress("192.168.0.1"))
    for n in diffIpNodes:
      check table.addNode(n) == Added

    # Test that a double add does not count against the ip limits.
    for i in 0..<DefaultTableIpLimits.bucketIpLimit:
      let sameIpNode = node.nodeAtDistance(rng[], 256)
      check table.addNode(sameIpNode) == ReplacementAdded
      # Add it again
      check table.addNode(sameIpNode) == ReplacementExisting

    let sameIpNode = node.nodeAtDistance(rng[], 256)
    check table.addNode(sameIpNode) == IpLimitReached
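  # Re-adding a node after its record was updated (new seqNum, possibly a new
  # ip) must report Existing and must not consume an extra ip-limit slot.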
test "Ip limits on bucket: double add with new ip":
|
|
|
|
let node = generateNode(PrivateKey.random(rng[]))
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(node, 1, DefaultTableIpLimits, rng = rng)
|
2020-11-26 17:20:15 +00:00
|
|
|
|
|
|
|
let pk = PrivateKey.random(rng[])
|
|
|
|
let sameIpNode1 = generateNode(pk)
|
|
|
|
check table.addNode(sameIpNode1) == Added
|
|
|
|
|
|
|
|
let updatedNode1 = generateNode(pk)
|
|
|
|
# Need to do an update to get seqNum increased
|
2021-01-26 13:11:22 +00:00
|
|
|
let updated = updatedNode1.update(pk,
|
2024-06-18 16:09:27 +00:00
|
|
|
Opt.some(parseIpAddress("192.168.0.1")),
|
|
|
|
Opt.some(Port(9000)), Opt.some(Port(9000)))
|
2020-11-26 17:20:15 +00:00
|
|
|
check updated.isOk()
|
|
|
|
check table.addNode(updatedNode1) == Existing
|
|
|
|
|
|
|
|
let sameIpNodes = node.nodesAtDistance(rng[], 256,
|
|
|
|
int(DefaultTableIpLimits.bucketIpLimit))
|
|
|
|
for n in sameIpNodes:
|
|
|
|
check table.addNode(n) == Added
|
|
|
|
|
|
|
|
check table.len == int(DefaultTableIpLimits.bucketIpLimit) + 1

  test "Ip limits on replacement cache: double add with new ip":
    let node = generateNode(PrivateKey.random(rng[]))
    var table = RoutingTable.init(node, 1, DefaultTableIpLimits, rng = rng)

    # Fill bucket
    let diffIpNodes = node.nodesAtDistanceUniqueIp(rng[], 256, BUCKET_SIZE,
      parseIpAddress("192.168.0.1"))
    for n in diffIpNodes:
      check table.addNode(n) == Added

    let (sameIpNode1, pk) = node.nodeAndPrivKeyAtDistance(rng[], 256)
    check table.addNode(sameIpNode1) == ReplacementAdded

    # For replacements we don't need to get seqNum increased as the node will
    # still get pushed in front of the queue.
    let updatedNode1 = generateNode(pk, ip = parseIpAddress("192.168.1.1"))
    check table.addNode(updatedNode1) == ReplacementExisting

    let sameIpNodes = node.nodesAtDistance(rng[], 256,
      int(DefaultTableIpLimits.bucketIpLimit))
    for n in sameIpNodes:
      check table.addNode(n) == ReplacementAdded

  test "Ip limits on bucket: even more adds with new ip":
    # This tests against an issue where the ip of the nodes would not get updated
    let node = generateNode(PrivateKey.random(rng[]))
    var table = RoutingTable.init(node, 1, DefaultTableIpLimits, rng = rng)

    let pk = PrivateKey.random(rng[])
    let sameIpNode1 = generateNode(pk)
    check table.addNode(sameIpNode1) == Added

    let updatedNode1 = generateNode(pk)

    for i in 0..<DefaultTableIpLimits.bucketIpLimit + 1:
      # Need to do an update to get seqNum increased
      let updated = updatedNode1.update(pk,
        Opt.some(parseIpAddress("192.168.0.1")),
        Opt.some(Port(9000+i)), Opt.some(Port(9000+i)))
      check updated.isOk()
      check table.addNode(updatedNode1) == Existing

    let sameIpNodes = node.nodesAtDistance(rng[], 256,
      int(DefaultTableIpLimits.bucketIpLimit))
    for n in sameIpNodes:
      check table.addNode(n) == Added

    check table.len == int(DefaultTableIpLimits.bucketIpLimit) + 1
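  # The remaining tests plug customDistanceCalculator into RoutingTable.init and
  # verify that neighbour lookups follow the custom metric.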
test "Custom distance calculator: distance":
|
|
|
|
let numNodes = 10
|
|
|
|
let local = generateNode(PrivateKey.random(rng[]))
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(local, 1, ipLimits, rng = rng,
|
|
|
|
distanceCalculator = customDistanceCalculator)
|
2021-09-02 12:00:36 +00:00
|
|
|
|
2022-06-17 20:45:37 +00:00
|
|
|
let nodes = generateNRandomNodes(rng[], numNodes)
|
2021-09-02 12:00:36 +00:00
|
|
|
|
|
|
|
for n in nodes:
|
|
|
|
check table.addNode(n) == Added
|
|
|
|
|
|
|
|
let neighbours = table.neighbours(local.id)
|
|
|
|
check len(neighbours) == numNodes
|
|
|
|
|
2022-11-16 16:44:00 +00:00
|
|
|
# check that neighbours are sorted by provided custom distance function
|
2021-09-02 12:00:36 +00:00
|
|
|
for i in 0..numNodes-2:
|
|
|
|
let prevDist = customDistance(local.id, neighbours[i].id)
|
|
|
|
let nextDist = customDistance(local.id, neighbours[i + 1].id)
|
|
|
|
check prevDist <= nextDist
|
|
|
|
|
|
|
|
test "Custom distance calculator: at log distance":
|
|
|
|
let numNodes = 10
|
|
|
|
let local = generateNode(PrivateKey.random(rng[]))
|
2021-09-07 09:56:16 +00:00
|
|
|
var table = RoutingTable.init(local, 1, ipLimits, rng = rng,
|
|
|
|
distanceCalculator = customDistanceCalculator)
|
2021-09-02 12:00:36 +00:00
|
|
|
|
2022-06-17 20:45:37 +00:00
|
|
|
let nodes = generateNRandomNodes(rng[], numNodes)
|
2021-09-02 12:00:36 +00:00
|
|
|
|
|
|
|
for n in nodes:
|
|
|
|
check table.addNode(n) == Added
|
|
|
|
|
|
|
|
let neighbours = table.neighbours(local.id)
|
|
|
|
check len(neighbours) == numNodes
|
|
|
|
|
|
|
|
for n in neighbours:
|
|
|
|
let cLogDist = customLogDistance(local.id, n.id)
|
|
|
|
let neighboursAtLogDist = table.neighboursAtDistance(cLogDist)
|
|
|
|
# there may be more than one node at provided distance
|
|
|
|
check len(neighboursAtLogDist) >= 1
|
|
|
|
check neighboursAtLogDist.contains(n)
|