feat: Swap PeerRecords with SignedPeerRecords
Providers now add and retrieve SignedPeerRecords to and from the DHT. The PeerId calculation was changed to use the public key of the discovery node.
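In code, the change looks roughly like this. A minimal sketch in Nim, not part of the commit: it assumes an already-bootstrapped ProvidersProtocol `p` and a `NodeId` from this repo (module paths are not shown in this diff), plus the libp2p imports used by the test suite below; `exampleProviderFlow` is an illustrative name.

import chronos
import libp2p/[peerid, routing_record, signed_envelope]
import libp2p/crypto/crypto

proc exampleProviderFlow(p: ProvidersProtocol,
                         privKey: crypto.PrivateKey,
                         cId: NodeId) {.async.} =
  # The PeerId is now derived from the node's public key rather than
  # from a keccak-256 multihash of the discovery NodeId.
  let pr = PeerRecord.init(
    peerId = PeerId.init(privKey.getPublicKey.get).get,
    seqNo = 0,
    addresses = @[])

  # The record is signed with the node's private key before it enters the DHT.
  let spr = SignedPeerRecord.init(privKey, pr).expect("signing should succeed")

  # Providers are now stored and served as SignedPeerRecords; the original
  # PeerRecord stays available as the signed record's payload (`data`).
  discard await p.addProvider(cId, spr)
  let provs = await p.getProviders(cId)
  doAssert provs.len > 0 and provs[0].data.peerId == pr.peerId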
parent 4a2a6878b4
commit 35026762b4

.gitignore (vendored): 1 line changed
@@ -1,2 +1,3 @@
 coverage
 nimcache
+tests/testAll
(package .nimble file)

@@ -14,7 +14,7 @@ requires "nim >= 1.2.0",
   "chronicles >= 0.10.2 & < 0.11.0",
   "chronos >= 3.0.11 & < 3.1.0",
   "eth >= 1.0.0 & < 1.1.0", # to be removed in https://github.com/status-im/nim-libp2p-dht/issues/2
-  "libp2p#unstable",
+  "libp2p#316f205381f9015402a53009c8f61bf17d2989b5",
   "metrics",
   "protobufserialization >= 0.2.0 & < 0.3.0",
   "secp256k1 >= 0.5.2 & < 0.6.0",
@@ -58,3 +58,4 @@ task coverage, "generates code coverage report":
   exec("genhtml coverage/coverage.f.info --output-directory coverage/report")
   echo "Opening HTML coverage report in browser..."
   exec("open coverage/report/index.html")
+
(providers protocol module)

@@ -17,7 +17,7 @@ import

 type
   ProvidersProtocol* = ref object
-    providers: Table[NodeId, seq[PeerRecord]]
+    providers: Table[NodeId, seq[SignedPeerRecord]]
     discovery*: protocol.Protocol

 ## ---- AddProvider ----
@@ -25,7 +25,7 @@ type
 const
   protoIdAddProvider = "AP".toBytes()

-proc addProviderLocal(p: ProvidersProtocol, cId: NodeId, prov: PeerRecord) =
+proc addProviderLocal(p: ProvidersProtocol, cId: NodeId, prov: SignedPeerRecord) =
   trace "adding provider to local db", n=p.discovery.localNode, cId, prov
   p.providers.mgetOrPut(cId, @[]).add(prov)

@@ -48,12 +48,12 @@ proc registerAddProvider(p: ProvidersProtocol) =
   let protocol = TalkProtocol(protocolHandler: handler)
   discard p.discovery.registerTalkProtocol(protoIdAddProvider, protocol) #TODO: handle error

-proc sendAddProvider*(p: ProvidersProtocol, dst: Node, cId: NodeId, pr: PeerRecord) =
+proc sendAddProvider*(p: ProvidersProtocol, dst: Node, cId: NodeId, pr: SignedPeerRecord) =
   #type NodeDesc = tuple[ip: IpAddress, udpPort, tcpPort: Port, pk: PublicKey]
   let msg = AddProviderMessage(cId: cId, prov: pr)
   discard p.discovery.talkReq(dst, protoIdAddProvider, msg.encode())

-proc addProvider*(p: ProvidersProtocol, cId: NodeId, pr: PeerRecord): Future[seq[Node]] {.async.} =
+proc addProvider*(p: ProvidersProtocol, cId: NodeId, pr: SignedPeerRecord): Future[seq[Node]] {.async.} =
   result = await p.discovery.lookup(cId)
   trace "lookup returned:", result
   # TODO: lookup is sepcified as not returning local, even if that is the closest. Is this OK?
@@ -87,7 +87,7 @@ proc getProvidersLocal*(
     p: ProvidersProtocol,
     cId: NodeId,
     maxitems: int = 5,
-  ): seq[PeerRecord] {.raises: [KeyError,Defect].}=
+  ): seq[SignedPeerRecord] {.raises: [KeyError,Defect].}=
   result = if (cId in p.providers): p.providers[cId] else: @[]

 proc getProviders*(
@@ -95,7 +95,7 @@ proc getProviders*(
     cId: NodeId,
     maxitems: int = 5,
     timeout: timer.Duration = chronos.milliseconds(5000)
-  ): Future[seq[PeerRecord]] {.async.} =
+  ): Future[seq[SignedPeerRecord]] {.async.} =
   ## Search for providers of the given cId.

   # What providers do we know about?
@@ -144,7 +144,10 @@ proc registerGetProviders(p: ProvidersProtocol) =
     let returnMsg = recvGetProviders(p, fromId, msg)
     trace "returnMsg", returnMsg

-    returnMsg.encode()
+    try:
+      returnMsg.encode()
+    except ResultError[CryptoError]:
+      return @[]

   let protocol = TalkProtocol(protocolHandler: handler)
   discard p.discovery.registerTalkProtocol(protoIdGetProviders, protocol) #TODO: handle error
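Why the handler now needs the try/except: the generic write* in the encoding module further down in this diff calls env.encode().tryGet(), and stew/results' tryGet raises ResultError when encoding (which now involves a signature) returns an error. A self-contained sketch of that mechanism, assuming stew/results semantics; encodeMaybe and the string error type are illustrative only, not from the commit.

import stew/results

proc encodeMaybe(fail: bool): Result[seq[byte], string] =
  # Stand-in for returnMsg.encode(), which can now fail.
  if fail:
    return err("crypto failure")
  ok(@[1'u8, 2'u8])

proc handlerSketch(): seq[byte] =
  try:
    # tryGet unwraps an ok value, or raises ResultError[string] on err.
    return encodeMaybe(true).tryGet()
  except ResultError[string]:
    # Fall back to an empty reply instead of crashing the handler.
    return @[]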
(providers encoding module)

@@ -1,6 +1,6 @@
 import
   ../discv5/[node],
-  libp2p/routing_record,
+  libp2p/[routing_record, signed_envelope],
   libp2p/protobuf/minprotobuf,
   ./providers_messages

@@ -34,12 +34,22 @@ func getField*(pb: ProtoBuffer, field: int,
   else:
     err(ProtoError.IncorrectBlob)

-func write*(pb: var ProtoBuffer, field: int, pr: PeerRecord) =
-  ## Write PeerRecord value ``pr`` to object ``pb`` using ProtoBuf's encoding.
-  write(pb, field, pr.encode())
+func write*[T: SignedPeerRecord | PeerRecord | Envelope](
+    pb: var ProtoBuffer,
+    field: int,
+    env: T) {.raises: [Defect, ResultError[CryptoError]].} =
+
+  ## Write Envelope value ``env`` to object ``pb`` using ProtoBuf's encoding.
+  let encoded = env.encode().tryGet()
+  write(pb, field, encoded)
+
+# TODO: This should be included upstream in libp2p/signed_envelope. Once it's
+# added in libp2p, we can remove it from here.
+proc encode*[T](msg: SignedPayload[T]): Result[seq[byte], CryptoError] =
+  msg.envelope.encode()

 proc getRepeatedField*(pb: ProtoBuffer, field: int,
-                       value: var seq[PeerRecord]): ProtoResult[bool] {.
+                       value: var seq[SignedPeerRecord]): ProtoResult[bool] {.
     inline.} =
   var items: seq[seq[byte]]
   value.setLen(0)
@@ -48,7 +58,7 @@ proc getRepeatedField*(pb: ProtoBuffer, field: int,
     ok(false)
   else:
     for item in items:
-      let ma = PeerRecord.decode(item)
+      let ma = SignedPeerRecord.decode(item)
       if ma.isOk():
         value.add(ma.get())
       else:
@@ -102,7 +112,6 @@ proc decode*(

   let pb = initProtoBuffer(buffer)
   var msg = ProvidersMessage()

   ? pb.getRequiredField(1, msg.total)
   discard ? pb.getRepeatedField(2, msg.provs)
-
@@ -117,3 +126,4 @@ proc encode*(msg: ProvidersMessage): seq[byte] =

   pb.finish()
   pb.buffer
+
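Taken together, the generic write* and the SignedPayload encode helper above give signed records a byte-level round trip that getRepeatedField relies on. A small sketch, assuming the helpers from this module are in scope; roundTrips is an illustrative name.

proc roundTrips(spr: SignedPeerRecord): bool =
  # What write* stores in a protobuf field: the encoded, signed envelope.
  let bytes = spr.encode().tryGet()
  # What getRepeatedField does with each raw item it reads back.
  let decoded = SignedPeerRecord.decode(bytes)
  decoded.isOk() and decoded.get().data.peerId == spr.data.peerId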
(providers_messages module)

@@ -5,11 +5,11 @@ import
 type
   AddProviderMessage* = object
     cId*: NodeId
-    prov*: PeerRecord
+    prov*: SignedPeerRecord

   GetProvidersMessage* = object
     cId*: NodeId

   ProvidersMessage* = object
     total*: uint32
-    provs*: seq[PeerRecord]
+    provs*: seq[SignedPeerRecord]
(providers test suite)

@@ -10,6 +10,7 @@
 {.used.}

 import
+  std/options,
   std/sequtils,
   chronos, stew/byteutils, nimcrypto, asynctest,
   eth/keys,
@@ -17,9 +18,12 @@ import
   chronicles,
   libp2pdht/discv5/protocol as discv5_protocol,
   test_helper,
+  libp2p/crypto/crypto,
+  libp2p/crypto/secp,
   libp2p/routing_record,
   libp2p/multihash,
-  libp2p/multicodec
+  libp2p/multicodec,
+  libp2p/signed_envelope

@@ -35,34 +39,31 @@ proc initProvidersNode(
   let d = initDiscoveryNode(rng, privKey, address, bootstrapRecords)
   newProvidersProtocol(d)

-proc toPeerRecord(p: ProvidersProtocol) : PeerRecord =
-  ## hadle conversion between the two worlds
+proc toSignedPeerRecord(privKey: crypto.PrivateKey) : SignedPeerRecord =
+  ## handle conversion between the two worlds

-  #NodeId is a keccak-256 hash created by keccak256.digest and stored as UInt256
-  let discNodeId = p.discovery.localNode.id
-  ## get it back to MDigest form
-  var digest: MDigest[256]
-  digest.data = discNodeId.toBytesBE
-  ## get into a MultiHash
-  var mh = MultiHash.init(multiCodec("keccak-256"), digest).orError(HashError).get()
-  result = PeerRecord.init(
-    peerId = PeerId.init(mh.data.buffer).get,
+  let pr = PeerRecord.init(
+    peerId = PeerId.init(privKey.getPublicKey.get).get,
     seqNo = 0,
     addresses = @[])
+
+  return SignedPeerRecord.init(privKey, pr).expect("Should init SignedPeerRecord with private key")
   # trace "IDs", discNodeId, digest, mh, peerId=result.peerId.hex

-proc bootstrapNodes(nodecount: int, bootnodes: openArray[Record], rng = keys.newRng()) : seq[ProvidersProtocol] =
+proc bootstrapNodes(nodecount: int, bootnodes: openArray[Record], rng = keys.newRng()) : seq[(ProvidersProtocol, keys.PrivateKey)] =

   for i in 0..<nodecount:
-    let node = initProvidersNode(rng, keys.PrivateKey.random(rng[]), localAddress(20302 + i), bootnodes)
+    let privKey = keys.PrivateKey.random(rng[])
+    let node = initProvidersNode(rng, privKey, localAddress(20302 + i), bootnodes)
     node.discovery.start()
-    result.add(node)
+    result.add((node, privKey))
   debug "---- STARTING BOOSTRAPS ---"

   #await allFutures(result.mapIt(it.bootstrap())) # this waits for bootstrap based on bootENode, which includes bonding with all its ping pongs

-proc bootstrapNetwork(nodecount: int, rng = keys.newRng()) : seq[ProvidersProtocol] =
+proc bootstrapNetwork(nodecount: int, rng = keys.newRng()) : seq[(ProvidersProtocol, keys.PrivateKey)] =
   let
+    privKey = keys.PrivateKey.fromHex(
+      "a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a617")[]
     bootNodeKey = keys.PrivateKey.fromHex(
       "a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a617")[]
     bootNodeAddr = localAddress(20301)
@@ -71,55 +72,66 @@ proc bootstrapNetwork(nodecount: int, rng = keys.newRng()) : seq[ProvidersProtoc
   #waitFor bootNode.bootstrap() # immediate, since no bootnodes are defined above

   result = bootstrapNodes(nodecount - 1, @[bootnode.discovery.localNode.record], rng = rng)
-  result.insert(bootNode, 0)
+  result.insert((bootNode, privKey), 0)

+# TODO: Remove this once we have removed all traces of nim-eth/keys
+func pkToPk(pk: keys.PrivateKey) : Option[crypto.PrivateKey] =
+  let res = some(crypto.PrivateKey.init((secp.SkPrivateKey)(pk)))
+  return res
+
+
 # suite "Providers Tests":
 suite "Providers Tests: node alone":
   var
     rng: ref HmacDrbgContext
-    nodes: seq[ProvidersProtocol]
+    nodes: seq[(ProvidersProtocol, keys.PrivateKey)]
     targetId: NodeId
+    node0: ProvidersProtocol
+    privKey_keys0: keys.PrivateKey
+    privKey0: crypto.PrivateKey
+    signedPeerRec0: SignedPeerRecord
+    peerRec0: PeerRecord

   setupAll:
-    debug "RUNNING BEFORE TESTS"
     rng = keys.newRng()
     nodes = bootstrapNetwork(nodecount=1)
     targetId = toNodeId(keys.PrivateKey.random(rng[]).toPublicKey)
+    (node0, privKey_keys0) = nodes[0]
+    privKey0 = privKey_keys0.pkToPk.get
+    signedPeerRec0 = privKey0.toSignedPeerRecord
+    peerRec0 = signedPeerRec0.data

   teardownAll:
-    debug "RUNNING AFTER TESTS"
-    for n in nodes:
+    for (n, _) in nodes:
       await n.discovery.closeWait()
     await sleepAsync(chronos.seconds(3))


   test "Node in isolation should store":
     debug "---- ADDING PROVIDERS ---", nodes = nodes.len
-    let addedTo = await nodes[0].addProvider(targetId, nodes[0].toPeerRecord)
+    let addedTo = await node0.addProvider(targetId, signedPeerRec0)
     debug "Provider added to: ", addedTo

     debug "---- STARTING CHECKS ---"
     check (addedTo.len == 1)
-    check (addedTo[0].id == nodes[0].discovery.localNode.id)
-    check (nodes[0].getProvidersLocal(targetId)[0].peerId == nodes[0].toPeerRecord.peerId)
+    check (addedTo[0].id == node0.discovery.localNode.id)
+    check (node0.getProvidersLocal(targetId)[0].data.peerId == peerRec0.peerId)

   test "Node in isolation should retrieve":

     debug "---- STARTING PROVIDERS LOOKUP ---"
-    let providers = await nodes[0].getProviders(targetId)
+    let providers = await node0.getProviders(targetId)
     debug "Providers:", providers

     debug "---- STARTING CHECKS ---"
-    check (providers.len > 0 and providers[0].peerId == nodes[0].toPeerRecord.peerId)
+    check (providers.len > 0 and providers[0].data.peerId == peerRec0.peerId)

   test "Should not retrieve bogus":

     let bogusId = toNodeId(keys.PrivateKey.random(rng[]).toPublicKey)

     debug "---- STARTING PROVIDERS LOOKUP ---"
-    let providers = await nodes[0].getProviders(bogusId)
+    let providers = await node0.getProviders(bogusId)
     debug "Providers:", providers

     debug "---- STARTING CHECKS ---"
@@ -130,39 +142,49 @@ suite "Providers Tests: two nodes":

   var
     rng: ref HmacDrbgContext
-    nodes: seq[ProvidersProtocol]
+    nodes: seq[(ProvidersProtocol, keys.PrivateKey)]
     targetId: NodeId
+    node0: ProvidersProtocol
+    privKey_keys0: keys.PrivateKey
+    privKey0: crypto.PrivateKey
+    signedPeerRec0: SignedPeerRecord
+    peerRec0: PeerRecord

   setupAll:
     rng = keys.newRng()
     nodes = bootstrapNetwork(nodecount=2)
     targetId = toNodeId(keys.PrivateKey.random(rng[]).toPublicKey)
+    (node0, privKey_keys0) = nodes[0]
+    privKey0 = privKey_keys0.pkToPk.get
+    signedPeerRec0 = privKey0.toSignedPeerRecord
+    peerRec0 = signedPeerRec0.data

   teardownAll:
-    for n in nodes:
+    for (n, _) in nodes:
       await n.discovery.closeWait()
     await sleepAsync(chronos.seconds(3))

-  test "2 nodes, store and retieve from same":
+  test "2 nodes, store and retrieve from same":

     debug "---- ADDING PROVIDERS ---"
-    let addedTo = await nodes[0].addProvider(targetId, nodes[0].toPeerRecord)
+    let addedTo = await node0.addProvider(targetId, signedPeerRec0)
     debug "Provider added to: ", addedTo

     debug "---- STARTING PROVIDERS LOOKUP ---"
-    let providers = await nodes[0].getProviders(targetId)
+    let providers = await node0.getProviders(targetId)
     debug "Providers:", providers

     debug "---- STARTING CHECKS ---"
-    check (providers.len == 1 and providers[0].peerId == nodes[0].toPeerRecord.peerId)
+    check (providers.len == 1 and providers[0].data.peerId == peerRec0.peerId)

-  test "2 nodes, retieve from other":
+  test "2 nodes, retrieve from other":
     debug "---- STARTING PROVIDERS LOOKUP ---"
-    let providers = await nodes[1].getProviders(targetId)
+    let (node1, _) = nodes[1]
+    let providers = await node1.getProviders(targetId)
     debug "Providers:", providers

     debug "---- STARTING CHECKS ---"
-    check (providers.len == 1 and providers[0].peerId == nodes[0].toPeerRecord.peerId)
+    check (providers.len == 1 and providers[0].data.peerId == peerRec0.peerId)

@@ -170,40 +192,50 @@ suite "Providers Tests: 20 nodes":

   var
     rng: ref HmacDrbgContext
-    nodes: seq[ProvidersProtocol]
+    nodes: seq[(ProvidersProtocol, keys.PrivateKey)]
     targetId: NodeId
+    node0: ProvidersProtocol
+    privKey_keys0: keys.PrivateKey
+    privKey0: crypto.PrivateKey
+    signedPeerRec0: SignedPeerRecord
+    peerRec0: PeerRecord

   setupAll:
     rng = keys.newRng()
     nodes = bootstrapNetwork(nodecount=20)
     targetId = toNodeId(keys.PrivateKey.random(rng[]).toPublicKey)
+    (node0, privKey_keys0) = nodes[0]
+    privKey0 = privKey_keys0.pkToPk.get
+    signedPeerRec0 = privKey0.toSignedPeerRecord
+    peerRec0 = signedPeerRec0.data

     await sleepAsync(chronos.seconds(15))

   teardownAll:
-    for n in nodes: # if last test is enabled, we need nodes[1..^1] here
+    for (n, _) in nodes: # if last test is enabled, we need nodes[1..^1] here
       await n.discovery.closeWait()

-  test "20 nodes, store and retieve from same":
+  test "20 nodes, store and retrieve from same":

     debug "---- ADDING PROVIDERS ---"
-    let addedTo = await nodes[0].addProvider(targetId, nodes[0].toPeerRecord)
+    let addedTo = await node0.addProvider(targetId, signedPeerRec0)
     debug "Provider added to: ", addedTo

     debug "---- STARTING PROVIDERS LOOKUP ---"
-    let providers = await nodes[0].getProviders(targetId)
+    let providers = await node0.getProviders(targetId)
     debug "Providers:", providers

     debug "---- STARTING CHECKS ---"
-    check (providers.len == 1 and providers[0].peerId == nodes[0].toPeerRecord.peerId)
+    check (providers.len == 1 and providers[0].data.peerId == peerRec0.peerId)

-  test "20 nodes, retieve from other":
+  test "20 nodes, retrieve from other":
     debug "---- STARTING PROVIDERS LOOKUP ---"
-    let providers = await nodes[^1].getProviders(targetId)
+    let (node19, _) = nodes[^2]
+    let providers = await node19.getProviders(targetId)
     debug "Providers:", providers

     debug "---- STARTING CHECKS ---"
-    check (providers.len == 1 and providers[0].peerId == nodes[0].toPeerRecord.peerId)
+    check (providers.len == 1 and providers[0].data.peerId == peerRec0.peerId)

   # test "20 nodes, retieve after bootnode dies":
   #   # TODO: currently this is not working even with a 2 minute timeout
@@ -216,5 +248,3 @@ suite "Providers Tests: 20 nodes":

   # debug "---- STARTING CHECKS ---"
   # check (providers.len == 1 and providers[0].peerId == nodes[0].toPeerRecord.peerId)
-
-