* rework discovery with async queues (see the sketch after this list)

* increase max message size for large manifests

* increase sleep time to 100 millis

* pass config

* check for nil on start/stop

* fix tests and split out discovery tests

* don't auto mount network

* add discovery tests

* rework mock discovery

* move discovery mock to disc dir

* don't force logging syncs

* don't force mock discovery on all tests

* rework discovery with methods

* add top level utils file

* don't use asyncCheck

* don't pass entire blocks to list blocks callback

* spelling

* - don't send want reqs to peers reporting the cid

- Don't request blocks directly on presence update, use `requestBlock`

* bug, nodes should not have blocks in local store

* Add failing test

* prefetch blocks so that download isn't serial

* if request already pending, return the handle

* fire discovery if no peers report block as have

* only query discovery if not enough nodes for cid

* wrap async req in template

* use non-awaiting version of queue routines

* rework E2E tests as unittest

* re-add chronicles sinks
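
The core of this rework replaces the per-block discovery loops with chronos `AsyncQueue` producers and consumers, with fire-and-forget producers wrapped in templates. A minimal sketch of that pattern, assuming only `pkg/chronos`; the `Cid = string` stand-in and the `consumer`/`queueReq` names are illustrative, not the engine's actual API:

import pkg/chronos

type Cid = string # stand-in for the real Cid type, for illustration only

proc consumer(queue: AsyncQueue[Cid]) {.async.} =
  ## Task runner: pull one item at a time and process it
  while true:
    let cid = await queue.get()   # suspends until something is queued
    echo "processing ", cid       # e.g. provide or look up the block

template queueReq(queue: AsyncQueue[Cid], cids: seq[Cid]) =
  ## Fire-and-forget producer, mirroring the "wrap async req in template" commit
  proc produce() {.async.} =
    for cid in cids:
      if cid notin queue:         # skip items already queued
        await queue.put(cid)
  asyncSpawn produce()

when isMainModule:
  let q = newAsyncQueue[Cid](100)
  asyncSpawn consumer(q)
  queueReq(q, @["cid-1", "cid-2"])
  waitFor sleepAsync(10.millis)   # give the consumer time to drain the queue

Pushing work through a bounded AsyncQueue decouples callers from slow DHT round-trips, which is what lets requestBlock hand back a pending handle immediately instead of blocking on discovery.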

Co-authored-by: Tanguy <tanguy@status.im>
Dmitriy Ryajov 2022-05-12 15:52:03 -06:00 committed by GitHub
parent 9ca4f90cf3
commit d669e344bc
16 changed files with 592 additions and 363 deletions


@ -7,7 +7,8 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
import std/[sequtils, sets, tables, sugar] import std/sequtils
import std/sets
import pkg/chronos import pkg/chronos
import pkg/chronicles import pkg/chronicles
@ -15,7 +16,7 @@ import pkg/libp2p
import ../stores/blockstore import ../stores/blockstore
import ../blocktype as bt import ../blocktype as bt
import ../utils/asyncheapqueue import ../utils
import ../discovery import ../discovery
import ./protobuf/blockexc import ./protobuf/blockexc
@ -32,31 +33,20 @@ logScope:
topics = "dagger blockexc engine" topics = "dagger blockexc engine"
const const
DefaultBlockTimeout* = 5.minutes
DefaultMaxPeersPerRequest* = 10 DefaultMaxPeersPerRequest* = 10
DefaultTaskQueueSize = 100 DefaultTaskQueueSize = 100
DefaultConcurrentTasks = 10 DefaultConcurrentTasks = 10
DefaultMaxRetries = 3 DefaultMaxRetries = 3
DefaultConcurrentDiscRequests = 10
# Current advertisement is meant to be more efficient than DefaultConcurrentAdvertRequests = 10
# correct, so blocks could be advertised more slowly than that DefaultDiscoveryTimeout = 1.minutes
# Put some margin DefaultMaxQueriedBlocksCache = 1000
BlockAdvertisementFrequency = 30.minutes DefaultMinPeersPerBlock = 3
type type
TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.} TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}
TaskScheduler* = proc(task: BlockExcPeerCtx): bool {.gcsafe.} TaskScheduler* = proc(task: BlockExcPeerCtx): bool {.gcsafe.}
BlockDiscovery* = ref object
discoveredProvider: AsyncEvent
discoveryLoop: Future[void]
toDiscover: Cid
treatedPeer: HashSet[PeerId]
inflightIWant: HashSet[PeerId]
gotIWantResponse: AsyncEvent
provides: seq[PeerId]
lastDhtQuery: Moment
BlockExcEngine* = ref object of RootObj BlockExcEngine* = ref object of RootObj
localStore*: BlockStore # where we localStore blocks for this instance localStore*: BlockStore # where we localStore blocks for this instance
network*: BlockExcNetwork # network interface network*: BlockExcNetwork # network interface
@ -70,12 +60,16 @@ type
peersPerRequest: int # max number of peers to request from peersPerRequest: int # max number of peers to request from
wallet*: WalletRef # nitro wallet for micropayments wallet*: WalletRef # nitro wallet for micropayments
pricing*: ?Pricing # optional bandwidth pricing pricing*: ?Pricing # optional bandwidth pricing
advertisedBlocks: seq[Cid] discovery*: Discovery # Discovery interface
advertisedIndex: int concurrentAdvReqs: int # Concurrent advertise requests
advertisementFrequency: Duration advertiseLoop*: Future[void] # Advertise loop task handle
runningDiscoveries*: Table[Cid, BlockDiscovery] advertiseQueue*: AsyncQueue[Cid] # Advertise queue
blockAdded: AsyncEvent advertiseTasks*: seq[Future[void]] # Advertise tasks
discovery*: Discovery concurrentDiscReqs: int # Concurrent discovery requests
discoveryLoop*: Future[void] # Discovery loop task handle
discoveryTasks*: seq[Future[void]] # Discovery tasks
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
minPeersPerBlock*: int # Max number of peers with block
Pricing* = object Pricing* = object
address*: EthAddress address*: EthAddress
@ -100,7 +94,95 @@ proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe} =
b.taskQueue.pushOrUpdateNoWait(task).isOk() b.taskQueue.pushOrUpdateNoWait(task).isOk()
proc blockexcTaskRunner(b: BlockExcEngine): Future[void] {.gcsafe.} proc blockexcTaskRunner(b: BlockExcEngine): Future[void] {.gcsafe.}
proc advertiseLoop(b: BlockExcEngine): Future[void] {.gcsafe.}
proc discoveryLoopRunner(b: BlockExcEngine) {.async.} =
while b.blockexcRunning:
for cid in toSeq(b.pendingBlocks.wantList):
try:
await b.discoveryQueue.put(cid)
except CatchableError as exc:
trace "Exception in discovery loop", exc = exc.msg
trace "About to sleep, number of wanted blocks", wanted = b.pendingBlocks.len
await sleepAsync(30.seconds)
proc advertiseLoopRunner*(b: BlockExcEngine) {.async.} =
proc onBlock(cid: Cid) {.async.} =
try:
await b.advertiseQueue.put(cid)
except CatchableError as exc:
trace "Exception listing blocks", exc = exc.msg
while b.blockexcRunning:
await b.localStore.listBlocks(onBlock)
await sleepAsync(30.seconds)
trace "Exiting advertise task loop"
proc advertiseTaskRunner(b: BlockExcEngine) {.async.} =
## Run advertise tasks
##
while b.blockexcRunning:
try:
let cid = await b.advertiseQueue.get()
await b.discovery.provideBlock(cid)
except CatchableError as exc:
trace "Exception in advertise task runner", exc = exc.msg
trace "Exiting advertise task runner"
proc discoveryTaskRunner(b: BlockExcEngine) {.async.} =
## Run discovery tasks
##
while b.blockexcRunning:
try:
let
cid = await b.discoveryQueue.get()
haves = b.peers.filterIt(
it.peerHave.anyIt( it == cid )
)
trace "Got peers for block", cid = $cid, count = haves.len
let
providers =
if haves.len < b.minPeersPerBlock:
await b.discovery
.findBlockProviders(cid)
.wait(DefaultDiscoveryTimeout)
else:
@[]
checkFutures providers.mapIt( b.network.dialPeer(it.data) )
except CatchableError as exc:
trace "Exception in discovery task runner", exc = exc.msg
trace "Exiting discovery task runner"
template queueFindBlocksReq(b: BlockExcEngine, cids: seq[Cid]) =
proc queueReq() {.async.} =
try:
for cid in cids:
if cid notin b.discoveryQueue:
trace "Queueing find block request", cid = $cid
await b.discoveryQueue.put(cid)
except CatchableError as exc:
trace "Exception queueing discovery request", exc = exc.msg
asyncSpawn queueReq()
template queueProvideBlocksReq(b: BlockExcEngine, cids: seq[Cid]) =
proc queueReq() {.async.} =
try:
for cid in cids:
if cid notin b.advertiseQueue:
trace "Queueing provide block request", cid = $cid
await b.advertiseQueue.put(cid)
except CatchableError as exc:
trace "Exception queueing discovery request", exc = exc.msg
asyncSpawn queueReq()
proc start*(b: BlockExcEngine) {.async.} = proc start*(b: BlockExcEngine) {.async.} =
## Start the blockexc task ## Start the blockexc task
@ -116,14 +198,14 @@ proc start*(b: BlockExcEngine) {.async.} =
for i in 0..<b.concurrentTasks: for i in 0..<b.concurrentTasks:
b.blockexcTasks.add(blockexcTaskRunner(b)) b.blockexcTasks.add(blockexcTaskRunner(b))
info "Getting existing block list" for i in 0..<b.concurrentAdvReqs:
# TODO: should be reworked by #89 b.advertiseTasks.add(advertiseTaskRunner(b))
# let blocks = await b.localStore.blockList()
# b.advertisedBlocks = blocks
# We start faster to publish everything ASAP
b.advertisementFrequency = 5.seconds
b.blockexcTasks.add(b.advertiseLoop()) for i in 0..<b.concurrentDiscReqs:
b.discoveryTasks.add(discoveryTaskRunner(b))
b.advertiseLoop = advertiseLoopRunner(b)
b.discoveryLoop = discoveryLoopRunner(b)
proc stop*(b: BlockExcEngine) {.async.} = proc stop*(b: BlockExcEngine) {.async.} =
## Stop the blockexc blockexc ## Stop the blockexc blockexc
@ -141,156 +223,93 @@ proc stop*(b: BlockExcEngine) {.async.} =
await t.cancelAndWait() await t.cancelAndWait()
trace "Task stopped" trace "Task stopped"
for _, bd in b.runningDiscoveries: for t in b.advertiseTasks:
await bd.discoveryLoop.cancelAndWait() if not t.finished:
trace "Awaiting task to stop"
await t.cancelAndWait()
trace "Task stopped"
b.runningDiscoveries.clear() for t in b.discoveryTasks:
if not t.finished:
trace "Awaiting task to stop"
await t.cancelAndWait()
trace "Task stopped"
if not b.advertiseLoop.isNil and not b.advertiseLoop.finished:
trace "Awaiting advertise loop to stop"
await b.advertiseLoop.cancelAndWait()
trace "Advertise loop stopped"
if not b.discoveryLoop.isNil and not b.discoveryLoop.finished:
trace "Awaiting discovery loop to stop"
await b.discoveryLoop.cancelAndWait()
trace "Discovery loop stopped"
trace "NetworkStore stopped" trace "NetworkStore stopped"
proc discoverOnDht(b: BlockExcEngine, bd: BlockDiscovery) {.async.} =
bd.lastDhtQuery = Moment.fromNow(10.hours)
defer: bd.lastDhtQuery = Moment.now()
let discoveredProviders = await b.discovery.findBlockProviders(bd.toDiscover)
for peer in discoveredProviders:
asyncSpawn b.network.dialPeer(peer.data)
proc discoverLoop(b: BlockExcEngine, bd: BlockDiscovery) {.async.} =
# First, try connected peers
# After a percent of peers declined, or a timeout passed, query DHT
# rinse & repeat
#
# TODO add a global timeout
debug "starting block discovery", cid=bd.toDiscover
bd.gotIWantResponse.fire()
while true:
# wait for iwant replies
await bd.gotIWantResponse.wait()
bd.gotIWantResponse.clear()
var foundPeerNew = false
for p in b.peers:
if bd.toDiscover in p.peerHave and p.id notin bd.treatedPeer:
bd.provides.add(p.id)
bd.treatedPeer.incl(p.id)
bd.inflightIWant.excl(p.id)
foundPeerNew = true
if foundPeerNew:
bd.discoveredProvider.fire()
continue
trace "asking peers", cid=bd.toDiscover, peers=b.peers.len, treated=bd.treatedPeer.len, inflight=bd.inflightIWant.len
for p in b.peers:
if p.id notin bd.treatedPeer and p.id notin bd.inflightIWant:
# just send wants
bd.inflightIWant.incl(p.id)
b.network.request.sendWantList(
p.id,
@[bd.toDiscover],
wantType = WantType.wantHave,
sendDontHave = true)
if bd.inflightIWant.len < 3 and #TODO or a timeout
bd.lastDhtQuery < Moment.now() - 5.seconds:
#start query
asyncSpawn b.discoverOnDht(bd)
proc discoverBlock*(b: BlockExcEngine, cid: Cid): BlockDiscovery =
if cid in b.runningDiscoveries:
return b.runningDiscoveries[cid]
else:
result = BlockDiscovery(
toDiscover: cid,
discoveredProvider: newAsyncEvent(),
gotIWantResponse: newAsyncEvent(),
)
result.discoveryLoop = b.discoverLoop(result)
b.runningDiscoveries[cid] = result
return result
proc stopDiscovery(b: BlockExcEngine, cid: Cid) =
if cid in b.runningDiscoveries:
b.runningDiscoveries[cid].discoveryLoop.cancel()
b.runningDiscoveries.del(cid)
proc requestBlock*( proc requestBlock*(
b: BlockExcEngine, b: BlockExcEngine,
cid: Cid, cid: Cid,
timeout = DefaultBlockTimeout): Future[bt.Block] {.async.} = timeout = DefaultBlockTimeout): Future[bt.Block] =
## Request a block from remotes ## Request a block from remotes
## ##
debug "requesting block", cid trace "Requesting block", cid = $cid
# TODO
# we could optimize "groups of related chunks"
# be requesting multiple chunks, and running discovery
# less often
if cid in b.localStore:
return (await b.localStore.getBlock(cid)).get()
# be careful, don't give back control to main loop here
# otherwise, the block might slip in
if cid in b.pendingBlocks: if cid in b.pendingBlocks:
return await b.pendingBlocks.blocks[cid].wait(timeout) return b.pendingBlocks.getWantHandle(cid, timeout)
# We are the first one to request this block, so we handle it
let let
timeoutFut = sleepAsync(timeout) blk = b.pendingBlocks.getWantHandle(cid, timeout)
blk = b.pendingBlocks.addOrAwait(cid)
discovery = b.discoverBlock(cid)
# Just take the first discovered peer if b.peers.len <= 0:
try: trace "No peers to request blocks from", cid = $cid
await timeoutFut or blk or discovery.discoveredProvider.wait() b.queueFindBlocksReq(@[cid])
discovery.discoveredProvider.clear() return blk
except CancelledError as exc:
#TODO also wrong, same issue as below
blk.cancel()
b.stopDiscovery(cid)
raise exc
if timeoutFut.finished: var peers = b.peers
# TODO this is wrong, because other user may rely on us
# to handle this block. This proc should be asyncSpawned
#
# Other people may be using the discovery or blk
# so don't kill them
blk.cancel()
b.stopDiscovery(cid)
raise newException(AsyncTimeoutError, "")
if blk.finished: # get the first peer with at least one (any)
# a peer sent us the block out of the blue, why not # matching cid
b.stopDiscovery(cid) # TODO: this should be sorted by best to worst
return await blk var blockPeer: BlockExcPeerCtx
for p in peers:
if cid in p.peerHave:
blockPeer = p
break
# We got a provider # didn't find any peer with matching cids
# Currently, we just ask him for the block, and hope he gives it to us if isNil(blockPeer):
# blockPeer = peers[0]
# In reality, we could keep discovering until we find a suitable price, etc trace "No peers with block, sending to first peer", peer = blockPeer.id
b.stopDiscovery(cid)
timeoutFut.cancel()
assert discovery.provides.len > 0 peers.keepItIf(
it != blockPeer and cid notin it.peerHave
)
debug "Requesting block from peer", providerCount = discovery.provides.len,
peer = discovery.provides[0], cid
# request block # request block
b.network.request.sendWantList( b.network.request.sendWantList(
discovery.provides[0], blockPeer.id,
@[cid], @[cid],
wantType = WantType.wantBlock) # we want this remote to send us a block wantType = WantType.wantBlock) # we want this remote to send us a block
#TODO substract the discovery time if peers.len == 0:
return await blk.wait(timeout) trace "Not enough peers to send want list to", cid = $cid
b.queueFindBlocksReq(@[cid])
return blk # no peers to send wants to
# filter out the peer we've already requested from
let stop = min(peers.high, b.peersPerRequest)
trace "Sending want list requests to remaining peers", count = stop + 1
for p in peers[0..stop]:
if cid notin p.peerHave:
# just send wants
b.network.request.sendWantList(
p.id,
@[cid],
wantType = WantType.wantHave) # we only want to know if the peer has the block
return blk
proc blockPresenceHandler*( proc blockPresenceHandler*(
b: BlockExcEngine, b: BlockExcEngine,
@ -299,18 +318,33 @@ proc blockPresenceHandler*(
## Handle block presence ## Handle block presence
## ##
trace "Received presence update for peer", peer
let peerCtx = b.getPeerCtx(peer) let peerCtx = b.getPeerCtx(peer)
if isNil(peerCtx):
return
for blk in blocks: for blk in blocks:
if presence =? Presence.init(blk): if presence =? Presence.init(blk):
if not isNil(peerCtx): peerCtx.updatePresence(presence)
peerCtx.updatePresence(presence)
if presence.cid in b.runningDiscoveries: var
let bd = b.runningDiscoveries[presence.cid] cids = toSeq(b.pendingBlocks.wantList).filterIt(
if not presence.have: it in peerCtx.peerHave
bd.inflightIWant.excl(peer) )
bd.treatedPeer.incl(peer)
bd.gotIWantResponse.fire() trace "Received presence update for cids", peer, cids = $cids
if cids.len > 0:
b.network.request.sendWantList(
peer,
cids,
wantType = WantType.wantBlock) # we want this remote to send us a block
# if none of the connected peers report our wants in their have list,
# fire up discovery
b.queueFindBlocksReq(toSeq(b.pendingBlocks.wantList)
.filter(proc(cid: Cid): bool =
(not b.peers.anyIt( cid in it.peerHave ))))
proc scheduleTasks(b: BlockExcEngine, blocks: seq[bt.Block]) = proc scheduleTasks(b: BlockExcEngine, blocks: seq[bt.Block]) =
trace "Schedule a task for new blocks" trace "Schedule a task for new blocks"
@ -331,21 +365,11 @@ proc resolveBlocks*(b: BlockExcEngine, blocks: seq[bt.Block]) =
## and schedule any new task to be ran ## and schedule any new task to be ran
## ##
trace "Resolving blocks" trace "Resolving blocks", blocks = blocks.len
var gotNewBlocks = false b.pendingBlocks.resolve(blocks)
for bl in blocks: b.scheduleTasks(blocks)
if bl.cid notin b.advertisedBlocks: #TODO that's very slow, maybe a ordered hashset instead b.queueProvideBlocksReq(blocks.mapIt( it.cid ))
#TODO could do some smarter ordering here (insert it just before b.advertisedIndex, or similar)
b.advertisedBlocks.add(bl.cid)
asyncSpawn b.discovery.publishProvide(bl.cid)
gotNewBlocks = true
if gotNewBlocks:
b.pendingBlocks.resolve(blocks)
b.scheduleTasks(blocks)
b.blockAdded.fire()
proc payForBlocks(engine: BlockExcEngine, proc payForBlocks(engine: BlockExcEngine,
peer: BlockExcPeerCtx, peer: BlockExcPeerCtx,
@ -421,14 +445,20 @@ proc wantListHandler*(
if not b.scheduleTask(peerCtx): if not b.scheduleTask(peerCtx):
trace "Unable to schedule task for peer", peer trace "Unable to schedule task for peer", peer
proc accountHandler*(engine: BlockExcEngine, peer: PeerID, account: Account) {.async.} = proc accountHandler*(
engine: BlockExcEngine,
peer: PeerID,
account: Account) {.async.} =
let context = engine.getPeerCtx(peer) let context = engine.getPeerCtx(peer)
if context.isNil: if context.isNil:
return return
context.account = account.some context.account = account.some
proc paymentHandler*(engine: BlockExcEngine, peer: PeerId, payment: SignedState) {.async.} = proc paymentHandler*(
engine: BlockExcEngine,
peer: PeerId,
payment: SignedState) {.async.} =
without context =? engine.getPeerCtx(peer).option and without context =? engine.getPeerCtx(peer).option and
account =? context.account: account =? context.account:
return return
@ -451,13 +481,8 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerID) =
)) ))
# broadcast our want list, the other peer will do the same # broadcast our want list, the other peer will do the same
let wantList = collect(newSeqOfCap(b.runningDiscoveries.len)): if b.pendingBlocks.len > 0:
for cid, bd in b.runningDiscoveries: b.network.request.sendWantList(peer, toSeq(b.pendingBlocks.wantList), full = true)
bd.inflightIWant.incl(peer)
cid
if wantList.len > 0:
b.network.request.sendWantList(peer, wantList, full = true, sendDontHave = true)
if address =? b.pricing.?address: if address =? b.pricing.?address:
b.network.request.sendAccount(peer, Account(address: address)) b.network.request.sendAccount(peer, Account(address: address))
@ -471,31 +496,6 @@ proc dropPeer*(b: BlockExcEngine, peer: PeerID) =
# drop the peer from the peers table # drop the peer from the peers table
b.peers.keepItIf( it.id != peer ) b.peers.keepItIf( it.id != peer )
proc advertiseLoop(b: BlockExcEngine) {.async, gcsafe.} =
while true:
if b.advertisedIndex >= b.advertisedBlocks.len:
b.advertisedIndex = 0
b.advertisementFrequency = BlockAdvertisementFrequency
# check that we still have this block.
while
b.advertisedIndex < b.advertisedBlocks.len and
not(b.localStore.contains(b.advertisedBlocks[b.advertisedIndex])):
b.advertisedBlocks.delete(b.advertisedIndex)
#publish it
if b.advertisedIndex < b.advertisedBlocks.len:
asyncSpawn b.discovery.publishProvide(b.advertisedBlocks[b.advertisedIndex])
inc b.advertisedIndex
let toSleep =
if b.advertisedBlocks.len > 0:
b.advertisementFrequency div b.advertisedBlocks.len
else:
30.minutes
await sleepAsync(toSleep) or b.blockAdded.wait()
b.blockAdded.clear()
proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
trace "Handling task for peer", peer = task.id trace "Handling task for peer", peer = task.id
@ -517,6 +517,7 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
.mapIt(!it.read) .mapIt(!it.read)
if blocks.len > 0: if blocks.len > 0:
trace "Sending blocks to peer", peer = task.id, blocks = blocks.len
b.network.request.sendBlocks( b.network.request.sendBlocks(
task.id, task.id,
blocks) blocks)
@ -559,19 +560,27 @@ proc new*(
discovery: Discovery, discovery: Discovery,
concurrentTasks = DefaultConcurrentTasks, concurrentTasks = DefaultConcurrentTasks,
maxRetries = DefaultMaxRetries, maxRetries = DefaultMaxRetries,
peersPerRequest = DefaultMaxPeersPerRequest): T = peersPerRequest = DefaultMaxPeersPerRequest,
concurrentAdvReqs = DefaultConcurrentAdvertRequests,
concurrentDiscReqs = DefaultConcurrentDiscRequests,
minPeersPerBlock = DefaultMinPeersPerBlock): T =
let engine = BlockExcEngine( let
localStore: localStore, engine = BlockExcEngine(
pendingBlocks: PendingBlocksManager.new(), localStore: localStore,
blockAdded: newAsyncEvent(), pendingBlocks: PendingBlocksManager.new(),
peersPerRequest: peersPerRequest, peersPerRequest: peersPerRequest,
network: network, network: network,
wallet: wallet, wallet: wallet,
concurrentTasks: concurrentTasks, concurrentTasks: concurrentTasks,
maxRetries: maxRetries, concurrentAdvReqs: concurrentAdvReqs,
discovery: discovery, concurrentDiscReqs: concurrentDiscReqs,
taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize)) maxRetries: maxRetries,
taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
discovery: discovery,
advertiseQueue: newAsyncQueue[Cid](DefaultTaskQueueSize),
discoveryQueue: newAsyncQueue[Cid](DefaultTaskQueueSize),
minPeersPerBlock: minPeersPerBlock)
proc peerEventHandler(peerId: PeerID, event: PeerEvent) {.async.} = proc peerEventHandler(peerId: PeerID, event: PeerEvent) {.async.} =
if event.kind == PeerEventKind.Joined: if event.kind == PeerEventKind.Joined:
@ -609,7 +618,6 @@ proc new*(
onBlocks: blocksHandler, onBlocks: blocksHandler,
onPresence: blockPresenceHandler, onPresence: blockPresenceHandler,
onAccount: accountHandler, onAccount: accountHandler,
onPayment: paymentHandler onPayment: paymentHandler)
)
return engine return engine


@ -17,7 +17,7 @@ import ./protobuf/blockexc
logScope: logScope:
topics = "dagger blockexc networkpeer" topics = "dagger blockexc networkpeer"
const MaxMessageSize = 8 * 1024 * 1024 const MaxMessageSize = 100 * 1024 * 1024 # manifest files can be big
type type
RPCHandler* = proc(peer: NetworkPeer, msg: Message): Future[void] {.gcsafe.} RPCHandler* = proc(peer: NetworkPeer, msg: Message): Future[void] {.gcsafe.}


@ -129,7 +129,7 @@ proc new*(T: type DaggerServer, config: DaggerConf): T =
) )
daggerNode = DaggerNodeRef.new(switch, store, engine, erasure, discovery, contracts) daggerNode = DaggerNodeRef.new(switch, store, engine, erasure, discovery, contracts)
restServer = RestServerRef.new( restServer = RestServerRef.new(
daggerNode.initRestApi(), daggerNode.initRestApi(config),
initTAddress("127.0.0.1" , config.apiPort), initTAddress("127.0.0.1" , config.apiPort),
bufferSize = (1024 * 64), bufferSize = (1024 * 64),
maxRequestBodySize = int.high) maxRequestBodySize = int.high)


@ -8,18 +8,20 @@
## those terms. ## those terms.
import pkg/chronos import pkg/chronos
import pkg/chronicles
import pkg/libp2p import pkg/libp2p
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/stew/shims/net import pkg/stew/shims/net
import pkg/libp2pdht/discv5/protocol as discv5 import pkg/libp2pdht/discv5/protocol as discv5
import rng import ./rng
import ./errors
export discv5 export discv5
type type
Discovery* = ref object Discovery* = ref object of RootObj
protocol: discv5.Protocol protocol: discv5.Protocol
localInfo: PeerInfo localInfo: PeerInfo
@ -55,15 +57,33 @@ proc toDiscoveryId*(cid: Cid): NodeId =
## To discovery id ## To discovery id
readUintBE[256](keccak256.digest(cid.data.buffer).data) readUintBE[256](keccak256.digest(cid.data.buffer).data)
proc findBlockProviders*( method findBlockProviders*(
d: Discovery, d: Discovery,
cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
return (await d.protocol.getProviders(cid.toDiscoveryId())).get() ## Find block providers
##
proc publishProvide*(d: Discovery, cid: Cid) {.async.} = trace "Finding providers for block", cid = $cid
let bid = cid.toDiscoveryId() without providers =?
discard await d.protocol.addProvider(bid, d.localInfo.signedPeerRecord) (await d.protocol.getProviders(cid.toDiscoveryId())).mapFailure, error:
trace "Error finding providers for block", cid = $cid, error = error.msg
return providers
method provideBlock*(d: Discovery, cid: Cid) {.async, base.} =
## Provide a bock Cid
##
trace "Providing block", cid = $cid
let
nodes = await d.protocol.addProvider(
cid.toDiscoveryId(),
d.localInfo.signedPeerRecord)
if nodes.len <= 0:
trace "Couldn't provide to any nodes!"
trace "Provided to nodes", nodes = nodes.len
proc start*(d: Discovery) {.async.} = proc start*(d: Discovery) {.async.} =
d.protocol.updateRecord(d.localInfo.signedPeerRecord).expect("updating SPR") d.protocol.updateRecord(d.localInfo.signedPeerRecord).expect("updating SPR")


@ -109,7 +109,7 @@ proc encode*(
# TODO: this is a tight blocking loop so we sleep here to allow # TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed # other events to be processed, this should be addressed
# by threading # by threading
await sleepAsync(10.millis) await sleepAsync(100.millis)
for j in 0..<blocks: for j in 0..<blocks:
let idx = blockIdx[j] let idx = blockIdx[j]


@ -9,6 +9,7 @@
import std/options import std/options
import std/tables import std/tables
import std/sequtils
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
@ -46,10 +47,17 @@ type
contracts*: ?ContractInteractions contracts*: ?ContractInteractions
proc start*(node: DaggerNodeRef) {.async.} = proc start*(node: DaggerNodeRef) {.async.} =
await node.switch.start() if not node.switch.isNil:
await node.engine.start() await node.switch.start()
await node.erasure.start()
await node.discovery.start() if not node.engine.isNil:
await node.engine.start()
if not node.erasure.isNil:
await node.erasure.start()
if not node.discovery.isNil:
await node.discovery.start()
if contracts =? node.contracts: if contracts =? node.contracts:
await contracts.start() await contracts.start()
@ -60,10 +68,17 @@ proc start*(node: DaggerNodeRef) {.async.} =
proc stop*(node: DaggerNodeRef) {.async.} = proc stop*(node: DaggerNodeRef) {.async.} =
trace "Stopping node" trace "Stopping node"
await node.engine.stop() if not node.engine.isNil:
await node.switch.stop() await node.engine.stop()
await node.erasure.stop()
await node.discovery.stop() if not node.switch.isNil:
await node.switch.stop()
if not node.erasure.isNil:
await node.erasure.stop()
if not node.discovery.isNil:
await node.discovery.stop()
if contracts =? node.contracts: if contracts =? node.contracts:
await contracts.stop() await contracts.stop()
@ -103,12 +118,22 @@ proc retrieve*(
proc erasureJob(): Future[void] {.async.} = proc erasureJob(): Future[void] {.async.} =
try: try:
without res =? (await node.erasure.decode(manifest)), error: # spawn an erasure decoding job without res =? (await node.erasure.decode(manifest)), error: # spawn an erasure decoding job
trace "Unable to erasure decode manigest", cid, exc = error.msg trace "Unable to erasure decode manifest", cid, exc = error.msg
except CatchableError as exc: except CatchableError as exc:
trace "Exception decoding manifest", cid trace "Exception decoding manifest", cid
asyncSpawn erasureJob() asyncSpawn erasureJob()
proc prefetchBlocks() {.async.} =
## Initiates requests to all blocks in the manifest
##
try:
discard await allFinished(
manifest.mapIt( node.blockStore.getBlock( it ) ))
except CatchableError as exc:
trace "Exception prefetching blocks", exc = exc.msg
asyncSpawn prefetchBlocks()
return LPStream(StoreStream.new(node.blockStore, manifest)).success return LPStream(StoreStream.new(node.blockStore, manifest)).success
let let


@ -37,9 +37,9 @@
# #
# Implementation: # Implementation:
# Our implementation uses additive cyclic groups instead of the multiplicative # Our implementation uses additive cyclic groups instead of the multiplicative
# cyclic group in the paper, thus changing the name of the group operation as in # cyclic group in the paper, thus changing the name of the group operation as in
# blscurve and blst. Thus, point multiplication becomes point addition, and scalar # blscurve and blst. Thus, point multiplication becomes point addition, and scalar
# exponentiation becomes scalar multiplicaiton. # exponentiation becomes scalar multiplication.
# #
# Number of operations: # Number of operations:
# The following table summarizes the number of operations in different phases # The following table summarizes the number of operations in different phases
@ -277,7 +277,7 @@ proc setup*(ssk: SecretKey, s:int64, filename: string): (Tau, seq[blst_p1]) =
let (u, ub) = rndP1() let (u, ub) = rndP1()
t.u.add(u) t.u.add(u)
ubase.add(ub) ubase.add(ub)
#TODO: a better bytearray conversion of TauZero for the signature might be needed #TODO: a better bytearray conversion of TauZero for the signature might be needed
# the current conversion using $t might be architecture dependent and not unique # the current conversion using $t might be architecture dependent and not unique
let signature = sign(ssk.signkey, $t) let signature = sign(ssk.signkey, $t)


@ -21,11 +21,13 @@ import pkg/chronos
import pkg/presto import pkg/presto
import pkg/libp2p import pkg/libp2p
import pkg/stew/base10 import pkg/stew/base10
import pkg/confutils
import pkg/libp2p/routing_record import pkg/libp2p/routing_record
import ../node import ../node
import ../blocktype import ../blocktype
import ../conf
proc validate( proc validate(
pattern: string, pattern: string,
@ -83,7 +85,7 @@ proc decodeString(T: type bool, value: string): Result[T, cstring] =
proc encodeString(value: bool): Result[string, cstring] = proc encodeString(value: bool): Result[string, cstring] =
ok($value) ok($value)
proc initRestApi*(node: DaggerNodeRef): RestRouter = proc initRestApi*(node: DaggerNodeRef, conf: DaggerConf): RestRouter =
var router = RestRouter.init(validate) var router = RestRouter.init(validate)
router.api( router.api(
MethodGet, MethodGet,
@ -318,6 +320,7 @@ proc initRestApi*(node: DaggerNodeRef): RestRouter =
return RestApiResponse.response( return RestApiResponse.response(
"Id: " & $node.switch.peerInfo.peerId & "Id: " & $node.switch.peerInfo.peerId &
"\nAddrs: \n" & addrs & "\n") "\nAddrs: \n" & addrs &
"\nRoot Dir: " & $conf.dataDir)
return router return router

dagger/utils.nim

@ -0,0 +1,4 @@
import ./utils/asyncheapqueue
import ./utils/fileutils
export asyncheapqueue, fileutils


@ -1 +0,0 @@
patchFile("dagger", "discovery", "dagger/mockdiscovery")


@ -12,17 +12,15 @@ import pkg/libp2p
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/stew/shims/net import pkg/stew/shims/net
import pkg/libp2pdht/discv5/protocol as discv5 import pkg/dagger/discovery
export discv5
type type
Discovery* = ref object MockDiscovery* = ref object of Discovery
findBlockProviders_var*: proc(d: Discovery, cid: Cid): seq[SignedPeerRecord] {.gcsafe.} findBlockProvidersHandler*: proc(d: MockDiscovery, cid: Cid): seq[SignedPeerRecord] {.gcsafe.}
publishProvide_var*: proc(d: Discovery, cid: Cid) {.gcsafe.} publishProvideHandler*: proc(d: MockDiscovery, cid: Cid) {.gcsafe.}
proc new*( proc new*(
T: type Discovery, T: type MockDiscovery,
localInfo: PeerInfo, localInfo: PeerInfo,
discoveryPort: Port, discoveryPort: Port,
bootstrapNodes = newSeq[SignedPeerRecord](), bootstrapNodes = newSeq[SignedPeerRecord](),
@ -35,17 +33,16 @@ proc findPeer*(
peerId: PeerID): Future[?PeerRecord] {.async.} = peerId: PeerID): Future[?PeerRecord] {.async.} =
return none(PeerRecord) return none(PeerRecord)
proc findBlockProviders*( method findBlockProviders*(
d: Discovery, d: MockDiscovery,
cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = cid: Cid): Future[seq[SignedPeerRecord]] {.async.} =
if isNil(d.findBlockProviders_var): return if isNil(d.findBlockProvidersHandler): return
return d.findBlockProviders_var(d, cid) return d.findBlockProvidersHandler(d, cid)
proc publishProvide*(d: Discovery, cid: Cid) {.async.} =
if isNil(d.publishProvide_var): return
d.publishProvide_var(d, cid)
method provideBlock*(d: MockDiscovery, cid: Cid) {.async.} =
if isNil(d.publishProvideHandler): return
d.publishProvideHandler(d, cid)
proc start*(d: Discovery) {.async.} = proc start*(d: Discovery) {.async.} =
discard discard


@ -0,0 +1,232 @@
import std/sequtils
import std/sugar
import std/algorithm
import std/tables
import pkg/asynctest
import pkg/chronos
import pkg/stew/byteutils
import pkg/libp2p
import pkg/libp2p/errors
import pkg/dagger/rng
import pkg/dagger/stores
import pkg/dagger/blockexchange
import pkg/dagger/chunker
import pkg/dagger/blocktype as bt
import ./mockdiscovery
import ../../helpers
import ../../examples
suite "Block Advertising and Discovery":
let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256)
var
blocks: seq[bt.Block]
switch: Switch
discovery: MockDiscovery
wallet: WalletRef
network: BlockExcNetwork
localStore: CacheStore
engine: BlockExcEngine
setup:
while true:
let chunk = await chunker.getBytes()
if chunk.len <= 0:
break
blocks.add(bt.Block.new(chunk).tryGet())
switch = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr})
discovery = MockDiscovery.new(switch.peerInfo, 0.Port)
wallet = WalletRef.example
network = BlockExcNetwork.new(switch)
localStore = CacheStore.new(blocks.mapIt( it ))
engine = BlockExcEngine.new(localStore, wallet, network, discovery, minPeersPerBlock = 1)
switch.mount(network)
test "Should discover want list":
let
pendingBlocks = blocks.mapIt(
engine.pendingBlocks.getWantHandle(it.cid)
)
await engine.start() # fire up discovery loop
discovery.findBlockProvidersHandler =
proc(d: MockDiscovery, cid: Cid): seq[SignedPeerRecord] =
engine.resolveBlocks(blocks.filterIt( it.cid == cid ))
await allFuturesThrowing(
allFinished(pendingBlocks))
await engine.stop()
test "Should advertise have blocks":
let
advertised = initTable.collect:
for b in blocks: {b.cid: newFuture[void]()}
discovery.publishProvideHandler = proc(d: MockDiscovery, cid: Cid) =
if cid in advertised and not advertised[cid].finished():
advertised[cid].complete()
await engine.start() # fire up advertise loop
await allFuturesThrowing(
allFinished(toSeq(advertised.values)))
await engine.stop()
test "Should not launch discovery if remote peer has block":
let
pendingBlocks = blocks.mapIt(
engine.pendingBlocks.getWantHandle(it.cid)
)
peerId = PeerID.example
haves = collect(initTable()):
for blk in blocks: {blk.cid: 0.u256}
engine.peers.add(
BlockExcPeerCtx(
id: peerId,
peerPrices: haves
))
discovery.findBlockProvidersHandler =
proc(d: MockDiscovery, cid: Cid): seq[SignedPeerRecord] =
check false
await engine.start() # fire up discovery loop
engine.pendingBlocks.resolve(blocks)
await allFuturesThrowing(
allFinished(pendingBlocks))
await engine.stop()
suite "E2E - Multiple Nodes Discovery":
let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256)
var
switch: seq[Switch]
blockexc: seq[NetworkStore]
blocks: seq[bt.Block]
setup:
while true:
let chunk = await chunker.getBytes()
if chunk.len <= 0:
break
blocks.add(bt.Block.new(chunk).tryGet())
for _ in 0..<4:
let
s = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr})
discovery = MockDiscovery.new(s.peerInfo, 0.Port)
wallet = WalletRef.example
network = BlockExcNetwork.new(s)
localStore = CacheStore.new()
engine = BlockExcEngine.new(localStore, wallet, network, discovery, minPeersPerBlock = 1)
networkStore = NetworkStore.new(engine, localStore)
s.mount(network)
switch.add(s)
blockexc.add(networkStore)
teardown:
switch = @[]
blockexc = @[]
test "E2E - Should advertise and discover blocks":
# Distribute the blocks amongst 1..3
# Ask 0 to download everything without connecting him beforehand
var advertised: Table[Cid, SignedPeerRecord]
MockDiscovery(blockexc[1].engine.discovery)
.publishProvideHandler = proc(d: MockDiscovery, cid: Cid) =
advertised.add(cid, switch[1].peerInfo.signedPeerRecord)
MockDiscovery(blockexc[2].engine.discovery)
.publishProvideHandler = proc(d: MockDiscovery, cid: Cid) =
advertised.add(cid, switch[2].peerInfo.signedPeerRecord)
MockDiscovery(blockexc[3].engine.discovery)
.publishProvideHandler = proc(d: MockDiscovery, cid: Cid) =
advertised.add(cid, switch[3].peerInfo.signedPeerRecord)
await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5])
await blockexc[2].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[4..10])
await blockexc[3].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[10..15])
MockDiscovery(blockexc[0].engine.discovery)
.findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid): seq[SignedPeerRecord] =
if cid in advertised:
result.add(advertised[cid])
let futs = collect(newSeq):
for b in blocks:
blockexc[0].engine.requestBlock(b.cid)
await allFuturesThrowing(
switch.mapIt( it.start() ) &
blockexc.mapIt( it.engine.start() )
)
await allFutures(futs)
await allFuturesThrowing(
blockexc.mapIt( it.engine.stop() ) &
switch.mapIt( it.stop() )
)
test "E2E - Should advertise and discover blocks with peers already connected":
# Distribute the blocks amongst 1..3
# Ask 0 to download everything without connecting him beforehand
var advertised: Table[Cid, SignedPeerRecord]
MockDiscovery(blockexc[1].engine.discovery)
.publishProvideHandler = proc(d: MockDiscovery, cid: Cid) =
advertised.add(cid, switch[1].peerInfo.signedPeerRecord)
MockDiscovery(blockexc[2].engine.discovery)
.publishProvideHandler = proc(d: MockDiscovery, cid: Cid) =
advertised.add(cid, switch[2].peerInfo.signedPeerRecord)
MockDiscovery(blockexc[3].engine.discovery)
.publishProvideHandler = proc(d: MockDiscovery, cid: Cid) =
advertised.add(cid, switch[3].peerInfo.signedPeerRecord)
await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5])
await blockexc[2].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[4..10])
await blockexc[3].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[10..15])
MockDiscovery(blockexc[0].engine.discovery)
.findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid): seq[SignedPeerRecord] =
if cid in advertised:
result.add(advertised[cid])
await allFuturesThrowing(
switch.mapIt( it.start() ) &
blockexc.mapIt( it.engine.start() )
)
# Connect to the two first nodes
discard await blockexc[0].engine.requestBlock(blocks[0].cid)
discard await blockexc[0].engine.requestBlock(blocks[6].cid)
let futs = collect(newSeq):
for b in blocks:
blockexc[0].engine.requestBlock(b.cid)
await allFutures(futs).wait(10.seconds)
await allFuturesThrowing(
blockexc.mapIt( it.engine.stop() ) &
switch.mapIt( it.stop() )
)


@ -1,5 +1,4 @@
import std/sequtils import std/sequtils
import std/sugar
import std/algorithm import std/algorithm
import pkg/asynctest import pkg/asynctest
@ -8,7 +7,6 @@ import pkg/stew/byteutils
import pkg/libp2p import pkg/libp2p
import pkg/libp2p/errors import pkg/libp2p/errors
import pkg/libp2pdht/discv5/protocol as discv5
import pkg/dagger/rng import pkg/dagger/rng
import pkg/dagger/stores import pkg/dagger/stores
@ -38,6 +36,7 @@ suite "NetworkStore engine - 2 nodes":
engine1, engine2: BlockExcEngine engine1, engine2: BlockExcEngine
localStore1, localStore2: BlockStore localStore1, localStore2: BlockStore
discovery1, discovery2: Discovery discovery1, discovery2: Discovery
pendingBlocks1, pendingBlocks2: seq[Future[bt.Block]]
setup: setup:
while true: while true:
@ -86,8 +85,8 @@ suite "NetworkStore engine - 2 nodes":
) )
# initialize our want lists # initialize our want lists
for b in blocks2: discard blockexc1.engine.discoverBlock(b.cid) pendingBlocks1 = blocks2.mapIt( blockexc1.engine.pendingBlocks.getWantHandle( it.cid ) )
for b in blocks1: discard blockexc2.engine.discoverBlock(b.cid) pendingBlocks2 = blocks1.mapIt( blockexc2.engine.pendingBlocks.getWantHandle( it.cid ) )
pricing1.address = wallet1.address pricing1.address = wallet1.address
pricing2.address = wallet2.address pricing2.address = wallet2.address
@ -98,7 +97,7 @@ suite "NetworkStore engine - 2 nodes":
switch2.peerInfo.peerId, switch2.peerInfo.peerId,
switch2.peerInfo.addrs) switch2.peerInfo.addrs)
await sleepAsync(100.milliseconds) # give some time to exchange lists await sleepAsync(1.seconds) # give some time to exchange lists
peerCtx2 = blockexc1.engine.getPeerCtx(peerId2) peerCtx2 = blockexc1.engine.getPeerCtx(peerId2)
peerCtx1 = blockexc2.engine.getPeerCtx(peerId1) peerCtx1 = blockexc2.engine.getPeerCtx(peerId1)
@ -113,12 +112,18 @@ suite "NetworkStore engine - 2 nodes":
check not isNil(peerCtx1) check not isNil(peerCtx1)
check not isNil(peerCtx2) check not isNil(peerCtx2)
await allFuturesThrowing(
allFinished(pendingBlocks1))
await allFuturesThrowing(
allFinished(pendingBlocks2))
check: check:
peerCtx1.peerHave.mapIt( $it ).sorted(cmp[string]) == peerCtx1.peerHave.mapIt( $it ).sorted(cmp[string]) ==
toSeq(blockexc2.engine.runningDiscoveries.keys()).mapIt( $it ).sorted(cmp[string]) pendingBlocks2.mapIt( $it.read.cid ).sorted(cmp[string])
peerCtx2.peerHave.mapIt( $it ).sorted(cmp[string]) == peerCtx2.peerHave.mapIt( $it ).sorted(cmp[string]) ==
toSeq(blockexc1.engine.runningDiscoveries.keys()).mapIt( $it ).sorted(cmp[string]) pendingBlocks1.mapIt( $it.read.cid ).sorted(cmp[string])
test "exchanges accounts on connect": test "exchanges accounts on connect":
check peerCtx1.account.?address == pricing1.address.some check peerCtx1.account.?address == pricing1.address.some
@ -175,7 +180,8 @@ suite "NetworkStore engine - 2 nodes":
check wallet2.balance(channel, Asset) > 0 check wallet2.balance(channel, Asset) > 0
suite "NetworkStore - multiple nodes": suite "NetworkStore - multiple nodes":
let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256) let
chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256)
var var
switch: seq[Switch] switch: seq[Switch]
@ -213,10 +219,9 @@ suite "NetworkStore - multiple nodes":
engine = downloader.engine engine = downloader.engine
# Add blocks from 1st peer to want list # Add blocks from 1st peer to want list
for b in blocks[0..3]: let
discard engine.discoverBlock(b.cid) pendingBlocks1 = blocks[0..3].mapIt( engine.pendingBlocks.getWantHandle( it.cid ) )
for b in blocks[12..15]: pendingBlocks2 = blocks[12..15].mapIt( engine.pendingBlocks.getWantHandle( it.cid ))
discard engine.discoverBlock(b.cid)
await allFutures( await allFutures(
blocks[0..3].mapIt( blockexc[0].engine.localStore.putBlock(it) )) blocks[0..3].mapIt( blockexc[0].engine.localStore.putBlock(it) ))
@ -230,12 +235,16 @@ suite "NetworkStore - multiple nodes":
await connectNodes(switch) await connectNodes(switch)
await sleepAsync(1.seconds) await sleepAsync(1.seconds)
await allFuturesThrowing(
allFinished(pendingBlocks1),
allFinished(pendingBlocks2))
check: check:
engine.peers[0].peerHave.mapIt($it).sorted(cmp[string]) == engine.peers[0].peerHave.mapIt($it).sorted(cmp[string]) ==
blocks[0..3].mapIt( it.cid ).mapIt($it).sorted(cmp[string]) blocks[0..3].mapIt( $(it.cid) ).sorted(cmp[string])
engine.peers[3].peerHave.mapIt($it).sorted(cmp[string]) == engine.peers[3].peerHave.mapIt($it).sorted(cmp[string]) ==
blocks[12..15].mapIt( it.cid ).mapIt($it).sorted(cmp[string]) blocks[12..15].mapIt( $(it.cid) ).sorted(cmp[string])
test "should exchange blocks with multiple nodes": test "should exchange blocks with multiple nodes":
let let
@ -243,10 +252,9 @@ suite "NetworkStore - multiple nodes":
engine = downloader.engine engine = downloader.engine
# Add blocks from 1st peer to want list # Add blocks from 1st peer to want list
for b in blocks[0..3]: let
discard engine.discoverBlock(b.cid) pendingBlocks1 = blocks[0..3].mapIt( engine.pendingBlocks.getWantHandle( it.cid ) )
for b in blocks[12..15]: pendingBlocks2 = blocks[12..15].mapIt( engine.pendingBlocks.getWantHandle( it.cid ))
discard engine.discoverBlock(b.cid)
await allFutures( await allFutures(
blocks[0..3].mapIt( blockexc[0].engine.localStore.putBlock(it) )) blocks[0..3].mapIt( blockexc[0].engine.localStore.putBlock(it) ))
@ -260,74 +268,9 @@ suite "NetworkStore - multiple nodes":
await connectNodes(switch) await connectNodes(switch)
await sleepAsync(1.seconds) await sleepAsync(1.seconds)
let wantListBlocks = await allFinished(
blocks[0..3].mapIt( downloader.getBlock(it.cid) ))
check wantListBlocks.mapIt( !it.read ) == blocks[0..3]
suite "NetworkStore - discovery":
let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256)
var
switch: seq[Switch]
blockexc: seq[NetworkStore]
blocks: seq[bt.Block]
setup:
while true:
let chunk = await chunker.getBytes()
if chunk.len <= 0:
break
blocks.add(bt.Block.new(chunk).tryGet())
for e in generateNodes(4):
switch.add(e.switch)
blockexc.add(e.blockexc)
await e.blockexc.engine.start()
await allFuturesThrowing( await allFuturesThrowing(
switch.mapIt( it.start() ) allFinished(pendingBlocks1),
) allFinished(pendingBlocks2))
teardown: check pendingBlocks1.mapIt( it.read ) == blocks[0..3]
await allFuturesThrowing( check pendingBlocks2.mapIt( it.read ) == blocks[12..15]
switch.mapIt( it.stop() )
)
switch = @[]
blockexc = @[]
test "Shouldn't launch discovery request if we are already connected":
await blockexc[0].engine.blocksHandler(switch[1].peerInfo.peerId, blocks)
blockexc[0].engine.discovery.findBlockProviders_var = proc(d: Discovery, cid: Cid): seq[SignedPeerRecord] =
check false
await connectNodes(switch)
let blk = await blockexc[1].engine.requestBlock(blocks[0].cid)
test "E2E discovery":
# Distribute the blocks amongst 1..3
# Ask 0 to download everything without connecting him beforehand
var advertised: Table[Cid, SignedPeerRecord]
blockexc[1].engine.discovery.publishProvide_var = proc(d: Discovery, cid: Cid) =
advertised[cid] = switch[1].peerInfo.signedPeerRecord
blockexc[2].engine.discovery.publishProvide_var = proc(d: Discovery, cid: Cid) =
advertised[cid] = switch[2].peerInfo.signedPeerRecord
blockexc[3].engine.discovery.publishProvide_var = proc(d: Discovery, cid: Cid) =
advertised[cid] = switch[3].peerInfo.signedPeerRecord
await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5])
await blockexc[2].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[4..10])
await blockexc[3].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[10..15])
blockexc[0].engine.discovery.findBlockProviders_var = proc(d: Discovery, cid: Cid): seq[SignedPeerRecord] =
if cid in advertised:
result.add(advertised[cid])
let futs = collect(newSeq):
for b in blocks:
blockexc[0].engine.requestBlock(b.cid)
await allFutures(futs)


@ -66,8 +66,9 @@ suite "NetworkStore engine basic":
wallet, wallet,
network, network,
discovery) discovery)
for b in blocks: for b in blocks:
discard engine.discoverBlock(b.cid) discard engine.pendingBlocks.getWantHandle(b.cid)
engine.setupPeer(peerId) engine.setupPeer(peerId)
await done await done
@ -171,7 +172,7 @@ suite "NetworkStore engine handlers":
test "stores blocks in local store": test "stores blocks in local store":
let pending = blocks.mapIt( let pending = blocks.mapIt(
engine.pendingBlocks.addOrAwait( it.cid ) engine.pendingBlocks.getWantHandle( it.cid )
) )
await engine.blocksHandler(peerId, blocks) await engine.blocksHandler(peerId, blocks)


@ -11,10 +11,8 @@ import ../examples
proc generateNodes*( proc generateNodes*(
num: Natural, num: Natural,
blocks: openArray[bt.Block] = [], blocks: openArray[bt.Block] = []):
secureManagers: openarray[SecureProtocol] = [ seq[tuple[switch: Switch, blockexc: NetworkStore]] =
SecureProtocol.Noise,
]): seq[tuple[switch: Switch, blockexc: NetworkStore]] =
for i in 0..<num: for i in 0..<num:
let let
switch = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr}) switch = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr})
@ -25,8 +23,6 @@ proc generateNodes*(
engine = BlockExcEngine.new(localStore, wallet, network, discovery) engine = BlockExcEngine.new(localStore, wallet, network, discovery)
networkStore = NetworkStore.new(engine, localStore) networkStore = NetworkStore.new(engine, localStore)
switch.mount(network)
switch.mount(network) switch.mount(network)
result.add((switch, networkStore)) result.add((switch, networkStore))


@ -4,5 +4,6 @@ import ./blockexc/protobuf/testpayments as testprotobufpayments
import ./blockexc/protobuf/testpresence import ./blockexc/protobuf/testpresence
import ./blockexc/engine/testpayments as testenginepayments import ./blockexc/engine/testpayments as testenginepayments
import ./blockexc/testblockexc import ./blockexc/testblockexc
import ./blockexc/discovery/testdiscovery
{.warning[UnusedImport]: off.} {.warning[UnusedImport]: off.}