Merge 34b111f7dc1458cdbbe8186ae794235d59c81963 into 3bc6aa8b8d5ee72d23d793a0e909016214cdd599

This commit is contained in:
Chrysostomos Nanakos 2026-02-27 12:44:11 +02:00 committed by GitHub
commit b50e4a00da
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
91 changed files with 7621 additions and 3156 deletions

View File

@ -114,7 +114,7 @@ when (NimMajor, NimMinor, NimPatch) >= (1, 6, 11):
"BareExcept:off"
when (NimMajor, NimMinor) >= (2, 0):
--mm:
refc
orc
switch("define", "withoutPCRE")
@ -148,7 +148,7 @@ switch("define", "chronicles_sinks=textlines[dynamic],json[dynamic],textlines[dy
# Workaround for assembler incompatibility between constantine and secp256k1
switch("define", "use_asm_syntax_intel=false")
switch("define", "ctt_asm=false")
switch("define", "ctt_asm=true")
# Allow the use of old-style case objects for nim config compatibility
switch("define", "nimOldCaseObjects")

View File

@ -19,7 +19,7 @@ const RET_PROGRESS*: cint = 3
## Returns RET_OK as acknowledgment and call the callback
## with RET_OK code and the provided message.
proc success*(callback: StorageCallback, msg: string, userData: pointer): cint =
callback(RET_OK, cast[ptr cchar](msg), cast[csize_t](len(msg)), userData)
callback(RET_OK, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_OK

View File

@ -1,5 +1,5 @@
import ./blockexchange/[network, engine, peers]
import ./blockexchange/protobuf/[blockexc, presence]
import ./blockexchange/protocol/[message, presence]
export network, engine, blockexc, presence, peers
export network, engine, message, presence, peers

View File

@ -1,5 +1,12 @@
import ./engine/discovery
import ./engine/advertiser
import ./engine/engine
import ./engine/scheduler
import ./engine/swarm
import ./engine/downloadcontext
import ./engine/activedownload
import ./engine/downloadmanager
export discovery, advertiser, engine
export
discovery, advertiser, engine, scheduler, swarm, downloadcontext, activedownload,
downloadmanager

View File

@ -0,0 +1,410 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/[tables, sets, monotimes, options]
import pkg/chronos
import pkg/libp2p
import pkg/metrics
import pkg/questionable
import ../protocol/message
import ../../blocktype
import ../../logutils
import ./scheduler
import ./swarm
import ./downloadcontext
export scheduler, swarm, downloadcontext
logScope:
topics = "storage activedownload"
declareGauge(
storage_block_exchange_retrieval_time_us,
"storage blockexchange block retrieval time us",
)
type
  RetriesExhaustedError* = object of StorageError
    ## Raised (through block handles) when a block's retry budget is spent.

  BlockHandle* = Future[?!Block].Raising([CancelledError])
    ## Resolves with the block payload (or an error) once retrieved.
  BlockHandleOpaque* = Future[?!void].Raising([CancelledError])
    ## Resolves when the block arrives, without carrying the payload.

  BlockReq* = object
    handle*: BlockHandle # typed handle resolved with the block payload
    opaqueHandle*: BlockHandleOpaque # payload-free handle for the same block
    requested*: Option[PeerId] # peer the block is claimed by, if any
    inBatch*: Option[uint64] # start index of the batch it was dispatched in
    blockRetries*: int # remaining retry budget for this block
    startTime*: int64 # monotime ticks at request creation (metrics)

  PendingBatch* = object
    start*: uint64
    count*: uint64
    localCount*: uint64 # blocks already local when batch was scheduled
    peerId*: PeerId
    sentAt*: Moment
    timeoutFuture*: Future[void] # timeout handler to cancel on completion
    requestFuture*: Future[void] # request future to cancel on timeout

  ActiveDownload* = ref object
    ## Per-download state: block requests, dispatched batches and completion.
    id*: uint64 # for request/response correlation - echoed in protocol
    cid*: Cid
    ctx*: DownloadContext
    blocks*: Table[BlockAddress, BlockReq] # per-download block requests
    pendingBatches*: Table[uint64, PendingBatch] # batch start -> pending info
    inFlightBatches*: Table[PeerId, seq[Future[void]]]
      # track in-flight requests per peer for BDP - used as self-cleaning counter
    exhaustedBlocks*: HashSet[BlockAddress]
      # blocks that exhausted retries - failed permanently
    blockRetries*: int # default retry budget handed to each new BlockReq
    retryInterval*: Duration
    cancelled*: bool
    completionFuture*: Future[?!void].Raising([CancelledError])
      # resolved once by signalCompletionIfDone / cancellation
proc waitForComplete*(
    download: ActiveDownload
): Future[?!void] {.async: (raises: [CancelledError]).} =
  ## Suspend until the download finishes or fails, forwarding the result
  ## stored in its completion future.
  await download.completionFuture
proc signalCompletionIfDone(download: ActiveDownload, error: ref StorageError = nil) =
  ## Resolve the completion future at most once: with `error` when supplied,
  ## otherwise with success once the context reports completion. No-op when
  ## the future is already finished or the download is still in progress.
  let completion = download.completionFuture
  if completion.finished:
    return
  if not error.isNil:
    completion.complete(void.failure(error))
  elif download.ctx.isComplete:
    completion.complete(success())
proc makeBlockAddress*(download: ActiveDownload, index: uint64): BlockAddress =
  ## Build the tree-scoped address of block `index` within this download.
  result = BlockAddress(treeCid: download.cid, index: int(index))
proc getOrCreateBlockReq(
    download: ActiveDownload, address: BlockAddress, requested: Option[PeerId]
): BlockReq =
  ## Return the existing BlockReq for `address`, or create and register a new
  ## one. A new request installs callbacks on its typed handle so that the
  ## table entry removes itself when the handle completes or is cancelled.
  download.blocks.withValue(address, blkReq):
    return blkReq[]
  do:
    let blkReq = BlockReq(
      handle: BlockHandle.init("ActiveDownload.getWantHandle"),
      opaqueHandle: BlockHandleOpaque.init("ActiveDownload.getWantHandleOpaque"),
      requested: requested,
      inBatch: none(uint64),
      blockRetries: download.blockRetries,
      startTime: getMonoTime().ticks, # for the retrieval-time metric
    )
    download.blocks[address] = blkReq
    let handle = blkReq.handle
    # Self-cleaning: drop the table entry once the handle resolves.
    proc cleanUpBlock(data: pointer) {.raises: [].} =
      download.blocks.del(address)

    handle.addCallback(cleanUpBlock)
    # On cancellation, clean up immediately instead of waiting for completion.
    handle.cancelCallback = proc(data: pointer) {.raises: [].} =
      if not handle.finished:
        handle.removeCallback(cleanUpBlock)
        cleanUpBlock(nil)
    return blkReq
proc getWantHandle*(
    download: ActiveDownload,
    address: BlockAddress,
    requested: Option[PeerId] = none(PeerId),
): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} =
  ## Future resolving with the block at `address`; creates the request if needed.
  download.getOrCreateBlockReq(address, requested).handle

proc getWantHandleOpaque*(
    download: ActiveDownload,
    address: BlockAddress,
    requested: Option[PeerId] = none(PeerId),
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
  ## Future resolving when the block at `address` arrives, without the payload.
  download.getOrCreateBlockReq(address, requested).opaqueHandle
proc completeWantHandle*(
    download: ActiveDownload, address: BlockAddress, blk: Option[Block] = none(Block)
): bool {.raises: [].} =
  ## Complete the pending handles for `address`. When `blk` carries a payload
  ## the typed handle is resolved with it; in both cases the opaque handle is
  ## resolved with success. Returns true when a handle was newly completed,
  ## false when nothing was pending or it had already been completed.
  download.blocks.withValue(address, blockReq):
    proc recordRetrievalTime(startTime: int64) =
      ## Export block retrieval latency (microseconds) to the metrics gauge.
      let
        stopTime = getMonoTime().ticks
        retrievalDurationUs = (stopTime - startTime) div 1000
      storage_block_exchange_retrieval_time_us.set(retrievalDurationUs)

    if blk.isSome:
      if not blockReq[].handle.finished:
        blockReq[].handle.complete(success(blk.get))
        # Fix: the opaque handle may already have been resolved through the
        # payload-free path below; completing a finished future would assert.
        if not blockReq[].opaqueHandle.finished:
          blockReq[].opaqueHandle.complete(success())
        recordRetrievalTime(blockReq[].startTime)
        return true
      else:
        trace "Want handle already completed", address
        return false
    else:
      if not blockReq[].opaqueHandle.finished:
        blockReq[].opaqueHandle.complete(success())
        recordRetrievalTime(blockReq[].startTime)
        return true
      else:
        return false
  do:
    trace "No pending want handle found", address
    return false
proc failWantHandle(
    download: ActiveDownload, address: BlockAddress, error: ref StorageError
) {.raises: [].} =
  ## Fail both pending handles for `address` with `error`. Each handle is
  ## guarded independently: the opaque handle can be resolved separately from
  ## the typed one (see completeWantHandle's payload-free path), so completing
  ## it unconditionally could double-complete a finished future.
  download.blocks.withValue(address, blockReq):
    if not blockReq[].handle.finished:
      blockReq[].handle.complete(Block.failure(error))
    if not blockReq[].opaqueHandle.finished:
      blockReq[].opaqueHandle.complete(Result[void, ref CatchableError].err(error))
func retries*(download: ActiveDownload, address: BlockAddress): int =
  ## Remaining retry budget for `address`; 0 when the block is not tracked.
  result = 0
  download.blocks.withValue(address, pending):
    result = pending[].blockRetries

func decRetries*(download: ActiveDownload, address: BlockAddress) =
  ## Consume one retry for `address`; no-op when the block is not tracked.
  download.blocks.withValue(address, pending):
    dec pending[].blockRetries

func retriesExhausted*(download: ActiveDownload, address: BlockAddress): bool =
  ## True when the tracked block has no retries left; false when untracked.
  result = false
  download.blocks.withValue(address, pending):
    result = pending[].blockRetries <= 0
proc decrementBlockRetries*(
    download: ActiveDownload, addresses: seq[BlockAddress]
): seq[BlockAddress] =
  ## Consume one retry for each tracked address and return those whose budget
  ## dropped to zero (candidates for permanent failure). Untracked addresses
  ## are ignored.
  for address in addresses:
    download.blocks.withValue(address, req):
      dec req[].blockRetries
      if req[].blockRetries <= 0:
        result.add(address)
proc failExhaustedBlocks*(download: ActiveDownload, addresses: seq[BlockAddress]) =
  ## Permanently fail the given addresses: record them as exhausted, count
  ## them as received (so completion accounting still converges), fail their
  ## handles with RetriesExhaustedError, and drop them from the request table.
  if addresses.len == 0:
    return
  for address in addresses:
    download.exhaustedBlocks.incl(address)
    # counted towards `received` so isComplete can still be reached
    download.ctx.received += 1
  let error = (ref RetriesExhaustedError)(
    msg: "Block retries exhausted after " & $download.blockRetries & " attempts"
  )
  for address in addresses:
    download.failWantHandle(address, error)
    download.blocks.del(address)
  # propagate the failure to anyone awaiting the whole download
  download.signalCompletionIfDone(error)
proc isBlockExhausted*(download: ActiveDownload, address: BlockAddress): bool =
  ## Whether `address` permanently failed after running out of retries.
  download.exhaustedBlocks.contains(address)

proc getBlockAddressesForRange*(
    download: ActiveDownload, start: uint64, count: uint64
): seq[BlockAddress] =
  ## Addresses in [start, start + count) that currently have a tracked request.
  for index in start ..< start + count:
    let candidate = download.makeBlockAddress(index)
    if candidate in download.blocks:
      result.add(candidate)
func isRequested*(download: ActiveDownload, address: BlockAddress): bool =
  ## True when the tracked block has already been claimed by some peer.
  result = false
  download.blocks.withValue(address, pending):
    result = pending[].requested.isSome

func getRequestPeer*(download: ActiveDownload, address: BlockAddress): Option[PeerId] =
  ## Peer the block was requested from; none when untracked or unclaimed.
  result = none(PeerId)
  download.blocks.withValue(address, pending):
    result = pending[].requested

proc markRequested*(
    download: ActiveDownload, address: BlockAddress, peer: PeerId
): bool =
  ## Claim `address` for `peer`. Returns true when newly claimed; false when
  ## it is already claimed — or when the address is not tracked at all.
  if download.isRequested(address):
    return false
  download.blocks.withValue(address, pending):
    pending[].requested = some(peer)
    return true
proc clearRequest*(
    download: ActiveDownload, address: BlockAddress, peer: Option[PeerId] = none(PeerId)
) =
  ## Release the request claim (and batch membership) for `address`. When
  ## `peer` is supplied, asserts it matches the recorded claimant.
  download.blocks.withValue(address, pending):
    if peer.isSome:
      assert peer == pending[].requested
    pending[].requested = none(PeerId)
    pending[].inBatch = none(uint64)

func contains*(download: ActiveDownload, address: BlockAddress): bool =
  ## `in` support: whether a request for `address` is currently tracked.
  address in download.blocks

proc markBlockReturned*(download: ActiveDownload) =
  ## Record that one block was handed to the consumer (delegates to context).
  download.ctx.markBlockReturned()
proc markBatchInFlight*(
    download: ActiveDownload,
    start: uint64,
    count: uint64,
    localCount: uint64,
    peerId: PeerId,
    timeoutFuture: Future[void] = nil,
) =
  ## Record that batch [start, start + count) was dispatched to `peerId`, and
  ## stamp each tracked block in the range as requested from that peer.
  ## `localCount` = blocks already local when the batch was scheduled.
  download.pendingBatches[start] = PendingBatch(
    start: start,
    count: count,
    localCount: localCount,
    peerId: peerId,
    sentAt: Moment.now(),
    timeoutFuture: timeoutFuture,
  )
  download.ctx.markBatchInFlight(start, count, peerId)
  for i in start ..< start + count:
    let address = download.makeBlockAddress(i)
    download.blocks.withValue(address, req):
      req[].requested = some(peerId)
      req[].inBatch = some(start)
proc setBatchTimeoutFuture*(
    download: ActiveDownload, start: uint64, timeoutFuture: Future[void]
) =
  ## Attach the timeout task for the pending batch starting at `start`;
  ## no-op when the batch is no longer pending.
  download.pendingBatches.withValue(start, pending):
    pending[].timeoutFuture = timeoutFuture

proc setBatchRequestFuture*(
    download: ActiveDownload, start: uint64, requestFuture: Future[void]
) =
  ## Attach the request task for the pending batch starting at `start`;
  ## no-op when the batch is no longer pending.
  download.pendingBatches.withValue(start, pending):
    pending[].requestFuture = requestFuture
proc completeBatchLocal*(download: ActiveDownload, start: uint64, count: uint64) =
  ## Complete a batch satisfied entirely from the local store (0 bytes
  ## transferred), then check for overall completion.
  download.ctx.scheduler.markComplete(start)
  download.ctx.markBatchReceived(start, count, 0)
  download.signalCompletionIfDone()

proc completeBatch*(
    download: ActiveDownload,
    start: uint64,
    blocksDeliveryCount: uint64,
    totalBytes: uint64,
) =
  ## Complete a peer-delivered batch: cancel its timeout, account for both
  ## delivered and already-local blocks, then check for overall completion.
  var localCount: uint64 = 0
  download.pendingBatches.withValue(start, pending):
    localCount = pending[].localCount
    if not pending[].timeoutFuture.isNil and not pending[].timeoutFuture.finished:
      pending[].timeoutFuture.cancelSoon()
  download.pendingBatches.del(start)
  download.ctx.scheduler.markComplete(start)
  download.ctx.markBatchReceived(start, localCount + blocksDeliveryCount, totalBytes)
  download.signalCompletionIfDone()
proc requeueBatch*(
    download: ActiveDownload, start: uint64, count: uint64, front: bool = false
) =
  ## Hand an in-flight batch back to the scheduler (`front` = retry first)
  ## and release the per-block claims so the blocks can be re-assigned.
  download.pendingBatches.withValue(start, pending):
    if not pending[].timeoutFuture.isNil and not pending[].timeoutFuture.finished:
      pending[].timeoutFuture.cancelSoon()
  download.pendingBatches.del(start)
  if front:
    download.ctx.scheduler.requeueFront(start, count)
  else:
    download.ctx.scheduler.requeueBack(start, count)
  for i in start ..< start + count:
    let address = download.makeBlockAddress(i)
    download.blocks.withValue(address, req):
      req[].requested = none(PeerId)
      req[].inBatch = none(uint64)
proc partialCompleteBatch*(
    download: ActiveDownload,
    originalStart: uint64,
    originalCount: uint64, # NOTE(review): currently unused — kept for interface stability
    receivedBlocksCount: uint64,
    missingRanges: seq[tuple[start: uint64, count: uint64]],
    totalBytes: uint64,
) =
  ## Account for a partially-delivered batch: cancel its timeout, hand the
  ## missing sub-ranges back to the scheduler, record what was received, and
  ## release request claims on the still-missing blocks.
  download.pendingBatches.withValue(originalStart, pending):
    if not pending[].timeoutFuture.isNil and not pending[].timeoutFuture.finished:
      pending[].timeoutFuture.cancelSoon()
  download.pendingBatches.del(originalStart)
  var missingBatches: seq[BlockBatch] = @[]
  for r in missingRanges:
    missingBatches.add((start: r.start, count: r.count))
  download.ctx.scheduler.partialComplete(originalStart, missingBatches)
  download.ctx.markBatchReceived(originalStart, receivedBlocksCount, totalBytes)
  for r in missingRanges:
    for i in r.start ..< r.start + r.count:
      let address = download.makeBlockAddress(i)
      download.blocks.withValue(address, req):
        req[].requested = none(PeerId)
        req[].inBatch = none(uint64)
  download.signalCompletionIfDone()
proc isDownloadComplete*(download: ActiveDownload): bool =
  ## All blocks accounted for (delegates to the context's completion check).
  download.ctx.isComplete()

proc hasWorkRemaining*(download: ActiveDownload): bool =
  ## Whether the scheduler still holds batches to dispatch.
  not download.ctx.scheduler.isEmpty()

proc pendingBatchCount*(download: ActiveDownload): int =
  ## Number of batches currently awaiting a response.
  download.pendingBatches.len
proc handlePeerFailure*(download: ActiveDownload, peerId: PeerId) =
  ## React to a failed peer: drop its in-flight block markers and requeue all
  ## of its pending batches at the front of the scheduler.
  download.ctx.clearInFlightForPeer(peerId)
  # collect first: requeueBatch mutates pendingBatches, which we are iterating
  var toRequeue: seq[tuple[start: uint64, count: uint64]] = @[]
  for start, batch in download.pendingBatches:
    if batch.peerId == peerId:
      toRequeue.add((start, batch.count))
  for (start, count) in toRequeue:
    download.requeueBatch(start, count, front = true)
  trace "Requeued batches from failed peer", peer = peerId, batches = toRequeue.len
proc getSwarm(download: ActiveDownload): Swarm =
  ## Private accessor for the download's swarm.
  download.ctx.swarm

proc updatePeerAvailability*(
    download: ActiveDownload, peerId: PeerId, availability: BlockAvailability
) =
  ## Record `availability` for `peerId`, adding the peer to the swarm first
  ## when it is not yet tracked.
  if download.ctx.swarm.getPeer(peerId).isNone:
    discard download.ctx.swarm.addPeer(peerId, availability)
  else:
    download.ctx.swarm.updatePeerAvailability(peerId, availability)

proc addPeerIfAbsent*(
    download: ActiveDownload, peerId: PeerId, availability: BlockAvailability
): bool =
  ## Add `peerId` to the swarm when unknown. Returns true when the caller
  ## should send a WantHave (new peer, or a known peer whose availability is
  ## not yet complete); false when the peer already advertises everything.
  let existingPeer = download.ctx.swarm.getPeer(peerId)
  if existingPeer.isSome:
    # peer already tracked, skip if bakComplete
    return existingPeer.get().availability.kind != bakComplete
  discard download.ctx.swarm.addPeer(peerId, availability)
  return true # new peer added, send WantHave

View File

@ -11,14 +11,10 @@
import pkg/chronos
import pkg/libp2p/cid
import pkg/libp2p/multicodec
import pkg/metrics
import pkg/questionable
import pkg/questionable/results
import ../protobuf/presence
import ../peers
import ../../utils
import ../../utils/exceptions
import ../../utils/trackedfutures

View File

@ -7,19 +7,12 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import std/algorithm
import pkg/chronos
import pkg/libp2p/cid
import pkg/libp2p/multicodec
import pkg/metrics
import pkg/questionable
import pkg/questionable/results
import ./pendingblocks
import ../protobuf/presence
import ../network
import ../peers
@ -28,7 +21,6 @@ import ../../utils/trackedfutures
import ../../discovery
import ../../stores/blockstore
import ../../logutils
import ../../manifest
logScope:
topics = "storage discoveryengine"
@ -38,60 +30,23 @@ declareGauge(storage_inflight_discovery, "inflight discovery requests")
const
DefaultConcurrentDiscRequests = 10
DefaultDiscoveryTimeout = 1.minutes
DefaultMinPeersPerBlock = 3
DefaultMaxPeersPerBlock = 8
DefaultDiscoveryLoopSleep = 3.seconds
type DiscoveryEngine* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
peers*: PeerCtxStore # Peer context store
peers*: PeerContextStore # Peer context store
network*: BlockExcNetwork # Network interface
discovery*: Discovery # Discovery interface
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
discEngineRunning*: bool # Indicates if discovery is running
concurrentDiscReqs: int # Concurrent discovery requests
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Min number of peers with block
maxPeersPerBlock*: int # Max number of peers with block
discoveryLoopSleep: Duration # Discovery loop sleep
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
# Inflight discovery requests
proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} =
var haves = b.peers.peersHave(cid)
let count = haves.len - b.maxPeersPerBlock
if count <= 0:
return
haves.sort(
proc(a, b: BlockExcPeerCtx): int =
cmp(a.lastExchange, b.lastExchange)
)
let toRemove = haves[0 ..< count]
for peer in toRemove:
try:
peer.cleanPresence(BlockAddress.init(cid))
trace "Removed block presence from peer", cid, peer = peer.id
except CatchableError as exc:
error "Failed to clean presence for peer",
cid, peer = peer.id, error = exc.msg, name = exc.name
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
try:
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
await b.discoveryQueue.put(cid)
await sleepAsync(b.discoveryLoopSleep)
except CancelledError:
trace "Discovery loop cancelled"
proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
## Run discovery tasks
##
## Peer availability is tracked per-download in DownloadContext.swarm.
## This loop just runs discovery for CIDs that are queued.
try:
while b.discEngineRunning:
@ -103,30 +58,21 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
trace "Running discovery task for cid", cid
let haves = b.peers.peersHave(cid)
let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request
storage_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
if haves.len > b.maxPeersPerBlock:
trace "Cleaning up excess peers",
cid, peers = haves.len, max = b.maxPeersPerBlock
b.cleanupExcessPeers(cid)
continue
if haves.len < b.minPeersPerBlock:
let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request
defer:
b.inFlightDiscReqs.del(cid)
storage_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
defer:
b.inFlightDiscReqs.del(cid)
storage_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
if (await request.withTimeout(DefaultDiscoveryTimeout)) and
peers =? (await request).catch:
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
if (await request.withTimeout(DefaultDiscoveryTimeout)) and
peers =? (await request).catch:
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
for i, f in dialed:
if f.failed:
await b.discovery.removeProvider(peers[i].data.peerId)
for i, f in dialed:
if f.failed:
await b.discovery.removeProvider(peers[i].data.peerId)
except CancelledError:
trace "Discovery task cancelled"
return
@ -156,9 +102,6 @@ proc start*(b: DiscoveryEngine) {.async: (raises: []).} =
let fut = b.discoveryTaskLoop()
b.trackedFutures.track(fut)
b.discoveryLoop = b.discoveryQueueLoop()
b.trackedFutures.track(b.discoveryLoop)
trace "Discovery engine started"
proc stop*(b: DiscoveryEngine) {.async: (raises: []).} =
@ -180,28 +123,20 @@ proc stop*(b: DiscoveryEngine) {.async: (raises: []).} =
proc new*(
T: type DiscoveryEngine,
localStore: BlockStore,
peers: PeerCtxStore,
peers: PeerContextStore,
network: BlockExcNetwork,
discovery: Discovery,
pendingBlocks: PendingBlocksManager,
concurrentDiscReqs = DefaultConcurrentDiscRequests,
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
minPeersPerBlock = DefaultMinPeersPerBlock,
maxPeersPerBlock = DefaultMaxPeersPerBlock,
): DiscoveryEngine =
## Create a discovery engine instance for advertising services
## Create a discovery engine instance
##
DiscoveryEngine(
localStore: localStore,
peers: peers,
network: network,
discovery: discovery,
pendingBlocks: pendingBlocks,
concurrentDiscReqs: concurrentDiscReqs,
discoveryQueue: newAsyncQueue[Cid](concurrentDiscReqs),
trackedFutures: TrackedFutures.new(),
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
discoveryLoopSleep: discoveryLoopSleep,
minPeersPerBlock: minPeersPerBlock,
maxPeersPerBlock: maxPeersPerBlock,
)

View File

@ -0,0 +1,256 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/[tables, options, random]
import pkg/chronos
import pkg/libp2p/cid
import pkg/libp2p/peerid
import ./scheduler
import ./swarm
import ../peers/peercontext
import ../../storagetypes
import ../protocol/message
import ../protocol/constants
import ../utils
export scheduler, peercontext
const
  PresenceWindowBytes*: uint64 = 1024 * 1024 * 1024
    ## Presence window size in bytes (1 GiB).
  PresenceWindowBlocks*: uint64 = PresenceWindowBytes div DefaultBlockSize.uint64
    ## Window length in blocks at the default block size.
  MaxPresenceWindowBlocks*: uint64 = PresenceWindowBytes div MinBlockSize
    ## Upper bound on blocks per window (at the smallest block size).
  PresenceWindowThreshold*: float = 0.75
    ## Fraction of the window that must complete before advancing it.
  PresenceBroadcastIntervalMin*: Duration = 5.seconds
  PresenceBroadcastIntervalMax*: Duration = 10.seconds
  PresenceBroadcastBlockThreshold*: uint64 = PresenceWindowBlocks div 2
    ## Broadcast availability early after this many newly-completed blocks.
  DefaultBatchTimeoutUnknownBlockSize* = 30.seconds # timeout for unknown block size

static:
  # Compile-time guard: a worst-case presence message (maximum number of
  # ranges is blocks/2, at 16 bytes per range) must fit the protocol's
  # message size limit.
  const
    worstCaseRanges = MaxPresenceWindowBlocks div 2
    worstCasePresenceBytes = worstCaseRanges * 16 + 1024 # +1KB safe overhead
  doAssert worstCasePresenceBytes < MaxMessageSize,
    "Presence window too large for MaxMessageSize with minimum block size. " &
    "Worst case: " & $worstCasePresenceBytes & " bytes, limit: " & $MaxMessageSize &
    " bytes"
type
  DownloadProgress = object
    ## Snapshot of a download's transfer counters (used by test helpers).
    blocksCompleted*: uint64
    totalBlocks*: uint64
    bytesTransferred*: uint64

  DownloadContext* = ref object
    ## Bookkeeping for a single tree download: scheduling, swarm state,
    ## in-flight tracking and the sliding presence window.
    treeCid*: Cid
    blockSize*: uint32 # 0 until learned via setBlockSize
    totalBlocks*: uint64
    received*: uint64
    blocksReturned*: uint64 # blocks handed to the consumer
    bytesReceived*: uint64
    scheduler*: Scheduler
    swarm*: Swarm
    inFlightBlocks*: Table[uint64, PeerId] # block index -> peer fetching it
    presenceWindowStart*: uint64
    presenceWindowEnd*: uint64 # exclusive
    presenceWindowSize*: uint64 # in blocks
    lastAvailabilityBroadcastTime*: Moment
    lastAvailabilityBroadcastedWatermark*: uint64
    presenceBroadcastInterval*: Duration # jittered, refreshed per broadcast
proc computePresenceWindowSize*(blockSize: uint32): uint64 =
  ## Presence-window length in blocks for the given block size; at least 1
  ## even when a single block exceeds the window byte budget.
  max(PresenceWindowBytes div uint64(blockSize), 1'u64)
proc randomBroadcastInterval(): Duration =
  ## Uniformly random broadcast interval within the configured bounds —
  ## jitter so peers do not all broadcast availability at the same moment
  ## (thundering herd). Uses std/random's global RNG.
  rand(
    PresenceBroadcastIntervalMin.milliseconds.int ..
      PresenceBroadcastIntervalMax.milliseconds.int
  ).milliseconds
proc new*(
    T: type DownloadContext,
    treeCid: Cid,
    blockSize: uint32,
    totalBlocks: uint64,
    alreadyHave: uint64 = 0,
): DownloadContext =
  ## Create a download context. `blockSize == 0` means the size is not yet
  ## known, so the default presence window is used until `setBlockSize` is
  ## called. Blocks already held locally are pre-counted via `alreadyHave`.
  let windowSize =
    if blockSize > 0:
      computePresenceWindowSize(blockSize)
    else:
      PresenceWindowBlocks # fallback until the real block size is learned
  let initialWindowEnd = min(windowSize, totalBlocks)
  DownloadContext(
    treeCid: treeCid,
    blockSize: blockSize,
    totalBlocks: totalBlocks,
    received: alreadyHave,
    blocksReturned: 0,
    bytesReceived: 0,
    scheduler: Scheduler.new(),
    swarm: Swarm.new(),
    inFlightBlocks: initTable[uint64, PeerId](),
    presenceWindowStart: 0,
    presenceWindowEnd: initialWindowEnd,
    presenceWindowSize: windowSize,
    lastAvailabilityBroadcastTime: Moment.now(),
    lastAvailabilityBroadcastedWatermark: 0,
    presenceBroadcastInterval: randomBroadcastInterval(),
  )
proc isComplete*(ctx: DownloadContext): bool =
  ## Done when every block was either received or returned to the consumer.
  ctx.blocksReturned >= ctx.totalBlocks or ctx.received >= ctx.totalBlocks

proc hasBlockSize*(ctx: DownloadContext): bool =
  ## Whether the block size has been learned (non-zero).
  ctx.blockSize > 0

proc setBlockSize*(ctx: DownloadContext, blockSize: uint32) =
  ## Record the block size once (first non-zero value wins) and resize the
  ## scheduler's batches accordingly.
  if ctx.blockSize == 0 and blockSize > 0:
    ctx.blockSize = blockSize
    ctx.scheduler.updateBatchSize(computeBatchSize(blockSize).uint64)
proc markBlockReturned*(ctx: DownloadContext) =
  # mark that a block was returned to the consumer by the iterator
  ctx.blocksReturned += 1

proc markBatchReceived*(
    ctx: DownloadContext, start: uint64, count: uint64, totalBytes: uint64
) =
  ## Account for `count` received blocks / `totalBytes`, and clear the
  ## in-flight markers for the batch's index range.
  ctx.received += count
  ctx.bytesReceived += totalBytes
  for i in start ..< start + count:
    ctx.inFlightBlocks.del(i)
proc markBatchInFlight*(
    ctx: DownloadContext, start: uint64, count: uint64, peerId: PeerId
) =
  ## Tag every block index in [start, start + count) as fetched by `peerId`.
  for index in start ..< start + count:
    ctx.inFlightBlocks[index] = peerId

proc clearInFlightForPeer*(ctx: DownloadContext, peerId: PeerId) =
  ## Forget all in-flight markers owned by `peerId`. Indices are collected
  ## first so the table is not mutated while being iterated.
  var stale: seq[uint64] = @[]
  for index, owner in ctx.inFlightBlocks:
    if owner == peerId:
      stale.add(index)
  for index in stale:
    ctx.inFlightBlocks.del(index)
proc currentPresenceWindow*(ctx: DownloadContext): tuple[start: uint64, count: uint64] =
  ## The active presence window as (start, count).
  (
    start: ctx.presenceWindowStart,
    count: ctx.presenceWindowEnd - ctx.presenceWindowStart,
  )

proc needsNextPresenceWindow*(ctx: DownloadContext): bool =
  ## True when the completed watermark has crossed the window's advance
  ## threshold and blocks remain beyond the current window.
  if ctx.presenceWindowEnd >= ctx.totalBlocks:
    return false
  let
    watermark = ctx.scheduler.completedWatermark()
    windowSize = ctx.presenceWindowEnd - ctx.presenceWindowStart
    threshold =
      ctx.presenceWindowStart + (windowSize.float * PresenceWindowThreshold).uint64
  watermark >= threshold

proc advancePresenceWindow*(ctx: DownloadContext): tuple[start: uint64, count: uint64] =
  ## Slide the window forward by its size (clamped to totalBlocks) and return
  ## the newly covered (start, count) range.
  let
    newStart = ctx.presenceWindowEnd
    newEnd = min(newStart + ctx.presenceWindowSize, ctx.totalBlocks)
  ctx.presenceWindowStart = newStart
  ctx.presenceWindowEnd = newEnd
  let count = newEnd - newStart
  (start: newStart, count: count)
proc trimPresenceBeforeWatermark*(ctx: DownloadContext) =
  ## Drop per-peer availability ranges that lie entirely below the completed
  ## watermark — those blocks are already done, so the ranges are dead weight.
  let watermark = ctx.scheduler.completedWatermark()
  for peerId in ctx.swarm.connectedPeers():
    let peerOpt = ctx.swarm.getPeer(peerId)
    if peerOpt.isSome:
      let peer = peerOpt.get()
      # only trim range-based availability
      if peer.availability.kind == bakRanges:
        var newRanges: seq[tuple[start: uint64, count: uint64]] = @[]
        for (start, count) in peer.availability.ranges:
          let rangeEnd = start + count
          if rangeEnd > watermark:
            # keep ranges not entirely below watermark
            newRanges.add((start, count))
        peer.availability = BlockAvailability.fromRanges(newRanges)
proc shouldBroadcastAvailability*(ctx: DownloadContext): bool =
  ## Broadcast when enough new blocks have completed since the last
  ## broadcast, or when the jittered interval elapsed with any progress.
  ## NOTE(review): assumes the scheduler watermark never moves backwards —
  ## otherwise the uint64 subtraction would wrap; confirm monotonicity.
  let watermark = ctx.scheduler.completedWatermark()
  let newBlocks = watermark - ctx.lastAvailabilityBroadcastedWatermark
  if newBlocks == 0:
    return false
  let timeSinceLast = Moment.now() - ctx.lastAvailabilityBroadcastTime
  newBlocks >= PresenceBroadcastBlockThreshold or
    timeSinceLast >= ctx.presenceBroadcastInterval

proc getAvailabilityBroadcast*(
    ctx: DownloadContext
): tuple[start: uint64, count: uint64] =
  ## Range of blocks newly completed since the last broadcast.
  let watermark = ctx.scheduler.completedWatermark()
  (
    start: ctx.lastAvailabilityBroadcastedWatermark,
    count: watermark - ctx.lastAvailabilityBroadcastedWatermark,
  )

proc markAvailabilityBroadcasted*(ctx: DownloadContext) =
  ## Reset broadcast bookkeeping and draw a fresh jittered interval.
  ctx.lastAvailabilityBroadcastTime = Moment.now()
  ctx.lastAvailabilityBroadcastedWatermark = ctx.scheduler.completedWatermark()
  ctx.presenceBroadcastInterval = randomBroadcastInterval()
proc batchBytes*(ctx: DownloadContext): uint64 =
  ## Size of one scheduler batch in bytes (0 while block size is unknown).
  ctx.scheduler.batchSizeCount.uint64 * ctx.blockSize.uint64

proc batchTimeout*(
    ctx: DownloadContext, peer: PeerContext, batchCount: uint64
): Duration =
  ## Peer-specific timeout for a batch of `batchCount` blocks; falls back to
  ## a fixed default while the block size is still unknown (bytes == 0).
  let bytes = batchCount * ctx.blockSize.uint64
  if bytes > 0:
    peer.batchTimeout(bytes)
  else:
    DefaultBatchTimeoutUnknownBlockSize
## private - only used in tests
proc progress(ctx: DownloadContext): DownloadProgress =
  ## Snapshot of the current transfer counters.
  DownloadProgress(
    blocksCompleted: ctx.received,
    totalBlocks: ctx.totalBlocks,
    bytesTransferred: ctx.bytesReceived,
  )

proc markBlockInFlight(ctx: DownloadContext, index: uint64, peerId: PeerId) =
  ## Tag a single block index as being fetched by `peerId`.
  ctx.inFlightBlocks[index] = peerId

proc isBlockInFlight(ctx: DownloadContext, index: uint64): bool =
  ## Whether some peer is currently fetching `index`.
  ctx.inFlightBlocks.hasKey(index)

proc inFlightCount(ctx: DownloadContext): int =
  ## Number of block indices currently in flight.
  ctx.inFlightBlocks.len

proc remainingBlocks(ctx: DownloadContext): uint64 =
  ## Blocks still to be received, clamped at zero.
  if ctx.received >= ctx.totalBlocks:
    0'u64
  else:
    ctx.totalBlocks - ctx.received

proc presenceWindowContains(ctx: DownloadContext, blockIndex: uint64): bool =
  ## Whether `blockIndex` falls inside the active presence window.
  ctx.presenceWindowStart <= blockIndex and blockIndex < ctx.presenceWindowEnd

View File

@ -0,0 +1,202 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/[tables, sets, options]
import pkg/chronos
import pkg/libp2p
import pkg/questionable
import ../protocol/message
import ../protocol/constants
import ../utils
import ../../blocktype
import ../../logutils
import ./activedownload
export activedownload
logScope:
topics = "storage downloadmanager"
const
  DefaultBlockRetries* = 100 # per-block retry budget for new downloads
  DefaultRetryInterval* = 2.seconds

type
  DownloadManager* = ref object of RootObj
    ## Owns all active downloads, keyed by tree CID and a per-manager id.
    nextDownloadId*: uint64 = 1 # 0 is invalid
    blockRetries*: int # retry budget handed to each new download
    retryInterval*: Duration
    downloads*: Table[Cid, Table[uint64, ActiveDownload]]
      # tree CID -> download id -> active download
    selectionPolicy*: SelectionPolicy # how getNextBatch picks work

  DownloadDesc* = object
    ## Describes a requested (sub-)range of a tree to download.
    cid*: Cid
    blockSize*: uint32 # 0 when not yet known
    startIndex*: uint64
    count*: uint64
proc id*(desc: DownloadDesc): Cid =
  ## A descriptor is identified by its tree CID.
  desc.cid

proc toDownloadDesc*(
    treeCid: Cid, totalBlocks: uint64, blockSize: uint32
): DownloadDesc =
  ## Descriptor covering the whole tree: blocks [0, totalBlocks).
  DownloadDesc(
    cid: treeCid, blockSize: blockSize, startIndex: 0'u64, count: totalBlocks
  )

proc toDownloadDesc*(
    treeCid: Cid, startIndex: uint64, count: uint64, blockSize: uint32
): DownloadDesc =
  ## Descriptor covering an explicit sub-range of the tree.
  result = DownloadDesc(
    cid: treeCid, blockSize: blockSize, startIndex: startIndex, count: count
  )

proc toDownloadDesc*(address: BlockAddress, blockSize: uint32): DownloadDesc =
  ## Descriptor for the single block identified by a tree-scoped address.
  DownloadDesc(
    cid: address.treeCid,
    blockSize: blockSize,
    startIndex: uint64(address.index),
    count: 1,
  )
proc getDownload*(self: DownloadManager, treeCid: Cid): Option[ActiveDownload] =
  ## Any active download for `treeCid`. When several exist, an arbitrary one
  ## (the first the inner table yields) is returned.
  self.downloads.withValue(treeCid, innerTable):
    for _, download in innerTable[]:
      return some(download)
  return none(ActiveDownload)

proc getDownload*(
    self: DownloadManager, downloadId: uint64, cid: Cid
): Option[ActiveDownload] =
  ## The specific download identified by (`cid`, `downloadId`), if any.
  self.downloads.withValue(cid, innerTable):
    innerTable[].withValue(downloadId, download):
      return some(download[])
  return none(ActiveDownload)
proc cancelDownload*(self: DownloadManager, download: ActiveDownload) =
  ## Cancel one download: stop its batch timers/requests, fail all pending
  ## block handles and the completion future, then unregister it.
  ## NOTE(review): failing a handle triggers its cleanup callback, which
  ## deletes from `download.blocks` while we iterate it — assumes the future
  ## library defers callbacks rather than running them inline; confirm.
  download.cancelled = true
  for _, batch in download.pendingBatches:
    if not batch.timeoutFuture.isNil and not batch.timeoutFuture.finished:
      batch.timeoutFuture.cancelSoon()
    if not batch.requestFuture.isNil and not batch.requestFuture.finished:
      batch.requestFuture.cancelSoon()
  for address, req in download.blocks:
    if not req.handle.finished:
      req.handle.fail(newException(CancelledError, "Download cancelled"))
    if not req.opaqueHandle.finished:
      req.opaqueHandle.fail(newException(CancelledError, "Download cancelled"))
  download.blocks.clear()
  if not download.completionFuture.finished:
    download.completionFuture.fail(newException(CancelledError, "Download cancelled"))
  # unregister; drop the inner table when it becomes empty
  self.downloads.withValue(download.cid, innerTable):
    innerTable[].del(download.id)
    if innerTable[].len == 0:
      self.downloads.del(download.cid)
proc cancelDownload*(self: DownloadManager, cid: Cid) =
  ## Cancel every active download for `cid`. Downloads are collected first
  ## because cancelling mutates the inner table being iterated.
  self.downloads.withValue(cid, innerTable):
    var toCancel: seq[ActiveDownload] = @[]
    for _, download in innerTable[]:
      toCancel.add(download)
    for download in toCancel:
      self.cancelDownload(download)

proc releaseDownload*(self: DownloadManager, cid: Cid) =
  ## Release (= cancel) all downloads of `cid`.
  self.cancelDownload(cid)

proc releaseDownload*(self: DownloadManager, downloadId: uint64, cid: Cid) =
  ## Release the specific download identified by (`cid`, `downloadId`), if any.
  let download = self.getDownload(downloadId, cid)
  if download.isSome:
    self.cancelDownload(download.get())
proc getNextBatch*(
    self: DownloadManager, download: ActiveDownload
): Option[tuple[start: uint64, count: uint64]] =
  ## Pull the next batch to request from the download's scheduler, according
  ## to the manager's selection policy. Returns none when nothing is queued.
  case self.selectionPolicy
  of spSequential:
    let next = download.ctx.scheduler.take()
    if next.isSome:
      let batch = next.get()
      result = some((start: batch.start, count: batch.count))
    else:
      result = none(tuple[start: uint64, count: uint64])
proc startDownload*(
    self: DownloadManager, desc: DownloadDesc, missingBlocks: seq[uint64] = @[]
): ActiveDownload =
  ## Register and return a new ActiveDownload described by `desc`.
  ## When `missingBlocks` is non-empty, only those indices are scheduled;
  ## otherwise the range [desc.startIndex, desc.startIndex + desc.count) is.
  let
    totalBlocks = desc.startIndex + desc.count
    ctx = DownloadContext.new(desc.cid, desc.blockSize, totalBlocks)
    # batch size is derived from the block size when known
    batchSize =
      if desc.blockSize > 0:
        computeBatchSize(desc.blockSize)
      else:
        MinBatchSize
  if missingBlocks.len > 0:
    # use explicit indices directly
    ctx.scheduler.initFromIndices(missingBlocks, batchSize.uint64)
  elif desc.count > batchSize.uint64:
    # this is a large download, more than one batch, use lazy mode
    if desc.startIndex == 0:
      ctx.scheduler.init(desc.count, batchSize.uint64)
    else:
      ctx.scheduler.initRange(desc.startIndex, desc.count, batchSize.uint64)
  else:
    # this is a small range, single batch or less, schedule explicit indices
    var indices: seq[uint64] = @[]
    for i in desc.startIndex ..< desc.startIndex + desc.count:
      indices.add(i)
    ctx.scheduler.initFromIndices(indices, batchSize.uint64)
  # allocate a fresh id for this download instance
  let downloadId = self.nextDownloadId
  self.nextDownloadId += 1
  let download = ActiveDownload(
    id: downloadId,
    cid: desc.cid,
    ctx: ctx,
    blocks: initTable[BlockAddress, BlockReq](),
    pendingBatches: initTable[uint64, PendingBatch](),
    inFlightBatches: initTable[PeerId, seq[Future[void]]](),
    exhaustedBlocks: initHashSet[BlockAddress](),
    blockRetries: self.blockRetries,
    retryInterval: self.retryInterval,
    completionFuture:
      Future[?!void].Raising([CancelledError]).init("ActiveDownload.completion"),
  )
  # register under the cid; several concurrent downloads may share a cid
  self.downloads.mgetOrPut(desc.cid, initTable[uint64, ActiveDownload]())[downloadId] =
    download
  trace "Started download",
    cid = desc.cid,
    startIndex = desc.startIndex,
    count = desc.count,
    batchSize = batchSize
  return download
proc new*(
    T: type DownloadManager,
    retries = DefaultBlockRetries,
    interval = DefaultRetryInterval,
    selectionPolicy = spSequential,
): DownloadManager =
  ## Construct a DownloadManager with retry and peer-selection settings.
  result = DownloadManager(
    downloads: initTable[Cid, Table[uint64, ActiveDownload]](),
    selectionPolicy: selectionPolicy,
    blockRetries: retries,
    retryInterval: interval,
  )

File diff suppressed because it is too large Load Diff

View File

@ -1,218 +0,0 @@
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/tables
import std/monotimes
import std/strutils
import pkg/chronos
import pkg/libp2p
import pkg/metrics
import ../protobuf/blockexc
import ../../blocktype
import ../../logutils
logScope:
topics = "storage pendingblocks"
declareGauge(
storage_block_exchange_pending_block_requests,
"storage blockexchange pending block requests",
)
declareGauge(
storage_block_exchange_retrieval_time_us,
"storage blockexchange block retrieval time us",
)
const
  DefaultBlockRetries* = 3000 ## default per-block retry budget
  DefaultRetryInterval* = 2.seconds ## default delay between retry attempts
type
  RetriesExhaustedError* = object of CatchableError
    ## raised when a block request used up its whole retry budget
  BlockHandle* = Future[Block].Raising([CancelledError, RetriesExhaustedError])
    ## future completed once the block arrives (or retries run out)
  BlockReq* = object
    handle*: BlockHandle ## completion future handed to waiters
    requested*: ?PeerId ## peer the block was requested from, if any
    blockRetries*: int ## remaining retry budget for this request
    startTime*: int64 ## monotonic ticks when the request was created
  PendingBlocksManager* = ref object of RootObj
    blockRetries*: int = DefaultBlockRetries ## initial retry budget per block
    retryInterval*: Duration = DefaultRetryInterval ## delay between retries
    blocks*: Table[BlockAddress, BlockReq] # pending Block requests
    lastInclusion*: Moment # time at which we last included a block into our wantlist
proc updatePendingBlockGauge(p: PendingBlocksManager) =
  ## Publish the current pending-request count to the metrics gauge.
  storage_block_exchange_pending_block_requests.set(p.blocks.len.int64)
proc getWantHandle*(
    self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
  ## Add an event for a block
  ##
  ## Returns the existing handle when a request for `address` is already
  ## pending; otherwise registers a new request and returns its handle.
  self.blocks.withValue(address, blk):
    return blk[].handle
  do:
    let blk = BlockReq(
      handle: newFuture[Block]("pendingBlocks.getWantHandle"),
      requested: requested,
      blockRetries: self.blockRetries,
      startTime: getMonoTime().ticks,
    )
    self.blocks[address] = blk
    self.lastInclusion = Moment.now()
    let handle = blk.handle
    # remove the entry and refresh the gauge once the handle settles
    proc cleanUpBlock(data: pointer) {.raises: [].} =
      self.blocks.del(address)
      self.updatePendingBlockGauge()

    handle.addCallback(cleanUpBlock)
    # on cancellation, unhook the completion callback and clean up directly
    # so the entry is not removed twice
    handle.cancelCallback = proc(data: pointer) {.raises: [].} =
      if not handle.finished:
        handle.removeCallback(cleanUpBlock)
        cleanUpBlock(nil)

    self.updatePendingBlockGauge()
    return handle
proc getWantHandle*(
    self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
  ## Convenience overload: wait on the whole block identified by `cid`.
  self.getWantHandle(BlockAddress.init(cid), requested)
proc completeWantHandle*(
    self: PendingBlocksManager, address: BlockAddress, blk: Block
) {.raises: [].} =
  ## Complete a pending want handle with an already-available block.
  ## A no-op (with a trace) when no request is pending or it already settled.
  self.blocks.withValue(address, blockReq):
    if blockReq[].handle.finished:
      trace "Want handle already completed", address
    else:
      trace "Completing want handle from provided block", address
      blockReq[].handle.complete(blk)
  do:
    trace "No pending want handle found for address", address
proc resolve*(
    self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, raises: [].} =
  ## Resolve pending blocks
  ##
  ## Completes the handle of each delivered block and records the
  ## retrieval latency metric. Deliveries without a pending request,
  ## or whose handle already settled, are ignored.
  for bd in blocksDelivery:
    self.blocks.withValue(bd.address, blockReq):
      if not blockReq[].handle.finished:
        trace "Resolving pending block", address = bd.address
        let
          startTime = blockReq[].startTime
          stopTime = getMonoTime().ticks
          # monotonic ticks are nanoseconds; report microseconds
          retrievalDurationUs = (stopTime - startTime) div 1000
        blockReq.handle.complete(bd.blk)
        storage_block_exchange_retrieval_time_us.set(retrievalDurationUs)
      else:
        trace "Block handle already finished", address = bd.address
func retries*(self: PendingBlocksManager, address: BlockAddress): int =
  ## Remaining retry budget for `address`; 0 when the block is unknown.
  result = 0
  self.blocks.withValue(address, pending):
    result = pending[].blockRetries
func decRetries*(self: PendingBlocksManager, address: BlockAddress) =
  ## Consume one retry from the block's budget; no-op for unknown blocks.
  self.blocks.withValue(address, pending):
    pending[].blockRetries -= 1
func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool =
  ## True when the retry budget for `address` is used up; false when unknown.
  result = false
  self.blocks.withValue(address, pending):
    result = pending[].blockRetries <= 0
func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool =
  ## Check if a block has been requested to a peer
  ##
  self.blocks.withValue(address, pending):
    return pending[].requested.isSome
  false
func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId =
  ## Returns the peer that requested this block
  ##
  self.blocks.withValue(address, pending):
    return pending[].requested
  PeerId.none
proc markRequested*(
    self: PendingBlocksManager, address: BlockAddress, peer: PeerId
): bool =
  ## Marks this block as having been requested to a peer
  ##
  ## Returns false when a request is already outstanding for this address.
  if self.isRequested(address):
    return false
  self.blocks.withValue(address, pending):
    pending[].requested = peer.some
  # NOTE(review): returns true even when `address` has no pending entry —
  # confirm that callers rely on this "best-effort" semantics
  return true
proc clearRequest*(
    self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none
) =
  ## Clear the requested-from-peer mark on a pending block.
  ## When `peer` is given it must match the recorded requester
  ## (checked with `assert`, so stripped in -d:danger builds).
  self.blocks.withValue(address, pending):
    if peer.isSome:
      assert peer == pending[].requested
    pending[].requested = PeerId.none
func contains*(self: PendingBlocksManager, cid: Cid): bool =
  ## True when a whole-block request for `cid` is pending.
  self.blocks.hasKey(BlockAddress.init(cid))
func contains*(self: PendingBlocksManager, address: BlockAddress): bool =
  ## True when a request for `address` is pending.
  self.blocks.hasKey(address)
iterator wantList*(self: PendingBlocksManager): BlockAddress =
  ## Yield every pending block address.
  for a in self.blocks.keys:
    yield a
iterator wantListBlockCids*(self: PendingBlocksManager): Cid =
  ## Yield the cids of pending non-leaf (whole-block) requests.
  for address in self.blocks.keys:
    if not address.leaf:
      yield address.cid
iterator wantListCids*(self: PendingBlocksManager): Cid =
  ## Yield each distinct cid (or tree cid) present in the want list.
  var seen = initHashSet[Cid]()
  for address in self.blocks.keys:
    let cid = address.cidOrTreeCid
    if cid in seen:
      continue
    seen.incl(cid)
    yield cid
iterator wantHandles*(self: PendingBlocksManager): Future[Block] =
  ## Yield the completion future of every pending request.
  for v in self.blocks.values:
    yield v.handle
func wantListLen*(self: PendingBlocksManager): int =
  ## Number of pending block requests (same as `len`).
  ## `func` for consistency with the sibling `len` accessor — the body is pure.
  self.blocks.len
func len*(self: PendingBlocksManager): int =
  ## Number of pending block requests.
  self.blocks.len
func new*(
    T: type PendingBlocksManager,
    retries = DefaultBlockRetries,
    interval = DefaultRetryInterval,
): PendingBlocksManager =
  ## Create a manager with the given retry budget and retry interval.
  result = PendingBlocksManager(retryInterval: interval, blockRetries: retries)

View File

@ -0,0 +1,259 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/[algorithm, deques, sets, tables, options]
type
  BlockBatch* = tuple[start: uint64, count: uint64]
    ## Half-open range of block indices: [start, start + count)

  SelectionPolicy* = enum
    spSequential ## hand out batches strictly in ascending order

  Scheduler* = ref object
    totalBlocks: uint64 ## exclusive upper bound of schedulable indices
    batchSize: uint64 ## nominal number of blocks per generated batch
    nextBatchStart: uint64 ## lazy cursor for not-yet-generated batches
    requeued: Deque[BlockBatch] ## batches handed back for retry
    completedWatermark: uint64 ## every index below this is complete
    completedOutOfOrder: HashSet[uint64] ## completed batch starts above the watermark
    inFlight: Table[uint64, uint64] # batch start -> block count
    batchRemaining: Table[uint64, uint64] # parent batch start -> remaining blocks
proc new*(T: type Scheduler): Scheduler =
  ## Create an empty scheduler; call one of the `init*` procs before use.
  result = Scheduler()
  result.requeued = initDeque[BlockBatch]()
  result.completedOutOfOrder = initHashSet[uint64]()
  result.inFlight = initTable[uint64, uint64]()
  result.batchRemaining = initTable[uint64, uint64]()
  # numeric fields (totalBlocks, batchSize, nextBatchStart,
  # completedWatermark) default to 0
proc init*(self: Scheduler, totalBlocks: uint64, batchSize: uint64) =
  ## (Re)initialize for a fresh sequential download of `totalBlocks` blocks.
  self.requeued.clear()
  self.completedOutOfOrder.clear()
  self.inFlight.clear()
  self.batchRemaining.clear()
  self.totalBlocks = totalBlocks
  self.batchSize = batchSize
  self.nextBatchStart = 0
  self.completedWatermark = 0
proc initRange*(self: Scheduler, startIndex: uint64, count: uint64, batchSize: uint64) =
  ## (Re)initialize for the sub-range [startIndex, startIndex + count).
  self.requeued.clear()
  self.completedOutOfOrder.clear()
  self.inFlight.clear()
  self.batchRemaining.clear()
  self.totalBlocks = startIndex + count
  self.batchSize = batchSize
  # start both the lazy cursor and the watermark at the range's first index
  self.nextBatchStart = startIndex
  self.completedWatermark = startIndex
proc updateBatchSize*(self: Scheduler, newBatchSize: uint64) =
  ## Change the nominal batch size for subsequently generated batches.
  self.batchSize = newBatchSize
proc add*(self: Scheduler, start: uint64, count: uint64) =
  ## Queue an explicit batch. Grows `totalBlocks` to cover it and seeds
  ## `batchSize` from the first batch when still unset.
  if self.batchSize == 0:
    self.batchSize = count
  self.totalBlocks = max(self.totalBlocks, start + count)
  self.requeued.addLast((start: start, count: count))
proc initFromIndices*(self: Scheduler, indices: seq[uint64], batchSize: uint64) =
  ## Reset the scheduler and queue batches covering exactly `indices`,
  ## coalescing consecutive indices into runs capped at `batchSize`.
  let sortedIndices = indices.sorted()
  self.batchSize = batchSize
  self.nextBatchStart = 0
  self.requeued.clear()
  self.completedWatermark = 0
  self.completedOutOfOrder.clear()
  self.inFlight.clear()
  self.batchRemaining.clear()
  var
    batchStart: uint64 = 0
    batchCount: uint64 = 0
    inBatch = false
  for blockIdx in sortedIndices:
    if not inBatch:
      # open a new run
      batchStart = blockIdx
      batchCount = 1
      inBatch = true
    elif blockIdx == batchStart + batchCount:
      # contiguous: extend the current run
      batchCount += 1
    else:
      # gap: flush the finished run and start another
      self.add(batchStart, batchCount)
      batchStart = blockIdx
      batchCount = 1
    if batchCount >= batchSize:
      # cap runs at batchSize
      self.add(batchStart, batchCount)
      inBatch = false
      batchCount = 0
  if inBatch and batchCount > 0:
    self.add(batchStart, batchCount)
  # `add` grows totalBlocks; with nextBatchStart left at 0 the lazy
  # generator would fabricate batches for indices that were never requested
  # once the explicit queue drains. Disable lazy generation entirely: only
  # the explicitly queued batches may be taken.
  self.nextBatchStart = self.totalBlocks
proc generateNextBatchInternal(self: Scheduler): Option[BlockBatch] {.inline.} =
  ## Advance the lazy cursor to the next schedulable batch.
  ## does NOT add to inFlight - we must do that
  while self.nextBatchStart < self.totalBlocks:
    let
      start = self.nextBatchStart
      count = min(self.batchSize, self.totalBlocks - start)
    self.nextBatchStart = start + count
    let alreadyHandled =
      start < self.completedWatermark or start in self.inFlight or
      start in self.completedOutOfOrder
    if not alreadyHandled:
      return some((start: start, count: count))
  none(BlockBatch)
proc take*(self: Scheduler): Option[BlockBatch] =
  ## Pop the next batch to request: requeued batches take priority over
  ## freshly generated ones. The returned batch is recorded as in-flight.
  while self.requeued.len > 0:
    let batch = self.requeued.popFirst()
    # skip batches that completed while they sat in the queue
    if batch.start < self.completedWatermark or
        batch.start in self.completedOutOfOrder:
      continue
    self.inFlight[batch.start] = batch.count
    return some(batch)
  result = self.generateNextBatchInternal()
  if result.isSome:
    self.inFlight[result.get().start] = result.get().count
proc requeueBack*(self: Scheduler, start: uint64, count: uint64) {.inline.} =
  ## requeue batch at back (peer didn't have it, try later).
  self.inFlight.del(start)
  if start >= self.completedWatermark and start notin self.completedOutOfOrder:
    self.requeued.addLast((start: start, count: count))
proc requeueFront*(self: Scheduler, start: uint64, count: uint64) {.inline.} =
  ## requeue batch at front (failed/timed out, retry soon).
  self.inFlight.del(start)
  if start >= self.completedWatermark and start notin self.completedOutOfOrder:
    self.requeued.addFirst((start: start, count: count))
proc advanceWatermark(self: Scheduler, batchStart: uint64) =
  ## Move the completed watermark forward when the batch at the watermark
  ## finishes, absorbing any out-of-order completions that become contiguous;
  ## completions above the watermark are parked in `completedOutOfOrder`.
  ## NOTE(review): advances in `batchSize` steps, so it assumes batch starts
  ## are aligned to batchSize multiples — confirm for explicit-index batches.
  if batchStart == self.completedWatermark:
    self.completedWatermark = batchStart + self.batchSize
    while self.completedWatermark in self.completedOutOfOrder:
      self.completedOutOfOrder.excl(self.completedWatermark)
      self.completedWatermark += self.batchSize
  elif batchStart > self.completedWatermark:
    self.completedOutOfOrder.incl(batchStart)
proc findPartialParent(self: Scheduler, start: uint64): Option[uint64] =
  ## Locate the partially-completed parent batch whose span covers `start`.
  for parentStart in self.batchRemaining.keys:
    if start >= parentStart and start < parentStart + self.batchSize:
      return some parentStart
  none(uint64)
proc markComplete*(self: Scheduler, start: uint64) =
  ## Mark the in-flight batch starting at `start` as fully delivered.
  ## If it belongs to a partially completed parent batch, credit the parent
  ## and advance the watermark once the parent has no blocks outstanding.
  let count = self.inFlight.getOrDefault(start, 0'u64)
  self.inFlight.del(start)
  let parent = self.findPartialParent(start)
  if parent.isSome:
    self.batchRemaining.withValue(parent.get, remaining):
      # `remaining[]` is uint64: the previous `remaining[] -= count` followed
      # by `remaining[] <= 0` could wrap on over-delivery (and `<= 0` is only
      # ever `== 0` for unsigned). Guard the subtraction instead.
      if count >= remaining[]:
        self.batchRemaining.del(parent.get)
        self.advanceWatermark(parent.get)
      else:
        remaining[] -= count
    return
  self.advanceWatermark(start)
proc partialComplete*(
    self: Scheduler, originalStart: uint64, missingRanges: seq[BlockBatch]
) =
  ## Record a partially delivered batch: the missing sub-ranges are requeued
  ## at the front for prompt retry and the remaining-block accounting is
  ## updated (creating a parent entry on first partial delivery).
  let originalCount = self.inFlight.getOrDefault(originalStart, self.batchSize)
  self.inFlight.del(originalStart)
  var totalMissing: uint64 = 0
  for batch in missingRanges:
    totalMissing += batch.count
  let parent = self.findPartialParent(originalStart)
  if parent.isSome:
    # unsigned arithmetic: clamp both subtractions so inconsistent
    # accounting cannot wrap around to a huge "remaining" value
    let delivered =
      if originalCount >= totalMissing: originalCount - totalMissing else: 0'u64
    self.batchRemaining.withValue(parent.get, remaining):
      remaining[] -= min(delivered, remaining[])
  else:
    self.batchRemaining[originalStart] = totalMissing
  # addFirst in reverse so the earliest missing range is retried first
  for i in countdown(missingRanges.len - 1, 0):
    self.requeued.addFirst(missingRanges[i])
proc isEmpty*(self: Scheduler): bool =
  ## True once every block is accounted for and nothing is queued or in flight.
  self.inFlight.len == 0 and self.requeued.len == 0 and
    self.completedWatermark >= self.totalBlocks
proc completedWatermark*(self: Scheduler): uint64 =
  ## Index below which every block is complete.
  self.completedWatermark
proc hasWork*(self: Scheduler): bool {.inline.} =
  ## True while there is anything left to hand out (requeued or ungenerated).
  self.nextBatchStart < self.totalBlocks or self.requeued.len > 0
proc requeuedCount*(self: Scheduler): int {.inline.} =
  ## Number of batches currently waiting in the requeue deque.
  self.requeued.len
proc pending*(self: Scheduler): seq[BlockBatch] =
  ## Snapshot of the requeued batches, front to back.
  result = newSeqOfCap[BlockBatch](self.requeued.len)
  for batch in self.requeued.items:
    result.add(batch)
proc clear*(self: Scheduler) =
  ## Reset the scheduler to a completely empty state.
  self.totalBlocks = 0
  self.batchSize = 0
  self.nextBatchStart = 0
  self.completedWatermark = 0
  self.requeued.clear()
  self.completedOutOfOrder.clear()
  self.inFlight.clear()
  self.batchRemaining.clear()
proc totalBlockCount*(self: Scheduler): uint64 =
  ## Exclusive upper bound of the block indices this scheduler covers.
  self.totalBlocks
proc batchSizeCount*(self: Scheduler): uint64 =
  ## Nominal number of blocks per generated batch.
  self.batchSize
proc batchEnd*(batch: BlockBatch): uint64 =
  ## Exclusive end index of the batch.
  batch.start + batch.count
proc contains*(batch: BlockBatch, blockIndex: uint64): bool =
  ## True when `blockIndex` falls within the batch's half-open range.
  batch.start <= blockIndex and blockIndex < batch.batchEnd
proc merge*(a, b: BlockBatch): Option[BlockBatch] =
  ## Merge overlapping or adjacent batches into their union;
  ## returns none() when the batches are separated by a gap.
  if b.start > a.batchEnd or a.start > b.batchEnd:
    return none(BlockBatch)
  let
    lo = min(a.start, b.start)
    hi = max(a.batchEnd, b.batchEnd)
  some((start: lo, count: hi - lo))

View File

@ -0,0 +1,340 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/[tables, sets, options, random]
import pkg/chronos
import pkg/libp2p/peerid
import ../peers/peerctxstore
import ../peers/peerstats
import ../types
import ../../logutils
export peerctxstore, types
randomize()
logScope:
topics = "logos-storage swarm"
const
  DefaultDeltaMin* = 2 ## below this many active peers, more are needed
  DefaultDeltaMax* = 16 ## hard cap on tracked peers per swarm
  DefaultDeltaTarget* = 8 ## desired number of active peers
  PeerStaleTimeout* = 30.seconds ## peer unseen this long counts as stale
  PeerDefaultMaxFailures*: uint32 = 2 ## failures tolerated before removal
  PeerDefaultMaxTimeouts*: uint32 = 5 ## timeouts tolerated before removal
  ExplorationProbability* = 0.2 ## chance to pick a random capable peer
  TimeoutPenaltyWeight* = 3.0 ## BDP score penalty per recorded timeout
type
  SwarmPeer* = ref object
    availability*: BlockAvailability ## block ranges the peer advertises
    lastSeen*: Moment ## last time we heard from the peer
    availabilityUpdated*: Moment ## last availability merge
    failureCount*: uint32 ## failures since the last success
    timeoutCount*: uint32 ## timeouts since the last success

  SwarmConfig* = object
    deltaMin*: int ## minimum active peers before more are sought
    deltaMax*: int ## maximum tracked peers
    deltaTarget*: int ## desired active peer count
    maxPeerFailures*: uint32 ## failure threshold for removal
    maxPeerTimeouts*: uint32 ## timeout threshold for removal

  PeerSelectionKind* = enum
    pskFound
    pskAtCapacity
    pskNoPeers

  PeerSelection* = object
    case kind*: PeerSelectionKind
    of pskFound:
      peer*: PeerContext
    of pskAtCapacity, pskNoPeers:
      discard

  Swarm* = ref object
    config: SwarmConfig
    peers: Table[PeerId, SwarmPeer]
    removedPeers: HashSet[PeerId] ## banned peers that may not re-join
proc new*(T: type SwarmPeer, availability: BlockAvailability): SwarmPeer =
  ## Fresh peer record: seen now, availability current, no failures yet.
  let now = Moment.now()
  result = SwarmPeer(availability: availability)
  result.lastSeen = now
  result.availabilityUpdated = now
  # failureCount and timeoutCount default to 0
proc isStale*(peer: SwarmPeer): bool =
  ## True when the peer has not been seen within PeerStaleTimeout.
  Moment.now() - peer.lastSeen > PeerStaleTimeout
proc touch*(peer: SwarmPeer) =
  ## Refresh the peer's last-seen timestamp.
  peer.lastSeen = Moment.now()
proc updateAvailability*(peer: SwarmPeer, availability: BlockAvailability) =
  ## Merge newly advertised availability into the peer's record and
  ## refresh its liveness timestamps.
  peer.availability = peer.availability.merge(availability)
  peer.availabilityUpdated = Moment.now()
  peer.touch()
proc recordFailure*(peer: SwarmPeer) =
  ## Count one failed exchange against this peer.
  peer.failureCount += 1
proc recordTimeout*(peer: SwarmPeer) =
  ## Count one timed-out exchange against this peer.
  peer.timeoutCount += 1
proc resetFailures*(peer: SwarmPeer) =
  ## Clear both failure and timeout counters (after a success).
  peer.failureCount = 0
  peer.timeoutCount = 0
proc defaultConfig*(_: type SwarmConfig): SwarmConfig =
  ## Default swarm sizing and peer-health thresholds.
  result = SwarmConfig(
    deltaMin: DefaultDeltaMin,
    deltaTarget: DefaultDeltaTarget,
    deltaMax: DefaultDeltaMax,
    maxPeerFailures: PeerDefaultMaxFailures,
    maxPeerTimeouts: PeerDefaultMaxTimeouts,
  )
proc new*(T: type Swarm, config: SwarmConfig = SwarmConfig.defaultConfig()): Swarm =
  ## Create an empty swarm governed by `config`.
  result = Swarm(config: config)
  result.peers = initTable[PeerId, SwarmPeer]()
  result.removedPeers = initHashSet[PeerId]()
proc addPeer*(swarm: Swarm, peerId: PeerId, availability: BlockAvailability): bool =
  ## Admit a peer unless it was banned or the swarm is at deltaMax capacity.
  ## Returns true when the peer was added.
  if peerId in swarm.removedPeers or swarm.peers.len >= swarm.config.deltaMax:
    return false
  swarm.peers[peerId] = SwarmPeer.new(availability)
  true
proc removePeer*(swarm: Swarm, peerId: PeerId): Option[SwarmPeer] =
  ## Drop a peer from the swarm, returning its record when it was present.
  result = none(SwarmPeer)
  swarm.peers.withValue(peerId, peer):
    result = some(peer[])
  if result.isSome:
    swarm.peers.del(peerId)
proc banPeer*(swarm: Swarm, peerId: PeerId) =
  ## Remove the peer and prevent it from re-joining this swarm.
  swarm.removedPeers.incl(peerId)
  swarm.peers.del(peerId)
proc getPeer*(swarm: Swarm, peerId: PeerId): Option[SwarmPeer] =
  ## Look up a peer's record.
  result = none(SwarmPeer)
  swarm.peers.withValue(peerId, peer):
    result = some(peer[])
proc updatePeerAvailability*(
    swarm: Swarm, peerId: PeerId, availability: BlockAvailability
) =
  ## Merge availability info for a tracked peer; unknown peers are ignored.
  swarm.peers.withValue(peerId, peer):
    peer[].updateAvailability(availability)
proc recordPeerFailure*(swarm: Swarm, peerId: PeerId): bool =
  ## return true if peer should be removed
  result = false
  swarm.peers.withValue(peerId, peer):
    peer[].recordFailure()
    result = peer[].failureCount >= swarm.config.maxPeerFailures
proc recordPeerTimeout*(swarm: Swarm, peerId: PeerId): bool =
  ## return true if peer should be removed
  result = false
  swarm.peers.withValue(peerId, peer):
    peer[].recordTimeout()
    result = peer[].timeoutCount >= swarm.config.maxPeerTimeouts
proc recordPeerSuccess*(swarm: Swarm, peerId: PeerId) =
  ## Clear failure counters and refresh last-seen after a successful exchange.
  swarm.peers.withValue(peerId, peer):
    peer[].resetFailures()
    peer[].touch()
proc recordBatchSuccess*(
    swarm: Swarm, peer: PeerContext, rttMicros: uint64, totalBytes: uint64
) =
  ## Credit a completed batch: clear failure counters, refresh liveness,
  ## and feed RTT/bytes into the peer's bandwidth statistics.
  swarm.peers.withValue(peer.id, swarmPeer):
    swarmPeer[].resetFailures()
    swarmPeer[].touch()
    # NOTE(review): stats are only recorded for swarm-tracked peers here —
    # confirm this is intended rather than recording unconditionally
    peer.stats.recordRequest(rttMicros, totalBytes)
proc activePeerCount*(swarm: Swarm): int =
  ## Number of peers seen within PeerStaleTimeout.
  result = 0
  for record in swarm.peers.values:
    if not record.isStale:
      inc result
proc peerCount*(swarm: Swarm): int =
  ## Total tracked peers, stale ones included.
  swarm.peers.len
proc needsPeers*(swarm: Swarm): bool =
  ## True while the active peer count is below the configured minimum.
  swarm.activePeerCount() < swarm.config.deltaMin
proc peersNeeded*(swarm: Swarm): int =
  ## How many more peers are wanted to reach deltaTarget (0 when satisfied).
  max(0, swarm.config.deltaTarget - swarm.activePeerCount())
proc connectedPeers*(swarm: Swarm): seq[PeerId] =
  ## Ids of all tracked peers (including stale ones).
  result = newSeqOfCap[PeerId](swarm.peers.len)
  for id in swarm.peers.keys:
    result.add(id)
proc peersWithRange*(swarm: Swarm, start: uint64, count: uint64): seq[PeerId] =
  ## Non-stale peers advertising the complete [start, start+count) range.
  for id, record in swarm.peers.pairs:
    if not record.isStale and record.availability.hasRange(start, count):
      result.add(id)
proc peersWithAnyInRange*(swarm: Swarm, start: uint64, count: uint64): seq[PeerId] =
  ## Non-stale peers advertising at least part of [start, start+count).
  for id, record in swarm.peers.pairs:
    if not record.isStale and record.availability.hasAnyInRange(start, count):
      result.add(id)
proc staleUnknownPeers*(swarm: Swarm): seq[PeerId] =
  ## Stale peers whose availability was never learned (bakUnknown).
  for id, record in swarm.peers.pairs:
    if record.isStale and record.availability.kind == bakUnknown:
      result.add(id)
proc selectByBDP*(
    peers: seq[PeerContext],
    batchBytes: uint64,
    inFlightBatches: var Table[PeerId, seq[Future[void]]],
    penalties: var Table[PeerId, float],
    explorationProb: float = ExplorationProbability,
): Option[PeerContext] {.gcsafe, raises: [].} =
  ## Pick one of `peers` for a `batchBytes`-sized batch using a
  ## bandwidth-delay-product score, with three phases:
  ## 1. prefer peers with no throughput measurement yet (least loaded first),
  ## 2. with probability `explorationProb`, pick a random peer with spare
  ##    pipeline capacity,
  ## 3. otherwise pick the (penalty-adjusted) lowest BDP score, breaking
  ##    ties randomly.
  if peers.len == 0:
    return none(PeerContext)
  if peers.len == 1:
    return some(peers[0])
  # phase 1: unmeasured peers with spare capacity
  var untriedPeers: seq[PeerContext]
  for peer in peers:
    if peer.stats.throughputBps().isNone:
      let
        pipelineDepth = peer.optimalPipelineDepth(batchBytes)
        currentLoad = inFlightBatches.getOrDefault(peer.id, @[]).len
      if currentLoad < pipelineDepth:
        untriedPeers.add(peer)
  if untriedPeers.len > 0:
    # choose the least-loaded untried peer
    var
      bestPeer = untriedPeers[0]
      bestLoad = inFlightBatches.getOrDefault(bestPeer.id, @[]).len
    for i in 1 ..< untriedPeers.len:
      let load = inFlightBatches.getOrDefault(untriedPeers[i].id, @[]).len
      if load < bestLoad:
        bestLoad = load
        bestPeer = untriedPeers[i]
    return some(bestPeer)
  # phase 2: occasional random exploration among peers with capacity
  let exploreRoll = rand(1.0)
  if exploreRoll < explorationProb:
    var peersWithCapacity: seq[PeerContext]
    for peer in peers:
      let
        pipelineDepth = peer.optimalPipelineDepth(batchBytes)
        currentLoad = inFlightBatches.getOrDefault(peer.id, @[]).len
      if currentLoad < pipelineDepth:
        peersWithCapacity.add(peer)
    if peersWithCapacity.len > 0:
      let idx = rand(peersWithCapacity.len - 1)
      return some(peersWithCapacity[idx])
  # phase 3: exploit the lowest penalty-adjusted BDP score
  var
    bestPeers: seq[PeerContext] = @[peers[0]]
    bestScore = peers[0].evalBDPScore(
      batchBytes,
      inFlightBatches.getOrDefault(peers[0].id, @[]).len,
      penalties.getOrDefault(peers[0].id, 0.0),
    )
  for i in 1 ..< peers.len:
    let score = peers[i].evalBDPScore(
      batchBytes,
      inFlightBatches.getOrDefault(peers[i].id, @[]).len,
      penalties.getOrDefault(peers[i].id, 0.0),
    )
    if score < bestScore:
      bestScore = score
      bestPeers = @[peers[i]]
    elif score == bestScore:
      bestPeers.add(peers[i])
  # ties are broken at random to spread load
  if bestPeers.len > 1:
    let idx = rand(bestPeers.len - 1)
    return some(bestPeers[idx])
  else:
    return some(bestPeers[0])
proc selectPeerForBatch*(
    swarm: Swarm,
    peers: PeerContextStore,
    start: uint64,
    count: uint64,
    batchBytes: uint64,
    inFlightBatches: var Table[PeerId, seq[Future[void]]],
): PeerSelection =
  ## Pick the best peer to serve the batch [start, start+count).
  ## Peers advertising the full range are preferred; when none exist,
  ## peers holding any part of the range are considered. Returns
  ## pskNoPeers when no candidate exists and pskAtCapacity when all
  ## candidates have a full request pipeline.
  ## (The original duplicated the filter+select logic for the full-range
  ## and partial-range paths; both now share one pass.)
  var penalties: Table[PeerId, float]
  for peerId, swarmPeer in swarm.peers:
    if swarmPeer.timeoutCount > 0:
      penalties[peerId] = swarmPeer.timeoutCount.float * TimeoutPenaltyWeight
  var candidates = swarm.peersWithRange(start, count)
  if candidates.len == 0:
    # fall back to peers that hold at least part of the range
    candidates = swarm.peersWithAnyInRange(start, count)
    trace "No full range peers, checking partial",
      start = start, count = count, partialPeers = candidates.len
    if candidates.len == 0:
      return PeerSelection(kind: pskNoPeers)
  # keep only connected peers with spare pipeline capacity
  var peerCtxs: seq[PeerContext]
  for peerId in candidates:
    let peer = peers.get(peerId)
    if peer.isNil:
      # peer disconnected, remove from swarm immediately
      discard swarm.removePeer(peerId)
      continue
    let currentInFlight = inFlightBatches.getOrDefault(peerId, @[]).len
    if currentInFlight < peer.optimalPipelineDepth(batchBytes):
      peerCtxs.add(peer)
  if peerCtxs.len == 0:
    return PeerSelection(kind: pskAtCapacity)
  let selected = selectByBDP(peerCtxs, batchBytes, inFlightBatches, penalties)
  if selected.isSome:
    return PeerSelection(kind: pskFound, peer: selected.get())
  PeerSelection(kind: pskNoPeers)

View File

@ -1,4 +1,5 @@
import ./network/network
import ./network/networkpeer
import ./protocol/wantblocks
export network, networkpeer
export network, networkpeer, wantblocks

View File

@ -19,12 +19,14 @@ import pkg/questionable/results
import ../../blocktype as bt
import ../../logutils
import ../protobuf/blockexc as pb
import ../types
import ../protocol/message
import ../../utils/trackedfutures
import ./networkpeer
import ../protocol/wantblocks
export networkpeer
export networkpeer, wantblocks
logScope:
topics = "storage blockexcnetwork"
@ -35,19 +37,19 @@ const
type
WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).}
BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).}
PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}
WantBlocksRequestHandlerProc* = proc(
peer: PeerId, req: WantBlocksRequest
): Future[seq[BlockDelivery]] {.async: (raises: [CancelledError]).}
BlockExcHandlers* = object
onWantList*: WantListHandler
onBlocksDelivery*: BlocksDeliveryHandler
onPresence*: BlockPresenceHandler
onWantBlocksRequest*: WantBlocksRequestHandlerProc
onPeerJoined*: PeerEventHandler
onPeerDeparted*: PeerEventHandler
onPeerDropped*: PeerEventHandler
WantListSender* = proc(
id: PeerId,
@ -57,21 +59,15 @@ type
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
rangeCount: uint64 = 0,
downloadId: uint64 = 0,
) {.async: (raises: [CancelledError]).}
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {.
async: (raises: [CancelledError])
.}
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.
async: (raises: [CancelledError])
.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
async: (raises: [CancelledError])
.}
BlockExcRequest* = object
sendWantList*: WantListSender
sendWantCancellations*: WantCancellationSender
sendBlocksDelivery*: BlocksDeliverySender
sendPresence*: PresenceSender
BlockExcNetwork* = ref object of LPProtocol
@ -97,13 +93,14 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =
return b.peerId == peer
proc send*(
b: BlockExcNetwork, id: PeerId, msg: pb.Message
b: BlockExcNetwork, id: PeerId, msg: Message
) {.async: (raises: [CancelledError]).} =
## Send message to peer
##
if not (id in b.peers):
trace "Unable to send, peer not found", peerId = id
trace "Unable to send protobuf, peer not in network.peers",
peerId = id, hasWantList = msg.wantList.entries.len > 0
return
try:
@ -136,6 +133,8 @@ proc sendWantList*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
rangeCount: uint64 = 0,
downloadId: uint64 = 0,
) {.async: (raw: true, raises: [CancelledError]).} =
## Send a want message to peer
##
@ -148,6 +147,8 @@ proc sendWantList*(
cancel: cancel,
wantType: wantType,
sendDontHave: sendDontHave,
rangeCount: rangeCount,
downloadId: downloadId,
)
),
full: full,
@ -155,30 +156,6 @@ proc sendWantList*(
b.send(id, Message(wantlist: msg))
proc sendWantCancellations*(
b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async: (raises: [CancelledError]).} =
## Informs a remote peer that we're no longer interested in a set of blocks
##
await b.sendWantList(id = id, addresses = addresses, cancel = true)
proc handleBlocksDelivery(
b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: []).} =
## Handle incoming blocks
##
if not b.handlers.onBlocksDelivery.isNil:
await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)
proc sendBlocksDelivery*(
b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async: (raw: true, raises: [CancelledError]).} =
## Send blocks to remote
##
b.send(id, pb.Message(payload: blocksDelivery))
proc handleBlockPresence(
b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
) {.async: (raises: []).} =
@ -204,9 +181,6 @@ proc rpcHandler(
if msg.wantList.entries.len > 0:
self.trackedFutures.track(self.handleWantList(peer, msg.wantList))
if msg.payload.len > 0:
self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload))
if msg.blockPresences.len > 0:
self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))
@ -234,14 +208,25 @@ proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
await self.rpcHandler(p, msg)
let wantBlocksHandler = proc(
peerId: PeerId, req: WantBlocksRequest
): Future[seq[BlockDelivery]] {.async: (raises: [CancelledError]).} =
return await self.handlers.onWantBlocksRequest(peerId, req)
# create new pubsub peer
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler, wantBlocksHandler)
debug "Created new blockexc peer", peer
self.peers[peer] = blockExcPeer
return blockExcPeer
proc sendWantBlocksRequest*(
self: BlockExcNetwork, peer: PeerId, blockRange: BlockRange
): Future[WantBlocksResult[WantBlocksResponse]] {.async: (raises: [CancelledError]).} =
let networkPeer = self.getOrCreatePeer(peer)
return await networkPeer.sendWantBlocksRequest(blockRange)
proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} =
## Dial a peer
##
@ -267,9 +252,6 @@ proc dropPeer*(
except CatchableError as error:
warn "Error attempting to disconnect from peer", peer = peer, error = error.msg
if not self.handlers.onPeerDropped.isNil:
await self.handlers.onPeerDropped(peer)
proc handlePeerJoined*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
@ -344,30 +326,19 @@ proc new*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
rangeCount: uint64 = 0,
downloadId: uint64 = 0,
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)
proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantCancellations(id, addresses)
proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlocksDelivery(id, blocksDelivery)
self.sendWantList(
id, cids, priority, cancel, wantType, full, sendDontHave, rangeCount, downloadId
)
proc sendPresence(
id: PeerId, presence: seq[BlockPresence]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlockPresence(id, presence)
self.request = BlockExcRequest(
sendWantList: sendWantList,
sendWantCancellations: sendWantCancellations,
sendBlocksDelivery: sendBlocksDelivery,
sendPresence: sendPresence,
)
self.request = BlockExcRequest(sendWantList: sendWantList, sendPresence: sendPresence)
self.init()
return self

View File

@ -11,12 +11,19 @@
import pkg/chronos
import pkg/libp2p
import pkg/stew/endians2
import std/tables
import ../protobuf/blockexc
import ../protobuf/message
import ../protocol/message
import ../protocol/constants
import ../../errors
import ../../logutils
import ../../utils/trackedfutures
import ../../blocktype
import ../types
import ../protocol/wantblocks
export wantblocks
logScope:
topics = "storage blockexcnetworkpeer"
@ -28,13 +35,22 @@ type
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).}
WantBlocksRequestHandler* = proc(
peer: PeerId, req: WantBlocksRequest
): Future[seq[BlockDelivery]] {.async: (raises: [CancelledError]).}
WantBlocksResponseFuture* = Future[WantBlocksResult[WantBlocksResponse]]
NetworkPeer* = ref object of RootObj
id*: PeerId
handler*: RPCHandler
wantBlocksHandler*: WantBlocksRequestHandler
sendConn: Connection
getConn: ConnProvider
yieldInterval*: Duration = DefaultYieldInterval
trackedFutures: TrackedFutures
pendingWantBlocksRequests*: Table[uint64, WantBlocksResponseFuture]
nextRequestId*: uint64
proc connected*(self: NetworkPeer): bool =
not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof)
@ -47,24 +63,82 @@ proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
trace "Attaching read loop", peer = self.id, connId = conn.oid
try:
var nextYield = Moment.now() + self.yieldInterval
while not conn.atEof or not conn.closed:
while not conn.atEof and not conn.closed:
if Moment.now() > nextYield:
nextYield = Moment.now() + self.yieldInterval
trace "Yielding in read loop",
peer = self.id, nextYield = nextYield, interval = self.yieldInterval
await sleepAsync(10.millis)
var lenBuf: array[4, byte]
await conn.readExactly(addr lenBuf[0], 4)
let frameLen = uint32.fromBytes(lenBuf, littleEndian).int
if frameLen < 1:
warn "Frame too short", peer = self.id, frameLen = frameLen
return
var typeByte: array[1, byte]
await conn.readExactly(addr typeByte[0], 1)
if typeByte[0] > ord(high(MessageType)):
warn "Invalid message type byte", peer = self.id, typeByte = typeByte[0]
return
let
data = await conn.readLp(MaxMessageSize.int)
msg = Message.protobufDecode(data).mapFailure().tryGet()
trace "Received message", peer = self.id, connId = conn.oid
await self.handler(self, msg)
msgType = MessageType(typeByte[0])
dataLen = frameLen - 1
case msgType
of mtProtobuf:
if dataLen > MaxMessageSize.int:
warn "Protobuf message too large", peer = self.id, size = dataLen
return
var data = newSeq[byte](dataLen)
if dataLen > 0:
await conn.readExactly(addr data[0], dataLen)
let msg = Message.protobufDecode(data).mapFailure().tryGet()
await self.handler(self, msg)
of mtWantBlocksRequest:
let reqResult = await readWantBlocksRequest(conn, dataLen)
if reqResult.isErr:
warn "Failed to read WantBlocks request",
peer = self.id, error = reqResult.error.msg
return
let
req = reqResult.get
blocks = await self.wantBlocksHandler(self.id, req)
await writeWantBlocksResponse(conn, req.requestId, req.cid, blocks)
of mtWantBlocksResponse:
let respResult = await readWantBlocksResponse(conn, dataLen)
if respResult.isErr:
warn "Failed to read WantBlocks response",
peer = self.id, error = respResult.error.msg
return
let response = respResult.get
self.pendingWantBlocksRequests.withValue(response.requestId, fut):
if not fut[].finished:
fut[].complete(WantBlocksResult[WantBlocksResponse].ok(response))
self.pendingWantBlocksRequests.del(response.requestId)
do:
warn "Received WantBlocks response for unknown request ID",
peer = self.id, requestId = response.requestId
except CancelledError:
trace "Read loop cancelled"
except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg
finally:
warn "Detaching read loop", peer = self.id, connId = conn.oid
for requestId, fut in self.pendingWantBlocksRequests:
if not fut.finished:
fut.complete(
WantBlocksResult[WantBlocksResponse].err(
wantBlocksError(ConnectionClosed, "Read loop exited")
)
)
self.pendingWantBlocksRequests.clear()
if self.sendConn == conn:
self.sendConn = nil
await conn.close()
@ -89,19 +163,61 @@ proc send*(
warn "Unable to get send connection for peer message not sent", peer = self.id
return
trace "Sending message", peer = self.id, connId = conn.oid
try:
await conn.writeLp(protobufEncode(msg))
let msgData = protobufEncode(msg)
let
frameLen = 1 + msgData.len
totalSize = 4 + frameLen
var buf = newSeq[byte](totalSize)
let lenBytes = uint32(frameLen).toBytes(littleEndian)
copyMem(addr buf[0], unsafeAddr lenBytes[0], 4)
buf[4] = mtProtobuf.byte
if msgData.len > 0:
copyMem(addr buf[5], unsafeAddr msgData[0], msgData.len)
await conn.write(buf)
except CatchableError as err:
if self.sendConn == conn:
self.sendConn = nil
raise newException(LPStreamError, "Failed to send message: " & err.msg)
proc sendWantBlocksRequest*(
    self: NetworkPeer, blockRange: BlockRange
): Future[WantBlocksResult[WantBlocksResponse]] {.async: (raises: [CancelledError]).} =
  ## Send a WantBlocks request covering `blockRange` to this peer and wait
  ## for the matching response.
  ##
  ## A fresh request id is allocated and a pending response future is
  ## registered under it BEFORE any I/O, so the read loop can complete it
  ## when the response frame arrives. On success the read loop removes the
  ## pending entry; on every failure path here (no connection, write error,
  ## cancellation) this proc removes the entry so the table cannot leak.
  let requestId = self.nextRequestId
  self.nextRequestId += 1

  let responseFuture = WantBlocksResponseFuture.init("wantBlocksRequest")
  self.pendingWantBlocksRequests[requestId] = responseFuture

  try:
    let conn = await self.connect()
    if isNil(conn):
      self.pendingWantBlocksRequests.del(requestId)
      return err(wantBlocksError(NoConnection, "No connection available"))

    let req = WantBlocksRequest(
      requestId: requestId, cid: blockRange.cid, ranges: blockRange.ranges
    )
    await writeWantBlocksRequest(conn, req)
    return await responseFuture
  except CancelledError as exc:
    self.pendingWantBlocksRequests.del(requestId)
    raise exc
  except CatchableError as exc:
    # NOTE: the exception variable must NOT be named `err` here - a local
    # named `err` would shadow the Result `err` constructor used below.
    self.pendingWantBlocksRequests.del(requestId)
    return err(wantBlocksError(RequestFailed, "WantBlocks request failed: " & exc.msg))
func new*(
T: type NetworkPeer,
peer: PeerId,
connProvider: ConnProvider,
rpcHandler: RPCHandler,
wantBlocksHandler: WantBlocksRequestHandler,
): NetworkPeer =
doAssert(not isNil(connProvider), "should supply connection provider")
@ -109,5 +225,6 @@ func new*(
id: peer,
getConn: connProvider,
handler: rpcHandler,
wantBlocksHandler: wantBlocksHandler,
trackedFutures: TrackedFutures(),
)

View File

@ -1,4 +1,5 @@
import ./peers/peerctxstore
import ./peers/peercontext
import ./peers/peerstats
export peerctxstore, peercontext
export peerctxstore, peercontext, peerstats

View File

@ -7,127 +7,87 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import std/tables
import std/sets
import pkg/libp2p
import pkg/chronos
import pkg/questionable
import ../protobuf/blockexc
import ../protobuf/presence
import ../../blocktype
import ../../logutils
import ./peerstats
const
MinRefreshInterval = 1.seconds
MaxRefreshBackoff = 36 # 36 seconds
MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message
ThroughputScoreBaseline* = 12_500_000.0 # 100 Mbps baseline for throughput scoring
DefaultBatchTimeout* = 30.seconds # fallback when no BDP stats available
TimeoutSafetyFactor* = 3.0
# multiplier to account for variance (network jitter, congestion, GC pauses )
MinBatchTimeout* = 5.seconds # min to avoid too aggressive timeouts
MaxBatchTimeout* = 45.seconds # max to handle high contention scenarios
type BlockExcPeerCtx* = ref object of RootObj
type PeerContext* = ref object of RootObj
id*: PeerId
blocks*: Table[BlockAddress, Presence] # remote peer have list
wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
exchanged*: int # times peer has exchanged with us
refreshInProgress*: bool # indicates if a refresh is in progress
lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
refreshBackoff*: int = 1 # backoff factor for refresh requests
blocksSent*: HashSet[BlockAddress] # blocks sent to peer
blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
lastExchange*: Moment # last time peer has sent us a block
activityTimeout*: Duration
lastSentWants*: HashSet[BlockAddress]
# track what wantList we last sent for delta updates
stats*: PeerPerfStats
proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
let staleness =
self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()
proc new*(T: type PeerContext, id: PeerId): PeerContext =
PeerContext(id: id, stats: PeerPerfStats.new())
if staleness and self.refreshInProgress:
trace "Cleaning up refresh state", peer = self.id
self.refreshInProgress = false
self.refreshBackoff = 1
proc optimalPipelineDepth*(self: PeerContext, batchBytes: uint64): int =
self.stats.optimalPipelineDepth(batchBytes)
staleness
proc batchTimeout*(self: PeerContext, batchBytes: uint64): Duration =
## find optimal timeout for a batch based on BDP
## timeout = min((batchBytes / throughput + RTT) * safetyFactor, maxTimeout)
## it falls back to default if no stats available.
let
throughputOpt = self.stats.throughputBps()
rttOpt = self.stats.avgRttMicros()
proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocksSent
if throughputOpt.isNone or rttOpt.isNone:
return DefaultBatchTimeout
proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.incl(address)
let
throughput = throughputOpt.get()
rttMicros = rttOpt.get()
proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.excl(address)
if throughput == 0:
return DefaultBatchTimeout
proc refreshRequested*(self: BlockExcPeerCtx) =
trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
self.refreshInProgress = true
self.lastRefresh = Moment.now()
let
transferTimeMicros = (batchBytes * 1_000_000) div throughput
totalTimeMicros = transferTimeMicros + rttMicros
timeoutMicros = (totalTimeMicros.float * TimeoutSafetyFactor).uint64
timeout = microseconds(timeoutMicros.int64)
proc refreshReplied*(self: BlockExcPeerCtx) =
self.refreshInProgress = false
self.lastRefresh = Moment.now()
self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)
if timeout < MinBatchTimeout:
return MinBatchTimeout
proc havesUpdated(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
if timeout > MaxBatchTimeout:
return MaxBatchTimeout
proc wantsUpdated*(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
return timeout
proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
# XXX: this is ugly an inefficient, but since those will typically
# be used in "joins", it's better to pay the price here and have
# a linear join than to not do it and have a quadratic join.
toHashSet(self.blocks.keys.toSeq)
proc evalBDPScore*(
self: PeerContext, batchBytes: uint64, currentLoad: int, penalty: float
): float =
let
pipelineDepth = self.optimalPipelineDepth(batchBytes)
capacityScore =
if currentLoad >= pipelineDepth:
100.0
else:
(currentLoad.float / pipelineDepth.float) * 10.0
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocks
throughputScore =
if self.stats.throughputBps().isSome:
let bps = self.stats.throughputBps().get().float
if bps > 0:
ThroughputScoreBaseline / bps
else:
50.0
else:
25.0 # normalization fallback
func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
if presence.address notin self.blocks:
self.havesUpdated()
rttScore =
if self.stats.avgRttMicros().isSome:
self.stats.avgRttMicros().get().float / 10000.0
else:
5.0 # normalization fallback
self.blocks[presence.address] = presence
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
for a in addresses:
self.blocks.del(a)
func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) =
self.cleanPresence(@[address])
proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Adds a block the set of blocks that have been requested to this peer
## (its request schedule).
if self.blocksRequested.len == 0:
self.lastExchange = Moment.now()
self.blocksRequested.incl(address)
proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Removes a block from the set of blocks that have been requested to this peer
## (its request schedule).
self.blocksRequested.excl(address)
proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
let wasRequested = address in self.blocksRequested
self.blocksRequested.excl(address)
self.lastExchange = Moment.now()
wasRequested
proc activityTimer*(
self: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
## This is called by the block exchange when a block is scheduled for this peer.
## If the peer sends no blocks for a while, it is considered inactive/uncooperative
## and the peer is dropped. Note that ANY block that the peer sends will reset this
## timer for all blocks.
##
while true:
let idleTime = Moment.now() - self.lastExchange
if idleTime > self.activityTimeout:
return
await sleepAsync(self.activityTimeout - idleTime)
return capacityScore + throughputScore + rttScore + penalty

View File

@ -9,16 +9,10 @@
{.push raises: [].}
import std/sequtils
import std/tables
import std/algorithm
import std/sequtils
import pkg/chronos
import pkg/libp2p
import ../protobuf/blockexc
import ../../blocktype
import ../../logutils
import ./peercontext
@ -27,63 +21,37 @@ export peercontext
logScope:
topics = "storage peerctxstore"
type
PeerCtxStore* = ref object of RootObj
peers*: OrderedTable[PeerId, BlockExcPeerCtx]
type PeerContextStore* = ref object of RootObj
peers*: OrderedTable[PeerId, PeerContext]
PeersForBlock* = tuple[with: seq[BlockExcPeerCtx], without: seq[BlockExcPeerCtx]]
iterator items*(self: PeerCtxStore): BlockExcPeerCtx =
iterator items*(self: PeerContextStore): PeerContext =
for p in self.peers.values:
yield p
proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool =
proc contains*(a: openArray[PeerContext], b: PeerId): bool =
## Convenience method to check for peer presence
##
a.anyIt(it.id == b)
func peerIds*(self: PeerCtxStore): seq[PeerId] =
func peerIds*(self: PeerContextStore): seq[PeerId] =
toSeq(self.peers.keys)
func contains*(self: PeerCtxStore, peerId: PeerId): bool =
func contains*(self: PeerContextStore, peerId: PeerId): bool =
peerId in self.peers
func add*(self: PeerCtxStore, peer: BlockExcPeerCtx) =
func add*(self: PeerContextStore, peer: PeerContext) =
self.peers[peer.id] = peer
func remove*(self: PeerCtxStore, peerId: PeerId) =
func remove*(self: PeerContextStore, peerId: PeerId) =
self.peers.del(peerId)
func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx =
func get*(self: PeerContextStore, peerId: PeerId): PeerContext =
self.peers.getOrDefault(peerId, nil)
func len*(self: PeerCtxStore): int =
func len*(self: PeerContextStore): int =
self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(address in it.peerHave)
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
# FIXME: this is way slower and can end up leading to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(address in it.wantedBlocks)
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
# FIXME: this is way slower and can end up leading to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid))
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
var res: PeersForBlock = (@[], @[])
for peer in self:
if address in peer:
res.with.add(peer)
else:
res.without.add(peer)
res
proc new*(T: type PeerCtxStore): PeerCtxStore =
proc new*(T: type PeerContextStore): PeerContextStore =
## create new instance of a peer context store
PeerCtxStore(peers: initOrderedTable[PeerId, BlockExcPeerCtx]())
PeerContextStore(peers: initOrderedTable[PeerId, PeerContext]())

View File

@ -0,0 +1,101 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/[deques, options, math]
import pkg/chronos
const
  # Tuning knobs for per-peer performance tracking.
  RttSampleCount* = 8 # sliding-window size for RTT samples
  MinRequestsPerPeer* = 2 # lower clamp applied by optimalPipelineDepth
  MaxRequestsPerPeer* = 4 # upper clamp applied by optimalPipelineDepth
  DefaultRequestsPerPeer* = 2 # used when RTT/throughput stats are unavailable
  DefaultPipelineDepth* = 2 # used when batchBytes is zero
  MinThroughputDuration* = 100.milliseconds # minimum window before throughput is trusted
type PeerPerfStats* = object
  ## Rolling performance statistics for a single peer: a bounded sliding
  ## window of RTT samples plus byte counters used to estimate throughput.
  rttSamples: Deque[uint64] # recent RTTs in microseconds, at most RttSampleCount entries
  totalBytes: uint64 # total bytes recorded via recordRequest
  firstByteTime: Option[Moment] # when the first bytes were recorded
  lastByteTime: Option[Moment] # when the most recent bytes were recorded
proc new*(T: type PeerPerfStats): PeerPerfStats =
  ## Construct an empty stats record with a pre-sized RTT sample window.
  PeerPerfStats(
    firstByteTime: none(Moment),
    lastByteTime: none(Moment),
    totalBytes: 0,
    rttSamples: initDeque[uint64](RttSampleCount),
  )
proc recordRequest*(self: var PeerPerfStats, rttMicros: uint64, bytes: uint64) =
  ## Fold one completed request into the stats: push `rttMicros` into the
  ## bounded sample window (evicting the oldest sample when full) and
  ## account `bytes` toward the throughput counters.
  while self.rttSamples.len >= RttSampleCount:
    discard self.rttSamples.popFirst()
  self.rttSamples.addLast(rttMicros)

  let stamp = Moment.now()
  if self.firstByteTime.isNone:
    self.firstByteTime = some(stamp)
  self.lastByteTime = some(stamp)
  self.totalBytes += bytes
proc avgRttMicros*(self: PeerPerfStats): Option[uint64] =
  ## Mean of the recorded RTT samples in microseconds, or `none` when no
  ## samples have been collected yet. Integer division truncates.
  let count = self.rttSamples.len
  if count == 0:
    return none(uint64)
  var accum: uint64 = 0
  for rtt in self.rttSamples:
    accum += rtt
  some(accum div count.uint64)
proc throughputBps*(self: PeerPerfStats): Option[uint64] =
  ## Estimated throughput in bytes per second over the observed window.
  ## Returns `none` until bytes have been recorded over a span of at least
  ## `MinThroughputDuration` (shorter windows give unstable estimates).
  if self.firstByteTime.isSome and self.lastByteTime.isSome:
    let span = self.lastByteTime.get() - self.firstByteTime.get()
    if span >= MinThroughputDuration:
      let elapsedSecs = span.nanoseconds.float64 / 1_000_000_000.0
      return some((self.totalBytes.float64 / elapsedSecs).uint64)
  none(uint64)
proc optimalPipelineDepth*(self: PeerPerfStats, batchBytes: uint64): int =
  ## Number of requests to keep in flight so the link stays saturated,
  ## derived from the bandwidth-delay product (BDP = throughput x RTT)
  ## divided by the batch size. Falls back to `DefaultPipelineDepth` for a
  ## zero batch size, and to `DefaultRequestsPerPeer` when RTT or
  ## throughput stats are not yet available.
  if batchBytes == 0:
    return DefaultPipelineDepth
  let
    rttOpt = self.avgRttMicros()
    bpsOpt = self.throughputBps()
  if rttOpt.isNone or bpsOpt.isNone:
    return DefaultRequestsPerPeer
  let
    rttSeconds = rttOpt.get().float64 / 1_000_000.0
    bdpBytes = bpsOpt.get().float64 * rttSeconds
    wanted = ceil(bdpBytes / batchBytes.float64).int
  clamp(wanted, MinRequestsPerPeer, MaxRequestsPerPeer)
proc totalBytes*(self: PeerPerfStats): uint64 =
  ## Total number of bytes recorded via `recordRequest`.
  result = self.totalBytes
proc sampleCount*(self: PeerPerfStats): int =
  ## Number of RTT samples currently held in the sliding window.
  result = self.rttSamples.len
proc reset*(self: var PeerPerfStats) =
  ## Drop all recorded statistics, returning the record to its initial state.
  self.totalBytes = 0
  self.firstByteTime = none(Moment)
  self.lastByteTime = none(Moment)
  self.rttSamples.clear()

View File

@ -1,43 +0,0 @@
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/hashes
import std/sequtils
import message
import ../../blocktype
export Message, protobufEncode, protobufDecode
export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
proc hash*(e: WantListEntry): Hash =
hash(e.address)
proc contains*(a: openArray[WantListEntry], b: BlockAddress): bool =
## Convenience method to check for peer precense
##
a.anyIt(it.address == b)
proc `==`*(a: WantListEntry, b: BlockAddress): bool =
return a.address == b
proc `<`*(a, b: WantListEntry): bool =
a.priority < b.priority
proc `==`*(a: BlockPresence, b: BlockAddress): bool =
return a.address == b
proc contains*(a: openArray[BlockPresence], b: BlockAddress): bool =
## Convenience method to check for peer precense
##
a.anyIt(it.address == b)

View File

@ -1,47 +0,0 @@
// Protocol of data exchange between Logos Storage nodes.
// Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md
syntax = "proto3";
package blockexc.message.pb;
message Message {
message Wantlist {
enum WantType {
wantBlock = 0;
wantHave = 1;
}
message Entry {
bytes block = 1; // the block cid
int32 priority = 2; // the priority (normalized). default to 1
bool cancel = 3; // whether this revokes an entry
WantType wantType = 4; // Note: defaults to enum 0, ie Block
bool sendDontHave = 5; // Note: defaults to false
}
repeated Entry entries = 1; // a list of wantlist entries
bool full = 2; // whether this is the full wantlist. default to false
}
message Block {
bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length)
bytes data = 2;
}
enum BlockPresenceType {
presenceHave = 0;
presenceDontHave = 1;
}
message BlockPresence {
bytes cid = 1;
BlockPresenceType type = 2;
}
Wantlist wantlist = 1;
repeated Block payload = 3; // what happened to 2?
repeated BlockPresence blockPresences = 4;
int32 pendingBytes = 5;
}

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014-2018 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,35 +0,0 @@
{.push raises: [].}
import libp2p
import pkg/stint
import pkg/questionable
import pkg/questionable/results
import ./blockexc
import ../../blocktype
export questionable
export stint
export BlockPresenceType
type
PresenceMessage* = blockexc.BlockPresence
Presence* = object
address*: BlockAddress
have*: bool
func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
if bytes.len > 32:
return UInt256.none
UInt256.fromBytesBE(bytes).some
func init*(_: type Presence, message: PresenceMessage): ?Presence =
some Presence(
address: message.address, have: message.`type` == BlockPresenceType.Have
)
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
PresenceMessage(
address: presence.address,
`type`: if presence.have: BlockPresenceType.Have else: BlockPresenceType.DontHave,
)

View File

@ -0,0 +1,49 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/chronos
import ../../units
import ../../storagetypes
const
  # if it hangs longer than this, skip peer and continue
  DefaultWantHaveSendTimeout* = 30.seconds

  # message size limits for protobuf control messages
  MaxMessageSize*: uint32 = 16.MiBs.uint32

  TargetBatchBytes*: uint32 = 4 * 1024 * 1024 # 4 MiB of block payload per batch
  MinBatchSize*: uint32 = 8 # smallest block count per batch (asserted below against MaxBlockSize)
  MaxMetadataSize*: uint32 = 4 * 1024 * 1024 # non-payload budget folded into MaxWantBlocksResponseBytes
  MaxWantBlocksResponseBytes*: uint32 = 4 + MaxMetadataSize + TargetBatchBytes
  MaxBlocksPerBatch*: uint32 = TargetBatchBytes div MinBlockSize.uint32

  # the worst case which is alternating missing blocks (0,2,4...) creates max ranges
  # each range costs 16 bytes (start:u64 + count:u64)
  MaxWantBlocksRequestBytes*: uint32 = (MaxBlocksPerBatch div 2) * 16 + 1024

static:
  # Compile-time consistency checks tying the batch constants together.
  doAssert MinBatchSize * MaxBlockSize.uint32 == TargetBatchBytes,
    "MinBatchSize * MaxBlockSize must equal TargetBatchBytes"
  doAssert MaxBlocksPerBatch == TargetBatchBytes div MinBlockSize.uint32,
    "MaxBlocksPerBatch must equal TargetBatchBytes / MinBlockSize"
  doAssert MaxWantBlocksResponseBytes == 4 + MaxMetadataSize + TargetBatchBytes,
    "MaxWantBlocksResponseBytes must equal 4 + MaxMetadataSize + TargetBatchBytes"

  # should fit worst case sparse batch - max ranges
  const
    worstCaseRanges = MaxBlocksPerBatch div 2
    worstCaseRangeBytes = worstCaseRanges * 16
    fixedOverhead = 64'u32 # request id + cidLen + cid + rangeCount
  doAssert MaxWantBlocksRequestBytes >= worstCaseRangeBytes + fixedOverhead,
    "MaxWantBlocksRequestBytes too small for worst case sparse batch"

View File

@ -9,31 +9,22 @@ import pkg/libp2p/cid
import pkg/questionable
import ../../units
import ../../merkletree
import ../../blocktype
const
MaxBlockSize* = 100.MiBs.uint
MaxMessageSize* = 100.MiBs.uint
type
WantType* = enum
WantBlock = 0
WantHave = 1
WantHave = 0 # Presence query - the only type used with batch transfer protocol
WantListEntry* = object
address*: BlockAddress
# XXX: I think explicit priority is pointless as the peer will request
# the blocks in the order it wants to receive them, and all we have to
# do is process those in the same order as we send them back. It also
# complicates things for no reason at the moment, as the priority is
# always set to 0.
priority*: int32 # The priority (normalized). default to 1
cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block
wantType*: WantType # Defaults to WantHave (only type supported)
sendDontHave*: bool # Note: defaults to false
rangeCount*: uint64
# For range queries: number of sequential blocks starting from address.index (0 = single block)
downloadId*: uint64 # Unique download ID for request/response correlation
WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries
@ -42,24 +33,22 @@ type
BlockDelivery* = object
blk*: Block
address*: BlockAddress
proof*: ?StorageMerkleProof # Present only if `address.leaf` is true
proof*: ?StorageMerkleProof
BlockPresenceType* = enum
Have = 0
DontHave = 1
DontHave = 0
HaveRange = 1
Complete = 2
BlockPresence* = object
address*: BlockAddress
`type`*: BlockPresenceType
StateChannelUpdate* = object
update*: seq[byte] # Signed Nitro state, serialized as JSON
kind*: BlockPresenceType
ranges*: seq[tuple[start: uint64, count: uint64]]
downloadId*: uint64 # echoed for request/response correlation
Message* = object
wantList*: WantList
payload*: seq[BlockDelivery]
blockPresences*: seq[BlockPresence]
pendingBytes*: uint
#
# Encoding Message into seq[byte] in Protobuf format
@ -67,12 +56,8 @@ type
proc write*(pb: var ProtoBuffer, field: int, value: BlockAddress) =
var ipb = initProtoBuffer()
ipb.write(1, value.leaf.uint)
if value.leaf:
ipb.write(2, value.treeCid.data.buffer)
ipb.write(3, value.index.uint64)
else:
ipb.write(4, value.cid.data.buffer)
ipb.write(1, value.treeCid.data.buffer)
ipb.write(2, value.index.uint64)
ipb.finish()
pb.write(field, ipb)
@ -83,6 +68,8 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantListEntry) =
ipb.write(3, value.cancel.uint)
ipb.write(4, value.wantType.uint)
ipb.write(5, value.sendDontHave.uint)
ipb.write(6, value.rangeCount)
ipb.write(7, value.downloadId)
ipb.finish()
pb.write(field, ipb)
@ -94,32 +81,26 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
var ipb = initProtoBuffer()
ipb.write(1, value.blk.cid.data.buffer)
ipb.write(2, value.blk.data)
ipb.write(3, value.address)
if value.address.leaf:
if proof =? value.proof:
ipb.write(4, proof.encode())
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: BlockPresence) =
var ipb = initProtoBuffer()
ipb.write(1, value.address)
ipb.write(2, value.`type`.uint)
ipb.write(2, value.kind.uint)
# Encode ranges if present
for (start, count) in value.ranges:
var rangePb = initProtoBuffer()
rangePb.write(1, start)
rangePb.write(2, count)
rangePb.finish()
ipb.write(3, rangePb)
ipb.write(4, value.downloadId)
ipb.finish()
pb.write(field, ipb)
proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer()
ipb.write(1, value.wantList)
for v in value.payload:
ipb.write(3, v) # is this meant to be 2?
for v in value.blockPresences:
ipb.write(4, v)
ipb.write(5, value.pendingBytes)
ipb.finish()
ipb.buffer
@ -129,27 +110,13 @@ proc protobufEncode*(value: Message): seq[byte] =
proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] =
var
value: BlockAddress
leaf: bool
field: uint64
cidBuf = newSeq[byte]()
if ?pb.getField(1, field):
leaf = bool(field)
if leaf:
var
treeCid: Cid
index: Natural
if ?pb.getField(2, cidBuf):
treeCid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(3, field):
index = field
value = BlockAddress(leaf: true, treeCid: treeCid, index: index)
else:
var cid: Cid
if ?pb.getField(4, cidBuf):
cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
value = BlockAddress(leaf: false, cid: cid)
if ?pb.getField(1, cidBuf):
value.treeCid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(2, field):
value.index = field
ok(value)
@ -168,6 +135,10 @@ proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry]
value.wantType = WantType(field)
if ?pb.getField(5, field):
value.sendDontHave = bool(field)
if ?pb.getField(6, field):
value.rangeCount = field
if ?pb.getField(7, field):
value.downloadId = field
ok(value)
proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] =
@ -182,44 +153,25 @@ proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] =
value.full = bool(field)
ok(value)
proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery] =
var
value = BlockDelivery()
dataBuf = newSeq[byte]()
cidBuf = newSeq[byte]()
cid: Cid
ipb: ProtoBuffer
if ?pb.getField(1, cidBuf):
cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(2, dataBuf):
value.blk =
?Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(3, ipb):
value.address = ?BlockAddress.decode(ipb)
if value.address.leaf:
var proofBuf = newSeq[byte]()
if ?pb.getField(4, proofBuf):
let proof =
?StorageMerkleProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
value.proof = proof.some
else:
value.proof = StorageMerkleProof.none
else:
value.proof = StorageMerkleProof.none
ok(value)
proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence] =
var
value = BlockPresence()
field: uint64
ipb: ProtoBuffer
rangelist: seq[seq[byte]]
if ?pb.getField(1, ipb):
value.address = ?BlockAddress.decode(ipb)
if ?pb.getField(2, field):
value.`type` = BlockPresenceType(field)
value.kind = BlockPresenceType(field)
if ?pb.getRepeatedField(3, rangelist):
for item in rangelist:
var rangePb = initProtoBuffer(item)
var start, count: uint64
discard ?rangePb.getField(1, start)
discard ?rangePb.getField(2, count)
value.ranges.add((start, count))
if ?pb.getField(4, field):
value.downloadId = field
ok(value)
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
@ -230,11 +182,7 @@ proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
sublist: seq[seq[byte]]
if ?pb.getField(1, ipb):
value.wantList = ?WantList.decode(ipb)
if ?pb.getRepeatedField(3, sublist): # meant to be 2?
for item in sublist:
value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
if ?pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
discard ?pb.getField(5, value.pendingBytes)
ok(value)

View File

@ -0,0 +1,31 @@
{.push raises: [].}
import libp2p
import pkg/questionable
import ./message
import ../../blocktype
export questionable
export BlockPresenceType
type
  # Alias for the wire-level presence message type.
  PresenceMessage* = message.BlockPresence

  Presence* = object
    ## Decoded view of a `BlockPresence` wire message.
    address*: BlockAddress # block address the presence refers to
    have*: bool # true when the kind is HaveRange or Complete
    presenceType*: BlockPresenceType # raw presence kind from the wire
    ranges*: seq[tuple[start: uint64, count: uint64]] # block-index ranges the peer reports
func init*(_: type Presence, message: PresenceMessage): ?Presence =
  ## Convert a wire-level `PresenceMessage` into a `Presence`. The `have`
  ## flag is derived from the kind: both HaveRange and Complete mean the
  ## peer holds (some of) the referenced blocks.
  let holdsBlocks =
    message.kind == BlockPresenceType.HaveRange or
    message.kind == BlockPresenceType.Complete
  some Presence(
    address: message.address,
    presenceType: message.kind,
    have: holdsBlocks,
    ranges: message.ranges,
  )
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
  ## Convert a local `Presence` back into its wire-level message form.
  ## Inverse of `Presence.init` (the derived `have` flag is dropped).
  PresenceMessage(
    address: presence.address, kind: presence.presenceType, ranges: presence.ranges
  )

View File

@ -0,0 +1,637 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/options
import pkg/chronos
import pkg/libp2p
import pkg/libp2p/multicodec
import pkg/stew/endians2
import pkg/results
import ../../blocktype
import ../../merkletree
import ../../logutils
import ../../errors
import ./message
import ./constants
export message, results, errors
logScope:
topics = "storage wantblocks"
const
SizeRequestId = sizeof(uint64)
SizeCidLen = sizeof(uint16)
SizeRangeCount = sizeof(uint32)
SizeRange = sizeof(uint64) + sizeof(uint64) # start + count
SizeBlockCount = sizeof(uint32)
SizeBlockIndex = sizeof(uint64)
SizeDataLen = sizeof(uint32)
SizeProofLen = sizeof(uint16)
SizeNodeLen = sizeof(uint16)
SizeMcodec = sizeof(uint64)
SizeNleaves = sizeof(uint64)
SizePathCount = sizeof(uint32)
SizeProofHeader = SizeMcodec + SizeBlockIndex + SizeNleaves + SizePathCount
SizeMetaLen = sizeof(uint32)
MaxMerkleProofDepth = 64
type
MessageType* = enum
mtProtobuf = 0x00 # Protobuf control messages (want lists, presence)
mtWantBlocksRequest = 0x01 # WantBlocks request
mtWantBlocksResponse = 0x02 # WantBlocks response
WantBlocksRequest* = object
requestId*: uint64
cid*: Cid
ranges*: seq[tuple[start: uint64, count: uint64]]
SharedBlocksBuffer* = ref object
data*: seq[byte]
BlockEntry* = object
index*: uint64
cid*: Cid
dataOffset*: int
dataLen*: int
proof*: StorageMerkleProof
WantBlocksResponse* = object
requestId*: uint64 # echoed request ID
treeCid*: Cid
blocks*: seq[BlockEntry]
sharedBuffer*: SharedBlocksBuffer
BlockDeliveryView* = object
cid*: Cid
address*: BlockAddress
proof*: Option[StorageMerkleProof]
sharedBuf*: SharedBlocksBuffer
dataOffset*: int
dataLen*: int
BlockMetadata =
tuple[index: uint64, cid: Cid, dataLen: uint32, proof: Option[StorageMerkleProof]]
proc frameProtobufMessage*(data: openArray[byte]): seq[byte] =
  ## Wrap a serialized protobuf payload in the transport framing:
  ## [u32 LE frame length][mtProtobuf type byte][payload bytes].
  let payloadLen = data.len
  result = newSeqUninit[byte](4 + 1 + payloadLen)
  let lenField = (1 + payloadLen).uint32.toLE
  copyMem(addr result[0], unsafeAddr lenField, 4)
  result[4] = mtProtobuf.byte
  if payloadLen > 0:
    copyMem(addr result[5], unsafeAddr data[0], payloadLen)
proc decodeProofBinary*(data: openArray[byte]): WantBlocksResult[StorageMerkleProof] =
  ## Decode a binary Merkle proof produced by `encodeProofBinaryInto`.
  ## Layout (all integers little-endian):
  ## [u64 mcodec][u64 leaf index][u64 nleaves][u32 path count]
  ## followed by `path count` nodes, each [u16 length][bytes].
  ## Every length field is validated against the buffer before reading.
  if data.len < SizeProofHeader:
    return err(wantBlocksError(ProofTooShort, "Proof data too short"))
  var offset = 0
  let
    mcodecVal = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
    mcodec = MultiCodec.codec(mcodecVal.int)
  if mcodec == InvalidMultiCodec:
    return err(wantBlocksError(InvalidCodec, "Invalid MultiCodec: " & $mcodecVal))
  offset += 8
  let index = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian).int
  offset += 8
  let nleaves = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian).int
  offset += 8
  let pathCount =
    uint32.fromBytes(data.toOpenArray(offset, offset + 3), littleEndian).int
  offset += 4
  # cap proof depth so untrusted input cannot force huge allocations
  if pathCount > MaxMerkleProofDepth:
    return err(
      wantBlocksError(ProofPathTooLarge, "Proof path count too large: " & $pathCount)
    )
  var nodes = newSeq[seq[byte]](pathCount)
  for i in 0 ..< pathCount:
    # nodes are length-prefixed; check both prefix and payload bounds
    if offset + SizeNodeLen > data.len:
      return err(wantBlocksError(ProofTruncated, "Proof truncated at node " & $i))
    let nodeLen =
      uint16.fromBytes(data.toOpenArray(offset, offset + 1), littleEndian).int
    offset += 2
    if offset + nodeLen > data.len:
      return err(wantBlocksError(ProofTruncated, "Proof truncated at node data " & $i))
    if nodeLen == 0:
      nodes[i] = @[]
    else:
      nodes[i] = @(data.toOpenArray(offset, offset + nodeLen - 1))
    offset += nodeLen
  ok(
    ?StorageMerkleProof.init(mcodec, index, nleaves, nodes).mapErr(
      proc(e: auto): ref WantBlocksError =
        wantBlocksError(ProofCreationFailed, "Failed to create proof: " & e.msg)
    )
  )
proc calcRequestSize*(req: WantBlocksRequest): int {.inline.} =
  ## Exact number of bytes `encodeRequestInto` will write for `req`:
  ## fixed header fields plus the variable-length CID and the range list.
  result = SizeRequestId + SizeCidLen + SizeRangeCount
  result += req.cid.data.buffer.len
  result += req.ranges.len * SizeRange
proc encodeRequestInto*(
    req: WantBlocksRequest, buf: var openArray[byte], startOffset: int
): int =
  ## Serialize `req` into `buf` starting at `startOffset`; returns the
  ## number of bytes written. Layout (little-endian):
  ## [u64 requestId][u16 cidLen][cid bytes][u32 rangeCount]
  ## then per range: [u64 start][u64 count].
  ## `buf` must be pre-sized via `calcRequestSize` — no bounds checks here.
  var offset = startOffset
  let reqIdLE = req.requestId.toLE
  copyMem(addr buf[offset], unsafeAddr reqIdLE, 8)
  offset += 8
  let
    cidBytes = req.cid.data.buffer
    cidLenLE = cidBytes.len.uint16.toLE
  copyMem(addr buf[offset], unsafeAddr cidLenLE, 2)
  offset += 2
  if cidBytes.len > 0:
    copyMem(addr buf[offset], unsafeAddr cidBytes[0], cidBytes.len)
  offset += cidBytes.len
  let rangeCountLE = req.ranges.len.uint32.toLE
  copyMem(addr buf[offset], unsafeAddr rangeCountLE, 4)
  offset += 4
  for (start, count) in req.ranges:
    let startLE = start.toLE
    copyMem(addr buf[offset], unsafeAddr startLE, 8)
    offset += 8
    let countLE = count.toLE
    copyMem(addr buf[offset], unsafeAddr countLE, 8)
    offset += 8
  return offset - startOffset
proc decodeRequest*(data: openArray[byte]): WantBlocksResult[WantBlocksRequest] =
  ## Parse a WantBlocks request produced by `encodeRequestInto`,
  ## validating every length field against the actual buffer size.
  if data.len < SizeRequestId + SizeCidLen + SizeRangeCount:
    return err(wantBlocksError(RequestTooShort, "Request too short"))
  var offset = 0
  let requestId = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
  offset += 8
  let cidLen = uint16.fromBytes(data.toOpenArray(offset, offset + 1), littleEndian).int
  offset += 2
  if cidLen == 0:
    return err(wantBlocksError(InvalidCid, "CID length is zero"))
  if offset + cidLen + SizeRangeCount > data.len:
    return err(wantBlocksError(RequestTruncated, "Request truncated (CID)"))
  let cid = ?Cid.init(data.toOpenArray(offset, offset + cidLen - 1)).mapErr(
    proc(e: auto): ref WantBlocksError =
      wantBlocksError(InvalidCid, "Invalid CID: " & $e)
  )
  offset += cidLen
  let rangeCount =
    uint32.fromBytes(data.toOpenArray(offset, offset + 3), littleEndian).int
  offset += 4
  # NOTE(review): assumes 64-bit `int`; on a 32-bit target
  # rangeCount * SizeRange could overflow — confirm supported platforms.
  if offset + (rangeCount * SizeRange) > data.len:
    return err(wantBlocksError(RequestTruncated, "Request truncated (ranges)"))
  var ranges = newSeqOfCap[tuple[start: uint64, count: uint64]](rangeCount)
  for _ in 0 ..< rangeCount:
    let start = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
    offset += 8
    let count = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
    offset += 8
    ranges.add((start, count))
  ok(WantBlocksRequest(requestId: requestId, cid: cid, ranges: ranges))
proc calcProofBinarySize*(proof: StorageMerkleProof): int {.inline.} =
  ## Size in bytes of the binary encoding written by
  ## `encodeProofBinaryInto`: fixed header plus one length-prefixed
  ## entry per path node.
  var total = SizeProofHeader
  for node in proof.path:
    total += SizeNodeLen + node.len
  total
proc calcResponseMetadataSize*(treeCid: Cid, blocks: seq[BlockDelivery]): int =
  ## Byte size of the metadata section written by
  ## `encodeResponseMetadataInto` (block payload data is not included).
  var size = SizeRequestId + SizeCidLen + treeCid.data.buffer.len + SizeBlockCount
  for bd in blocks:
    size +=
      SizeBlockIndex + SizeCidLen + bd.blk.cid.data.buffer.len + SizeDataLen +
      SizeProofLen
    if bd.proof.isSome:
      size += calcProofBinarySize(bd.proof.get)
  size
proc encodeProofBinaryInto*(
    proof: StorageMerkleProof, buf: var openArray[byte], startOffset: int
): int =
  ## Serialize `proof` into `buf` at `startOffset`; returns bytes written.
  ## Layout mirrors `decodeProofBinary`:
  ## [u64 mcodec][u64 index][u64 nleaves][u32 path count]
  ## then per node: [u16 length][bytes].
  ## `buf` must be pre-sized via `calcProofBinarySize` — no bounds checks.
  var offset = startOffset
  let mcodecLE = proof.mcodec.uint64.toLE
  copyMem(addr buf[offset], unsafeAddr mcodecLE, 8)
  offset += 8
  let indexLE = proof.index.uint64.toLE
  copyMem(addr buf[offset], unsafeAddr indexLE, 8)
  offset += 8
  let nleavesLE = proof.nleaves.uint64.toLE
  copyMem(addr buf[offset], unsafeAddr nleavesLE, 8)
  offset += 8
  let pathCountLE = proof.path.len.uint32.toLE
  copyMem(addr buf[offset], unsafeAddr pathCountLE, 4)
  offset += 4
  for node in proof.path:
    let nodeLenLE = node.len.uint16.toLE
    copyMem(addr buf[offset], unsafeAddr nodeLenLE, 2)
    offset += 2
    if node.len > 0:
      copyMem(addr buf[offset], unsafeAddr node[0], node.len)
    offset += node.len
  return offset - startOffset
proc encodeResponseMetadataInto*(
    requestId: uint64,
    treeCid: Cid,
    blocks: seq[BlockDelivery],
    buf: var openArray[byte],
    startOffset: int,
): int =
  ## Serialize the response metadata section into `buf` at `startOffset`;
  ## returns bytes written. Layout (little-endian):
  ## [u64 requestId][u16 treeCidLen][treeCid][u32 blockCount]
  ## then per block:
  ## [u64 index][u16 cidLen][cid][u32 dataLen][u16 proofLen][proof bytes].
  ## A proofLen of 0 means no proof. Block payload bytes are written
  ## separately by the caller. `buf` must be pre-sized via
  ## `calcResponseMetadataSize` — no bounds checks here.
  var offset = startOffset
  let reqIdLE = requestId.toLE
  copyMem(addr buf[offset], unsafeAddr reqIdLE, 8)
  offset += 8
  let
    treeCidBytes = treeCid.data.buffer
    treeCidLenLE = treeCidBytes.len.uint16.toLE
  copyMem(addr buf[offset], unsafeAddr treeCidLenLE, 2)
  offset += 2
  if treeCidBytes.len > 0:
    copyMem(addr buf[offset], unsafeAddr treeCidBytes[0], treeCidBytes.len)
  offset += treeCidBytes.len
  let blockCountLE = blocks.len.uint32.toLE
  copyMem(addr buf[offset], unsafeAddr blockCountLE, 4)
  offset += 4
  for bd in blocks:
    let
      index = uint64(bd.address.index)
      indexLE = index.toLE
    copyMem(addr buf[offset], unsafeAddr indexLE, 8)
    offset += 8
    let
      blockCidBytes = bd.blk.cid.data.buffer
      blockCidLenLE = blockCidBytes.len.uint16.toLE
    copyMem(addr buf[offset], unsafeAddr blockCidLenLE, 2)
    offset += 2
    if blockCidBytes.len > 0:
      copyMem(addr buf[offset], unsafeAddr blockCidBytes[0], blockCidBytes.len)
    offset += blockCidBytes.len
    # only the length is written here; the payload goes after the metadata
    let dataLenLE = bd.blk.data[].len.uint32.toLE
    copyMem(addr buf[offset], unsafeAddr dataLenLE, 4)
    offset += 4
    if bd.proof.isSome:
      let
        proofSize = calcProofBinarySize(bd.proof.get)
        proofLenLE = proofSize.uint16.toLE
      copyMem(addr buf[offset], unsafeAddr proofLenLE, 2)
      offset += 2
      offset += encodeProofBinaryInto(bd.proof.get, buf, offset)
    else:
      # zero proof length marks "no proof" for the decoder
      let zeroLE = 0'u16.toLE
      copyMem(addr buf[offset], unsafeAddr zeroLE, 2)
      offset += 2
  return offset - startOffset
proc decodeResponseMetadata(
    data: openArray[byte]
): WantBlocksResult[(uint64, Cid, seq[BlockMetadata])] =
  ## Parse the metadata section produced by `encodeResponseMetadataInto`.
  ## Returns (requestId, treeCid, per-block metadata). Every length field
  ## is validated against the buffer and against protocol limits
  ## (MaxBlocksPerBatch, MaxBlockSize) before use, so malformed or
  ## malicious input fails with a typed error instead of reading OOB.
  if data.len < SizeRequestId + SizeCidLen + SizeBlockCount:
    return err(wantBlocksError(MetadataTooShort, "Metadata too short"))
  var offset = 0
  let requestId = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
  offset += 8
  let cidLen = uint16.fromBytes(data.toOpenArray(offset, offset + 1), littleEndian).int
  offset += 2
  if cidLen == 0:
    return err(wantBlocksError(InvalidCid, "Tree CID length is zero"))
  if offset + cidLen + SizeBlockCount > data.len:
    return err(wantBlocksError(MetadataTruncated, "Metadata truncated at CID"))
  let treeCid = ?Cid.init(data.toOpenArray(offset, offset + cidLen - 1)).mapErr(
    proc(e: auto): ref WantBlocksError =
      wantBlocksError(InvalidCid, "Invalid CID: " & $e)
  )
  offset += cidLen
  let blockCount = uint32.fromBytes(data.toOpenArray(offset, offset + 3), littleEndian)
  offset += 4
  # bound the allocation below before trusting the advertised count
  if blockCount > MaxBlocksPerBatch:
    return err(
      wantBlocksError(
        TooManyBlocks,
        "Block count " & $blockCount & " exceeds maximum " & $MaxBlocksPerBatch,
      )
    )
  var blocksMeta = newSeq[BlockMetadata](blockCount.int)
  for i in 0 ..< blockCount:
    if offset + SizeBlockIndex > data.len:
      return err(wantBlocksError(MetadataTruncated, "Metadata truncated at block " & $i))
    let index = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
    offset += 8
    if offset + SizeCidLen > data.len:
      return err(
        wantBlocksError(MetadataTruncated, "Metadata truncated at block cidLen " & $i)
      )
    let blockCidLen =
      uint16.fromBytes(data.toOpenArray(offset, offset + 1), littleEndian).int
    offset += 2
    if blockCidLen == 0:
      return err(wantBlocksError(InvalidCid, "Block CID length is zero at block " & $i))
    if offset + blockCidLen > data.len:
      return err(wantBlocksError(MetadataTruncated, "Metadata truncated at block CID " & $i))
    let blockCid = ?Cid.init(data.toOpenArray(offset, offset + blockCidLen - 1)).mapErr(
      proc(e: auto): ref WantBlocksError =
        wantBlocksError(InvalidCid, "Invalid block CID at " & $i & ": " & $e)
    )
    offset += blockCidLen
    if offset + SizeDataLen > data.len:
      return err(wantBlocksError(MetadataTruncated, "Metadata truncated at dataLen " & $i))
    let dataLen = uint32.fromBytes(data.toOpenArray(offset, offset + 3), littleEndian)
    offset += 4
    if dataLen > MaxBlockSize.uint32:
      return err(
        wantBlocksError(
          DataSizeMismatch,
          "Block dataLen exceeds MaxBlockSize at " & $i & ": " & $dataLen,
        )
      )
    if offset + SizeProofLen > data.len:
      return err(wantBlocksError(MetadataTruncated, "Metadata truncated at proofLen " & $i))
    let proofLen =
      uint16.fromBytes(data.toOpenArray(offset, offset + 1), littleEndian).int
    offset += 2
    # proofLen == 0 means the block carries no Merkle proof
    var proof: Option[StorageMerkleProof] = none(StorageMerkleProof)
    if proofLen > 0:
      if offset + proofLen > data.len:
        return err(wantBlocksError(MetadataTruncated, "Metadata truncated at proof " & $i))
      let proofResult =
        decodeProofBinary(data.toOpenArray(offset, offset + proofLen - 1))
      if proofResult.isErr:
        return err(
          wantBlocksError(
            ProofDecodeFailed,
            "Failed to decode proof at block " & $i & ": " & proofResult.error.msg,
          )
        )
      proof = some(proofResult.get)
      offset += proofLen
    blocksMeta[i] = (index: index, cid: blockCid, dataLen: dataLen, proof: proof)
  ok((requestId, treeCid, blocksMeta))
proc writeWantBlocksResponse*(
    conn: Connection, requestId: uint64, treeCid: Cid, blocks: seq[BlockDelivery]
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Frame and send a WantBlocks response in a single write:
  ## [u32 frameLen][type byte][u32 metaLen][metadata][concatenated block data].
  ## Responses exceeding MaxMetadataSize or MaxWantBlocksResponseBytes are
  ## dropped with a warning — nothing is written in that case.
  let metaSize = calcResponseMetadataSize(treeCid, blocks)
  if metaSize > MaxMetadataSize.int:
    warn "Metadata exceeds limit, skipping response",
      metaSize = metaSize, limit = MaxMetadataSize, blockCount = blocks.len
    return
  var totalDataSize: uint64 = 0
  for bd in blocks:
    totalDataSize += bd.blk.data[].len.uint64
  let contentSize = SizeMetaLen.uint64 + metaSize.uint64 + totalDataSize
  if contentSize > MaxWantBlocksResponseBytes:
    warn "Response exceeds size limit, skipping",
      contentSize = contentSize,
      limit = MaxWantBlocksResponseBytes,
      blockCount = blocks.len
    return
  let
    frameLen = 1 + contentSize.int
    totalSize = 4 + frameLen
  var
    buf = newSeqUninit[byte](totalSize)
    offset = 0
  let frameLenLE = frameLen.uint32.toLE
  copyMem(addr buf[offset], unsafeAddr frameLenLE, 4)
  offset += 4
  buf[offset] = mtWantBlocksResponse.byte
  offset += 1
  let metaSizeLE = metaSize.uint32.toLE
  copyMem(addr buf[offset], unsafeAddr metaSizeLE, 4)
  offset += 4
  offset += encodeResponseMetadataInto(requestId, treeCid, blocks, buf, offset)
  # append raw block payloads in the same order as their metadata entries
  for bd in blocks:
    if bd.blk.data[].len > 0:
      copyMem(addr buf[offset], unsafeAddr bd.blk.data[][0], bd.blk.data[].len)
    offset += bd.blk.data[].len
  await conn.write(buf)
proc writeWantBlocksRequest*(
    conn: Connection, req: WantBlocksRequest
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Frame and send a WantBlocks request over `conn`:
  ## [u32 LE frame length][message-type byte][binary-encoded request].
  let payloadLen = calcRequestSize(req)
  var frame = newSeqUninit[byte](4 + 1 + payloadLen)
  let lenField = (1 + payloadLen).uint32.toLE
  copyMem(addr frame[0], unsafeAddr lenField, 4)
  frame[4] = mtWantBlocksRequest.byte
  discard encodeRequestInto(req, frame, 5)
  await conn.write(frame)
proc readWantBlocksResponse*(
    conn: Connection, dataLen: int
): Future[WantBlocksResult[WantBlocksResponse]] {.async: (raises: [CancelledError]).} =
  ## Read and decode a WantBlocks response from `conn`. `dataLen` is the
  ## remaining frame length (after the type byte). All block payloads are
  ## read into one shared buffer; the returned entries reference slices of
  ## it via (offset, len) rather than copying. Stream errors are reported
  ## as RequestFailed results, not exceptions.
  try:
    let totalLen = dataLen.uint32
    if totalLen > MaxWantBlocksResponseBytes:
      return err(wantBlocksError(ResponseTooLarge, "Response too large: " & $totalLen))
    var lenBuf: array[4, byte]
    await conn.readExactly(addr lenBuf[0], 4)
    let metaLen = uint32.fromBytes(lenBuf, littleEndian)
    if metaLen > MaxMetadataSize:
      return err(wantBlocksError(MetadataTooLarge, "Metadata too large: " & $metaLen))
    var metaBuf = newSeqUninit[byte](metaLen.int)
    if metaLen > 0:
      await conn.readExactly(addr metaBuf[0], metaLen.int)
    let (requestId, treeCid, blocksMeta) = ?decodeResponseMetadata(metaBuf)
    # sum the advertised per-block sizes to cross-check the frame length
    var totalDataSize: uint64 = 0
    for bm in blocksMeta:
      totalDataSize += bm.dataLen.uint64
    if totalLen < SizeMetaLen.uint32 + metaLen:
      return err(
        wantBlocksError(
          DataSizeMismatch,
          "Invalid lengths: totalLen=" & $totalLen & " metaLen=" & $metaLen,
        )
      )
    # NOTE(review): this `dataLen` shadows the proc parameter of the same
    # name — consider renaming for clarity.
    let dataLen = totalLen - SizeMetaLen.uint32 - metaLen
    if dataLen.uint64 != totalDataSize:
      return err(
        wantBlocksError(
          DataSizeMismatch,
          "Data size mismatch: expected " & $totalDataSize & ", got " & $dataLen,
        )
      )
    var sharedBuf = SharedBlocksBuffer(data: newSeqUninit[byte](totalDataSize.int))
    if totalDataSize > 0:
      await conn.readExactly(addr sharedBuf.data[0], totalDataSize.int)
    var response: WantBlocksResponse
    response.requestId = requestId
    response.treeCid = treeCid
    response.sharedBuffer = sharedBuf
    response.blocks = newSeq[BlockEntry](blocksMeta.len)
    # assign each entry its slice of the shared buffer, in metadata order
    var offset = 0
    for i, bm in blocksMeta:
      let blockDataLen = bm.dataLen.int
      # a missing proof leaves a default-initialized StorageMerkleProof
      var proof: StorageMerkleProof
      if bm.proof.isSome:
        proof = bm.proof.get
      response.blocks[i] = BlockEntry(
        index: bm.index,
        cid: bm.cid,
        dataOffset: offset,
        dataLen: blockDataLen,
        proof: proof,
      )
      offset += blockDataLen
    return ok(response)
  except LPStreamError as e:
    return err(wantBlocksError(RequestFailed, e.msg))
proc readWantBlocksRequest*(
    conn: Connection, dataLen: int
): Future[WantBlocksResult[WantBlocksRequest]] {.async: (raises: [CancelledError]).} =
  ## Read `dataLen` bytes of a framed WantBlocks request from `conn` and
  ## decode it. Stream failures surface as RequestFailed results rather
  ## than exceptions.
  try:
    if dataLen.uint32 > MaxWantBlocksRequestBytes:
      return err(wantBlocksError(RequestTooLarge, "Request too large: " & $dataLen))
    var payload = newSeqUninit[byte](dataLen)
    if dataLen > 0:
      await conn.readExactly(addr payload[0], dataLen)
    return decodeRequest(payload)
  except LPStreamError as e:
    return err(wantBlocksError(RequestFailed, e.msg))
proc toBlockDeliveryView*(
    entry: BlockEntry, treeCid: Cid, sharedBuf: SharedBlocksBuffer
): WantBlocksResult[BlockDeliveryView] =
  ## Build a zero-copy view of `entry`'s data inside `sharedBuf`, after
  ## validating that the (offset, len) slice actually lies within the
  ## buffer. Fails with DataSizeMismatch on any out-of-bounds slice.
  if entry.dataOffset < 0 or entry.dataLen < 0:
    return err(
      wantBlocksError(
        DataSizeMismatch,
        "Invalid offset or length: offset=" & $entry.dataOffset & " len=" &
          $entry.dataLen,
      )
    )
  if entry.dataOffset + entry.dataLen > sharedBuf.data.len:
    return err(
      wantBlocksError(
        DataSizeMismatch,
        "Block data exceeds buffer: offset=" & $entry.dataOffset & " len=" &
          $entry.dataLen & " bufLen=" & $sharedBuf.data.len,
      )
    )
  ok(
    BlockDeliveryView(
      cid: entry.cid,
      address: BlockAddress(treeCid: treeCid, index: entry.index.Natural),
      # NOTE(review): a block decoded without a proof still wraps a
      # default-initialized proof in `some` here — confirm consumers
      # tolerate an empty proof.
      proof: some(entry.proof),
      sharedBuf: sharedBuf,
      dataOffset: entry.dataOffset,
      dataLen: entry.dataLen,
    )
  )
proc toBlockDelivery*(view: BlockDeliveryView): BlockDelivery =
  ## Materialize a standalone `BlockDelivery` from `view` by copying the
  ## block's bytes out of the shared response buffer.
  var blockData: ref seq[byte]
  new(blockData)
  blockData[] = newSeqUninit[byte](view.dataLen)
  if view.dataLen > 0:
    copyMem(
      addr blockData[][0], unsafeAddr view.sharedBuf.data[view.dataOffset],
      view.dataLen,
    )
  BlockDelivery(
    blk: Block(cid: view.cid, data: blockData), address: view.address, proof: view.proof
  )

View File

@ -0,0 +1,206 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/algorithm
import pkg/libp2p/cid
import ../blocktype
type
BlockRange* = object
cid*: Cid
ranges*: seq[tuple[start: uint64, count: uint64]]
BlockAvailabilityKind* = enum
bakUnknown
bakComplete
bakRanges
bakBitmap
BlockAvailability* = object
case kind*: BlockAvailabilityKind
of bakUnknown:
discard
of bakComplete:
discard
of bakRanges:
ranges*: seq[tuple[start: uint64, count: uint64]]
of bakBitmap:
bitmap*: seq[byte]
totalBlocks*: uint64
proc unknown*(_: type BlockAvailability): BlockAvailability =
  ## Availability with no information about which blocks are held.
  BlockAvailability(kind: bakUnknown)

proc complete*(_: type BlockAvailability): BlockAvailability =
  ## Availability indicating every block is held.
  BlockAvailability(kind: bakComplete)

proc fromRanges*(
    _: type BlockAvailability, ranges: seq[tuple[start: uint64, count: uint64]]
): BlockAvailability =
  ## Availability described by explicit (start, count) block ranges.
  BlockAvailability(kind: bakRanges, ranges: ranges)

proc fromBitmap*(
    _: type BlockAvailability, bitmap: seq[byte], totalBlocks: uint64
): BlockAvailability =
  ## Availability described by a bitmap: bit i (LSB-first within each
  ## byte) set means block i is present; `totalBlocks` bounds the index
  ## space.
  BlockAvailability(kind: bakBitmap, bitmap: bitmap, totalBlocks: totalBlocks)
proc hasBlock*(avail: BlockAvailability, index: uint64): bool =
  ## True if `avail` is known to contain block `index`.
  ## bakUnknown conservatively reports false; bakComplete always true.
  case avail.kind
  of bakUnknown:
    false
  of bakComplete:
    true
  of bakRanges:
    for (start, count) in avail.ranges:
      # skip ranges whose end would overflow uint64
      if count > high(uint64) - start:
        continue
      if index >= start and index < start + count:
        return true
    false
  of bakBitmap:
    if index >= avail.totalBlocks:
      return false
    let
      byteIdx = index div 8
      bitIdx = index mod 8
    # tolerate a bitmap shorter than totalBlocks/8: missing bytes read as 0
    if byteIdx.int >= avail.bitmap.len:
      return false
    (avail.bitmap[byteIdx] and (1'u8 shl bitIdx)) != 0
proc hasRange*(avail: BlockAvailability, start: uint64, count: uint64): bool =
  ## True if `avail` is known to contain every block in the half-open
  ## range [start, start + count). A range whose end would overflow
  ## uint64 yields false.
  ## NOTE(review): count == 0 is vacuously true for bakComplete/bakRanges/
  ## bakBitmap — confirm callers never pass empty ranges expecting false.
  if count > high(uint64) - start:
    return false
  case avail.kind
  of bakUnknown:
    false
  of bakComplete:
    true
  of bakRanges:
    let reqEnd = start + count
    for (rangeStart, rangeCount) in avail.ranges:
      # skip stored ranges whose end would overflow
      if rangeCount > high(uint64) - rangeStart:
        continue
      let rangeEnd = rangeStart + rangeCount
      # requires full containment within a single stored range
      if start >= rangeStart and reqEnd <= rangeEnd:
        return true
    false
  of bakBitmap:
    for i in start ..< start + count:
      if not avail.hasBlock(i):
        return false
    true
proc hasAnyInRange*(avail: BlockAvailability, start: uint64, count: uint64): bool =
  ## True if `avail` is known to contain at least one block in the
  ## half-open range [start, start + count).
  ## An empty query range (count == 0) contains no blocks and always
  ## yields false (previously the bakComplete and bakRanges branches could
  ## report true for an empty range); a range whose end would overflow
  ## uint64 is treated as invalid and also yields false.
  if count == 0:
    return false
  if count > high(uint64) - start:
    return false
  case avail.kind
  of bakUnknown:
    false
  of bakComplete:
    true
  of bakRanges:
    let reqEnd = start + count
    for (rangeStart, rangeCount) in avail.ranges:
      # empty stored ranges contain nothing; overflowing ones are invalid
      if rangeCount == 0 or rangeCount > high(uint64) - rangeStart:
        continue
      let rangeEnd = rangeStart + rangeCount
      # half-open interval overlap test
      if start < rangeEnd and rangeStart < reqEnd:
        return true
    false
  of bakBitmap:
    for i in start ..< start + count:
      if avail.hasBlock(i):
        return true
    false
proc mergeRanges(
    ranges: seq[tuple[start: uint64, count: uint64]]
): seq[tuple[start: uint64, count: uint64]] =
  ## Sort `ranges` by start and coalesce overlapping or adjacent entries
  ## into a minimal, sorted, non-overlapping list.
  ## Entries whose end would overflow uint64 are skipped individually;
  ## previously an overflowing first entry aborted the whole merge and
  ## silently discarded every remaining valid range.
  var sorted = ranges
  sorted.sort(
    proc(a, b: tuple[start: uint64, count: uint64]): int =
      cmp(a.start, b.start)
  )
  result = @[]
  var
    haveCurrent = false
    current: tuple[start: uint64, count: uint64]
  for r in sorted:
    # drop ranges whose end would wrap around uint64
    if r.count > high(uint64) - r.start:
      continue
    if not haveCurrent:
      current = r
      haveCurrent = true
      continue
    let currentEnd = current.start + current.count
    if r.start <= currentEnd:
      # overlapping or adjacent: extend `current` if `r` reaches further
      let rEnd = r.start + r.count
      if rEnd > currentEnd:
        current.count = rEnd - current.start
    else:
      result.add(current)
      current = r
  if haveCurrent:
    result.add(current)
proc merge*(current: BlockAvailability, other: BlockAvailability): BlockAvailability =
  ## merge by keeping the union of all known blocks
  ## Complete absorbs everything; Unknown contributes nothing. Any mix of
  ## ranges and bitmaps is normalized to ranges and coalesced via
  ## mergeRanges, so the result is always bakRanges in the mixed cases.
  if current.kind == bakComplete or other.kind == bakComplete:
    return BlockAvailability.complete()
  if current.kind == bakUnknown:
    return other
  if other.kind == bakUnknown:
    return current

  # Convert a bitmap availability into an equivalent sorted range list by
  # scanning for runs of set bits.
  proc bitmapToRanges(
      avail: BlockAvailability
  ): seq[tuple[start: uint64, count: uint64]] =
    result = @[]
    var
      inRange = false
      rangeStart: uint64 = 0
    for i in 0'u64 ..< avail.totalBlocks:
      let hasIt = avail.hasBlock(i)
      if hasIt and not inRange:
        rangeStart = i
        inRange = true
      elif not hasIt and inRange:
        result.add((rangeStart, i - rangeStart))
        inRange = false
    # close a run that extends to the end of the bitmap
    if inRange:
      result.add((rangeStart, avail.totalBlocks - rangeStart))

  let currentRanges =
    if current.kind == bakRanges:
      current.ranges
    else:
      bitmapToRanges(current)
  let otherRanges =
    if other.kind == bakRanges:
      other.ranges
    else:
      bitmapToRanges(other)
  return BlockAvailability.fromRanges(mergeRanges(currentRanges & otherRanges))

View File

@ -0,0 +1,54 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/algorithm
import ./protocol/constants
func isIndexInRanges*(
    index: uint64, ranges: openArray[(uint64, uint64)], sortedRanges: bool = false
): bool =
  ## True if `index` falls inside any of the half-open (start, count)
  ## `ranges`. When `sortedRanges` is true, `ranges` must already be
  ## sorted by start; otherwise a sorted copy is made first.
  ##
  ## NOTE(review): the binary search probes only the candidate with the
  ## greatest start <= index, so ranges are assumed non-overlapping (as
  ## produced by range merging) — verify at call sites.
  func binarySearch(r: openArray[(uint64, uint64)]): bool =
    # find the last range whose start is <= index
    var
      lo = 0
      hi = r.len - 1
      candidate = -1
    while lo <= hi:
      let mid = (lo + hi) div 2
      if r[mid][0] <= index:
        candidate = mid
        lo = mid + 1
      else:
        hi = mid - 1
    if candidate >= 0:
      let (start, count) = r[candidate]
      # overflow-safe containment: start <= index holds here, so
      # `index - start < count` cannot wrap, unlike `index < start + count`
      # which overflowed for ranges ending near high(uint64)
      return index - start < count
    return false

  if ranges.len == 0:
    return false
  if sortedRanges:
    binarySearch(ranges)
  else:
    let sorted = @ranges.sorted(
      proc(a, b: (uint64, uint64)): int =
        cmp(a[0], b[0])
    )
    binarySearch(sorted)
proc computeBatchSize*(blockSize: uint32): uint32 =
  ## Choose how many blocks to request per batch for the given block size:
  ## aim for roughly TargetBatchBytes per batch, bounded below by
  ## MinBatchSize and above by what fits in MaxWantBlocksResponseBytes.
  ## NOTE(review): if blockSize is large enough that maxFromBytes <
  ## MinBatchSize, `clamp` is called with lo > hi — confirm the constants
  ## guarantee MinBatchSize * blockSize <= MaxWantBlocksResponseBytes.
  doAssert blockSize > 0, "computeBatchSize requires blockSize > 0"
  let
    optimal = TargetBatchBytes div blockSize
    maxFromBytes = MaxWantBlocksResponseBytes div blockSize
  return clamp(optimal, MinBatchSize, maxFromBytes)

View File

@ -7,11 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/tables
import std/sugar
import std/hashes
export tables
import std/[tables, sugar, hashes]
{.push raises: [], gcsafe.}
@ -32,61 +28,34 @@ export errors, logutils, units, storagetypes
type
Block* = ref object of RootObj
cid*: Cid
data*: seq[byte]
data*: ref seq[byte]
BlockAddress* = object
case leaf*: bool
of true:
treeCid* {.serialize.}: Cid
index* {.serialize.}: Natural
else:
cid* {.serialize.}: Cid
treeCid* {.serialize.}: Cid
index* {.serialize.}: Natural
logutils.formatIt(LogFormat.textLines, BlockAddress):
if it.leaf:
"treeCid: " & shortLog($it.treeCid) & ", index: " & $it.index
else:
"cid: " & shortLog($it.cid)
"treeCid: " & shortLog($it.treeCid) & ", index: " & $it.index
logutils.formatIt(LogFormat.json, BlockAddress):
%it
proc `==`*(a, b: BlockAddress): bool =
a.leaf == b.leaf and (
if a.leaf:
a.treeCid == b.treeCid and a.index == b.index
else:
a.cid == b.cid
)
a.treeCid == b.treeCid and a.index == b.index
proc `$`*(a: BlockAddress): string =
if a.leaf:
"treeCid: " & $a.treeCid & ", index: " & $a.index
else:
"cid: " & $a.cid
"treeCid: " & $a.treeCid & ", index: " & $a.index
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc cidOrTreeCid*(a: BlockAddress): Cid =
if a.leaf: a.treeCid else: a.cid
proc address*(b: Block): BlockAddress =
BlockAddress(leaf: false, cid: b.cid)
proc init*(_: type BlockAddress, cid: Cid): BlockAddress =
BlockAddress(leaf: false, cid: cid)
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
proc init*(_: type BlockAddress, treeCid: Cid, index: Natural): BlockAddress =
BlockAddress(leaf: true, treeCid: treeCid, index: index)
BlockAddress(treeCid: treeCid, index: index)
proc `$`*(b: Block): string =
result &= "cid: " & $b.cid
result &= "\ndata: " & string.fromBytes(b.data)
result &= "\ndata: " & string.fromBytes(b.data[])
func new*(
T: type Block,
@ -96,7 +65,6 @@ func new*(
codec = BlockCodec,
): ?!Block =
## creates a new block for both storage and network IO
##
let
hash = ?MultiHash.digest($mcodec, data).mapFailure
@ -105,13 +73,14 @@ func new*(
# TODO: If the hash is `>=` to the data,
# use the Cid as a container!
Block(cid: cid, data: @data).success
var dataRef: ref seq[byte]
new(dataRef)
dataRef[] = @data
Block(cid: cid, data: dataRef).success
proc new*(
T: type Block, cid: Cid, data: openArray[byte], verify: bool = true
): ?!Block =
proc new*(T: type Block, cid: Cid, data: sink seq[byte], verify: bool = true): ?!Block =
## creates a new block for both storage and network IO
##
## takes ownership of the data seq to avoid copying
if verify:
let
@ -121,7 +90,16 @@ proc new*(
if computedCid != cid:
return "Cid doesn't match the data".failure
return Block(cid: cid, data: @data).success
var dataRef: ref seq[byte]
new(dataRef)
dataRef[] = move(data)
return Block(cid: cid, data: dataRef).success
proc new*(
T: type Block, cid: Cid, data: openArray[byte], verify: bool = true
): ?!Block =
## creates a new block for both storage and network IO
Block.new(cid, @data, verify)
proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block =
emptyCid(version, hcodec, BlockCodec).flatMap(

View File

@ -44,7 +44,7 @@ import ./utils
import ./nat
import ./utils/natutils
from ./blockexchange/engine/pendingblocks import DefaultBlockRetries
from ./blockexchange/engine/downloadmanager import DefaultBlockRetries
export units, net, storagetypes, logutils, completeCmdArg, parseCmdArg, NatConfig
@ -136,8 +136,7 @@ type
.}: OutDir
listenIp* {.
desc:
"IP address to listen on for remote peer connections, can be ipv4 or ipv6",
desc: "IP address to listen on for remote peer connections, can be ipv4 or ipv6",
defaultValue: "0.0.0.0".parseIpAddress,
defaultValueDesc: "Listens on all addresses.",
abbr: "i",

View File

@ -23,8 +23,37 @@ type
StorageError* = object of CatchableError # base Storage error
StorageResult*[T] = Result[T, ref StorageError]
WantBlocksErrorKind* = enum
RequestTooShort
RequestTooLarge
RequestTruncated
InvalidCid
InvalidCodec
MetadataTooShort
MetadataTruncated
ResponseTooLarge
MetadataTooLarge
DataSizeMismatch
ProofTooShort
ProofTruncated
ProofCreationFailed
ProofPathTooLarge
ProofDecodeFailed
TooManyBlocks
NoConnection
ConnectionClosed
RequestFailed
WantBlocksError* = object of StorageError
kind*: WantBlocksErrorKind
WantBlocksResult*[T] = Result[T, ref WantBlocksError]
FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]]
proc wantBlocksError*(kind: WantBlocksErrorKind, msg: string): ref WantBlocksError =
  ## Construct a `WantBlocksError` carrying a machine-readable `kind`
  ## alongside the human-readable message.
  (ref WantBlocksError)(kind: kind, msg: msg)
template mapFailure*[T, V, E](
exp: Result[T, V], exc: typedesc[E]
): Result[T, ref CatchableError] =

View File

@ -1,4 +1,5 @@
import ./manifest/coders
import ./manifest/manifest
import ./manifest/protocol
export manifest, coders
export manifest, coders, protocol

View File

@ -14,7 +14,6 @@ import times
{.push raises: [].}
import std/tables
import std/sequtils
import pkg/libp2p
import pkg/questionable
@ -139,4 +138,4 @@ func decode*(_: type Manifest, blk: Block): ?!Manifest =
if not ?blk.cid.isManifest:
return failure "Cid not a manifest codec"
Manifest.decode(blk.data)
Manifest.decode(blk.data[])

View File

@ -0,0 +1,253 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import pkg/chronos
import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import pkg/stew/endians2
import ../blocktype as bt
import ../stores/blockstore
import ../discovery
import ../logutils
import ../errors
import ./manifest
import ./coders
export manifest, coders
logScope:
topics = "storage manifestprotocol"
const
ManifestProtocolCodec* = "/storage/manifest/1.0.0"
ManifestMaxCidSize = 512
ManifestMaxDataSize = 65536 # 64KB
DefaultManifestRetries* = 10
DefaultManifestRetryDelay* = 3.seconds
DefaultManifestFetchTimeout* = 30.seconds
type
ManifestProtocol* = ref object of LPProtocol
switch*: Switch
localStore*: BlockStore
discovery*: Discovery
retries*: int
retryDelay*: Duration
fetchTimeout*: Duration
ManifestFetchStatus* = enum
Found = 0
NotFound = 1
proc writeManifestResponse(
    conn: Connection, status: ManifestFetchStatus, data: seq[byte] = @[]
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Send a manifest protocol response:
  ## [u32 LE content length][status byte][manifest bytes].
  ## `data` is empty for NotFound responses.
  let contentLen = 1 + data.len
  var buf = newSeqUninit[byte](4 + contentLen)
  let contentLenLE = contentLen.uint32.toLE
  copyMem(addr buf[0], unsafeAddr contentLenLE, 4)
  buf[4] = status.uint8
  if data.len > 0:
    copyMem(addr buf[5], unsafeAddr data[0], data.len)
  await conn.write(buf)
proc readManifestResponse(
    conn: Connection
): Future[?!(ManifestFetchStatus, seq[byte])] {.
    async: (raises: [CancelledError, LPStreamError])
.} =
  ## Read a manifest response framed as
  ## [u32 LE content length][status byte][manifest bytes].
  ## Size bounds are enforced before allocating and the status byte is
  ## validated against the known enum values.
  var lenBuf: array[4, byte]
  await conn.readExactly(addr lenBuf[0], 4)
  let contentLen = uint32.fromBytes(lenBuf, littleEndian).int
  if contentLen < 1:
    return failure("Manifest response too short: " & $contentLen)
  if contentLen > 1 + ManifestMaxDataSize:
    return failure("Manifest response too large: " & $contentLen)
  var content = newSeq[byte](contentLen)
  await conn.readExactly(addr content[0], contentLen)
  let statusByte = content[0]
  if statusByte > ManifestFetchStatus.high.uint8:
    return failure("Invalid manifest response status: " & $statusByte)
  let
    status = ManifestFetchStatus(statusByte)
    data =
      if contentLen > 1:
        content[1 ..< contentLen]
      else:
        newSeq[byte]()
  return success (status, data)
proc handleManifestRequest(
    self: ManifestProtocol, conn: Connection
) {.async: (raises: [CancelledError]).} =
  ## Server side of the manifest protocol: read a length-prefixed CID,
  ## look the block up in the local store, and reply Found + data or
  ## NotFound. Malformed requests are answered with NotFound rather
  ## than an error; unexpected failures are logged and swallowed.
  try:
    # request framing: [u16 LE cidLen][cid bytes]
    var cidLenBuf: array[2, byte]
    await conn.readExactly(addr cidLenBuf[0], 2)
    let cidLen = uint16.fromBytes(cidLenBuf, littleEndian).int
    if cidLen == 0 or cidLen > ManifestMaxCidSize:
      warn "Invalid CID length in manifest request", cidLen
      await writeManifestResponse(conn, ManifestFetchStatus.NotFound)
      return
    var cidBuf = newSeq[byte](cidLen)
    await conn.readExactly(addr cidBuf[0], cidLen)
    let cid = Cid.init(cidBuf).valueOr:
      warn "Invalid CID in manifest request"
      await writeManifestResponse(conn, ManifestFetchStatus.NotFound)
      return
    without blk =? await self.localStore.getBlock(cid), err:
      trace "Manifest not found locally", cid, err = err.msg
      await writeManifestResponse(conn, ManifestFetchStatus.NotFound)
      return
    await writeManifestResponse(conn, ManifestFetchStatus.Found, blk.data[])
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    warn "Error handling manifest request", exc = exc.msg
proc fetchManifestFromPeer(
    self: ManifestProtocol, peer: PeerRecord, cid: Cid
): Future[?!bt.Block] {.async: (raises: [CancelledError]).} =
  ## Dial `peer`, send a length-prefixed CID request, and verify the
  ## returned block against `cid` before handing it back.
  var stream: Connection
  try:
    stream = await self.switch.dial(
      peer.peerId, peer.addresses.mapIt(it.address), ManifestProtocolCodec
    )
    let
      rawCid = cid.data.buffer
      cidLenLE = rawCid.len.uint16.toLE
    var request = newSeqUninit[byte](2 + rawCid.len)
    copyMem(addr request[0], unsafeAddr cidLenLE, 2)
    if rawCid.len > 0:
      copyMem(addr request[2], unsafeAddr rawCid[0], rawCid.len)
    await stream.write(request)
    without (status, data) =? await readManifestResponse(stream), err:
      return failure(err)
    if status == ManifestFetchStatus.NotFound:
      return failure(
        newException(BlockNotFoundError, "Manifest not found on peer " & $peer.peerId)
      )
    # verify = true guards against a peer returning bytes that do not
    # hash to the requested CID.
    without blk =? bt.Block.new(cid, data, verify = true), err:
      return failure("Manifest CID verification failed: " & err.msg)
    return success blk
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    return failure("Error fetching manifest from peer " & $peer.peerId & ": " & exc.msg)
  finally:
    # The connection may be nil if the dial itself failed.
    if not stream.isNil:
      await stream.close()
proc fetchManifest*(
    self: ManifestProtocol, cid: Cid
): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
  ## Fetch and decode the manifest identified by `cid`.
  ##
  ## The local store is consulted first. On a miss, providers are looked
  ## up via discovery and each is tried in turn (with a per-peer timeout
  ## of `self.fetchTimeout`), for up to `self.retries` rounds separated by
  ## `self.retryDelay`. A successfully fetched block is cached locally
  ## before decoding.
  if err =? cid.isManifest.errorOption:
    # Fix: this message previously contained the literal text "{$cid}" —
    # plain string literals are not interpolated in Nim, so the CID was
    # never included. Build the message explicitly instead.
    return failure("CID has invalid content type for manifest " & $cid)

  trace "Fetching manifest", cid

  without localBlk =? await self.localStore.getBlock(cid), err:
    # Only a plain "not found" triggers the network path; any other
    # local-store error is surfaced to the caller.
    if not (err of BlockNotFoundError):
      return failure err

    trace "Manifest not in local store, starting discovery loop", cid

    var lastErr = err
    for attempt in 0 ..< self.retries:
      trace "Manifest fetch attempt", cid, attempt, maxRetries = self.retries

      let providers = await self.discovery.find(cid)
      if providers.len > 0:
        for provider in providers:
          let fetchFut = self.fetchManifestFromPeer(provider.data, cid)
          var blkResult: ?!bt.Block
          if (await fetchFut.withTimeout(self.fetchTimeout)):
            blkResult = await fetchFut
          else:
            trace "Manifest fetch from peer timed out", cid, peer = provider.data.peerId
            continue

          without blk =? blkResult, fetchErr:
            trace "Failed to fetch manifest from peer",
              cid, peer = provider.data.peerId, err = fetchErr.msg
            lastErr = fetchErr
            continue

          # Cache the manifest locally; a caching failure is logged but
          # does not fail the fetch.
          if putErr =? (await self.localStore.putBlock(blk)).errorOption:
            warn "Failed to store fetched manifest locally", cid, err = putErr.msg

          without manifest =? Manifest.decode(blk), err:
            return failure("Unable to decode manifest: " & err.msg)

          return success manifest
      else:
        trace "No providers found for manifest, will retry", cid, attempt

      # Back off between rounds, but not after the final attempt.
      if attempt < self.retries - 1:
        await sleepAsync(self.retryDelay)

    return failure(
      newException(
        BlockNotFoundError,
        "Failed to fetch manifest " & $cid & " after " & $self.retries & " attempts: " &
          lastErr.msg,
      )
    )

  without manifest =? Manifest.decode(localBlk), err:
    return failure("Unable to decode manifest: " & err.msg)

  return success manifest
proc new*(
    T: type ManifestProtocol,
    switch: Switch,
    localStore: BlockStore,
    discovery: Discovery,
    retries: int = DefaultManifestRetries,
    retryDelay: Duration = DefaultManifestRetryDelay,
    fetchTimeout: Duration = DefaultManifestFetchTimeout,
): ManifestProtocol =
  ## Construct a `ManifestProtocol` and install its libp2p stream
  ## handler under `ManifestProtocolCodec`.
  let mp = ManifestProtocol(
    switch: switch,
    localStore: localStore,
    discovery: discovery,
    retries: retries,
    retryDelay: retryDelay,
    fetchTimeout: fetchTimeout,
  )

  proc onStream(
      conn: Connection, proto: string
  ): Future[void] {.async: (raises: [CancelledError]).} =
    await mp.handleManifestRequest(conn)

  mp.handler = onStream
  mp.codec = ManifestProtocolCodec
  mp

View File

@ -10,7 +10,6 @@
import
std/[options, os, strutils, times, net, atomics],
stew/[objects],
nat_traversal/[miniupnpc, natpmp],
json_serialization/std/net,
results

View File

@ -12,7 +12,6 @@
import std/options
import std/sequtils
import std/strformat
import std/sugar
import times
import pkg/taskpools
@ -47,11 +46,6 @@ export logutils
logScope:
topics = "storage node"
const
DefaultFetchBatch = 1024
MaxOnBatchBlocks = 128
BatchRefillThreshold = 0.75 # Refill when 75% of window completes
type
StorageNode* = object
switch: Switch
@ -59,6 +53,7 @@ type
networkStore: NetworkStore
engine: BlockExcEngine
discovery: Discovery
manifestProto: ManifestProtocol
clock*: Clock
taskPool: Taskpool
trackedFutures: TrackedFutures
@ -66,8 +61,6 @@ type
StorageNodeRef* = ref StorageNode
OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].}
BatchProc* =
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
OnBlockStoredProc = proc(chunk: seq[byte]): void {.gcsafe, raises: [].}
func switch*(self: StorageNodeRef): Switch =
@ -102,27 +95,8 @@ proc storeManifest*(
proc fetchManifest*(
self: StorageNodeRef, cid: Cid
): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
## Fetch and decode a manifest block
##
if err =? cid.isManifest.errorOption:
return failure "CID has invalid content type for manifest {$cid}"
trace "Retrieving manifest for cid", cid
without blk =? await self.networkStore.getBlock(BlockAddress.init(cid)), err:
trace "Error retrieve manifest block", cid, err = err.msg
return failure err
trace "Decoding manifest for cid", cid
without manifest =? Manifest.decode(blk), err:
trace "Unable to decode as manifest", err = err.msg
return failure("Unable to decode as manifest")
trace "Decoded manifest", cid
return manifest.success
## Fetch and decode a manifest
return await self.manifestProto.fetchManifest(cid)
proc findPeer*(self: StorageNodeRef, peerId: PeerId): Future[?PeerRecord] {.async.} =
## Find peer using the discovery service from the given StorageNode
@ -157,118 +131,35 @@ proc updateExpiry*(
return success()
proc fetchBatched*(
self: StorageNodeRef,
cid: Cid,
iter: Iter[int],
batchSize = DefaultFetchBatch,
onBatch: BatchProc = nil,
fetchLocal = true,
): Future[?!void] {.async: (raises: [CancelledError]), gcsafe.} =
## Fetch blocks in batches of `batchSize`
##
# TODO: doesn't work if callee is annotated with async
# let
# iter = iter.map(
# (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
# )
# Sliding window: maintain batchSize blocks in-flight
let
refillThreshold = int(float(batchSize) * BatchRefillThreshold)
refillSize = max(refillThreshold, 1)
maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks)
var
blockData: seq[bt.Block]
failedBlocks = 0
successfulBlocks = 0
completedInWindow = 0
var addresses = newSeqOfCap[BlockAddress](batchSize)
for i in 0 ..< batchSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
addresses.add(address)
var blockResults = await self.networkStore.getBlocks(addresses)
while not blockResults.finished:
without blk =? await blockResults.next(), err:
inc(failedBlocks)
continue
inc(successfulBlocks)
inc(completedInWindow)
if not onBatch.isNil:
blockData.add(blk)
if blockData.len >= maxCallbackBlocks:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
blockData = @[]
if completedInWindow >= refillThreshold and not iter.finished:
var refillAddresses = newSeqOfCap[BlockAddress](refillSize)
for i in 0 ..< refillSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
refillAddresses.add(address)
if refillAddresses.len > 0:
blockResults =
chain(blockResults, await self.networkStore.getBlocks(refillAddresses))
completedInWindow = 0
if failedBlocks > 0:
return failure("Some blocks failed (Result) to fetch (" & $failedBlocks & ")")
if not onBatch.isNil and blockData.len > 0:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
success()
proc fetchBatched*(
self: StorageNodeRef,
manifest: Manifest,
batchSize = DefaultFetchBatch,
onBatch: BatchProc = nil,
fetchLocal = true,
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
## Fetch manifest in batches of `batchSize`
##
trace "Fetching blocks in batches of",
size = batchSize, blocksCount = manifest.blocksCount
let iter = Iter[int].new(0 ..< manifest.blocksCount)
self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal)
proc fetchDatasetAsync*(
self: StorageNodeRef, manifest: Manifest, fetchLocal = true
): Future[void] {.async: (raises: []).} =
## Asynchronously fetch a dataset in the background.
## This task will be tracked and cleaned up on node shutdown.
##
): Future[?!void] {.async: (raises: [CancelledError]).} =
let
treeCid = manifest.treeCid
download = ?self.engine.startTreeDownloadOpaque(
treeCid, manifest.blockSize.uint32, manifest.blocksCount.uint64
)
try:
if err =? (
await self.fetchBatched(
manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal
)
).errorOption:
error "Unable to fetch blocks", err = err.msg
except CancelledError as exc:
trace "Cancelled fetching blocks", exc = exc.msg
trace "Starting tree download",
treeCid = treeCid, totalBlocks = manifest.blocksCount
return await download.waitForComplete()
finally:
self.engine.releaseDownload(download)
proc fetchDatasetAsyncTask*(self: StorageNodeRef, manifest: Manifest) =
## Start fetching a dataset in the background.
## The task will be tracked and cleaned up on node shutdown.
##
self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
proc fetchTask(): Future[void] {.async: (raises: []).} =
try:
if err =? (await self.fetchDatasetAsync(manifest, fetchLocal = false)).errorOption:
error "Background dataset fetch failed",
treeCid = manifest.treeCid, err = err.msg
except CancelledError:
trace "Background dataset fetch cancelled", treeCid = manifest.treeCid
self.trackedFutures.track(fetchTask())
proc streamSingleBlock(
self: StorageNodeRef, cid: Cid
@ -279,14 +170,14 @@ proc streamSingleBlock(
let stream = BufferStream.new()
without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
without blk =? (await self.networkStore.localStore.getBlock(cid)), err:
return failure(err)
proc streamOneBlock(): Future[void] {.async: (raises: []).} =
try:
defer:
await stream.pushEof()
await stream.pushData(blk.data)
await stream.pushData(blk.data[])
except CancelledError as exc:
trace "Streaming block cancelled", cid, exc = exc.msg
except LPStreamError as exc:
@ -305,7 +196,15 @@ proc streamEntireDataset(
var jobs: seq[Future[void]]
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
jobs.add(self.fetchDatasetAsync(manifest, fetchLocal = false))
proc fetchTask(): Future[void] {.async: (raises: []).} =
try:
if err =? (await self.fetchDatasetAsync(manifest, fetchLocal = false)).errorOption:
error "Dataset fetch failed during streaming", manifestCid, err = err.msg
await stream.close()
except CancelledError:
trace "Dataset fetch cancelled during streaming", manifestCid
jobs.add(fetchTask())
# Monitor stream completion and cancel background jobs when done
proc monitorStream() {.async: (raises: []).} =
@ -543,6 +442,7 @@ proc new*(
networkStore: NetworkStore,
engine: BlockExcEngine,
discovery: Discovery,
manifestProto: ManifestProtocol,
taskpool: Taskpool,
): StorageNodeRef =
## Create new instance of a Storage self, call `start` to run it
@ -553,6 +453,7 @@ proc new*(
networkStore: networkStore,
engine: engine,
discovery: discovery,
manifestProto: manifestProto,
taskPool: taskpool,
trackedFutures: TrackedFutures(),
)

View File

@ -19,7 +19,6 @@ import pkg/chronos
import pkg/presto except toJson
import pkg/metrics except toJson
import pkg/stew/base10
import pkg/stew/byteutils
import pkg/confutils
import pkg/libp2p
@ -29,10 +28,12 @@ import pkg/codexdht/discv5/spr as spr
import ../logutils
import ../node
import ../blocktype
import ../storagetypes
import ../conf
import ../manifest
import ../streams/asyncstreamwrapper
import ../stores
import ../units
import ../utils/options
import ./coders
@ -120,7 +121,7 @@ proc retrieveCid(
while not stream.atEof:
var
buff = newSeqUninitialized[byte](DefaultBlockSize.int)
buff = newSeqUninit[byte](manifest.blockSize.int)
len = await stream.readOnce(addr buff[0], buff.len)
buff.setLen(len)
@ -191,8 +192,29 @@ proc initDataApi(node: StorageNodeRef, repoStore: RepoStore, router: var RestRou
router.rawApi(MethodPost, "/api/storage/v1/data") do() -> RestApiResponse:
## Upload a file in a streaming manner
##
## Optional query parameter:
## blockSize - size of blocks in bytes (default: 64KiB, min: 4KiB, max: 512KiB)
##
trace "Handling file upload"
# Parse blockSize query parameter
var blockSize = DefaultBlockSize
let blockSizeStr = request.query.getString("blockSize", "")
if blockSizeStr != "":
let parsedSize = Base10.decode(uint64, blockSizeStr)
if parsedSize.isErr:
return RestApiResponse.error(Http400, "Invalid blockSize parameter")
let size = parsedSize.get()
# Validate block size
if size < MinBlockSize or size > MaxBlockSize or not isPowerOfTwo(size):
return RestApiResponse.error(
Http400,
"blockSize must be a power of two between " & $MinBlockSize & " and " &
$MaxBlockSize & " bytes",
)
blockSize = NBytes(size)
var bodyReader = request.getBodyReader()
if bodyReader.isErr():
return RestApiResponse.error(Http500, msg = bodyReader.error())
@ -223,6 +245,16 @@ proc initDataApi(node: StorageNodeRef, repoStore: RepoStore, router: var RestRou
if filename.isSome and not isValidFilename(filename.get()):
return RestApiResponse.error(Http422, "The filename is not valid.")
if filename.isSome and filename.get().len > MaxFilenameSize:
return RestApiResponse.error(
Http422, "Filename exceeds maximum size of " & $MaxFilenameSize & " bytes"
)
if mimetype.isSome and mimetype.get().len > MaxMimetypeSize:
return RestApiResponse.error(
Http422, "Mimetype exceeds maximum size of " & $MaxMimetypeSize & " bytes"
)
# Here we could check if the extension matches the filename if needed
let reader = bodyReader.get()
@ -233,13 +265,14 @@ proc initDataApi(node: StorageNodeRef, repoStore: RepoStore, router: var RestRou
AsyncStreamWrapper.new(reader = AsyncStreamReader(reader)),
filename = filename,
mimetype = mimetype,
blockSize = blockSize,
)
), error:
error "Error uploading file", exc = error.msg
return RestApiResponse.error(Http500, error.msg)
storage_api_uploads.inc()
trace "Uploaded file", cid
trace "Uploaded file", cid, blockSize
return RestApiResponse.response($cid)
except CancelledError:
trace "Upload cancelled error"
@ -477,6 +510,7 @@ proc initDebugApi(node: StorageNodeRef, conf: StorageConf, router: var RestRoute
try:
let table = RestRoutingTable.init(node.discovery.protocol.routingTable)
let json = %*{
"id": $node.switch.peerInfo.peerId,
"addrs": node.switch.peerInfo.addrs.mapIt($it),

View File

@ -7,8 +7,6 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sugar
import pkg/presto
import pkg/chronos
import pkg/libp2p

View File

@ -1,5 +1,4 @@
import pkg/questionable
import pkg/stew/byteutils
import pkg/libp2p
import pkg/codexdht/discv5/node as dn
import pkg/codexdht/discv5/routing_table as rt

View File

@ -7,8 +7,6 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import std/strutils
import std/os
import std/tables
import std/cpuinfo
@ -25,6 +23,7 @@ import pkg/datastore
import pkg/stew/io2
import ./node
import ./manifest/protocol
import ./conf
import ./rng as random
import ./rest/api
@ -147,7 +146,7 @@ proc new*(
.withAddresses(@[listenMultiAddr])
.withRng(random.Rng.instance())
.withNoise()
.withMplex(5.minutes, 5.minutes)
.withYamux()
.withMaxConnections(config.maxPeers)
.withAgentVersion(config.agentString)
.withSignedPeerRecord(true)
@ -236,21 +235,22 @@ proc new*(
numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks,
)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries)
peerStore = PeerContextStore.new()
downloadManager = DownloadManager.new(retries = config.blockRetries)
advertiser = Advertiser.new(repoStore, discovery)
blockDiscovery =
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery)
engine = BlockExcEngine.new(
repoStore, network, blockDiscovery, advertiser, peerStore, pendingBlocks
repoStore, network, blockDiscovery, advertiser, peerStore, downloadManager
)
store = NetworkStore.new(engine, repoStore)
manifestProto = ManifestProtocol.new(switch, repoStore, discovery)
storageNode = StorageNodeRef.new(
switch = switch,
networkStore = store,
engine = engine,
discovery = discovery,
manifestProto = manifestProto,
taskPool = taskPool,
)
@ -267,6 +267,7 @@ proc new*(
.expect("Should create rest server!")
switch.mount(network)
switch.mount(manifestProto)
StorageServer(
config: config,

View File

@ -23,10 +23,27 @@ import ./errors
export tables
const
# Size of blocks for storage / network exchange,
DefaultBlockSize* = NBytes 1024 * 64
func isPowerOfTwo*(x: uint64): bool =
(x > 0) and ((x and (x - 1)) == 0)
const
# Block size limits for storage / network exchange
MinBlockSize* = 4096'u64 # 4 KiB minimum
MaxBlockSize* = 524288'u64 # 512 KiB maximum
DefaultBlockSize* = NBytes 65536 # 64 KiB default
# Manifest field limits (ensure manifest fits in MinBlockSize)
MaxFilenameSize* = 255
MaxMimetypeSize* = 128
static:
# Validate block size constants are powers of two
doAssert isPowerOfTwo(MinBlockSize), "MinBlockSize must be a power of two"
doAssert isPowerOfTwo(MaxBlockSize), "MaxBlockSize must be a power of two"
doAssert isPowerOfTwo(DefaultBlockSize.uint64),
"DefaultBlockSize must be a power of two"
const
# hashes
Sha256HashCodec* = multiCodec("sha2-256")

View File

@ -23,6 +23,10 @@ export blocktype
type
BlockNotFoundError* = object of StorageError
BlockCorruptedError* = object of StorageError
## Raised when a block received from the network fails validation
## (CID doesn't match the data). This indicates either malicious peer
## or data corruption in transit.
BlockType* {.pure.} = enum
Manifest
@ -65,14 +69,9 @@ method getBlock*(
raiseAssert("getBlock by addr not implemented!")
method completeBlock*(
self: BlockStore, address: BlockAddress, blk: Block
) {.base, gcsafe.} =
discard
method getBlocks*(
self: BlockStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
): Future[SafeAsyncIter[Block]] {.base, async: (raises: [CancelledError]).} =
## Gets a set of blocks from the blockstore. Blocks might
## be returned in any order.
@ -195,8 +194,4 @@ proc contains*(
proc contains*(
self: BlockStore, address: BlockAddress
): Future[bool] {.async: (raises: [CancelledError]), gcsafe.} =
return
if address.leaf:
(await self.hasBlock(address.treeCid, address.index)) |? false
else:
(await self.hasBlock(address.cid)) |? false
return (await self.hasBlock(address.treeCid, address.index)) |? false

View File

@ -117,10 +117,7 @@ method getBlockAndProof*(
method getBlock*(
self: CacheStore, address: BlockAddress
): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} =
if address.leaf:
self.getBlock(address.treeCid, address.index)
else:
self.getBlock(address.cid)
self.getBlock(address.treeCid, address.index)
method hasBlock*(
self: CacheStore, cid: Cid
@ -188,7 +185,7 @@ method listBlocks*(
success(iter)
func putBlockSync(self: CacheStore, blk: Block): bool =
let blkSize = blk.data.len.NBytes # in bytes
let blkSize = blk.data[].len.NBytes # in bytes
if blkSize > self.size:
trace "Block size is larger than cache size", blk = blkSize, cache = self.size
@ -197,7 +194,7 @@ func putBlockSync(self: CacheStore, blk: Block): bool =
while self.currentSize + blkSize > self.size:
try:
let removed = self.cache.removeLru()
self.currentSize -= removed.data.len.NBytes
self.currentSize -= removed.data[].len.NBytes
except EmptyLruCacheError as exc:
# if the cache is empty, can't remove anything, so break and add item
# to the cache
@ -264,7 +261,7 @@ method delBlock*(
let removed = self.cache.del(cid)
if removed.isSome:
self.currentSize -= removed.get.data.len.NBytes
self.currentSize -= removed.get.data[].len.NBytes
return success()
@ -278,9 +275,6 @@ method delBlock*(
return success()
method completeBlock*(self: CacheStore, address: BlockAddress, blk: Block) {.gcsafe.} =
discard
method close*(self: CacheStore): Future[void] {.async: (raises: []).} =
## Close the blockstore, a no-op for this implementation
##

View File

@ -34,12 +34,11 @@ type NetworkStore* = ref object of BlockStore
method getBlocks*(
self: NetworkStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
let runtimeQuota = 10.milliseconds
var
localAddresses: seq[BlockAddress]
remoteAddresses: seq[BlockAddress]
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
lastIdle = Moment.now()
for address in addresses:
if not (await address in self.localStore):
@ -59,26 +58,29 @@ method getBlocks*(
method getBlock*(
self: NetworkStore, address: BlockAddress
): Future[?!Block] {.async: (raises: [CancelledError]).} =
without blk =? (await self.localStore.getBlock(address)), err:
if not (err of BlockNotFoundError):
error "Error getting block from local store", address, err = err.msg
return failure err
let downloadOpt = self.engine.downloadManager.getDownload(address.treeCid)
if downloadOpt.isSome:
let handle = downloadOpt.get().getWantHandle(address)
without blk =? (await self.localStore.getBlock(address)), err:
if not (err of BlockNotFoundError):
handle.cancel()
return failure err
return await handle
discard downloadOpt.get().completeWantHandle(address, some(blk))
return success blk
without newBlock =? (await self.engine.requestBlock(address)), err:
error "Unable to get block from exchange engine", address, err = err.msg
return failure err
without newBlock =? (await self.engine.requestBlock(address)), err:
error "Unable to get block from exchange engine", address, err = err.msg
return failure err
return success newBlock
return success blk
return success newBlock
method getBlock*(
self: NetworkStore, cid: Cid
): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} =
## Get a block from the blockstore
## Get a block from the local blockstore only.
##
self.getBlock(BlockAddress.init(cid))
self.localStore.getBlock(cid)
method getBlock*(
self: NetworkStore, treeCid: Cid, index: Natural
@ -88,9 +90,6 @@ method getBlock*(
self.getBlock(BlockAddress.init(treeCid, index))
method completeBlock*(self: NetworkStore, address: BlockAddress, blk: Block) =
self.engine.completeBlock(address, blk)
method putBlock*(
self: NetworkStore, blk: Block, ttl = Duration.none
): Future[?!void] {.async: (raises: [CancelledError]).} =
@ -100,7 +99,6 @@ method putBlock*(
if res.isErr:
return res
await self.engine.resolveBlocks(@[blk])
return success()
method putCidAndProof*(

View File

@ -185,7 +185,7 @@ proc storeBlock*(
res: StoreResult
if currMd =? maybeCurrMd:
if currMd.size == blk.data.len.NBytes:
if currMd.size == blk.data[].len.NBytes:
md = BlockMetadata(
size: currMd.size,
expiry: max(currMd.expiry, minExpiry),
@ -200,7 +200,7 @@ proc storeBlock*(
if not hasBlock:
warn "Block metadata is present, but block is absent. Restoring block.",
cid = blk.cid
if err =? (await self.repoDs.put(blkKey, blk.data)).errorOption:
if err =? (await self.repoDs.put(blkKey, blk.data[])).errorOption:
raise err
else:
raise newException(
@ -209,9 +209,9 @@ proc storeBlock*(
$blk.cid,
)
else:
md = BlockMetadata(size: blk.data.len.NBytes, expiry: minExpiry, refCount: 0)
res = StoreResult(kind: Stored, used: blk.data.len.NBytes)
if err =? (await self.repoDs.put(blkKey, blk.data)).errorOption:
md = BlockMetadata(size: blk.data[].len.NBytes, expiry: minExpiry, refCount: 0)
res = StoreResult(kind: Stored, used: blk.data[].len.NBytes)
if err =? (await self.repoDs.put(blkKey, blk.data[])).errorOption:
raise err
(md.some, res),

View File

@ -70,15 +70,18 @@ method getBlock*(
trace "Error getting key from provider", err = err.msg
return failure(err)
without data =? await self.repoDs.get(key), err:
# Manual pattern to avoid questionable copy
var dataResult = await self.repoDs.get(key)
if dataResult.isErr:
let err = dataResult.error
if not (err of DatastoreKeyNotFound):
trace "Error getting block from datastore", err = err.msg, key
return failure(err)
return failure(newException(BlockNotFoundError, err.msg))
trace "Got block for cid", cid
return Block.new(cid, data, verify = true)
# Zero-copy: move data out of Result, then into Block
return Block.new(cid, move(dataResult.unsafeGet()), verify = true)
method getBlockAndProof*(
self: RepoStore, treeCid: Cid, index: Natural
@ -86,10 +89,12 @@ method getBlockAndProof*(
without leafMd =? await self.getLeafMetadata(treeCid, index), err:
return failure(err)
without blk =? await self.getBlock(leafMd.blkCid), err:
return failure(err)
# Manual pattern to avoid questionable copy for Block (contains seq[byte])
var blkResult = await self.getBlock(leafMd.blkCid)
if blkResult.isErr:
return failure(blkResult.error)
success((blk, leafMd.proof))
success((move(blkResult.unsafeGet()), leafMd.proof))
method getBlock*(
self: RepoStore, treeCid: Cid, index: Natural
@ -105,10 +110,7 @@ method getBlock*(
## Get a block from the blockstore
##
if address.leaf:
self.getBlock(address.treeCid, address.index)
else:
self.getBlock(address.cid)
self.getBlock(address.treeCid, address.index)
method ensureExpiry*(
self: RepoStore, cid: Cid, expiry: SecondsSince1970

View File

@ -94,8 +94,7 @@ method readOnce*(
self.manifest.blockSize.int - blockOffset,
]
)
address =
BlockAddress(leaf: true, treeCid: self.manifest.treeCid, index: blockNum)
address = BlockAddress(treeCid: self.manifest.treeCid, index: blockNum)
# Read contents of block `blockNum`
without blk =? (await self.store.getBlock(address)).tryGet.catch, error:
@ -113,7 +112,7 @@ method readOnce*(
if blk.isEmpty:
zeroMem(pbytes.offset(read), readBytes)
else:
copyMem(pbytes.offset(read), blk.data[blockOffset].unsafeAddr, readBytes)
copyMem(pbytes.offset(read), blk.data[][blockOffset].unsafeAddr, readBytes)
# Update current positions in the stream and outbuf
self.offset += readBytes

View File

@ -8,7 +8,6 @@ import pkg/storage/stores
import pkg/storage/units
import pkg/chronos
import pkg/stew/byteutils
import pkg/stint
import ./storage/helpers/randomchunker

View File

@ -1,7 +1,6 @@
import helpers/multisetup
import helpers/trackers
import helpers/templeveldb
import std/times
import std/sequtils, chronos
import ./asynctest

View File

@ -1,7 +1,5 @@
import std/importutils
import std/net
import std/sequtils
import std/strformat
from pkg/libp2p import `==`, `$`, Cid
import pkg/storage/units
import pkg/storage/manifest

View File

@ -1,6 +1,5 @@
import std/times
import pkg/storage/conf
import pkg/stint
from pkg/libp2p import Cid, `$`
import ../../asynctest
import ../../checktest

View File

@ -1,6 +1,5 @@
import std/httpclient
import std/os
import std/sequtils
import std/strutils
import std/sugar
import std/times

View File

@ -5,7 +5,6 @@ import pkg/chronicles
import pkg/chronos/asyncproc
import pkg/libp2p
import std/os
import std/strformat
import std/strutils
import storage/conf
import storage/utils/exceptions

View File

@ -4,8 +4,6 @@ import std/tables
import pkg/chronos
import pkg/libp2p/errors
import pkg/storage/rng
import pkg/storage/stores
import pkg/storage/blockexchange
@ -28,14 +26,14 @@ asyncchecksuite "Block Advertising and Discovery":
tree: StorageMerkleTree
manifestBlock: bt.Block
switch: Switch
peerStore: PeerCtxStore
peerStore: PeerContextStore
blockDiscovery: MockDiscovery
discovery: DiscoveryEngine
advertiser: Advertiser
network: BlockExcNetwork
localStore: CacheStore
engine: BlockExcEngine
pendingBlocks: PendingBlocksManager
downloadManager: DownloadManager
setup:
while true:
@ -49,8 +47,8 @@ asyncchecksuite "Block Advertising and Discovery":
blockDiscovery = MockDiscovery.new()
network = BlockExcNetwork.new(switch)
localStore = CacheStore.new(blocks.mapIt(it))
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
peerStore = PeerContextStore.new()
downloadManager = DownloadManager.new()
(_, tree, manifest) = makeDataset(blocks).tryGet()
manifestBlock =
@ -59,26 +57,25 @@ asyncchecksuite "Block Advertising and Discovery":
(await localStore.putBlock(manifestBlock)).tryGet()
discovery = DiscoveryEngine.new(
localStore,
peerStore,
network,
blockDiscovery,
pendingBlocks,
minPeersPerBlock = 1,
localStore, peerStore, network, blockDiscovery, concurrentDiscReqs = 20
)
advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new(
localStore, network, discovery, advertiser, peerStore, pendingBlocks
localStore, network, discovery, advertiser, peerStore, downloadManager
)
switch.mount(network)
test "Should discover want list":
let pendingBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))
await engine.start()
var handles: seq[Future[?!bt.Block]]
for blk in blocks:
let
address = BlockAddress.init(blk.cid, 0)
desc = toDownloadDesc(address, blockSize = 0)
download = engine.downloadManager.startDownload(desc)
handles.add(download.getWantHandle(address))
blockDiscovery.publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
@ -88,9 +85,18 @@ asyncchecksuite "Block Advertising and Discovery":
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
await engine.resolveBlocks(blocks.filterIt(it.cid == cid))
let matching = blocks.filterIt(it.cid == cid)
for blk in matching:
let address = BlockAddress(treeCid: blk.cid, index: 0)
let dlOpt = engine.downloadManager.getDownload(blk.cid)
if dlOpt.isSome:
discard dlOpt.get().completeWantHandle(address, some(blk))
await allFuturesThrowing(allFinished(pendingBlocks))
await engine.start()
discovery.queueFindBlocksReq(blocks.mapIt(it.cid))
await allFuturesThrowing(allFinished(handles)).wait(10.seconds)
await engine.stop()
@ -123,230 +129,3 @@ asyncchecksuite "Block Advertising and Discovery":
await engine.start()
await sleepAsync(3.seconds)
await engine.stop()
test "Should not launch discovery if remote peer has block":
let
pendingBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))
peerId = PeerId.example
haves = collect(initTable()):
for blk in blocks:
{blk.address: Presence(address: blk.address)}
engine.peers.add(BlockExcPeerCtx(id: peerId, blocks: haves))
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check false
await engine.start()
engine.pendingBlocks.resolve(
blocks.mapIt(BlockDelivery(blk: it, address: it.address))
)
await allFuturesThrowing(allFinished(pendingBlocks))
await engine.stop()
proc asBlock(m: Manifest): bt.Block =
let mdata = m.encode().tryGet()
bt.Block.new(data = mdata, codec = ManifestCodec).tryGet()
asyncchecksuite "E2E - Multiple Nodes Discovery":
  # Spins up four independent block-exchange stacks (switch + stores +
  # discovery + engine) so tests can exercise advertise/discover flows over
  # real libp2p connections with a mocked DHT.
  var
    switch: seq[Switch]
    blockexc: seq[NetworkStore]
    manifests: seq[Manifest]
    mBlocks: seq[bt.Block] # manifest blocks, one per generated dataset
    trees: seq[StorageMerkleTree]

  setup:
    for _ in 0 ..< 4:
      # Build a small random dataset for each node.
      let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256)
      var blocks = newSeq[bt.Block]()
      while true:
        let chunk = await chunker.getBytes()
        if chunk.len <= 0:
          break
        blocks.add(bt.Block.new(chunk).tryGet())

      let (_, tree, manifest) = makeDataset(blocks).tryGet()
      manifests.add(manifest)
      mBlocks.add(manifest.asBlock())
      trees.add(tree)

      let
        s = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr})
        blockDiscovery = MockDiscovery.new()
        network = BlockExcNetwork.new(s)
        localStore = CacheStore.new()
        peerStore = PeerCtxStore.new()
        pendingBlocks = PendingBlocksManager.new()
        # minPeersPerBlock = 1 so a single provider satisfies discovery.
        discovery = DiscoveryEngine.new(
          localStore,
          peerStore,
          network,
          blockDiscovery,
          pendingBlocks,
          minPeersPerBlock = 1,
        )
        advertiser = Advertiser.new(localStore, blockDiscovery)
        engine = BlockExcEngine.new(
          localStore, network, discovery, advertiser, peerStore, pendingBlocks
        )
        networkStore = NetworkStore.new(engine, localStore)

      s.mount(network)
      switch.add(s)
      blockexc.add(networkStore)

  teardown:
    # Reset shared suite state between tests.
    switch = @[]
    blockexc = @[]
    manifests = @[]
    mBlocks = @[]
    trees = @[]
test "E2E - Should advertise and discover blocks":
  # Distribute the manifests and trees amongst 1..3
  # Ask 0 to download everything without connecting him beforehand
  var advertised: Table[Cid, SignedPeerRecord]

  # Nodes 1..3 record their own signed peer record whenever they would
  # publish a provide for a CID; this table stands in for the DHT.
  MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(
      d: MockDiscovery, cid: Cid
  ) {.async: (raises: [CancelledError]).} =
    advertised[cid] = switch[1].peerInfo.signedPeerRecord

  MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc(
      d: MockDiscovery, cid: Cid
  ) {.async: (raises: [CancelledError]).} =
    advertised[cid] = switch[2].peerInfo.signedPeerRecord

  MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc(
      d: MockDiscovery, cid: Cid
  ) {.async: (raises: [CancelledError]).} =
    advertised[cid] = switch[3].peerInfo.signedPeerRecord

  # Seed one manifest block into each of nodes 1..3 through the delivery
  # path (allowSpurious accepts blocks that were not explicitly requested).
  discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid)
  await blockexc[1].engine.blocksDeliveryHandler(
    switch[0].peerInfo.peerId,
    @[
      BlockDelivery(
        blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid)
      )
    ],
    allowSpurious = true,
  )

  discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid)
  await blockexc[2].engine.blocksDeliveryHandler(
    switch[0].peerInfo.peerId,
    @[
      BlockDelivery(
        blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid)
      )
    ],
    allowSpurious = true,
  )

  discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid)
  await blockexc[3].engine.blocksDeliveryHandler(
    switch[0].peerInfo.peerId,
    @[
      BlockDelivery(
        blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid)
      )
    ],
    allowSpurious = true,
  )

  # Node 0 resolves providers from the recorded advertisements.
  MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
      d: MockDiscovery, cid: Cid
  ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
    advertised.withValue(cid, val):
      result.add(val[])

  # Request all three manifest blocks from node 0 before starting anything,
  # then start switches and engines and wait for the downloads to complete.
  let futs = collect(newSeq):
    for m in mBlocks[0 .. 2]:
      blockexc[0].engine.requestBlock(m.cid)

  await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds)
  await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds)
  await allFutures(futs).wait(10.seconds)
  await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds)
  await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds)
test "E2E - Should advertise and discover blocks with peers already connected":
  # Distribute the blocks amongst 1..3
  # Ask 0 to download everything *WITH* connecting him beforehand
  var advertised: Table[Cid, SignedPeerRecord]

  # Nodes 1..3 record their signed peer record instead of publishing to a
  # real DHT.
  MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(
      d: MockDiscovery, cid: Cid
  ) {.async: (raises: [CancelledError]).} =
    advertised[cid] = switch[1].peerInfo.signedPeerRecord

  MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc(
      d: MockDiscovery, cid: Cid
  ) {.async: (raises: [CancelledError]).} =
    advertised[cid] = switch[2].peerInfo.signedPeerRecord

  MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc(
      d: MockDiscovery, cid: Cid
  ) {.async: (raises: [CancelledError]).} =
    advertised[cid] = switch[3].peerInfo.signedPeerRecord

  # Seed one manifest block into each of nodes 1..3 through the delivery
  # path (allowSpurious accepts blocks that were not explicitly requested).
  discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid)
  await blockexc[1].engine.blocksDeliveryHandler(
    switch[0].peerInfo.peerId,
    @[
      BlockDelivery(
        blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid)
      )
    ],
    allowSpurious = true,
  )

  discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid)
  await blockexc[2].engine.blocksDeliveryHandler(
    switch[0].peerInfo.peerId,
    @[
      BlockDelivery(
        blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid)
      )
    ],
    allowSpurious = true,
  )

  discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid)
  await blockexc[3].engine.blocksDeliveryHandler(
    switch[0].peerInfo.peerId,
    @[
      BlockDelivery(
        blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid)
      )
    ],
    allowSpurious = true,
  )

  # Node 0 resolves providers from the recorded advertisements.
  MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
      d: MockDiscovery, cid: Cid
  ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
    advertised.withValue(cid, val):
      return @[val[]]

  let futs = mBlocks[0 .. 2].mapIt(blockexc[0].engine.requestBlock(it.cid))

  await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds)
  await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds)
  await allFutures(futs).wait(10.seconds)
  await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds)
  await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds)

View File

@ -1,5 +1,4 @@
import std/sequtils
import std/tables
import pkg/chronos
@ -30,9 +29,9 @@ asyncchecksuite "Test Discovery Engine":
tree: StorageMerkleTree
manifestBlock: bt.Block
switch: Switch
peerStore: PeerCtxStore
peerStore: PeerContextStore
blockDiscovery: MockDiscovery
pendingBlocks: PendingBlocksManager
downloadManager: DownloadManager
network: BlockExcNetwork
setup:
@ -49,47 +48,15 @@ asyncchecksuite "Test Discovery Engine":
switch = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr})
network = BlockExcNetwork.new(switch)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
peerStore = PeerContextStore.new()
downloadManager = DownloadManager.new()
blockDiscovery = MockDiscovery.new()
test "Should Query Wants":
var
localStore = CacheStore.new()
discoveryEngine = DiscoveryEngine.new(
localStore,
peerStore,
network,
blockDiscovery,
pendingBlocks,
discoveryLoopSleep = 100.millis,
)
wants = blocks.mapIt(pendingBlocks.getWantHandle(it.cid))
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
pendingBlocks.resolve(
blocks.filterIt(it.cid == cid).mapIt(
BlockDelivery(blk: it, address: it.address)
)
)
await discoveryEngine.start()
await allFuturesThrowing(allFinished(wants)).wait(100.millis)
await discoveryEngine.stop()
test "Should queue discovery request":
var
localStore = CacheStore.new()
discoveryEngine = DiscoveryEngine.new(
localStore,
peerStore,
network,
blockDiscovery,
pendingBlocks,
discoveryLoopSleep = 100.millis,
)
discoveryEngine =
DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery)
want = newFuture[void]()
blockDiscovery.findBlockProvidersHandler = proc(
@ -104,60 +71,11 @@ asyncchecksuite "Test Discovery Engine":
await want.wait(100.millis)
await discoveryEngine.stop()
test "Should not request more than minPeersPerBlock":
var
localStore = CacheStore.new()
minPeers = 2
discoveryEngine = DiscoveryEngine.new(
localStore,
peerStore,
network,
blockDiscovery,
pendingBlocks,
discoveryLoopSleep = 5.minutes,
minPeersPerBlock = minPeers,
)
want = newAsyncEvent()
var pendingCids = newSeq[Cid]()
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check cid in pendingCids
pendingCids.keepItIf(it != cid)
check peerStore.len < minPeers
var peerCtx = BlockExcPeerCtx(id: PeerId.example)
let address = BlockAddress(leaf: false, cid: cid)
peerCtx.blocks[address] = Presence(address: address)
peerStore.add(peerCtx)
want.fire()
await discoveryEngine.start()
var idx = 0
while peerStore.len < minPeers:
let cid = blocks[idx].cid
inc idx
pendingCids.add(cid)
discoveryEngine.queueFindBlocksReq(@[cid])
await want.wait()
want.clear()
check peerStore.len == minPeers
await discoveryEngine.stop()
test "Should not request if there is already an inflight discovery request":
var
localStore = CacheStore.new()
discoveryEngine = DiscoveryEngine.new(
localStore,
peerStore,
network,
blockDiscovery,
pendingBlocks,
discoveryLoopSleep = 100.millis,
concurrentDiscReqs = 2,
localStore, peerStore, network, blockDiscovery, concurrentDiscReqs = 2
)
reqs = Future[void].Raising([CancelledError]).init()
count = 0
@ -170,7 +88,7 @@ asyncchecksuite "Test Discovery Engine":
check false
count.inc
await reqs # queue the request
await reqs
await discoveryEngine.start()
discoveryEngine.queueFindBlocksReq(@[blocks[0].cid])

View File

@ -1,200 +1,593 @@
import std/sequtils
import std/algorithm
import std/importutils
import pkg/chronos
import pkg/stew/byteutils
import pkg/storage/stores
import pkg/storage/blockexchange
import pkg/storage/blockexchange/engine/engine {.all.}
import pkg/storage/blockexchange/engine/scheduler {.all.}
import pkg/storage/blockexchange/engine/downloadmanager {.all.}
import pkg/storage/blockexchange/engine/activedownload {.all.}
import pkg/storage/chunker
import pkg/storage/discovery
import pkg/storage/blocktype as bt
import pkg/storage/utils/safeasynciter
import ../../../asynctest
import ../../examples
import ../../helpers
asyncchecksuite "NetworkStore engine - 2 nodes":
proc waitForPeerInSwarm(
    download: ActiveDownload,
    peerId: PeerId,
    timeout = 5.seconds,
    pollInterval = 50.milliseconds,
): Future[bool] {.async.} =
  ## Poll the download's swarm until `peerId` appears or `timeout` elapses.
  ## Returns true as soon as the peer is present, false on timeout.
  let cutoff = Moment.now() + timeout
  result = false
  while Moment.now() < cutoff:
    if download.getSwarm().getPeer(peerId).isSome:
      result = true
      break
    await sleepAsync(pollInterval)
asyncchecksuite "BlockExchange - Basic Block Transfer":
var
nodeCmps1, nodeCmps2: NodesComponents
peerCtx1, peerCtx2: BlockExcPeerCtx
blocks1, blocks2: seq[bt.Block]
pendingBlocks1, pendingBlocks2: seq[BlockHandle]
cluster: NodesCluster
seeder: NodesComponents
leecher: NodesComponents
dataset: TestDataset
setup:
blocks1 = await makeRandomBlocks(datasetSize = 2048, blockSize = 256'nb)
blocks2 = await makeRandomBlocks(datasetSize = 2048, blockSize = 256'nb)
nodeCmps1 = generateNodes(1, blocks1).components[0]
nodeCmps2 = generateNodes(1, blocks2).components[0]
# Create two nodes
cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
seeder = cluster.components[0]
leecher = cluster.components[1]
await allFuturesThrowing(nodeCmps1.start(), nodeCmps2.start())
# Create test dataset (small - 4 blocks)
let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
dataset = makeDataset(blocks).tryGet()
# initialize our want lists
pendingBlocks1 =
blocks2[0 .. 3].mapIt(nodeCmps1.pendingBlocks.getWantHandle(it.cid))
# Assign all blocks to seeder
await seeder.assignBlocks(dataset)
pendingBlocks2 =
blocks1[0 .. 3].mapIt(nodeCmps2.pendingBlocks.getWantHandle(it.cid))
await nodeCmps1.switch.connect(
nodeCmps2.switch.peerInfo.peerId, nodeCmps2.switch.peerInfo.addrs
)
await sleepAsync(100.millis) # give some time to exchange lists
peerCtx2 = nodeCmps1.peerStore.get(nodeCmps2.switch.peerInfo.peerId)
peerCtx1 = nodeCmps2.peerStore.get(nodeCmps1.switch.peerInfo.peerId)
check isNil(peerCtx1).not
check isNil(peerCtx2).not
# Start nodes and connect them
await cluster.components.start()
await connectNodes(cluster)
teardown:
await allFuturesThrowing(nodeCmps1.stop(), nodeCmps2.stop())
await cluster.components.stop()
test "Should exchange blocks on connect":
await allFuturesThrowing(allFinished(pendingBlocks1)).wait(10.seconds)
await allFuturesThrowing(allFinished(pendingBlocks2)).wait(10.seconds)
test "Should request and receive a single block":
let
blk = dataset.blocks[0]
address = BlockAddress(treeCid: dataset.manifest.treeCid, index: 0)
res = await leecher.engine.requestBlock(address)
check:
(await allFinished(blocks1[0 .. 3].mapIt(nodeCmps2.localStore.getBlock(it.cid))))
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == blocks1[0 .. 3].mapIt($it.cid).sorted(cmp[string])
check res.isOk
check res.get.cid == blk.cid
check res.get.data[] == blk.data[]
(await allFinished(blocks2[0 .. 3].mapIt(nodeCmps1.localStore.getBlock(it.cid))))
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == blocks2[0 .. 3].mapIt($it.cid).sorted(cmp[string])
test "Should download dataset using networkStore":
await leecher.downloadDataset(dataset)
test "Should send want-have for block":
let blk = bt.Block.new("Block 1".toBytes).tryGet()
let blkFut = nodeCmps1.pendingBlocks.getWantHandle(blk.cid)
peerCtx2.blockRequestScheduled(blk.address)
for blk in dataset.blocks:
let hasBlock = await blk.cid in leecher.localStore
check hasBlock
(await nodeCmps2.localStore.putBlock(blk)).tryGet()
peerCtx1.wantedBlocks.incl(blk.address)
check nodeCmps2.engine.taskQueue.pushOrUpdateNoWait(peerCtx1).isOk
check eventually (await nodeCmps1.localStore.hasBlock(blk.cid)).tryGet()
check eventually (await blkFut) == blk
test "Should get blocks from remote":
let blocks =
await allFinished(blocks2[4 .. 7].mapIt(nodeCmps1.networkStore.getBlock(it.cid)))
check blocks.mapIt(it.read().tryGet()) == blocks2[4 .. 7]
test "Remote should send blocks when available":
let blk = bt.Block.new("Block 1".toBytes).tryGet()
# should fail retrieving block from remote
check not await blk.cid in nodeCmps1.networkStore
# second trigger blockexc to resolve any pending requests
# for the block
(await nodeCmps2.networkStore.putBlock(blk)).tryGet()
# should succeed retrieving block from remote
check await nodeCmps1.networkStore.getBlock(blk.cid).withTimeout(100.millis)
# should succeed
asyncchecksuite "NetworkStore - multiple nodes":
asyncchecksuite "BlockExchange - Presence Discovery":
var
nodes: seq[NodesComponents]
blocks: seq[bt.Block]
cluster: NodesCluster
seeder: NodesComponents
leecher: NodesComponents
dataset: TestDataset
setup:
blocks = await makeRandomBlocks(datasetSize = 4096, blockSize = 256'nb)
nodes = generateNodes(5)
for e in nodes:
await e.engine.start()
cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
seeder = cluster.components[0]
leecher = cluster.components[1]
await allFuturesThrowing(nodes.mapIt(it.switch.start()))
let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
dataset = makeDataset(blocks).tryGet()
await seeder.assignBlocks(dataset)
await cluster.components.start()
await connectNodes(cluster)
teardown:
await allFuturesThrowing(nodes.mapIt(it.switch.stop()))
await cluster.components.stop()
nodes = @[]
test "Should receive blocks for own want list":
test "Should receive presence response for blocks peer has":
let
downloader = nodes[4].networkStore
engine = downloader.engine
treeCid = dataset.manifest.treeCid
totalBlocks = dataset.blocks.len.uint64
blockSize = dataset.manifest.blockSize.uint32
desc = toDownloadDesc(treeCid, totalBlocks, blockSize)
download = leecher.downloadManager.startDownload(desc)
address = BlockAddress(treeCid: treeCid, index: 0)
# Add blocks from 1st peer to want list
let
downloadCids = blocks[0 .. 3].mapIt(it.cid) & blocks[12 .. 15].mapIt(it.cid)
pendingBlocks = downloadCids.mapIt(engine.pendingBlocks.getWantHandle(it))
for i in 0 .. 15:
(await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet()
await connectNodes(nodes)
await sleepAsync(100.millis)
await allFuturesThrowing(allFinished(pendingBlocks))
check:
(await allFinished(downloadCids.mapIt(downloader.localStore.getBlock(it))))
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == downloadCids.mapIt($it).sorted(cmp[string])
test "Should exchange blocks with multiple nodes":
let
downloader = nodes[4].networkStore
engine = downloader.engine
# Add blocks from 1st peer to want list
let
pendingBlocks1 = blocks[0 .. 3].mapIt(engine.pendingBlocks.getWantHandle(it.cid))
pendingBlocks2 =
blocks[12 .. 15].mapIt(engine.pendingBlocks.getWantHandle(it.cid))
for i in 0 .. 15:
(await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet()
await connectNodes(nodes)
await sleepAsync(100.millis)
await allFuturesThrowing(allFinished(pendingBlocks1), allFinished(pendingBlocks2))
check pendingBlocks1.mapIt(it.read) == blocks[0 .. 3]
check pendingBlocks2.mapIt(it.read) == blocks[12 .. 15]
asyncchecksuite "NetworkStore - dissemination":
var nodes: seq[NodesComponents]
teardown:
if nodes.len > 0:
await nodes.stop()
test "Should disseminate blocks across large diameter swarm":
let dataset = makeDataset(await makeRandomBlocks(60 * 256, 256'nb)).tryGet()
nodes = generateNodes(
6,
config = NodeConfig(
useRepoStore: false,
findFreePorts: false,
basePort: 8080,
createFullNode: false,
enableBootstrap: false,
enableDiscovery: true,
),
await leecher.network.request.sendWantList(
seeder.switch.peerInfo.peerId,
@[address],
priority = 0,
cancel = false,
wantType = WantType.WantHave,
full = false,
sendDontHave = false,
rangeCount = totalBlocks,
downloadId = download.id,
)
await assignBlocks(nodes[0], dataset, 0 .. 9)
await assignBlocks(nodes[1], dataset, 10 .. 19)
await assignBlocks(nodes[2], dataset, 20 .. 29)
await assignBlocks(nodes[3], dataset, 30 .. 39)
await assignBlocks(nodes[4], dataset, 40 .. 49)
await assignBlocks(nodes[5], dataset, 50 .. 59)
let seederId = seeder.switch.peerInfo.peerId
check await download.waitForPeerInSwarm(seederId)
await nodes.start()
await nodes.linearTopology()
leecher.downloadManager.cancelDownload(treeCid)
let downloads = nodes.mapIt(downloadDataset(it, dataset))
await allFuturesThrowing(downloads).wait(30.seconds)
test "Peer availability should propagate across downloads for same CID":
  let
    treeCid = dataset.manifest.treeCid
    totalBlocks = dataset.blocks.len.uint64
    blockSize = dataset.manifest.blockSize.uint32
    desc = toDownloadDesc(treeCid, totalBlocks, blockSize)
    # Two independent downloads over the same descriptor / CID.
    download1 = leecher.engine.startDownload(desc)
    download2 = leecher.engine.startDownload(desc)
    address = BlockAddress(treeCid: treeCid, index: 0)

  # Send a want-have tagged only with download1's id...
  await leecher.network.request.sendWantList(
    seeder.switch.peerInfo.peerId,
    @[address],
    priority = 0,
    cancel = false,
    wantType = WantType.WantHave,
    full = false,
    sendDontHave = false,
    rangeCount = totalBlocks,
    downloadId = download1.id,
  )

  let seederId = seeder.switch.peerInfo.peerId
  check await download1.waitForPeerInSwarm(seederId)
  # ...yet the presence response must also populate download2's swarm.
  check download2.getSwarm().getPeer(seederId).isSome

  leecher.downloadManager.cancelDownload(treeCid)
test "Should update swarm when peer reports availability":
  let
    treeCid = dataset.manifest.treeCid
    blockSize = dataset.manifest.blockSize.uint32
    desc = toDownloadDesc(treeCid, dataset.blocks.len.uint64, blockSize)
    download = leecher.downloadManager.startDownload(desc)
    availability = BlockAvailability.complete()

  # Reporting complete availability should register the peer in the swarm.
  download.updatePeerAvailability(seeder.switch.peerInfo.peerId, availability)

  let swarm = download.getSwarm()
  check swarm.activePeerCount() == 1
  let peerOpt = swarm.getPeer(seeder.switch.peerInfo.peerId)
  check peerOpt.isSome
  check peerOpt.get().availability.kind == bakComplete
  leecher.downloadManager.cancelDownload(treeCid)
asyncchecksuite "BlockExchange - Multi-Peer Download":
  # Two seeders each hold half the dataset; the leecher must combine them.
  var
    cluster: NodesCluster
    seeder1: NodesComponents
    seeder2: NodesComponents
    leecher: NodesComponents
    dataset: TestDataset

  setup:
    cluster = generateNodes(3, config = NodeConfig(findFreePorts: true))
    seeder1 = cluster.components[0]
    seeder2 = cluster.components[1]
    leecher = cluster.components[2]

    let blocks = await makeRandomBlocks(8 * 1024, 1024.NBytes)
    dataset = makeDataset(blocks).tryGet()

    # Split the dataset between the two seeders so a full download
    # requires fetching from both peers.
    let halfPoint = dataset.blocks.len div 2
    await seeder1.assignBlocks(dataset, 0 ..< halfPoint)
    await seeder2.assignBlocks(dataset, halfPoint ..< dataset.blocks.len)

    await cluster.components.start()
    await connectNodes(cluster)

  teardown:
    await cluster.components.stop()

  test "Should download blocks from multiple peers":
    await leecher.downloadDataset(dataset)
    # Every block must end up in the leecher's local store.
    for blk in dataset.blocks:
      let hasBlock = await blk.cid in leecher.localStore
      check hasBlock

  test "Should handle partial availability from peers":
    let
      treeCid = dataset.manifest.treeCid
      blockSize = dataset.manifest.blockSize.uint32
      desc = toDownloadDesc(treeCid, dataset.blocks.len.uint64, blockSize)
      download = leecher.downloadManager.startDownload(desc)
      halfPoint = (dataset.blocks.len div 2).uint64
      ranges1 = @[(start: 0'u64, count: halfPoint)]

    # seeder1 advertises the first half, seeder2 the second half.
    download.updatePeerAvailability(
      seeder1.switch.peerInfo.peerId, BlockAvailability.fromRanges(ranges1)
    )
    let ranges2 = @[(start: halfPoint, count: dataset.blocks.len.uint64 - halfPoint)]
    download.updatePeerAvailability(
      seeder2.switch.peerInfo.peerId, BlockAvailability.fromRanges(ranges2)
    )

    let swarm = download.getSwarm()
    check swarm.activePeerCount() == 2
    # Each seeder should be selectable only for the half it holds.
    let peersForFirst = swarm.peersWithRange(0, halfPoint)
    check seeder1.switch.peerInfo.peerId in peersForFirst
    let peersForSecond =
      swarm.peersWithRange(halfPoint, dataset.blocks.len.uint64 - halfPoint)
    check seeder2.switch.peerInfo.peerId in peersForSecond
    leecher.downloadManager.cancelDownload(treeCid)
asyncchecksuite "BlockExchange - Download Lifecycle":
  # One seeder with the full dataset; tests exercise start / release /
  # cancel semantics of the download manager.
  var
    cluster: NodesCluster
    seeder: NodesComponents
    leecher: NodesComponents
    dataset: TestDataset

  setup:
    cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
    seeder = cluster.components[0]
    leecher = cluster.components[1]
    let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
    dataset = makeDataset(blocks).tryGet()
    await seeder.assignBlocks(dataset)
    await cluster.components.start()
    await connectNodes(cluster)

  teardown:
    await cluster.components.stop()
test "Should allow multiple downloads for same CID":
  # Two startDownload calls on one descriptor yield distinct downloads that
  # share the CID; cancelling by CID removes the entry entirely.
  let rootCid = dataset.manifest.treeCid
  let blockCount = dataset.blocks.len.uint64
  let blkSize = dataset.manifest.blockSize.uint32
  let descriptor = toDownloadDesc(rootCid, blockCount, blkSize)
  let first = leecher.downloadManager.startDownload(descriptor)
  let second = leecher.downloadManager.startDownload(descriptor)

  check first.id != second.id
  check first.cid == second.cid

  leecher.downloadManager.cancelDownload(rootCid)
  check leecher.downloadManager.getDownload(rootCid).isNone
test "Two concurrent full downloads for same CID should both complete":
  let
    treeCid = dataset.manifest.treeCid
    totalBlocks = dataset.blocks.len.uint64
    blockSize = dataset.manifest.blockSize.uint32

  let handle1 = leecher.engine.startTreeDownload(treeCid, blockSize, totalBlocks)
  require handle1.isOk == true
  let handle2 = leecher.engine.startTreeDownload(treeCid, blockSize, totalBlocks)
  require handle2.isOk == true

  let
    h1 = handle1.get()
    h2 = handle2.get()
  var
    blocksReceived1 = 0
    blocksReceived2 = 0

  # Drain both handles; each one must deliver the complete dataset.
  while not h1.finished:
    let blk = await h1.next()
    if blk.isOk:
      blocksReceived1 += 1
  while not h2.finished:
    let blk = await h2.next()
    if blk.isOk:
      blocksReceived2 += 1

  check blocksReceived1 == totalBlocks.int
  check blocksReceived2 == totalBlocks.int

  leecher.engine.releaseDownload(h1)
  leecher.engine.releaseDownload(h2)
test "Releasing one download should not cancel other downloads for same CID":
  let
    treeCid = dataset.manifest.treeCid
    totalBlocks = dataset.blocks.len.uint64
    blockSize = dataset.manifest.blockSize.uint32

  let handle1 = leecher.engine.startTreeDownload(treeCid, blockSize, totalBlocks)
  require handle1.isOk
  let h1 = handle1.get()
  let handle2 = leecher.engine.startTreeDownload(treeCid, blockSize, totalBlocks)
  require handle2.isOk
  let h2 = handle2.get()

  # Releasing h1 must keep the shared download alive for h2.
  leecher.engine.releaseDownload(h1)
  check leecher.downloadManager.getDownload(treeCid).isSome

  var blocksReceived = 0
  while not h2.finished:
    let blk = await h2.next()
    if blk.isOk:
      blocksReceived += 1
  check blocksReceived == totalBlocks.int

  # Releasing the last handle removes the download entirely.
  leecher.engine.releaseDownload(h2)
  check leecher.downloadManager.getDownload(treeCid).isNone
test "Should cancel download":
  # Starting then cancelling a download must leave no entry behind.
  let rootCid = dataset.manifest.treeCid
  let descriptor = toDownloadDesc(
    rootCid, dataset.blocks.len.uint64, dataset.manifest.blockSize.uint32
  )
  discard leecher.downloadManager.startDownload(descriptor)
  leecher.downloadManager.cancelDownload(rootCid)
  check leecher.downloadManager.getDownload(rootCid).isNone
asyncchecksuite "BlockExchange - Error Handling":
  var
    cluster: NodesCluster
    seeder: NodesComponents
    leecher: NodesComponents
    dataset: TestDataset

  setup:
    cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
    seeder = cluster.components[0]
    leecher = cluster.components[1]
    let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
    dataset = makeDataset(blocks).tryGet()
    # The seeder deliberately holds only the first two blocks.
    await seeder.assignBlocks(dataset, 0 ..< 2)
    await cluster.components.start()
    await connectNodes(cluster)

  teardown:
    await cluster.components.stop()

  test "Should handle peer with partial blocks in swarm":
    let
      treeCid = dataset.manifest.treeCid
      blockSize = dataset.manifest.blockSize.uint32
      desc = toDownloadDesc(treeCid, dataset.blocks.len.uint64, blockSize)
      download = leecher.downloadManager.startDownload(desc)
      ranges = @[(start: 0'u64, count: 2'u64)]

    download.updatePeerAvailability(
      seeder.switch.peerInfo.peerId, BlockAvailability.fromRanges(ranges)
    )

    let
      swarm = download.getSwarm()
      candidates = swarm.peersWithRange(0, 2)
    check seeder.switch.peerInfo.peerId in candidates
    # The peer must not be offered for ranges it does not hold.
    let candidatesForMissing = swarm.peersWithRange(2, 2)
    check seeder.switch.peerInfo.peerId notin candidatesForMissing
    leecher.downloadManager.cancelDownload(treeCid)

  test "Should requeue batch on peer failure":
    let
      treeCid = dataset.manifest.treeCid
      blockSize = dataset.manifest.blockSize.uint32
      desc = toDownloadDesc(treeCid, dataset.blocks.len.uint64, blockSize)
      download = leecher.downloadManager.startDownload(desc)
      batch = leecher.downloadManager.getNextBatch(download)
    check batch.isSome
    download.markBatchInFlight(
      batch.get.start, batch.get.count, 0, seeder.switch.peerInfo.peerId
    )
    check download.pendingBatchCount() == 1
    # A failing peer returns its in-flight batch to the scheduler queue.
    download.handlePeerFailure(seeder.switch.peerInfo.peerId)
    check download.pendingBatchCount() == 0
    check download.ctx.scheduler.requeuedCount() == 1
    leecher.downloadManager.cancelDownload(treeCid)
asyncchecksuite "BlockExchange - Local Block Resolution":
  # Single node holding the whole dataset locally — no network involved.
  var
    cluster: NodesCluster
    node1: NodesComponents
    dataset: TestDataset

  setup:
    cluster = generateNodes(1, config = NodeConfig(findFreePorts: true))
    node1 = cluster.components[0]
    let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
    dataset = makeDataset(blocks).tryGet()
    await node1.assignBlocks(dataset)
    await cluster.components.start()

  teardown:
    await cluster.components.stop()

  test "Should return local blocks directly":
    for i, blk in dataset.blocks:
      let
        address = BlockAddress(treeCid: dataset.manifest.treeCid, index: i)
        res = await node1.engine.requestBlock(address)
      check res.isOk
      check res.get.cid == blk.cid

  test "Download worker should complete wantHandles when all blocks are local":
    let
      treeCid = dataset.manifest.treeCid
      totalBlocks = dataset.blocks.len.uint64
      blockSize = dataset.manifest.blockSize.uint32
      desc = toDownloadDesc(treeCid, totalBlocks, blockSize)
      download = node1.downloadManager.startDownload(desc)

    # Register a want handle for every block in the tree.
    var handles: seq[BlockHandle] = @[]
    for i in 0'u64 ..< totalBlocks:
      let address = download.makeBlockAddress(i)
      handles.add(download.getWantHandle(address))

    # One worker pass; local availability alone should resolve all wants.
    await node1.engine.downloadWorker(download)

    for handle in handles:
      check handle.finished
      let blk = await handle
      check blk.isOk
    node1.downloadManager.cancelDownload(treeCid)
asyncchecksuite "BlockExchange - Mixed Local and Network":
  var
    cluster: NodesCluster
    seeder: NodesComponents
    leecher: NodesComponents
    dataset: TestDataset

  setup:
    cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
    seeder = cluster.components[0]
    leecher = cluster.components[1]
    let blocks = await makeRandomBlocks(8 * 1024, 1024.NBytes)
    dataset = makeDataset(blocks).tryGet()
    await seeder.assignBlocks(dataset)
    # The leecher starts with the first half already in its local store.
    let halfPoint = dataset.blocks.len div 2
    await leecher.assignBlocks(dataset, 0 ..< halfPoint)
    await cluster.components.start()
    await connectNodes(cluster)

  teardown:
    await cluster.components.stop()

  test "Should download dataset with some blocks local and some from network":
    await leecher.downloadDataset(dataset)
    for blk in dataset.blocks:
      let hasBlock = await blk.cid in leecher.localStore
      check hasBlock

  test "Should handle interleaved local and network blocks":
    # Pre-store every even-indexed block locally, fetch the rest remotely.
    for i, blk in dataset.blocks:
      if i mod 2 == 0:
        (await leecher.localStore.putBlock(blk)).tryGet()
    await leecher.downloadDataset(dataset)
    for blk in dataset.blocks:
      let hasBlock = await blk.cid in leecher.localStore
      check hasBlock
asyncchecksuite "BlockExchange - Re-download from Local":
  var
    cluster: NodesCluster
    seeder: NodesComponents
    leecher: NodesComponents
    dataset: TestDataset

  setup:
    cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
    seeder = cluster.components[0]
    leecher = cluster.components[1]
    let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
    dataset = makeDataset(blocks).tryGet()
    await seeder.assignBlocks(dataset)
    await cluster.components.start()
    await connectNodes(cluster)

  teardown:
    await cluster.components.stop()

  test "Should re-download from local after network download":
    # First pass pulls everything from the seeder over the network...
    await leecher.downloadDataset(dataset)
    for blk in dataset.blocks:
      let hasBlock = await blk.cid in leecher.localStore
      check hasBlock
    # ...second pass must succeed again, now served from the local store.
    await leecher.downloadDataset(dataset)
    for blk in dataset.blocks:
      let hasBlock = await blk.cid in leecher.localStore
      check hasBlock
asyncchecksuite "BlockExchange - NetworkStore getBlocks":
  # Exercises the NetworkStore batch-retrieval path via downloadDataset /
  # getBlocks under different local/remote availability mixes.
  var
    cluster: NodesCluster
    seeder: NodesComponents
    leecher: NodesComponents
    dataset: TestDataset

  setup:
    cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
    seeder = cluster.components[0]
    leecher = cluster.components[1]
    let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
    dataset = makeDataset(blocks).tryGet()
    await seeder.assignBlocks(dataset)
    await cluster.components.start()
    await connectNodes(cluster)

  teardown:
    await cluster.components.stop()

  test "getBlocks all local":
    await leecher.assignBlocks(dataset)
    await leecher.downloadDataset(dataset)

  test "getBlocks all from network":
    await leecher.downloadDataset(dataset)

  test "getBlocks mixed local and network":
    await leecher.assignBlocks(dataset, 0 ..< 2)
    await leecher.downloadDataset(dataset)

  test "getBlocks subset with some local":
    let
      treeCid = dataset.manifest.treeCid
      totalBlocks = dataset.blocks.len
    await leecher.assignBlocks(dataset, 0 ..< totalBlocks - 2)

    # Request only the last two (remote-only) blocks.
    var addresses: seq[BlockAddress]
    for i in totalBlocks - 2 ..< totalBlocks:
      addresses.add(BlockAddress.init(treeCid, i))

    var count = 0
    for blkFut in (await leecher.networkStore.getBlocks(addresses)):
      let blk = (await blkFut).tryGet()
      count += 1
    check count == 2

View File

@ -1,98 +1,33 @@
import std/sequtils
import std/random
import std/algorithm
import std/[sequtils, options]
import pkg/stew/byteutils
import pkg/chronos
import pkg/libp2p/errors
import pkg/libp2p/routing_record
import pkg/codexdht/discv5/protocol as discv5
import pkg/storage/rng
import pkg/storage/blockexchange
import pkg/storage/stores
import pkg/storage/chunker
import pkg/storage/discovery
import pkg/storage/blocktype
import pkg/storage/utils/asyncheapqueue
import pkg/storage/merkletree
import pkg/storage/blockexchange/utils
import pkg/storage/blockexchange/engine/activedownload {.all.}
import pkg/storage/blockexchange/engine/downloadmanager {.all.}
import ../../../asynctest
import ../../helpers
import ../../examples
# No-op stand-in for the sendWantCancellations network callback, used by
# tests that don't care about cancellation traffic.
const NopSendWantCancellationsProc = proc(
    id: PeerId, addresses: seq[BlockAddress]
) {.async: (raises: [CancelledError]).} =
  discard
asyncchecksuite "NetworkStore engine basic":
  var
    peerId: PeerId
    chunker: Chunker
    blockDiscovery: Discovery
    peerStore: PeerCtxStore
    pendingBlocks: PendingBlocksManager
    blocks: seq[Block]
    done: Future[void] # completed by the handler under test

  setup:
    peerId = PeerId.example
    chunker = RandomChunker.new(Rng.instance(), size = 1024'nb, chunkSize = 256'nb)
    blockDiscovery = Discovery.new()
    peerStore = PeerCtxStore.new()
    pendingBlocks = PendingBlocksManager.new()

    # Chunk random data into test blocks.
    while true:
      let chunk = await chunker.getBytes()
      if chunk.len <= 0:
        break
      blocks.add(Block.new(chunk).tryGet())

    done = newFuture[void]()

  test "Should send want list to new peers":
    proc sendWantList(
        id: PeerId,
        addresses: seq[BlockAddress],
        priority: int32 = 0,
        cancel: bool = false,
        wantType: WantType = WantType.WantHave,
        full: bool = false,
        sendDontHave: bool = false,
    ) {.async: (raises: [CancelledError]).} =
      # The engine should ask a freshly-added peer for every pending want.
      check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt($it.cid).sorted
      done.complete()

    let
      network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList))
      localStore = CacheStore.new(blocks.mapIt(it))
      discovery = DiscoveryEngine.new(
        localStore, peerStore, network, blockDiscovery, pendingBlocks
      )
      advertiser = Advertiser.new(localStore, blockDiscovery)
      engine = BlockExcEngine.new(
        localStore, network, discovery, advertiser, peerStore, pendingBlocks
      )

    for b in blocks:
      discard engine.pendingBlocks.getWantHandle(b.cid)
    await engine.peerAddedHandler(peerId)

    await done.wait(100.millis)
asyncchecksuite "NetworkStore engine handlers":
var
peerId: PeerId
chunker: Chunker
blockDiscovery: Discovery
peerStore: PeerCtxStore
pendingBlocks: PendingBlocksManager
peerStore: PeerContextStore
downloadManager: DownloadManager
network: BlockExcNetwork
engine: BlockExcEngine
discovery: DiscoveryEngine
advertiser: Advertiser
peerCtx: BlockExcPeerCtx
peerCtx: PeerContext
localStore: BlockStore
blocks: seq[Block]
@ -108,61 +43,55 @@ asyncchecksuite "NetworkStore engine handlers":
peerId = PeerId.example
blockDiscovery = Discovery.new()
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
peerStore = PeerContextStore.new()
downloadManager = DownloadManager.new()
localStore = CacheStore.new()
network = BlockExcNetwork()
discovery =
DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks)
discovery = DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery)
advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new(
localStore, network, discovery, advertiser, peerStore, pendingBlocks
localStore, network, discovery, advertiser, peerStore, downloadManager
)
peerCtx = BlockExcPeerCtx(id: peerId)
peerCtx = PeerContext(id: peerId)
engine.peers.add(peerCtx)
test "Should schedule block requests":
let wantList = makeWantList(blocks.mapIt(it.cid), wantType = WantType.WantBlock)
# only `wantBlock` are stored in `peerWants`
proc handler() {.async.} =
let ctx = await engine.taskQueue.pop()
check ctx.id == peerId
# only `wantBlock` scheduled
check ctx.wantedBlocks == blocks.mapIt(it.address).toHashSet
let done = handler()
await engine.wantListHandler(peerId, wantList)
await done
test "Should handle want list":
let
tree = StorageMerkleTree.init(blocks.mapIt(it.cid)).tryGet
rootCid = tree.rootCid.tryGet()
for i, blk in blocks:
(await localStore.putBlock(blk)).tryGet()
(await localStore.putCidAndProof(rootCid, i, blk.cid, tree.getProof(i).tryGet())).tryGet()
let
done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid))
wantList = makeWantList(rootCid, blocks.len)
proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
check presence.mapIt(it.address) == wantList.entries.mapIt(it.address)
for p in presence:
check p.kind in {BlockPresenceType.HaveRange, BlockPresenceType.Complete}
done.complete()
engine.network =
BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence))
await allFuturesThrowing(allFinished(blocks.mapIt(localStore.putBlock(it))))
await engine.wantListHandler(peerId, wantList)
await done
test "Should handle want list - `dont-have`":
let
done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true)
treeCid = Cid.example
wantList = makeWantList(treeCid, blocks.len, sendDontHave = true)
proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
@ -170,7 +99,7 @@ asyncchecksuite "NetworkStore engine handlers":
check presence.mapIt(it.address) == wantList.entries.mapIt(it.address)
for p in presence:
check:
p.`type` == BlockPresenceType.DontHave
p.kind == BlockPresenceType.DontHave
done.complete()
@ -181,55 +110,42 @@ asyncchecksuite "NetworkStore engine handlers":
await done
test "Should handle want list - `dont-have` some blocks":
let
tree = StorageMerkleTree.init(blocks.mapIt(it.cid)).tryGet
rootCid = tree.rootCid.tryGet()
for i in 0 ..< 2:
(await engine.localStore.putBlock(blocks[i])).tryGet()
(
await engine.localStore.putCidAndProof(
rootCid, i, blocks[i].cid, tree.getProof(i).tryGet()
)
).tryGet()
let
done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true)
wantList = makeWantList(rootCid, blocks.len, sendDontHave = true)
proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
for p in presence:
if p.address.cidOrTreeCid != blocks[0].cid and
p.address.cidOrTreeCid != blocks[1].cid:
check p.`type` == BlockPresenceType.DontHave
if p.address.index >= 2:
check p.kind == BlockPresenceType.DontHave
else:
check p.`type` == BlockPresenceType.Have
check p.kind in {BlockPresenceType.HaveRange, BlockPresenceType.Complete}
done.complete()
engine.network =
BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence))
(await engine.localStore.putBlock(blocks[0])).tryGet()
(await engine.localStore.putBlock(blocks[1])).tryGet()
await engine.wantListHandler(peerId, wantList)
await done
test "Should store blocks in local store":
let pending = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))
for blk in blocks:
peerCtx.blockRequestScheduled(blk.address)
let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))
# Install NOP for want list cancellations so they don't cause a crash
engine.network = BlockExcNetwork(
request: BlockExcRequest(sendWantCancellations: NopSendWantCancellationsProc)
)
await engine.blocksDeliveryHandler(peerId, blocksDelivery)
let resolved = await allFinished(pending)
check resolved.mapIt(it.read) == blocks
for b in blocks:
let present = await engine.localStore.hasBlock(b.cid)
check present.tryGet()
test "Should handle block presence":
var handles:
Table[Cid, Future[Block].Raising([CancelledError, RetriesExhaustedError])]
proc sendWantList(
id: PeerId,
addresses: seq[BlockAddress],
@ -238,335 +154,169 @@ asyncchecksuite "NetworkStore engine handlers":
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
rangeCount: uint64 = 0,
downloadId: uint64 = 0,
) {.async: (raises: [CancelledError]).} =
engine.pendingBlocks.resolve(
blocks.filterIt(it.address in addresses).mapIt(
BlockDelivery(blk: it, address: it.address)
)
)
discard
engine.network =
BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList))
# only Cids in peer want lists are requested
handles = blocks.mapIt((it.cid, engine.pendingBlocks.getWantHandle(it.cid))).toTable
let
blockCid = blocks[0].cid
address = BlockAddress(treeCid: blockCid, index: 0)
desc = toDownloadDesc(address, DefaultBlockSize.uint32)
download = engine.downloadManager.startDownload(desc)
discard download.getWantHandle(address)
await engine.blockPresenceHandler(
peerId,
blocks.mapIt(PresenceMessage.init(Presence(address: it.address, have: true))),
@[
BlockPresence(
address: address, kind: BlockPresenceType.Complete, downloadId: download.id
)
],
)
for a in blocks.mapIt(it.address):
check a in peerCtx.peerHave
test "Should send cancellations for requested blocks only":
let
pendingPeer = peerId # peer towards which we have pending block requests
pendingPeerCtx = peerCtx
senderPeer = PeerId.example # peer that will actually send the blocks
senderPeerCtx = BlockExcPeerCtx(id: senderPeer)
reqBlocks = @[blocks[0], blocks[4]] # blocks that we requested to pendingPeer
reqBlockAddrs = reqBlocks.mapIt(it.address)
blockHandles = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))
swarm = download.getSwarm()
peerOpt = swarm.getPeer(peerId)
check peerOpt.isSome
var cancelled: HashSet[BlockAddress]
engine.peers.add(senderPeerCtx)
for address in reqBlockAddrs:
pendingPeerCtx.blockRequestScheduled(address)
for address in blocks.mapIt(it.address):
senderPeerCtx.blockRequestScheduled(address)
proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress]
) {.async: (raises: [CancelledError]).} =
assert id == pendingPeer
for address in addresses:
cancelled.incl(address)
engine.network = BlockExcNetwork(
request: BlockExcRequest(sendWantCancellations: sendWantCancellations)
)
let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))
await engine.blocksDeliveryHandler(senderPeer, blocksDelivery)
discard await allFinished(blockHandles).wait(100.millis)
check cancelled == reqBlockAddrs.toHashSet()
asyncchecksuite "Block Download":
var
seckey: PrivateKey
peerId: PeerId
chunker: Chunker
blockDiscovery: Discovery
peerStore: PeerCtxStore
pendingBlocks: PendingBlocksManager
network: BlockExcNetwork
engine: BlockExcEngine
discovery: DiscoveryEngine
advertiser: Advertiser
peerCtx: BlockExcPeerCtx
localStore: BlockStore
blocks: seq[Block]
setup:
chunker = RandomChunker.new(Rng.instance(), size = 1024'nb, chunkSize = 256'nb)
while true:
let chunk = await chunker.getBytes()
if chunk.len <= 0:
break
blocks.add(Block.new(chunk).tryGet())
peerId = PeerId.example
blockDiscovery = Discovery.new()
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
localStore = CacheStore.new()
network = BlockExcNetwork()
discovery =
DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks)
advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new(
localStore, network, discovery, advertiser, peerStore, pendingBlocks
)
peerCtx = BlockExcPeerCtx(id: peerId, activityTimeout: 100.milliseconds)
engine.peers.add(peerCtx)
test "Should reschedule blocks on peer timeout":
test "Should handle range want list":
let
slowPeer = peerId
fastPeer = PeerId.example
slowPeerCtx = peerCtx
# "Fast" peer has in fact a generous timeout. This should avoid timing issues
# in the test.
fastPeerCtx = BlockExcPeerCtx(id: fastPeer, activityTimeout: 60.seconds)
requestedBlock = blocks[0]
var
slowPeerWantList = newFuture[void]("slowPeerWantList")
fastPeerWantList = newFuture[void]("fastPeerWantList")
slowPeerDropped = newFuture[void]("slowPeerDropped")
slowPeerBlockRequest = newFuture[void]("slowPeerBlockRequest")
fastPeerBlockRequest = newFuture[void]("fastPeerBlockRequest")
engine.peers.add(fastPeerCtx)
proc sendWantList(
id: PeerId,
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
) {.async: (raises: [CancelledError]).} =
check addresses == @[requestedBlock.address]
if wantType == WantBlock:
if id == slowPeer:
slowPeerBlockRequest.complete()
else:
fastPeerBlockRequest.complete()
if wantType == WantHave:
if id == slowPeer:
slowPeerWantList.complete()
else:
fastPeerWantList.complete()
proc onPeerDropped(
peer: PeerId
): Future[void] {.async: (raises: [CancelledError]).} =
assert peer == slowPeer
slowPeerDropped.complete()
proc selectPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
# Looks for the slow peer.
for peer in peers:
if peer.id == slowPeer:
return peer
return peers[0]
engine.selectPeer = selectPeer
engine.pendingBlocks.retryInterval = 200.milliseconds
engine.network =
BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList))
engine.network.handlers.onPeerDropped = onPeerDropped
let blockHandle = engine.requestBlock(requestedBlock.address)
# Waits for the peer to send its want list to both peers.
await slowPeerWantList.wait(5.seconds)
await fastPeerWantList.wait(5.seconds)
let blockPresence =
@[BlockPresence(address: requestedBlock.address, type: BlockPresenceType.Have)]
await engine.blockPresenceHandler(slowPeer, blockPresence)
await engine.blockPresenceHandler(fastPeer, blockPresence)
# Waits for the peer to ask for the block.
await slowPeerBlockRequest.wait(5.seconds)
# Don't reply and wait for the peer to be dropped by timeout.
await slowPeerDropped.wait(5.seconds)
# The engine should retry and ask the fast peer for the block.
await fastPeerBlockRequest.wait(5.seconds)
await engine.blocksDeliveryHandler(
fastPeer, @[BlockDelivery(blk: requestedBlock, address: requestedBlock.address)]
)
discard await blockHandle.wait(5.seconds)
test "Should cancel block request":
var
address = BlockAddress.init(blocks[0].cid)
done = newFuture[void]()
treeCid = Cid.example
tree = StorageMerkleTree.init(blocks.mapIt(it.cid)).tryGet
rootCid = tree.rootCid.tryGet()
proc sendWantList(
id: PeerId,
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
for i, blk in blocks:
(await localStore.putBlock(blk)).tryGet()
let proof = tree.getProof(i).tryGet()
(await localStore.putCidAndProof(rootCid, i, blk.cid, proof)).tryGet()
let wantList = WantList(
entries: @[
WantListEntry(
address: BlockAddress(treeCid: rootCid, index: 0),
priority: 0,
cancel: false,
wantType: WantType.WantHave,
sendDontHave: false,
rangeCount: blocks.len.uint64,
)
],
full: false,
)
proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
check presence.len == 1
check presence[0].kind == BlockPresenceType.HaveRange
check presence[0].ranges.len > 0
done.complete()
engine.pendingBlocks.blockRetries = 10
engine.pendingBlocks.retryInterval = 1.seconds
engine.network = BlockExcNetwork(
request: BlockExcRequest(
sendWantList: sendWantList, sendWantCancellations: NopSendWantCancellationsProc
)
engine.network =
BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence))
await engine.wantListHandler(peerId, wantList)
await done
test "Should not send presence for blocks not in range":
let
done = newFuture[void]()
treeCid = Cid.example
tree = StorageMerkleTree.init(blocks.mapIt(it.cid)).tryGet
rootCid = tree.rootCid.tryGet()
for i in 0 ..< 2:
(await localStore.putBlock(blocks[i])).tryGet()
let proof = tree.getProof(i).tryGet()
(await localStore.putCidAndProof(rootCid, i, blocks[i].cid, proof)).tryGet()
let wantList = WantList(
entries: @[
WantListEntry(
address: BlockAddress(treeCid: rootCid, index: 0),
priority: 0,
cancel: false,
wantType: WantType.WantHave,
sendDontHave: false,
rangeCount: blocks.len.uint64,
)
],
full: false,
)
let pending = engine.requestBlock(address)
await done.wait(100.millis)
pending.cancel()
expect CancelledError:
discard (await pending).tryGet()
asyncchecksuite "Task Handler":
var
peerId: PeerId
chunker: Chunker
blockDiscovery: Discovery
peerStore: PeerCtxStore
pendingBlocks: PendingBlocksManager
network: BlockExcNetwork
engine: BlockExcEngine
discovery: DiscoveryEngine
advertiser: Advertiser
localStore: BlockStore
peersCtx: seq[BlockExcPeerCtx]
peers: seq[PeerId]
blocks: seq[Block]
setup:
chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256'nb)
while true:
let chunk = await chunker.getBytes()
if chunk.len <= 0:
break
blocks.add(Block.new(chunk).tryGet())
peerId = PeerId.example
blockDiscovery = Discovery.new()
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
localStore = CacheStore.new()
network = BlockExcNetwork()
discovery =
DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks)
advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new(
localStore, network, discovery, advertiser, peerStore, pendingBlocks
)
peersCtx = @[]
for i in 0 .. 3:
peers.add(PeerId.example)
peersCtx.add(BlockExcPeerCtx(id: peers[i]))
peerStore.add(peersCtx[i])
# FIXME: this is disabled for now: I've dropped block priorities to make
# my life easier as I try to optimize the protocol, and also because
# they were not being used anywhere.
#
# test "Should send want-blocks in priority order":
# proc sendBlocksDelivery(
# id: PeerId, blocksDelivery: seq[BlockDelivery]
# ) {.async: (raises: [CancelledError]).} =
# check blocksDelivery.len == 2
# check:
# blocksDelivery[1].address == blocks[0].address
# blocksDelivery[0].address == blocks[1].address
# for blk in blocks:
# (await engine.localStore.putBlock(blk)).tryGet()
# engine.network.request.sendBlocksDelivery = sendBlocksDelivery
# # second block to send by priority
# peersCtx[0].peerWants.add(
# WantListEntry(
# address: blocks[0].address,
# priority: 49,
# cancel: false,
# wantType: WantType.WantBlock,
# sendDontHave: false,
# )
# )
# # first block to send by priority
# peersCtx[0].peerWants.add(
# WantListEntry(
# address: blocks[1].address,
# priority: 50,
# cancel: false,
# wantType: WantType.WantBlock,
# sendDontHave: false,
# )
# )
# await engine.taskHandler(peersCtx[0])
test "Should mark outgoing blocks as sent":
proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery]
proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
let blockAddress = peersCtx[0].wantedBlocks.toSeq[0]
check peersCtx[0].isBlockSent(blockAddress)
check presence.len == 1
check presence[0].kind == BlockPresenceType.HaveRange
for (start, count) in presence[0].ranges:
check start < 2
done.complete()
for blk in blocks:
(await engine.localStore.putBlock(blk)).tryGet()
engine.network.request.sendBlocksDelivery = sendBlocksDelivery
engine.network =
BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence))
peersCtx[0].wantedBlocks.incl(blocks[0].address)
await engine.wantListHandler(peerId, wantList)
await done
await engine.taskHandler(peersCtx[0])
suite "IsIndexInRanges":
test "Empty ranges returns false":
let ranges: seq[(uint64, uint64)] = @[]
check not isIndexInRanges(0, ranges)
check not isIndexInRanges(100, ranges)
test "Should not mark blocks for which local look fails as sent":
peersCtx[0].wantedBlocks.incl(blocks[0].address)
test "Single range - index inside":
let ranges = @[(10'u64, 5'u64)]
check isIndexInRanges(10, ranges, sortedRanges = true)
check isIndexInRanges(12, ranges, sortedRanges = true)
check isIndexInRanges(14, ranges, sortedRanges = true)
await engine.taskHandler(peersCtx[0])
test "Single range - index outside":
let ranges = @[(10'u64, 5'u64)]
check not isIndexInRanges(9, ranges, sortedRanges = true)
check not isIndexInRanges(15, ranges, sortedRanges = true)
check not isIndexInRanges(100, ranges, sortedRanges = true)
let blockAddress = peersCtx[0].wantedBlocks.toSeq[0]
check not peersCtx[0].isBlockSent(blockAddress)
test "Multiple sorted ranges - index in each":
let ranges = @[(0'u64, 3'u64), (10'u64, 5'u64), (100'u64, 10'u64)]
check isIndexInRanges(0, ranges, sortedRanges = true)
check isIndexInRanges(2, ranges, sortedRanges = true)
check isIndexInRanges(10, ranges, sortedRanges = true)
check isIndexInRanges(14, ranges, sortedRanges = true)
check isIndexInRanges(100, ranges, sortedRanges = true)
check isIndexInRanges(109, ranges, sortedRanges = true)
test "Multiple ranges - index in gaps":
let ranges = @[(0'u64, 3'u64), (10'u64, 5'u64), (100'u64, 10'u64)]
check not isIndexInRanges(3, ranges, sortedRanges = true)
check not isIndexInRanges(9, ranges, sortedRanges = true)
check not isIndexInRanges(15, ranges, sortedRanges = true)
check not isIndexInRanges(99, ranges, sortedRanges = true)
check not isIndexInRanges(110, ranges, sortedRanges = true)
test "Unsorted ranges with sortedRanges=false":
let ranges = @[(100'u64, 10'u64), (0'u64, 3'u64), (10'u64, 5'u64)]
check isIndexInRanges(0, ranges, sortedRanges = false)
check isIndexInRanges(2, ranges, sortedRanges = false)
check isIndexInRanges(10, ranges, sortedRanges = false)
check isIndexInRanges(105, ranges, sortedRanges = false)
check not isIndexInRanges(50, ranges, sortedRanges = false)
test "Adjacent ranges":
let ranges = @[(0'u64, 5'u64), (5'u64, 5'u64), (10'u64, 5'u64)]
for i in 0'u64 ..< 15:
check isIndexInRanges(i, ranges, sortedRanges = true)
check not isIndexInRanges(15, ranges, sortedRanges = true)
test "Large range values":
let ranges = @[(1_000_000_000'u64, 1_000_000'u64)]
check isIndexInRanges(1_000_000_000, ranges, sortedRanges = true)
check isIndexInRanges(1_000_500_000, ranges, sortedRanges = true)
check not isIndexInRanges(999_999_999, ranges, sortedRanges = true)
check not isIndexInRanges(1_001_000_000, ranges, sortedRanges = true)

View File

@ -0,0 +1,516 @@
import std/options
import pkg/unittest2
import pkg/storage/blockexchange/engine/scheduler {.all.}
suite "Scheduler":
  # Exercises the block-batch scheduler: batch handout order, completion
  # tracking, partial completion (re-queuing missing sub-ranges), requeueing,
  # and the completed watermark. NOTE(review): `Scheduler` is project-internal;
  # the comments below describe what each test asserts, not independently
  # verified semantics.
  var scheduler: Scheduler

  setup:
    scheduler = Scheduler.new()

  test "Should initialize with correct parameters":
    scheduler.init(1000, 100)
    check scheduler.totalBlockCount() == 1000
    check scheduler.batchSizeCount() == 100
    check scheduler.hasWork() == true
    check scheduler.isEmpty() == false

  test "Should take batches in order":
    # Batches are handed out front-to-back in block-index order.
    scheduler.init(1000, 100)
    let batch1 = scheduler.take()
    check batch1.isSome
    check batch1.get.start == 0
    check batch1.get.count == 100
    let batch2 = scheduler.take()
    check batch2.isSome
    check batch2.get.start == 100
    check batch2.get.count == 100

  test "Should handle last batch with fewer blocks":
    # 250 blocks at batch size 100 -> last batch holds only the 50 remainder.
    scheduler.init(250, 100)
    discard scheduler.take()
    discard scheduler.take()
    let lastBatch = scheduler.take()
    check lastBatch.isSome
    check lastBatch.get.start == 200
    check lastBatch.get.count == 50

  test "Should mark batch as complete":
    scheduler.init(300, 100)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    scheduler.markComplete(0)
    let next = scheduler.take()
    check next.isSome
    check next.get.start == 100

  test "Should requeue batch at front":
    # A front-requeued batch is handed out again before fresh batches.
    scheduler.init(500, 100)
    let batch1 = scheduler.take()
    check batch1.get.start == 0
    let batch2 = scheduler.take()
    check batch2.get.start == 100
    scheduler.requeueFront(0, 100)
    let requeued = scheduler.take()
    check requeued.isSome
    check requeued.get.start == 0
    check requeued.get.count == 100

  test "Should requeue batch at back":
    # Front-requeued batch (100) comes out before the back-requeued one (0).
    scheduler.init(500, 100)
    let
      batch1 = scheduler.take()
      batch2 = scheduler.take()
    scheduler.requeueBack(0, 100)
    scheduler.requeueFront(100, 100)
    let first = scheduler.take()
    check first.get.start == 100
    let second = scheduler.take()
    check second.get.start == 0

  test "Should handle partialComplete with single missing range":
    # partialComplete re-queues only the missing sub-range of the batch.
    scheduler.init(1000, 100)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    check batch.get.count == 100
    let missingRanges = @[(start: 50'u64, count: 50'u64)]
    scheduler.partialComplete(0, missingRanges)
    let next = scheduler.take()
    check next.isSome
    check next.get.start == 50
    check next.get.count == 50

  test "Should handle partialComplete with multiple missing ranges":
    # Multiple missing sub-ranges are re-queued and handed out in order.
    scheduler.init(1000, 100)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    let missingRanges =
      @[(start: 25'u64, count: 25'u64), (start: 75'u64, count: 25'u64)]
    scheduler.partialComplete(0, missingRanges)
    let next1 = scheduler.take()
    check next1.isSome
    check next1.get.start == 25
    check next1.get.count == 25
    let next2 = scheduler.take()
    check next2.isSome
    check next2.get.start == 75
    check next2.get.count == 25

  test "Should handle partialComplete with non-contiguous missing ranges":
    # Missing ranges need not align to batch boundaries or be contiguous.
    scheduler.init(1000, 256)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    check batch.get.count == 256
    let missingRanges =
      @[(start: 101'u64, count: 49'u64), (start: 201'u64, count: 55'u64)]
    scheduler.partialComplete(0, missingRanges)
    let next1 = scheduler.take()
    check next1.isSome
    check next1.get.start == 101
    check next1.get.count == 49
    let next2 = scheduler.take()
    check next2.isSome
    check next2.get.start == 201
    check next2.get.count == 55

  test "Should not skip completed batches after partialComplete":
    # A partial on a later batch must not disturb earlier completed state:
    # the re-queued sub-range (150..200) is what comes out next.
    scheduler.init(500, 100)
    let batch1 = scheduler.take()
    check batch1.get.start == 0
    scheduler.markComplete(0)
    let batch2 = scheduler.take()
    check batch2.get.start == 100
    let missingRanges = @[(start: 150'u64, count: 50'u64)]
    scheduler.partialComplete(100, missingRanges)
    let next = scheduler.take()
    check next.isSome
    check next.get.start == 150
    check next.get.count == 50

  test "Should become empty after all batches complete":
    scheduler.init(200, 100)
    let batch1 = scheduler.take()
    scheduler.markComplete(batch1.get.start)
    let batch2 = scheduler.take()
    scheduler.markComplete(batch2.get.start)
    check scheduler.isEmpty() == true
    check scheduler.hasWork() == false

  test "Should handle out-of-order completion":
    # Completion order (200, 0, 100) differs from handout order; the next
    # fresh batch is still the one after the highest handed-out batch.
    scheduler.init(500, 100)
    let
      batch0 = scheduler.take()
      batch1 = scheduler.take()
      batch2 = scheduler.take()
    check batch0.get.start == 0
    check batch1.get.start == 100
    check batch2.get.start == 200
    scheduler.markComplete(200)
    scheduler.markComplete(0)
    scheduler.markComplete(100)
    let next = scheduler.take()
    check next.isSome
    check next.get.start == 300

  test "Should initialize with range":
    # initRange(500, 200, 100): scheduling starts at block 500 for 200 blocks;
    # everything below 500 counts as already completed (watermark == 500).
    scheduler.initRange(500, 200, 100)
    check scheduler.totalBlockCount() == 700
    check scheduler.batchSizeCount() == 100
    check scheduler.completedWatermark() == 500
    let batch1 = scheduler.take()
    check batch1.isSome
    check batch1.get.start == 500
    check batch1.get.count == 100
    let batch2 = scheduler.take()
    check batch2.isSome
    check batch2.get.start == 600
    check batch2.get.count == 100

  test "Should add specific batches":
    # add() appends explicit batches; batchSizeCount reflects the first add.
    scheduler.add(100, 50)
    scheduler.add(300, 75)
    check scheduler.totalBlockCount() == 375
    check scheduler.batchSizeCount() == 50
    let batch1 = scheduler.take()
    check batch1.isSome
    check batch1.get.start == 100
    check batch1.get.count == 50
    let batch2 = scheduler.take()
    check batch2.isSome
    check batch2.get.start == 300
    check batch2.get.count == 75

  test "Should clear scheduler":
    # clear() resets all state, including requeued batches and counters.
    scheduler.init(500, 100)
    discard scheduler.take()
    discard scheduler.take()
    scheduler.requeueFront(0, 100)
    scheduler.clear()
    check scheduler.hasWork() == false
    check scheduler.isEmpty() == true
    check scheduler.requeuedCount() == 0
    check scheduler.totalBlockCount() == 0
    check scheduler.batchSizeCount() == 0
    let batch = scheduler.take()
    check batch.isNone

  test "Should return pending batches":
    # pending() lists requeued (not yet re-taken) batches.
    scheduler.init(500, 100)
    check scheduler.pending().len == 0
    discard scheduler.take()
    scheduler.requeueFront(0, 100)
    let pending = scheduler.pending()
    check pending.len == 1
    check pending[0].start == 0
    check pending[0].count == 100

  test "Should return correct requeuedCount":
    scheduler.init(500, 100)
    check scheduler.requeuedCount() == 0
    discard scheduler.take()
    discard scheduler.take()
    scheduler.requeueFront(0, 100)
    scheduler.requeueBack(100, 100)
    check scheduler.requeuedCount() == 2

  test "Should return none when exhausted":
    scheduler.init(200, 100)
    let
      b1 = scheduler.take()
      b2 = scheduler.take()
    check b1.isSome
    check b2.isSome
    let b3 = scheduler.take()
    check b3.isNone

  test "Should handle single block":
    scheduler.init(1, 100)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    check batch.get.count == 1
    scheduler.markComplete(0)
    check scheduler.isEmpty() == true

  test "Should handle batch size larger than total":
    scheduler.init(50, 100)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    check batch.get.count == 50
    scheduler.markComplete(0)
    check scheduler.isEmpty() == true

  test "Should handle zero blocks":
    scheduler.init(0, 100)
    check scheduler.hasWork() == false
    check scheduler.isEmpty() == true
    let batch = scheduler.take()
    check batch.isNone

  test "Should ignore requeue of completed batch":
    # Requeueing an already-completed batch is a no-op (front or back).
    scheduler.init(300, 100)
    let batch = scheduler.take()
    scheduler.markComplete(batch.get.start)
    scheduler.requeueFront(0, 100)
    scheduler.requeueBack(0, 100)
    check scheduler.requeuedCount() == 0

  test "Should track in-flight batches":
    scheduler.init(300, 100)
    let batch = scheduler.take()
    check batch.isSome
    let batch2 = scheduler.take()
    check batch2.isSome
    check batch2.get.start == 100
    scheduler.markComplete(0)
    scheduler.requeueFront(100, 100)
    let batch3 = scheduler.take()
    check batch3.isSome
    check batch3.get.start == 100

  test "Should skip completed batches in requeued":
    # A batch completed while a stale requeue of it is still queued must be
    # skipped on take(); the next fresh batch (100) is returned instead.
    scheduler.init(500, 100)
    discard scheduler.take()
    scheduler.requeueBack(0, 100)
    discard scheduler.take()
    scheduler.markComplete(0)
    scheduler.requeueBack(0, 100)
    let next = scheduler.take()
    check next.isSome
    check next.get.start == 100

  test "Watermark advances after all sub-ranges of partial batch complete":
    # The watermark stays put until every missing sub-range of a partially
    # completed batch has been re-taken and marked complete.
    scheduler.init(16, 8)
    let batch = scheduler.take()
    check batch.get.start == 0
    check batch.get.count == 8
    let missingRanges = @[
      (start: 1'u64, count: 1'u64),
      (start: 3'u64, count: 1'u64),
      (start: 5'u64, count: 1'u64),
      (start: 7'u64, count: 1'u64),
    ]
    scheduler.partialComplete(0, missingRanges)
    check scheduler.completedWatermark() == 0
    let sub1 = scheduler.take()
    check sub1.get.start == 1
    scheduler.markComplete(1)
    check scheduler.completedWatermark() == 0
    let sub2 = scheduler.take()
    check sub2.get.start == 3
    scheduler.markComplete(3)
    check scheduler.completedWatermark() == 0
    let sub3 = scheduler.take()
    check sub3.get.start == 5
    scheduler.markComplete(5)
    check scheduler.completedWatermark() == 0
    let sub4 = scheduler.take()
    check sub4.get.start == 7
    scheduler.markComplete(7)
    # All sub-ranges done -> the whole first batch counts, watermark jumps to 8.
    check scheduler.completedWatermark() == 8

  test "Watermark merges OOO after partial batch completes":
    # Batches 8 and 16 complete out of order first; once batch 0's last
    # missing sub-range completes, the watermark merges through to 24.
    scheduler.init(24, 8)
    let
      batch0 = scheduler.take()
      batch1 = scheduler.take()
      batch2 = scheduler.take()
    check batch0.get.start == 0
    check batch1.get.start == 8
    check batch2.get.start == 16
    scheduler.markComplete(8)
    scheduler.markComplete(16)
    check scheduler.completedWatermark() == 0
    scheduler.partialComplete(0, @[(start: 3'u64, count: 1'u64)])
    check scheduler.completedWatermark() == 0
    let sub = scheduler.take()
    check sub.get.start == 3
    scheduler.markComplete(3)
    check scheduler.completedWatermark() == 24
    check scheduler.isEmpty() == true

  test "Nested partials, requeues, OOO merge, multiple partial batches":
    # Stress case: partials on two batches, a partial of a partial (10 -> 11),
    # a requeue of a sub-range (2), and out-of-order completion throughout.
    # The watermark may only advance once everything below it is complete.
    scheduler.init(40, 8)
    let
      b0 = scheduler.take()
      b1 = scheduler.take()
      b2 = scheduler.take()
      b3 = scheduler.take()
      b4 = scheduler.take()
    check b0.get.start == 0
    check b4.get.start == 32
    scheduler.markComplete(32)
    check scheduler.completedWatermark() == 0
    scheduler.markComplete(16)
    check scheduler.completedWatermark() == 0
    scheduler.partialComplete(0, @[(start: 2'u64, count: 2'u64)])
    check scheduler.completedWatermark() == 0
    scheduler.partialComplete(
      8, @[(start: 10'u64, count: 3'u64), (start: 13'u64, count: 3'u64)]
    )
    check scheduler.completedWatermark() == 0
    scheduler.markComplete(24)
    check scheduler.completedWatermark() == 0
    let sub1a = scheduler.take()
    check sub1a.get.start == 10
    check sub1a.get.count == 3
    let sub1b = scheduler.take()
    check sub1b.get.start == 13
    check sub1b.get.count == 3
    let sub0a = scheduler.take()
    check sub0a.get.start == 2
    check sub0a.get.count == 2
    scheduler.requeueFront(2, 2)
    check scheduler.completedWatermark() == 0
    scheduler.markComplete(13)
    check scheduler.completedWatermark() == 0
    scheduler.partialComplete(10, @[(start: 11'u64, count: 2'u64)])
    check scheduler.completedWatermark() == 0
    let sub1c = scheduler.take()
    check sub1c.get.start == 11
    check sub1c.get.count == 2
    let sub0b = scheduler.take()
    check sub0b.get.start == 2
    scheduler.markComplete(2)
    # Batch 0 fully done -> watermark reaches 8 (batch 8 still has 11..13 open).
    check scheduler.completedWatermark() == 8
    scheduler.markComplete(11)
    # Last gap closed -> everything merges through to 40.
    check scheduler.completedWatermark() == 40
    check scheduler.isEmpty() == true
    check scheduler.hasWork() == false

  test "BlockBatch batchEnd":
    # batchEnd is exclusive: start + count.
    let batch: BlockBatch = (start: 100'u64, count: 50'u64)
    check batch.batchEnd == 150

  test "BlockBatch contains":
    # Membership over the half-open interval [start, start + count).
    let batch: BlockBatch = (start: 100'u64, count: 50'u64)
    check batch.contains(100) == true
    check batch.contains(149) == true
    check batch.contains(99) == false
    check batch.contains(150) == false

  test "BlockBatch merge":
    # Overlapping batches merge into their union; disjoint batches don't merge.
    let
      batch1: BlockBatch = (start: 100'u64, count: 50'u64)
      batch2: BlockBatch = (start: 140'u64, count: 30'u64)
      batch3: BlockBatch = (start: 200'u64, count: 20'u64)
    let merged1 = merge(batch1, batch2)
    check merged1.isSome
    check merged1.get.start == 100
    check merged1.get.count == 70
    let merged2 = merge(batch1, batch3)
    check merged2.isNone

View File

@ -0,0 +1,474 @@
import std/[options, tables]
import pkg/unittest2
import pkg/chronos
import pkg/libp2p/cid
import pkg/libp2p/peerid
import pkg/storage/blockexchange/engine/swarm
import pkg/storage/blockexchange/peers/peercontext
import pkg/storage/blockexchange/peers/peerstats
import pkg/storage/blockexchange/utils
import pkg/storage/storagetypes
import ../../examples
const
# Size of one test block in bytes.
TestBlockSize = DefaultBlockSize.uint32
# Bytes carried by one download batch: blocks-per-batch × block size.
TestBatchBytes = computeBatchSize(TestBlockSize).uint64 * TestBlockSize.uint64
# BlockAvailability models which blocks of a dataset a peer claims to
# hold: bakUnknown (nothing known), bakComplete (everything),
# bakRanges ((start, count) spans) or bakBitmap (per-block bits).
suite "BlockAvailability":
test "unknown availability":
# Unknown availability reports no blocks at all.
let avail = BlockAvailability.unknown()
check avail.kind == bakUnknown
check avail.hasBlock(0) == false
check avail.hasBlock(100) == false
check avail.hasRange(0, 10) == false
check avail.hasAnyInRange(0, 10) == false
test "complete availability":
# Complete availability reports every index, up to uint64.high.
let avail = BlockAvailability.complete()
check avail.kind == bakComplete
check avail.hasBlock(0) == true
check avail.hasBlock(100) == true
check avail.hasBlock(uint64.high) == true
check avail.hasRange(0, 1000) == true
check avail.hasAnyInRange(0, 1000) == true
test "ranges availability - hasBlock":
# Ranges are half-open: [start, start + count).
let avail = BlockAvailability.fromRanges(
@[(start: 10'u64, count: 20'u64), (start: 50'u64, count: 10'u64)]
)
check avail.kind == bakRanges
check avail.hasBlock(10) == true
check avail.hasBlock(29) == true
check avail.hasBlock(30) == false
check avail.hasBlock(50) == true
check avail.hasBlock(59) == true
check avail.hasBlock(60) == false
check avail.hasBlock(0) == false
check avail.hasBlock(9) == false
check avail.hasBlock(35) == false
test "ranges availability - hasRange":
let avail = BlockAvailability.fromRanges(
@[(start: 10'u64, count: 20'u64), (start: 50'u64, count: 10'u64)]
)
# hasRange requires the whole queried span to be covered.
check avail.hasRange(10, 20) == true
check avail.hasRange(15, 10) == true
check avail.hasRange(10, 21) == false
check avail.hasRange(25, 10) == false
check avail.hasRange(50, 10) == true
check avail.hasRange(55, 5) == true
check avail.hasRange(25, 30) == false
test "ranges availability - hasAnyInRange":
let avail = BlockAvailability.fromRanges(
@[(start: 10'u64, count: 20'u64), (start: 50'u64, count: 10'u64)]
)
# hasAnyInRange is satisfied by any overlap with a held range.
check avail.hasAnyInRange(5, 10) == true
check avail.hasAnyInRange(25, 10) == true
check avail.hasAnyInRange(45, 10) == true
check avail.hasAnyInRange(30, 20) == false
check avail.hasAnyInRange(0, 5) == false
check avail.hasAnyInRange(100, 10) == false
test "bitmap availability - hasBlock":
# 0x55 = 0b01010101; the checks show LSB-first mapping:
# bit 0 of byte 0 is block 0, so even indices 0,2,4,6 are held.
let avail = BlockAvailability.fromBitmap(@[0x55'u8], 8)
check avail.kind == bakBitmap
check avail.hasBlock(0) == true
check avail.hasBlock(1) == false
check avail.hasBlock(2) == true
check avail.hasBlock(3) == false
check avail.hasBlock(4) == true
check avail.hasBlock(5) == false
check avail.hasBlock(6) == true
check avail.hasBlock(7) == false
# Indices beyond the declared bitmap length are absent.
check avail.hasBlock(8) == false
check avail.hasBlock(100) == false
test "bitmap availability - hasRange":
# 0xF0 = blocks 4..7 held (LSB-first).
let avail = BlockAvailability.fromBitmap(@[0xF0'u8], 8)
check avail.hasRange(4, 4) == true
check avail.hasRange(4, 2) == true
check avail.hasRange(0, 4) == false
check avail.hasRange(2, 4) == false
test "bitmap availability - hasAnyInRange":
let avail = BlockAvailability.fromBitmap(@[0xF0'u8], 8)
check avail.hasAnyInRange(0, 8) == true
check avail.hasAnyInRange(0, 4) == false
check avail.hasAnyInRange(3, 2) == true
# Ranges extending past the bitmap still match held bits inside it.
check avail.hasAnyInRange(6, 4) == true
test "merge unknown with complete":
# complete absorbs unknown in either merge order.
let
unknown = BlockAvailability.unknown()
complete = BlockAvailability.complete()
check unknown.merge(complete).kind == bakComplete
check complete.merge(unknown).kind == bakComplete
test "merge unknown with ranges":
let
unknown = BlockAvailability.unknown()
ranges = BlockAvailability.fromRanges(@[(start: 10'u64, count: 20'u64)])
merged = unknown.merge(ranges)
check merged.kind == bakRanges
check merged.hasBlock(15) == true
test "merge ranges with ranges":
# Disjoint spans are both kept; the gap stays absent.
let
r1 = BlockAvailability.fromRanges(@[(start: 0'u64, count: 10'u64)])
r2 = BlockAvailability.fromRanges(@[(start: 20'u64, count: 10'u64)])
merged = r1.merge(r2)
check merged.kind == bakRanges
check merged.hasBlock(5) == true
check merged.hasBlock(25) == true
check merged.hasBlock(15) == false
test "merge overlapping ranges":
# Overlapping spans coalesce into a single range.
let
r1 = BlockAvailability.fromRanges(@[(start: 0'u64, count: 15'u64)])
r2 = BlockAvailability.fromRanges(@[(start: 10'u64, count: 15'u64)])
merged = r1.merge(r2)
check merged.kind == bakRanges
check merged.ranges.len == 1
check merged.ranges[0].start == 0
check merged.ranges[0].count == 25
test "merge bitmap with ranges converts bitmap to ranges":
# 0x0F = blocks 0..3; merging with a ranges value normalizes the
# result to bakRanges.
let
bitmap = BlockAvailability.fromBitmap(@[0x0F'u8], 8)
ranges = BlockAvailability.fromRanges(@[(start: 6'u64, count: 2'u64)])
merged = bitmap.merge(ranges)
check merged.kind == bakRanges
check merged.ranges.len == 2
check merged.ranges[0] == (start: 0'u64, count: 4'u64)
check merged.ranges[1] == (start: 6'u64, count: 2'u64)
# SwarmPeer: per-peer download bookkeeping — availability, lastSeen
# timestamp and a consecutive-failure counter.
suite "SwarmPeer":
test "new peer":
let peer = SwarmPeer.new(BlockAvailability.complete())
check peer.availability.kind == bakComplete
check peer.failureCount == 0
test "touch updates lastSeen":
let
peer = SwarmPeer.new(BlockAvailability.unknown())
before = peer.lastSeen
peer.touch()
# >= rather than >: touch may land on the same clock tick.
check peer.lastSeen >= before
test "updateAvailability merges":
let peer =
SwarmPeer.new(BlockAvailability.fromRanges(@[(start: 0'u64, count: 10'u64)]))
peer.updateAvailability(
BlockAvailability.fromRanges(@[(start: 20'u64, count: 10'u64)])
)
# Availability is merged, not replaced: old and new ranges both remain.
check peer.availability.hasBlock(5) == true
check peer.availability.hasBlock(25) == true
check peer.availability.hasBlock(15) == false
test "recordFailure and resetFailures":
let peer = SwarmPeer.new(BlockAvailability.unknown())
check peer.failureCount == 0
peer.recordFailure()
check peer.failureCount == 1
peer.recordFailure()
check peer.failureCount == 2
# A reset clears the counter entirely (not a decrement).
peer.resetFailures()
check peer.failureCount == 0
# Swarm: the peer set for one download. SwarmConfig fields exercised
# here: deltaMin (minimum peers before needsPeers() is satisfied),
# deltaMax (hard cap on addPeer), deltaTarget (peersNeeded() goal),
# maxPeerFailures (failures before recordPeerFailure returns true).
suite "Swarm":
var swarm: Swarm
setup:
# Fresh swarm (default config) before every test.
swarm = Swarm.new()
test "addPeer and getPeer":
let peerId = PeerId.example
check swarm.addPeer(peerId, BlockAvailability.complete()) == true
let peerOpt = swarm.getPeer(peerId)
check peerOpt.isSome
check peerOpt.get().availability.kind == bakComplete
test "addPeer respects deltaMax":
# Third add is rejected once deltaMax = 2 peers are present.
let config =
SwarmConfig(deltaMin: 1, deltaMax: 2, deltaTarget: 2, maxPeerFailures: 3)
swarm = Swarm.new(config)
check swarm.addPeer(PeerId.example, BlockAvailability.complete()) == true
check swarm.addPeer(PeerId.example, BlockAvailability.complete()) == true
check swarm.addPeer(PeerId.example, BlockAvailability.complete()) == false
check swarm.peerCount() == 2
test "removePeer":
let peerId = PeerId.example
discard swarm.addPeer(peerId, BlockAvailability.complete())
# removePeer returns the removed peer, not just a bool.
let removed = swarm.removePeer(peerId)
check removed.isSome
check swarm.getPeer(peerId).isNone
test "banPeer prevents re-adding":
let peerId = PeerId.example
discard swarm.addPeer(peerId, BlockAvailability.complete())
swarm.banPeer(peerId)
# Banning both evicts the peer and blocks future addPeer calls.
check swarm.getPeer(peerId).isNone
check swarm.addPeer(peerId, BlockAvailability.complete()) == false
test "updatePeerAvailability":
let peerId = PeerId.example
discard swarm.addPeer(
peerId, BlockAvailability.fromRanges(@[(start: 0'u64, count: 10'u64)])
)
swarm.updatePeerAvailability(
peerId, BlockAvailability.fromRanges(@[(start: 20'u64, count: 10'u64)])
)
# Merge semantics: both the original and the new range are held.
let peer = swarm.getPeer(peerId).get()
check peer.availability.hasBlock(5) == true
check peer.availability.hasBlock(25) == true
test "recordPeerFailure returns true when max reached":
let config =
SwarmConfig(deltaMin: 1, deltaMax: 10, deltaTarget: 5, maxPeerFailures: 2)
swarm = Swarm.new(config)
let peerId = PeerId.example
discard swarm.addPeer(peerId, BlockAvailability.complete())
# true signals the caller that maxPeerFailures has been hit.
check swarm.recordPeerFailure(peerId) == false
check swarm.recordPeerFailure(peerId) == true
test "recordPeerSuccess resets failures":
let peerId = PeerId.example
discard swarm.addPeer(peerId, BlockAvailability.complete())
discard swarm.recordPeerFailure(peerId)
discard swarm.recordPeerFailure(peerId)
check swarm.getPeer(peerId).get().failureCount == 2
# One success wipes the accumulated failure count.
swarm.recordPeerSuccess(peerId)
check swarm.getPeer(peerId).get().failureCount == 0
test "peerCount":
check swarm.peerCount() == 0
discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
check swarm.peerCount() == 1
discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
check swarm.peerCount() == 2
test "connectedPeers":
discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
let connected = swarm.connectedPeers()
check connected.len == 2
test "peersWithRange":
# Filters peers whose availability fully covers [start, start+count).
let
peer1 = PeerId.example
peer2 = PeerId.example
discard swarm.addPeer(peer1, BlockAvailability.complete())
discard swarm.addPeer(
peer2, BlockAvailability.fromRanges(@[(start: 0'u64, count: 100'u64)])
)
let peersForRange = swarm.peersWithRange(0, 50)
check peersForRange.len == 2
# Only the complete peer covers 0..149.
let peersForLargeRange = swarm.peersWithRange(0, 150)
check peersForLargeRange.len == 1
test "peersWithAnyInRange":
# Filters peers whose availability overlaps the range at all.
let
peer1 = PeerId.example
peer2 = PeerId.example
discard swarm.addPeer(
peer1, BlockAvailability.fromRanges(@[(start: 0'u64, count: 50'u64)])
)
discard swarm.addPeer(
peer2, BlockAvailability.fromRanges(@[(start: 100'u64, count: 50'u64)])
)
let peers1 = swarm.peersWithAnyInRange(25, 50)
check peers1.len == 1
let peers2 = swarm.peersWithAnyInRange(75, 50)
check peers2.len == 1
# 60..89 falls in the gap between the two peers' ranges.
let peers3 = swarm.peersWithAnyInRange(60, 30)
check peers3.len == 0
test "needsPeers":
# needsPeers() is true while peerCount < deltaMin.
let config =
SwarmConfig(deltaMin: 2, deltaMax: 10, deltaTarget: 5, maxPeerFailures: 3)
swarm = Swarm.new(config)
check swarm.needsPeers() == true
discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
check swarm.needsPeers() == true
discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
check swarm.needsPeers() == false
test "peersNeeded":
# peersNeeded() counts down from deltaTarget to zero.
let config =
SwarmConfig(deltaMin: 2, deltaMax: 10, deltaTarget: 5, maxPeerFailures: 3)
swarm = Swarm.new(config)
check swarm.peersNeeded() == 5
discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
check swarm.peersNeeded() == 4
discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
check swarm.peersNeeded() == 0
# selectByBDP picks a download peer from throughput stats (BDP =
# bandwidth-delay product), in-flight batch load and timeout
# penalties. explorationProb = 0.0 disables random exploration so the
# choice is deterministic for the assertions below.
suite "BDP Peer Selection":
var peerCtxs: seq[PeerContext]
setup:
# Five fresh peers with no recorded throughput.
peerCtxs = @[]
for i in 0 ..< 5:
let ctx = PeerContext.new(PeerId.example)
peerCtxs.add(ctx)
test "Should return none for empty peers":
var
emptyInFlight = initTable[PeerId, seq[Future[void]]]()
emptyPenalties = initTable[PeerId, float]()
let res = selectByBDP(@[], TestBatchBytes, emptyInFlight, emptyPenalties)
check res.isNone
test "Should return single peer":
var
emptyInFlight = initTable[PeerId, seq[Future[void]]]()
emptyPenalties = initTable[PeerId, float]()
let res = selectByBDP(@[peerCtxs[0]], TestBatchBytes, emptyInFlight, emptyPenalties)
check res.isSome
check res.get == peerCtxs[0]
test "Should prefer untried peers (round-robin)":
# No peer has throughput data yet; selection must still yield one.
for peer in peerCtxs:
check peer.stats.throughputBps().isNone
var
emptyInFlight = initTable[PeerId, seq[Future[void]]]()
emptyPenalties = initTable[PeerId, float]()
let res = selectByBDP(peerCtxs, TestBatchBytes, emptyInFlight, emptyPenalties)
check res.isSome
test "Should select peer with capacity":
peerCtxs[0].stats.recordRequest(1000, 65536)
peerCtxs[1].stats.recordRequest(1000, 65536)
var
inFlightBatches = initTable[PeerId, seq[Future[void]]]()
emptyPenalties = initTable[PeerId, float]()
fakeFutures: seq[Future[void]] = @[]
# Load peer 1 with 10 unfinished batches to consume its capacity.
for i in 0 ..< 10:
fakeFutures.add(newFuture[void]())
inFlightBatches[peerCtxs[1].id] = fakeFutures
let res = selectByBDP(peerCtxs, TestBatchBytes, inFlightBatches, emptyPenalties)
check res.isSome
test "Should deprioritize peer with timeout penalty":
# Two recordRequest calls separated by MinThroughputDuration are
# needed before throughputBps() yields a value.
peerCtxs[0].stats.recordRequest(1000, 65536)
peerCtxs[1].stats.recordRequest(1000, 65536)
waitFor sleepAsync(MinThroughputDuration)
peerCtxs[0].stats.recordRequest(1000, 65536)
peerCtxs[1].stats.recordRequest(1000, 65536)
check peerCtxs[0].stats.throughputBps().isSome
check peerCtxs[1].stats.throughputBps().isSome
var
emptyInFlight = initTable[PeerId, seq[Future[void]]]()
penalties = initTable[PeerId, float]()
penalties[peerCtxs[0].id] = 1.0 * TimeoutPenaltyWeight
let res = selectByBDP(
@[peerCtxs[0], peerCtxs[1]],
TestBatchBytes,
emptyInFlight,
penalties,
explorationProb = 0.0,
)
check res.isSome
check res.get == peerCtxs[1]
test "Should still select penalized peer when only option":
# A heavy penalty must not make selection fail outright.
peerCtxs[0].stats.recordRequest(1000, 65536)
waitFor sleepAsync(MinThroughputDuration)
peerCtxs[0].stats.recordRequest(1000, 65536)
var
emptyInFlight = initTable[PeerId, seq[Future[void]]]()
penalties = initTable[PeerId, float]()
penalties[peerCtxs[0].id] = 3.0 * TimeoutPenaltyWeight
let res = selectByBDP(@[peerCtxs[0]], TestBatchBytes, emptyInFlight, penalties)
check res.isSome
check res.get == peerCtxs[0]
test "Should prefer peer with fewer timeouts":
peerCtxs[0].stats.recordRequest(1000, 65536)
peerCtxs[1].stats.recordRequest(1000, 65536)
waitFor sleepAsync(MinThroughputDuration)
peerCtxs[0].stats.recordRequest(1000, 65536)
peerCtxs[1].stats.recordRequest(1000, 65536)
var
emptyInFlight = initTable[PeerId, seq[Future[void]]]()
penalties = initTable[PeerId, float]()
penalties[peerCtxs[0].id] = 2.0 * TimeoutPenaltyWeight
penalties[peerCtxs[1].id] = 1.0 * TimeoutPenaltyWeight
let res = selectByBDP(
@[peerCtxs[0], peerCtxs[1]],
TestBatchBytes,
emptyInFlight,
penalties,
explorationProb = 0.0,
)
check res.isSome
check res.get == peerCtxs[1]

View File

@@ -1,31 +0,0 @@
import pkg/chronos
import pkg/storage/blockexchange/protobuf/presence
import ../../../asynctest
import ../../examples
import ../../helpers
suite "block presence protobuf messages":
let
cid = Cid.example
address = BlockAddress(leaf: false, cid: cid)
presence = Presence(address: address, have: true)
message = PresenceMessage.init(presence)
test "encodes have/donthave":
var presence = presence
presence.have = true
check PresenceMessage.init(presence).`type` == Have
presence.have = false
check PresenceMessage.init(presence).`type` == DontHave
test "decodes CID":
check Presence.init(message) .? address == address.some
test "decodes have/donthave":
var message = message
message.`type` = BlockPresenceType.Have
check Presence.init(message) .? have == true.some
message.`type` = BlockPresenceType.DontHave
check Presence.init(message) .? have == false.some

View File

@@ -0,0 +1,275 @@
import pkg/unittest2
import pkg/storage/blockexchange/protocol/message
import ../../examples
import ../../helpers
# Round-trip (encode → decode) tests for the block-exchange wire
# types: BlockAddress, WantListEntry, WantList, BlockPresence and the
# top-level Message. Each test writes the value as an embedded
# protobuf field, reads it back via getField + decode, and compares.
suite "BlockAddress protobuf encoding":
test "Should encode and decode block address":
let
treeCid = Cid.example
address = BlockAddress(treeCid: treeCid, index: 42)
var buffer = initProtoBuffer()
buffer.write(1, address)
buffer.finish()
var decoded: ProtoBuffer
check buffer.getField(1, decoded).isOk
let res = BlockAddress.decode(decoded)
check res.isOk
check res.get.treeCid == treeCid
check res.get.index == 42
test "Should encode and decode block address with index 0":
# index 0 is the protobuf default value; the round trip must
# still preserve it.
let
blockCid = Cid.example
address = BlockAddress(treeCid: blockCid, index: 0)
var buffer = initProtoBuffer()
buffer.write(1, address)
buffer.finish()
var decoded: ProtoBuffer
check buffer.getField(1, decoded).isOk
let res = BlockAddress.decode(decoded)
check res.isOk
check res.get.treeCid == blockCid
check res.get.index == 0
suite "WantListEntry protobuf encoding":
test "Should encode and decode WantListEntry":
# All fields populated, including the range extension rangeCount.
let
treeCid = Cid.example
entry = WantListEntry(
address: BlockAddress(treeCid: treeCid, index: 10),
priority: 5,
cancel: false,
wantType: WantType.WantHave,
sendDontHave: true,
rangeCount: 100,
)
var buffer = initProtoBuffer()
buffer.write(1, entry)
buffer.finish()
var decoded: ProtoBuffer
check buffer.getField(1, decoded).isOk
let res = WantListEntry.decode(decoded)
check res.isOk
check res.get.address.treeCid == treeCid
check res.get.address.index == 10
check res.get.priority == 5
check res.get.cancel == false
check res.get.wantType == WantType.WantHave
check res.get.sendDontHave == true
check res.get.rangeCount == 100
test "Should handle WantListEntry with cancel flag":
let
blockCid = Cid.example
entry = WantListEntry(
address: BlockAddress(treeCid: blockCid, index: 0),
priority: 1,
cancel: true,
wantType: WantType.WantHave,
sendDontHave: false,
rangeCount: 0,
)
var buffer = initProtoBuffer()
buffer.write(1, entry)
buffer.finish()
var decoded: ProtoBuffer
check buffer.getField(1, decoded).isOk
let res = WantListEntry.decode(decoded)
check res.isOk
check res.get.cancel == true
suite "WantList protobuf encoding":
test "Should encode and decode empty WantList":
let wantList = WantList(entries: @[], full: false)
var buffer = initProtoBuffer()
buffer.write(1, wantList)
buffer.finish()
var decoded: ProtoBuffer
check buffer.getField(1, decoded).isOk
let res = WantList.decode(decoded)
check res.isOk
check res.get.entries.len == 0
check res.get.full == false
test "Should encode and decode WantList with entries":
# Two entries plus the `full` flag survive the round trip in order.
let
treeCid = Cid.example
wantList = WantList(
entries: @[
WantListEntry(
address: BlockAddress(treeCid: treeCid, index: 0),
priority: 1,
cancel: false,
wantType: WantType.WantHave,
sendDontHave: false,
rangeCount: 10,
),
WantListEntry(
address: BlockAddress(treeCid: treeCid, index: 1),
priority: 2,
cancel: false,
wantType: WantType.WantHave,
sendDontHave: true,
rangeCount: 0,
),
],
full: true,
)
var buffer = initProtoBuffer()
buffer.write(1, wantList)
buffer.finish()
var decoded: ProtoBuffer
check buffer.getField(1, decoded).isOk
let res = WantList.decode(decoded)
check res.isOk
check res.get.entries.len == 2
check res.get.entries[0].rangeCount == 10
check res.get.entries[1].sendDontHave == true
check res.get.full == true
suite "BlockPresence protobuf encoding":
test "Should encode and decode BlockPresence with DontHave":
let
treeCid = Cid.example
presence = BlockPresence(
address: BlockAddress(treeCid: treeCid, index: 0),
kind: BlockPresenceType.DontHave,
ranges: @[],
)
var buffer = initProtoBuffer()
buffer.write(1, presence)
buffer.finish()
var decoded: ProtoBuffer
check buffer.getField(1, decoded).isOk
let res = BlockPresence.decode(decoded)
check res.isOk
check res.get.kind == BlockPresenceType.DontHave
check res.get.ranges.len == 0
test "Should encode and decode BlockPresence with HaveRange":
# HaveRange carries explicit (start, count) spans.
let
treeCid = Cid.example
presence = BlockPresence(
address: BlockAddress(treeCid: treeCid, index: 0),
kind: BlockPresenceType.HaveRange,
ranges: @[(start: 0'u64, count: 100'u64), (start: 200'u64, count: 50'u64)],
)
var buffer = initProtoBuffer()
buffer.write(1, presence)
buffer.finish()
var decoded: ProtoBuffer
check buffer.getField(1, decoded).isOk
let res = BlockPresence.decode(decoded)
check res.isOk
check res.get.kind == BlockPresenceType.HaveRange
check res.get.ranges.len == 2
check res.get.ranges[0].start == 0
check res.get.ranges[0].count == 100
check res.get.ranges[1].start == 200
check res.get.ranges[1].count == 50
test "Should encode and decode BlockPresence with Complete":
let
treeCid = Cid.example
presence = BlockPresence(
address: BlockAddress(treeCid: treeCid, index: 0),
kind: BlockPresenceType.Complete,
ranges: @[],
)
var buffer = initProtoBuffer()
buffer.write(1, presence)
buffer.finish()
var decoded: ProtoBuffer
check buffer.getField(1, decoded).isOk
let res = BlockPresence.decode(decoded)
check res.isOk
check res.get.kind == BlockPresenceType.Complete
suite "Full Message protobuf encoding":
test "Should encode and decode empty Message":
let
msg = Message(wantList: WantList(entries: @[], full: false), blockPresences: @[])
encoded = msg.protobufEncode()
decoded = Message.protobufDecode(encoded)
check decoded.isOk
check decoded.get.wantList.entries.len == 0
check decoded.get.blockPresences.len == 0
test "Should encode and decode Message with WantList":
let
treeCid = Cid.example
msg = Message(
wantList: WantList(
entries: @[
WantListEntry(
address: BlockAddress(treeCid: treeCid, index: 0),
priority: 1,
cancel: false,
wantType: WantType.WantHave,
sendDontHave: false,
rangeCount: 100,
)
],
full: false,
),
blockPresences: @[],
)
encoded = msg.protobufEncode()
decoded = Message.protobufDecode(encoded)
check decoded.isOk
check decoded.get.wantList.entries.len == 1
check decoded.get.wantList.entries[0].rangeCount == 100
test "Should encode and decode Message with BlockPresences":
let
treeCid = Cid.example
msg = Message(
wantList: WantList(entries: @[], full: false),
blockPresences: @[
BlockPresence(
address: BlockAddress(treeCid: treeCid, index: 0),
kind: BlockPresenceType.HaveRange,
ranges: @[(start: 0'u64, count: 500'u64)],
)
],
)
encoded = msg.protobufEncode()
decoded = Message.protobufDecode(encoded)
check decoded.isOk
check decoded.get.blockPresences.len == 1
check decoded.get.blockPresences[0].kind == BlockPresenceType.HaveRange
check decoded.get.blockPresences[0].ranges.len == 1
check decoded.get.blockPresences[0].ranges[0].count == 500

View File

@@ -0,0 +1,32 @@
import pkg/chronos
import pkg/storage/blockexchange/protocol/presence
import ../../../asynctest
import ../../examples
import ../../helpers
# Conversion between the high-level Presence view and the wire-level
# PresenceMessage (replaces the old boolean Have/DontHave pair with
# BlockPresenceType kinds).
suite "Block presence protobuf messages":
let
cid = Cid.example
address = BlockAddress(treeCid: cid, index: 0)
presence =
Presence(address: address, have: true, presenceType: BlockPresenceType.HaveRange)
message = PresenceMessage.init(presence)
test "encodes have/donthave":
# Shadow the suite-level value so mutation stays local to the test.
var presence = presence
presence.presenceType = BlockPresenceType.HaveRange
check PresenceMessage.init(presence).kind == BlockPresenceType.HaveRange
presence.presenceType = BlockPresenceType.DontHave
check PresenceMessage.init(presence).kind == BlockPresenceType.DontHave
test "decodes CID":
# `.?` is the questionable operator: Option-safe field access.
check Presence.init(message) .? address == address.some
test "decodes have/donthave":
var message = message
message.kind = BlockPresenceType.HaveRange
check Presence.init(message) .? have == true.some
message.kind = BlockPresenceType.DontHave
check Presence.init(message) .? have == false.some

View File

@@ -0,0 +1,994 @@
import std/[sequtils, algorithm, options]
import pkg/chronos
import pkg/stew/byteutils
import pkg/libp2p/peerid
import pkg/libp2p/cid
import pkg/storage/merkletree
import pkg/storage/blocktype as bt
import pkg/storage/blockexchange
import pkg/storage/blockexchange/engine/downloadcontext {.all.}
import pkg/storage/blockexchange/engine/activedownload {.all.}
import pkg/storage/blockexchange/engine/downloadmanager {.all.}
import pkg/storage/blockexchange/engine/scheduler {.all.}
import pkg/storage/blockexchange/engine/swarm
import ../helpers
import ../examples
import ../../asynctest
# Want handles: per-address futures that resolve when a block
# arrives. DownloadManager.new(n) sets the per-block retry budget.
# NOTE(review): these tests use `await` inside a plain `suite`;
# presumably the imported asynctest helpers make `test` async-aware —
# confirm, otherwise this should be an asyncchecksuite like the
# lifecycle suite in this file.
suite "DownloadManager - Want Handles":
test "Should add want handle":
let
downloadManager = DownloadManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
address = BlockAddress.init(blk.cid, 0)
desc = toDownloadDesc(address, blockSize = 0)
download = downloadManager.startDownload(desc)
discard download.getWantHandle(address)
# `in` checks that the address is now tracked by the download.
check address in download
test "Should resolve want handle":
let
downloadManager = DownloadManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
address = BlockAddress.init(blk.cid, 0)
desc = toDownloadDesc(address, blockSize = 0)
download = downloadManager.startDownload(desc)
handle = download.getWantHandle(address)
check address in download
# Completing the want fulfils the pending future with the block.
discard download.completeWantHandle(address, some(blk))
let resolved = (await handle).tryGet
check resolved == blk
test "Should cancel want handle":
let
downloadManager = DownloadManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
address = BlockAddress.init(blk.cid, 0)
desc = toDownloadDesc(address, blockSize = 0)
download = downloadManager.startDownload(desc)
handle = download.getWantHandle(address)
check address in download
# Cancelling the handle removes the address from tracking.
await handle.cancelAndWait()
check address notin download
test "Should handle retry counters":
# Budget of 3 retries, decremented one at a time to exhaustion.
let
dm = DownloadManager.new(3)
blk = bt.Block.new("Hello".toBytes).tryGet
address = BlockAddress.init(blk.cid, 0)
desc = toDownloadDesc(address, blockSize = 0)
download = dm.startDownload(desc)
discard download.getWantHandle(address)
check download.retries(address) == 3
download.decRetries(address)
check download.retries(address) == 2
download.decRetries(address)
check download.retries(address) == 1
download.decRetries(address)
check download.retries(address) == 0
check download.retriesExhausted(address)
# Lifecycle: starting, duplicating, cancelling and releasing
# downloads. Multiple concurrent downloads of the same CID get
# distinct ids and independent per-block state.
asyncchecksuite "DownloadManager - Download Lifecycle":
test "Should start new download":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
let download = dm.startDownload(desc)
check download.cid == treeCid
test "Should allow multiple downloads for same CID":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
download1 = dm.startDownload(desc)
download2 = dm.startDownload(desc)
# Same CID, distinct download identities.
check download1.id != download2.id
check download1.cid == download2.cid
test "Multiple downloads for same CID have independent block state":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
download1 = dm.startDownload(desc)
download2 = dm.startDownload(desc)
address = BlockAddress(treeCid: treeCid, index: 0)
handle1 = download1.getWantHandle(address)
check address in download1
check address notin download2
let blk = bt.Block.new("test data".toBytes).tryGet()
# Completing in download1 must not leak state into download2.
discard download1.completeWantHandle(address, some(blk))
let res = await handle1
check res.isOk
check address notin download2
test "Cancel one download for same CID while other continues":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
download1 = dm.startDownload(desc)
download2 = dm.startDownload(desc)
address = BlockAddress(treeCid: treeCid, index: 0)
discard download1.getWantHandle(address)
let handle2 = download2.getWantHandle(address)
dm.cancelDownload(download1)
check download1.cancelled == true
check download2.cancelled == false
let blk = bt.Block.new("test data".toBytes).tryGet()
discard download2.completeWantHandle(address, some(blk))
let res = await handle2
check res.isOk
# Lookup by (id, cid): only the surviving download is registered.
check dm.getDownload(download2.id, treeCid).isSome
check dm.getDownload(download1.id, treeCid).isNone
test "Should start range download":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 50'u64, 100'u64, 65536)
download = dm.startDownload(desc)
check download.ctx.totalBlocks == 150 # 50 + 100
test "Should start download with missing blocks":
# Seeding explicit missing indices queues scheduler work.
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 1000, 65536)
missingBlocks = @[10'u64, 11, 12, 50, 51, 100]
download = dm.startDownload(desc, missingBlocks)
check download.ctx.scheduler.hasWork() == true
test "Should release download":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
discard dm.startDownload(desc)
dm.releaseDownload(treeCid)
check dm.getDownload(treeCid).isNone
test "Should cancel download":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
discard dm.startDownload(desc)
dm.cancelDownload(treeCid)
check dm.getDownload(treeCid).isNone
test "Should return none for non-existent download":
let
dm = DownloadManager.new()
treeCid = Cid.example
check dm.getDownload(treeCid).isNone
test "Should set cancelled flag when download is cancelled":
# Held references observe the cancelled flag even after the
# manager drops the download.
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
let downloadBefore = dm.startDownload(desc)
check downloadBefore.cancelled == false
dm.cancelDownload(treeCid)
check dm.getDownload(treeCid).isNone
check downloadBefore.cancelled == true
test "Should allow new download for same CID after cancellation":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
oldDownload = dm.startDownload(desc)
dm.cancelDownload(treeCid)
check oldDownload.cancelled == true
let newDownload = dm.startDownload(desc)
check newDownload.cancelled == false
check newDownload != oldDownload
check oldDownload.cancelled == true
test "Should set cancelled flag when released":
# releaseDownload behaves like cancel for held references.
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
let downloadRef = dm.startDownload(desc)
check downloadRef.cancelled == false
dm.releaseDownload(treeCid)
check dm.getDownload(treeCid).isNone
check downloadRef.cancelled == true
# Batch lifecycle: take the next batch from the scheduler, mark it
# in flight to a peer, then complete / partially complete / requeue.
suite "DownloadManager - Batch Management":
test "Should get next batch":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 1000, 65536)
download = dm.startDownload(desc)
batch = dm.getNextBatch(download)
check batch.isSome
# A fresh download starts at block 0.
check batch.get.start == 0
test "Should mark batch in flight":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 1000, 65536)
peerId = PeerId.example
download = dm.startDownload(desc)
batch = dm.getNextBatch(download)
check batch.isSome
download.markBatchInFlight(batch.get.start, batch.get.count, 0, peerId)
# pendingBatches is keyed by the batch start index.
check download.pendingBatches.len == 1
check batch.get.start in download.pendingBatches
test "Should complete batch":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
peerId = PeerId.example
download = dm.startDownload(desc)
batch = dm.getNextBatch(download)
check batch.isSome
download.markBatchInFlight(batch.get.start, batch.get.count, 0, peerId)
download.completeBatch(batch.get.start, 0, 0)
check download.pendingBatches.len == 0
test "Should requeue batch at back":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 1000, 65536)
peerId = PeerId.example
download = dm.startDownload(desc)
batch1 = dm.getNextBatch(download)
download.markBatchInFlight(batch1.get.start, batch1.get.count, 0, peerId)
let batch2 = dm.getNextBatch(download)
download.markBatchInFlight(batch2.get.start, batch2.get.count, 0, peerId)
# Requeue batch1: it leaves pendingBatches and re-enters the
# scheduler's requeue list.
download.requeueBatch(batch1.get.start, batch1.get.count, front = false)
check download.pendingBatches.len == 1
check download.ctx.scheduler.requeuedCount() == 1
test "Should requeue batch at front":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 1000, 65536)
peerId = PeerId.example
download = dm.startDownload(desc)
batch1 = dm.getNextBatch(download)
download.markBatchInFlight(batch1.get.start, batch1.get.count, 0, peerId)
download.requeueBatch(batch1.get.start, batch1.get.count, front = true)
# front = true means the same batch is handed out next.
let nextBatch = dm.getNextBatch(download)
check nextBatch.isSome
check nextBatch.get.start == batch1.get.start
test "Should handle partial batch completion":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 1000, 65536)
peerId = PeerId.example
download = dm.startDownload(desc)
batch = dm.getNextBatch(download)
check batch.isSome
download.markBatchInFlight(batch.get.start, batch.get.count, 0, peerId)
# Only the missing sub-range goes back to the scheduler.
let missingRanges = @[(start: 50'u64, count: 50'u64)]
download.partialCompleteBatch(batch.get.start, batch.get.count, 0, missingRanges, 0)
check download.ctx.scheduler.requeuedCount() >= 1
# Progress/status queries on an ActiveDownload.
suite "DownloadManager - Download Status":
test "Should check if download is complete":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 10, 65536)
download = dm.startDownload(desc)
check download.isDownloadComplete() == false
# Completion is received-count == total blocks (10 here).
download.ctx.received = 10
check download.isDownloadComplete() == true
test "Should check if work remains":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 1000, 65536)
download = dm.startDownload(desc)
check download.hasWorkRemaining() == true
test "Should return pending batch count":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 1000, 65536)
peerId = PeerId.example
download = dm.startDownload(desc)
check download.pendingBatchCount() == 0
let batch = dm.getNextBatch(download)
download.markBatchInFlight(batch.get.start, batch.get.count, 0, peerId)
check download.pendingBatchCount() == 1
# Per-download peer handling: failure handling requeues that peer's
# in-flight batches; availability updates go through the download's
# swarm.
suite "DownloadManager - Peer Management":
test "Should handle peer failure":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 1000, 65536)
peerId = PeerId.example
download = dm.startDownload(desc)
batch1 = dm.getNextBatch(download)
download.markBatchInFlight(batch1.get.start, batch1.get.count, 0, peerId)
let batch2 = dm.getNextBatch(download)
download.markBatchInFlight(batch2.get.start, batch2.get.count, 0, peerId)
check download.pendingBatchCount() == 2
# Both batches assigned to the failed peer are requeued.
download.handlePeerFailure(peerId)
check download.pendingBatchCount() == 0
check download.ctx.scheduler.requeuedCount() == 2
test "Should get swarm":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
download = dm.startDownload(desc)
swarm = download.getSwarm()
check swarm != nil
test "Should update peer availability - add new peer":
# Updating availability for an unknown peer implicitly adds it.
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
peerId = PeerId.example
availability = BlockAvailability.complete()
download = dm.startDownload(desc)
download.updatePeerAvailability(peerId, availability)
let
swarm = download.getSwarm()
peer = swarm.getPeer(peerId)
check peer.isSome
check peer.get.availability.kind == bakComplete
test "Should update peer availability - update existing peer":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
peerId = PeerId.example
download = dm.startDownload(desc)
download.updatePeerAvailability(peerId, BlockAvailability.unknown())
let peerBefore = download.getSwarm().getPeer(peerId)
check peerBefore.get.availability.kind == bakUnknown
download.updatePeerAvailability(peerId, BlockAvailability.complete())
let peerAfter = download.getSwarm().getPeer(peerId)
check peerAfter.get.availability.kind == bakComplete
suite "DownloadManager - Retry Management":
test "Should decrement block retries":
let
dm = DownloadManager.new(retries = 5)
blk = bt.Block.new("Hello".toBytes).tryGet
address = BlockAddress.init(blk.cid, 0)
desc = toDownloadDesc(address, blockSize = 0)
download = dm.startDownload(desc)
discard download.getWantHandle(address)
check download.retries(address) == 5
let exhausted = download.decrementBlockRetries(@[address])
check exhausted.len == 0
check download.retries(address) == 4
test "Should return exhausted blocks":
let
dm = DownloadManager.new(retries = 2)
blk = bt.Block.new("Hello".toBytes).tryGet
address = BlockAddress.init(blk.cid, 0)
desc = toDownloadDesc(address, blockSize = 0)
download = dm.startDownload(desc)
discard download.getWantHandle(address)
discard download.decrementBlockRetries(@[address])
check download.retries(address) == 1
let exhausted = download.decrementBlockRetries(@[address])
check exhausted.len == 1
check address in exhausted
test "Should fail exhausted blocks":
let
dm = DownloadManager.new(retries = 1)
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
address = BlockAddress(treeCid: treeCid, index: 0)
download = dm.startDownload(desc)
discard download.getWantHandle(address)
discard download.decrementBlockRetries(@[address])
download.failExhaustedBlocks(@[address])
check download.isBlockExhausted(address) == true
check address notin download
test "Should get block addresses for range":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
download = dm.startDownload(desc)
for i in 0'u64 ..< 5:
let address = BlockAddress(treeCid: treeCid, index: i.int)
discard download.getWantHandle(address)
let addresses = download.getBlockAddressesForRange(0, 10)
check addresses.len == 5
suite "DownloadManager - Request Tracking":
test "Should mark block as requested":
let
dm = DownloadManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
address = BlockAddress.init(blk.cid, 0)
desc = toDownloadDesc(address, blockSize = 0)
download = dm.startDownload(desc)
peerId = PeerId.example
discard download.getWantHandle(address)
check download.isRequested(address) == false
let marked = download.markRequested(address, peerId)
check marked == true
check download.isRequested(address) == true
check download.getRequestPeer(address) == some(peerId)
test "Should not mark already requested block":
let
dm = DownloadManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
address = BlockAddress.init(blk.cid, 0)
desc = toDownloadDesc(address, blockSize = 0)
download = dm.startDownload(desc)
peer1 = PeerId.example
peer2 = PeerId.example
discard download.getWantHandle(address)
let marked1 = download.markRequested(address, peer1)
check marked1 == true
let marked2 = download.markRequested(address, peer2)
check marked2 == false
check download.getRequestPeer(address) == some(peer1) # Still first peer
test "Should clear request":
let
dm = DownloadManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
address = BlockAddress.init(blk.cid, 0)
desc = toDownloadDesc(address, blockSize = 0)
download = dm.startDownload(desc)
peerId = PeerId.example
discard download.getWantHandle(address)
discard download.markRequested(address, peerId)
download.clearRequest(address)
check download.isRequested(address) == false
check download.getRequestPeer(address).isNone
suite "DownloadManager - DownloadDesc":
test "Should create full tree download desc":
let
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 1000, 65536)
check desc.cid == treeCid
check desc.startIndex == 0
check desc.count == 1000
check desc.id == treeCid
test "Should create range download desc":
let
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 500, 200, 65536)
check desc.cid == treeCid
check desc.startIndex == 500
check desc.count == 200
test "Should create leaf block download desc from address":
let
treeCid = Cid.example
address = BlockAddress(treeCid: treeCid, index: 42)
desc = toDownloadDesc(address, 65536)
check desc.cid == treeCid
check desc.startIndex == 42
check desc.count == 1
suite "DownloadContext - Basics":
test "Should create download context":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 1000)
check ctx.treeCid == treeCid
check ctx.blockSize == 65536
check ctx.totalBlocks == 1000
check ctx.received == 0
check ctx.bytesReceived == 0
test "Should create context with already have blocks":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 1000, alreadyHave = 100)
check ctx.received == 100
test "Should report not complete initially":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100)
check ctx.isComplete() == false
test "Should report complete when all received":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100)
ctx.received = 100
check ctx.isComplete() == true
test "Should return progress":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100)
ctx.received = 50
ctx.bytesReceived = 50'u64 * 65536
let progress = ctx.progress()
check progress.blocksCompleted == 50
check progress.totalBlocks == 100
check progress.bytesTransferred == 50'u64 * 65536
test "Should return remaining blocks":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100)
check ctx.remainingBlocks() == 100
ctx.received = 60
check ctx.remainingBlocks() == 40
ctx.received = 100
check ctx.remainingBlocks() == 0
test "Should init scheduler with missing blocks":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 1000)
missingBlocks = @[10'u64, 11, 12, 50, 51, 100]
ctx.scheduler.initFromIndices(missingBlocks, 256)
check ctx.scheduler.hasWork() == true
test "Should mark batch received":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100)
peerId = PeerId.example
ctx.markBatchInFlight(0, 10, peerId)
check ctx.inFlightCount() == 10
ctx.markBatchReceived(0, 10, 10'u64 * 65536)
check ctx.received == 10
check ctx.bytesReceived == 10'u64 * 65536
check ctx.inFlightCount() == 0
test "Should mark block in flight":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100)
peerId = PeerId.example
ctx.markBlockInFlight(42, peerId)
check ctx.isBlockInFlight(42) == true
check ctx.isBlockInFlight(43) == false
check ctx.inFlightCount() == 1
test "Should mark batch in flight":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100)
peerId = PeerId.example
ctx.markBatchInFlight(10, 5, peerId)
for i in 10'u64 ..< 15:
check ctx.isBlockInFlight(i) == true
check ctx.isBlockInFlight(15) == false
check ctx.inFlightCount() == 5
test "Should clear in-flight for peer":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100)
peer1 = PeerId.example
peer2 = PeerId.example
ctx.markBatchInFlight(0, 5, peer1)
ctx.markBatchInFlight(10, 5, peer2)
check ctx.inFlightCount() == 10
ctx.clearInFlightForPeer(peer1)
check ctx.inFlightCount() == 5
for i in 0'u64 ..< 5:
check ctx.isBlockInFlight(i) == false
for i in 10'u64 ..< 15:
check ctx.isBlockInFlight(i) == true
suite "DownloadContext - Windowed Presence":
test "Should compute presence window size":
check computePresenceWindowSize(65536) == 1024'u64 * 1024 * 1024 div 65536
check computePresenceWindowSize(1024) == 1024'u64 * 1024 * 1024 div 1024
check computePresenceWindowSize(2'u32 * 1024 * 1024 * 1024) >= 1'u64
test "Should initialize presence window":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100000)
check ctx.presenceWindowStart == 0
check ctx.presenceWindowEnd > 0
check ctx.presenceWindowSize > 0
test "Should get current presence window":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100000)
window = ctx.currentPresenceWindow()
check window.start == 0
check window.count == ctx.presenceWindowEnd
test "Should check if block is in presence window":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100000)
check ctx.presenceWindowContains(0) == true
check ctx.presenceWindowContains(ctx.presenceWindowEnd - 1) == true
check ctx.presenceWindowContains(ctx.presenceWindowEnd) == false
test "Should advance presence window":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100000)
oldEnd = ctx.presenceWindowEnd
newWindow = ctx.advancePresenceWindow()
check newWindow.start == oldEnd
check ctx.presenceWindowStart == oldEnd
check ctx.presenceWindowEnd > oldEnd
test "Should check if needs next presence window":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100000)
ctx.scheduler.init(ctx.totalBlocks, 256)
check ctx.needsNextPresenceWindow() == false
let threshold = (ctx.presenceWindowEnd.float * 0.75).uint64
var pos: uint64 = 0
while pos <= threshold:
discard ctx.scheduler.take()
ctx.scheduler.markComplete(pos)
pos += 256
if ctx.presenceWindowEnd < ctx.totalBlocks:
check ctx.needsNextPresenceWindow() == true
test "Should not need next window when at last window":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100) # Small total, fits in one window
ctx.scheduler.init(ctx.totalBlocks, 256)
discard ctx.scheduler.take()
ctx.scheduler.markComplete(0)
check ctx.needsNextPresenceWindow() == false
test "Should trim ranges entirely below watermark":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100000)
peerId = PeerId.example
ranges = @[(start: 0'u64, count: 400'u64), (start: 2000'u64, count: 500'u64)]
discard ctx.swarm.addPeer(peerId, BlockAvailability.fromRanges(ranges))
ctx.scheduler.init(ctx.totalBlocks, 256)
discard ctx.scheduler.take()
ctx.scheduler.markComplete(0)
discard ctx.scheduler.take()
ctx.scheduler.markComplete(256)
ctx.trimPresenceBeforeWatermark()
let peer = ctx.swarm.getPeer(peerId)
check peer.isSome
check peer.get.availability.kind == bakRanges
check peer.get.availability.ranges.len == 1
check peer.get.availability.ranges[0].start == 2000
check peer.get.availability.ranges[0].count == 500
test "Should keep ranges spanning the watermark intact":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100000)
peerId = PeerId.example
ranges = @[(start: 0'u64, count: 1000'u64)]
discard ctx.swarm.addPeer(peerId, BlockAvailability.fromRanges(ranges))
ctx.scheduler.init(ctx.totalBlocks, 256)
discard ctx.scheduler.take()
ctx.scheduler.markComplete(0)
discard ctx.scheduler.take()
ctx.scheduler.markComplete(256)
ctx.trimPresenceBeforeWatermark()
let peer = ctx.swarm.getPeer(peerId)
check peer.isSome
check peer.get.availability.kind == bakRanges
check peer.get.availability.ranges.len == 1
check peer.get.availability.ranges[0].start == 0
check peer.get.availability.ranges[0].count == 1000
test "Should not trim bakComplete peers":
let
treeCid = Cid.example
ctx = DownloadContext.new(treeCid, 65536, 100000)
peerId = PeerId.example
discard ctx.swarm.addPeer(peerId, BlockAvailability.complete())
ctx.scheduler.init(ctx.totalBlocks, 256)
discard ctx.scheduler.take()
ctx.scheduler.markComplete(0)
discard ctx.scheduler.take()
ctx.scheduler.markComplete(256)
ctx.trimPresenceBeforeWatermark()
let peer = ctx.swarm.getPeer(peerId)
check peer.isSome
check peer.get.availability.kind == bakComplete
suite "DownloadManager - Completion Future":
test "Should complete batch locally":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 10, 65536)
download = dm.startDownload(desc)
batch = dm.getNextBatch(download)
check batch.isSome
download.completeBatchLocal(batch.get.start, batch.get.count)
check download.ctx.scheduler.isEmpty()
check download.ctx.received == 10
check download.ctx.bytesReceived == 0
check download.pendingBatches.len == 0
check download.ctx.isComplete()
test "Should resolve completion future on success":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 10, 65536)
download = dm.startDownload(desc)
check not download.completionFuture.finished
let batch = dm.getNextBatch(download)
check batch.isSome
download.completeBatchLocal(batch.get.start, batch.get.count)
check download.completionFuture.finished
check not download.completionFuture.failed
let res = await download.waitForComplete()
check res.isOk
test "Should resolve completion future with error on exhausted blocks":
let
dm = DownloadManager.new(retries = 1)
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 10, 65536)
download = dm.startDownload(desc)
var addresses: seq[BlockAddress] = @[]
for i in 0'u64 ..< 10:
let address = BlockAddress(treeCid: treeCid, index: i.int)
discard download.getWantHandle(address)
addresses.add(address)
discard download.decrementBlockRetries(addresses)
download.failExhaustedBlocks(addresses)
check download.completionFuture.finished
check not download.completionFuture.failed
let res = await download.waitForComplete()
check res.isErr
check res.error of RetriesExhaustedError
test "Should fail completion future on cancel":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 100, 65536)
download = dm.startDownload(desc)
check not download.completionFuture.finished
dm.cancelDownload(treeCid)
check download.completionFuture.finished
check download.completionFuture.failed
test "Should not double-complete completion future":
let
dm = DownloadManager.new()
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 10, 65536)
download = dm.startDownload(desc)
batch = dm.getNextBatch(download)
check batch.isSome
download.completeBatchLocal(batch.get.start, batch.get.count)
check download.completionFuture.finished
check not download.completionFuture.failed
let result1 = await download.waitForComplete()
check result1.isOk
let error = (ref RetriesExhaustedError)(msg: "test error")
download.signalCompletionIfDone(error)
check not download.completionFuture.failed
let result2 = await download.waitForComplete()
check result2.isOk
test "Should propagate error through waitForComplete async":
let
dm = DownloadManager.new(retries = 1)
treeCid = Cid.example
desc = toDownloadDesc(treeCid, 10, 65536)
download = dm.startDownload(desc)
waiter = download.waitForComplete()
check not waiter.finished
var addresses: seq[BlockAddress] = @[]
for i in 0'u64 ..< 10:
let address = BlockAddress(treeCid: treeCid, index: i.int)
discard download.getWantHandle(address)
addresses.add(address)
discard download.decrementBlockRetries(addresses)
download.failExhaustedBlocks(addresses)
let res = await waiter
check res.isErr
check res.error of RetriesExhaustedError

View File

@ -1,5 +1,7 @@
import ./engine/testengine
import ./engine/testblockexc
import ./engine/testadvertiser
import ./engine/testscheduler
import ./engine/testswarm
{.warning[UnusedImport]: off.}

View File

@ -1,5 +1,4 @@
import std/sequtils
import std/tables
import std/[sequtils, tables]
import pkg/chronos
@ -7,6 +6,7 @@ import pkg/storage/rng
import pkg/storage/chunker
import pkg/storage/blocktype as bt
import pkg/storage/blockexchange
import pkg/storage/blockexchange/protocol/wantblocks
import ../../asynctest
import ../examples
@ -45,13 +45,13 @@ asyncchecksuite "Network - Handlers":
discard await networkPeer.connect()
test "Want List handler":
let treeCid = Cid.example
proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
for b in blocks:
check b.address in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.address)]
for entry in wantList.entries:
check entry.address.treeCid == treeCid
check entry.wantType == WantType.WantHave
check entry.priority == 1
check entry.cancel == true
@ -62,35 +62,24 @@ asyncchecksuite "Network - Handlers":
network.handlers.onWantList = wantListHandler
let wantList =
makeWantList(blocks.mapIt(it.cid), 1, true, WantType.WantHave, true, true)
makeWantList(treeCid, blocks.len, 1, true, WantType.WantHave, true, true)
let msg = Message(wantlist: wantList)
await buffer.pushData(lenPrefix(protobufEncode(msg)))
await done.wait(500.millis)
test "Blocks Handler":
proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: []).} =
check blocks == blocksDelivery.mapIt(it.blk)
done.complete()
network.handlers.onBlocksDelivery = blocksDeliveryHandler
let msg =
Message(payload: blocks.mapIt(BlockDelivery(blk: it, address: it.address)))
await buffer.pushData(lenPrefix(protobufEncode(msg)))
await buffer.pushData(frameProtobufMessage(protobufEncode(msg)))
await done.wait(500.millis)
test "Presence Handler":
let
treeCid = Cid.example
addresses = (0 ..< blocks.len).mapIt(BlockAddress(treeCid: treeCid, index: it))
proc presenceHandler(
peer: PeerId, presence: seq[BlockPresence]
) {.async: (raises: []).} =
for b in blocks:
check:
b.address in presence
check presence.len == blocks.len
for p in presence:
check p.address.treeCid == treeCid
done.complete()
@ -98,9 +87,9 @@ asyncchecksuite "Network - Handlers":
let msg = Message(
blockPresences:
blocks.mapIt(BlockPresence(address: it.address, type: BlockPresenceType.Have))
addresses.mapIt(BlockPresence(address: it, kind: BlockPresenceType.HaveRange))
)
await buffer.pushData(lenPrefix(protobufEncode(msg)))
await buffer.pushData(frameProtobufMessage(protobufEncode(msg)))
await done.wait(500.millis)
@ -139,13 +128,15 @@ asyncchecksuite "Network - Senders":
await allFuturesThrowing(switch1.stop(), switch2.stop())
test "Send want list":
let
treeCid = Cid.example
addresses = (0 ..< blocks.len).mapIt(BlockAddress(treeCid: treeCid, index: it))
proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
for b in blocks:
check b.address in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.address)]
for entry in wantList.entries:
check entry.address.treeCid == treeCid
check entry.wantType == WantType.WantHave
check entry.priority == 1
check entry.cancel == true
@ -155,38 +146,22 @@ asyncchecksuite "Network - Senders":
network2.handlers.onWantList = wantListHandler
await network1.sendWantList(
switch2.peerInfo.peerId,
blocks.mapIt(it.address),
1,
true,
WantType.WantHave,
true,
true,
)
await done.wait(500.millis)
test "send blocks":
proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: []).} =
check blocks == blocksDelivery.mapIt(it.blk)
done.complete()
network2.handlers.onBlocksDelivery = blocksDeliveryHandler
await network1.sendBlocksDelivery(
switch2.peerInfo.peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address))
switch2.peerInfo.peerId, addresses, 1, true, WantType.WantHave, true, true
)
await done.wait(500.millis)
test "send presence":
let
treeCid = Cid.example
addresses = (0 ..< blocks.len).mapIt(BlockAddress(treeCid: treeCid, index: it))
proc presenceHandler(
peer: PeerId, precense: seq[BlockPresence]
) {.async: (raises: []).} =
for b in blocks:
check:
b.address in precense
check precense.len == blocks.len
for p in precense:
check p.address.treeCid == treeCid
done.complete()
@ -194,7 +169,7 @@ asyncchecksuite "Network - Senders":
await network1.sendBlockPresence(
switch2.peerInfo.peerId,
blocks.mapIt(BlockPresence(address: it.address, type: BlockPresenceType.Have)),
addresses.mapIt(BlockPresence(address: it, kind: BlockPresenceType.HaveRange)),
)
await done.wait(500.millis)

View File

@ -1,24 +1,28 @@
import std/sugar
import std/sequtils
import std/options
import pkg/unittest2
import pkg/libp2p
import pkg/storage/blockexchange/peers
import pkg/storage/blockexchange/protobuf/blockexc
import pkg/storage/blockexchange/protobuf/presence
import pkg/storage/blockexchange/peers/peerstats
import pkg/storage/blockexchange/utils
import pkg/storage/storagetypes
import ../helpers
import ../examples
const
TestBlockSize = DefaultBlockSize.uint32
TestBatchBytes = computeBatchSize(TestBlockSize).uint64 * TestBlockSize.uint64
suite "Peer Context Store":
var
store: PeerCtxStore
peerCtx: BlockExcPeerCtx
store: PeerContextStore
peerCtx: PeerContext
setup:
store = PeerCtxStore.new()
peerCtx = BlockExcPeerCtx.example
store = PeerContextStore.new()
peerCtx = PeerContext.example
store.add(peerCtx)
test "Should add peer":
@ -31,78 +35,130 @@ suite "Peer Context Store":
test "Should get peer":
check store.get(peerCtx.id) == peerCtx
suite "Peer Context Store Peer Selection":
var
store: PeerCtxStore
peerCtxs: seq[BlockExcPeerCtx]
addresses: seq[BlockAddress]
test "Should return nil for unknown peer":
let unknownId = PeerId.example
check store.get(unknownId) == nil
setup:
store = PeerCtxStore.new()
addresses = collect(newSeq):
for i in 0 ..< 10:
BlockAddress(leaf: false, cid: Cid.example)
test "Should return correct length":
check store.len == 1
peerCtxs = collect(newSeq):
for i in 0 ..< 10:
BlockExcPeerCtx.example
let peer2 = PeerContext.new(PeerId.example)
store.add(peer2)
check store.len == 2
for p in peerCtxs:
store.add(p)
store.remove(peer2.id)
check store.len == 1
teardown:
store = nil
addresses = @[]
peerCtxs = @[]
test "Should return peer IDs":
let peer2 = PeerContext.new(PeerId.example)
let peer3 = PeerContext.new(PeerId.example)
store.add(peer2)
store.add(peer3)
test "Should select peers that have Cid":
peerCtxs[0].blocks = collect(initTable):
for i, a in addresses:
{a: Presence(address: a)}
let ids = store.peerIds
check ids.len == 3
check peerCtx.id in ids
check peer2.id in ids
check peer3.id in ids
peerCtxs[5].blocks = collect(initTable):
for i, a in addresses:
{a: Presence(address: a)}
test "Should iterate over peers":
let peer2 = PeerContext.new(PeerId.example)
let peer3 = PeerContext.new(PeerId.example)
store.add(peer2)
store.add(peer3)
let peers = store.peersHave(addresses[0])
var seenPeers: seq[PeerId]
for peer in store:
seenPeers.add(peer.id)
check peers.len == 2
check peerCtxs[0] in peers
check peerCtxs[5] in peers
check seenPeers.len == 3
check peerCtx.id in seenPeers
check peer2.id in seenPeers
check peer3.id in seenPeers
test "Should select peers that want Cid":
let entries = addresses.mapIt(
WantListEntry(
address: it,
priority: 1,
cancel: false,
wantType: WantType.WantBlock,
sendDontHave: false,
)
)
test "Should replace peer with same ID":
let newPeerCtx = PeerContext.new(peerCtx.id)
store.add(newPeerCtx)
for address in addresses:
peerCtxs[0].wantedBlocks.incl(address)
peerCtxs[5].wantedBlocks.incl(address)
check store.len == 1 # Still only one peer
check store.get(peerCtx.id) == newPeerCtx # New context replaces old
let peers = store.peersWant(addresses[4])
test "Should handle contains check":
check peerCtx.id in store
let unknownId = PeerId.example
check unknownId notin store
check peers.len == 2
check peerCtxs[0] in peers
check peerCtxs[5] in peers
test "Should be empty initially":
let newStore = PeerContextStore.new()
check newStore.len == 0
check newStore.peerIds.len == 0
test "Should return peers with and without block":
let address = addresses[2]
test "Should check contains in array":
let peers = @[peerCtx]
check peerCtx.id in peers
peerCtxs[1].blocks[address] = Presence(address: address)
peerCtxs[2].blocks[address] = Presence(address: address)
let unknownId = PeerId.example
check unknownId notin peers
let peers = store.getPeersForBlock(address)
suite "PeerContext":
test "Should create new PeerContext":
let
peerId = PeerId.example
ctx = PeerContext.new(peerId)
for i, pc in peerCtxs:
if i == 1 or i == 2:
check pc in peers.with
check pc notin peers.without
else:
check pc notin peers.with
check pc in peers.without
check ctx.id == peerId
check ctx.stats.throughputBps().isNone
test "Should compute optimal pipeline depth without stats":
let
ctx = PeerContext.new(PeerId.example)
depth = ctx.optimalPipelineDepth(TestBatchBytes)
check depth == DefaultRequestsPerPeer
suite "PeerPerfStats":
test "Should create new stats":
let stats = PeerPerfStats.new()
check stats.throughputBps().isNone
check stats.avgRttMicros().isNone
check stats.totalBytes() == 0
check stats.sampleCount() == 0
test "Should record requests":
var stats = PeerPerfStats.new()
stats.recordRequest(1000, 65536)
check stats.sampleCount() == 1
check stats.totalBytes() == 65536
test "Should compute average RTT":
var stats = PeerPerfStats.new()
stats.recordRequest(1000, 65536)
stats.recordRequest(2000, 65536)
stats.recordRequest(3000, 65536)
let avgRtt = stats.avgRttMicros()
check avgRtt.isSome
check avgRtt.get == 2000
test "Should limit RTT samples":
var stats = PeerPerfStats.new()
for i in 1 .. RttSampleCount + 5:
stats.recordRequest(i.uint64 * 100, 1024)
check stats.sampleCount() == RttSampleCount
test "Should reset stats":
var stats = PeerPerfStats.new()
stats.recordRequest(1000, 65536)
check stats.sampleCount() == 1
stats.reset()
check stats.sampleCount() == 0
check stats.totalBytes() == 0
check stats.throughputBps().isNone
check stats.avgRttMicros().isNone
test "Should compute batch size":
check computeBatchSize(65536) > 0
check computeBatchSize(1024) > computeBatchSize(65536)

View File

@ -1,86 +0,0 @@
import std/sequtils
import std/algorithm
import pkg/chronos
import pkg/stew/byteutils
import pkg/storage/blocktype as bt
import pkg/storage/blockexchange
import ../helpers
import ../../asynctest
suite "Pending Blocks":
test "Should add want handle":
let
pendingBlocks = PendingBlocksManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
discard pendingBlocks.getWantHandle(blk.cid)
check blk.cid in pendingBlocks
test "Should resolve want handle":
let
pendingBlocks = PendingBlocksManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
handle = pendingBlocks.getWantHandle(blk.cid)
check blk.cid in pendingBlocks
pendingBlocks.resolve(@[blk].mapIt(BlockDelivery(blk: it, address: it.address)))
await sleepAsync(0.millis)
# trigger the event loop, otherwise the block finishes before poll runs
let resolved = await handle
check resolved == blk
check blk.cid notin pendingBlocks
test "Should cancel want handle":
let
pendingBlocks = PendingBlocksManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
handle = pendingBlocks.getWantHandle(blk.cid)
check blk.cid in pendingBlocks
await handle.cancelAndWait()
check blk.cid notin pendingBlocks
test "Should get wants list":
let
pendingBlocks = PendingBlocksManager.new()
blks = (0 .. 9).mapIt(bt.Block.new(("Hello " & $it).toBytes).tryGet)
discard blks.mapIt(pendingBlocks.getWantHandle(it.cid))
check:
blks.mapIt($it.cid).sorted(cmp[string]) ==
toSeq(pendingBlocks.wantListBlockCids).mapIt($it).sorted(cmp[string])
test "Should get want handles list":
let
pendingBlocks = PendingBlocksManager.new()
blks = (0 .. 9).mapIt(bt.Block.new(("Hello " & $it).toBytes).tryGet)
handles = blks.mapIt(pendingBlocks.getWantHandle(it.cid))
wantHandles = toSeq(pendingBlocks.wantHandles)
check wantHandles.len == handles.len
pendingBlocks.resolve(blks.mapIt(BlockDelivery(blk: it, address: it.address)))
check:
(await allFinished(wantHandles)).mapIt($it.read.cid).sorted(cmp[string]) ==
(await allFinished(handles)).mapIt($it.read.cid).sorted(cmp[string])
test "Should handle retry counters":
let
pendingBlocks = PendingBlocksManager.new(3)
blk = bt.Block.new("Hello".toBytes).tryGet
address = BlockAddress.init(blk.cid)
handle = pendingBlocks.getWantHandle(blk.cid)
check pendingBlocks.retries(address) == 3
pendingBlocks.decRetries(address)
check pendingBlocks.retries(address) == 2
pendingBlocks.decRetries(address)
check pendingBlocks.retries(address) == 1
pendingBlocks.decRetries(address)
check pendingBlocks.retries(address) == 0
check pendingBlocks.retriesExhausted(address)

View File

@ -1,3 +1,4 @@
import ./protobuf/testpresence
import ./protocol/testpresence
import ./protocol/testmessage
{.warning[UnusedImport]: off.}

View File

@ -1,5 +1,5 @@
import std/random
import std/sequtils
import std/[random, sequtils]
import pkg/libp2p
import pkg/stint
import pkg/storage/rng
@ -19,15 +19,14 @@ proc example*(_: type PeerId): PeerId =
let key = PrivateKey.random(Rng.instance[]).get
PeerId.init(key.getPublicKey().get).get
proc example*(_: type BlockExcPeerCtx): BlockExcPeerCtx =
BlockExcPeerCtx(id: PeerId.example)
proc example*(_: type PeerContext): PeerContext =
PeerContext(id: PeerId.example)
proc example*(_: type Cid): Cid =
bt.Block.example.cid
proc example*(_: type BlockAddress): BlockAddress =
let cid = Cid.example
BlockAddress.init(cid)
BlockAddress.init(Cid.example, 0)
proc example*(_: type Manifest): Manifest =
Manifest.new(

View File

@ -25,11 +25,8 @@ export
export libp2p except setup, eventually
# NOTE: The meaning of equality for blocks
# is changed here, because blocks are now `ref`
# types. This is only in tests!!!
func `==`*(a, b: Block): bool =
(a.cid == b.cid) and (a.data == b.data)
(a.cid == b.cid) and (a.data[] == b.data[])
proc calcEcBlocksCount*(blocksCount: int, ecK, ecM: int): int =
let
@ -50,7 +47,8 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] =
return buf
proc makeWantList*(
cids: seq[Cid],
treeCid: Cid,
count: int,
priority: int = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
@ -58,9 +56,9 @@ proc makeWantList*(
sendDontHave: bool = false,
): WantList =
WantList(
entries: cids.mapIt(
entries: (0 ..< count).mapIt(
WantListEntry(
address: BlockAddress(leaf: false, cid: it),
address: BlockAddress(treeCid: treeCid, index: it),
priority: priority.int32,
cancel: cancel,
wantType: wantType,
@ -112,13 +110,13 @@ proc corruptBlocks*(
blk = (await store.getBlock(manifest.treeCid, i)).tryGet()
bytePos: seq[int]
doAssert bytes < blk.data.len
doAssert bytes < blk.data[].len
while bytePos.len <= bytes:
let ii = Rng.instance.rand(blk.data.len - 1)
let ii = Rng.instance.rand(blk.data[].len - 1)
if bytePos.find(ii) >= 0:
continue
bytePos.add(ii)
blk.data[ii] = byte 0
blk.data[][ii] = byte 0
return pos

View File

@ -33,8 +33,8 @@ proc makeDataset*(blocks: seq[Block]): ?!TestDataset =
return failure("Blocks list was empty")
let
datasetSize = blocks.mapIt(it.data.len).foldl(a + b)
blockSize = blocks.mapIt(it.data.len).foldl(max(a, b))
datasetSize = blocks.mapIt(it.data[].len).foldl(a + b)
blockSize = blocks.mapIt(it.data[].len).foldl(max(a, b))
tree = ?StorageMerkleTree.init(blocks.mapIt(it.cid))
treeCid = ?tree.rootCid
manifest = Manifest.new(

View File

@ -1,5 +1,4 @@
import std/sequtils
import std/sets
import std/[sequtils, sets]
import pkg/chronos
import pkg/taskpools
@ -44,8 +43,8 @@ type
blockDiscovery*: Discovery
network*: BlockExcNetwork
localStore*: BlockStore
peerStore*: PeerCtxStore
pendingBlocks*: PendingBlocksManager
peerStore*: PeerContextStore
downloadManager*: DownloadManager
discovery*: DiscoveryEngine
engine*: BlockExcEngine
networkStore*: NetworkStore
@ -71,15 +70,15 @@ converter toTuple*(
blockDiscovery: Discovery,
network: BlockExcNetwork,
localStore: BlockStore,
peerStore: PeerCtxStore,
pendingBlocks: PendingBlocksManager,
peerStore: PeerContextStore,
downloadManager: DownloadManager,
discovery: DiscoveryEngine,
engine: BlockExcEngine,
networkStore: NetworkStore,
] =
(
nc.switch, nc.blockDiscovery, nc.network, nc.localStore, nc.peerStore,
nc.pendingBlocks, nc.discovery, nc.engine, nc.networkStore,
nc.downloadManager, nc.discovery, nc.engine, nc.networkStore,
)
converter toComponents*(cluster: NodesCluster): seq[NodesComponents] =
@ -162,8 +161,8 @@ proc generateNodes*(
)
network = BlockExcNetwork.new(switch)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
peerStore = PeerContextStore.new()
downloadManager = DownloadManager.new()
let (localStore, tempDbs, blockDiscovery) =
if config.useRepoStore:
@ -196,16 +195,16 @@ proc generateNodes*(
(store.BlockStore, newSeq[TempLevelDb](), discovery)
let
discovery = DiscoveryEngine.new(
localStore, peerStore, network, blockDiscovery, pendingBlocks
)
discovery = DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery)
advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new(
localStore, network, discovery, advertiser, peerStore, pendingBlocks
localStore, network, discovery, advertiser, peerStore, downloadManager
)
networkStore = NetworkStore.new(engine, localStore)
manifestProto = ManifestProtocol.new(switch, localStore, blockDiscovery)
switch.mount(network)
switch.mount(manifestProto)
let node =
if config.createFullNode:
@ -214,6 +213,7 @@ proc generateNodes*(
networkStore = networkStore,
engine = engine,
discovery = blockDiscovery,
manifestProto = manifestProto,
taskpool = taskpool,
)
@ -239,7 +239,7 @@ proc generateNodes*(
network: network,
localStore: localStore,
peerStore: peerStore,
pendingBlocks: pendingBlocks,
downloadManager: downloadManager,
discovery: discovery,
engine: engine,
networkStore: networkStore,

View File

@ -18,7 +18,7 @@ const data = [
"00000000000000000000000000000009".toBytes, "00000000000000000000000000000010".toBytes,
]
suite "merkletree - coders":
suite "Merkletree - coders":
test "encoding and decoding a tree yields the same tree":
let
tree = StorageMerkleTree.init(Sha256HashCodec, data).tryGet()

View File

@ -1,5 +1,4 @@
import std/sequtils
import std/times
import pkg/questionable/results
import pkg/stew/byteutils

View File

@ -1,12 +1,11 @@
import std/tables
import std/times
import std/[tables, times]
import pkg/libp2p
import pkg/chronos
import pkg/storage/storagetypes
import pkg/storage/chunker
import pkg/storage/stores
import pkg/taskpools
import pkg/storage/manifest
import ../../asynctest
@ -22,7 +21,7 @@ proc new*(
method getBlock*(
self: CountingStore, address: BlockAddress
): Future[?!Block] {.async.} =
self.lookups.mgetOrPut(address.cid, 0).inc
self.lookups.mgetOrPut(address.treeCid, 0).inc
await procCall getBlock(NetworkStore(self), address)
proc toTimesDuration*(d: chronos.Duration): times.Duration =
@ -73,8 +72,8 @@ template setupAndTearDown*() {.dirty.} =
store: NetworkStore
node: StorageNodeRef
blockDiscovery: Discovery
peerStore: PeerCtxStore
pendingBlocks: PendingBlocksManager
peerStore: PeerContextStore
downloadManager: DownloadManager
discovery: DiscoveryEngine
advertiser: Advertiser
@ -101,20 +100,22 @@ template setupAndTearDown*() {.dirty.} =
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("Should return multiaddress")
],
)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
discovery =
DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks)
peerStore = PeerContextStore.new()
downloadManager = DownloadManager.new()
discovery = DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery)
advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new(
localStore, network, discovery, advertiser, peerStore, pendingBlocks
localStore, network, discovery, advertiser, peerStore, downloadManager
)
store = NetworkStore.new(engine, localStore)
let manifestProto = ManifestProtocol.new(switch, localStore, blockDiscovery)
switch.mount(manifestProto)
node = StorageNodeRef.new(
switch = switch,
networkStore = store,
engine = engine,
discovery = blockDiscovery,
manifestProto = manifestProto,
taskpool = Taskpool.new(),
)

View File

@ -1,5 +1,4 @@
import std/os
import std/options
import std/math
import std/importutils
@ -7,9 +6,7 @@ import pkg/chronos
import pkg/stew/byteutils
import pkg/datastore
import pkg/datastore/typedds
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/taskpools
import pkg/codexdht/discv5/protocol as discv5
@ -22,9 +19,7 @@ import pkg/storage/blockexchange
import pkg/storage/chunker
import pkg/storage/manifest
import pkg/storage/discovery
import pkg/storage/merkletree
import pkg/storage/blocktype as bt
import pkg/storage/rng
import pkg/storage/node {.all.}
@ -63,48 +58,16 @@ asyncchecksuite "Test Node - Basic":
check:
fetched == manifest
test "Block Batching":
test "Fetch Dataset":
let manifest = await storeDataGetManifest(localStore, chunker)
for batchSize in 1 .. 12:
(
await node.fetchBatched(
manifest,
batchSize = batchSize,
proc(
blocks: seq[bt.Block]
): Future[?!void] {.async: (raises: [CancelledError]).} =
check blocks.len > 0 and blocks.len <= batchSize
return success(),
)
).tryGet()
# Fetch the dataset using the download manager
(await node.fetchDatasetAsync(manifest, fetchLocal = true)).tryGet()
test "Block Batching with corrupted blocks":
let blocks = await makeRandomBlocks(datasetSize = 65536, blockSize = 64.KiBs)
assert blocks.len == 1
let blk = blocks[0]
# corrupt block
let pos = rng.Rng.instance.rand(blk.data.len - 1)
blk.data[pos] = byte 0
let manifest = await storeDataGetManifest(localStore, blocks)
let batchSize = manifest.blocksCount
let res = (
await node.fetchBatched(
manifest,
batchSize = batchSize,
proc(
blocks: seq[bt.Block]
): Future[?!void] {.async: (raises: [CancelledError]).} =
return failure("Should not be called"),
)
)
check res.isFailure
check res.error of CatchableError
check res.error.msg == "Some blocks failed (Result) to fetch (1)"
# Verify all blocks are accessible from local store
for i in 0 ..< manifest.blocksCount:
let blk = (await localStore.getBlock(manifest.treeCid, i)).tryGet()
check blk.data[].len > 0
test "Should store Data Stream":
let
@ -131,7 +94,7 @@ asyncchecksuite "Test Node - Basic":
var data: seq[byte]
for i in 0 ..< localManifest.blocksCount:
let blk = (await localStore.getBlock(localManifest.treeCid, i)).tryGet()
data &= blk.data
data &= blk.data[]
data.setLen(localManifest.datasetSize.int) # truncate data to original size
check:
@ -150,7 +113,7 @@ asyncchecksuite "Test Node - Basic":
var storedData: seq[byte]
for i in 0 ..< manifest.blocksCount:
let blk = (await localStore.getBlock(manifest.treeCid, i)).tryGet()
storedData &= blk.data
storedData &= blk.data[]
storedData.setLen(manifest.datasetSize.int) # truncate data to original size
check:

View File

@ -9,8 +9,6 @@ import pkg/storage/merkletree
import pkg/storage/manifest
import pkg/storage/blocktype as bt
import pkg/storage/chunker
import pkg/storage/rng
import pkg/taskpools
import ../helpers

View File

@ -61,7 +61,7 @@ suite "Cache Store":
not (await store.hasBlock(newBlock1.cid)).tryGet()
(await store.hasBlock(newBlock2.cid)).tryGet()
(await store.hasBlock(newBlock2.cid)).tryGet()
store.currentSize.int == newBlock2.data.len + newBlock3.data.len # 200
store.currentSize.int == newBlock2.data[].len + newBlock3.data[].len # 200
commonBlockStoreTests(
"Cache",

View File

@ -14,12 +14,8 @@ import pkg/questionable/results
import pkg/libp2p
import pkg/storage/blocktype as bt
import pkg/storage/stores/repostore
import pkg/storage/clock
import ../../asynctest
import ../helpers/mocktimer
import ../helpers/mockrepostore
import ../helpers/mockclock
import ../examples
import storage/namespaces

View File

@ -3,6 +3,6 @@ import ./blockexchange/testnetwork
import ./blockexchange/testpeerctxstore
import ./blockexchange/testdiscovery
import ./blockexchange/testprotobuf
import ./blockexchange/testpendingblocks
import ./blockexchange/testdownloadmanager
{.warning[UnusedImport]: off.}

View File

@ -5,40 +5,21 @@ import pkg/storage/blocktype
import ./examples
suite "blocktype":
test "should hash equal non-leaf block addresses onto the same hash":
suite "Blocktype":
test "should hash equal block addresses onto the same hash":
let
cid1 = Cid.example
nonLeaf1 = BlockAddress.init(cid1)
nonLeaf2 = BlockAddress.init(cid1)
addr1 = BlockAddress.init(cid1, 0)
addr2 = BlockAddress.init(cid1, 0)
check nonLeaf1 == nonLeaf2
check nonLeaf1.hash == nonLeaf2.hash
check addr1 == addr2
check addr1.hash == addr2.hash
test "should hash equal leaf block addresses onto the same hash":
test "should hash different block addresses onto different hashes":
let
cid1 = Cid.example
leaf1 = BlockAddress.init(cid1, 0)
leaf2 = BlockAddress.init(cid1, 0)
addr1 = BlockAddress.init(cid1, 0)
addr2 = BlockAddress.init(cid1, 1)
check leaf1 == leaf2
check leaf1.hash == leaf2.hash
test "should hash different non-leaf block addresses onto different hashes":
let
cid1 = Cid.example
cid2 = Cid.example
nonLeaf1 = BlockAddress.init(cid1)
nonLeaf2 = BlockAddress.init(cid2)
check nonLeaf1 != nonLeaf2
check nonLeaf1.hash != nonLeaf2.hash
test "should hash different leaf block addresses onto different hashes":
let
cid1 = Cid.example
leaf1 = BlockAddress.init(cid1, 0)
leaf2 = BlockAddress.init(cid1, 1)
check leaf1 != leaf2
check leaf1.hash != leaf2.hash
check addr1 != addr2
check addr1.hash != addr2.hash

View File

@ -4,8 +4,6 @@ import pkg/storage/chunker
import pkg/storage/blocktype as bt
import pkg/storage/manifest
import pkg/storage/merkletree
import ../asynctest
import ./helpers
import ./examples

View File

@ -1,10 +1,9 @@
import std/[unittest, options, net]
import std/[unittest, net]
import pkg/chronos
import pkg/libp2p/[multiaddress, multihash, multicodec]
import pkg/results
import ../../storage/nat
import ../../storage/utils/natutils
import ../../storage/utils
suite "NAT Address Tests":

View File

@ -44,7 +44,7 @@ method onMoveToNextStateEvent(state: State2): ?State =
method onMoveToNextStateEvent(state: State3): ?State =
some State(State1.new())
asyncchecksuite "async state machines":
asyncchecksuite "Async state machines":
var machine: Machine
proc moveToNextStateEvent(state: State): ?State =

View File

@ -8,7 +8,7 @@ import ../helpers
when defined(windows):
import stew/windows/acl
suite "keyutils":
suite "Keyutils":
let path = getTempDir() / "StorageTest"
setup:

View File

@ -3,7 +3,7 @@ import pkg/storage/utils/options
import ../helpers
suite "optional casts":
suite "Optional casts":
test "casting value to same type works":
check 42 as int == some 42

View File

@ -7,7 +7,7 @@ import ../helpers
type Module = object
trackedFutures: TrackedFutures
asyncchecksuite "tracked futures":
asyncchecksuite "Tracked futures":
var module: Module
setup:

View File

@ -2,7 +2,7 @@ import pkg/unittest2
import pkg/storage/utils
suite "findIt":
suite "FindIt":
setup:
type AnObject = object
attribute1*: int
@ -20,7 +20,7 @@ suite "findIt":
test "should return -1 when no object matches predicate":
assert objList.findIt(it.attribute1 == 15) == -1
suite "parseDuration":
suite "ParseDuration":
test "should parse durations":
var res: Duration # caller must still know if 'b' refers to bytes|bits
check parseDuration("10Hr", res) == 3

@ -1 +1 @@
Subproject commit 99884b5971759a0da437db3d2e834b92a058527d
Subproject commit 754765ba31a58f60f58b4136fde05481812f28ac