Merge branch 'master' into fix/merkle-tree-api

This commit is contained in:
Giuliano Mega 2026-04-30 17:30:39 -03:00 committed by GitHub
commit eaae00109e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
143 changed files with 8751 additions and 3491 deletions

View File

@ -9,7 +9,7 @@ on:
env:
cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: v2.2.4
nim_version: v2.2.8
concurrency:
group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
@ -76,7 +76,7 @@ jobs:
shell: bash
- name: Upload coverage data to Codecov
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v5
with:
directory: ./coverage/
fail_ci_if_error: true

14
.gitmodules vendored
View File

@ -33,11 +33,6 @@
url = https://github.com/status-im/nim-stew.git
ignore = untracked
branch = master
[submodule "vendor/nim-nitro"]
path = vendor/nim-nitro
url = https://github.com/status-im/nim-nitro.git
ignore = untracked
branch = main
[submodule "vendor/questionable"]
path = vendor/questionable
url = https://github.com/status-im/questionable.git
@ -131,15 +126,9 @@
[submodule "vendor/nim-contract-abi"]
path = vendor/nim-contract-abi
url = https://github.com/status-im/nim-contract-abi
[submodule "vendor/nim-json-rpc"]
path = vendor/nim-json-rpc
url = https://github.com/status-im/nim-json-rpc
[submodule "vendor/nim-zlib"]
path = vendor/nim-zlib
url = https://github.com/status-im/nim-zlib
[submodule "vendor/nim-ethers"]
path = vendor/nim-ethers
url = https://github.com/status-im/nim-ethers
[submodule "vendor/lrucache.nim"]
path = vendor/lrucache.nim
url = https://github.com/status-im/lrucache.nim
@ -167,9 +156,6 @@
[submodule "vendor/nim-sqlite3-abi"]
path = vendor/nim-sqlite3-abi
url = https://github.com/arnetheduck/nim-sqlite3-abi.git
[submodule "vendor/nim-eth"]
path = vendor/nim-eth
url = https://github.com/status-im/nim-eth
[submodule "vendor/nim-protobuf-serialization"]
path = vendor/nim-protobuf-serialization
url = https://github.com/status-im/nim-protobuf-serialization

View File

@ -15,7 +15,7 @@
#
# If NIM_COMMIT is set to "nimbusbuild", this will use the
# version pinned by nimbus-build-system.
PINNED_NIM_VERSION := v2.2.4
PINNED_NIM_VERSION := v2.2.8
ifeq ($(NIM_COMMIT),)
NIM_COMMIT := $(PINNED_NIM_VERSION)
@ -145,7 +145,7 @@ test: | build deps
# Builds and runs the integration tests
testIntegration: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testIntegration $(TEST_PARAMS) $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims
$(ENV_SCRIPT) nim testIntegration $(TEST_PARAMS) $(NIM_PARAMS) build.nims
# Builds a C example that uses the libstorage C library and runs it
testLibstorage: | build deps
@ -216,7 +216,7 @@ build-nph:
ifeq ("$(wildcard $(NPH))","")
cd vendor/nph && \
nimble setup -l && \
nimble build && \
nimble build -d:disable_libbacktrace && \
mv ./nph ../../$(shell dirname $(NPH)) && \
echo "nph utility is available at " $(NPH)
endif

View File

@ -91,6 +91,6 @@ Feel free to dive in, contributions are welcomed! Open an issue or submit PRs.
`logos-storage-nim` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling.
If you are setting up fresh setup, in order to get `nph` run `make build-nph`.
In order to format files run `make nph/<file/folder you want to format>`.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them.
If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension you can enable "Format On Save" (eq. the `nim.formatOnSave` property) that will format the files using `nph`.
In order to format files run `make nph/<file/folder you want to format>`.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them.
If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension you can enable "Format On Save" (eq. the `nim.formatOnSave` property) that will format the files using `nph`.

View File

@ -114,7 +114,7 @@ when (NimMajor, NimMinor, NimPatch) >= (1, 6, 11):
"BareExcept:off"
when (NimMajor, NimMinor) >= (2, 0):
--mm:
refc
orc
switch("define", "withoutPCRE")
@ -148,7 +148,7 @@ switch("define", "chronicles_sinks=textlines[dynamic],json[dynamic],textlines[dy
# Workaround for assembler incompatibility between constantine and secp256k1
switch("define", "use_asm_syntax_intel=false")
switch("define", "ctt_asm=false")
switch("define", "ctt_asm=true")
# Allow the use of old-style case objects for nim config compatibility
switch("define", "nimOldCaseObjects")

View File

@ -1,6 +1,3 @@
## Can be shared safely between threads
type SharedSeq*[T] = tuple[data: ptr UncheckedArray[T], len: int]
proc alloc*(str: cstring): cstring =
# Byte allocation from the given address.
# There should be the corresponding manual deallocation with deallocShared !
@ -22,21 +19,3 @@ proc alloc*(str: string): cstring =
ret[i] = s[i]
ret[str.len] = '\0'
return ret
proc allocSharedSeq*[T](s: seq[T]): SharedSeq[T] =
let data = allocShared(sizeof(T) * s.len)
if s.len != 0:
copyMem(data, unsafeAddr s[0], s.len)
return (cast[ptr UncheckedArray[T]](data), s.len)
proc deallocSharedSeq*[T](s: var SharedSeq[T]) =
deallocShared(s.data)
s.len = 0
proc toSeq*[T](s: SharedSeq[T]): seq[T] =
## Creates a seq[T] from a SharedSeq[T]. No explicit dealloc is required
## as req[T] is a GC managed type.
var ret = newSeq[T]()
for i in 0 ..< s.len:
ret.add(s.data[i])
return ret

View File

@ -6,6 +6,7 @@
################################################################################
### Exported types
import results
import stew/ptrops
type StorageCallback* = proc(
callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
@ -19,7 +20,7 @@ const RET_PROGRESS*: cint = 3
## Returns RET_OK as acknowledgment and call the callback
## with RET_OK code and the provided message.
proc success*(callback: StorageCallback, msg: string, userData: pointer): cint =
callback(RET_OK, cast[ptr cchar](msg), cast[csize_t](len(msg)), userData)
callback(RET_OK, baseAddr msg, cast[csize_t](len(msg)), userData)
return RET_OK
@ -27,7 +28,7 @@ proc success*(callback: StorageCallback, msg: string, userData: pointer): cint =
## with RET_ERR code and the provided message.
proc error*(callback: StorageCallback, msg: string, userData: pointer): cint =
let msg = "libstorage error: " & msg
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
callback(RET_ERR, baseAddr msg, cast[csize_t](len(msg)), userData)
return RET_ERR

View File

@ -52,14 +52,15 @@ proc getDebug(
let node = storage[].node
let table = RestRoutingTable.init(node.discovery.protocol.routingTable)
let json = %*{
"id": $node.switch.peerInfo.peerId,
"addrs": node.switch.peerInfo.addrs.mapIt($it),
"spr":
if node.discovery.dhtRecord.isSome: node.discovery.dhtRecord.get.toURI else: "",
"announceAddresses": node.discovery.announceAddrs,
"table": table,
}
let json =
%*{
"id": $node.switch.peerInfo.peerId,
"addrs": node.switch.peerInfo.addrs.mapIt($it),
"spr":
if node.discovery.dhtRecord.isSome: node.discovery.dhtRecord.get.toURI else: "",
"announceAddresses": node.discovery.announceAddrs,
"table": table,
}
return ok($json)

View File

@ -107,7 +107,7 @@ proc createStorage(
except ConfigurationError as e:
return err("Failed to create Storage: unable to load configuration: " & e.msg)
conf.setupLogging()
let logFile = conf.setupLogging()
try:
{.gcsafe.}:
@ -115,7 +115,8 @@ proc createStorage(
except ValueError as err:
return err("Failed to create Storage: invalid value for log level: " & err.msg)
conf.setupMetrics()
if err =? conf.setupMetrics().errorOption:
return err("Failed to start metrics server: " & err.msg)
if not (checkAndCreateDataDir((conf.dataDir).string)):
# We are unable to access/create data folder or data folder's
@ -149,7 +150,7 @@ proc createStorage(
let server =
try:
StorageServer.new(conf, pk)
StorageServer.new(conf, pk, logFile)
except Exception as exc:
return err("Failed to create Storage: " & exc.msg)
@ -186,6 +187,6 @@ proc process*(
try:
await storage[].close()
except Exception as e:
error "Failed to STOP_NODE.", error = e.msg
error "Failed to CLOSE_NODE.", error = e.msg
return err(e.msg)
return ok("")

View File

@ -109,7 +109,9 @@ proc fetch(
if manifest.isErr:
return err("Failed to fetch the data: " & manifest.error.msg)
node.fetchDatasetAsyncTask(manifest.get())
node.fetchDatasetAsyncTask(
ManifestDescriptor(manifest: manifest.get(), manifestCid: cid.get())
)
return ok(serde.toJson(manifest.get()))
except CancelledError:

View File

@ -53,7 +53,8 @@ when isMainModule:
sources.addConfigFile(Toml, configFile)
,
)
config.setupLogging()
let logFile = config.setupLogging()
try:
updateLogLevel(config.logLevel)
@ -64,7 +65,9 @@ when isMainModule:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
config.setupMetrics()
if err =? config.setupMetrics().errorOption:
fatal "Failed to start metrics server", err = err.msg
quit QuitFailure
if not (checkAndCreateDataDir((config.dataDir).string)):
# We are unable to access/create data folder or data folder's
@ -92,9 +95,10 @@ when isMainModule:
config.dataDir / config.netPrivKeyFile
privateKey = setupKey(keyPath).expect("Should setup private key!")
server =
try:
StorageServer.new(config, privateKey)
StorageServer.new(config, privateKey, logFile)
except Exception as exc:
error "Failed to start Logos Storage", msg = exc.msg
quit QuitFailure

View File

@ -1,5 +1,5 @@
import ./blockexchange/[network, engine, peers]
import ./blockexchange/protobuf/[blockexc, presence]
import ./blockexchange/protocol/[message, presence]
export network, engine, blockexc, presence, peers
export network, engine, message, presence, peers

View File

@ -1,5 +1,12 @@
import ./engine/discovery
import ./engine/advertiser
import ./engine/engine
import ./engine/scheduler
import ./engine/swarm
import ./engine/downloadcontext
import ./engine/activedownload
import ./engine/downloadmanager
export discovery, advertiser, engine
export
discovery, advertiser, engine, scheduler, swarm, downloadcontext, activedownload,
downloadmanager

View File

@ -0,0 +1,388 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/[tables, sets, monotimes, options]
import pkg/chronos
import pkg/libp2p
import pkg/metrics
import pkg/questionable
import ../../blocktype
import ../../logutils
import ../../stores/blockstore
import ./scheduler
import ./swarm
import ./downloadcontext
export scheduler, swarm, downloadcontext
logScope:
topics = "storage activedownload"
declareGauge(
storage_block_exchange_retrieval_time_us,
"storage blockexchange block retrieval time us",
)
type
RetriesExhaustedError* = object of StorageError
DownloadTerminatedError* = object of StorageError
BlockHandle* = Future[?!Block].Raising([CancelledError])
BlockHandleOpaque* = Future[?!void].Raising([CancelledError])
BlockReq* = object
handle*: BlockHandle
opaqueHandle*: BlockHandleOpaque
blockRetries*: int
startTime*: int64
PendingBatch* = object
start*: uint64
count*: uint64
localCount*: uint64 # blocks already local when batch was scheduled
peerId*: PeerId
sentAt*: Moment
timeoutFuture*: Future[void] # timeout handler to cancel on completion
requestFuture*: Future[void] # request future to cancel on timeout
ActiveDownload* = ref object
id*: uint64 # for request/response correlation - echoed in protocol
ctx*: DownloadContext
blocks*: Table[BlockAddress, BlockReq] # per-download block requests
pendingBatches*: Table[uint64, PendingBatch] # batch start -> pending info
exhaustedBlocks*: HashSet[BlockAddress]
# blocks that exhausted retries - failed permanently
maxBlockRetries*: int
retryInterval*: Duration
cancelled*: bool
isBackground*: bool
fetchLocal*: bool
completionFuture*: Future[?!void].Raising([CancelledError])
proc waitForComplete*(
download: ActiveDownload
): Future[?!void] {.async: (raises: [CancelledError]).} =
return await download.completionFuture
proc signalCompletionIfDone(download: ActiveDownload, error: ref StorageError = nil) =
if download.completionFuture.finished:
return
if error != nil:
download.completionFuture.complete(void.failure(error))
let termErr = (ref DownloadTerminatedError)(msg: "Download terminated")
for _, blockReq in download.blocks:
if not blockReq.handle.finished:
blockReq.handle.complete(Block.failure(termErr))
blockReq.opaqueHandle.complete(Result[void, ref CatchableError].err(termErr))
elif download.ctx.isComplete:
download.completionFuture.complete(success())
proc treeCid*(download: ActiveDownload): Cid =
download.ctx.md.manifest.treeCid
proc manifestCid*(download: ActiveDownload): Cid =
download.ctx.md.manifestCid
proc makeBlockAddress*(download: ActiveDownload, index: uint64): BlockAddress =
BlockAddress(treeCid: download.treeCid, index: index.int)
proc getOrCreateBlockReq(download: ActiveDownload, address: BlockAddress): BlockReq =
download.blocks.withValue(address, blkReq):
return blkReq[]
do:
let blkReq = BlockReq(
handle: BlockHandle.init("ActiveDownload.getWantHandle"),
opaqueHandle: BlockHandleOpaque.init("ActiveDownload.getWantHandleOpaque"),
blockRetries: download.maxBlockRetries,
startTime: getMonoTime().ticks,
)
download.blocks[address] = blkReq
let handle = blkReq.handle
proc cleanUpBlock(data: pointer) {.raises: [].} =
download.blocks.del(address)
handle.addCallback(cleanUpBlock)
handle.cancelCallback = proc(data: pointer) {.raises: [].} =
if not handle.finished:
handle.removeCallback(cleanUpBlock)
cleanUpBlock(nil)
return blkReq
proc getWantHandle*(
download: ActiveDownload, address: BlockAddress
): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} =
let blkReq = download.getOrCreateBlockReq(address)
if download.completionFuture.finished and not blkReq.handle.finished:
let err = (ref DownloadTerminatedError)(msg: "Download terminated")
blkReq.handle.complete(Block.failure(err))
blkReq.opaqueHandle.complete(Result[void, ref CatchableError].err(err))
blkReq.handle
proc getWantHandleOpaque*(
download: ActiveDownload, address: BlockAddress
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
download.getOrCreateBlockReq(address).opaqueHandle
proc completeWantHandle*(
download: ActiveDownload, address: BlockAddress, blk: Option[Block] = none(Block)
): bool {.raises: [].} =
download.blocks.withValue(address, blockReq):
proc recordRetrievalTime(startTime: int64) =
let
stopTime = getMonoTime().ticks
retrievalDurationUs = (stopTime - startTime) div 1000
storage_block_exchange_retrieval_time_us.set(retrievalDurationUs)
if blk.isSome:
if not blockReq[].handle.finished:
blockReq[].handle.complete(success(blk.get))
blockReq[].opaqueHandle.complete(success())
recordRetrievalTime(blockReq[].startTime)
return true
else:
trace "Want handle already completed", address
return false
else:
if not blockReq[].opaqueHandle.finished:
blockReq[].opaqueHandle.complete(success())
recordRetrievalTime(blockReq[].startTime)
return true
else:
return false
do:
trace "No pending want handle found", address
return false
proc failWantHandle(
download: ActiveDownload, address: BlockAddress, error: ref StorageError
) {.raises: [].} =
download.blocks.withValue(address, blockReq):
if not blockReq[].handle.finished:
blockReq[].handle.complete(Block.failure(error))
blockReq[].opaqueHandle.complete(Result[void, ref CatchableError].err(error))
func retries*(download: ActiveDownload, address: BlockAddress): int =
download.blocks.withValue(address, pending):
result = pending[].blockRetries
do:
result = 0
func decRetries*(download: ActiveDownload, address: BlockAddress) =
download.blocks.withValue(address, pending):
pending[].blockRetries -= 1
func retriesExhausted*(download: ActiveDownload, address: BlockAddress): bool =
download.blocks.withValue(address, pending):
result = pending[].blockRetries <= 0
proc decrementBlockRetries*(
download: ActiveDownload, addresses: seq[BlockAddress]
): seq[BlockAddress] =
result = @[]
for address in addresses:
download.blocks.withValue(address, req):
req[].blockRetries -= 1
if req[].blockRetries <= 0:
result.add(address)
proc failExhaustedBlocks*(download: ActiveDownload, addresses: seq[BlockAddress]) =
if addresses.len == 0:
return
for address in addresses:
download.exhaustedBlocks.incl(address)
download.ctx.received += 1
let error = (ref RetriesExhaustedError)(
msg: "Block retries exhausted after " & $download.maxBlockRetries & " attempts"
)
for address in addresses:
download.failWantHandle(address, error)
download.blocks.del(address)
download.signalCompletionIfDone(error)
proc failLocalMissing*(download: ActiveDownload, address: BlockAddress) =
let error = (ref BlockNotFoundError)(msg: "Block not found locally: " & $address)
download.failWantHandle(address, error)
download.signalCompletionIfDone(error)
proc isBlockExhausted*(download: ActiveDownload, address: BlockAddress): bool =
address in download.exhaustedBlocks
proc getBlockAddressesForRange*(
download: ActiveDownload, start: uint64, count: uint64
): seq[BlockAddress] =
result = @[]
for i in start ..< start + count:
let address = download.makeBlockAddress(i)
if address in download.blocks:
result.add(address)
func contains*(download: ActiveDownload, address: BlockAddress): bool =
address in download.blocks
proc markBlockReturned*(download: ActiveDownload) =
download.ctx.markBlockReturned()
proc markBatchInFlight*(
download: ActiveDownload,
start: uint64,
count: uint64,
localCount: uint64,
peerId: PeerId,
timeoutFuture: Future[void] = nil,
) =
download.pendingBatches[start] = PendingBatch(
start: start,
count: count,
localCount: localCount,
peerId: peerId,
sentAt: Moment.now(),
timeoutFuture: timeoutFuture,
)
proc setBatchTimeoutFuture*(
download: ActiveDownload, start: uint64, timeoutFuture: Future[void]
) =
download.pendingBatches.withValue(start, pending):
pending[].timeoutFuture = timeoutFuture
proc setBatchRequestFuture*(
download: ActiveDownload, start: uint64, requestFuture: Future[void]
) =
download.pendingBatches.withValue(start, pending):
pending[].requestFuture = requestFuture
proc completeBatchLocal*(download: ActiveDownload, start: uint64, count: uint64) =
download.ctx.scheduler.markComplete(start)
download.ctx.markBatchReceived(start, count, 0)
download.signalCompletionIfDone()
proc completeBatch*(
download: ActiveDownload,
start: uint64,
blocksDeliveryCount: uint64,
totalBytes: uint64,
) =
var localCount: uint64 = 0
download.pendingBatches.withValue(start, pending):
localCount = pending[].localCount
if not pending[].timeoutFuture.isNil and not pending[].timeoutFuture.finished:
pending[].timeoutFuture.cancelSoon()
download.pendingBatches.del(start)
download.ctx.scheduler.markComplete(start)
download.ctx.markBatchReceived(start, localCount + blocksDeliveryCount, totalBytes)
download.signalCompletionIfDone()
proc requeueBatch*(
download: ActiveDownload, start: uint64, count: uint64, front: bool = false
) =
download.pendingBatches.withValue(start, pending):
if not pending[].timeoutFuture.isNil and not pending[].timeoutFuture.finished:
pending[].timeoutFuture.cancelSoon()
download.pendingBatches.del(start)
if front:
download.ctx.scheduler.requeueFront(start, count)
else:
download.ctx.scheduler.requeueBack(start, count)
proc partialCompleteBatch*(
download: ActiveDownload,
originalStart: uint64,
originalCount: uint64,
receivedBlocksCount: uint64,
missingRanges: seq[tuple[start: uint64, count: uint64]],
totalBytes: uint64,
) =
var localCount: uint64 = 0
download.pendingBatches.withValue(originalStart, pending):
localCount = pending[].localCount
if not pending[].timeoutFuture.isNil and not pending[].timeoutFuture.finished:
pending[].timeoutFuture.cancelSoon()
download.pendingBatches.del(originalStart)
var missingBatches: seq[BlockBatch] = @[]
for r in missingRanges:
missingBatches.add((start: r.start, count: r.count))
download.ctx.scheduler.partialComplete(originalStart, missingBatches)
download.ctx.markBatchReceived(
originalStart, localCount + receivedBlocksCount, totalBytes
)
download.signalCompletionIfDone()
proc isDownloadComplete*(download: ActiveDownload): bool =
download.ctx.isComplete()
proc hasWorkRemaining*(download: ActiveDownload): bool =
not download.ctx.scheduler.isEmpty()
proc pendingBatchCount*(download: ActiveDownload): int =
download.pendingBatches.len
proc handlePeerFailure*(download: ActiveDownload, peerId: PeerId) =
var toRequeue: seq[tuple[start: uint64, count: uint64]] = @[]
for start, batch in download.pendingBatches:
if batch.peerId == peerId:
toRequeue.add((start, batch.count))
for (start, count) in toRequeue:
download.requeueBatch(start, count, front = true)
trace "Requeued batches from failed peer", peer = peerId, batches = toRequeue.len
proc getSwarm(download: ActiveDownload): Swarm =
download.ctx.swarm
proc updatePeerAvailability*(
download: ActiveDownload, peerId: PeerId, availability: BlockAvailability
) =
if download.ctx.swarm.getPeer(peerId).isNone:
discard download.ctx.swarm.addPeer(peerId, availability)
else:
download.ctx.swarm.updatePeerAvailability(peerId, availability)
proc addPeerIfAbsent*(
download: ActiveDownload, peerId: PeerId, availability: BlockAvailability
): bool =
let existingPeer = download.ctx.swarm.getPeer(peerId)
if existingPeer.isSome:
# peer already tracked, skip if bakComplete
return existingPeer.get().availability.kind != bakComplete
discard download.ctx.swarm.addPeer(peerId, availability)
return true # new peer added, send WantHave
proc handleBatchRetry*(
download: ActiveDownload, start: uint64, count: uint64, waitTime: Duration
) {.async: (raises: [CancelledError]).} =
let
addresses = download.getBlockAddressesForRange(start, count)
exhausted = download.decrementBlockRetries(addresses)
if exhausted.len > 0:
warn "Block retries exhausted",
treeCid = download.treeCid, exhaustedCount = exhausted.len
download.failExhaustedBlocks(exhausted)
download.requeueBatch(start, count, front = false)
await sleepAsync(waitTime)

View File

@ -11,14 +11,10 @@
import pkg/chronos
import pkg/libp2p/cid
import pkg/libp2p/multicodec
import pkg/metrics
import pkg/questionable
import pkg/questionable/results
import ../protobuf/presence
import ../peers
import ../../utils
import ../../utils/exceptions
import ../../utils/trackedfutures
@ -63,17 +59,8 @@ proc advertiseBlock(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError])
try:
if isM:
without blk =? await b.localStore.getBlock(cid), err:
error "Error retrieving manifest block", cid, err = err.msg
return
without manifest =? Manifest.decode(blk), err:
error "Unable to decode as manifest", err = err.msg
return
# announce manifest cid and tree cid
# announce manifest cid
await b.addCidToQueue(cid)
await b.addCidToQueue(manifest.treeCid)
except CancelledError as exc:
trace "Cancelled advertise block", cid
raise exc

View File

@ -7,19 +7,12 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import std/algorithm
import pkg/chronos
import pkg/libp2p/cid
import pkg/libp2p/multicodec
import pkg/metrics
import pkg/questionable
import pkg/questionable/results
import ./pendingblocks
import ../protobuf/presence
import ../network
import ../peers
@ -28,7 +21,6 @@ import ../../utils/trackedfutures
import ../../discovery
import ../../stores/blockstore
import ../../logutils
import ../../manifest
logScope:
topics = "storage discoveryengine"
@ -38,60 +30,24 @@ declareGauge(storage_inflight_discovery, "inflight discovery requests")
const
DefaultConcurrentDiscRequests = 10
DefaultDiscoveryTimeout = 1.minutes
DefaultMinPeersPerBlock = 3
DefaultMaxPeersPerBlock = 8
DefaultDiscoveryLoopSleep = 3.seconds
RoutingTableHealthInterval = 30.seconds
type DiscoveryEngine* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
peers*: PeerCtxStore # Peer context store
peers*: PeerContextStore # Peer context store
network*: BlockExcNetwork # Network interface
discovery*: Discovery # Discovery interface
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
discEngineRunning*: bool # Indicates if discovery is running
concurrentDiscReqs: int # Concurrent discovery requests
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Min number of peers with block
maxPeersPerBlock*: int # Max number of peers with block
discoveryLoopSleep: Duration # Discovery loop sleep
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
# Inflight discovery requests
proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} =
var haves = b.peers.peersHave(cid)
let count = haves.len - b.maxPeersPerBlock
if count <= 0:
return
haves.sort(
proc(a, b: BlockExcPeerCtx): int =
cmp(a.lastExchange, b.lastExchange)
)
let toRemove = haves[0 ..< count]
for peer in toRemove:
try:
peer.cleanPresence(BlockAddress.init(cid))
trace "Removed block presence from peer", cid, peer = peer.id
except CatchableError as exc:
error "Failed to clean presence for peer",
cid, peer = peer.id, error = exc.msg, name = exc.name
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
try:
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
await b.discoveryQueue.put(cid)
await sleepAsync(b.discoveryLoopSleep)
except CancelledError:
trace "Discovery loop cancelled"
proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
## Run discovery tasks
##
## Peer availability is tracked per-download in DownloadContext.swarm.
## This loop just runs discovery for CIDs that are queued.
try:
while b.discEngineRunning:
@ -103,36 +59,54 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
trace "Running discovery task for cid", cid
let haves = b.peers.peersHave(cid)
let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request
storage_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
if haves.len > b.maxPeersPerBlock:
trace "Cleaning up excess peers",
cid, peers = haves.len, max = b.maxPeersPerBlock
b.cleanupExcessPeers(cid)
continue
if haves.len < b.minPeersPerBlock:
let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request
defer:
b.inFlightDiscReqs.del(cid)
storage_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
defer:
b.inFlightDiscReqs.del(cid)
storage_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
if (await request.withTimeout(DefaultDiscoveryTimeout)) and
peers =? (await request).catch:
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
if (await request.withTimeout(DefaultDiscoveryTimeout)) and
peers =? (await request).catch:
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
for i, f in dialed:
if f.failed:
await b.discovery.removeProvider(peers[i].data.peerId)
for i, f in dialed:
if f.failed:
await b.discovery.removeProvider(peers[i].data.peerId)
except CancelledError:
trace "Discovery task cancelled"
return
info "Exiting discovery task runner"
proc routingTableHealthLoop(b: DiscoveryEngine) {.async: (raises: []).} =
## Re-seed the DHT routing table from the configured bootstrap records when
## it goes empty.
try:
while b.discEngineRunning:
await sleepAsync(RoutingTableHealthInterval)
if b.discovery.protocol.nodesDiscovered() != 0:
continue
warn "Routing table empty, re-seeding from bootstrap records",
bootstrap = b.discovery.protocol.bootstrapRecords.len
b.discovery.protocol.seedTable()
try:
await b.discovery.protocol.populateTable()
debug "Routing table re-populated",
total = b.discovery.protocol.nodesDiscovered()
except CancelledError:
return
except CatchableError as exc:
warn "Failed to re-populate routing table", exc = exc.msg
except CancelledError:
trace "Routing table health loop cancelled"
return
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) =
for cid in cids:
if cid notin b.discoveryQueue:
@ -156,8 +130,10 @@ proc start*(b: DiscoveryEngine) {.async: (raises: []).} =
let fut = b.discoveryTaskLoop()
b.trackedFutures.track(fut)
b.discoveryLoop = b.discoveryQueueLoop()
b.trackedFutures.track(b.discoveryLoop)
if not b.discovery.protocol.isNil and b.discovery.protocol.bootstrapRecords.len > 0:
b.trackedFutures.track(b.routingTableHealthLoop())
else:
trace "No bootstrap records configured, routing table health watchdog disabled"
trace "Discovery engine started"
@ -180,28 +156,20 @@ proc stop*(b: DiscoveryEngine) {.async: (raises: []).} =
proc new*(
T: type DiscoveryEngine,
localStore: BlockStore,
peers: PeerCtxStore,
peers: PeerContextStore,
network: BlockExcNetwork,
discovery: Discovery,
pendingBlocks: PendingBlocksManager,
concurrentDiscReqs = DefaultConcurrentDiscRequests,
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
minPeersPerBlock = DefaultMinPeersPerBlock,
maxPeersPerBlock = DefaultMaxPeersPerBlock,
): DiscoveryEngine =
## Create a discovery engine instance for advertising services
## Create a discovery engine instance
##
DiscoveryEngine(
localStore: localStore,
peers: peers,
network: network,
discovery: discovery,
pendingBlocks: pendingBlocks,
concurrentDiscReqs: concurrentDiscReqs,
discoveryQueue: newAsyncQueue[Cid](concurrentDiscReqs),
trackedFutures: TrackedFutures.new(),
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
discoveryLoopSleep: discoveryLoopSleep,
minPeersPerBlock: minPeersPerBlock,
maxPeersPerBlock: maxPeersPerBlock,
)

View File

@ -0,0 +1,290 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/[options, random, sets]
import pkg/chronos
import pkg/libp2p/cid
import pkg/libp2p/peerid
import ./scheduler
import ./swarm
import ../peers/peercontext
import ../../manifest
import ../../storagetypes
import ../../blocktype
import ../protocol/constants
import ../utils
export scheduler, peercontext, manifest
const
PresenceWindowBytes*: uint64 = 1024 * 1024 * 1024
PresenceWindowBlocks*: uint64 = PresenceWindowBytes div DefaultBlockSize.uint64
MaxPresenceWindowBlocks*: uint64 = PresenceWindowBytes div MinBlockSize
PresenceWindowThreshold*: float = 0.75
PresenceBroadcastIntervalMin*: Duration = 5.seconds
PresenceBroadcastIntervalMax*: Duration = 10.seconds
PresenceBroadcastBlockThreshold*: uint64 = PresenceWindowBlocks div 2
static:
const
worstCaseRanges = MaxPresenceWindowBlocks div 2
worstCasePresenceBytes = worstCaseRanges * 16 + 1024 # +1KB safe overhead
doAssert worstCasePresenceBytes < MaxMessageSize,
"Presence window too large for MaxMessageSize with minimum block size. " &
"Worst case: " & $worstCasePresenceBytes & " bytes, limit: " & $MaxMessageSize &
" bytes"
type
DownloadProgress* = object
blocksCompleted*: uint64
totalBlocks*: uint64
bytesTransferred*: uint64
DownloadDesc* = object
md*: ManifestDescriptor
startIndex*: uint64
count*: uint64
selectionPolicy*: SelectionPolicy
isBackground*: bool
fetchLocal*: bool
BroadcastAvailabilityTracker = object
case policy: SelectionPolicy
of spSequential:
lastBroadcastedWatermark: uint64
broadcastedOutOfOrder: HashSet[uint64]
pendingOOOSnapshot: HashSet[uint64]
lastBroadcastTime: Moment
broadcastInterval: Duration
of spRandomWindow:
pendingRanges: seq[tuple[start: uint64, count: uint64]]
DownloadContext* = ref object
md*: ManifestDescriptor
totalBlocks*: uint64
received*: uint64
blocksReturned*: uint64
bytesReceived*: uint64
scheduler*: Scheduler
swarm*: Swarm
availabilityTracker: BroadcastAvailabilityTracker
proc computeWindowSize*(blockSize: uint32): uint64 =
result = PresenceWindowBytes div blockSize.uint64
if result == 0:
result = 1
proc randomBroadcastInterval(): Duration =
rand(
PresenceBroadcastIntervalMin.milliseconds.int ..
PresenceBroadcastIntervalMax.milliseconds.int
).milliseconds
proc hasNewOutOfOrder(t: BroadcastAvailabilityTracker, scheduler: Scheduler): bool =
for batchStart in scheduler.completedOutOfOrderItems:
if batchStart notin t.broadcastedOutOfOrder:
return true
false
proc shouldBroadcast(t: BroadcastAvailabilityTracker, scheduler: Scheduler): bool =
  ## Decides whether an availability broadcast is currently due.
  ##
  ## Random-window policy: broadcast whenever any range is pending.
  ## Sequential policy: broadcast when enough new blocks accumulated, the
  ## jittered interval elapsed, or a new out-of-order batch completed.
  case t.policy
  of spRandomWindow:
    t.pendingRanges.len > 0
  of spSequential:
    let
      freshBlocks = scheduler.completedWatermark() - t.lastBroadcastedWatermark
      freshOutOfOrder = t.hasNewOutOfOrder(scheduler)
    if freshBlocks == 0 and not freshOutOfOrder:
      return false
    let elapsed = Moment.now() - t.lastBroadcastTime
    freshOutOfOrder or freshBlocks >= PresenceBroadcastBlockThreshold or
      elapsed >= t.broadcastInterval
proc getRanges(
    t: var BroadcastAvailabilityTracker, scheduler: Scheduler
): seq[tuple[start: uint64, count: uint64]] =
  ## Ranges to include in the next availability broadcast.
  ##
  ## Sequential: one contiguous range covering blocks completed since the
  ## last broadcast, plus one range per not-yet-announced out-of-order
  ## batch. Those batches are snapshotted so `markBroadcasted` can record
  ## exactly what was sent.
  case t.policy
  of spRandomWindow:
    result = t.pendingRanges
  of spSequential:
    result = @[]
    let watermark = scheduler.completedWatermark()
    if watermark > t.lastBroadcastedWatermark:
      result.add(
        (
          start: t.lastBroadcastedWatermark,
          count: watermark - t.lastBroadcastedWatermark,
        )
      )
    t.pendingOOOSnapshot.clear()
    for batchStart in scheduler.completedOutOfOrderItems:
      if batchStart notin t.broadcastedOutOfOrder:
        result.add((start: batchStart, count: scheduler.batchSizeCount))
        t.pendingOOOSnapshot.incl(batchStart)
proc markBroadcasted(t: var BroadcastAvailabilityTracker, scheduler: Scheduler) =
  ## Records that the ranges returned by `getRanges` have been sent.
  case t.policy
  of spRandomWindow:
    t.pendingRanges.setLen(0)
  of spSequential:
    let watermark = scheduler.completedWatermark()
    # Promote the snapshotted out-of-order batches to "announced".
    for pending in t.pendingOOOSnapshot:
      t.broadcastedOutOfOrder.incl(pending)
    # Batches now below the watermark are implied by the contiguous range
    # and no longer need individual tracking.
    var stale: seq[uint64] = @[]
    for announced in t.broadcastedOutOfOrder:
      if announced < watermark:
        stale.add(announced)
    for announced in stale:
      t.broadcastedOutOfOrder.excl(announced)
    t.lastBroadcastedWatermark = watermark
    t.lastBroadcastTime = Moment.now()
    t.broadcastInterval = randomBroadcastInterval()
proc addPendingRange(
    t: var BroadcastAvailabilityTracker, range: tuple[start: uint64, count: uint64]
) =
  ## Queues a window range for the next broadcast. Only meaningful for
  ## the random-window policy; sequential derives its ranges from the
  ## watermark instead.
  if t.policy == spRandomWindow:
    t.pendingRanges.add(range)
proc currentPresenceWindow*(ctx: DownloadContext): tuple[start: uint64, count: uint64] =
  ## Presence window currently advertised by the scheduler.
  ctx.scheduler.currentPresenceWindow()
proc needsNextPresenceWindow*(ctx: DownloadContext): bool =
  ## Whether the scheduler wants the presence window advanced.
  ctx.scheduler.needsNextPresenceWindow()
proc advancePresenceWindow*(ctx: DownloadContext): tuple[start: uint64, count: uint64] =
  ## Advances to the next presence window and returns it. The window
  ## being left is queued for availability broadcast first (no-op for
  ## the sequential policy).
  ctx.availabilityTracker.addPendingRange(ctx.scheduler.currentPresenceWindow())
  discard ctx.scheduler.advancePresenceWindow()
  ctx.scheduler.currentPresenceWindow()
proc blockSize*(ctx: DownloadContext): uint32 =
  ## Block size taken from the manifest.
  ctx.md.manifest.blockSize.uint32
proc new*(
    T: type DownloadContext, desc: DownloadDesc, missingBlocks: seq[uint64] = @[]
): DownloadContext =
  ## Builds a DownloadContext for `desc` and initializes its scheduler
  ## according to the selection policy.
  ##
  ## When `missingBlocks` is non-empty, only those indices are scheduled
  ## (sequential policy; used to resume partial downloads).
  doAssert desc.md != nil, "ManifestDescriptor must be provided"
  let blockSize = desc.md.manifest.blockSize.uint32
  doAssert blockSize > 0, "blockSize must be known at download creation"
  let
    totalBlocks = desc.startIndex + desc.count
    batchSize = computeBatchSize(blockSize)
    windowSize = computeWindowSize(blockSize)
  result = DownloadContext(
    md: desc.md,
    totalBlocks: totalBlocks,
    scheduler: Scheduler.new(),
    swarm: Swarm.new(),
  )
  case desc.selectionPolicy
  of spSequential:
    result.availabilityTracker = BroadcastAvailabilityTracker(
      policy: spSequential,
      lastBroadcastedWatermark: 0,
      broadcastedOutOfOrder: initHashSet[uint64](),
      pendingOOOSnapshot: initHashSet[uint64](),
      lastBroadcastTime: Moment.now(),
      broadcastInterval: randomBroadcastInterval(),
    )
    if missingBlocks.len > 0:
      # resume: schedule only the listed indices
      result.scheduler.initFromIndices(
        missingBlocks, batchSize.uint64, windowSize, PresenceWindowThreshold
      )
    elif desc.count > batchSize.uint64:
      if desc.startIndex == 0:
        result.scheduler.init(
          desc.count, batchSize.uint64, windowSize, PresenceWindowThreshold
        )
      else:
        result.scheduler.initRange(
          desc.startIndex, desc.count, batchSize.uint64, windowSize,
          PresenceWindowThreshold,
        )
    else:
      # small downloads (at most one batch): schedule as explicit indices
      var indices: seq[uint64] = @[]
      for i in desc.startIndex ..< desc.startIndex + desc.count:
        indices.add(i)
      result.scheduler.initFromIndices(
        indices, batchSize.uint64, windowSize, PresenceWindowThreshold
      )
  of spRandomWindow:
    result.availabilityTracker = BroadcastAvailabilityTracker(policy: spRandomWindow)
    result.scheduler.initRandomWindows(totalBlocks, batchSize.uint64, windowSize)
proc isComplete*(ctx: DownloadContext): bool =
  ## Download finished: every block was either returned to the consumer
  ## or received from the network.
  ctx.blocksReturned >= ctx.totalBlocks or ctx.received >= ctx.totalBlocks
proc markBlockReturned*(ctx: DownloadContext) =
  # mark that a block was returned to the consumer by the iterator
  ctx.blocksReturned += 1
proc markBatchReceived*(
    ctx: DownloadContext, start: uint64, count: uint64, totalBytes: uint64
) =
  ## Accounts for a received batch. `start` is currently unused here but
  ## kept for symmetry with the callers' batch identity.
  ctx.received += count
  ctx.bytesReceived += totalBytes
proc trimPresenceBeforeWatermark*(ctx: DownloadContext) =
  ## Drops per-peer availability ranges that lie entirely below the
  ## scheduler's completed watermark; those blocks are done and no longer
  ## need to be tracked.
  let watermark = ctx.scheduler.completedWatermark()
  for peerId in ctx.swarm.connectedPeers():
    let peerOpt = ctx.swarm.getPeer(peerId)
    if peerOpt.isNone:
      continue
    let peer = peerOpt.get()
    # only trim range-based availability
    if peer.availability.kind != bakRanges:
      continue
    var kept: seq[tuple[start: uint64, count: uint64]] = @[]
    for (rangeStart, rangeCount) in peer.availability.ranges:
      # keep any range that still reaches past the watermark
      if rangeStart + rangeCount > watermark:
        kept.add((rangeStart, rangeCount))
    peer.availability = BlockAvailability.fromRanges(kept)
proc shouldBroadcastAvailability*(ctx: DownloadContext): bool =
  ## Whether an availability broadcast is currently due.
  ctx.availabilityTracker.shouldBroadcast(ctx.scheduler)
proc getAvailabilityBroadcast*(
    ctx: DownloadContext
): seq[tuple[start: uint64, count: uint64]] =
  ## Ranges to include in the next availability broadcast.
  ctx.availabilityTracker.getRanges(ctx.scheduler)
proc markAvailabilityBroadcasted*(ctx: DownloadContext) =
  ## Records that the pending availability broadcast was sent.
  ctx.availabilityTracker.markBroadcasted(ctx.scheduler)
proc batchBytes*(ctx: DownloadContext): uint64 =
  ## Size of one full batch in bytes.
  ctx.scheduler.batchSizeCount.uint64 * ctx.blockSize.uint64
proc batchTimeout*(
    ctx: DownloadContext, peer: PeerContext, batchCount: uint64
): Duration =
  ## Per-peer timeout for requesting `batchCount` blocks, based on the
  ## peer's observed throughput.
  peer.batchTimeout(batchCount * ctx.blockSize.uint64)
proc progress*(ctx: DownloadContext): DownloadProgress =
  ## Snapshot of the download's progress for reporting.
  result = DownloadProgress(
    blocksCompleted: ctx.received,
    totalBlocks: ctx.totalBlocks,
    bytesTransferred: ctx.bytesReceived,
  )
proc remainingBlocks(ctx: DownloadContext): uint64 =
  ## Blocks still outstanding; clamps at zero so the unsigned subtraction
  ## cannot wrap if `received` ever overshoots `totalBlocks`.
  if ctx.received >= ctx.totalBlocks:
    0'u64
  else:
    ctx.totalBlocks - ctx.received

View File

@ -0,0 +1,179 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/[tables, sets, options]
import pkg/chronos
import pkg/libp2p
import pkg/questionable
import ../utils
import ../../blocktype
import ../../logutils
import ./activedownload
import ./peertracker
export activedownload
logScope:
  topics = "storage downloadmanager"
const
  DefaultBlockRetries* = 300 ## default per-block retry budget
  DefaultRetryInterval* = 2.seconds ## default wait between retries
type DownloadManager* = ref object of RootObj
  ## Registry of active downloads, keyed by tree CID and then download id.
  nextDownloadId*: uint64 = 1 # 0 is invalid
  maxBlockRetries*: int
  retryInterval*: Duration
  downloads*: Table[Cid, Table[uint64, ActiveDownload]]
  peerTracker*: PeerInFlightTracker # peer-wide in-flight tracking
proc getDownload*(self: DownloadManager, treeCid: Cid): Option[ActiveDownload] =
  ## Returns an arbitrary active download for `treeCid` (table iteration
  ## order), or none when no download exists for that tree.
  result = none(ActiveDownload)
  self.downloads.withValue(treeCid, perTree):
    for download in perTree[].values:
      return some(download)
proc getBackgroundDownload*(
    self: DownloadManager, treeCid: Cid
): Option[ActiveDownload] =
  ## Returns the first background download registered for `treeCid`, or
  ## none when there is no background download for that tree.
  result = none(ActiveDownload)
  self.downloads.withValue(treeCid, perTree):
    for download in perTree[].values:
      if download.isBackground:
        return some(download)
proc getDownload*(
    self: DownloadManager, downloadId: uint64, treeCid: Cid
): Option[ActiveDownload] =
  ## Looks up a specific download by (`downloadId`, `treeCid`).
  result = none(ActiveDownload)
  self.downloads.withValue(treeCid, perTree):
    perTree[].withValue(downloadId, download):
      result = some(download[])
proc cancelDownload*(self: DownloadManager, download: ActiveDownload) =
  ## Cancels `download`: flags it cancelled, cancels pending batch
  ## timeout/request futures, fails every outstanding block handle and
  ## the completion future with CancelledError, then unregisters it from
  ## the manager (removing the per-tree table when it becomes empty).
  download.cancelled = true
  for _, batch in download.pendingBatches:
    if not batch.timeoutFuture.isNil and not batch.timeoutFuture.finished:
      batch.timeoutFuture.cancelSoon()
    if not batch.requestFuture.isNil and not batch.requestFuture.finished:
      batch.requestFuture.cancelSoon()
  for address, req in download.blocks:
    if not req.handle.finished:
      req.handle.fail(newException(CancelledError, "Download cancelled"))
    if not req.opaqueHandle.finished:
      req.opaqueHandle.fail(newException(CancelledError, "Download cancelled"))
  download.blocks.clear()
  if not download.completionFuture.finished:
    download.completionFuture.fail(newException(CancelledError, "Download cancelled"))
  self.downloads.withValue(download.treeCid, innerTable):
    innerTable[].del(download.id)
    if innerTable[].len == 0:
      self.downloads.del(download.treeCid)
proc cancelDownload*(self: DownloadManager, treeCid: Cid) =
  ## Cancels every active download registered for `treeCid`.
  self.downloads.withValue(treeCid, perTree):
    # Snapshot first: cancelDownload mutates the table we'd be iterating.
    var snapshot: seq[ActiveDownload] = @[]
    for download in perTree[].values:
      snapshot.add(download)
    for download in snapshot:
      self.cancelDownload(download)
proc releaseDownload*(self: DownloadManager, downloadId: uint64, treeCid: Cid) =
  ## Cancels the download identified by (`downloadId`, `treeCid`), if any.
  let found = self.getDownload(downloadId, treeCid)
  if found.isSome:
    self.cancelDownload(found.unsafeGet())
proc cancelBackgroundDownload*(
    self: DownloadManager, downloadId: uint64, treeCid: Cid
): bool =
  ## Cancels the given download only when it is a background download;
  ## returns true when something was actually cancelled.
  result = false
  let found = self.getDownload(downloadId, treeCid)
  if found.isSome:
    let download = found.unsafeGet()
    if download.isBackground:
      self.cancelDownload(download)
      result = true
proc getNextBatch*(
    self: DownloadManager, download: ActiveDownload
): Option[tuple[start: uint64, count: uint64]] =
  ## Pops the next batch to request from the download's scheduler, or
  ## none when the scheduler currently has nothing to hand out.
  ##
  ## `Scheduler.take()` already returns `Option[tuple[start, count:
  ## uint64]]` (`BlockBatch` is a plain tuple alias, and Nim tuples are
  ## structurally typed), so the previous unwrap-and-rewrap of the
  ## option was redundant.
  download.ctx.scheduler.take()
proc startDownload*(
    self: DownloadManager, desc: DownloadDesc, missingBlocks: seq[uint64] = @[]
): ActiveDownload =
  ## Creates, registers and returns a new ActiveDownload for `desc`.
  ## A non-empty `missingBlocks` restricts the schedule to those indices
  ## (used to resume partial downloads).
  let
    ctx = DownloadContext.new(desc, missingBlocks)
    downloadId = self.nextDownloadId
  self.nextDownloadId += 1
  let download = ActiveDownload(
    id: downloadId,
    ctx: ctx,
    blocks: initTable[BlockAddress, BlockReq](),
    pendingBatches: initTable[uint64, PendingBatch](),
    exhaustedBlocks: initHashSet[BlockAddress](),
    maxBlockRetries: self.maxBlockRetries,
    retryInterval: self.retryInterval,
    isBackground: desc.isBackground,
    fetchLocal: desc.fetchLocal,
    completionFuture:
      Future[?!void].Raising([CancelledError]).init("ActiveDownload.completion"),
  )
  # downloads are keyed by tree CID first, then by download id
  self.downloads.mgetOrPut(
    desc.md.manifest.treeCid, initTable[uint64, ActiveDownload]()
  )[downloadId] = download
  trace "Started download",
    treeCid = desc.md.manifest.treeCid,
    startIndex = desc.startIndex,
    count = desc.count,
    batchSize = ctx.scheduler.batchSizeCount
  return download
proc getDownloadProgress*(
    self: DownloadManager, treeCid: Cid
): Option[DownloadProgress] =
  ## Progress of any active download for `treeCid`, if one exists.
  let found = self.getDownload(treeCid)
  if found.isSome:
    some(found.unsafeGet().ctx.progress())
  else:
    none(DownloadProgress)
proc getDownloadProgress*(
    self: DownloadManager, downloadId: uint64, treeCid: Cid
): Option[DownloadProgress] =
  ## Progress of one specific download, if it exists.
  let found = self.getDownload(downloadId, treeCid)
  if found.isSome:
    some(found.unsafeGet().ctx.progress())
  else:
    none(DownloadProgress)
proc new*(
    T: type DownloadManager,
    retries = DefaultBlockRetries,
    interval = DefaultRetryInterval,
): DownloadManager =
  ## Manager with the given per-block retry budget and retry interval.
  DownloadManager(
    maxBlockRetries: retries,
    retryInterval: interval,
    downloads: initTable[Cid, Table[uint64, ActiveDownload]](),
    peerTracker: PeerInFlightTracker.new(),
  )

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,52 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/[tables, sequtils]
import pkg/chronos
import pkg/libp2p/peerid
const SweepRuntimeQuota* = 5.milliseconds ## max wall time sweep() runs before yielding
type PeerInFlightTracker* = ref object
  ## Tracks outstanding request futures per peer; finished futures are
  ## pruned lazily whenever the count is queried.
  peerInFlight*: Table[PeerId, seq[Future[void]]]
  # track in-flight requests per peer for BDP - used as self-cleaning counter
proc new*(T: type PeerInFlightTracker): PeerInFlightTracker =
  ## Empty tracker.
  PeerInFlightTracker(peerInFlight: initTable[PeerId, seq[Future[void]]]())
proc count*(self: PeerInFlightTracker, peerId: PeerId): int =
  ## Number of still-live in-flight futures for `peerId`. As a side
  ## effect, finished futures are pruned; a peer whose list becomes empty
  ## is removed from the table entirely.
  self.peerInFlight.withValue(peerId, futures):
    var active: seq[Future[void]]
    for fut in futures[]:
      if not fut.finished:
        active.add(fut)
    if active.len != futures[].len:
      # something finished since the last look: shrink or drop the entry
      if active.len == 0:
        self.peerInFlight.del(peerId)
      else:
        self.peerInFlight[peerId] = active
    return active.len
  return 0
proc track*(self: PeerInFlightTracker, peerId: PeerId, fut: Future[void]) =
  ## Registers an in-flight request future for `peerId`.
  self.peerInFlight.mgetOrPut(peerId, @[]).add(fut)
proc clearPeer*(self: PeerInFlightTracker, peerId: PeerId) =
  ## Drops all tracking state for `peerId` (e.g. on disconnect).
  self.peerInFlight.del(peerId)
proc sweep*(self: PeerInFlightTracker) {.async: (raises: [CancelledError]).} =
  ## Prunes finished futures for every tracked peer. Yields to the event
  ## loop whenever more than SweepRuntimeQuota of wall time has been
  ## spent, so sweeping a large table does not starve other tasks.
  var lastIdle = Moment.now()
  # iterate over a snapshot of the keys: count() may delete entries
  for peerId in self.peerInFlight.keys.toSeq:
    discard self.count(peerId)
    if (Moment.now() - lastIdle) >= SweepRuntimeQuota:
      await idleAsync()
      lastIdle = Moment.now()

View File

@ -1,218 +0,0 @@
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/tables
import std/monotimes
import std/strutils
import pkg/chronos
import pkg/libp2p
import pkg/metrics
import ../protobuf/blockexc
import ../../blocktype
import ../../logutils
logScope:
  topics = "storage pendingblocks"
declareGauge(
  storage_block_exchange_pending_block_requests,
  "storage blockexchange pending block requests",
)
declareGauge(
  storage_block_exchange_retrieval_time_us,
  "storage blockexchange block retrieval time us",
)
const
  DefaultBlockRetries* = 3000 ## default per-block retry budget
  DefaultRetryInterval* = 2.seconds ## default wait between retries
type
  RetriesExhaustedError* = object of CatchableError
    ## Raised through a block handle when its retry budget is used up.
  BlockHandle* = Future[Block].Raising([CancelledError, RetriesExhaustedError])
  BlockReq* = object
    ## One pending block request.
    handle*: BlockHandle
    requested*: ?PeerId # peer this block was requested from, if any
    blockRetries*: int # retries left
    startTime*: int64 # monotonic ticks at request creation (for metrics)
  PendingBlocksManager* = ref object of RootObj
    ## Registry of pending block requests, keyed by block address.
    blockRetries*: int = DefaultBlockRetries
    retryInterval*: Duration = DefaultRetryInterval
    blocks*: Table[BlockAddress, BlockReq] # pending Block requests
    lastInclusion*: Moment # time at which we last included a block into our wantlist
proc updatePendingBlockGauge(p: PendingBlocksManager) =
  ## Publishes the current number of pending requests as a gauge metric.
  storage_block_exchange_pending_block_requests.set(p.blocks.len.int64)
proc getWantHandle*(
    self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
  ## Returns the future that resolves when the block at `address`
  ## arrives. Requests for the same address share one future; the entry
  ## removes itself from the table on completion or cancellation.
  ##
  self.blocks.withValue(address, blk):
    # already pending: share the existing handle
    return blk[].handle
  do:
    let blk = BlockReq(
      handle: newFuture[Block]("pendingBlocks.getWantHandle"),
      requested: requested,
      blockRetries: self.blockRetries,
      startTime: getMonoTime().ticks,
    )
    self.blocks[address] = blk
    self.lastInclusion = Moment.now()
    let handle = blk.handle
    # self-cleaning: remove the entry once the handle settles
    proc cleanUpBlock(data: pointer) {.raises: [].} =
      self.blocks.del(address)
      self.updatePendingBlockGauge()
    handle.addCallback(cleanUpBlock)
    handle.cancelCallback = proc(data: pointer) {.raises: [].} =
      if not handle.finished:
        handle.removeCallback(cleanUpBlock)
        cleanUpBlock(nil)
    self.updatePendingBlockGauge()
    return handle
proc getWantHandle*(
    self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
  ## Convenience overload keyed by CID instead of a BlockAddress.
  self.getWantHandle(BlockAddress.init(cid), requested)
proc completeWantHandle*(
    self: PendingBlocksManager, address: BlockAddress, blk: Block
) {.raises: [].} =
  ## Complete a pending want handle
  ##
  ## No-op (with a trace) when there is no pending handle for `address`
  ## or it has already finished.
  self.blocks.withValue(address, blockReq):
    if not blockReq[].handle.finished:
      trace "Completing want handle from provided block", address
      blockReq[].handle.complete(blk)
    else:
      trace "Want handle already completed", address
  do:
    trace "No pending want handle found for address", address
proc resolve*(
    self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, raises: [].} =
  ## Resolve pending blocks
  ##
  ## Completes the pending handle for each delivered block and records
  ## the retrieval latency metric; deliveries without a pending (or with
  ## an already-finished) handle are ignored.
  for bd in blocksDelivery:
    self.blocks.withValue(bd.address, blockReq):
      if not blockReq[].handle.finished:
        trace "Resolving pending block", address = bd.address
        let
          startTime = blockReq[].startTime
          stopTime = getMonoTime().ticks
          retrievalDurationUs = (stopTime - startTime) div 1000
        blockReq.handle.complete(bd.blk)
        storage_block_exchange_retrieval_time_us.set(retrievalDurationUs)
      else:
        trace "Block handle already finished", address = bd.address
func retries*(self: PendingBlocksManager, address: BlockAddress): int =
  ## Remaining retry budget for `address` (0 when not pending).
  self.blocks.withValue(address, pending):
    result = pending[].blockRetries
  do:
    result = 0
func decRetries*(self: PendingBlocksManager, address: BlockAddress) =
  ## Consumes one retry for `address`; no-op when not pending.
  self.blocks.withValue(address, pending):
    pending[].blockRetries -= 1
func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool =
  ## True when the retry budget for `address` is used up.
  self.blocks.withValue(address, pending):
    result = pending[].blockRetries <= 0
func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool =
  ## Check if a block has been requested to a peer
  ##
  result = false
  self.blocks.withValue(address, pending):
    result = pending[].requested.isSome
func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId =
  ## Returns the peer that requested this block
  ##
  result = PeerId.none
  self.blocks.withValue(address, pending):
    result = pending[].requested
proc markRequested*(
    self: PendingBlocksManager, address: BlockAddress, peer: PeerId
): bool =
  ## Marks this block as having been requested to a peer
  ##
  ## Returns false when the block is already marked as requested.
  ## NOTE(review): when `address` is not pending at all, nothing is
  ## recorded yet true is still returned - confirm this is intended.
  if self.isRequested(address):
    return false
  self.blocks.withValue(address, pending):
    pending[].requested = peer.some
  return true
proc clearRequest*(
    self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none
) =
  ## Clears the requested-peer marker for `address`. When `peer` is
  ## given, asserts (debug builds only) that it matches the recorded one.
  self.blocks.withValue(address, pending):
    if peer.isSome:
      assert peer == pending[].requested
    pending[].requested = PeerId.none
func contains*(self: PendingBlocksManager, cid: Cid): bool =
  ## True when a request keyed by `cid` (as a plain block address) is
  ## pending.
  BlockAddress.init(cid) in self.blocks
func contains*(self: PendingBlocksManager, address: BlockAddress): bool =
  ## True when a request for `address` is pending.
  address in self.blocks
iterator wantList*(self: PendingBlocksManager): BlockAddress =
  ## All pending block addresses.
  for a in self.blocks.keys:
    yield a
iterator wantListBlockCids*(self: PendingBlocksManager): Cid =
  ## CIDs of pending non-leaf requests.
  for a in self.blocks.keys:
    if not a.leaf:
      yield a.cid
iterator wantListCids*(self: PendingBlocksManager): Cid =
  ## Distinct CIDs across all pending requests (block or tree CIDs).
  # NOTE(review): initHashSet needs std/sets, which is not imported
  # directly in this module - presumably re-exported by a dependency;
  # verify.
  var yieldedCids = initHashSet[Cid]()
  for a in self.blocks.keys:
    let cid = a.cidOrTreeCid
    if cid notin yieldedCids:
      yieldedCids.incl(cid)
      yield cid
iterator wantHandles*(self: PendingBlocksManager): Future[Block] =
  ## Futures of every pending request.
  for v in self.blocks.values:
    yield v.handle
proc wantListLen*(self: PendingBlocksManager): int =
  ## Number of pending requests.
  self.blocks.len
func len*(self: PendingBlocksManager): int =
  ## Number of pending requests.
  self.blocks.len
func new*(
    T: type PendingBlocksManager,
    retries = DefaultBlockRetries,
    interval = DefaultRetryInterval,
): PendingBlocksManager =
  ## Manager with the given retry budget and retry interval.
  PendingBlocksManager(blockRetries: retries, retryInterval: interval)

View File

@ -0,0 +1,479 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/[algorithm, deques, sets, tables, options, random, bitops, math]
type
  BlockBatch* = tuple[start: uint64, count: uint64]
    ## Half-open run of blocks [start, start + count).
  SelectionPolicy* = enum
    spSequential
    spRandomWindow
  SequentialWindowCursor = object
    ## Walks windows front-to-back; advances once the completed
    ## watermark crosses `advanceThreshold` of the current window.
    advanceThreshold: float
  RandomWindowCursor = object
    ## Visits every window exactly once in keyed pseudo-random order.
    totalWindows: uint64
    halfBits: uint8 # half-width of the Feistel permutation domain
    roundKeys: array[4, uint64] # per-round Feistel keys
    nextIdx: uint64 # next permutation input / progress counter
  WindowCursor = object
    ## Policy-tagged cursor over the presence windows of a download.
    windowStart: uint64
    windowSize: uint64
    totalBlocks: uint64
    case policy: SelectionPolicy
    of spSequential:
      sequential: SequentialWindowCursor
    of spRandomWindow:
      random: RandomWindowCursor
  Scheduler* = ref object
    ## Hands out block batches within a moving presence window and
    ## tracks completion, requeues and partial deliveries.
    totalBlocks: uint64
    batchSize: uint64
    nextBatchStart: uint64
    requeued: Deque[BlockBatch] # batches to retry before fresh ones
    completedWatermark: uint64 # all blocks below this are done
    completedOutOfOrder: HashSet[uint64] # batches done ahead of watermark
    inFlight: Table[uint64, uint64] # batch start -> block count
    batchRemaining: Table[uint64, uint64] # parent batch start -> remaining blocks
    windowCursor: WindowCursor
proc canAdvance(
    p: SequentialWindowCursor, windowStart, windowSize, totalBlocks: uint64
): bool =
  ## True while at least one more window-step fits before the end.
  windowStart + windowSize < totalBlocks
proc needsAdvance(
    p: SequentialWindowCursor, windowStart, windowSize, totalBlocks, watermark: uint64
): bool =
  ## The window should advance once the completed watermark crosses the
  ## `advanceThreshold` fraction of the current window.
  if not p.canAdvance(windowStart, windowSize, totalBlocks):
    return false
  let thresholdPos = windowStart + (windowSize.float * p.advanceThreshold).uint64
  watermark >= thresholdPos
proc advance(
    p: SequentialWindowCursor, windowStart, windowSize, totalBlocks: uint64
): (bool, uint64) =
  ## Returns (advanced?, new window start), clamped to totalBlocks.
  if not p.canAdvance(windowStart, windowSize, totalBlocks):
    return (false, 0)
  (true, min(windowStart + windowSize, totalBlocks))
proc permuteWindowIndex(p: RandomWindowCursor, x: uint64): uint64 =
  ## Keyed pseudo-random permutation of window indices over a
  ## power-of-two domain of 2 * halfBits bits: a 4-round Feistel network
  ## whose round function is a murmur-style 64-bit finalizer mixed with a
  ## per-round key.
  func mix(v: uint64, key: uint64): uint64 =
    # murmur3 fmix64 finalizer, keyed via xor with the round key
    var h = v xor key
    h = h xor (h shr 33)
    h = h * 0xff51afd7ed558ccd'u64
    h = h xor (h shr 33)
    h = h * 0xc4ceb9fe1a85ec53'u64
    h = h xor (h shr 33)
    h
  let halfMask = (1'u64 shl p.halfBits) - 1
  var
    hi = (x shr p.halfBits) and halfMask
    lo = x and halfMask
  for round in 0 .. 3:
    let scrambled = mix(lo, p.roundKeys[round]) and halfMask
    (hi, lo) = (lo, hi xor scrambled)
  (hi shl p.halfBits) or lo
proc pickNext(p: var RandomWindowCursor): uint64 =
  ## Next window index in the keyed random permutation.
  ##
  ## Uses cycle-walking: the Feistel permutation covers a power-of-two
  ## domain that may exceed `totalWindows`, so out-of-range outputs are
  ## re-permuted until one lands inside the valid range.
  if p.totalWindows <= 1:
    p.nextIdx += 1
    return 0
  var x = p.nextIdx
  while true:
    let permuted = p.permuteWindowIndex(x)
    x = permuted
    if permuted < p.totalWindows:
      p.nextIdx += 1
      return permuted
proc isDone(p: RandomWindowCursor): bool =
  ## All window indices have been handed out.
  p.nextIdx >= p.totalWindows
proc advance(p: var RandomWindowCursor, windowSize: uint64): (bool, uint64) =
  ## Returns (advanced?, start block of the next randomly-chosen window).
  if p.isDone:
    return (false, 0)
  let windowIdx = p.pickNext()
  (true, windowIdx * windowSize)
proc currentWindow(p: WindowCursor): tuple[start: uint64, count: uint64] =
  ## Current window, clipped so it never extends past totalBlocks.
  (start: p.windowStart, count: min(p.windowSize, p.totalBlocks - p.windowStart))
proc isDone(p: WindowCursor): bool =
  ## No further windows can be opened under the active policy.
  case p.policy
  of spSequential:
    not p.sequential.canAdvance(p.windowStart, p.windowSize, p.totalBlocks)
  of spRandomWindow:
    p.random.isDone
proc canAdvance(p: WindowCursor): bool =
  not p.isDone
proc needsAdvance(p: WindowCursor, watermark: uint64): bool =
  ## Whether completed progress warrants moving the window forward;
  ## random windows advance only on explicit request.
  case p.policy
  of spSequential:
    p.sequential.needsAdvance(p.windowStart, p.windowSize, p.totalBlocks, watermark)
  of spRandomWindow:
    false
proc advance(p: var WindowCursor): bool =
  ## Moves to the next window; returns false when exhausted.
  let (ok, newStart) =
    case p.policy
    of spSequential:
      p.sequential.advance(p.windowStart, p.windowSize, p.totalBlocks)
    of spRandomWindow:
      p.random.advance(p.windowSize)
  if ok:
    p.windowStart = newStart
  ok
proc initSequentialWindowCursor(
    totalBlocks: uint64, windowSize: uint64, advanceThreshold: float
): WindowCursor =
  ## Cursor that walks windows front-to-back, advancing once the
  ## watermark passes `advanceThreshold` of the current window.
  WindowCursor(
    policy: spSequential,
    windowStart: 0,
    windowSize: windowSize,
    totalBlocks: totalBlocks,
    sequential: SequentialWindowCursor(advanceThreshold: advanceThreshold),
  )
proc initRandomWindowCursor(totalBlocks: uint64, windowSize: uint64): WindowCursor =
  ## Cursor that visits every window exactly once in a keyed
  ## pseudo-random order (Feistel permutation + cycle walking).
  if totalBlocks == 0 or windowSize == 0:
    return WindowCursor(policy: spRandomWindow)
  var rng = initRand()
  let
    totalWindows = ceilDiv(totalBlocks, windowSize)
    seed = cast[uint64](rng.next())
  var
    rngKeys = initRand(cast[int64](seed))
    random = RandomWindowCursor(totalWindows: totalWindows)
  if totalWindows <= 1:
    random.halfBits = 1
  else:
    # size halfBits so the Feistel domain (2^(2*halfBits)) covers all
    # window indices
    let bits = fastLog2(int(totalWindows - 1)) + 1
    random.halfBits = max(1'u8, uint8((bits + 1) div 2))
  for i in 0 .. 3:
    random.roundKeys[i] = rngKeys.next().uint64
  # pick the first window up-front so windowStart is valid immediately
  let windowIdx = random.pickNext()
  result = WindowCursor(
    policy: spRandomWindow,
    windowSize: windowSize,
    totalBlocks: totalBlocks,
    random: random,
  )
  result.windowStart = windowIdx * result.windowSize
proc new*(T: type Scheduler): Scheduler =
  ## Empty scheduler; call one of the init* procs before use.
  Scheduler(
    totalBlocks: 0,
    batchSize: 0,
    nextBatchStart: 0,
    requeued: initDeque[BlockBatch](),
    completedWatermark: 0,
    completedOutOfOrder: initHashSet[uint64](),
    inFlight: initTable[uint64, uint64](),
    batchRemaining: initTable[uint64, uint64](),
    windowCursor: WindowCursor(policy: spSequential),
  )
proc resetState(self: Scheduler, batchSize: uint64) =
  ## Clears all scheduling state except `totalBlocks` and the cursor.
  self.batchSize = batchSize
  self.nextBatchStart = 0
  self.completedWatermark = 0
  self.requeued.clear()
  self.completedOutOfOrder.clear()
  self.inFlight.clear()
  self.batchRemaining.clear()
proc add*(self: Scheduler, start: uint64, count: uint64) =
  ## Appends an explicit batch to the requeue list, growing `totalBlocks`
  ## to cover it and inferring `batchSize` from the first batch added.
  self.requeued.addLast((start: start, count: count))
  self.totalBlocks = max(self.totalBlocks, start + count)
  if self.batchSize == 0:
    self.batchSize = count
proc init*(
    self: Scheduler,
    totalBlocks: uint64,
    batchSize: uint64,
    windowSize: uint64,
    advanceThreshold: float,
) =
  ## Sequential download of blocks [0, totalBlocks).
  self.totalBlocks = totalBlocks
  self.resetState(batchSize)
  self.windowCursor =
    initSequentialWindowCursor(totalBlocks, windowSize, advanceThreshold)
proc initRange*(
    self: Scheduler,
    startIndex: uint64,
    count: uint64,
    batchSize: uint64,
    windowSize: uint64,
    advanceThreshold: float,
) =
  ## Sequential download of blocks [startIndex, startIndex + count); the
  ## watermark starts at `startIndex` so earlier blocks count as done.
  self.totalBlocks = startIndex + count
  self.resetState(batchSize)
  self.nextBatchStart = startIndex
  self.completedWatermark = startIndex
  self.windowCursor =
    initSequentialWindowCursor(self.totalBlocks, windowSize, advanceThreshold)
proc initFromIndices*(
    self: Scheduler,
    indices: seq[uint64],
    batchSize: uint64,
    windowSize: uint64,
    advanceThreshold: float,
) =
  ## Schedules an explicit (possibly sparse) set of block indices by
  ## coalescing consecutive runs into batches of at most `batchSize`.
  let sortedIndices = indices.sorted()
  self.totalBlocks = 0 # grown by add() below
  self.resetState(batchSize)
  var
    batchStart: uint64 = 0
    batchCount: uint64 = 0
    inBatch = false
  for blockIdx in sortedIndices:
    if not inBatch:
      # open a new run
      batchStart = blockIdx
      batchCount = 1
      inBatch = true
    elif blockIdx == batchStart + batchCount:
      # contiguous: extend the current run
      batchCount += 1
    else:
      # gap: flush the run and start a new one
      self.add(batchStart, batchCount)
      batchStart = blockIdx
      batchCount = 1
    if batchCount >= batchSize:
      # cap runs at batchSize
      self.add(batchStart, batchCount)
      inBatch = false
      batchCount = 0
  if inBatch and batchCount > 0:
    self.add(batchStart, batchCount)
  self.windowCursor =
    initSequentialWindowCursor(self.totalBlocks, windowSize, advanceThreshold)
proc initRandomWindows*(
    self: Scheduler, totalBlocks: uint64, batchSize: uint64, windowSize: uint64
) =
  ## Random-window download over [0, totalBlocks).
  self.totalBlocks = totalBlocks
  self.resetState(batchSize)
  self.windowCursor = initRandomWindowCursor(totalBlocks, windowSize)
  self.nextBatchStart = self.windowCursor.currentWindow().start
proc currentPresenceWindow*(self: Scheduler): tuple[start: uint64, count: uint64] =
  ## Window currently being requested/advertised.
  self.windowCursor.currentWindow()
proc generateNextBatchInternal(self: Scheduler): Option[BlockBatch] {.inline.} =
  ## Next fresh batch within the current window, skipping batches that
  ## are already completed or in flight.
  ## NOTE: does NOT add to inFlight - the caller must do that.
  let (windowStart, windowCount) = self.windowCursor.currentWindow()
  while self.nextBatchStart < windowStart + windowCount:
    let
      start = self.nextBatchStart
      count = min(self.batchSize, windowStart + windowCount - start)
    self.nextBatchStart = start + count
    if start < self.completedWatermark:
      continue
    if start in self.inFlight:
      continue
    if start in self.completedOutOfOrder:
      continue
    return some((start: start, count: count))
  return none(BlockBatch)
proc take*(self: Scheduler): Option[BlockBatch] =
  ## Hands out the next batch to request, preferring requeued batches
  ## over fresh window batches; the returned batch is marked in-flight.
  while self.requeued.len > 0:
    let batch = self.requeued.popFirst()
    # drop requeued entries that completed in the meantime
    if batch.start < self.completedWatermark:
      continue
    if batch.start in self.completedOutOfOrder:
      continue
    self.inFlight[batch.start] = batch.count
    return some(batch)
  let batchOpt = self.generateNextBatchInternal()
  if batchOpt.isSome:
    let batch = batchOpt.get()
    self.inFlight[batch.start] = batch.count
  return batchOpt
proc requeueBack*(self: Scheduler, start: uint64, count: uint64) {.inline.} =
  ## requeue batch at back (peer didn't have it, try later).
  ## Dropped silently when the batch completed in the meantime.
  self.inFlight.del(start)
  if start >= self.completedWatermark and start notin self.completedOutOfOrder:
    self.requeued.addLast((start: start, count: count))
proc requeueFront*(self: Scheduler, start: uint64, count: uint64) {.inline.} =
  ## requeue batch at front (failed/timed out, retry soon).
  ## Dropped silently when the batch completed in the meantime.
  self.inFlight.del(start)
  if start >= self.completedWatermark and start notin self.completedOutOfOrder:
    self.requeued.addFirst((start: start, count: count))
proc advanceWatermark(self: Scheduler, batchStart: uint64) =
  ## Moves the contiguous-completion watermark forward. A batch completing
  ## exactly at the watermark also absorbs any out-of-order completions
  ## that become contiguous; batches further ahead are parked in
  ## completedOutOfOrder instead.
  if batchStart == self.completedWatermark:
    self.completedWatermark = batchStart + self.batchSize
    while self.completedWatermark in self.completedOutOfOrder:
      self.completedOutOfOrder.excl(self.completedWatermark)
      self.completedWatermark += self.batchSize
  elif batchStart > self.completedWatermark:
    self.completedOutOfOrder.incl(batchStart)
proc findPartialParent(self: Scheduler, start: uint64): Option[uint64] =
  ## Finds the partially-completed parent batch whose
  ## [parent, parent + batchSize) span contains `start`, if any. Returns
  ## the first match in table order (spans are assumed disjoint).
  # iterate keys only: the previous pairs-loop bound an unused
  # `remaining` value
  for parent in self.batchRemaining.keys:
    if start >= parent and start < parent + self.batchSize:
      return some parent
  none(uint64)
proc onBatchCompleted(self: Scheduler, batchStart: uint64) =
  ## Policy hook for a fully completed batch; only the sequential policy
  ## maintains a watermark.
  case self.windowCursor.policy
  of spSequential:
    self.advanceWatermark(batchStart)
  of spRandomWindow:
    discard
proc markComplete*(self: Scheduler, start: uint64) =
  ## Marks an in-flight batch as fully delivered. When the batch was a
  ## re-requested slice of a partially-completed parent, the parent's
  ## remaining counter is decremented and the parent completes once it
  ## reaches zero.
  let count = self.inFlight.getOrDefault(start, 0'u64)
  self.inFlight.del(start)
  let parent = self.findPartialParent(start)
  if parent.isSome:
    self.batchRemaining.withValue(parent.get, remaining):
      remaining[] -= count
      # NOTE(review): `remaining` is uint64, so `<= 0` only matches 0 and
      # a decrement past zero would wrap - confirm `count` can never
      # exceed the remainder.
      if remaining[] <= 0:
        self.batchRemaining.del(parent.get)
        self.onBatchCompleted(parent.get)
    return
  self.onBatchCompleted(start)
proc partialComplete*(
    self: Scheduler, originalStart: uint64, missingRanges: seq[BlockBatch]
) =
  ## Records a partially delivered batch: the missing sub-ranges are
  ## requeued at the front and tracked via batchRemaining until they all
  ## arrive.
  let originalCount = self.inFlight.getOrDefault(originalStart, self.batchSize)
  self.inFlight.del(originalStart)
  var totalMissing: uint64 = 0
  for batch in missingRanges:
    totalMissing += batch.count
  let parent = self.findPartialParent(originalStart)
  if parent.isSome:
    # nested partial delivery: credit the delivered part to the parent
    let delivered = originalCount - totalMissing
    self.batchRemaining.withValue(parent.get, remaining):
      remaining[] -= delivered
  else:
    self.batchRemaining[originalStart] = totalMissing
  # addFirst in reverse keeps the missing ranges in ascending order
  for i in countdown(missingRanges.len - 1, 0):
    let batch = missingRanges[i]
    self.requeued.addFirst(batch)
proc isEmpty*(self: Scheduler): bool =
  ## True when every scheduled block completed and nothing is queued or
  ## in flight.
  case self.windowCursor.policy
  of spSequential:
    self.completedWatermark >= self.totalBlocks and self.requeued.len == 0 and
      self.inFlight.len == 0
  of spRandomWindow:
    let (start, count) = self.windowCursor.currentWindow()
    self.windowCursor.isDone and self.nextBatchStart >= start + count and
      self.requeued.len == 0 and self.inFlight.len == 0
proc needsNextPresenceWindow*(self: Scheduler): bool =
  ## Whether the presence window should move on: the current window is
  ## consumed and, for sequential, the watermark crossed the advance
  ## threshold; for random, no work remains in the current window.
  case self.windowCursor.policy
  of spSequential:
    let (windowStart, windowCount) = self.windowCursor.currentWindow()
    self.nextBatchStart >= windowStart + windowCount and
      self.windowCursor.needsAdvance(self.completedWatermark)
  of spRandomWindow:
    let (start, count) = self.windowCursor.currentWindow()
    not self.windowCursor.isDone and self.nextBatchStart >= start + count and
      self.requeued.len == 0 and self.inFlight.len == 0
proc advancePresenceWindow*(self: Scheduler): bool =
  ## Moves to the next presence window; false when already exhausted.
  if not self.windowCursor.advance():
    return false
  self.nextBatchStart = self.windowCursor.currentWindow().start
  true
proc completedWatermark*(self: Scheduler): uint64 =
  ## Contiguous-completion watermark (all blocks below it are done).
  self.completedWatermark
proc hasWork*(self: Scheduler): bool {.inline.} =
  ## True while the scheduler can still hand out batches: requeued work,
  ## unconsumed room in the current window, or further windows to open.
  if self.requeued.len > 0:
    return true
  let (winStart, winCount) = self.windowCursor.currentWindow()
  self.nextBatchStart < winStart + winCount or self.windowCursor.canAdvance()
proc requeuedCount*(self: Scheduler): int {.inline.} =
  ## Number of batches currently waiting in the requeue list.
  self.requeued.len
proc pending*(self: Scheduler): seq[BlockBatch] =
  ## Snapshot of the requeued batches, front to back.
  result = newSeqOfCap[BlockBatch](self.requeued.len)
  for batch in self.requeued:
    result.add(batch)
proc clear*(self: Scheduler) =
  ## Resets the scheduler to its pristine, uninitialized state.
  self.totalBlocks = 0
  self.resetState(0)
  self.windowCursor = WindowCursor(policy: spSequential)
proc totalBlockCount*(self: Scheduler): uint64 =
  ## Total number of blocks this scheduler covers.
  self.totalBlocks
proc batchSizeCount*(self: Scheduler): uint64 =
  ## Blocks per batch.
  self.batchSize
iterator completedOutOfOrderItems*(self: Scheduler): uint64 =
  ## Start indices of batches completed ahead of the watermark.
  for batchStart in self.completedOutOfOrder:
    yield batchStart
func batchEnd*(batch: BlockBatch): uint64 =
  ## Exclusive end index of `batch`.
  ##
  ## Declared `func` (noSideEffect) since these helpers are pure; the
  ## compiler now enforces that and callers are unaffected.
  batch.start + batch.count
func contains*(batch: BlockBatch, blockIndex: uint64): bool =
  ## True when `blockIndex` falls inside `batch` (enables `idx in batch`).
  blockIndex >= batch.start and blockIndex < batch.batchEnd
func merge*(a, b: BlockBatch): Option[BlockBatch] =
  ## Union of two overlapping or adjacent batches; none when they are
  ## disjoint with a gap between them.
  if a.batchEnd < b.start or b.batchEnd < a.start:
    return none(BlockBatch)
  let
    lo = min(a.start, b.start)
    hi = max(a.batchEnd, b.batchEnd)
  some((start: lo, count: hi - lo))

View File

@ -0,0 +1,334 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/[tables, sets, options, random]
import pkg/chronos
import pkg/libp2p/peerid
import ../peers/peerctxstore
import ../peers/peerstats
import ../types
import ../../logutils
import ./peertracker
export peerctxstore, types
randomize()
logScope:
topics = "logos-storage swarm"
const
  DefaultDeltaMin* = 2 # below this many active peers, peersNeeded = shBelowMin
  DefaultDeltaMax* = 16 # hard cap on tracked peers; addPeer refuses past it
  DefaultDeltaTarget* = 8 # desired active count; under it, shBelowTarget
  PeerStaleTimeout* = 30.seconds # silence after which isStale reports true
  PeerDefaultMaxFailures*: uint32 = 2 # failure count that triggers removal
  PeerDefaultMaxTimeouts*: uint32 = 5 # timeout count that triggers removal
  ExplorationProbability* = 0.2 # chance selectByBDP picks a random capable peer
  TimeoutPenaltyWeight* = 3.0 # score penalty applied per recorded timeout
type
  SwarmPeer* = ref object
    ## Book-keeping for one peer participating in a download swarm.
    availability*: BlockAvailability # blocks the peer advertises
    lastSeen*: Moment # last observed activity; drives isStale
    availabilityUpdated*: Moment # when availability was last merged
    failureCount*: uint32 # failures since last success (reset on success)
    timeoutCount*: uint32 # timeouts since last success (reset on success)

  SwarmConfig* = object
    ## Tunables controlling swarm size and peer eviction thresholds.
    deltaMin*: int # minimum acceptable active-peer count
    deltaMax*: int # maximum number of tracked peers
    deltaTarget*: int # desired active-peer count
    maxPeerFailures*: uint32 # failure threshold for removal
    maxPeerTimeouts*: uint32 # timeout threshold for removal

  # How the active-peer count compares against the configured min/target.
  SwarmHealth* = enum
    shHealthy
    shBelowTarget
    shBelowMin

  # Outcome discriminator for selectPeerForBatch.
  PeerSelectionKind* = enum
    pskFound # a peer was selected
    pskAtCapacity # candidates exist but all pipelines are full
    pskNoPeers # no candidate peer at all

  PeerSelection* = object
    ## Result of peer selection; carries a peer only in the pskFound case.
    case kind*: PeerSelectionKind
    of pskFound:
      peer*: PeerContext
    of pskAtCapacity, pskNoPeers:
      discard

  Swarm* = ref object
    ## Tracked peer set for a download, plus a ban list of removed peers.
    config*: SwarmConfig
    peers: Table[PeerId, SwarmPeer]
    removedPeers: HashSet[PeerId] # banned via banPeer; addPeer rejects these
proc new*(T: type SwarmPeer, availability: BlockAvailability): SwarmPeer =
  ## Wrap a freshly discovered peer. Both timestamps start at "now";
  ## the failure and timeout counters start at their zero defaults.
  let firstSeen = Moment.now()
  SwarmPeer(
    availability: availability,
    lastSeen: firstSeen,
    availabilityUpdated: firstSeen,
  )
proc isStale*(peer: SwarmPeer): bool =
  ## A peer is stale when nothing was heard from it for PeerStaleTimeout.
  Moment.now() - peer.lastSeen > PeerStaleTimeout

proc touch*(peer: SwarmPeer) =
  ## Record activity from the peer, resetting its staleness clock.
  peer.lastSeen = Moment.now()

proc updateAvailability*(peer: SwarmPeer, availability: BlockAvailability) =
  ## Merge newly advertised blocks into the peer's availability and mark the
  ## peer as recently active.
  peer.availability = peer.availability.merge(availability)
  peer.availabilityUpdated = Moment.now()
  peer.touch()
proc recordFailure*(peer: SwarmPeer) =
  ## Count one failed request against this peer.
  peer.failureCount += 1

proc recordTimeout*(peer: SwarmPeer) =
  ## Count one timed-out request against this peer.
  peer.timeoutCount += 1

proc resetFailures*(peer: SwarmPeer) =
  ## Clear both counters; called when a batch succeeds.
  peer.failureCount = 0
  peer.timeoutCount = 0
proc defaultConfig*(_: type SwarmConfig): SwarmConfig =
  ## Baseline swarm configuration assembled from the module-level defaults.
  SwarmConfig(
    deltaMin: DefaultDeltaMin,
    deltaMax: DefaultDeltaMax,
    deltaTarget: DefaultDeltaTarget,
    maxPeerFailures: PeerDefaultMaxFailures,
    maxPeerTimeouts: PeerDefaultMaxTimeouts,
  )
proc new*(T: type Swarm, config: SwarmConfig = SwarmConfig.defaultConfig()): Swarm =
  ## Construct an empty swarm with the given (default-healthy) configuration.
  result = Swarm(config: config)
  result.peers = initTable[PeerId, SwarmPeer]()
  result.removedPeers = initHashSet[PeerId]()
proc addPeer*(swarm: Swarm, peerId: PeerId, availability: BlockAvailability): bool =
  ## Admit a peer into the swarm unless it was previously banned or the swarm
  ## is already at its deltaMax capacity. Returns true when admitted.
  ## NOTE(review): re-adding an already-tracked peer replaces its state
  ## (availability and counters reset) — confirm this is intended.
  if peerId in swarm.removedPeers or swarm.peers.len >= swarm.config.deltaMax:
    false
  else:
    swarm.peers[peerId] = SwarmPeer.new(availability)
    true
proc removePeer*(swarm: Swarm, peerId: PeerId): Option[SwarmPeer] =
  ## Drop a peer from the swarm, handing back its state when it was tracked.
  ## Unlike banPeer, the peer may be re-admitted later.
  let removed = swarm.peers.getOrDefault(peerId, nil)
  if removed.isNil:
    return none(SwarmPeer)
  swarm.peers.del(peerId)
  some(removed)
proc banPeer*(swarm: Swarm, peerId: PeerId) =
  ## Remove the peer and remember it in removedPeers so that addPeer
  ## refuses to re-admit it later.
  swarm.peers.del(peerId)
  swarm.removedPeers.incl(peerId)
proc getPeer*(swarm: Swarm, peerId: PeerId): Option[SwarmPeer] =
  ## Look up a tracked peer; none(SwarmPeer) when the peer is unknown.
  let peer = swarm.peers.getOrDefault(peerId, nil)
  if peer.isNil:
    none(SwarmPeer)
  else:
    some(peer)
proc updatePeerAvailability*(
    swarm: Swarm, peerId: PeerId, availability: BlockAvailability
) =
  ## Merge fresh availability info into a tracked peer; no-op for unknown peers.
  swarm.peers.withValue(peerId, peer):
    peer[].updateAvailability(availability)
proc recordPeerFailure*(swarm: Swarm, peerId: PeerId): bool =
  ## Count a failure for `peerId`. Returns true once the peer's failure count
  ## reaches the configured maximum, i.e. the peer should be removed.
  ## Unknown peers return false.
  swarm.peers.withValue(peerId, peer):
    peer[].recordFailure()
    return peer[].failureCount >= swarm.config.maxPeerFailures
  return false

proc recordPeerTimeout*(swarm: Swarm, peerId: PeerId): bool =
  ## Count a timeout for `peerId`. Returns true once the peer's timeout count
  ## reaches the configured maximum, i.e. the peer should be removed.
  ## Unknown peers return false.
  swarm.peers.withValue(peerId, peer):
    peer[].recordTimeout()
    return peer[].timeoutCount >= swarm.config.maxPeerTimeouts
  return false
proc recordBatchSuccess*(
    swarm: Swarm, peer: PeerContext, rttMicros: uint64, totalBytes: uint64
) =
  ## On a successful batch: clear the swarm peer's failure/timeout counters,
  ## refresh its staleness clock, and feed the measured RTT and byte count
  ## into the peer's performance stats.
  ## NOTE(review): the stats update runs even when the peer is no longer in
  ## swarm.peers (only the counter reset is guarded) — confirm intended.
  swarm.peers.withValue(peer.id, swarmPeer):
    swarmPeer[].resetFailures()
    swarmPeer[].touch()
  peer.stats.recordRequest(rttMicros, totalBytes)
proc activePeerCount*(swarm: Swarm): int =
  ## Number of tracked peers that are not stale.
  var live = 0
  for peer in swarm.peers.values:
    if not peer.isStale:
      inc live
  live
proc peerCount*(swarm: Swarm): int =
  ## Total number of tracked peers, stale ones included.
  swarm.peers.len
proc peersNeeded*(swarm: Swarm): SwarmHealth =
  ## Classify swarm health from the number of non-stale peers: below the
  ## configured minimum, below the target, or healthy.
  let live = swarm.activePeerCount()
  result =
    if live < swarm.config.deltaMin:
      shBelowMin
    elif live < swarm.config.deltaTarget:
      shBelowTarget
    else:
      shHealthy
proc connectedPeers*(swarm: Swarm): seq[PeerId] =
  ## IDs of every tracked peer, stale or not.
  var ids = newSeqOfCap[PeerId](swarm.peers.len)
  for peerId in swarm.peers.keys:
    ids.add(peerId)
  ids
proc peersWithRange*(swarm: Swarm, start: uint64, count: uint64): seq[PeerId] =
  ## Non-stale peers advertising every block in [start, start + count).
  var matching: seq[PeerId] = @[]
  for peerId, peer in swarm.peers:
    if peer.isStale:
      continue
    if peer.availability.hasRange(start, count):
      matching.add(peerId)
  matching
proc peersWithAnyInRange*(swarm: Swarm, start: uint64, count: uint64): seq[PeerId] =
  ## Non-stale peers advertising at least one block in [start, start + count).
  var matching: seq[PeerId] = @[]
  for peerId, peer in swarm.peers:
    if peer.isStale:
      continue
    if peer.availability.hasAnyInRange(start, count):
      matching.add(peerId)
  matching
proc staleUnknownPeers*(swarm: Swarm): seq[PeerId] =
  ## Stale peers whose availability was never learned (kind == bakUnknown).
  var unknown: seq[PeerId] = @[]
  for peerId, peer in swarm.peers:
    if peer.isStale and peer.availability.kind == bakUnknown:
      unknown.add(peerId)
  unknown
proc selectByBDP*(
    peers: seq[PeerContext],
    batchBytes: uint64,
    tracker: PeerInFlightTracker,
    penalties: var Table[PeerId, float],
    explorationProb: float = ExplorationProbability,
): Option[PeerContext] {.gcsafe, raises: [].} =
  ## Select a peer for a batch of `batchBytes` using BDP-derived scoring.
  ## Strategy, in order:
  ## 1. Prefer the least-loaded peer with no throughput data yet (untried),
  ##    provided it has a free pipeline slot.
  ## 2. With probability `explorationProb`, explore: pick a uniformly random
  ##    peer that still has a free pipeline slot.
  ## 3. Otherwise exploit: lowest `evalBDPScore` wins; ties broken at random.
  ## Returns none(PeerContext) only for an empty candidate list.
  if peers.len == 0:
    return none(PeerContext)
  if peers.len == 1:
    # NOTE(review): the single-peer shortcut skips the free-slot check the
    # other paths apply; callers currently pre-filter by capacity — confirm.
    return some(peers[0])

  # Shared capacity predicate (was duplicated across the untried and
  # exploration scans): does this peer have a free pipeline slot?
  proc hasFreeSlot(peer: PeerContext): bool =
    tracker.count(peer.id) < peer.optimalPipelineDepth(batchBytes)

  # 1) Untried peers: no throughput sample yet and a free slot.
  var untriedPeers: seq[PeerContext]
  for peer in peers:
    if peer.stats.throughputBps().isNone and hasFreeSlot(peer):
      untriedPeers.add(peer)
  if untriedPeers.len > 0:
    # Pick the least-loaded untried peer (first one wins ties).
    var
      bestPeer = untriedPeers[0]
      bestLoad = tracker.count(bestPeer.id)
    for i in 1 ..< untriedPeers.len:
      let load = tracker.count(untriedPeers[i].id)
      if load < bestLoad:
        bestLoad = load
        bestPeer = untriedPeers[i]
    return some(bestPeer)

  # 2) Exploration: occasionally sample a random peer with spare capacity so
  # slow-start peers keep getting traffic. Falls through when none qualify.
  let exploreRoll = rand(1.0)
  if exploreRoll < explorationProb:
    var peersWithCapacity: seq[PeerContext]
    for peer in peers:
      if hasFreeSlot(peer):
        peersWithCapacity.add(peer)
    if peersWithCapacity.len > 0:
      return some(peersWithCapacity[rand(peersWithCapacity.len - 1)])

  # 3) Exploitation: minimize the weighted BDP score (lower is better).
  var
    bestPeers: seq[PeerContext] = @[peers[0]]
    bestScore = peers[0].evalBDPScore(
      batchBytes, tracker.count(peers[0].id), penalties.getOrDefault(peers[0].id, 0.0)
    )
  for i in 1 ..< peers.len:
    let score = peers[i].evalBDPScore(
      batchBytes, tracker.count(peers[i].id), penalties.getOrDefault(peers[i].id, 0.0)
    )
    if score < bestScore:
      bestScore = score
      bestPeers = @[peers[i]]
    elif score == bestScore:
      bestPeers.add(peers[i])
  if bestPeers.len > 1:
    some(bestPeers[rand(bestPeers.len - 1)])
  else:
    some(bestPeers[0])
proc selectPeerForBatch*(
    swarm: Swarm,
    peers: PeerContextStore,
    start: uint64,
    count: uint64,
    batchBytes: uint64,
    tracker: PeerInFlightTracker,
): PeerSelection =
  ## Pick a peer to serve the block range [start, start + count). Peers
  ## advertising the full range are preferred; otherwise peers holding any
  ## block in the range are considered. Returns pskAtCapacity when candidates
  ## exist but all pipelines are full, pskNoPeers when nothing qualifies.
  # Build timeout penalties once; consumed by selectByBDP's scoring.
  var penalties: Table[PeerId, float]
  for peerId, swarmPeer in swarm.peers:
    if swarmPeer.timeoutCount > 0:
      penalties[peerId] = swarmPeer.timeoutCount.float * TimeoutPenaltyWeight

  # Shared resolution + selection path (was duplicated verbatim for the
  # full-range and partial-range candidate lists).
  proc pickFrom(candidates: seq[PeerId]): PeerSelection =
    var peerCtxs: seq[PeerContext]
    for peerId in candidates:
      let peer = peers.get(peerId)
      if peer.isNil:
        # peer disconnected - remove from swarm immediately
        discard swarm.removePeer(peerId)
        continue
      if tracker.count(peerId) < peer.optimalPipelineDepth(batchBytes):
        peerCtxs.add(peer)
    if peerCtxs.len == 0:
      return PeerSelection(kind: pskAtCapacity)
    let selected = selectByBDP(peerCtxs, batchBytes, tracker, penalties)
    if selected.isSome:
      return PeerSelection(kind: pskFound, peer: selected.get())
    PeerSelection(kind: pskNoPeers)

  let candidates = swarm.peersWithRange(start, count)
  if candidates.len > 0:
    return pickFrom(candidates)

  # No peer holds the whole range; fall back to partial coverage.
  let partialCandidates = swarm.peersWithAnyInRange(start, count)
  trace "No full range peers, checking partial",
    start = start, count = count, partialPeers = partialCandidates.len
  if partialCandidates.len == 0:
    return PeerSelection(kind: pskNoPeers)
  pickFrom(partialCandidates)

View File

@ -1,4 +1,5 @@
import ./network/network
import ./network/networkpeer
import ./protocol/wantblocks
export network, networkpeer
export network, networkpeer, wantblocks

View File

@ -19,12 +19,14 @@ import pkg/questionable/results
import ../../blocktype as bt
import ../../logutils
import ../protobuf/blockexc as pb
import ../types
import ../protocol/message
import ../../utils/trackedfutures
import ./networkpeer
import ../protocol/wantblocks
export networkpeer
export networkpeer, wantblocks
logScope:
topics = "storage blockexcnetwork"
@ -35,19 +37,19 @@ const
type
WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).}
BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).}
PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}
WantBlocksRequestHandlerProc* = proc(
peer: PeerId, req: WantBlocksRequest
): Future[seq[BlockDelivery]] {.async: (raises: [CancelledError]).}
BlockExcHandlers* = object
onWantList*: WantListHandler
onBlocksDelivery*: BlocksDeliveryHandler
onPresence*: BlockPresenceHandler
onWantBlocksRequest*: WantBlocksRequestHandlerProc
onPeerJoined*: PeerEventHandler
onPeerDeparted*: PeerEventHandler
onPeerDropped*: PeerEventHandler
WantListSender* = proc(
id: PeerId,
@ -57,21 +59,15 @@ type
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
rangeCount: uint64 = 0,
downloadId: uint64 = 0,
) {.async: (raises: [CancelledError]).}
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {.
async: (raises: [CancelledError])
.}
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.
async: (raises: [CancelledError])
.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
async: (raises: [CancelledError])
.}
BlockExcRequest* = object
sendWantList*: WantListSender
sendWantCancellations*: WantCancellationSender
sendBlocksDelivery*: BlocksDeliverySender
sendPresence*: PresenceSender
BlockExcNetwork* = ref object of LPProtocol
@ -97,13 +93,14 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =
return b.peerId == peer
proc send*(
b: BlockExcNetwork, id: PeerId, msg: pb.Message
b: BlockExcNetwork, id: PeerId, msg: Message
) {.async: (raises: [CancelledError]).} =
## Send message to peer
##
if not (id in b.peers):
trace "Unable to send, peer not found", peerId = id
trace "Unable to send protobuf, peer not in network.peers",
peerId = id, hasWantList = msg.wantList.entries.len > 0
return
try:
@ -136,6 +133,8 @@ proc sendWantList*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
rangeCount: uint64 = 0,
downloadId: uint64 = 0,
) {.async: (raw: true, raises: [CancelledError]).} =
## Send a want message to peer
##
@ -148,6 +147,8 @@ proc sendWantList*(
cancel: cancel,
wantType: wantType,
sendDontHave: sendDontHave,
rangeCount: rangeCount,
downloadId: downloadId,
)
),
full: full,
@ -155,30 +156,6 @@ proc sendWantList*(
b.send(id, Message(wantlist: msg))
proc sendWantCancellations*(
b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async: (raises: [CancelledError]).} =
## Informs a remote peer that we're no longer interested in a set of blocks
##
await b.sendWantList(id = id, addresses = addresses, cancel = true)
proc handleBlocksDelivery(
b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: []).} =
## Handle incoming blocks
##
if not b.handlers.onBlocksDelivery.isNil:
await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)
proc sendBlocksDelivery*(
b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async: (raw: true, raises: [CancelledError]).} =
## Send blocks to remote
##
b.send(id, pb.Message(payload: blocksDelivery))
proc handleBlockPresence(
b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
) {.async: (raises: []).} =
@ -204,9 +181,6 @@ proc rpcHandler(
if msg.wantList.entries.len > 0:
self.trackedFutures.track(self.handleWantList(peer, msg.wantList))
if msg.payload.len > 0:
self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload))
if msg.blockPresences.len > 0:
self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))
@ -234,14 +208,25 @@ proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
await self.rpcHandler(p, msg)
let wantBlocksHandler = proc(
peerId: PeerId, req: WantBlocksRequest
): Future[seq[BlockDelivery]] {.async: (raises: [CancelledError]).} =
return await self.handlers.onWantBlocksRequest(peerId, req)
# create new pubsub peer
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler, wantBlocksHandler)
debug "Created new blockexc peer", peer
self.peers[peer] = blockExcPeer
return blockExcPeer
proc sendWantBlocksRequest*(
self: BlockExcNetwork, peer: PeerId, blockRange: BlockRange
): Future[WantBlocksResult[WantBlocksResponse]] {.async: (raises: [CancelledError]).} =
let networkPeer = self.getOrCreatePeer(peer)
return await networkPeer.sendWantBlocksRequest(blockRange)
proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} =
## Dial a peer
##
@ -267,9 +252,6 @@ proc dropPeer*(
except CatchableError as error:
warn "Error attempting to disconnect from peer", peer = peer, error = error.msg
if not self.handlers.onPeerDropped.isNil:
await self.handlers.onPeerDropped(peer)
proc handlePeerJoined*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
@ -344,30 +326,19 @@ proc new*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
rangeCount: uint64 = 0,
downloadId: uint64 = 0,
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)
proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantCancellations(id, addresses)
proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlocksDelivery(id, blocksDelivery)
self.sendWantList(
id, cids, priority, cancel, wantType, full, sendDontHave, rangeCount, downloadId
)
proc sendPresence(
id: PeerId, presence: seq[BlockPresence]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlockPresence(id, presence)
self.request = BlockExcRequest(
sendWantList: sendWantList,
sendWantCancellations: sendWantCancellations,
sendBlocksDelivery: sendBlocksDelivery,
sendPresence: sendPresence,
)
self.request = BlockExcRequest(sendWantList: sendWantList, sendPresence: sendPresence)
self.init()
return self

View File

@ -11,12 +11,19 @@
import pkg/chronos
import pkg/libp2p
import pkg/stew/endians2
import std/tables
import ../protobuf/blockexc
import ../protobuf/message
import ../protocol/message
import ../protocol/constants
import ../../errors
import ../../logutils
import ../../utils/trackedfutures
import ../../blocktype
import ../types
import ../protocol/wantblocks
export wantblocks
logScope:
topics = "storage blockexcnetworkpeer"
@ -28,13 +35,22 @@ type
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).}
WantBlocksRequestHandler* = proc(
peer: PeerId, req: WantBlocksRequest
): Future[seq[BlockDelivery]] {.async: (raises: [CancelledError]).}
WantBlocksResponseFuture* = Future[WantBlocksResult[WantBlocksResponse]]
NetworkPeer* = ref object of RootObj
id*: PeerId
handler*: RPCHandler
wantBlocksHandler*: WantBlocksRequestHandler
sendConn: Connection
getConn: ConnProvider
yieldInterval*: Duration = DefaultYieldInterval
trackedFutures: TrackedFutures
pendingWantBlocksRequests*: Table[uint64, WantBlocksResponseFuture]
nextRequestId*: uint64
proc connected*(self: NetworkPeer): bool =
not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof)
@ -47,24 +63,82 @@ proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
trace "Attaching read loop", peer = self.id, connId = conn.oid
try:
var nextYield = Moment.now() + self.yieldInterval
while not conn.atEof or not conn.closed:
while not conn.atEof and not conn.closed:
if Moment.now() > nextYield:
nextYield = Moment.now() + self.yieldInterval
trace "Yielding in read loop",
peer = self.id, nextYield = nextYield, interval = self.yieldInterval
await sleepAsync(10.millis)
var lenBuf: array[4, byte]
await conn.readExactly(addr lenBuf[0], 4)
let frameLen = uint32.fromBytes(lenBuf, littleEndian).int
if frameLen < 1:
warn "Frame too short", peer = self.id, frameLen = frameLen
return
var typeByte: array[1, byte]
await conn.readExactly(addr typeByte[0], 1)
if typeByte[0] > ord(high(MessageType)):
warn "Invalid message type byte", peer = self.id, typeByte = typeByte[0]
return
let
data = await conn.readLp(MaxMessageSize.int)
msg = Message.protobufDecode(data).mapFailure().tryGet()
trace "Received message", peer = self.id, connId = conn.oid
await self.handler(self, msg)
msgType = MessageType(typeByte[0])
dataLen = frameLen - 1
case msgType
of mtProtobuf:
if dataLen > MaxMessageSize.int:
warn "Protobuf message too large", peer = self.id, size = dataLen
return
var data = newSeq[byte](dataLen)
if dataLen > 0:
await conn.readExactly(addr data[0], dataLen)
let msg = Message.protobufDecode(data).mapFailure().tryGet()
await self.handler(self, msg)
of mtWantBlocksRequest:
let reqResult = await readWantBlocksRequest(conn, dataLen)
if reqResult.isErr:
warn "Failed to read WantBlocks request",
peer = self.id, error = reqResult.error.msg
return
let
req = reqResult.get
blocks = await self.wantBlocksHandler(self.id, req)
await writeWantBlocksResponse(conn, req.requestId, req.treeCid, blocks)
of mtWantBlocksResponse:
let respResult = await readWantBlocksResponse(conn, dataLen)
if respResult.isErr:
warn "Failed to read WantBlocks response",
peer = self.id, error = respResult.error.msg
return
let response = respResult.get
self.pendingWantBlocksRequests.withValue(response.requestId, fut):
if not fut[].finished:
fut[].complete(WantBlocksResult[WantBlocksResponse].ok(response))
self.pendingWantBlocksRequests.del(response.requestId)
do:
warn "Received WantBlocks response for unknown request ID",
peer = self.id, requestId = response.requestId
except CancelledError:
trace "Read loop cancelled"
except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg
finally:
warn "Detaching read loop", peer = self.id, connId = conn.oid
for requestId, fut in self.pendingWantBlocksRequests:
if not fut.finished:
fut.complete(
WantBlocksResult[WantBlocksResponse].err(
wantBlocksError(ConnectionClosed, "Read loop exited")
)
)
self.pendingWantBlocksRequests.clear()
if self.sendConn == conn:
self.sendConn = nil
await conn.close()
@ -89,19 +163,61 @@ proc send*(
warn "Unable to get send connection for peer message not sent", peer = self.id
return
trace "Sending message", peer = self.id, connId = conn.oid
try:
await conn.writeLp(protobufEncode(msg))
let msgData = protobufEncode(msg)
let
frameLen = 1 + msgData.len
totalSize = 4 + frameLen
var buf = newSeq[byte](totalSize)
let lenBytes = uint32(frameLen).toBytes(littleEndian)
copyMem(addr buf[0], unsafeAddr lenBytes[0], 4)
buf[4] = mtProtobuf.byte
if msgData.len > 0:
copyMem(addr buf[5], unsafeAddr msgData[0], msgData.len)
await conn.write(buf)
except CatchableError as err:
if self.sendConn == conn:
self.sendConn = nil
raise newException(LPStreamError, "Failed to send message: " & err.msg)
proc sendWantBlocksRequest*(
self: NetworkPeer, blockRange: BlockRange
): Future[WantBlocksResult[WantBlocksResponse]] {.async: (raises: [CancelledError]).} =
let requestId = self.nextRequestId
self.nextRequestId += 1
let responseFuture = WantBlocksResponseFuture.init("wantBlocksRequest")
self.pendingWantBlocksRequests[requestId] = responseFuture
try:
let conn = await self.connect()
if isNil(conn):
self.pendingWantBlocksRequests.del(requestId)
return err(wantBlocksError(NoConnection, "No connection available"))
let req = WantBlocksRequest(
requestId: requestId, treeCid: blockRange.treeCid, ranges: blockRange.ranges
)
await writeWantBlocksRequest(conn, req)
return await responseFuture
except CancelledError as exc:
self.pendingWantBlocksRequests.del(requestId)
raise exc
except CatchableError as err:
self.pendingWantBlocksRequests.del(requestId)
return err(wantBlocksError(RequestFailed, "WantBlocks request failed: " & err.msg))
func new*(
T: type NetworkPeer,
peer: PeerId,
connProvider: ConnProvider,
rpcHandler: RPCHandler,
wantBlocksHandler: WantBlocksRequestHandler,
): NetworkPeer =
doAssert(not isNil(connProvider), "should supply connection provider")
@ -109,5 +225,6 @@ func new*(
id: peer,
getConn: connProvider,
handler: rpcHandler,
wantBlocksHandler: wantBlocksHandler,
trackedFutures: TrackedFutures(),
)

View File

@ -1,4 +1,5 @@
import ./peers/peerctxstore
import ./peers/peercontext
import ./peers/peerstats
export peerctxstore, peercontext
export peerctxstore, peercontext, peerstats

View File

@ -7,127 +7,120 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import std/tables
import std/sets
import std/math
import pkg/libp2p
import pkg/chronos
import pkg/questionable
import ../protobuf/blockexc
import ../protobuf/presence
import ../../blocktype
import ../../logutils
import ./peerstats
const
MinRefreshInterval = 1.seconds
MaxRefreshBackoff = 36 # 36 seconds
MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message
WeightCapacity* = 0.30
WeightThroughput* = 0.25
WeightRtt* = 0.25
WeightPenalty* = 0.20
type BlockExcPeerCtx* = ref object of RootObj
BestRatio* = 0.0
WorstRatio* = 1.0
# Absolute reference points for normalization. Peers far beyond these
# saturate at BestRatio or WorstRatio.
RefMaxBps* = 104_857_600.0 # 100 MiB/s — peer implementation's peak throughput
RefMaxRttMicros* = 500_000.0 # 500 ms
RefMaxPenalty* = 15.0 # e.g. ~5 failures at TimeoutPenaltyWeight=3
# Fallback ratios used when a peer lacks a specific metric.
# 0.5 places the peer mid-range so it's neither preferred nor punished.
FallbackThroughputRatio* = 0.5
FallbackRttRatio* = 0.5
DefaultBatchTimeout* = 30.seconds # fallback when no BDP stats available
TimeoutSafetyFactor* = 3.0
# multiplier to account for variance (network jitter, congestion, GC pauses )
MinBatchTimeout* = 5.seconds # min to avoid too aggressive timeouts
MaxBatchTimeout* = 45.seconds # max to handle high contention scenarios
static:
doAssert (WeightCapacity + WeightThroughput + WeightRtt + WeightPenalty) == 1.0,
"BDP score weights must sum to 1.0"
type PeerContext* = ref object of RootObj
id*: PeerId
blocks*: Table[BlockAddress, Presence] # remote peer have list
wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
exchanged*: int # times peer has exchanged with us
refreshInProgress*: bool # indicates if a refresh is in progress
lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
refreshBackoff*: int = 1 # backoff factor for refresh requests
blocksSent*: HashSet[BlockAddress] # blocks sent to peer
blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
lastExchange*: Moment # last time peer has sent us a block
activityTimeout*: Duration
lastSentWants*: HashSet[BlockAddress]
# track what wantList we last sent for delta updates
stats*: PeerPerfStats
proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
let staleness =
self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()
proc new*(T: type PeerContext, id: PeerId): PeerContext =
PeerContext(id: id, stats: PeerPerfStats.new())
if staleness and self.refreshInProgress:
trace "Cleaning up refresh state", peer = self.id
self.refreshInProgress = false
self.refreshBackoff = 1
proc optimalPipelineDepth*(self: PeerContext, batchBytes: uint64): int =
self.stats.optimalPipelineDepth(batchBytes)
staleness
proc batchTimeout*(self: PeerContext, batchBytes: uint64): Duration =
## find optimal timeout for a batch based on BDP
## timeout = min((batchBytes / throughput + RTT) * safetyFactor, maxTimeout)
## it falls back to default if no stats available.
let
throughputOpt = self.stats.throughputBps()
rttOpt = self.stats.avgRttMicros()
proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocksSent
if throughputOpt.isNone or rttOpt.isNone:
return DefaultBatchTimeout
proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.incl(address)
let
throughput = throughputOpt.get()
rttMicros = rttOpt.get()
proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.excl(address)
if throughput == 0:
return DefaultBatchTimeout
proc refreshRequested*(self: BlockExcPeerCtx) =
trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
self.refreshInProgress = true
self.lastRefresh = Moment.now()
let
transferTimeMicros = (batchBytes * 1_000_000) div throughput
totalTimeMicros = transferTimeMicros + rttMicros
timeoutMicros = (totalTimeMicros.float * TimeoutSafetyFactor).uint64
timeout = microseconds(timeoutMicros.int64)
proc refreshReplied*(self: BlockExcPeerCtx) =
self.refreshInProgress = false
self.lastRefresh = Moment.now()
self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)
if timeout < MinBatchTimeout:
return MinBatchTimeout
proc havesUpdated(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
if timeout > MaxBatchTimeout:
return MaxBatchTimeout
proc wantsUpdated*(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
return timeout
proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
# XXX: this is ugly an inefficient, but since those will typically
# be used in "joins", it's better to pay the price here and have
# a linear join than to not do it and have a quadratic join.
toHashSet(self.blocks.keys.toSeq)
proc evalBDPScore*(
self: PeerContext, batchBytes: uint64, currentLoad: int, penalty: float
): float =
## Weighted sum of normalized components. Each component is in [0, 1]
## where 0 = best and 1 = worst. Lower final score is better.
let
pipelineDepth = self.optimalPipelineDepth(batchBytes)
capacityRatio =
if currentLoad >= pipelineDepth:
WorstRatio
elif pipelineDepth > 0:
currentLoad.float / pipelineDepth.float
else:
WorstRatio
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocks
throughputRatio =
if self.stats.throughputBps().isSome:
let bps = self.stats.throughputBps().get().float
if bps <= 0:
WorstRatio
else:
clamp(WorstRatio - bps / RefMaxBps, BestRatio, WorstRatio)
else:
FallbackThroughputRatio
func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
if presence.address notin self.blocks:
self.havesUpdated()
rttRatio =
if self.stats.avgRttMicros().isSome:
clamp(
self.stats.avgRttMicros().get().float / RefMaxRttMicros, BestRatio, WorstRatio
)
else:
FallbackRttRatio
self.blocks[presence.address] = presence
penaltyRatio = clamp(penalty / RefMaxPenalty, BestRatio, WorstRatio)
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
for a in addresses:
self.blocks.del(a)
func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) =
self.cleanPresence(@[address])
proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Adds a block the set of blocks that have been requested to this peer
## (its request schedule).
if self.blocksRequested.len == 0:
self.lastExchange = Moment.now()
self.blocksRequested.incl(address)
proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Removes a block from the set of blocks that have been requested to this peer
## (its request schedule).
self.blocksRequested.excl(address)
proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
let wasRequested = address in self.blocksRequested
self.blocksRequested.excl(address)
self.lastExchange = Moment.now()
wasRequested
proc activityTimer*(
self: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
## This is called by the block exchange when a block is scheduled for this peer.
## If the peer sends no blocks for a while, it is considered inactive/uncooperative
## and the peer is dropped. Note that ANY block that the peer sends will reset this
## timer for all blocks.
##
while true:
let idleTime = Moment.now() - self.lastExchange
if idleTime > self.activityTimeout:
return
await sleepAsync(self.activityTimeout - idleTime)
WeightCapacity * capacityRatio + WeightThroughput * throughputRatio +
WeightRtt * rttRatio + WeightPenalty * penaltyRatio

View File

@ -9,16 +9,10 @@
{.push raises: [].}
import std/sequtils
import std/tables
import std/algorithm
import std/sequtils
import pkg/chronos
import pkg/libp2p
import ../protobuf/blockexc
import ../../blocktype
import ../../logutils
import ./peercontext
@ -27,63 +21,37 @@ export peercontext
logScope:
topics = "storage peerctxstore"
type
PeerCtxStore* = ref object of RootObj
peers*: OrderedTable[PeerId, BlockExcPeerCtx]
type PeerContextStore* = ref object of RootObj
peers*: OrderedTable[PeerId, PeerContext]
PeersForBlock* = tuple[with: seq[BlockExcPeerCtx], without: seq[BlockExcPeerCtx]]
iterator items*(self: PeerCtxStore): BlockExcPeerCtx =
iterator items*(self: PeerContextStore): PeerContext =
for p in self.peers.values:
yield p
proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool =
proc contains*(a: openArray[PeerContext], b: PeerId): bool =
## Convenience method to check for peer precense
##
a.anyIt(it.id == b)
func peerIds*(self: PeerCtxStore): seq[PeerId] =
func peerIds*(self: PeerContextStore): seq[PeerId] =
toSeq(self.peers.keys)
func contains*(self: PeerCtxStore, peerId: PeerId): bool =
func contains*(self: PeerContextStore, peerId: PeerId): bool =
peerId in self.peers
func add*(self: PeerCtxStore, peer: BlockExcPeerCtx) =
func add*(self: PeerContextStore, peer: PeerContext) =
self.peers[peer.id] = peer
func remove*(self: PeerCtxStore, peerId: PeerId) =
func remove*(self: PeerContextStore, peerId: PeerId) =
self.peers.del(peerId)
func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx =
func get*(self: PeerContextStore, peerId: PeerId): PeerContext =
self.peers.getOrDefault(peerId, nil)
func len*(self: PeerCtxStore): int =
func len*(self: PeerContextStore): int =
self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(address in it.peerHave)
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
# FIXME: this is way slower and can end up leading to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(address in it.wantedBlocks)
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
# FIXME: this is way slower and can end up leading to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid))
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
var res: PeersForBlock = (@[], @[])
for peer in self:
if address in peer:
res.with.add(peer)
else:
res.without.add(peer)
res
proc new*(T: type PeerCtxStore): PeerCtxStore =
proc new*(T: type PeerContextStore): PeerContextStore =
## create new instance of a peer context store
PeerCtxStore(peers: initOrderedTable[PeerId, BlockExcPeerCtx]())
PeerContextStore(peers: initOrderedTable[PeerId, PeerContext]())

View File

@ -0,0 +1,217 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/[deques, options, math]
import pkg/chronos
const
  # Tuning knobs for the per-peer adaptive pipelining controller below.
  RttSampleCount* = 16 # size of the sliding RTT window
  MinRequestsPerPeer* = 2 # lower clamp for pipeline depth
  MaxRequestsPerPeer* = 32 # upper clamp for pipeline depth
  DefaultRequestsPerPeer* = 2 # depth used before enough samples exist
  DefaultPipelineDepth* = 2 # fallback depth when batch size is unknown (0)
  MinThroughputDuration* = 100.milliseconds # shortest span that yields a throughput estimate
  ThroughputWindow* = 3.seconds # age limit for throughput samples; also depth-change grace period
  ProbeIntervalBatches* = 16 # batches between upward probes (before backoff scaling)
  ProbeWindowBatches* = 16 # batches observed before a probe is evaluated
  GainThresholdPct* = 8 # throughput gain (%) needed to keep a probed depth
  LossThresholdPct* = 20 # throughput loss (%) that triggers a depth cut of 2
  MaxProbeBackoffShift* = 4 # caps the exponential probe backoff (1 shl shift)
type
  ProbeMode* = enum
    ## Phase of the pipeline-depth controller.
    Stable ## holding the current depth; periodically decides to probe upward
    Probing ## temporarily running at depth+1 to measure the effect

  ThroughputSample = object
    ## One point on the cumulative-bytes-over-time curve.
    time: Moment # when the sample was recorded
    cumBytes: uint64 # totalBytesDelivered at `time`

  PeerPerfStats* = object
    ## Per-peer performance tracker: sliding RTT and throughput windows plus
    ## the state of an additive-increase probe cycle that adapts the number
    ## of in-flight requests (pipeline depth) for the peer.
    rttSamples: Deque[uint64] # RTTs in microseconds, at most RttSampleCount entries
    throughputSamples: Deque[ThroughputSample] # samples no older than ThroughputWindow
    totalBytesDelivered: uint64 # lifetime byte counter for this peer
    currentDepth: int # current pipeline depth (requests in flight)
    lastDepthChangeTime: Moment # last time currentDepth was adjusted
    probeMode: ProbeMode # Stable or Probing
    probeBaselineBps: uint64 # throughput measured just before the probe began
    probeStartTotalBytes: uint64 # totalBytesDelivered at probe start
    probeStartTime: Moment # when the current probe began
    batchesSinceProbe: int # batches completed since the last probe ended
    batchesInProbeWindow: int # batches observed during the current probe
    consecutiveReverts: int # failed probes in a row; drives exponential backoff
proc new*(T: type PeerPerfStats): PeerPerfStats =
  ## Create a fresh tracker with the default pipeline depth and empty
  ## sample windows. All timestamps start at the current moment.
  let now = Moment.now()
  PeerPerfStats(
    rttSamples: initDeque[uint64](RttSampleCount),
    throughputSamples: initDeque[ThroughputSample](),
    totalBytesDelivered: 0,
    currentDepth: DefaultRequestsPerPeer,
    lastDepthChangeTime: now,
    probeMode: Stable,
    probeBaselineBps: 0,
    probeStartTotalBytes: 0,
    # Fix: probeStartTime was previously omitted and defaulted to the zero
    # Moment; initialize it like lastDepthChangeTime so no field carries a
    # sentinel value.
    probeStartTime: now,
    batchesSinceProbe: 0,
    batchesInProbeWindow: 0,
    consecutiveReverts: 0,
  )
proc trimThroughputWindow(self: var PeerPerfStats, now: Moment) =
  ## Evict throughput samples older than `ThroughputWindow` relative to `now`.
  while self.throughputSamples.len > 0:
    let age = now - self.throughputSamples[0].time
    if age <= ThroughputWindow:
      break
    discard self.throughputSamples.popFirst()
proc avgThroughputBps(self: var PeerPerfStats, now: Moment): Option[uint64] =
  ## Average delivery rate in bytes/second over the retained window, or
  ## `none` when there are fewer than two samples or the span between the
  ## oldest and newest sample is below `MinThroughputDuration`.
  self.trimThroughputWindow(now)
  if self.throughputSamples.len >= 2:
    let
      oldest = self.throughputSamples[0]
      newest = self.throughputSamples[self.throughputSamples.len - 1]
      span = newest.time - oldest.time
    if span >= MinThroughputDuration:
      let
        bytes = (newest.cumBytes - oldest.cumBytes).float64
        seconds = span.nanoseconds.float64 / 1_000_000_000.0
      return some((bytes / seconds).uint64)
  none(uint64)
proc avgRttMicros*(self: PeerPerfStats): Option[uint64] =
  ## Mean of the recorded RTT samples, in microseconds; `none` when no
  ## samples have been recorded yet.
  let count = self.rttSamples.len
  if count == 0:
    return none(uint64)
  var acc: uint64 = 0
  for rtt in self.rttSamples:
    acc += rtt
  some(acc div count.uint64)
proc throughputBps*(self: var PeerPerfStats): Option[uint64] =
  ## Current average throughput, measured against the present moment.
  let now = Moment.now()
  self.avgThroughputBps(now)
proc recordRequest*(self: var PeerPerfStats, rttMicros: uint64, bytes: uint64) =
  ## Record one completed request: its round-trip time and payload size.
  ## Updates the RTT window, the throughput window and the probe counters.
  # Keep the RTT window bounded: evict the oldest sample once full.
  while self.rttSamples.len >= RttSampleCount:
    discard self.rttSamples.popFirst()
  self.rttSamples.addLast(rttMicros)

  let timestamp = Moment.now()
  self.totalBytesDelivered += bytes
  self.throughputSamples.addLast(
    ThroughputSample(time: timestamp, cumBytes: self.totalBytesDelivered)
  )
  self.trimThroughputWindow(timestamp)

  inc self.batchesSinceProbe
  if self.probeMode == Probing:
    inc self.batchesInProbeWindow
proc computeBdpDepth(self: var PeerPerfStats, batchBytes: uint64, now: Moment): int =
  ## Estimate the pipeline depth that fills the bandwidth-delay product:
  ## depth = ceil(throughput * RTT / batchBytes), clamped to
  ## [MinRequestsPerPeer, MaxRequestsPerPeer]. Falls back to defaults when
  ## there is not yet enough data to estimate RTT or throughput.
  if batchBytes == 0:
    # NOTE(review): this branch returns DefaultPipelineDepth while the
    # missing-sample branches below return DefaultRequestsPerPeer (both are
    # currently 2) — confirm the asymmetry is intentional.
    return DefaultPipelineDepth
  let rttMicrosOpt = self.avgRttMicros()
  if rttMicrosOpt.isNone:
    return DefaultRequestsPerPeer
  let throughputOpt = self.avgThroughputBps(now)
  if throughputOpt.isNone:
    return DefaultRequestsPerPeer
  let
    rttMicros = rttMicrosOpt.get()
    throughput = throughputOpt.get()
    rttSecs = rttMicros.float64 / 1_000_000.0
    bdpBytes = throughput.float64 * rttSecs
    depth = ceil(bdpBytes / batchBytes.float64).int
  clamp(depth, MinRequestsPerPeer, MaxRequestsPerPeer)
proc optimalPipelineDepth*(self: var PeerPerfStats, batchBytes: uint64): int =
  ## Return the pipeline depth to use for the next batch, advancing the
  ## probe state machine:
  ## * Stable — shrink toward the BDP estimate after a grace period; every
  ##   `effectiveInterval` batches (doubled per consecutive failed probe,
  ##   capped by MaxProbeBackoffShift) start a probe at depth+1.
  ## * Probing — after ProbeWindowBatches batches, compare probe throughput
  ##   against the baseline: keep the raised depth on a gain, cut depth by 2
  ##   on a clear loss, and revert by 1 (counting a failed probe) otherwise
  ##   or when no valid measurement exists. Then return to Stable.
  let now = Moment.now()
  case self.probeMode
  of Stable:
    let
      bdpDepth = self.computeBdpDepth(batchBytes, now)
      gracePassed = (now - self.lastDepthChangeTime) >= ThroughputWindow
    # Only shrink toward the BDP estimate; growth happens via probing.
    if bdpDepth < self.currentDepth and gracePassed:
      self.currentDepth = max(MinRequestsPerPeer, bdpDepth)
      self.lastDepthChangeTime = now
    # Exponential backoff on probe frequency after consecutive failed probes.
    let effectiveInterval =
      ProbeIntervalBatches * (1 shl min(self.consecutiveReverts, MaxProbeBackoffShift))
    if self.batchesSinceProbe >= effectiveInterval and
        self.currentDepth < MaxRequestsPerPeer:
      let baseline = self.avgThroughputBps(now)
      # A probe needs a baseline throughput to compare against.
      if baseline.isSome:
        self.probeBaselineBps = baseline.get()
        self.probeStartTotalBytes = self.totalBytesDelivered
        self.probeStartTime = now
        self.probeMode = Probing
        self.batchesInProbeWindow = 0
        self.currentDepth = self.currentDepth + 1
        self.lastDepthChangeTime = now
    return self.currentDepth
  of Probing:
    # Keep probing until enough batches have been observed.
    if self.batchesInProbeWindow < ProbeWindowBatches:
      return self.currentDepth
    let
      probeBytes = self.totalBytesDelivered - self.probeStartTotalBytes
      probeDuration = now - self.probeStartTime
      probeDurationSecs = probeDuration.nanoseconds.float64 / 1_000_000_000.0
    if probeDurationSecs > 0 and self.probeBaselineBps > 0:
      let
        probeBps = (probeBytes.float64 / probeDurationSecs).uint64
        baselineBps = self.probeBaselineBps
        # Signed percentage change of probe throughput vs baseline.
        deltaPct = ((probeBps.int64 - baselineBps.int64) * 100) div baselineBps.int64
      if deltaPct >= GainThresholdPct:
        # Probe succeeded: keep the raised depth.
        self.consecutiveReverts = 0
        self.lastDepthChangeTime = now
      elif deltaPct <= -LossThresholdPct:
        # Clear regression: back off below the pre-probe depth.
        self.consecutiveReverts = 0
        self.currentDepth = max(MinRequestsPerPeer, self.currentDepth - 2)
        self.lastDepthChangeTime = now
      else:
        # Inconclusive: revert the probe and back off future probes.
        self.consecutiveReverts += 1
        self.currentDepth = max(MinRequestsPerPeer, self.currentDepth - 1)
        self.lastDepthChangeTime = now
    else:
      # No valid measurement: treat as a failed probe.
      self.consecutiveReverts += 1
      self.currentDepth = max(MinRequestsPerPeer, self.currentDepth - 1)
      self.lastDepthChangeTime = now
    self.probeMode = Stable
    self.batchesSinceProbe = 0
    self.batchesInProbeWindow = 0
    self.probeBaselineBps = 0
    self.probeStartTotalBytes = 0
    return self.currentDepth
proc sampleCount*(self: PeerPerfStats): int =
  ## Number of RTT samples currently held in the window.
  result = self.rttSamples.len
proc reset*(self: var PeerPerfStats) =
  ## Return the tracker to its freshly-constructed state, discarding all
  ## samples, counters and probe bookkeeping.
  let now = Moment.now()
  self.rttSamples.clear()
  self.throughputSamples.clear()
  self.totalBytesDelivered = 0
  self.currentDepth = DefaultRequestsPerPeer
  self.lastDepthChangeTime = now
  self.probeMode = Stable
  self.probeBaselineBps = 0
  self.probeStartTotalBytes = 0
  # Fix: probeStartTime was previously left stale across resets; clear it
  # along with the rest of the probe state.
  self.probeStartTime = now
  self.batchesSinceProbe = 0
  self.batchesInProbeWindow = 0
  self.consecutiveReverts = 0

View File

@ -1,43 +0,0 @@
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/hashes
import std/sequtils
import message
import ../../blocktype
export Message, protobufEncode, protobufDecode
export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
proc hash*(e: WantListEntry): Hash =
hash(e.address)
proc contains*(a: openArray[WantListEntry], b: BlockAddress): bool =
## Convenience method to check for peer precense
##
a.anyIt(it.address == b)
proc `==`*(a: WantListEntry, b: BlockAddress): bool =
return a.address == b
proc `<`*(a, b: WantListEntry): bool =
a.priority < b.priority
proc `==`*(a: BlockPresence, b: BlockAddress): bool =
return a.address == b
proc contains*(a: openArray[BlockPresence], b: BlockAddress): bool =
## Convenience method to check for peer precense
##
a.anyIt(it.address == b)

View File

@ -1,47 +0,0 @@
// Protocol of data exchange between Logos Storage nodes.
// Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md
syntax = "proto3";
package blockexc.message.pb;
message Message {
message Wantlist {
enum WantType {
wantBlock = 0;
wantHave = 1;
}
message Entry {
bytes block = 1; // the block cid
int32 priority = 2; // the priority (normalized). default to 1
bool cancel = 3; // whether this revokes an entry
WantType wantType = 4; // Note: defaults to enum 0, ie Block
bool sendDontHave = 5; // Note: defaults to false
}
repeated Entry entries = 1; // a list of wantlist entries
bool full = 2; // whether this is the full wantlist. default to false
}
message Block {
bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length)
bytes data = 2;
}
enum BlockPresenceType {
presenceHave = 0;
presenceDontHave = 1;
}
message BlockPresence {
bytes cid = 1;
BlockPresenceType type = 2;
}
Wantlist wantlist = 1;
repeated Block payload = 3; // what happened to 2?
repeated BlockPresence blockPresences = 4;
int32 pendingBytes = 5;
}

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014-2018 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,35 +0,0 @@
{.push raises: [].}
import libp2p
import pkg/stint
import pkg/questionable
import pkg/questionable/results
import ./blockexc
import ../../blocktype
export questionable
export stint
export BlockPresenceType
type
PresenceMessage* = blockexc.BlockPresence
Presence* = object
address*: BlockAddress
have*: bool
func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
if bytes.len > 32:
return UInt256.none
UInt256.fromBytesBE(bytes).some
func init*(_: type Presence, message: PresenceMessage): ?Presence =
some Presence(
address: message.address, have: message.`type` == BlockPresenceType.Have
)
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
PresenceMessage(
address: presence.address,
`type`: if presence.have: BlockPresenceType.Have else: BlockPresenceType.DontHave,
)

View File

@ -0,0 +1,48 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/chronos
import ../../units
import ../../storagetypes
const
  # if it hangs longer than this, skip peer and continue
  DefaultWantHaveSendTimeout* = 30.seconds
  # message size limits for protobuf control messages
  MaxMessageSize*: uint32 = 16.MiBs.uint32
  # preferred total payload size for one batch of blocks
  TargetBatchBytes*: uint32 = 1024 * 1024
  MinBatchSize*: uint32 = 1
  # upper bound on the metadata section of a WantBlocks response
  MaxMetadataSize*: uint32 = 4 * 1024 * 1024
  # frame-length prefix (4) + metadata + block payload
  MaxWantBlocksResponseBytes*: uint32 = 4 + MaxMetadataSize + TargetBatchBytes
  MaxBlocksPerBatch*: uint32 = TargetBatchBytes div MinBlockSize.uint32
  # the worst case which is alternating missing blocks (0,2,4...) creates max ranges
  # each range costs 16 bytes (start:u64 + count:u64)
  MaxWantBlocksRequestBytes*: uint32 = (MaxBlocksPerBatch div 2) * 16 + 1024

# Compile-time sanity checks keeping the derived limits consistent with
# their definitions above.
static:
  doAssert MinBatchSize >= 1, "MinBatchSize must be positive"
  doAssert MaxBlocksPerBatch == TargetBatchBytes div MinBlockSize.uint32,
    "MaxBlocksPerBatch must equal TargetBatchBytes / MinBlockSize"
  doAssert MaxWantBlocksResponseBytes == 4 + MaxMetadataSize + TargetBatchBytes,
    "MaxWantBlocksResponseBytes must equal 4 + MaxMetadataSize + TargetBatchBytes"
  # should fit worst case sparse batch - max ranges
  const
    worstCaseRanges = MaxBlocksPerBatch div 2
    worstCaseRangeBytes = worstCaseRanges * 16
    fixedOverhead = 64'u32 # request id + cidLen + cid + rangeCount
  doAssert MaxWantBlocksRequestBytes >= worstCaseRangeBytes + fixedOverhead,
    "MaxWantBlocksRequestBytes too small for worst case sparse batch"

View File

@ -9,31 +9,22 @@ import pkg/libp2p/cid
import pkg/questionable
import ../../units
import ../../merkletree
import ../../blocktype
const
MaxBlockSize* = 100.MiBs.uint
MaxMessageSize* = 100.MiBs.uint
type
WantType* = enum
WantBlock = 0
WantHave = 1
WantHave = 0 # Presence query - the only type used with batch transfer protocol
WantListEntry* = object
address*: BlockAddress
# XXX: I think explicit priority is pointless as the peer will request
# the blocks in the order it wants to receive them, and all we have to
# do is process those in the same order as we send them back. It also
# complicates things for no reason at the moment, as the priority is
# always set to 0.
priority*: int32 # The priority (normalized). default to 1
cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block
wantType*: WantType # Defaults to WantHave (only type supported)
sendDontHave*: bool # Note: defaults to false
rangeCount*: uint64
# For range queries: number of sequential blocks starting from address.index (0 = single block)
downloadId*: uint64 # Unique download ID for request/response correlation
WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries
@ -42,24 +33,22 @@ type
BlockDelivery* = object
blk*: Block
address*: BlockAddress
proof*: ?StorageMerkleProof # Present only if `address.leaf` is true
proof*: ?StorageMerkleProof
BlockPresenceType* = enum
Have = 0
DontHave = 1
DontHave = 0
HaveRange = 1
Complete = 2
BlockPresence* = object
address*: BlockAddress
`type`*: BlockPresenceType
StateChannelUpdate* = object
update*: seq[byte] # Signed Nitro state, serialized as JSON
kind*: BlockPresenceType
ranges*: seq[tuple[start: uint64, count: uint64]]
downloadId*: uint64 # echoed for request/response correlation
Message* = object
wantList*: WantList
payload*: seq[BlockDelivery]
blockPresences*: seq[BlockPresence]
pendingBytes*: uint
#
# Encoding Message into seq[byte] in Protobuf format
@ -67,12 +56,8 @@ type
proc write*(pb: var ProtoBuffer, field: int, value: BlockAddress) =
var ipb = initProtoBuffer()
ipb.write(1, value.leaf.uint)
if value.leaf:
ipb.write(2, value.treeCid.data.buffer)
ipb.write(3, value.index.uint64)
else:
ipb.write(4, value.cid.data.buffer)
ipb.write(1, value.treeCid.data.buffer)
ipb.write(2, value.index.uint64)
ipb.finish()
pb.write(field, ipb)
@ -83,6 +68,8 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantListEntry) =
ipb.write(3, value.cancel.uint)
ipb.write(4, value.wantType.uint)
ipb.write(5, value.sendDontHave.uint)
ipb.write(6, value.rangeCount)
ipb.write(7, value.downloadId)
ipb.finish()
pb.write(field, ipb)
@ -94,32 +81,26 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
var ipb = initProtoBuffer()
ipb.write(1, value.blk.cid.data.buffer)
ipb.write(2, value.blk.data)
ipb.write(3, value.address)
if value.address.leaf:
if proof =? value.proof:
ipb.write(4, proof.encode())
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: BlockPresence) =
var ipb = initProtoBuffer()
ipb.write(1, value.address)
ipb.write(2, value.`type`.uint)
ipb.write(2, value.kind.uint)
# Encode ranges if present
for (start, count) in value.ranges:
var rangePb = initProtoBuffer()
rangePb.write(1, start)
rangePb.write(2, count)
rangePb.finish()
ipb.write(3, rangePb)
ipb.write(4, value.downloadId)
ipb.finish()
pb.write(field, ipb)
proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer()
ipb.write(1, value.wantList)
for v in value.payload:
ipb.write(3, v) # is this meant to be 2?
for v in value.blockPresences:
ipb.write(4, v)
ipb.write(5, value.pendingBytes)
ipb.finish()
ipb.buffer
@ -129,27 +110,13 @@ proc protobufEncode*(value: Message): seq[byte] =
proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] =
var
value: BlockAddress
leaf: bool
field: uint64
cidBuf = newSeq[byte]()
if ?pb.getField(1, field):
leaf = bool(field)
if leaf:
var
treeCid: Cid
index: Natural
if ?pb.getField(2, cidBuf):
treeCid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(3, field):
index = field
value = BlockAddress(leaf: true, treeCid: treeCid, index: index)
else:
var cid: Cid
if ?pb.getField(4, cidBuf):
cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
value = BlockAddress(leaf: false, cid: cid)
if ?pb.getField(1, cidBuf):
value.treeCid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(2, field):
value.index = field
ok(value)
@ -168,6 +135,10 @@ proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry]
value.wantType = WantType(field)
if ?pb.getField(5, field):
value.sendDontHave = bool(field)
if ?pb.getField(6, field):
value.rangeCount = field
if ?pb.getField(7, field):
value.downloadId = field
ok(value)
proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] =
@ -182,44 +153,25 @@ proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] =
value.full = bool(field)
ok(value)
proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery] =
var
value = BlockDelivery()
dataBuf = newSeq[byte]()
cidBuf = newSeq[byte]()
cid: Cid
ipb: ProtoBuffer
if ?pb.getField(1, cidBuf):
cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(2, dataBuf):
value.blk =
?Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(3, ipb):
value.address = ?BlockAddress.decode(ipb)
if value.address.leaf:
var proofBuf = newSeq[byte]()
if ?pb.getField(4, proofBuf):
let proof =
?StorageMerkleProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
value.proof = proof.some
else:
value.proof = StorageMerkleProof.none
else:
value.proof = StorageMerkleProof.none
ok(value)
proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence] =
var
value = BlockPresence()
field: uint64
ipb: ProtoBuffer
rangelist: seq[seq[byte]]
if ?pb.getField(1, ipb):
value.address = ?BlockAddress.decode(ipb)
if ?pb.getField(2, field):
value.`type` = BlockPresenceType(field)
value.kind = BlockPresenceType(field)
if ?pb.getRepeatedField(3, rangelist):
for item in rangelist:
var rangePb = initProtoBuffer(item)
var start, count: uint64
discard ?rangePb.getField(1, start)
discard ?rangePb.getField(2, count)
value.ranges.add((start, count))
if ?pb.getField(4, field):
value.downloadId = field
ok(value)
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
@ -230,11 +182,7 @@ proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
sublist: seq[seq[byte]]
if ?pb.getField(1, ipb):
value.wantList = ?WantList.decode(ipb)
if ?pb.getRepeatedField(3, sublist): # meant to be 2?
for item in sublist:
value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
if ?pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
discard ?pb.getField(5, value.pendingBytes)
ok(value)

View File

@ -0,0 +1,31 @@
{.push raises: [].}
import libp2p
import pkg/questionable
import ./message
import ../../blocktype
export questionable
export BlockPresenceType
type
  PresenceMessage* = message.BlockPresence
    ## Wire-level presence message (alias of `message.BlockPresence`).

  Presence* = object
    ## Decoded availability information reported by a peer for one address.
    address*: BlockAddress # block address the presence refers to
    have*: bool # true when the peer reports any availability (HaveRange or Complete)
    presenceType*: BlockPresenceType # raw presence kind from the wire
    ranges*: seq[tuple[start: uint64, count: uint64]] # index ranges the peer has
func init*(_: type Presence, message: PresenceMessage): ?Presence =
  ## Build a `Presence` from a wire message. `have` is derived from the
  ## kind: HaveRange and Complete both count as availability.
  let available =
    message.kind == BlockPresenceType.HaveRange or
    message.kind == BlockPresenceType.Complete
  Presence(
    address: message.address,
    have: available,
    presenceType: message.kind,
    ranges: message.ranges,
  ).some
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
  ## Encode a `Presence` back into its wire representation.
  result = PresenceMessage(
    address: presence.address,
    kind: presence.presenceType,
    ranges: presence.ranges,
  )

View File

@ -0,0 +1,637 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/options
import pkg/chronos
import pkg/libp2p
import pkg/libp2p/multicodec
import pkg/stew/endians2
import pkg/results
import ../../blocktype
import ../../merkletree
import ../../logutils
import ../../errors
import ./message
import ./constants
export message, results, errors
logScope:
topics = "storage wantblocks"
const
  # Fixed field widths (in bytes) of the hand-rolled little-endian wire
  # format used by the WantBlocks request/response codecs below.
  SizeRequestId = sizeof(uint64)
  SizeCidLen = sizeof(uint16)
  SizeRangeCount = sizeof(uint32)
  SizeRange = sizeof(uint64) + sizeof(uint64) # start + count
  SizeBlockCount = sizeof(uint32)
  SizeBlockIndex = sizeof(uint64)
  SizeDataLen = sizeof(uint32)
  SizeProofLen = sizeof(uint16)
  SizeNodeLen = sizeof(uint16)
  SizeMcodec = sizeof(uint64)
  SizeNleaves = sizeof(uint64)
  SizePathCount = sizeof(uint32)
  SizeProofHeader = SizeMcodec + SizeBlockIndex + SizeNleaves + SizePathCount
  SizeMetaLen = sizeof(uint32)
  # Sanity bound: reject proofs claiming more than this many path nodes.
  MaxMerkleProofDepth = 64
type
  MessageType* = enum
    ## First byte of every frame, selecting how the payload is encoded.
    mtProtobuf = 0x00 # Protobuf control messages (want lists, presence)
    mtWantBlocksRequest = 0x01 # WantBlocks request
    mtWantBlocksResponse = 0x02 # WantBlocks response

  WantBlocksRequest* = object
    ## Request for ranges of blocks belonging to one dataset tree.
    requestId*: uint64 # correlates the response with this request
    treeCid*: Cid # dataset the block indices refer to
    ranges*: seq[tuple[start: uint64, count: uint64]] # requested index ranges

  SharedBlocksBuffer* = ref object
    ## Single backing buffer shared by all block views of one response,
    ## so individual blocks need not be copied out.
    data*: seq[byte]

  BlockEntry* = object
    ## Location of one block's data inside a response's shared buffer.
    index*: uint64 # leaf index within the tree
    cid*: Cid # block CID
    dataOffset*: int # offset of the block data in the shared buffer
    dataLen*: int # length of the block data
    proof*: StorageMerkleProof # inclusion proof for the leaf

  WantBlocksResponse* = object
    ## Decoded WantBlocks response: metadata plus a shared data buffer.
    requestId*: uint64 # echoed request ID
    treeCid*: Cid
    blocks*: seq[BlockEntry]
    sharedBuffer*: SharedBlocksBuffer

  BlockDeliveryView* = object
    ## View of one delivered block backed by a shared buffer.
    cid*: Cid
    address*: BlockAddress
    proof*: Option[StorageMerkleProof]
    sharedBuf*: SharedBlocksBuffer
    dataOffset*: int
    dataLen*: int

  # Per-block fields parsed from the response metadata section.
  BlockMetadata =
    tuple[index: uint64, cid: Cid, dataLen: uint32, proof: Option[StorageMerkleProof]]
proc frameProtobufMessage*(data: openArray[byte]): seq[byte] =
  ## Wrap a protobuf payload in a frame: a little-endian uint32 length
  ## (covering the type byte plus payload) followed by `mtProtobuf` and the
  ## payload bytes.
  let payloadLen = (1 + data.len).uint32
  result = newSeqUninit[byte](4 + payloadLen.int)
  let lenLE = payloadLen.toLE
  copyMem(addr result[0], unsafeAddr lenLE, 4)
  result[4] = mtProtobuf.byte
  if data.len > 0:
    copyMem(addr result[5], unsafeAddr data[0], data.len)
proc decodeProofBinary*(data: openArray[byte]): WantBlocksResult[StorageMerkleProof] =
  ## Decode a merkle proof from its binary form: a fixed header (mcodec u64,
  ## index u64, nleaves u64, pathCount u32 — all little-endian) followed by
  ## `pathCount` length-prefixed (u16) path nodes. Returns a typed error on
  ## truncation, an unknown codec, or an oversized path.
  if data.len < SizeProofHeader:
    return err(wantBlocksError(ProofTooShort, "Proof data too short"))
  var offset = 0
  let
    mcodecVal = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
    mcodec = MultiCodec.codec(mcodecVal.int)
  if mcodec == InvalidMultiCodec:
    return err(wantBlocksError(InvalidCodec, "Invalid MultiCodec: " & $mcodecVal))
  offset += 8
  let index = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian).int
  offset += 8
  let nleaves = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian).int
  offset += 8
  let pathCount =
    uint32.fromBytes(data.toOpenArray(offset, offset + 3), littleEndian).int
  offset += 4
  # Bound the path length before allocating for it.
  if pathCount > MaxMerkleProofDepth:
    return err(
      wantBlocksError(ProofPathTooLarge, "Proof path count too large: " & $pathCount)
    )
  var nodes = newSeq[seq[byte]](pathCount)
  for i in 0 ..< pathCount:
    # Each node: u16 length prefix, then `nodeLen` bytes.
    if offset + SizeNodeLen > data.len:
      return err(wantBlocksError(ProofTruncated, "Proof truncated at node " & $i))
    let nodeLen =
      uint16.fromBytes(data.toOpenArray(offset, offset + 1), littleEndian).int
    offset += 2
    if offset + nodeLen > data.len:
      return err(wantBlocksError(ProofTruncated, "Proof truncated at node data " & $i))
    if nodeLen == 0:
      nodes[i] = @[]
    else:
      nodes[i] = @(data.toOpenArray(offset, offset + nodeLen - 1))
    offset += nodeLen
  # Defer structural validation to the proof constructor.
  ok(
    ?StorageMerkleProof.init(mcodec, index, nleaves, nodes).mapErr(
      proc(e: auto): ref WantBlocksError =
        wantBlocksError(ProofCreationFailed, "Failed to create proof: " & e.msg)
    )
  )
proc calcRequestSize*(req: WantBlocksRequest): int {.inline.} =
  ## Exact encoded size of `req` in bytes (layout of `encodeRequestInto`).
  let cidLen = req.treeCid.data.buffer.len
  result = SizeRequestId + SizeCidLen + cidLen
  result += SizeRangeCount + req.ranges.len * SizeRange
proc encodeRequestInto*(
    req: WantBlocksRequest, buf: var openArray[byte], startOffset: int
): int =
  ## Encode `req` into `buf` starting at `startOffset` and return the number
  ## of bytes written. Layout (little-endian): requestId u64, cidLen u16,
  ## CID bytes, rangeCount u32, then (start u64, count u64) per range.
  ## Caller must size `buf` via `calcRequestSize`; no bounds checks are done
  ## beyond the language's index checks.
  var offset = startOffset
  let reqIdLE = req.requestId.toLE
  copyMem(addr buf[offset], unsafeAddr reqIdLE, 8)
  offset += 8
  let
    cidBytes = req.treeCid.data.buffer
    cidLenLE = cidBytes.len.uint16.toLE
  copyMem(addr buf[offset], unsafeAddr cidLenLE, 2)
  offset += 2
  if cidBytes.len > 0:
    copyMem(addr buf[offset], unsafeAddr cidBytes[0], cidBytes.len)
  offset += cidBytes.len
  let rangeCountLE = req.ranges.len.uint32.toLE
  copyMem(addr buf[offset], unsafeAddr rangeCountLE, 4)
  offset += 4
  for (start, count) in req.ranges:
    let startLE = start.toLE
    copyMem(addr buf[offset], unsafeAddr startLE, 8)
    offset += 8
    let countLE = count.toLE
    copyMem(addr buf[offset], unsafeAddr countLE, 8)
    offset += 8
  return offset - startOffset
proc decodeRequest*(data: openArray[byte]): WantBlocksResult[WantBlocksRequest] =
  ## Decode a WantBlocks request (inverse of `encodeRequestInto`).
  ## Returns a typed error on truncation or an invalid/empty CID.
  if data.len < SizeRequestId + SizeCidLen + SizeRangeCount:
    return err(wantBlocksError(RequestTooShort, "Request too short"))
  var offset = 0
  let requestId = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
  offset += 8
  let cidLen = uint16.fromBytes(data.toOpenArray(offset, offset + 1), littleEndian).int
  offset += 2
  if cidLen == 0:
    return err(wantBlocksError(InvalidCid, "CID length is zero"))
  # Combined check: CID bytes plus the rangeCount field must both fit.
  if offset + cidLen + SizeRangeCount > data.len:
    return err(wantBlocksError(RequestTruncated, "Request truncated (CID)"))
  let treeCid = ?Cid.init(data.toOpenArray(offset, offset + cidLen - 1)).mapErr(
    proc(e: auto): ref WantBlocksError =
      wantBlocksError(InvalidCid, "Invalid CID: " & $e)
  )
  offset += cidLen
  let rangeCount =
    uint32.fromBytes(data.toOpenArray(offset, offset + 3), littleEndian).int
  offset += 4
  # NOTE(review): `rangeCount * SizeRange` is safe on 64-bit ints but could
  # overflow a 32-bit `int` for adversarial rangeCount — confirm targets,
  # or bound rangeCount explicitly before multiplying.
  if offset + (rangeCount * SizeRange) > data.len:
    return err(wantBlocksError(RequestTruncated, "Request truncated (ranges)"))
  var ranges = newSeqOfCap[tuple[start: uint64, count: uint64]](rangeCount)
  for _ in 0 ..< rangeCount:
    let start = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
    offset += 8
    let count = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
    offset += 8
    ranges.add((start, count))
  ok(WantBlocksRequest(requestId: requestId, treeCid: treeCid, ranges: ranges))
proc calcProofBinarySize*(proof: StorageMerkleProof): int {.inline.} =
  ## Encoded size of `proof`: fixed header plus a length-prefixed node
  ## entry per path element (layout of `encodeProofBinaryInto`).
  var total = SizeProofHeader
  for node in proof.path:
    total += SizeNodeLen + node.len
  total
proc calcResponseMetadataSize*(treeCid: Cid, blocks: seq[BlockDelivery]): int =
  ## Exact size in bytes of the metadata section that
  ## `encodeResponseMetadataInto` produces for the same arguments.
  var total = SizeRequestId + SizeCidLen + treeCid.data.buffer.len + SizeBlockCount
  for delivery in blocks:
    total += SizeBlockIndex + SizeCidLen + delivery.blk.cid.data.buffer.len
    total += SizeDataLen + SizeProofLen
    if delivery.proof.isSome:
      total += calcProofBinarySize(delivery.proof.get)
  total
proc encodeProofBinaryInto*(
    proof: StorageMerkleProof, buf: var openArray[byte], startOffset: int
): int =
  ## Encode `proof` into `buf` at `startOffset` and return the bytes
  ## written. Layout (little-endian): mcodec u64, index u64, nleaves u64,
  ## pathCount u32, then each path node as u16 length + bytes. Caller must
  ## size `buf` via `calcProofBinarySize`.
  var offset = startOffset
  let mcodecLE = proof.mcodec.uint64.toLE
  copyMem(addr buf[offset], unsafeAddr mcodecLE, 8)
  offset += 8
  let indexLE = proof.index.uint64.toLE
  copyMem(addr buf[offset], unsafeAddr indexLE, 8)
  offset += 8
  let nleavesLE = proof.nleaves.uint64.toLE
  copyMem(addr buf[offset], unsafeAddr nleavesLE, 8)
  offset += 8
  let pathCountLE = proof.path.len.uint32.toLE
  copyMem(addr buf[offset], unsafeAddr pathCountLE, 4)
  offset += 4
  for node in proof.path:
    let nodeLenLE = node.len.uint16.toLE
    copyMem(addr buf[offset], unsafeAddr nodeLenLE, 2)
    offset += 2
    if node.len > 0:
      copyMem(addr buf[offset], unsafeAddr node[0], node.len)
    offset += node.len
  return offset - startOffset
proc encodeResponseMetadataInto*(
    requestId: uint64,
    treeCid: Cid,
    blocks: seq[BlockDelivery],
    buf: var openArray[byte],
    startOffset: int,
): int =
  ## Encode the response metadata section into `buf` at `startOffset` and
  ## return the bytes written. Layout (little-endian): requestId u64,
  ## treeCidLen u16 + bytes, blockCount u32, then per block: index u64,
  ## cidLen u16 + bytes, dataLen u32, proofLen u16 (+ encoded proof when
  ## present, 0 otherwise). Caller must size `buf` via
  ## `calcResponseMetadataSize`. Block data itself is written separately.
  var offset = startOffset
  let reqIdLE = requestId.toLE
  copyMem(addr buf[offset], unsafeAddr reqIdLE, 8)
  offset += 8
  let
    treeCidBytes = treeCid.data.buffer
    treeCidLenLE = treeCidBytes.len.uint16.toLE
  copyMem(addr buf[offset], unsafeAddr treeCidLenLE, 2)
  offset += 2
  if treeCidBytes.len > 0:
    copyMem(addr buf[offset], unsafeAddr treeCidBytes[0], treeCidBytes.len)
  offset += treeCidBytes.len
  let blockCountLE = blocks.len.uint32.toLE
  copyMem(addr buf[offset], unsafeAddr blockCountLE, 4)
  offset += 4
  for bd in blocks:
    let
      index = uint64(bd.address.index)
      indexLE = index.toLE
    copyMem(addr buf[offset], unsafeAddr indexLE, 8)
    offset += 8
    let
      blockCidBytes = bd.blk.cid.data.buffer
      blockCidLenLE = blockCidBytes.len.uint16.toLE
    copyMem(addr buf[offset], unsafeAddr blockCidLenLE, 2)
    offset += 2
    if blockCidBytes.len > 0:
      copyMem(addr buf[offset], unsafeAddr blockCidBytes[0], blockCidBytes.len)
    offset += blockCidBytes.len
    # Only the length is recorded here; the data lives in the payload part.
    let dataLenLE = bd.blk.data[].len.uint32.toLE
    copyMem(addr buf[offset], unsafeAddr dataLenLE, 4)
    offset += 4
    if bd.proof.isSome:
      let
        proofSize = calcProofBinarySize(bd.proof.get)
        proofLenLE = proofSize.uint16.toLE
      copyMem(addr buf[offset], unsafeAddr proofLenLE, 2)
      offset += 2
      offset += encodeProofBinaryInto(bd.proof.get, buf, offset)
    else:
      # proofLen == 0 signals "no proof" to the decoder.
      let zeroLE = 0'u16.toLE
      copyMem(addr buf[offset], unsafeAddr zeroLE, 2)
      offset += 2
  return offset - startOffset
proc decodeResponseMetadata(
    data: openArray[byte]
): WantBlocksResult[(uint64, Cid, seq[BlockMetadata])] =
  ## Decode the response metadata section (inverse of
  ## `encodeResponseMetadataInto`), returning the echoed request id, the
  ## tree CID and per-block metadata. Every field read is bounds-checked
  ## first; typed errors are returned for truncation, invalid CIDs, a block
  ## count above `MaxBlocksPerBatch`, a dataLen above `MaxBlockSize`, or a
  ## proof that fails to decode.
  if data.len < SizeRequestId + SizeCidLen + SizeBlockCount:
    return err(wantBlocksError(MetadataTooShort, "Metadata too short"))
  var offset = 0
  let requestId = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
  offset += 8
  let cidLen = uint16.fromBytes(data.toOpenArray(offset, offset + 1), littleEndian).int
  offset += 2
  if cidLen == 0:
    return err(wantBlocksError(InvalidCid, "Tree CID length is zero"))
  if offset + cidLen + SizeBlockCount > data.len:
    return err(wantBlocksError(MetadataTruncated, "Metadata truncated at CID"))
  let treeCid = ?Cid.init(data.toOpenArray(offset, offset + cidLen - 1)).mapErr(
    proc(e: auto): ref WantBlocksError =
      wantBlocksError(InvalidCid, "Invalid CID: " & $e)
  )
  offset += cidLen
  let blockCount = uint32.fromBytes(data.toOpenArray(offset, offset + 3), littleEndian)
  offset += 4
  # Bound the count before allocating the metadata seq.
  if blockCount > MaxBlocksPerBatch:
    return err(
      wantBlocksError(
        TooManyBlocks,
        "Block count " & $blockCount & " exceeds maximum " & $MaxBlocksPerBatch,
      )
    )
  var blocksMeta = newSeq[BlockMetadata](blockCount.int)
  for i in 0 ..< blockCount:
    # Per block: index u64, cidLen u16 + bytes, dataLen u32, proofLen u16
    # (+ proof bytes when proofLen > 0).
    if offset + SizeBlockIndex > data.len:
      return
        err(wantBlocksError(MetadataTruncated, "Metadata truncated at block " & $i))
    let index = uint64.fromBytes(data.toOpenArray(offset, offset + 7), littleEndian)
    offset += 8
    if offset + SizeCidLen > data.len:
      return err(
        wantBlocksError(MetadataTruncated, "Metadata truncated at block cidLen " & $i)
      )
    let blockCidLen =
      uint16.fromBytes(data.toOpenArray(offset, offset + 1), littleEndian).int
    offset += 2
    if blockCidLen == 0:
      return err(wantBlocksError(InvalidCid, "Block CID length is zero at block " & $i))
    if offset + blockCidLen > data.len:
      return
        err(wantBlocksError(MetadataTruncated, "Metadata truncated at block CID " & $i))
    let blockCid = ?Cid.init(data.toOpenArray(offset, offset + blockCidLen - 1)).mapErr(
      proc(e: auto): ref WantBlocksError =
        wantBlocksError(InvalidCid, "Invalid block CID at " & $i & ": " & $e)
    )
    offset += blockCidLen
    if offset + SizeDataLen > data.len:
      return
        err(wantBlocksError(MetadataTruncated, "Metadata truncated at dataLen " & $i))
    let dataLen = uint32.fromBytes(data.toOpenArray(offset, offset + 3), littleEndian)
    offset += 4
    if dataLen > MaxBlockSize.uint32:
      return err(
        wantBlocksError(
          DataSizeMismatch,
          "Block dataLen exceeds MaxBlockSize at " & $i & ": " & $dataLen,
        )
      )
    if offset + SizeProofLen > data.len:
      return
        err(wantBlocksError(MetadataTruncated, "Metadata truncated at proofLen " & $i))
    let proofLen =
      uint16.fromBytes(data.toOpenArray(offset, offset + 1), littleEndian).int
    offset += 2
    var proof: Option[StorageMerkleProof] = none(StorageMerkleProof)
    if proofLen > 0:
      if offset + proofLen > data.len:
        return
          err(wantBlocksError(MetadataTruncated, "Metadata truncated at proof " & $i))
      let proofResult =
        decodeProofBinary(data.toOpenArray(offset, offset + proofLen - 1))
      if proofResult.isErr:
        return err(
          wantBlocksError(
            ProofDecodeFailed,
            "Failed to decode proof at block " & $i & ": " & proofResult.error.msg,
          )
        )
      proof = some(proofResult.get)
      offset += proofLen
    blocksMeta[i] = (index: index, cid: blockCid, dataLen: dataLen, proof: proof)
  ok((requestId, treeCid, blocksMeta))
proc writeWantBlocksResponse*(
    conn: Connection, requestId: uint64, treeCid: Cid, blocks: seq[BlockDelivery]
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Frame and send a WantBlocks response on `conn`.
  ## Wire layout: [frameLen: u32 LE][msgType: 1 byte][metaLen: u32 LE]
  ## [metadata][raw block payloads...].
  ## Oversized responses are dropped with a warning instead of being sent.
  let metaSize = calcResponseMetadataSize(treeCid, blocks)
  if metaSize > MaxMetadataSize.int:
    warn "Metadata exceeds limit, skipping response",
      metaSize = metaSize, limit = MaxMetadataSize, blockCount = blocks.len
    return
  # Total size of all block payloads appended after the metadata.
  var totalDataSize: uint64 = 0
  for bd in blocks:
    totalDataSize += bd.blk.data[].len.uint64
  # contentSize covers everything after the message-type byte.
  let contentSize = SizeMetaLen.uint64 + metaSize.uint64 + totalDataSize
  if contentSize > MaxWantBlocksResponseBytes:
    warn "Response exceeds size limit, skipping",
      contentSize = contentSize,
      limit = MaxWantBlocksResponseBytes,
      blockCount = blocks.len
    return
  let
    frameLen = 1 + contentSize.int # +1 for the message-type byte
    totalSize = 4 + frameLen # +4 for the frame-length prefix
  var
    buf = newSeqUninit[byte](totalSize)
    offset = 0
  let frameLenLE = frameLen.uint32.toLE
  copyMem(addr buf[offset], unsafeAddr frameLenLE, 4)
  offset += 4
  buf[offset] = mtWantBlocksResponse.byte
  offset += 1
  let metaSizeLE = metaSize.uint32.toLE
  copyMem(addr buf[offset], unsafeAddr metaSizeLE, 4)
  offset += 4
  offset += encodeResponseMetadataInto(requestId, treeCid, blocks, buf, offset)
  # Append raw block payloads in the same order as the metadata entries,
  # so the reader can slice them back out by the per-block dataLen fields.
  for bd in blocks:
    if bd.blk.data[].len > 0:
      copyMem(addr buf[offset], unsafeAddr bd.blk.data[][0], bd.blk.data[].len)
    offset += bd.blk.data[].len
  await conn.write(buf)
proc writeWantBlocksRequest*(
    conn: Connection, req: WantBlocksRequest
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Frame and send a WantBlocks request on `conn`.
  ## Wire layout: [frameLen: u32 LE][msgType: 1 byte][encoded request].
  let payloadSize = calcRequestSize(req)
  var frame = newSeqUninit[byte](4 + 1 + payloadSize)
  # Frame length counts the message-type byte plus the encoded request.
  let lenLE = (1 + payloadSize).uint32.toLE
  copyMem(addr frame[0], unsafeAddr lenLE, 4)
  frame[4] = mtWantBlocksRequest.byte
  discard encodeRequestInto(req, frame, 5)
  await conn.write(frame)
proc readWantBlocksResponse*(
    conn: Connection, dataLen: int
): Future[WantBlocksResult[WantBlocksResponse]] {.async: (raises: [CancelledError]).} =
  ## Read and decode a WantBlocks response of `dataLen` total bytes from `conn`.
  ## Wire layout: [metaLen: u32 LE][metadata: metaLen bytes][block payloads].
  ## All block payloads are read into one shared buffer; each BlockEntry only
  ## records its (offset, len) slice into that buffer, so no per-block copy
  ## happens here.
  try:
    let totalLen = dataLen.uint32
    if totalLen > MaxWantBlocksResponseBytes:
      return err(wantBlocksError(ResponseTooLarge, "Response too large: " & $totalLen))
    var lenBuf: array[4, byte]
    await conn.readExactly(addr lenBuf[0], 4)
    let metaLen = uint32.fromBytes(lenBuf, littleEndian)
    if metaLen > MaxMetadataSize:
      return err(wantBlocksError(MetadataTooLarge, "Metadata too large: " & $metaLen))
    var metaBuf = newSeqUninit[byte](metaLen.int)
    if metaLen > 0:
      await conn.readExactly(addr metaBuf[0], metaLen.int)
    let (requestId, treeCid, blocksMeta) = ?decodeResponseMetadata(metaBuf)
    # Sum of per-block payload sizes declared in the metadata.
    var totalDataSize: uint64 = 0
    for bm in blocksMeta:
      totalDataSize += bm.dataLen.uint64
    # The declared total must at least cover the length prefix plus metadata.
    if totalLen < SizeMetaLen.uint32 + metaLen:
      return err(
        wantBlocksError(
          DataSizeMismatch,
          "Invalid lengths: totalLen=" & $totalLen & " metaLen=" & $metaLen,
        )
      )
    # Remaining bytes on the wire must match the declared payload total exactly.
    let dataLen = totalLen - SizeMetaLen.uint32 - metaLen
    if dataLen.uint64 != totalDataSize:
      return err(
        wantBlocksError(
          DataSizeMismatch,
          "Data size mismatch: expected " & $totalDataSize & ", got " & $dataLen,
        )
      )
    # Single allocation shared by all block views; copies are deferred to
    # toBlockDelivery.
    var sharedBuf = SharedBlocksBuffer(data: newSeqUninit[byte](totalDataSize.int))
    if totalDataSize > 0:
      await conn.readExactly(addr sharedBuf.data[0], totalDataSize.int)
    var response: WantBlocksResponse
    response.requestId = requestId
    response.treeCid = treeCid
    response.sharedBuffer = sharedBuf
    response.blocks = newSeq[BlockEntry](blocksMeta.len)
    # Assign each entry a contiguous slice of the shared buffer, in the same
    # order the sender appended the payloads.
    var offset = 0
    for i, bm in blocksMeta:
      let blockDataLen = bm.dataLen.int
      var proof: StorageMerkleProof
      if bm.proof.isSome:
        proof = bm.proof.get
      response.blocks[i] = BlockEntry(
        index: bm.index,
        cid: bm.cid,
        dataOffset: offset,
        dataLen: blockDataLen,
        proof: proof,
      )
      offset += blockDataLen
    return ok(response)
  except LPStreamError as e:
    return err(wantBlocksError(RequestFailed, e.msg))
proc readWantBlocksRequest*(
    conn: Connection, dataLen: int
): Future[WantBlocksResult[WantBlocksRequest]] {.async: (raises: [CancelledError]).} =
  ## Read `dataLen` bytes from `conn` and decode them as a WantBlocks request.
  try:
    if dataLen.uint32 > MaxWantBlocksRequestBytes:
      return err(wantBlocksError(RequestTooLarge, "Request too large: " & $dataLen))
    var payload = newSeqUninit[byte](dataLen)
    if dataLen > 0:
      await conn.readExactly(addr payload[0], dataLen)
    return decodeRequest(payload)
  except LPStreamError as e:
    return err(wantBlocksError(RequestFailed, e.msg))
proc toBlockDeliveryView*(
    entry: BlockEntry, treeCid: Cid, sharedBuf: SharedBlocksBuffer
): WantBlocksResult[BlockDeliveryView] =
  ## Build a zero-copy delivery view over `sharedBuf` for one block entry,
  ## rejecting negative or out-of-bounds (offset, length) slices.
  let
    dataOff = entry.dataOffset
    dataSz = entry.dataLen
  if dataOff < 0 or dataSz < 0:
    return err(
      wantBlocksError(
        DataSizeMismatch,
        "Invalid offset or length: offset=" & $entry.dataOffset & " len=" &
          $entry.dataLen,
      )
    )
  if dataOff + dataSz > sharedBuf.data.len:
    return err(
      wantBlocksError(
        DataSizeMismatch,
        "Block data exceeds buffer: offset=" & $entry.dataOffset & " len=" &
          $entry.dataLen & " bufLen=" & $sharedBuf.data.len,
      )
    )
  let view = BlockDeliveryView(
    cid: entry.cid,
    address: BlockAddress(treeCid: treeCid, index: entry.index.Natural),
    proof: some(entry.proof),
    sharedBuf: sharedBuf,
    dataOffset: dataOff,
    dataLen: dataSz,
  )
  ok(view)
proc toBlockDelivery*(view: BlockDeliveryView): BlockDelivery =
  ## Materialize an owned BlockDelivery by copying the viewed byte range
  ## out of the shared response buffer.
  var payload = new(seq[byte])
  payload[] = newSeqUninit[byte](view.dataLen)
  if view.dataLen > 0:
    copyMem(
      addr payload[][0], unsafeAddr view.sharedBuf.data[view.dataOffset], view.dataLen
    )
  BlockDelivery(
    blk: Block(cid: view.cid, data: payload), address: view.address, proof: view.proof
  )

View File

@ -0,0 +1,204 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/algorithm
import pkg/libp2p/cid
type
  BlockRange* = object
    ## A set of block-index ranges belonging to one dataset tree.
    treeCid*: Cid # root CID of the merkle tree the ranges refer to
    ranges*: seq[tuple[start: uint64, count: uint64]] # half-open runs [start, start+count)

  BlockAvailabilityKind* = enum
    bakUnknown # no availability information
    bakComplete # all blocks are available
    bakRanges # availability expressed as (start, count) runs
    bakBitmap # availability expressed as one bit per block

  BlockAvailability* = object
    ## Which blocks of a dataset are available, in one of several encodings.
    case kind*: BlockAvailabilityKind
    of bakUnknown:
      discard
    of bakComplete:
      discard
    of bakRanges:
      ranges*: seq[tuple[start: uint64, count: uint64]]
    of bakBitmap:
      # Bit i of byte b corresponds to block b*8 + i (see hasBlock).
      bitmap*: seq[byte]
      totalBlocks*: uint64 # number of valid bits in `bitmap`
proc unknown*(_: type BlockAvailability): BlockAvailability =
  ## No availability information.
  BlockAvailability(kind: bakUnknown)

proc complete*(_: type BlockAvailability): BlockAvailability =
  ## Every block is available.
  BlockAvailability(kind: bakComplete)

proc fromRanges*(
    _: type BlockAvailability, ranges: seq[tuple[start: uint64, count: uint64]]
): BlockAvailability =
  ## Availability expressed as explicit (start, count) runs.
  BlockAvailability(kind: bakRanges, ranges: ranges)

proc fromBitmap*(
    _: type BlockAvailability, bitmap: seq[byte], totalBlocks: uint64
): BlockAvailability =
  ## Availability expressed as a bitmap with `totalBlocks` valid bits.
  BlockAvailability(kind: bakBitmap, bitmap: bitmap, totalBlocks: totalBlocks)
proc hasBlock*(avail: BlockAvailability, index: uint64): bool =
  ## True when block `index` is known to be available in `avail`.
  case avail.kind
  of bakUnknown:
    false
  of bakComplete:
    true
  of bakRanges:
    var present = false
    for r in avail.ranges:
      # Skip ranges whose end would overflow uint64.
      if r.count > high(uint64) - r.start:
        continue
      # Equivalent to start <= index < start + count, without recomputing the end.
      if index >= r.start and index - r.start < r.count:
        present = true
        break
    present
  of bakBitmap:
    if index >= avail.totalBlocks:
      false
    else:
      let byteIdx = index div 8
      if byteIdx.int >= avail.bitmap.len:
        false
      else:
        let mask = 1'u8 shl (index mod 8)
        (avail.bitmap[byteIdx] and mask) != 0
proc hasRange*(avail: BlockAvailability, start: uint64, count: uint64): bool =
  ## True when the whole span [start, start + count) is known to be available.
  ## NOTE(review): for count == 0 this returns true for bakComplete/bakBitmap
  ## (vacuously) but may return false for bakRanges — confirm intended semantics.
  if count > high(uint64) - start:
    return false # start + count would overflow uint64
  case avail.kind
  of bakUnknown:
    false
  of bakComplete:
    true
  of bakRanges:
    let reqEnd = start + count
    # The requested span must fit entirely inside a single stored range.
    for (rangeStart, rangeCount) in avail.ranges:
      if rangeCount > high(uint64) - rangeStart:
        continue # skip ranges whose end would overflow
      let rangeEnd = rangeStart + rangeCount
      if start >= rangeStart and reqEnd <= rangeEnd:
        return true
    false
  of bakBitmap:
    # Every individual block in the span must be set.
    for i in start ..< start + count:
      if not avail.hasBlock(i):
        return false
    true
proc hasAnyInRange*(avail: BlockAvailability, start: uint64, count: uint64): bool =
  ## True when at least one block in [start, start + count) is available.
  if count > high(uint64) - start:
    return false # start + count would overflow uint64
  let reqEnd = start + count
  case avail.kind
  of bakUnknown:
    return false
  of bakComplete:
    return true
  of bakRanges:
    for (rangeStart, rangeCount) in avail.ranges:
      # Skip ranges whose end would overflow uint64.
      if rangeCount > high(uint64) - rangeStart:
        continue
      # Half-open intervals overlap iff each starts before the other ends.
      if start < rangeStart + rangeCount and rangeStart < reqEnd:
        return true
    return false
  of bakBitmap:
    for i in start ..< reqEnd:
      if avail.hasBlock(i):
        return true
    return false
proc mergeRanges(
    ranges: seq[tuple[start: uint64, count: uint64]]
): seq[tuple[start: uint64, count: uint64]] =
  ## Sort `ranges` by start and coalesce overlapping or adjacent runs into a
  ## minimal, sorted, non-overlapping list. Ranges whose end would overflow
  ## uint64 are skipped.
  ##
  ## Bug fix: the previous version returned `@[]` (discarding ALL ranges)
  ## whenever the first sorted range overflowed, while later overflowing
  ## ranges were merely skipped. Invalid ranges are now skipped uniformly.
  var sortedRanges = ranges
  sortedRanges.sort(
    proc(a, b: tuple[start: uint64, count: uint64]): int =
      cmp(a.start, b.start)
  )
  result = @[]
  var
    current: tuple[start: uint64, count: uint64]
    haveCurrent = false
  for r in sortedRanges:
    if r.count > high(uint64) - r.start:
      continue # skip ranges whose end would overflow uint64
    if not haveCurrent:
      current = r
      haveCurrent = true
      continue
    let currentEnd = current.start + current.count
    if r.start <= currentEnd:
      # Overlapping or adjacent: extend `current` if `r` reaches further.
      let rEnd = r.start + r.count
      if rEnd > currentEnd:
        current.count = rEnd - current.start
    else:
      result.add(current)
      current = r
  if haveCurrent:
    result.add(current)
proc merge*(current: BlockAvailability, other: BlockAvailability): BlockAvailability =
  ## merge by keeping the union of all known blocks
  ## Complete absorbs everything; Unknown is the identity element.
  ## Otherwise both sides are normalized to ranges and merged.
  if current.kind == bakComplete or other.kind == bakComplete:
    return BlockAvailability.complete()
  if current.kind == bakUnknown:
    return other
  if other.kind == bakUnknown:
    return current

  # Convert a bitmap availability into a sorted list of (start, count) runs.
  # Only called with kind == bakBitmap (see call sites below).
  proc bitmapToRanges(
      avail: BlockAvailability
  ): seq[tuple[start: uint64, count: uint64]] =
    result = @[]
    var
      inRange = false
      rangeStart: uint64 = 0
    for i in 0'u64 ..< avail.totalBlocks:
      let hasIt = avail.hasBlock(i)
      if hasIt and not inRange:
        rangeStart = i # a run of set bits starts here
        inRange = true
      elif not hasIt and inRange:
        result.add((rangeStart, i - rangeStart)) # run ended at i - 1
        inRange = false
    if inRange:
      # final run extends to the end of the bitmap
      result.add((rangeStart, avail.totalBlocks - rangeStart))

  # At this point both kinds are bakRanges or bakBitmap.
  let currentRanges =
    if current.kind == bakRanges:
      current.ranges
    else:
      bitmapToRanges(current)
  let otherRanges =
    if other.kind == bakRanges:
      other.ranges
    else:
      bitmapToRanges(other)
  return BlockAvailability.fromRanges(mergeRanges(currentRanges & otherRanges))

View File

@ -0,0 +1,54 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/algorithm
import ./protocol/constants
func isIndexInRanges*(
    index: uint64, ranges: openArray[(uint64, uint64)], sortedRanges: bool = false
): bool =
  ## Returns true when `index` falls inside one of the half-open
  ## `(start, count)` ranges, i.e. start <= index < start + count.
  ##
  ## The binary search only inspects the range with the greatest start that is
  ## <= index, so ranges are assumed non-overlapping (e.g. produced by a merge
  ## pass). Pass `sortedRanges = true` to skip the internal sort when the input
  ## is already sorted by start.
  ##
  ## Bug fix: the containment check previously computed `index < start + count`,
  ## which wraps on uint64 overflow and yielded a false negative for ranges
  ## extending to the top of the uint64 space. `index - start < count` is
  ## equivalent (start <= index holds here) and cannot wrap.
  func binarySearch(r: openArray[(uint64, uint64)]): bool =
    # Find the last range whose start is <= index.
    var
      lo = 0
      hi = r.len - 1
      candidate = -1
    while lo <= hi:
      let mid = (lo + hi) div 2
      if r[mid][0] <= index:
        candidate = mid
        lo = mid + 1
      else:
        hi = mid - 1
    if candidate >= 0:
      let (start, count) = r[candidate]
      # start <= index, so the subtraction cannot underflow and the
      # comparison cannot wrap.
      return index - start < count
    return false

  if ranges.len == 0:
    return false
  if sortedRanges:
    binarySearch(ranges)
  else:
    let sorted = @ranges.sorted(
      proc(a, b: (uint64, uint64)): int =
        cmp(a[0], b[0])
    )
    binarySearch(sorted)
proc computeBatchSize*(blockSize: uint32): uint32 =
  ## Number of blocks to request per batch for a given block size.
  ## Targets TargetBatchBytes worth of data per batch, at least MinBatchSize
  ## blocks, but never more than fits into one response
  ## (MaxWantBlocksResponseBytes).
  doAssert blockSize > 0, "computeBatchSize requires blockSize > 0"
  let
    optimal = TargetBatchBytes div blockSize
    maxFromBytes = MaxWantBlocksResponseBytes div blockSize
  # Bug fix: for very large blocks maxFromBytes can drop below MinBatchSize;
  # clamp(x, lo, hi) with lo > hi then returned MinBatchSize, a batch that
  # exceeds the response byte limit (or 0 when maxFromBytes was 0). Cap at
  # maxFromBytes instead, and always request at least one block.
  if maxFromBytes <= MinBatchSize:
    return max(maxFromBytes, 1'u32)
  clamp(optimal, MinBatchSize, maxFromBytes)

View File

@ -7,11 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/tables
import std/sugar
import std/hashes
export tables
import std/[tables, sugar, hashes]
{.push raises: [], gcsafe.}
@ -32,61 +28,34 @@ export errors, logutils, units, storagetypes
type
Block* = ref object of RootObj
cid*: Cid
data*: seq[byte]
data*: ref seq[byte]
BlockAddress* = object
case leaf*: bool
of true:
treeCid* {.serialize.}: Cid
index* {.serialize.}: Natural
else:
cid* {.serialize.}: Cid
treeCid* {.serialize.}: Cid
index* {.serialize.}: Natural
logutils.formatIt(LogFormat.textLines, BlockAddress):
if it.leaf:
"treeCid: " & shortLog($it.treeCid) & ", index: " & $it.index
else:
"cid: " & shortLog($it.cid)
"treeCid: " & shortLog($it.treeCid) & ", index: " & $it.index
logutils.formatIt(LogFormat.json, BlockAddress):
%it
proc `==`*(a, b: BlockAddress): bool =
a.leaf == b.leaf and (
if a.leaf:
a.treeCid == b.treeCid and a.index == b.index
else:
a.cid == b.cid
)
a.treeCid == b.treeCid and a.index == b.index
proc `$`*(a: BlockAddress): string =
if a.leaf:
"treeCid: " & $a.treeCid & ", index: " & $a.index
else:
"cid: " & $a.cid
"treeCid: " & $a.treeCid & ", index: " & $a.index
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc cidOrTreeCid*(a: BlockAddress): Cid =
if a.leaf: a.treeCid else: a.cid
proc address*(b: Block): BlockAddress =
BlockAddress(leaf: false, cid: b.cid)
proc init*(_: type BlockAddress, cid: Cid): BlockAddress =
BlockAddress(leaf: false, cid: cid)
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
proc init*(_: type BlockAddress, treeCid: Cid, index: Natural): BlockAddress =
BlockAddress(leaf: true, treeCid: treeCid, index: index)
BlockAddress(treeCid: treeCid, index: index)
proc `$`*(b: Block): string =
result &= "cid: " & $b.cid
result &= "\ndata: " & string.fromBytes(b.data)
result &= "\ndata: " & string.fromBytes(b.data[])
func new*(
T: type Block,
@ -96,7 +65,6 @@ func new*(
codec = BlockCodec,
): ?!Block =
## creates a new block for both storage and network IO
##
let
hash = ?MultiHash.digest($mcodec, data).mapFailure
@ -105,13 +73,14 @@ func new*(
# TODO: If the hash is `>=` to the data,
# use the Cid as a container!
Block(cid: cid, data: @data).success
var dataRef: ref seq[byte]
new(dataRef)
dataRef[] = @data
Block(cid: cid, data: dataRef).success
proc new*(
T: type Block, cid: Cid, data: openArray[byte], verify: bool = true
): ?!Block =
proc new*(T: type Block, cid: Cid, data: sink seq[byte], verify: bool = true): ?!Block =
## creates a new block for both storage and network IO
##
## takes ownership of the data seq to avoid copying
if verify:
let
@ -121,7 +90,16 @@ proc new*(
if computedCid != cid:
return "Cid doesn't match the data".failure
return Block(cid: cid, data: @data).success
var dataRef: ref seq[byte]
new(dataRef)
dataRef[] = move(data)
return Block(cid: cid, data: dataRef).success
proc new*(
T: type Block, cid: Cid, data: openArray[byte], verify: bool = true
): ?!Block =
## creates a new block for both storage and network IO
Block.new(cid, @data, verify)
proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block =
emptyCid(version, hcodec, BlockCodec).flatMap(

View File

@ -11,7 +11,7 @@
import std/os
{.push warning[UnusedImport]: on.}
{.push warning[UnusedImport]: off.}
import std/terminal # Is not used in tests
{.pop.}
@ -44,7 +44,7 @@ import ./utils
import ./nat
import ./utils/natutils
from ./blockexchange/engine/pendingblocks import DefaultBlockRetries
from ./blockexchange/engine/downloadmanager import DefaultBlockRetries
export units, net, storagetypes, logutils, completeCmdArg, parseCmdArg, NatConfig
@ -542,11 +542,27 @@ proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} =
if not setTopicState(topicName, settings.state, settings.logLevel):
warn "Unrecognized logging topic", topic = topicName
proc setupLogging*(conf: StorageConf) =
proc openLogFile(conf: StorageConf): Option[IoHandle] =
if logFilePath =? conf.logFile and logFilePath.len > 0:
let logFileHandle =
openFile(logFilePath, {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate})
if logFileHandle.isErr:
error "failed to open log file",
path = logFilePath, errorCode = $logFileHandle.error
else:
return logFileHandle.option
return IoHandle.none
proc setupLogging*(conf: StorageConf): Option[IoHandle] =
let ioHandle =
if conf.logFile.isSome:
conf.openLogFile()
else:
IoHandle.none
when defaultChroniclesStream.outputs.type.arity != 3:
warn "Logging configuration options not enabled in the current build"
else:
var logFile: ?IoHandle
proc noOutput(logLevel: LogLevel, msg: LogOutputStr) =
discard
@ -564,20 +580,13 @@ proc setupLogging*(conf: StorageConf) =
writeAndFlush(stdout, stripAnsi(msg))
proc fileFlush(logLevel: LogLevel, msg: LogOutputStr) =
if file =? logFile:
if file =? ioHandle:
if error =? file.writeFile(stripAnsi(msg).toBytes).errorOption:
error "failed to write to log file", errorCode = $error
defaultChroniclesStream.outputs[2].writer = noOutput
if logFilePath =? conf.logFile and logFilePath.len > 0:
let logFileHandle =
openFile(logFilePath, {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate})
if logFileHandle.isErr:
error "failed to open log file",
path = logFilePath, errorCode = $logFileHandle.error
else:
logFile = logFileHandle.option
defaultChroniclesStream.outputs[2].writer = fileFlush
if ioHandle.isSome:
defaultChroniclesStream.outputs[2].writer = fileFlush
defaultChroniclesStream.outputs[1].writer = noOutput
@ -606,14 +615,19 @@ proc setupLogging*(conf: StorageConf) =
else:
defaultChroniclesStream.outputs[0].writer = writer
proc setupMetrics*(config: StorageConf) =
return ioHandle
proc setupMetrics*(config: StorageConf): ?!void =
if config.metricsEnabled:
let metricsAddress = config.metricsAddress
notice "Starting metrics HTTP server",
url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics"
let server = MetricsHttpServerRef.new($metricsAddress, config.metricsPort).valueOr:
return failure($error)
try:
startMetricsHttpServer($metricsAddress, config.metricsPort)
except CatchableError as exc:
raiseAssert exc.msg
except Exception as exc:
raiseAssert exc.msg # TODO fix metrics
waitFor server.start()
except MetricsError as exc:
return failure(exc.msg)
except CancelledError:
return failure("Metrics server start was cancelled")
success()

View File

@ -23,8 +23,37 @@ type
StorageError* = object of CatchableError # base Storage error
StorageResult*[T] = Result[T, ref StorageError]
WantBlocksErrorKind* = enum
RequestTooShort
RequestTooLarge
RequestTruncated
InvalidCid
InvalidCodec
MetadataTooShort
MetadataTruncated
ResponseTooLarge
MetadataTooLarge
DataSizeMismatch
ProofTooShort
ProofTruncated
ProofCreationFailed
ProofPathTooLarge
ProofDecodeFailed
TooManyBlocks
NoConnection
ConnectionClosed
RequestFailed
WantBlocksError* = object of StorageError
kind*: WantBlocksErrorKind
WantBlocksResult*[T] = Result[T, ref WantBlocksError]
FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]]
proc wantBlocksError*(kind: WantBlocksErrorKind, msg: string): ref WantBlocksError =
(ref WantBlocksError)(kind: kind, msg: msg)
template mapFailure*[T, V, E](
exp: Result[T, V], exc: typedesc[E]
): Result[T, ref CatchableError] =

View File

@ -92,6 +92,7 @@ import std/sugar
import std/typetraits
import pkg/chronicles except toJson, `%`
import json_serialization/writer as json_serialization_writer
from pkg/chronos import TransportAddress
from pkg/libp2p import Cid, MultiAddress, `$`
import pkg/questionable
@ -170,33 +171,28 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
return jObj
json.`%`(body)
proc setProperty*(r: var JsonRecord, key: string, res: ?!T) =
proc writeValue*(w: var JsonWriter, res: ?!T) {.raises: [IOError].} =
var it {.inject, used.}: T
setProperty(r, key, res.formatJsonResult)
w.writeValue(res.formatJsonResult)
proc setProperty*(r: var JsonRecord, key: string, opt: ?T) =
proc writeValue*(w: var JsonWriter, opt: ?T) {.raises: [IOError].} =
var it {.inject, used.}: T
let v = opt.formatJsonOption
setProperty(r, key, v)
w.writeValue(opt.formatJsonOption)
proc setProperty*(r: var JsonRecord, key: string, opts: seq[?T]) =
proc writeValue*(w: var JsonWriter, opts: seq[?T]) {.raises: [IOError].} =
var it {.inject, used.}: T
let v = opts.map(opt => opt.formatJsonOption)
setProperty(r, key, json.`%`(v))
w.writeValue(json.`%`(v))
proc setProperty*(
r: var JsonRecord, key: string, val: seq[T]
) {.raises: [ValueError, IOError].} =
proc writeValue*(w: var JsonWriter, val: seq[T]) {.raises: [IOError].} =
var it {.inject, used.}: T
let v = val.map(it => body)
setProperty(r, key, json.`%`(v))
w.writeValue(json.`%`(v))
proc setProperty*(
r: var JsonRecord, key: string, val: T
) {.raises: [ValueError, IOError].} =
proc writeValue*(w: var JsonWriter, val: T) {.raises: [IOError].} =
var it {.inject, used.}: T = val
let v = body
setProperty(r, key, json.`%`(v))
w.writeValue(json.`%`(v))
elif format == LogFormat.textLines:
proc formatTextLineOption*(val: ?T): string =
@ -210,29 +206,29 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
return "Error: " & error.msg
$(body)
proc setProperty*(r: var TextLineRecord, key: string, res: ?!T) =
proc setProperty*(r: var TextLogRecord, key: string, res: ?!T) =
var it {.inject, used.}: T
setProperty(r, key, res.formatTextLineResult)
proc setProperty*(r: var TextLineRecord, key: string, opt: ?T) =
proc setProperty*(r: var TextLogRecord, key: string, opt: ?T) =
var it {.inject, used.}: T
let v = opt.formatTextLineOption
setProperty(r, key, v)
proc setProperty*(r: var TextLineRecord, key: string, opts: seq[?T]) =
proc setProperty*(r: var TextLogRecord, key: string, opts: seq[?T]) =
var it {.inject, used.}: T
let v = opts.map(opt => opt.formatTextLineOption)
setProperty(r, key, v.formatTextLineSeq)
proc setProperty*(
r: var TextLineRecord, key: string, val: seq[T]
r: var TextLogRecord, key: string, val: seq[T]
) {.raises: [ValueError, IOError].} =
var it {.inject, used.}: T
let v = val.map(it => body)
setProperty(r, key, v.formatTextLineSeq)
proc setProperty*(
r: var TextLineRecord, key: string, val: T
r: var TextLogRecord, key: string, val: T
) {.raises: [ValueError, IOError].} =
var it {.inject, used.}: T = val
let v = body

View File

@ -1,4 +1,5 @@
import ./manifest/coders
import ./manifest/manifest
import ./manifest/protocol
export manifest, coders
export manifest, coders, protocol

View File

@ -14,7 +14,6 @@ import times
{.push raises: [].}
import std/tables
import std/sequtils
import pkg/libp2p
import pkg/questionable
@ -38,31 +37,32 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
#
# ```protobuf
# Message Header {
# optional bytes treeCid = 1; # cid (root) of the tree
# optional uint32 blockSize = 2; # size of a single block
# optional uint64 datasetSize = 3; # size of the dataset
# optional codec: MultiCodec = 4; # Dataset codec
# optional hcodec: MultiCodec = 5 # Multihash codec
# optional version: CidVersion = 6; # Cid version
# optional filename: ?string = 7; # original filename
# optional mimetype: ?string = 8; # original mimetype
# required uint32 manifestVersion = 1; # manifest format version
# optional bytes treeCid = 2; # cid (root) of the tree
# optional uint32 blockSize = 3; # size of a single block
# optional uint64 datasetSize = 4; # size of the dataset
# optional codec: MultiCodec = 5; # Dataset codec
# optional hcodec: MultiCodec = 6; # Multihash codec
# optional version: CidVersion = 7; # Cid version
# optional filename: string = 8; # original filename
# optional mimetype: string = 9; # original mimetype
# }
# ```
#
# var treeRootVBuf = initVBuffer()
var header = initProtoBuffer()
header.write(1, manifest.treeCid.data.buffer)
header.write(2, manifest.blockSize.uint32)
header.write(3, manifest.datasetSize.uint64)
header.write(4, manifest.codec.uint32)
header.write(5, manifest.hcodec.uint32)
header.write(6, manifest.version.uint32)
header.write(1, manifest.manifestVersion)
header.write(2, manifest.treeCid.data.buffer)
header.write(3, manifest.blockSize.uint32)
header.write(4, manifest.datasetSize.uint64)
header.write(5, manifest.codec.uint32)
header.write(6, manifest.hcodec.uint32)
header.write(7, manifest.version.uint32)
if manifest.filename.isSome:
header.write(7, manifest.filename.get())
header.write(8, manifest.filename.get())
if manifest.mimetype.isSome:
header.write(8, manifest.mimetype.get())
header.write(9, manifest.mimetype.get())
pbNode.write(1, header) # set the treeCid as the data field
pbNode.finish()
@ -82,6 +82,7 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
hcodec: uint32
version: uint32
blockSize: uint32
manifestVersion: uint32
filename: string
mimetype: string
@ -90,30 +91,36 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
return failure("Unable to decode `Header` from dag-pb manifest!")
# Decode `Header` contents
if pbHeader.getField(1, treeCidBuf).isErr:
if pbHeader.getField(1, manifestVersion).isErr:
return failure("Unable to decode `manifestVersion` from manifest!")
if pbHeader.getField(2, treeCidBuf).isErr:
return failure("Unable to decode `treeCid` from manifest!")
if pbHeader.getField(2, blockSize).isErr:
if pbHeader.getField(3, blockSize).isErr:
return failure("Unable to decode `blockSize` from manifest!")
if pbHeader.getField(3, datasetSize).isErr:
if pbHeader.getField(4, datasetSize).isErr:
return failure("Unable to decode `datasetSize` from manifest!")
if pbHeader.getField(4, codec).isErr:
if pbHeader.getField(5, codec).isErr:
return failure("Unable to decode `codec` from manifest!")
if pbHeader.getField(5, hcodec).isErr:
if pbHeader.getField(6, hcodec).isErr:
return failure("Unable to decode `hcodec` from manifest!")
if pbHeader.getField(6, version).isErr:
if pbHeader.getField(7, version).isErr:
return failure("Unable to decode `version` from manifest!")
if pbHeader.getField(7, filename).isErr:
if pbHeader.getField(8, filename).isErr:
return failure("Unable to decode `filename` from manifest!")
if pbHeader.getField(8, mimetype).isErr:
if pbHeader.getField(9, mimetype).isErr:
return failure("Unable to decode `mimetype` from manifest!")
if manifestVersion != 0:
return failure("Unsupported manifest version: " & $manifestVersion)
let treeCid = ?Cid.init(treeCidBuf).mapFailure
var filenameOption = if filename.len == 0: string.none else: filename.some
@ -128,6 +135,7 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
codec = codec.MultiCodec,
filename = filenameOption,
mimetype = mimetypeOption,
manifestVersion = manifestVersion,
)
self.success
@ -139,4 +147,4 @@ func decode*(_: type Manifest, blk: Block): ?!Manifest =
if not ?blk.cid.isManifest:
return failure "Cid not a manifest codec"
Manifest.decode(blk.data)
Manifest.decode(blk.data[])

View File

@ -25,6 +25,7 @@ import ../logutils
# TODO: Manifest should be reworked to more concrete types,
# perhaps using inheritance
type Manifest* = ref object of RootObj
manifestVersion {.serialize.}: uint32 # Manifest format version
treeCid {.serialize.}: Cid # Root of the merkle tree
datasetSize {.serialize.}: NBytes # Total size of all blocks
blockSize {.serialize.}: NBytes
@ -35,6 +36,10 @@ type Manifest* = ref object of RootObj
filename {.serialize.}: ?string # The filename of the content uploaded (optional)
mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
type ManifestDescriptor* = ref object
manifest*: Manifest
manifestCid*: Cid
############################################################
# Accessors
############################################################
@ -66,6 +71,9 @@ func filename*(self: Manifest): ?string =
func mimetype*(self: Manifest): ?string =
self.mimetype
func manifestVersion*(self: Manifest): uint32 =
self.manifestVersion
############################################################
# Operations on block list
############################################################
@ -83,7 +91,8 @@ func isManifest*(mc: MultiCodec): ?!bool =
func `==`*(a, b: Manifest): bool =
(a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and
(a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and
(a.codec == b.codec) and (a.filename == b.filename) and (a.mimetype == b.mimetype)
(a.codec == b.codec) and (a.filename == b.filename) and (a.mimetype == b.mimetype) and
(a.manifestVersion == b.manifestVersion)
func `$`*(self: Manifest): string =
result =
@ -113,6 +122,7 @@ func new*(
codec = BlockCodec,
filename: ?string = string.none,
mimetype: ?string = string.none,
manifestVersion: uint32 = 0,
): Manifest =
T(
treeCid: treeCid,
@ -123,6 +133,7 @@ func new*(
hcodec: hcodec,
filename: filename,
mimetype: mimetype,
manifestVersion: manifestVersion,
)
func new*(T: type Manifest, data: openArray[byte]): ?!Manifest =

View File

@ -0,0 +1,253 @@
## Logos Storage
## Copyright (c) 2026 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import pkg/chronos
import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import pkg/stew/endians2
import ../blocktype as bt
import ../stores/blockstore
import ../discovery
import ../logutils
import ../errors
import ./manifest
import ./coders
export manifest, coders
logScope:
topics = "storage manifestprotocol"
const
  ManifestProtocolCodec* = "/storage/manifest/1.0.0" # libp2p protocol id
  ManifestMaxCidSize = 512 # max CID length accepted in a request
  ManifestMaxDataSize = 65536 # 64KB — max manifest payload in a response
  DefaultManifestRetries* = 10
  DefaultManifestRetryDelay* = 3.seconds
  DefaultManifestFetchTimeout* = 30.seconds

type
  ManifestProtocol* = ref object of LPProtocol
    ## libp2p protocol handler for serving and fetching dataset manifests
    ## by CID.
    switch*: Switch # used to dial peers when fetching
    localStore*: BlockStore # consulted for manifest blocks when serving
    discovery*: Discovery
    retries*: int
    retryDelay*: Duration
    fetchTimeout*: Duration

  ManifestFetchStatus* = enum
    ## Status byte carried in a manifest response frame.
    Found = 0
    NotFound = 1
proc writeManifestResponse(
    conn: Connection, status: ManifestFetchStatus, data: seq[byte] = @[]
) {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Send one length-prefixed manifest response on `conn`.
  ## Wire layout: [contentLen: u32 LE][status: 1 byte][manifest bytes].
  let payloadLen = 1 + data.len
  var frame = newSeqUninit[byte](4 + payloadLen)
  let lenLE = payloadLen.uint32.toLE
  copyMem(addr frame[0], unsafeAddr lenLE, 4)
  frame[4] = status.uint8
  if data.len > 0:
    copyMem(addr frame[5], unsafeAddr data[0], data.len)
  await conn.write(frame)
proc readManifestResponse(
    conn: Connection
): Future[?!(ManifestFetchStatus, seq[byte])] {.
    async: (raises: [CancelledError, LPStreamError])
.} =
  ## Read one length-prefixed manifest response from `conn`.
  ## Wire layout: [contentLen: u32 LE][status: 1 byte][manifest bytes].
  ## Fails on a content length outside [1, 1 + ManifestMaxDataSize] or an
  ## unknown status byte.
  var lenBuf: array[4, byte]
  await conn.readExactly(addr lenBuf[0], 4)
  let contentLen = uint32.fromBytes(lenBuf, littleEndian).int
  if contentLen < 1:
    return failure("Manifest response too short: " & $contentLen)
  if contentLen > 1 + ManifestMaxDataSize:
    return failure("Manifest response too large: " & $contentLen)
  var content = newSeq[byte](contentLen)
  await conn.readExactly(addr content[0], contentLen)
  let statusByte = content[0]
  if statusByte > ManifestFetchStatus.high.uint8:
    return failure("Invalid manifest response status: " & $statusByte)
  let
    status = ManifestFetchStatus(statusByte)
    # Everything after the status byte is the (possibly empty) manifest payload.
    data =
      if contentLen > 1:
        content[1 ..< contentLen]
      else:
        newSeq[byte]()
  return success (status, data)
proc handleManifestRequest(
    self: ManifestProtocol, conn: Connection
) {.async: (raises: [CancelledError]).} =
  ## Serve one incoming manifest request on `conn`.
  ## Request wire layout: [cidLen: u16 LE][cid bytes].
  ## Replies Found plus the manifest block data when the CID is in the local
  ## store; NotFound otherwise (including malformed requests).
  try:
    var cidLenBuf: array[2, byte]
    await conn.readExactly(addr cidLenBuf[0], 2)
    let cidLen = uint16.fromBytes(cidLenBuf, littleEndian).int
    if cidLen == 0 or cidLen > ManifestMaxCidSize:
      warn "Invalid CID length in manifest request", cidLen
      await writeManifestResponse(conn, ManifestFetchStatus.NotFound)
      return
    var cidBuf = newSeq[byte](cidLen)
    await conn.readExactly(addr cidBuf[0], cidLen)
    let cid = Cid.init(cidBuf).valueOr:
      warn "Invalid CID in manifest request"
      await writeManifestResponse(conn, ManifestFetchStatus.NotFound)
      return
    without blk =? await self.localStore.getBlock(cid), err:
      trace "Manifest not found locally", cid, err = err.msg
      await writeManifestResponse(conn, ManifestFetchStatus.NotFound)
      return
    await writeManifestResponse(conn, ManifestFetchStatus.Found, blk.data[])
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    # Best-effort server loop: log and drop rather than crash the handler.
    warn "Error handling manifest request", exc = exc.msg
proc fetchManifestFromPeer(
    self: ManifestProtocol, peer: PeerRecord, cid: Cid
): Future[?!bt.Block] {.async: (raises: [CancelledError]).} =
  ## Dial `peer` on the manifest protocol, request the block identified by
  ## `cid` and return it after verifying the data against the CID.
  ## The connection is always closed before returning; errors (including
  ## dial failures) are returned as failures rather than raised.
  var conn: Connection
  try:
    conn = await self.switch.dial(
      peer.peerId, peer.addresses.mapIt(it.address), ManifestProtocolCodec
    )
    # Request frame: 2-byte little-endian CID length followed by CID bytes.
    let cidBytes = cid.data.buffer
    var reqBuf = newSeqUninit[byte](2 + cidBytes.len)
    let cidLenLE = cidBytes.len.uint16.toLE
    copyMem(addr reqBuf[0], unsafeAddr cidLenLE, 2)
    if cidBytes.len > 0:
      copyMem(addr reqBuf[2], unsafeAddr cidBytes[0], cidBytes.len)
    await conn.write(reqBuf)
    without (status, data) =? await readManifestResponse(conn), err:
      return failure(err)
    if status == ManifestFetchStatus.NotFound:
      return failure(
        newException(BlockNotFoundError, "Manifest not found on peer " & $peer.peerId)
      )
    # verify = true recomputes the hash so a malicious/corrupt peer cannot
    # hand us data that does not match the requested CID.
    without blk =? bt.Block.new(cid, data, verify = true), err:
      return failure("Manifest CID verification failed: " & err.msg)
    return success blk
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    return failure("Error fetching manifest from peer " & $peer.peerId & ": " & exc.msg)
  finally:
    # conn stays nil when the dial itself failed; only close live connections.
    if not conn.isNil:
      await conn.close()
proc fetchManifest*(
    self: ManifestProtocol, cid: Cid
): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
  ## Fetch and decode the manifest identified by `cid`.
  ##
  ## Checks the local store first. On a miss, runs up to `self.retries`
  ## discovery rounds (`self.retryDelay` apart): each round queries the DHT
  ## for providers and attempts a direct fetch from each, bounded per peer
  ## by `self.fetchTimeout`. A successfully fetched block is cached locally
  ## (best effort) and decoded before being returned.
  if err =? cid.isManifest.errorOption:
    # Bug fix: the message was a plain string literal containing the text
    # "{$cid}" — no fmt/& interpolation ever happened. Concatenate instead.
    return failure("CID has invalid content type for manifest " & $cid)
  trace "Fetching manifest", cid
  without localBlk =? await self.localStore.getBlock(cid), err:
    if not (err of BlockNotFoundError):
      return failure err
    trace "Manifest not in local store, starting discovery loop", cid
    var lastErr = err
    for attempt in 0 ..< self.retries:
      trace "Manifest fetch attempt", cid, attempt, maxRetries = self.retries
      let providers = await self.discovery.find(cid)
      if providers.len > 0:
        for provider in providers:
          let fetchFut = self.fetchManifestFromPeer(provider.data, cid)
          var blkResult: ?!bt.Block
          # withTimeout cancels fetchFut on expiry, so a timed-out peer does
          # not leak an in-flight fetch.
          if (await fetchFut.withTimeout(self.fetchTimeout)):
            blkResult = await fetchFut
          else:
            trace "Manifest fetch from peer timed out", cid, peer = provider.data.peerId
            continue
          without blk =? blkResult, fetchErr:
            trace "Failed to fetch manifest from peer",
              cid, peer = provider.data.peerId, err = fetchErr.msg
            lastErr = fetchErr
            continue
          # Cache locally so subsequent fetches don't hit the network; a
          # failure here is non-fatal since we already hold the block.
          if putErr =? (await self.localStore.putBlock(blk)).errorOption:
            warn "Failed to store fetched manifest locally", cid, err = putErr.msg
          without manifest =? Manifest.decode(blk), err:
            return failure("Unable to decode manifest: " & err.msg)
          return success manifest
      else:
        trace "No providers found for manifest, will retry", cid, attempt
      # Back off between rounds, but don't sleep after the final attempt.
      if attempt < self.retries - 1:
        await sleepAsync(self.retryDelay)
    return failure(
      newException(
        BlockNotFoundError,
        "Failed to fetch manifest " & $cid & " after " & $self.retries & " attempts: " &
          lastErr.msg,
      )
    )
  without manifest =? Manifest.decode(localBlk), err:
    return failure("Unable to decode manifest: " & err.msg)
  return success manifest
proc new*(
    T: type ManifestProtocol,
    switch: Switch,
    localStore: BlockStore,
    discovery: Discovery,
    retries: int = DefaultManifestRetries,
    retryDelay: Duration = DefaultManifestRetryDelay,
    fetchTimeout: Duration = DefaultManifestFetchTimeout,
): ManifestProtocol =
  ## Construct a `ManifestProtocol`, wiring its libp2p stream handler and
  ## codec. Mount the returned instance on a switch to serve requests.
  let proto = ManifestProtocol(
    switch: switch,
    localStore: localStore,
    discovery: discovery,
    retries: retries,
    retryDelay: retryDelay,
    fetchTimeout: fetchTimeout,
  )
  proto.handler = proc(
      conn: Connection, protocolId: string
  ): Future[void] {.async: (raises: [CancelledError]).} =
    await proto.handleManifestRequest(conn)
  proto.codec = ManifestProtocolCodec
  proto

View File

@ -1,5 +1,4 @@
import ./merkletree/merkletree
import ./merkletree/coders
import ./merkletree/coders
export merkletree, coders

View File

@ -10,7 +10,7 @@
{.push raises: [].}
import std/bitops
import std/[atomics, sequtils]
import std/sequtils
import pkg/questionable
import pkg/questionable/results

View File

@ -9,8 +9,7 @@
{.push raises: [].}
import
std/[options, os, strutils, times, net, atomics],
stew/[objects],
std/[options, os, times, net, atomics, exitprocs],
nat_traversal/[miniupnpc, natpmp],
json_serialization/std/net,
results
@ -329,7 +328,7 @@ proc redirectPorts*(
# atexit() in disguise
if natThreads.len == 1:
# we should register the thread termination function only once
addQuitProc(stopNatThreads)
addExitProc(stopNatThreads)
except Exception as exc:
warn "Failed to create NAT port mapping renewal thread", exc = exc.msg

View File

@ -12,7 +12,6 @@
import std/options
import std/sequtils
import std/strformat
import std/sugar
import times
import pkg/taskpools
@ -47,11 +46,6 @@ export logutils
logScope:
topics = "storage node"
const
DefaultFetchBatch = 1024
MaxOnBatchBlocks = 128
BatchRefillThreshold = 0.75 # Refill when 75% of window completes
type
StorageNode* = object
switch: Switch
@ -59,6 +53,7 @@ type
networkStore: NetworkStore
engine: BlockExcEngine
discovery: Discovery
manifestProto: ManifestProtocol
clock*: Clock
taskPool: Taskpool
trackedFutures: TrackedFutures
@ -66,8 +61,6 @@ type
StorageNodeRef* = ref StorageNode
OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].}
BatchProc* =
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
OnBlockStoredProc = proc(chunk: seq[byte]): void {.gcsafe, raises: [].}
func switch*(self: StorageNodeRef): Switch =
@ -102,27 +95,8 @@ proc storeManifest*(
proc fetchManifest*(
self: StorageNodeRef, cid: Cid
): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
## Fetch and decode a manifest block
##
if err =? cid.isManifest.errorOption:
return failure "CID has invalid content type for manifest {$cid}"
trace "Retrieving manifest for cid", cid
without blk =? await self.networkStore.getBlock(BlockAddress.init(cid)), err:
trace "Error retrieve manifest block", cid, err = err.msg
return failure err
trace "Decoding manifest for cid", cid
without manifest =? Manifest.decode(blk), err:
trace "Unable to decode as manifest", err = err.msg
return failure("Unable to decode as manifest")
trace "Decoded manifest", cid
return manifest.success
## Fetch and decode a manifest
return await self.manifestProto.fetchManifest(cid)
proc findPeer*(self: StorageNodeRef, peerId: PeerId): Future[?PeerRecord] {.async.} =
## Find peer using the discovery service from the given StorageNode
@ -157,118 +131,70 @@ proc updateExpiry*(
return success()
proc fetchBatched*(
self: StorageNodeRef,
cid: Cid,
iter: Iter[int],
batchSize = DefaultFetchBatch,
onBatch: BatchProc = nil,
fetchLocal = true,
): Future[?!void] {.async: (raises: [CancelledError]), gcsafe.} =
## Fetch blocks in batches of `batchSize`
##
# TODO: doesn't work if callee is annotated with async
# let
# iter = iter.map(
# (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
# )
# Sliding window: maintain batchSize blocks in-flight
let
refillThreshold = int(float(batchSize) * BatchRefillThreshold)
refillSize = max(refillThreshold, 1)
maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks)
var
blockData: seq[bt.Block]
failedBlocks = 0
successfulBlocks = 0
completedInWindow = 0
var addresses = newSeqOfCap[BlockAddress](batchSize)
for i in 0 ..< batchSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
addresses.add(address)
var blockResults = await self.networkStore.getBlocks(addresses)
while not blockResults.finished:
without blk =? await blockResults.next(), err:
inc(failedBlocks)
continue
inc(successfulBlocks)
inc(completedInWindow)
if not onBatch.isNil:
blockData.add(blk)
if blockData.len >= maxCallbackBlocks:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
blockData = @[]
if completedInWindow >= refillThreshold and not iter.finished:
var refillAddresses = newSeqOfCap[BlockAddress](refillSize)
for i in 0 ..< refillSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
refillAddresses.add(address)
if refillAddresses.len > 0:
blockResults =
chain(blockResults, await self.networkStore.getBlocks(refillAddresses))
completedInWindow = 0
if failedBlocks > 0:
return failure("Some blocks failed (Result) to fetch (" & $failedBlocks & ")")
if not onBatch.isNil and blockData.len > 0:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
success()
proc fetchBatched*(
self: StorageNodeRef,
manifest: Manifest,
batchSize = DefaultFetchBatch,
onBatch: BatchProc = nil,
fetchLocal = true,
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
## Fetch manifest in batches of `batchSize`
##
trace "Fetching blocks in batches of",
size = batchSize, blocksCount = manifest.blocksCount
let iter = Iter[int].new(0 ..< manifest.blocksCount)
self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal)
proc fetchDatasetAsync*(
self: StorageNodeRef, manifest: Manifest, fetchLocal = true
): Future[void] {.async: (raises: []).} =
## Asynchronously fetch a dataset in the background.
## This task will be tracked and cleaned up on node shutdown.
##
self: StorageNodeRef,
md: ManifestDescriptor,
fetchLocal = true,
selectionPolicy: SelectionPolicy = spSequential,
): Future[?!void] {.async: (raises: [CancelledError]).} =
let download = ?self.engine.startTreeDownloadOpaque(
md, selectionPolicy = selectionPolicy, fetchLocal = fetchLocal
)
try:
if err =? (
await self.fetchBatched(
manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal
)
).errorOption:
error "Unable to fetch blocks", err = err.msg
except CancelledError as exc:
trace "Cancelled fetching blocks", exc = exc.msg
trace "Starting tree download",
treeCid = md.manifest.treeCid, totalBlocks = md.manifest.blocksCount
return await download.waitForComplete()
finally:
self.engine.releaseDownload(download)
proc fetchDatasetAsyncTask*(self: StorageNodeRef, manifest: Manifest) =
## Start fetching a dataset in the background.
## The task will be tracked and cleaned up on node shutdown.
##
self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
proc cancelBackgroundDownload*(
self: StorageNodeRef, downloadId: uint64, cid: Cid
): bool =
self.engine.cancelBackgroundDownload(downloadId, cid)
proc getDownloadProgress*(
self: StorageNodeRef, downloadId: uint64, cid: Cid
): Option[DownloadProgress] =
self.engine.getDownloadProgress(downloadId, cid)
proc startBackgroundDownload*(
self: StorageNodeRef,
md: ManifestDescriptor,
selectionPolicy: SelectionPolicy = spSequential,
): Future[?!uint64] {.async: (raises: [CancelledError]).} =
let
treeCid = md.manifest.treeCid
existing = self.engine.downloadManager.getBackgroundDownload(treeCid)
if existing.isSome:
return success(existing.get().id)
let
download = ?self.engine.startTreeDownloadOpaque(
md, selectionPolicy = selectionPolicy, isBackground = true
)
downloadId = download.downloadId
proc waitForCompleteTask(): Future[void] {.async: (raises: []).} =
try:
discard await download.waitForComplete()
except CancelledError:
trace "Background download cancelled", treeCid = treeCid, downloadId
finally:
self.engine.releaseDownload(download)
self.trackedFutures.track(waitForCompleteTask())
return success(downloadId)
proc fetchDatasetAsyncTask*(self: StorageNodeRef, md: ManifestDescriptor) =
## Kept for C library compatibility.
proc fetchTask(): Future[void] {.async: (raises: []).} =
try:
discard await self.startBackgroundDownload(md, selectionPolicy = spRandomWindow)
except CancelledError:
trace "Background dataset fetch cancelled", treeCid = md.manifest.treeCid
self.trackedFutures.track(fetchTask())
proc streamSingleBlock(
self: StorageNodeRef, cid: Cid
@ -279,14 +205,14 @@ proc streamSingleBlock(
let stream = BufferStream.new()
without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
without blk =? (await self.networkStore.localStore.getBlock(cid)), err:
return failure(err)
proc streamOneBlock(): Future[void] {.async: (raises: []).} =
try:
defer:
await stream.pushEof()
await stream.pushData(blk.data)
await stream.pushData(blk.data[])
except CancelledError as exc:
trace "Streaming block cancelled", cid, exc = exc.msg
except LPStreamError as exc:
@ -296,16 +222,33 @@ proc streamSingleBlock(
LPStream(stream).success
proc streamEntireDataset(
self: StorageNodeRef, manifest: Manifest, manifestCid: Cid
self: StorageNodeRef, md: ManifestDescriptor, fetchLocal: bool = false
): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
## Streams the contents of the entire dataset described by the manifest.
##
trace "Retrieving blocks from manifest", manifestCid
trace "Retrieving blocks from manifest", manifestCid = md.manifestCid
let
treeCid = md.manifest.treeCid
download = ?self.engine.startTreeDownloadOpaque(md, fetchLocal = fetchLocal)
stream = LPStream(StoreStream.new(self.networkStore, md.manifest, pad = false))
var jobs: seq[Future[void]]
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
jobs.add(self.fetchDatasetAsync(manifest, fetchLocal = false))
proc fetchTask(): Future[void] {.async: (raises: []).} =
try:
trace "Starting tree download",
treeCid = treeCid, totalBlocks = md.manifest.blocksCount
if err =? (await download.waitForComplete()).errorOption:
error "Dataset fetch failed during streaming",
manifestCid = md.manifestCid, err = err.msg
await stream.close()
except CancelledError:
trace "Dataset fetch cancelled during streaming", manifestCid = md.manifestCid
finally:
self.engine.releaseDownload(download)
jobs.add(fetchTask())
# Monitor stream completion and cancel background jobs when done
proc monitorStream() {.async: (raises: []).} =
@ -319,7 +262,7 @@ proc streamEntireDataset(
self.trackedFutures.track(monitorStream())
# Retrieve all blocks of the dataset sequentially from the local store or network
trace "Creating store stream for manifest", manifestCid
trace "Creating store stream for manifest", manifestCid = md.manifestCid
stream.success
@ -338,7 +281,9 @@ proc retrieve*(
return await self.streamSingleBlock(cid)
await self.streamEntireDataset(manifest, cid)
await self.streamEntireDataset(
ManifestDescriptor(manifest: manifest, manifestCid: cid)
)
proc deleteSingleBlock(self: StorageNodeRef, cid: Cid): Future[?!void] {.async.} =
if err =? (await self.networkStore.delBlock(cid)).errorOption:
@ -543,6 +488,7 @@ proc new*(
networkStore: NetworkStore,
engine: BlockExcEngine,
discovery: Discovery,
manifestProto: ManifestProtocol,
taskpool: Taskpool,
): StorageNodeRef =
## Create new instance of a Storage self, call `start` to run it
@ -553,6 +499,7 @@ proc new*(
networkStore: networkStore,
engine: engine,
discovery: discovery,
manifestProto: manifestProto,
taskPool: taskpool,
trackedFutures: TrackedFutures(),
)

View File

@ -19,7 +19,6 @@ import pkg/chronos
import pkg/presto except toJson
import pkg/metrics except toJson
import pkg/stew/base10
import pkg/stew/byteutils
import pkg/confutils
import pkg/libp2p
@ -29,10 +28,14 @@ import pkg/codexdht/discv5/spr as spr
import ../logutils
import ../node
import ../blocktype
import ../storagetypes
import ../conf
import ../manifest
import ../streams/asyncstreamwrapper
import ../stores
import ../stores/repostore
import ../blockexchange
import ../units
import ../utils/options
import ./coders
@ -120,7 +123,7 @@ proc retrieveCid(
while not stream.atEof:
var
buff = newSeqUninitialized[byte](DefaultBlockSize.int)
buff = newSeqUninit[byte](manifest.blockSize.int)
len = await stream.readOnce(addr buff[0], buff.len)
buff.setLen(len)
@ -191,8 +194,29 @@ proc initDataApi(node: StorageNodeRef, repoStore: RepoStore, router: var RestRou
router.rawApi(MethodPost, "/api/storage/v1/data") do() -> RestApiResponse:
## Upload a file in a streaming manner
##
## Optional query parameter:
## blockSize - size of blocks in bytes (default: 64KiB, min: 4KiB, max: 512KiB)
##
trace "Handling file upload"
# Parse blockSize query parameter
var blockSize = DefaultBlockSize
let blockSizeStr = request.query.getString("blockSize", "")
if blockSizeStr != "":
let parsedSize = Base10.decode(uint64, blockSizeStr)
if parsedSize.isErr:
return RestApiResponse.error(Http400, "Invalid blockSize parameter")
let size = parsedSize.get()
# Validate block size
if size < MinBlockSize or size > MaxBlockSize or not isPowerOfTwo(size):
return RestApiResponse.error(
Http400,
"blockSize must be a power of two between " & $MinBlockSize & " and " &
$MaxBlockSize & " bytes",
)
blockSize = NBytes(size)
var bodyReader = request.getBodyReader()
if bodyReader.isErr():
return RestApiResponse.error(Http500, msg = bodyReader.error())
@ -223,6 +247,16 @@ proc initDataApi(node: StorageNodeRef, repoStore: RepoStore, router: var RestRou
if filename.isSome and not isValidFilename(filename.get()):
return RestApiResponse.error(Http422, "The filename is not valid.")
if filename.isSome and filename.get().len > MaxFilenameSize:
return RestApiResponse.error(
Http422, "Filename exceeds maximum size of " & $MaxFilenameSize & " bytes"
)
if mimetype.isSome and mimetype.get().len > MaxMimetypeSize:
return RestApiResponse.error(
Http422, "Mimetype exceeds maximum size of " & $MaxMimetypeSize & " bytes"
)
# Here we could check if the extension matches the filename if needed
let reader = bodyReader.get()
@ -233,13 +267,14 @@ proc initDataApi(node: StorageNodeRef, repoStore: RepoStore, router: var RestRou
AsyncStreamWrapper.new(reader = AsyncStreamReader(reader)),
filename = filename,
mimetype = mimetype,
blockSize = blockSize,
)
), error:
error "Error uploading file", exc = error.msg
return RestApiResponse.error(Http500, error.msg)
storage_api_uploads.inc()
trace "Uploaded file", cid
trace "Uploaded file", cid, blockSize
return RestApiResponse.response($cid)
except CancelledError:
trace "Upload cancelled error"
@ -304,9 +339,10 @@ proc initDataApi(node: StorageNodeRef, repoStore: RepoStore, router: var RestRou
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Download a file from the network to the local node
## Returns the download ID for progress tracking and cancellation.
##
var headers = buildCorsHeaders("GET", allowedOrigin)
var headers = buildCorsHeaders("POST", allowedOrigin)
if cid.isErr:
return RestApiResponse.error(Http400, $cid.error(), headers = headers)
@ -316,11 +352,65 @@ proc initDataApi(node: StorageNodeRef, repoStore: RepoStore, router: var RestRou
return RestApiResponse.error(Http404, err.msg, headers = headers)
# Start fetching the dataset in the background
node.fetchDatasetAsyncTask(manifest)
let md = ManifestDescriptor(manifest: manifest, manifestCid: cid.get())
without downloadId =?
(await node.startBackgroundDownload(md, selectionPolicy = spRandomWindow)), err:
return RestApiResponse.error(Http409, err.msg, headers = headers)
let json = %formatManifest(cid.get(), manifest)
var json = %formatManifest(cid.get(), manifest)
json["downloadId"] = %downloadId
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodDelete, "/api/storage/v1/data/{cid}/network/{downloadId}") do(
cid: Cid, downloadId: uint64, resp: HttpResponseRef
) -> RestApiResponse:
## Cancel a specific background download
##
var headers = buildCorsHeaders("DELETE", allowedOrigin)
if cid.isErr:
return RestApiResponse.error(Http400, $cid.error(), headers = headers)
if downloadId.isErr:
return RestApiResponse.error(Http400, "Invalid download ID", headers = headers)
if not node.cancelBackgroundDownload(downloadId.get(), cid.get()):
return RestApiResponse.error(
Http404, "Background download not found", headers = headers
)
resp.status = Http204
await resp.sendBody("")
router.api(MethodGet, "/api/storage/v1/data/{cid}/network/progress/{downloadId}") do(
cid: Cid, downloadId: uint64, resp: HttpResponseRef
) -> RestApiResponse:
## Get progress of a specific background download
##
var headers = buildCorsHeaders("GET", allowedOrigin)
if cid.isErr:
return RestApiResponse.error(Http400, $cid.error(), headers = headers)
if downloadId.isErr:
return RestApiResponse.error(Http400, "Invalid download ID", headers = headers)
let progress = node.getDownloadProgress(downloadId.get(), cid.get())
if progress.isSome:
let
p = progress.get()
json = %*{
"active": true,
"received": p.blocksCompleted,
"total": p.totalBlocks,
"bytes": p.bytesTransferred,
}
return RestApiResponse.response($json, contentType = "application/json")
else:
let json = %*{"active": false}
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/storage/v1/data/{cid}/network/stream") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
@ -477,6 +567,7 @@ proc initDebugApi(node: StorageNodeRef, conf: StorageConf, router: var RestRoute
try:
let table = RestRoutingTable.init(node.discovery.protocol.routingTable)
let json = %*{
"id": $node.switch.peerInfo.peerId,
"addrs": node.switch.peerInfo.addrs.mapIt($it),

View File

@ -7,8 +7,6 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sugar
import pkg/presto
import pkg/chronos
import pkg/libp2p

View File

@ -1,5 +1,4 @@
import pkg/questionable
import pkg/stew/byteutils
import pkg/libp2p
import pkg/codexdht/discv5/node as dn
import pkg/codexdht/discv5/routing_table as rt

View File

@ -7,12 +7,11 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import std/strutils
import std/os
import std/tables
import std/cpuinfo
import std/net
import std/sequtils
import pkg/chronos
import pkg/taskpools
@ -25,6 +24,7 @@ import pkg/datastore
import pkg/stew/io2
import ./node
import ./manifest/protocol
import ./conf
import ./rng as random
import ./rest/api
@ -32,8 +32,8 @@ import ./stores
import ./blockexchange
import ./utils/fileutils
import ./discovery
import ./systemclock
import ./utils/addrutils
import ./utils/natutils
import ./namespaces
import ./storagetypes
import ./logutils
@ -45,6 +45,7 @@ logScope:
type
StorageServer* = ref object
config: StorageConf
logFile*: Option[IoHandle]
restServer: RestServerRef
storageNode: StorageNodeRef
repoStore: RepoStore
@ -79,6 +80,16 @@ proc start*(s: StorageServer) {.async.} =
s.config.nat, s.storageNode.switch.peerInfo.addrs, s.config.discoveryPort
)
var hasPublicAddr = false
for announceAddr in announceAddrs:
let (maybeIp, _) = getAddressAndPort(announceAddr)
if maybeIp.isSome and maybeIp.get.isGlobalUnicast():
hasPublicAddr = true
break
if not hasPublicAddr:
warn "Unable to determine a public IP address. This node will only be reachable on a private network."
s.storageNode.discovery.updateAnnounceRecord(announceAddrs)
s.storageNode.discovery.updateDhtRecord(discoveryAddrs)
@ -112,7 +123,10 @@ proc stop*(s: StorageServer) {.async.} =
if res.failure.len > 0:
error "Failed to stop Storage node", failures = res.failure.len
raiseAssert "Failed to stop Storage node"
raise newException(
StorageError,
"Failed to stop Storage node: " & res.failure.mapIt(it.error.msg).join(", "),
)
proc close*(s: StorageServer) {.async.} =
var futures =
@ -125,18 +139,34 @@ proc close*(s: StorageServer) {.async.} =
s.taskpool.shutdown()
except Exception as exc:
error "Failed to stop the taskpool", failures = res.failure.len
raiseAssert("Failure in taskpool shutdown:" & exc.msg)
raise newException(StorageError, "Failure in taskpool shutdown: " & exc.msg)
when defaultChroniclesStream.outputs.type.arity >= 3:
proc noOutput(logLevel: LogLevel, msg: LogOutputStr) =
discard
defaultChroniclesStream.outputs[2].writer = noOutput
if s.logFile.isSome:
if error =? closeFile(s.logFile.get()).errorOption:
error "Failed to close log file", errorCode = $error
if res.failure.len > 0:
error "Failed to close Storage node", failures = res.failure.len
raiseAssert "Failed to close Storage node"
raise newException(
StorageError,
"Failed to close Storage node: " & res.failure.mapIt(it.error.msg).join(", "),
)
proc shutdown*(server: StorageServer) {.async.} =
await server.stop()
await server.close()
proc new*(
T: type StorageServer, config: StorageConf, privateKey: StoragePrivateKey
T: type StorageServer,
config: StorageConf,
privateKey: StoragePrivateKey,
logFile: Option[IoHandle] = IoHandle.none,
): StorageServer =
## create StorageServer including setting up datastore, repostore, etc
let listenMultiAddr = getMultiAddrWithIpAndTcpPort(config.listenIp, config.listenPort)
@ -147,7 +177,7 @@ proc new*(
.withAddresses(@[listenMultiAddr])
.withRng(random.Rng.instance())
.withNoise()
.withMplex(5.minutes, 5.minutes)
.withYamux()
.withMaxConnections(config.maxPeers)
.withAgentVersion(config.agentString)
.withSignedPeerRecord(true)
@ -236,21 +266,22 @@ proc new*(
numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks,
)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries)
peerStore = PeerContextStore.new()
downloadManager = DownloadManager.new(retries = config.blockRetries)
advertiser = Advertiser.new(repoStore, discovery)
blockDiscovery =
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery)
engine = BlockExcEngine.new(
repoStore, network, blockDiscovery, advertiser, peerStore, pendingBlocks
repoStore, network, blockDiscovery, advertiser, peerStore, downloadManager
)
store = NetworkStore.new(engine, repoStore)
manifestProto = ManifestProtocol.new(switch, repoStore, discovery)
storageNode = StorageNodeRef.new(
switch = switch,
networkStore = store,
engine = engine,
discovery = discovery,
manifestProto = manifestProto,
taskPool = taskPool,
)
@ -267,6 +298,7 @@ proc new*(
.expect("Should create rest server!")
switch.mount(network)
switch.mount(manifestProto)
StorageServer(
config: config,
@ -275,4 +307,5 @@ proc new*(
repoStore: repoStore,
maintenance: maintenance,
taskPool: taskPool,
logFile: logFile,
)

View File

@ -23,10 +23,27 @@ import ./errors
export tables
const
# Size of blocks for storage / network exchange,
DefaultBlockSize* = NBytes 1024 * 64
func isPowerOfTwo*(x: uint64): bool =
(x > 0) and ((x and (x - 1)) == 0)
const
# Block size limits for storage / network exchange
MinBlockSize* = 4096'u64 # 4 KiB minimum
MaxBlockSize* = 524288'u64 # 512 KiB maximum
DefaultBlockSize* = NBytes 65536 # 64 KiB default
# Manifest field limits (ensure manifest fits in MinBlockSize)
MaxFilenameSize* = 255
MaxMimetypeSize* = 128
static:
# Validate block size constants are powers of two
doAssert isPowerOfTwo(MinBlockSize), "MinBlockSize must be a power of two"
doAssert isPowerOfTwo(MaxBlockSize), "MaxBlockSize must be a power of two"
doAssert isPowerOfTwo(DefaultBlockSize.uint64),
"DefaultBlockSize must be a power of two"
const
# hashes
Sha256HashCodec* = multiCodec("sha2-256")

View File

@ -23,6 +23,10 @@ export blocktype
type
BlockNotFoundError* = object of StorageError
BlockCorruptedError* = object of StorageError
## Raised when a block received from the network fails validation
## (CID doesn't match the data). This indicates either malicious peer
## or data corruption in transit.
BlockType* {.pure.} = enum
Manifest
@ -65,14 +69,9 @@ method getBlock*(
raiseAssert("getBlock by addr not implemented!")
method completeBlock*(
self: BlockStore, address: BlockAddress, blk: Block
) {.base, gcsafe.} =
discard
method getBlocks*(
self: BlockStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
): Future[SafeAsyncIter[Block]] {.base, async: (raises: [CancelledError]).} =
## Gets a set of blocks from the blockstore. Blocks might
## be returned in any order.
@ -195,8 +194,4 @@ proc contains*(
proc contains*(
self: BlockStore, address: BlockAddress
): Future[bool] {.async: (raises: [CancelledError]), gcsafe.} =
return
if address.leaf:
(await self.hasBlock(address.treeCid, address.index)) |? false
else:
(await self.hasBlock(address.cid)) |? false
return (await self.hasBlock(address.treeCid, address.index)) |? false

View File

@ -117,10 +117,7 @@ method getBlockAndProof*(
method getBlock*(
self: CacheStore, address: BlockAddress
): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} =
if address.leaf:
self.getBlock(address.treeCid, address.index)
else:
self.getBlock(address.cid)
self.getBlock(address.treeCid, address.index)
method hasBlock*(
self: CacheStore, cid: Cid
@ -188,7 +185,7 @@ method listBlocks*(
success(iter)
func putBlockSync(self: CacheStore, blk: Block): bool =
let blkSize = blk.data.len.NBytes # in bytes
let blkSize = blk.data[].len.NBytes # in bytes
if blkSize > self.size:
trace "Block size is larger than cache size", blk = blkSize, cache = self.size
@ -197,7 +194,7 @@ func putBlockSync(self: CacheStore, blk: Block): bool =
while self.currentSize + blkSize > self.size:
try:
let removed = self.cache.removeLru()
self.currentSize -= removed.data.len.NBytes
self.currentSize -= removed.data[].len.NBytes
except EmptyLruCacheError as exc:
# if the cache is empty, can't remove anything, so break and add item
# to the cache
@ -264,7 +261,7 @@ method delBlock*(
let removed = self.cache.del(cid)
if removed.isSome:
self.currentSize -= removed.get.data.len.NBytes
self.currentSize -= removed.get.data[].len.NBytes
return success()
@ -278,9 +275,6 @@ method delBlock*(
return success()
method completeBlock*(self: CacheStore, address: BlockAddress, blk: Block) {.gcsafe.} =
discard
method close*(self: CacheStore): Future[void] {.async: (raises: []).} =
## Close the blockstore, a no-op for this implementation
##

View File

@ -33,52 +33,31 @@ type NetworkStore* = ref object of BlockStore
method getBlocks*(
self: NetworkStore, addresses: seq[BlockAddress]
): Future[SafeAsyncIter[Block]] {.async: (raises: [CancelledError]).} =
var
localAddresses: seq[BlockAddress]
remoteAddresses: seq[BlockAddress]
let runtimeQuota = 10.milliseconds
var lastIdle = Moment.now()
for address in addresses:
if not (await address in self.localStore):
remoteAddresses.add(address)
else:
localAddresses.add(address)
if (Moment.now() - lastIdle) >= runtimeQuota:
await idleAsync()
lastIdle = Moment.now()
return chain(
await self.localStore.getBlocks(localAddresses),
self.engine.requestBlocks(remoteAddresses),
)
): Future[SafeAsyncIter[Block]] {.async: (raw: true, raises: [CancelledError]).} =
self.localStore.getBlocks(addresses)
method getBlock*(
self: NetworkStore, address: BlockAddress
): Future[?!Block] {.async: (raises: [CancelledError]).} =
without blk =? (await self.localStore.getBlock(address)), err:
if not (err of BlockNotFoundError):
error "Error getting block from local store", address, err = err.msg
return failure err
let downloadOpt = self.engine.downloadManager.getDownload(address.treeCid)
if downloadOpt.isSome:
let handle = downloadOpt.get().getWantHandle(address)
without blk =? (await self.localStore.getBlock(address)), err:
if not (err of BlockNotFoundError):
handle.cancelSoon()
return failure err
return await handle
discard downloadOpt.get().completeWantHandle(address, some(blk))
return success blk
without newBlock =? (await self.engine.requestBlock(address)), err:
error "Unable to get block from exchange engine", address, err = err.msg
return failure err
return success newBlock
return success blk
return await self.localStore.getBlock(address)
method getBlock*(
self: NetworkStore, cid: Cid
): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} =
## Get a block from the blockstore
## Get a block from the local blockstore only.
##
self.getBlock(BlockAddress.init(cid))
self.localStore.getBlock(cid)
method getBlock*(
self: NetworkStore, treeCid: Cid, index: Natural
@ -88,9 +67,6 @@ method getBlock*(
self.getBlock(BlockAddress.init(treeCid, index))
method completeBlock*(self: NetworkStore, address: BlockAddress, blk: Block) =
self.engine.completeBlock(address, blk)
method putBlock*(
self: NetworkStore, blk: Block, ttl = Duration.none
): Future[?!void] {.async: (raises: [CancelledError]).} =
@ -100,7 +76,6 @@ method putBlock*(
if res.isErr:
return res
await self.engine.resolveBlocks(@[blk])
return success()
method putCidAndProof*(

View File

@ -185,7 +185,7 @@ proc storeBlock*(
res: StoreResult
if currMd =? maybeCurrMd:
if currMd.size == blk.data.len.NBytes:
if currMd.size == blk.data[].len.NBytes:
md = BlockMetadata(
size: currMd.size,
expiry: max(currMd.expiry, minExpiry),
@ -200,7 +200,7 @@ proc storeBlock*(
if not hasBlock:
warn "Block metadata is present, but block is absent. Restoring block.",
cid = blk.cid
if err =? (await self.repoDs.put(blkKey, blk.data)).errorOption:
if err =? (await self.repoDs.put(blkKey, blk.data[])).errorOption:
raise err
else:
raise newException(
@ -209,9 +209,9 @@ proc storeBlock*(
$blk.cid,
)
else:
md = BlockMetadata(size: blk.data.len.NBytes, expiry: minExpiry, refCount: 0)
res = StoreResult(kind: Stored, used: blk.data.len.NBytes)
if err =? (await self.repoDs.put(blkKey, blk.data)).errorOption:
md = BlockMetadata(size: blk.data[].len.NBytes, expiry: minExpiry, refCount: 0)
res = StoreResult(kind: Stored, used: blk.data[].len.NBytes)
if err =? (await self.repoDs.put(blkKey, blk.data[])).errorOption:
raise err
(md.some, res),

View File

@ -70,15 +70,18 @@ method getBlock*(
trace "Error getting key from provider", err = err.msg
return failure(err)
without data =? await self.repoDs.get(key), err:
# Manual pattern to avoid questionable copy
var dataResult = await self.repoDs.get(key)
if dataResult.isErr:
let err = dataResult.error
if not (err of DatastoreKeyNotFound):
trace "Error getting block from datastore", err = err.msg, key
return failure(err)
return failure(newException(BlockNotFoundError, err.msg))
trace "Got block for cid", cid
return Block.new(cid, data, verify = true)
# Zero-copy: move data out of Result, then into Block
return Block.new(cid, move(dataResult.unsafeGet()), verify = true)
method getBlockAndProof*(
self: RepoStore, treeCid: Cid, index: Natural
@ -86,10 +89,12 @@ method getBlockAndProof*(
without leafMd =? await self.getLeafMetadata(treeCid, index), err:
return failure(err)
without blk =? await self.getBlock(leafMd.blkCid), err:
return failure(err)
# Manual pattern to avoid questionable copy for Block (contains seq[byte])
var blkResult = await self.getBlock(leafMd.blkCid)
if blkResult.isErr:
return failure(blkResult.error)
success((blk, leafMd.proof))
success((move(blkResult.unsafeGet()), leafMd.proof))
method getBlock*(
self: RepoStore, treeCid: Cid, index: Natural
@ -105,10 +110,7 @@ method getBlock*(
## Get a block from the blockstore
##
if address.leaf:
self.getBlock(address.treeCid, address.index)
else:
self.getBlock(address.cid)
self.getBlock(address.treeCid, address.index)
method ensureExpiry*(
self: RepoStore, cid: Cid, expiry: SecondsSince1970
@ -338,7 +340,8 @@ method listBlocks*(
if queryIter.finished:
iter.finish
else:
if pair =? (await queryIter.next()) and cid =? pair.key:
let res = await queryIter.next()
if pair =? res and cid =? pair.key:
doAssert pair.data.len == 0
trace "Retrieved record from repo", cid
return Cid.init(cid.value).mapFailure

View File

@ -94,8 +94,7 @@ method readOnce*(
self.manifest.blockSize.int - blockOffset,
]
)
address =
BlockAddress(leaf: true, treeCid: self.manifest.treeCid, index: blockNum)
address = BlockAddress(treeCid: self.manifest.treeCid, index: blockNum)
# Read contents of block `blockNum`
without blk =? (await self.store.getBlock(address)).tryGet.catch, error:
@ -113,7 +112,7 @@ method readOnce*(
if blk.isEmpty:
zeroMem(pbytes.offset(read), readBytes)
else:
copyMem(pbytes.offset(read), blk.data[blockOffset].unsafeAddr, readBytes)
copyMem(pbytes.offset(read), blk.data[][blockOffset].unsafeAddr, readBytes)
# Update current positions in the stream and outbuf
self.offset += readBytes

View File

@ -10,7 +10,6 @@
{.push raises: [].}
import std/enumerate
import std/parseutils
import std/options
@ -41,24 +40,6 @@ func roundUp*[T](a, b: T): T =
proc orElse*[A](a, b: Option[A]): Option[A] =
if (a.isSome()): a else: b
template findIt*(s, pred: untyped): untyped =
## Returns the index of the first object matching a predicate, or -1 if no
## object matches it.
runnableExamples:
type MyType = object
att: int
var s = @[MyType(att: 1), MyType(att: 2), MyType(att: 3)]
doAssert s.findIt(it.att == 2) == 1
doAssert s.findIt(it.att == 4) == -1
var index = -1
for i, it {.inject.} in enumerate(items(s)):
if pred:
index = i
break
index
when not declared(parseDuration): # Odd code formatting to minimize diff v. mainLine
const Whitespace = {' ', '\t', '\v', '\r', '\l', '\f'}

View File

@ -1,4 +1,3 @@
import std/options
import std/typetraits
from pkg/libp2p import
Cid, PeerId, SignedPeerRecord, MultiAddress, AddressInfo, init, `$`

View File

@ -15,8 +15,7 @@ proc `as`*[T](value: T, U: type): ?U =
# In that case, we do not need to do anything, just return the value as it is.
when value is Option[U]:
return value
when value is U:
elif value is U:
return some value
elif value is ref object:
if value of U:

View File

@ -663,7 +663,7 @@ int check_download_manifest(void *storage_ctx, const char *cid)
int ret = is_resp_ok(r, &res);
const char *expected_manifest = "{\"treeCid\":\"zDzSvJTf8JYwvysKPmG7BtzpbiAHfuwFMRphxm4hdvnMJ4XPJjKX\",\"datasetSize\":12,\"blockSize\":65536,\"filename\":\"hello_world.txt\",\"mimetype\":\"text/plain\"}";
const char *expected_manifest = "{\"manifestVersion\":0,\"treeCid\":\"zDzSvJTf8JYwvysKPmG7BtzpbiAHfuwFMRphxm4hdvnMJ4XPJjKX\",\"datasetSize\":12,\"blockSize\":65536,\"filename\":\"hello_world.txt\",\"mimetype\":\"text/plain\"}";
if (strncmp(res, expected_manifest, strlen(expected_manifest)) != 0)
{
@ -689,7 +689,7 @@ int check_list(void *storage_ctx)
int ret = is_resp_ok(r, &res);
const char *expected_manifest = "{\"treeCid\":\"zDzSvJTf8JYwvysKPmG7BtzpbiAHfuwFMRphxm4hdvnMJ4XPJjKX\",\"datasetSize\":12,\"blockSize\":65536,\"filename\":\"hello_world.txt\",\"mimetype\":\"text/plain\"}";
const char *expected_manifest = "{\"manifestVersion\":0,\"treeCid\":\"zDzSvJTf8JYwvysKPmG7BtzpbiAHfuwFMRphxm4hdvnMJ4XPJjKX\",\"datasetSize\":12,\"blockSize\":65536,\"filename\":\"hello_world.txt\",\"mimetype\":\"text/plain\"}";
if (strstr(res, expected_manifest) == NULL)
{

View File

@ -8,7 +8,6 @@ import pkg/storage/stores
import pkg/storage/units
import pkg/chronos
import pkg/stew/byteutils
import pkg/stint
import ./storage/helpers/randomchunker

View File

@ -1,7 +1,6 @@
import helpers/multisetup
import helpers/trackers
import helpers/templeveldb
import std/times
import std/sequtils, chronos
import ./asynctest

View File

@ -1,31 +1,13 @@
import pkg/storage/streams/storestream
import pkg/unittest2
# From lip2p/tests/helpers
const trackerNames = [StoreStreamTrackerName]
iterator testTrackers*(extras: openArray[string] = []): TrackerBase =
for name in trackerNames:
let t = getTracker(name)
if not isNil(t):
yield t
for name in extras:
let t = getTracker(name)
if not isNil(t):
yield t
proc checkTracker*(name: string) =
var tracker = getTracker(name)
if tracker.isLeaked():
checkpoint tracker.dump()
fail()
proc checkTrackers*() =
for tracker in testTrackers():
if tracker.isLeaked():
checkpoint tracker.dump()
for name in trackerNames:
let counter = getTrackerCounter(name)
if counter.opened != counter.closed:
# show how many streams were opened vs closed to help diagnose the leak
checkpoint name & ": opened=" & $counter.opened & ", closed=" & $counter.closed
fail()
try:
GC_fullCollect()
except:
discard
GC_fullCollect()

View File

@ -1,7 +1,5 @@
import std/importutils
import std/net
import std/sequtils
import std/strformat
from pkg/libp2p import `==`, `$`, Cid
import pkg/storage/units
import pkg/storage/manifest

View File

@ -1,6 +1,5 @@
import std/times
import pkg/storage/conf
import pkg/stint
from pkg/libp2p import Cid, `$`
import ../../asynctest
import ../../checktest

View File

@ -83,6 +83,26 @@ twonodessuite "Uploads and downloads":
check:
content1 == resp2
test "background download with progress polling", twoNodesConfig:
let
data = await RandomChunker.example(blocks = 8)
cid = (await client1.upload(data)).get
downloadId = (await client2.startDownload(cid)).get
var completed = false
for _ in 0 ..< 60:
let progress = (await client2.getDownloadProgress(cid, downloadId)).get
if not progress["active"].getBool():
completed = true
break
await sleepAsync(500.milliseconds)
check completed
let response = (await client2.download(cid, local = true)).get
check:
@response.mapIt(it.byte) == data
test "reliable transfer test", twoNodesConfig:
proc transferTest(a: StorageClient, b: StorageClient) {.async.} =
let data = await RandomChunker.example(blocks = 8)

View File

@ -1,6 +1,5 @@
import std/httpclient
import std/os
import std/sequtils
import std/strutils
import std/sugar
import std/times
@ -33,7 +32,6 @@ type
MultiNodeSuiteError = object of CatchableError
SuiteTimeoutError = object of MultiNodeSuiteError
const HardhatPort {.intdefine.}: int = 8545
const StorageApiPort {.intdefine.}: int = 8080
const StorageDiscPort {.intdefine.}: int = 8090
const TestId {.strdefine.}: string = "TestId"
@ -76,7 +74,6 @@ template multinodesuite*(suiteName: string, body: untyped) =
var currentTestName = ""
var nodeConfigs: NodeConfigs
var snapshot: JsonNode
var lastUsedHardhatPort = HardhatPort
var lastUsedStorageApiPort = StorageApiPort
var lastUsedStorageDiscPort = StorageDiscPort
var storagePortLock: AsyncLock

View File

@ -5,7 +5,6 @@ import pkg/chronicles
import pkg/chronos/asyncproc
import pkg/libp2p
import std/os
import std/strformat
import std/strutils
import storage/conf
import storage/utils/exceptions

View File

@ -1,7 +1,6 @@
import std/strutils
from pkg/libp2p import Cid, `$`, init
import pkg/stint
import pkg/questionable/results
import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient, httptable]
import pkg/storage/logutils
@ -170,6 +169,32 @@ proc downloadManifestOnly*(
success await response.body
proc startDownload*(
client: StorageClient, cid: Cid
): Future[?!uint64] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.post(client.baseurl & "/data/" & $cid & "/network")
if response.status != 200:
return failure($response.status)
without jsonData =? JsonNode.parse(await response.body), err:
return failure(err)
let idNode = jsonData.getOrDefault("downloadId")
if idNode.isNil:
return failure("missing downloadId in response")
success idNode.getInt().uint64
proc getDownloadProgress*(
client: StorageClient, cid: Cid, downloadId: uint64
): Future[?!JsonNode] {.async: (raises: [CancelledError, HttpError]).} =
let url = client.baseurl & "/data/" & $cid & "/network/progress/" & $downloadId
let response = await client.get(url)
if response.status != 200:
return failure($response.status)
return JsonNode.parse(await response.body)
proc deleteRaw*(
client: StorageClient, cid: string
): Future[HttpClientResponseRef] {.

View File

@ -106,7 +106,7 @@ method restart*(node: StorageProcess) {.async.} =
await node.waitUntilStarted()
trace "storage process restarted"
method stop*(node: StorageProcess) {.async: (raises: []).} =
method stop*(node: StorageProcess, expectedExitCode: int = 0) {.async: (raises: []).} =
logScope:
nodeName = node.name

View File

@ -1,9 +1,11 @@
import std/os
import std/strformat
import pkg/chronos
import pkg/chronos/asyncproc
import pkg/storage/logutils
when defined(windows):
import std/strformat
import pkg/chronos/asyncproc
{.push raises: [].}
proc nextFreePort*(startPort: int): Future[int] {.async: (raises: [CancelledError]).} =

View File

@ -4,8 +4,6 @@ import std/tables
import pkg/chronos
import pkg/libp2p/errors
import pkg/storage/rng
import pkg/storage/stores
import pkg/storage/blockexchange
@ -13,6 +11,7 @@ import pkg/storage/chunker
import pkg/storage/manifest
import pkg/storage/merkletree
import pkg/storage/blocktype as bt
import pkg/storage/storagetypes
import ../../../asynctest
import ../../helpers
@ -28,14 +27,14 @@ asyncchecksuite "Block Advertising and Discovery":
tree: StorageMerkleTree
manifestBlock: bt.Block
switch: Switch
peerStore: PeerCtxStore
peerStore: PeerContextStore
blockDiscovery: MockDiscovery
discovery: DiscoveryEngine
advertiser: Advertiser
network: BlockExcNetwork
localStore: CacheStore
engine: BlockExcEngine
pendingBlocks: PendingBlocksManager
downloadManager: DownloadManager
setup:
while true:
@ -49,36 +48,36 @@ asyncchecksuite "Block Advertising and Discovery":
blockDiscovery = MockDiscovery.new()
network = BlockExcNetwork.new(switch)
localStore = CacheStore.new(blocks.mapIt(it))
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
peerStore = PeerContextStore.new()
downloadManager = DownloadManager.new()
(_, tree, manifest) = makeDataset(blocks).tryGet()
(_, tree, manifest, _) = makeDataset(blocks).tryGet()
manifestBlock =
bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet()
(await localStore.putBlock(manifestBlock)).tryGet()
discovery = DiscoveryEngine.new(
localStore,
peerStore,
network,
blockDiscovery,
pendingBlocks,
minPeersPerBlock = 1,
localStore, peerStore, network, blockDiscovery, concurrentDiscReqs = 20
)
advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new(
localStore, network, discovery, advertiser, peerStore, pendingBlocks
localStore, network, discovery, advertiser, peerStore, downloadManager
)
switch.mount(network)
test "Should discover want list":
let pendingBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))
await engine.start()
var handles: seq[Future[?!bt.Block]]
for blk in blocks:
let
address = BlockAddress.init(blk.cid, 0)
md = testManifestDesc(blk.cid, DefaultBlockSize.uint32, 1)
desc = DownloadDesc(md: md, startIndex: address.index.uint64, count: 1)
download = engine.downloadManager.startDownload(desc)
handles.add(download.getWantHandle(address))
blockDiscovery.publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
@ -88,14 +87,23 @@ asyncchecksuite "Block Advertising and Discovery":
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
await engine.resolveBlocks(blocks.filterIt(it.cid == cid))
let matching = blocks.filterIt(it.cid == cid)
for blk in matching:
let address = BlockAddress(treeCid: blk.cid, index: 0)
let dlOpt = engine.downloadManager.getDownload(blk.cid)
if dlOpt.isSome:
discard dlOpt.get().completeWantHandle(address, some(blk))
await allFuturesThrowing(allFinished(pendingBlocks))
await engine.start()
discovery.queueFindBlocksReq(blocks.mapIt(it.cid))
await allFuturesThrowing(allFinished(handles)).wait(10.seconds)
await engine.stop()
test "Should advertise trees":
let cids = @[manifest.treeCid]
test "Should advertise manifests":
let cids = @[manifestBlock.cid]
var advertised = initTable.collect:
for cid in cids:
{cid: newFuture[void]()}
@ -123,230 +131,3 @@ asyncchecksuite "Block Advertising and Discovery":
await engine.start()
await sleepAsync(3.seconds)
await engine.stop()
test "Should not launch discovery if remote peer has block":
let
pendingBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))
peerId = PeerId.example
haves = collect(initTable()):
for blk in blocks:
{blk.address: Presence(address: blk.address)}
engine.peers.add(BlockExcPeerCtx(id: peerId, blocks: haves))
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check false
await engine.start()
engine.pendingBlocks.resolve(
blocks.mapIt(BlockDelivery(blk: it, address: it.address))
)
await allFuturesThrowing(allFinished(pendingBlocks))
await engine.stop()
proc asBlock(m: Manifest): bt.Block =
let mdata = m.encode().tryGet()
bt.Block.new(data = mdata, codec = ManifestCodec).tryGet()
asyncchecksuite "E2E - Multiple Nodes Discovery":
var
switch: seq[Switch]
blockexc: seq[NetworkStore]
manifests: seq[Manifest]
mBlocks: seq[bt.Block]
trees: seq[StorageMerkleTree]
setup:
for _ in 0 ..< 4:
let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256)
var blocks = newSeq[bt.Block]()
while true:
let chunk = await chunker.getBytes()
if chunk.len <= 0:
break
blocks.add(bt.Block.new(chunk).tryGet())
let (_, tree, manifest) = makeDataset(blocks).tryGet()
manifests.add(manifest)
mBlocks.add(manifest.asBlock())
trees.add(tree)
let
s = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr})
blockDiscovery = MockDiscovery.new()
network = BlockExcNetwork.new(s)
localStore = CacheStore.new()
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
discovery = DiscoveryEngine.new(
localStore,
peerStore,
network,
blockDiscovery,
pendingBlocks,
minPeersPerBlock = 1,
)
advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new(
localStore, network, discovery, advertiser, peerStore, pendingBlocks
)
networkStore = NetworkStore.new(engine, localStore)
s.mount(network)
switch.add(s)
blockexc.add(networkStore)
teardown:
switch = @[]
blockexc = @[]
manifests = @[]
mBlocks = @[]
trees = @[]
test "E2E - Should advertise and discover blocks":
# Distribute the manifests and trees amongst 1..3
# Ask 0 to download everything without connecting him beforehand
var advertised: Table[Cid, SignedPeerRecord]
MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[1].peerInfo.signedPeerRecord
MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[2].peerInfo.signedPeerRecord
MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[3].peerInfo.signedPeerRecord
discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid)
await blockexc[1].engine.blocksDeliveryHandler(
switch[0].peerInfo.peerId,
@[
BlockDelivery(
blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid)
)
],
allowSpurious = true,
)
discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid)
await blockexc[2].engine.blocksDeliveryHandler(
switch[0].peerInfo.peerId,
@[
BlockDelivery(
blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid)
)
],
allowSpurious = true,
)
discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid)
await blockexc[3].engine.blocksDeliveryHandler(
switch[0].peerInfo.peerId,
@[
BlockDelivery(
blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid)
)
],
allowSpurious = true,
)
MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
advertised.withValue(cid, val):
result.add(val[])
let futs = collect(newSeq):
for m in mBlocks[0 .. 2]:
blockexc[0].engine.requestBlock(m.cid)
await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds)
await allFutures(futs).wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds)
await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds)
test "E2E - Should advertise and discover blocks with peers already connected":
# Distribute the blocks amongst 1..3
# Ask 0 to download everything *WITH* connecting him beforehand
var advertised: Table[Cid, SignedPeerRecord]
MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[1].peerInfo.signedPeerRecord
MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[2].peerInfo.signedPeerRecord
MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[3].peerInfo.signedPeerRecord
discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid)
await blockexc[1].engine.blocksDeliveryHandler(
switch[0].peerInfo.peerId,
@[
BlockDelivery(
blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid)
)
],
allowSpurious = true,
)
discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid)
await blockexc[2].engine.blocksDeliveryHandler(
switch[0].peerInfo.peerId,
@[
BlockDelivery(
blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid)
)
],
allowSpurious = true,
)
discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid)
await blockexc[3].engine.blocksDeliveryHandler(
switch[0].peerInfo.peerId,
@[
BlockDelivery(
blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid)
)
],
allowSpurious = true,
)
MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
advertised.withValue(cid, val):
return @[val[]]
let futs = mBlocks[0 .. 2].mapIt(blockexc[0].engine.requestBlock(it.cid))
await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds)
await allFutures(futs).wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds)
await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds)

View File

@ -1,6 +1,3 @@
import std/sequtils
import std/tables
import pkg/chronos
import pkg/storage/rng
@ -30,9 +27,9 @@ asyncchecksuite "Test Discovery Engine":
tree: StorageMerkleTree
manifestBlock: bt.Block
switch: Switch
peerStore: PeerCtxStore
peerStore: PeerContextStore
blockDiscovery: MockDiscovery
pendingBlocks: PendingBlocksManager
downloadManager: DownloadManager
network: BlockExcNetwork
setup:
@ -43,53 +40,21 @@ asyncchecksuite "Test Discovery Engine":
blocks.add(bt.Block.new(chunk).tryGet())
(_, tree, manifest) = makeDataset(blocks).tryGet()
(_, tree, manifest, _) = makeDataset(blocks).tryGet()
manifestBlock = manifest.asBlock()
blocks.add(manifestBlock)
switch = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr})
network = BlockExcNetwork.new(switch)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
peerStore = PeerContextStore.new()
downloadManager = DownloadManager.new()
blockDiscovery = MockDiscovery.new()
test "Should Query Wants":
var
localStore = CacheStore.new()
discoveryEngine = DiscoveryEngine.new(
localStore,
peerStore,
network,
blockDiscovery,
pendingBlocks,
discoveryLoopSleep = 100.millis,
)
wants = blocks.mapIt(pendingBlocks.getWantHandle(it.cid))
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
pendingBlocks.resolve(
blocks.filterIt(it.cid == cid).mapIt(
BlockDelivery(blk: it, address: it.address)
)
)
await discoveryEngine.start()
await allFuturesThrowing(allFinished(wants)).wait(100.millis)
await discoveryEngine.stop()
test "Should queue discovery request":
var
localStore = CacheStore.new()
discoveryEngine = DiscoveryEngine.new(
localStore,
peerStore,
network,
blockDiscovery,
pendingBlocks,
discoveryLoopSleep = 100.millis,
)
discoveryEngine =
DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery)
want = newFuture[void]()
blockDiscovery.findBlockProvidersHandler = proc(
@ -104,60 +69,11 @@ asyncchecksuite "Test Discovery Engine":
await want.wait(100.millis)
await discoveryEngine.stop()
test "Should not request more than minPeersPerBlock":
var
localStore = CacheStore.new()
minPeers = 2
discoveryEngine = DiscoveryEngine.new(
localStore,
peerStore,
network,
blockDiscovery,
pendingBlocks,
discoveryLoopSleep = 5.minutes,
minPeersPerBlock = minPeers,
)
want = newAsyncEvent()
var pendingCids = newSeq[Cid]()
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check cid in pendingCids
pendingCids.keepItIf(it != cid)
check peerStore.len < minPeers
var peerCtx = BlockExcPeerCtx(id: PeerId.example)
let address = BlockAddress(leaf: false, cid: cid)
peerCtx.blocks[address] = Presence(address: address)
peerStore.add(peerCtx)
want.fire()
await discoveryEngine.start()
var idx = 0
while peerStore.len < minPeers:
let cid = blocks[idx].cid
inc idx
pendingCids.add(cid)
discoveryEngine.queueFindBlocksReq(@[cid])
await want.wait()
want.clear()
check peerStore.len == minPeers
await discoveryEngine.stop()
test "Should not request if there is already an inflight discovery request":
var
localStore = CacheStore.new()
discoveryEngine = DiscoveryEngine.new(
localStore,
peerStore,
network,
blockDiscovery,
pendingBlocks,
discoveryLoopSleep = 100.millis,
concurrentDiscReqs = 2,
localStore, peerStore, network, blockDiscovery, concurrentDiscReqs = 2
)
reqs = Future[void].Raising([CancelledError]).init()
count = 0
@ -170,7 +86,7 @@ asyncchecksuite "Test Discovery Engine":
check false
count.inc
await reqs # queue the request
await reqs
await discoveryEngine.start()
discoveryEngine.queueFindBlocksReq(@[blocks[0].cid])

View File

@ -55,15 +55,15 @@ asyncchecksuite "Advertiser":
check:
manifestBlk.cid in advertised
test "blockStored should queue tree Cid for advertising":
test "blockStored should not queue tree Cid for advertising":
(await localStore.putBlock(manifestBlk)).tryGet()
await waitTillQueueEmpty()
check:
manifest.treeCid in advertised
manifest.treeCid notin advertised
test "blockStored should not queue non-manifest non-tree CIDs for discovery":
test "blockStored should not queue non-manifest CIDs for discovery":
let blk = bt.Block.example
(await localStore.putBlock(blk)).tryGet()
@ -79,11 +79,10 @@ asyncchecksuite "Advertiser":
await waitTillQueueEmpty()
check eventually advertised.len == 2
check eventually advertised.len == 1
check manifestBlk.cid in advertised
check manifest.treeCid in advertised
test "Should advertise existing manifests and their trees":
test "Should advertise existing manifests":
let newStore = CacheStore.new([manifestBlk])
await advertiser.stop()
@ -91,7 +90,7 @@ asyncchecksuite "Advertiser":
await advertiser.start()
check eventually manifestBlk.cid in advertised
check eventually manifest.treeCid in advertised
check manifest.treeCid notin advertised
test "Stop should clear onBlockStored callback":
await advertiser.stop()

View File

@ -1,12 +1,10 @@
import std/sequtils
import std/algorithm
import std/importutils
import pkg/chronos
import pkg/stew/byteutils
import pkg/storage/stores
import pkg/storage/blockexchange
import pkg/storage/blockexchange/engine/engine {.all.}
import pkg/storage/blockexchange/engine/scheduler {.all.}
import pkg/storage/blockexchange/engine/downloadmanager {.all.}
import pkg/storage/blockexchange/engine/activedownload {.all.}
import pkg/storage/chunker
import pkg/storage/discovery
import pkg/storage/blocktype as bt
@ -15,186 +13,547 @@ import ../../../asynctest
import ../../examples
import ../../helpers
asyncchecksuite "NetworkStore engine - 2 nodes":
proc waitForPeerInSwarm(
download: ActiveDownload,
peerId: PeerId,
timeout = 5.seconds,
pollInterval = 50.milliseconds,
): Future[bool] {.async.} =
let deadline = Moment.now() + timeout
while Moment.now() < deadline:
if download.getSwarm().getPeer(peerId).isSome:
return true
await sleepAsync(pollInterval)
return false
asyncchecksuite "BlockExchange - Basic Block Transfer":
var
nodeCmps1, nodeCmps2: NodesComponents
peerCtx1, peerCtx2: BlockExcPeerCtx
blocks1, blocks2: seq[bt.Block]
pendingBlocks1, pendingBlocks2: seq[BlockHandle]
cluster: NodesCluster
seeder: NodesComponents
leecher: NodesComponents
dataset: TestDataset
setup:
blocks1 = await makeRandomBlocks(datasetSize = 2048, blockSize = 256'nb)
blocks2 = await makeRandomBlocks(datasetSize = 2048, blockSize = 256'nb)
nodeCmps1 = generateNodes(1, blocks1).components[0]
nodeCmps2 = generateNodes(1, blocks2).components[0]
# Create two nodes
cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
seeder = cluster.components[0]
leecher = cluster.components[1]
await allFuturesThrowing(nodeCmps1.start(), nodeCmps2.start())
# Create test dataset (small - 4 blocks)
let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
dataset = makeDataset(blocks).tryGet()
# initialize our want lists
pendingBlocks1 =
blocks2[0 .. 3].mapIt(nodeCmps1.pendingBlocks.getWantHandle(it.cid))
# Assign all blocks to seeder
await seeder.assignBlocks(dataset)
pendingBlocks2 =
blocks1[0 .. 3].mapIt(nodeCmps2.pendingBlocks.getWantHandle(it.cid))
await nodeCmps1.switch.connect(
nodeCmps2.switch.peerInfo.peerId, nodeCmps2.switch.peerInfo.addrs
)
await sleepAsync(100.millis) # give some time to exchange lists
peerCtx2 = nodeCmps1.peerStore.get(nodeCmps2.switch.peerInfo.peerId)
peerCtx1 = nodeCmps2.peerStore.get(nodeCmps1.switch.peerInfo.peerId)
check isNil(peerCtx1).not
check isNil(peerCtx2).not
# Start nodes and connect them
await cluster.components.start()
await connectNodes(cluster)
teardown:
await allFuturesThrowing(nodeCmps1.stop(), nodeCmps2.stop())
await cluster.components.stop()
test "Should exchange blocks on connect":
await allFuturesThrowing(allFinished(pendingBlocks1)).wait(10.seconds)
await allFuturesThrowing(allFinished(pendingBlocks2)).wait(10.seconds)
test "Should download dataset using networkStore":
await leecher.downloadDataset(dataset)
check:
(await allFinished(blocks1[0 .. 3].mapIt(nodeCmps2.localStore.getBlock(it.cid))))
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == blocks1[0 .. 3].mapIt($it.cid).sorted(cmp[string])
for blk in dataset.blocks:
let hasBlock = await blk.cid in leecher.localStore
check hasBlock
(await allFinished(blocks2[0 .. 3].mapIt(nodeCmps1.localStore.getBlock(it.cid))))
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == blocks2[0 .. 3].mapIt($it.cid).sorted(cmp[string])
test "Should send want-have for block":
let blk = bt.Block.new("Block 1".toBytes).tryGet()
let blkFut = nodeCmps1.pendingBlocks.getWantHandle(blk.cid)
peerCtx2.blockRequestScheduled(blk.address)
(await nodeCmps2.localStore.putBlock(blk)).tryGet()
peerCtx1.wantedBlocks.incl(blk.address)
check nodeCmps2.engine.taskQueue.pushOrUpdateNoWait(peerCtx1).isOk
check eventually (await nodeCmps1.localStore.hasBlock(blk.cid)).tryGet()
check eventually (await blkFut) == blk
test "Should get blocks from remote":
let blocks =
await allFinished(blocks2[4 .. 7].mapIt(nodeCmps1.networkStore.getBlock(it.cid)))
check blocks.mapIt(it.read().tryGet()) == blocks2[4 .. 7]
test "Remote should send blocks when available":
let blk = bt.Block.new("Block 1".toBytes).tryGet()
# should fail retrieving block from remote
check not await blk.cid in nodeCmps1.networkStore
# second trigger blockexc to resolve any pending requests
# for the block
(await nodeCmps2.networkStore.putBlock(blk)).tryGet()
# should succeed retrieving block from remote
check await nodeCmps1.networkStore.getBlock(blk.cid).withTimeout(100.millis)
# should succeed
asyncchecksuite "NetworkStore - multiple nodes":
asyncchecksuite "BlockExchange - Presence Discovery":
var
nodes: seq[NodesComponents]
blocks: seq[bt.Block]
cluster: NodesCluster
seeder: NodesComponents
leecher: NodesComponents
dataset: TestDataset
setup:
blocks = await makeRandomBlocks(datasetSize = 4096, blockSize = 256'nb)
nodes = generateNodes(5)
for e in nodes:
await e.engine.start()
cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
seeder = cluster.components[0]
leecher = cluster.components[1]
await allFuturesThrowing(nodes.mapIt(it.switch.start()))
let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
dataset = makeDataset(blocks).tryGet()
await seeder.assignBlocks(dataset)
await cluster.components.start()
await connectNodes(cluster)
teardown:
await allFuturesThrowing(nodes.mapIt(it.switch.stop()))
await cluster.components.stop()
nodes = @[]
test "Should receive blocks for own want list":
test "Should receive presence response for blocks peer has":
let
downloader = nodes[4].networkStore
engine = downloader.engine
manifestCid = dataset.manifestCid
treeCid = dataset.manifest.treeCid
totalBlocks = dataset.blocks.len.uint64
blockSize = dataset.manifest.blockSize.uint32
desc = DownloadDesc(md: dataset.manifestDesc, count: totalBlocks)
download = leecher.downloadManager.startDownload(desc)
address = BlockAddress(treeCid: treeCid, index: 0)
# Add blocks from 1st peer to want list
let
downloadCids = blocks[0 .. 3].mapIt(it.cid) & blocks[12 .. 15].mapIt(it.cid)
pendingBlocks = downloadCids.mapIt(engine.pendingBlocks.getWantHandle(it))
for i in 0 .. 15:
(await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet()
await connectNodes(nodes)
await sleepAsync(100.millis)
await allFuturesThrowing(allFinished(pendingBlocks))
check:
(await allFinished(downloadCids.mapIt(downloader.localStore.getBlock(it))))
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == downloadCids.mapIt($it).sorted(cmp[string])
test "Should exchange blocks with multiple nodes":
let
downloader = nodes[4].networkStore
engine = downloader.engine
# Add blocks from 1st peer to want list
let
pendingBlocks1 = blocks[0 .. 3].mapIt(engine.pendingBlocks.getWantHandle(it.cid))
pendingBlocks2 =
blocks[12 .. 15].mapIt(engine.pendingBlocks.getWantHandle(it.cid))
for i in 0 .. 15:
(await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet()
await connectNodes(nodes)
await sleepAsync(100.millis)
await allFuturesThrowing(allFinished(pendingBlocks1), allFinished(pendingBlocks2))
check pendingBlocks1.mapIt(it.read) == blocks[0 .. 3]
check pendingBlocks2.mapIt(it.read) == blocks[12 .. 15]
asyncchecksuite "NetworkStore - dissemination":
var nodes: seq[NodesComponents]
teardown:
if nodes.len > 0:
await nodes.stop()
test "Should disseminate blocks across large diameter swarm":
let dataset = makeDataset(await makeRandomBlocks(60 * 256, 256'nb)).tryGet()
nodes = generateNodes(
6,
config = NodeConfig(
useRepoStore: false,
findFreePorts: false,
basePort: 8080,
createFullNode: false,
enableBootstrap: false,
enableDiscovery: true,
),
await leecher.network.request.sendWantList(
seeder.switch.peerInfo.peerId,
@[address],
priority = 0,
cancel = false,
wantType = WantType.WantHave,
full = false,
sendDontHave = false,
rangeCount = totalBlocks,
downloadId = download.id,
)
await assignBlocks(nodes[0], dataset, 0 .. 9)
await assignBlocks(nodes[1], dataset, 10 .. 19)
await assignBlocks(nodes[2], dataset, 20 .. 29)
await assignBlocks(nodes[3], dataset, 30 .. 39)
await assignBlocks(nodes[4], dataset, 40 .. 49)
await assignBlocks(nodes[5], dataset, 50 .. 59)
let seederId = seeder.switch.peerInfo.peerId
check await download.waitForPeerInSwarm(seederId)
await nodes.start()
await nodes.linearTopology()
leecher.downloadManager.cancelDownload(treeCid)
let downloads = nodes.mapIt(downloadDataset(it, dataset))
await allFuturesThrowing(downloads).wait(30.seconds)
test "Peer availability should propagate across downloads for same CID":
let
manifestCid = dataset.manifestCid
treeCid = dataset.manifest.treeCid
totalBlocks = dataset.blocks.len.uint64
blockSize = dataset.manifest.blockSize.uint32
desc = DownloadDesc(md: dataset.manifestDesc, count: totalBlocks)
download1 = leecher.engine.startDownload(desc)
download2 = leecher.engine.startDownload(desc)
address = BlockAddress(treeCid: treeCid, index: 0)
await leecher.network.request.sendWantList(
seeder.switch.peerInfo.peerId,
@[address],
priority = 0,
cancel = false,
wantType = WantType.WantHave,
full = false,
sendDontHave = false,
rangeCount = totalBlocks,
downloadId = download1.id,
)
let seederId = seeder.switch.peerInfo.peerId
check await download1.waitForPeerInSwarm(seederId)
check download2.getSwarm().getPeer(seederId).isSome
leecher.downloadManager.cancelDownload(treeCid)
test "Should update swarm when peer reports availability":
let
manifestCid = dataset.manifestCid
treeCid = dataset.manifest.treeCid
blockSize = dataset.manifest.blockSize.uint32
desc = DownloadDesc(md: dataset.manifestDesc, count: dataset.blocks.len.uint64)
download = leecher.downloadManager.startDownload(desc)
availability = BlockAvailability.complete()
download.updatePeerAvailability(seeder.switch.peerInfo.peerId, availability)
let swarm = download.getSwarm()
check swarm.activePeerCount() == 1
let peerOpt = swarm.getPeer(seeder.switch.peerInfo.peerId)
check peerOpt.isSome
check peerOpt.get().availability.kind == bakComplete
leecher.downloadManager.cancelDownload(treeCid)
asyncchecksuite "BlockExchange - Multi-Peer Download":
var
cluster: NodesCluster
seeder1: NodesComponents
seeder2: NodesComponents
leecher: NodesComponents
dataset: TestDataset
setup:
cluster = generateNodes(3, config = NodeConfig(findFreePorts: true))
seeder1 = cluster.components[0]
seeder2 = cluster.components[1]
leecher = cluster.components[2]
let blocks = await makeRandomBlocks(8 * 1024, 1024.NBytes)
dataset = makeDataset(blocks).tryGet()
let halfPoint = dataset.blocks.len div 2
await seeder1.assignBlocks(dataset, 0 ..< halfPoint)
await seeder2.assignBlocks(dataset, halfPoint ..< dataset.blocks.len)
await cluster.components.start()
await connectNodes(cluster)
teardown:
await cluster.components.stop()
test "Should download blocks from multiple peers":
await leecher.downloadDataset(dataset)
for blk in dataset.blocks:
let hasBlock = await blk.cid in leecher.localStore
check hasBlock
test "Should handle partial availability from peers":
let
manifestCid = dataset.manifestCid
treeCid = dataset.manifest.treeCid
blockSize = dataset.manifest.blockSize.uint32
desc = DownloadDesc(md: dataset.manifestDesc, count: dataset.blocks.len.uint64)
download = leecher.downloadManager.startDownload(desc)
halfPoint = (dataset.blocks.len div 2).uint64
ranges1 = @[(start: 0'u64, count: halfPoint)]
download.updatePeerAvailability(
seeder1.switch.peerInfo.peerId, BlockAvailability.fromRanges(ranges1)
)
let ranges2 = @[(start: halfPoint, count: dataset.blocks.len.uint64 - halfPoint)]
download.updatePeerAvailability(
seeder2.switch.peerInfo.peerId, BlockAvailability.fromRanges(ranges2)
)
let swarm = download.getSwarm()
check swarm.activePeerCount() == 2
let peersForFirst = swarm.peersWithRange(0, halfPoint)
check seeder1.switch.peerInfo.peerId in peersForFirst
let peersForSecond =
swarm.peersWithRange(halfPoint, dataset.blocks.len.uint64 - halfPoint)
check seeder2.switch.peerInfo.peerId in peersForSecond
leecher.downloadManager.cancelDownload(treeCid)
asyncchecksuite "BlockExchange - Download Lifecycle":
var
cluster: NodesCluster
seeder: NodesComponents
leecher: NodesComponents
dataset: TestDataset
setup:
cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
seeder = cluster.components[0]
leecher = cluster.components[1]
let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
dataset = makeDataset(blocks).tryGet()
await seeder.assignBlocks(dataset)
await cluster.components.start()
await connectNodes(cluster)
teardown:
await cluster.components.stop()
test "Should allow multiple downloads for same CID":
let
manifestCid = dataset.manifestCid
treeCid = dataset.manifest.treeCid
totalBlocks = dataset.blocks.len.uint64
blockSize = dataset.manifest.blockSize.uint32
desc = DownloadDesc(md: dataset.manifestDesc, count: totalBlocks)
download1 = leecher.downloadManager.startDownload(desc)
download2 = leecher.downloadManager.startDownload(desc)
check download1.id != download2.id
check download1.treeCid == download2.treeCid
leecher.downloadManager.cancelDownload(treeCid)
check leecher.downloadManager.getDownload(treeCid).isNone
test "Two concurrent full downloads for same CID should both complete":
let
treeCid = dataset.manifest.treeCid
totalBlocks = dataset.blocks.len.uint64
let handle1 = leecher.engine.startTreeDownload(dataset.manifestDesc)
require handle1.isOk == true
let handle2 = leecher.engine.startTreeDownload(dataset.manifestDesc)
require handle2.isOk == true
let
h1 = handle1.get()
h2 = handle2.get()
var
blocksReceived1 = 0
blocksReceived2 = 0
for i in 0 ..< totalBlocks.int:
if (await leecher.networkStore.getBlock(treeCid, i.Natural)).isOk:
blocksReceived1 += 1
for i in 0 ..< totalBlocks.int:
if (await leecher.networkStore.getBlock(treeCid, i.Natural)).isOk:
blocksReceived2 += 1
check blocksReceived1 == totalBlocks.int
check blocksReceived2 == totalBlocks.int
leecher.engine.releaseDownload(h1)
leecher.engine.releaseDownload(h2)
test "Releasing one download should not cancel other downloads for same CID":
let
treeCid = dataset.manifest.treeCid
totalBlocks = dataset.blocks.len.uint64
let handle1 = leecher.engine.startTreeDownload(dataset.manifestDesc)
require handle1.isOk
let h1 = handle1.get()
let handle2 = leecher.engine.startTreeDownload(dataset.manifestDesc)
require handle2.isOk
let h2 = handle2.get()
leecher.engine.releaseDownload(h1)
check leecher.downloadManager.getDownload(treeCid).isSome
var blocksReceived = 0
for i in 0 ..< totalBlocks.int:
if (await leecher.networkStore.getBlock(treeCid, i.Natural)).isOk:
blocksReceived += 1
check blocksReceived == totalBlocks.int
leecher.engine.releaseDownload(h2)
check leecher.downloadManager.getDownload(treeCid).isNone
test "Should cancel download":
let
manifestCid = dataset.manifestCid
treeCid = dataset.manifest.treeCid
totalBlocks = dataset.blocks.len.uint64
blockSize = dataset.manifest.blockSize.uint32
desc = DownloadDesc(md: dataset.manifestDesc, count: totalBlocks)
discard leecher.downloadManager.startDownload(desc)
leecher.downloadManager.cancelDownload(treeCid)
check leecher.downloadManager.getDownload(treeCid).isNone
asyncchecksuite "BlockExchange - Error Handling":
var
cluster: NodesCluster
seeder: NodesComponents
leecher: NodesComponents
dataset: TestDataset
setup:
cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
seeder = cluster.components[0]
leecher = cluster.components[1]
let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
dataset = makeDataset(blocks).tryGet()
await seeder.assignBlocks(dataset, 0 ..< 2)
await cluster.components.start()
await connectNodes(cluster)
teardown:
await cluster.components.stop()
test "Should handle peer with partial blocks in swarm":
let
manifestCid = dataset.manifestCid
treeCid = dataset.manifest.treeCid
blockSize = dataset.manifest.blockSize.uint32
desc = DownloadDesc(md: dataset.manifestDesc, count: dataset.blocks.len.uint64)
download = leecher.downloadManager.startDownload(desc)
ranges = @[(start: 0'u64, count: 2'u64)]
download.updatePeerAvailability(
seeder.switch.peerInfo.peerId, BlockAvailability.fromRanges(ranges)
)
let
swarm = download.getSwarm()
candidates = swarm.peersWithRange(0, 2)
check seeder.switch.peerInfo.peerId in candidates
let candidatesForMissing = swarm.peersWithRange(2, 2)
check seeder.switch.peerInfo.peerId notin candidatesForMissing
leecher.downloadManager.cancelDownload(treeCid)
test "Should requeue batch on peer failure":
let
manifestCid = dataset.manifestCid
treeCid = dataset.manifest.treeCid
blockSize = dataset.manifest.blockSize.uint32
desc = DownloadDesc(md: dataset.manifestDesc, count: dataset.blocks.len.uint64)
download = leecher.downloadManager.startDownload(desc)
batch = leecher.downloadManager.getNextBatch(download)
check batch.isSome
download.markBatchInFlight(
batch.get.start, batch.get.count, 0, seeder.switch.peerInfo.peerId
)
check download.pendingBatchCount() == 1
download.handlePeerFailure(seeder.switch.peerInfo.peerId)
check download.pendingBatchCount() == 0
check download.ctx.scheduler.requeuedCount() == 1
leecher.downloadManager.cancelDownload(treeCid)
asyncchecksuite "BlockExchange - Local Block Resolution":
var
cluster: NodesCluster
node1: NodesComponents
dataset: TestDataset
setup:
cluster = generateNodes(1, config = NodeConfig(findFreePorts: true))
node1 = cluster.components[0]
let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
dataset = makeDataset(blocks).tryGet()
await node1.assignBlocks(dataset)
await cluster.components.start()
teardown:
await cluster.components.stop()
test "Download worker should complete wantHandles when all blocks are local":
let
manifestCid = dataset.manifestCid
treeCid = dataset.manifest.treeCid
totalBlocks = dataset.blocks.len.uint64
blockSize = dataset.manifest.blockSize.uint32
desc = DownloadDesc(md: dataset.manifestDesc, count: totalBlocks)
download = node1.downloadManager.startDownload(desc)
var handles: seq[BlockHandle] = @[]
for i in 0'u64 ..< totalBlocks:
let address = download.makeBlockAddress(i)
handles.add(download.getWantHandle(address))
await node1.engine.downloadWorker(download)
for handle in handles:
check handle.finished
let blk = await handle
check blk.isOk
node1.downloadManager.cancelDownload(treeCid)
asyncchecksuite "BlockExchange - Mixed Local and Network":
var
cluster: NodesCluster
seeder: NodesComponents
leecher: NodesComponents
dataset: TestDataset
setup:
cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
seeder = cluster.components[0]
leecher = cluster.components[1]
let blocks = await makeRandomBlocks(8 * 1024, 1024.NBytes)
dataset = makeDataset(blocks).tryGet()
await seeder.assignBlocks(dataset)
let halfPoint = dataset.blocks.len div 2
await leecher.assignBlocks(dataset, 0 ..< halfPoint)
await cluster.components.start()
await connectNodes(cluster)
teardown:
await cluster.components.stop()
test "Should download dataset with some blocks local and some from network":
await leecher.downloadDataset(dataset)
for blk in dataset.blocks:
let hasBlock = await blk.cid in leecher.localStore
check hasBlock
test "Should handle interleaved local and network blocks":
for i, blk in dataset.blocks:
if i mod 2 == 0:
(await leecher.localStore.putBlock(blk)).tryGet()
await leecher.downloadDataset(dataset)
for blk in dataset.blocks:
let hasBlock = await blk.cid in leecher.localStore
check hasBlock
asyncchecksuite "BlockExchange - Re-download from Local":
var
cluster: NodesCluster
seeder: NodesComponents
leecher: NodesComponents
dataset: TestDataset
setup:
cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
seeder = cluster.components[0]
leecher = cluster.components[1]
let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
dataset = makeDataset(blocks).tryGet()
await seeder.assignBlocks(dataset)
await cluster.components.start()
await connectNodes(cluster)
teardown:
await cluster.components.stop()
test "Should re-download from local after network download":
await leecher.downloadDataset(dataset)
for blk in dataset.blocks:
let hasBlock = await blk.cid in leecher.localStore
check hasBlock
await leecher.downloadDataset(dataset)
for blk in dataset.blocks:
let hasBlock = await blk.cid in leecher.localStore
check hasBlock
asyncchecksuite "BlockExchange - NetworkStore getBlocks":
var
cluster: NodesCluster
seeder: NodesComponents
leecher: NodesComponents
dataset: TestDataset
setup:
cluster = generateNodes(2, config = NodeConfig(findFreePorts: true))
seeder = cluster.components[0]
leecher = cluster.components[1]
let blocks = await makeRandomBlocks(4 * 1024, 1024.NBytes)
dataset = makeDataset(blocks).tryGet()
await seeder.assignBlocks(dataset)
await cluster.components.start()
await connectNodes(cluster)
teardown:
await cluster.components.stop()
test "getBlocks all local":
await leecher.assignBlocks(dataset)
await leecher.downloadDataset(dataset)
test "getBlocks all from network":
await leecher.downloadDataset(dataset)
test "getBlocks mixed local and network":
await leecher.assignBlocks(dataset, 0 ..< 2)
await leecher.downloadDataset(dataset)

View File

@ -0,0 +1,157 @@
import std/sets
import std/importutils
import pkg/unittest2
import pkg/chronos
import pkg/storage/blockexchange/engine/downloadcontext {.all.}
import pkg/storage/blockexchange/engine/scheduler {.all.}
privateAccess(BroadcastAvailabilityTracker)
suite "BroadcastAvailabilityTracker (sequential OOO)":
const
WindowSize = 16384'u64
Threshold = 0.75
BatchSize = 100'u64
TotalBlocks = 100_000'u64
var
tracker: BroadcastAvailabilityTracker
sched: Scheduler
template expireInterval() =
tracker.lastBroadcastTime = Moment.now() - 1.hours
tracker.broadcastInterval = 1.milliseconds
setup:
sched = Scheduler.new()
sched.init(TotalBlocks, BatchSize, WindowSize, Threshold)
tracker = BroadcastAvailabilityTracker(
policy: spSequential,
lastBroadcastedWatermark: 0,
broadcastedOutOfOrder: initHashSet[uint64](),
pendingOOOSnapshot: initHashSet[uint64](),
lastBroadcastTime: Moment.now(),
broadcastInterval: 1.hours,
)
test "OOO batch triggers broadcast when watermark has not moved":
discard sched.take()
discard sched.take()
sched.markComplete(100)
check sched.completedWatermark() == 0
check tracker.shouldBroadcast(sched)
let ranges = tracker.getRanges(sched)
check ranges == @[(start: 100'u64, count: BatchSize)]
check tracker.pendingOOOSnapshot == toHashSet([100'u64])
tracker.markBroadcasted(sched)
check tracker.broadcastedOutOfOrder == toHashSet([100'u64])
check tracker.lastBroadcastedWatermark == 0
test "Already-broadcast OOO is not re-emitted next cycle":
for _ in 0 ..< 3:
discard sched.take()
sched.markComplete(100)
discard tracker.getRanges(sched)
tracker.markBroadcasted(sched)
sched.markComplete(200)
check tracker.shouldBroadcast(sched)
let ranges = tracker.getRanges(sched)
check ranges == @[(start: 200'u64, count: BatchSize)]
test "Multiple new OOO batches are all emitted in a single broadcast":
for _ in 0 ..< 4:
discard sched.take()
sched.markComplete(100)
sched.markComplete(200)
sched.markComplete(300)
let ranges = tracker.getRanges(sched)
check ranges.len == 3
var starts: HashSet[uint64]
for r in ranges:
check r.count == BatchSize
starts.incl(r.start)
check starts == toHashSet([100'u64, 200, 300])
check tracker.pendingOOOSnapshot == toHashSet([100'u64, 200, 300])
test "Watermark absorbing already-broadcast OOO produces prefix overlap":
discard sched.take()
discard sched.take()
sched.markComplete(100)
discard tracker.getRanges(sched)
tracker.markBroadcasted(sched)
check tracker.broadcastedOutOfOrder == toHashSet([100'u64])
sched.markComplete(0)
check sched.completedWatermark() == 200
expireInterval()
check tracker.shouldBroadcast(sched)
let ranges = tracker.getRanges(sched)
check ranges == @[(start: 0'u64, count: 200'u64)]
tracker.markBroadcasted(sched)
check tracker.broadcastedOutOfOrder.len == 0
test "No new blocks and no new OOO → shouldBroadcast stays false":
expireInterval()
check not tracker.shouldBroadcast(sched)
test "Prefix-only broadcast fires only after the interval elapses":
discard sched.take()
sched.markComplete(0)
check not tracker.shouldBroadcast(sched)
expireInterval()
check tracker.shouldBroadcast(sched)
let ranges = tracker.getRanges(sched)
check ranges == @[(start: 0'u64, count: BatchSize)]
check tracker.pendingOOOSnapshot.len == 0
tracker.markBroadcasted(sched)
check tracker.lastBroadcastedWatermark == BatchSize
check tracker.broadcastedOutOfOrder.len == 0
test "OOO arriving between getRanges and markBroadcasted is not marked":
for _ in 0 ..< 3:
discard sched.take()
sched.markComplete(100)
let ranges = tracker.getRanges(sched)
check ranges == @[(start: 100'u64, count: BatchSize)]
check tracker.pendingOOOSnapshot == toHashSet([100'u64])
sched.markComplete(200)
tracker.markBroadcasted(sched)
check tracker.broadcastedOutOfOrder == toHashSet([100'u64])
check 200'u64 notin tracker.broadcastedOutOfOrder
check tracker.shouldBroadcast(sched)
let ranges2 = tracker.getRanges(sched)
check ranges2 == @[(start: 200'u64, count: BatchSize)]
check tracker.pendingOOOSnapshot == toHashSet([200'u64])
test "getRanges clears stale snapshot when mark was skipped":
discard sched.take()
discard sched.take()
sched.markComplete(100)
discard tracker.getRanges(sched)
check tracker.pendingOOOSnapshot.len == 1
sched.markComplete(0)
check sched.completedWatermark() == 200
let ranges = tracker.getRanges(sched)
check ranges == @[(start: 0'u64, count: 200'u64)]
check tracker.pendingOOOSnapshot.len == 0

View File

@ -1,10 +1,6 @@
import std/sequtils
import std/random
import std/algorithm
import std/[sequtils, options]
import pkg/stew/byteutils
import pkg/chronos
import pkg/libp2p/errors
import pkg/libp2p/routing_record
import pkg/codexdht/discv5/protocol as discv5
@ -14,85 +10,27 @@ import pkg/storage/stores
import pkg/storage/chunker
import pkg/storage/discovery
import pkg/storage/blocktype
import pkg/storage/utils/asyncheapqueue
import pkg/storage/merkletree
import pkg/storage/blockexchange/utils
import pkg/storage/blockexchange/engine/activedownload {.all.}
import pkg/storage/blockexchange/engine/downloadmanager {.all.}
import ../../../asynctest
import ../../helpers
import ../../examples
const NopSendWantCancellationsProc = proc(
id: PeerId, addresses: seq[BlockAddress]
) {.async: (raises: [CancelledError]).} =
discard
asyncchecksuite "NetworkStore engine basic":
var
peerId: PeerId
chunker: Chunker
blockDiscovery: Discovery
peerStore: PeerCtxStore
pendingBlocks: PendingBlocksManager
blocks: seq[Block]
done: Future[void]
setup:
peerId = PeerId.example
chunker = RandomChunker.new(Rng.instance(), size = 1024'nb, chunkSize = 256'nb)
blockDiscovery = Discovery.new()
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
while true:
let chunk = await chunker.getBytes()
if chunk.len <= 0:
break
blocks.add(Block.new(chunk).tryGet())
done = newFuture[void]()
test "Should send want list to new peers":
proc sendWantList(
id: PeerId,
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
) {.async: (raises: [CancelledError]).} =
check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt($it.cid).sorted
done.complete()
let
network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList))
localStore = CacheStore.new(blocks.mapIt(it))
discovery = DiscoveryEngine.new(
localStore, peerStore, network, blockDiscovery, pendingBlocks
)
advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new(
localStore, network, discovery, advertiser, peerStore, pendingBlocks
)
for b in blocks:
discard engine.pendingBlocks.getWantHandle(b.cid)
await engine.peerAddedHandler(peerId)
await done.wait(100.millis)
asyncchecksuite "NetworkStore engine handlers":
var
peerId: PeerId
chunker: Chunker
blockDiscovery: Discovery
peerStore: PeerCtxStore
pendingBlocks: PendingBlocksManager
peerStore: PeerContextStore
downloadManager: DownloadManager
network: BlockExcNetwork
engine: BlockExcEngine
discovery: DiscoveryEngine
advertiser: Advertiser
peerCtx: BlockExcPeerCtx
peerCtx: PeerContext
localStore: BlockStore
blocks: seq[Block]
@ -108,61 +46,55 @@ asyncchecksuite "NetworkStore engine handlers":
peerId = PeerId.example
blockDiscovery = Discovery.new()
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
peerStore = PeerContextStore.new()
downloadManager = DownloadManager.new()
localStore = CacheStore.new()
network = BlockExcNetwork()
discovery =
DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks)
discovery = DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery)
advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new(
localStore, network, discovery, advertiser, peerStore, pendingBlocks
localStore, network, discovery, advertiser, peerStore, downloadManager
)
peerCtx = BlockExcPeerCtx(id: peerId)
peerCtx = PeerContext(id: peerId)
engine.peers.add(peerCtx)
test "Should schedule block requests":
let wantList = makeWantList(blocks.mapIt(it.cid), wantType = WantType.WantBlock)
# only `wantBlock` are stored in `peerWants`
proc handler() {.async.} =
let ctx = await engine.taskQueue.pop()
check ctx.id == peerId
# only `wantBlock` scheduled
check ctx.wantedBlocks == blocks.mapIt(it.address).toHashSet
let done = handler()
await engine.wantListHandler(peerId, wantList)
await done
test "Should handle want list":
let
tree = StorageMerkleTree.init(blocks.mapIt(it.cid)).tryGet
rootCid = tree.rootCid.tryGet()
for i, blk in blocks:
(await localStore.putBlock(blk)).tryGet()
(await localStore.putCidAndProof(rootCid, i, blk.cid, tree.getProof(i).tryGet())).tryGet()
let
done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid))
wantList = makeWantList(rootCid, blocks.len)
proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
check presence.mapIt(it.address) == wantList.entries.mapIt(it.address)
for p in presence:
check p.kind in {BlockPresenceType.HaveRange, BlockPresenceType.Complete}
done.complete()
engine.network =
BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence))
await allFuturesThrowing(allFinished(blocks.mapIt(localStore.putBlock(it))))
await engine.wantListHandler(peerId, wantList)
await done
test "Should handle want list - `dont-have`":
let
done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true)
treeCid = Cid.example
wantList = makeWantList(treeCid, blocks.len, sendDontHave = true)
proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
@ -170,7 +102,7 @@ asyncchecksuite "NetworkStore engine handlers":
check presence.mapIt(it.address) == wantList.entries.mapIt(it.address)
for p in presence:
check:
p.`type` == BlockPresenceType.DontHave
p.kind == BlockPresenceType.DontHave
done.complete()
@ -181,55 +113,42 @@ asyncchecksuite "NetworkStore engine handlers":
await done
test "Should handle want list - `dont-have` some blocks":
let
tree = StorageMerkleTree.init(blocks.mapIt(it.cid)).tryGet
rootCid = tree.rootCid.tryGet()
for i in 0 ..< 2:
(await engine.localStore.putBlock(blocks[i])).tryGet()
(
await engine.localStore.putCidAndProof(
rootCid, i, blocks[i].cid, tree.getProof(i).tryGet()
)
).tryGet()
let
done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true)
wantList = makeWantList(rootCid, blocks.len, sendDontHave = true)
proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
for p in presence:
if p.address.cidOrTreeCid != blocks[0].cid and
p.address.cidOrTreeCid != blocks[1].cid:
check p.`type` == BlockPresenceType.DontHave
if p.address.index >= 2:
check p.kind == BlockPresenceType.DontHave
else:
check p.`type` == BlockPresenceType.Have
check p.kind in {BlockPresenceType.HaveRange, BlockPresenceType.Complete}
done.complete()
engine.network =
BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence))
(await engine.localStore.putBlock(blocks[0])).tryGet()
(await engine.localStore.putBlock(blocks[1])).tryGet()
await engine.wantListHandler(peerId, wantList)
await done
test "Should store blocks in local store":
let pending = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))
for blk in blocks:
peerCtx.blockRequestScheduled(blk.address)
let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))
# Install NOP for want list cancellations so they don't cause a crash
engine.network = BlockExcNetwork(
request: BlockExcRequest(sendWantCancellations: NopSendWantCancellationsProc)
)
await engine.blocksDeliveryHandler(peerId, blocksDelivery)
let resolved = await allFinished(pending)
check resolved.mapIt(it.read) == blocks
for b in blocks:
let present = await engine.localStore.hasBlock(b.cid)
check present.tryGet()
test "Should handle block presence":
var handles:
Table[Cid, Future[Block].Raising([CancelledError, RetriesExhaustedError])]
proc sendWantList(
id: PeerId,
addresses: seq[BlockAddress],
@ -238,335 +157,169 @@ asyncchecksuite "NetworkStore engine handlers":
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
rangeCount: uint64 = 0,
downloadId: uint64 = 0,
) {.async: (raises: [CancelledError]).} =
engine.pendingBlocks.resolve(
blocks.filterIt(it.address in addresses).mapIt(
BlockDelivery(blk: it, address: it.address)
)
)
discard
engine.network =
BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList))
# only Cids in peer want lists are requested
handles = blocks.mapIt((it.cid, engine.pendingBlocks.getWantHandle(it.cid))).toTable
let
md = testManifestDesc(blocks[0].cid, DefaultBlockSize.uint32, 1)
address = BlockAddress(treeCid: md.manifest.treeCid, index: 0)
desc = DownloadDesc(md: md, startIndex: address.index.uint64, count: 1)
download = engine.downloadManager.startDownload(desc)
discard download.getWantHandle(address)
await engine.blockPresenceHandler(
peerId,
blocks.mapIt(PresenceMessage.init(Presence(address: it.address, have: true))),
@[
BlockPresence(
address: address, kind: BlockPresenceType.Complete, downloadId: download.id
)
],
)
for a in blocks.mapIt(it.address):
check a in peerCtx.peerHave
test "Should send cancellations for requested blocks only":
let
pendingPeer = peerId # peer towards which we have pending block requests
pendingPeerCtx = peerCtx
senderPeer = PeerId.example # peer that will actually send the blocks
senderPeerCtx = BlockExcPeerCtx(id: senderPeer)
reqBlocks = @[blocks[0], blocks[4]] # blocks that we requested to pendingPeer
reqBlockAddrs = reqBlocks.mapIt(it.address)
blockHandles = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid))
swarm = download.getSwarm()
peerOpt = swarm.getPeer(peerId)
check peerOpt.isSome
var cancelled: HashSet[BlockAddress]
engine.peers.add(senderPeerCtx)
for address in reqBlockAddrs:
pendingPeerCtx.blockRequestScheduled(address)
for address in blocks.mapIt(it.address):
senderPeerCtx.blockRequestScheduled(address)
proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress]
) {.async: (raises: [CancelledError]).} =
assert id == pendingPeer
for address in addresses:
cancelled.incl(address)
engine.network = BlockExcNetwork(
request: BlockExcRequest(sendWantCancellations: sendWantCancellations)
)
let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))
await engine.blocksDeliveryHandler(senderPeer, blocksDelivery)
discard await allFinished(blockHandles).wait(100.millis)
check cancelled == reqBlockAddrs.toHashSet()
asyncchecksuite "Block Download":
  ## Tests for BlockExcEngine download behavior: peer timeout handling,
  ## range want-lists and request cancellation.
  ##
  ## NOTE(review): this suite contains apparent merge damage — several tests
  ## below are interleaved with fragments of other tests and will not
  ## compile as-is. See the inline NOTE(review) markers.
  var
    seckey: PrivateKey
    peerId: PeerId
    chunker: Chunker
    blockDiscovery: Discovery
    peerStore: PeerCtxStore
    pendingBlocks: PendingBlocksManager
    network: BlockExcNetwork
    engine: BlockExcEngine
    discovery: DiscoveryEngine
    advertiser: Advertiser
    peerCtx: BlockExcPeerCtx
    localStore: BlockStore
    blocks: seq[Block]

  setup:
    # Chunk 1 KiB of random data into 256-byte blocks.
    chunker = RandomChunker.new(Rng.instance(), size = 1024'nb, chunkSize = 256'nb)
    while true:
      let chunk = await chunker.getBytes()
      if chunk.len <= 0:
        break
      blocks.add(Block.new(chunk).tryGet())
    peerId = PeerId.example
    blockDiscovery = Discovery.new()
    peerStore = PeerCtxStore.new()
    pendingBlocks = PendingBlocksManager.new()
    localStore = CacheStore.new()
    network = BlockExcNetwork()
    discovery =
      DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks)
    advertiser = Advertiser.new(localStore, blockDiscovery)
    engine = BlockExcEngine.new(
      localStore, network, discovery, advertiser, peerStore, pendingBlocks
    )
    # Short activity timeout so the timeout-driven tests run quickly.
    peerCtx = BlockExcPeerCtx(id: peerId, activityTimeout: 100.milliseconds)
    engine.peers.add(peerCtx)

  # NOTE(review): merge damage — the test below has no body and is
  # immediately followed by another `test` declaration whose body matches
  # the reschedule-on-timeout scenario. One of the two headers is stray.
  test "Should reschedule blocks on peer timeout":
  test "Should handle range want list":
    let
      slowPeer = peerId
      fastPeer = PeerId.example
      slowPeerCtx = peerCtx
      # "Fast" peer has in fact a generous timeout. This should avoid timing issues
      # in the test.
      fastPeerCtx = BlockExcPeerCtx(id: fastPeer, activityTimeout: 60.seconds)
      requestedBlock = blocks[0]
    var
      slowPeerWantList = newFuture[void]("slowPeerWantList")
      fastPeerWantList = newFuture[void]("fastPeerWantList")
      slowPeerDropped = newFuture[void]("slowPeerDropped")
      slowPeerBlockRequest = newFuture[void]("slowPeerBlockRequest")
      fastPeerBlockRequest = newFuture[void]("fastPeerBlockRequest")
    engine.peers.add(fastPeerCtx)
    proc sendWantList(
        id: PeerId,
        addresses: seq[BlockAddress],
        priority: int32 = 0,
        cancel: bool = false,
        wantType: WantType = WantType.WantHave,
        full: bool = false,
        sendDontHave: bool = false,
    ) {.async: (raises: [CancelledError]).} =
      check addresses == @[requestedBlock.address]
      # Record which peer was asked for what: WantBlock is an actual block
      # request, WantHave is a presence query.
      if wantType == WantBlock:
        if id == slowPeer:
          slowPeerBlockRequest.complete()
        else:
          fastPeerBlockRequest.complete()
      if wantType == WantHave:
        if id == slowPeer:
          slowPeerWantList.complete()
        else:
          fastPeerWantList.complete()

    proc onPeerDropped(
        peer: PeerId
    ): Future[void] {.async: (raises: [CancelledError]).} =
      # Only the unresponsive slow peer should ever be dropped.
      assert peer == slowPeer
      slowPeerDropped.complete()

    proc selectPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
      # Looks for the slow peer.
      for peer in peers:
        if peer.id == slowPeer:
          return peer
      return peers[0]

    engine.selectPeer = selectPeer
    engine.pendingBlocks.retryInterval = 200.milliseconds
    engine.network =
      BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList))
    engine.network.handlers.onPeerDropped = onPeerDropped
    let blockHandle = engine.requestBlock(requestedBlock.address)
    # Waits for the peer to send its want list to both peers.
    await slowPeerWantList.wait(5.seconds)
    await fastPeerWantList.wait(5.seconds)
    let blockPresence =
      @[BlockPresence(address: requestedBlock.address, type: BlockPresenceType.Have)]
    await engine.blockPresenceHandler(slowPeer, blockPresence)
    await engine.blockPresenceHandler(fastPeer, blockPresence)
    # Waits for the peer to ask for the block.
    await slowPeerBlockRequest.wait(5.seconds)
    # Don't reply and wait for the peer to be dropped by timeout.
    await slowPeerDropped.wait(5.seconds)
    # The engine should retry and ask the fast peer for the block.
    await fastPeerBlockRequest.wait(5.seconds)
    await engine.blocksDeliveryHandler(
      fastPeer, @[BlockDelivery(blk: requestedBlock, address: requestedBlock.address)]
    )
    discard await blockHandle.wait(5.seconds)

  test "Should cancel block request":
    var
      address = BlockAddress.init(blocks[0].cid)
      done = newFuture[void]()
      treeCid = Cid.example
      tree = StorageMerkleTree.init(blocks.mapIt(it.cid)).tryGet
      rootCid = tree.rootCid.tryGet()
    # NOTE(review): merge damage — the proc header below is never closed
    # (no closing paren, pragma or body); the lines that follow it belong
    # to the range want-list tests, not to the cancellation test.
    proc sendWantList(
      id: PeerId,
      addresses: seq[BlockAddress],
      priority: int32 = 0,
      cancel: bool = false,
      wantType: WantType = WantType.WantHave,
      full: bool = false,
      sendDontHave: bool = false,
    # NOTE(review): fragment of range want-list test setup — stores all
    # blocks plus their merkle proofs under rootCid.
    for i, blk in blocks:
      (await localStore.putBlock(blk)).tryGet()
      let proof = tree.getProof(i).tryGet()
      (await localStore.putCidAndProof(rootCid, i, blk.cid, proof)).tryGet()
    # A single range entry covering all blocks of the tree.
    let wantList = WantList(
      entries: @[
        WantListEntry(
          address: BlockAddress(treeCid: rootCid, index: 0),
          priority: 0,
          cancel: false,
          wantType: WantType.WantHave,
          sendDontHave: false,
          rangeCount: blocks.len.uint64,
        )
      ],
      full: false,
    )
    proc sendPresence(
        peerId: PeerId, presence: seq[BlockPresence]
    ) {.async: (raises: [CancelledError]).} =
      # Expect a single range-presence response with at least one range.
      check presence.len == 1
      check presence[0].kind == BlockPresenceType.HaveRange
      check presence[0].ranges.len > 0
      done.complete()

    engine.pendingBlocks.blockRetries = 10
    engine.pendingBlocks.retryInterval = 1.seconds
    # NOTE(review): merge damage — the BlockExcNetwork constructor below is
    # missing its closing paren, and `engine.network` is immediately
    # reassigned right after it.
    engine.network = BlockExcNetwork(
      request: BlockExcRequest(
        sendWantList: sendWantList, sendWantCancellations: NopSendWantCancellationsProc
      )
    engine.network =
      BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence))
    await engine.wantListHandler(peerId, wantList)
    await done

  test "Should not send presence for blocks not in range":
    let
      done = newFuture[void]()
      treeCid = Cid.example
      tree = StorageMerkleTree.init(blocks.mapIt(it.cid)).tryGet
      rootCid = tree.rootCid.tryGet()
    # Only the first two blocks are stored locally.
    for i in 0 ..< 2:
      (await localStore.putBlock(blocks[i])).tryGet()
      let proof = tree.getProof(i).tryGet()
      (await localStore.putCidAndProof(rootCid, i, blocks[i].cid, proof)).tryGet()
    let wantList = WantList(
      entries: @[
        WantListEntry(
          address: BlockAddress(treeCid: rootCid, index: 0),
          priority: 0,
          cancel: false,
          wantType: WantType.WantHave,
          sendDontHave: false,
          rangeCount: blocks.len.uint64,
        )
      ],
      full: false,
    )
    # NOTE(review): merge damage — the lines below belong to the original
    # "Should cancel block request" test (they reference `address` and the
    # cancellation flow, not presence ranges).
    let pending = engine.requestBlock(address)
    await done.wait(100.millis)
    pending.cancel()
    expect CancelledError:
      discard (await pending).tryGet()
asyncchecksuite "Task Handler":
  ## Tests for the engine's task handler, i.e. the path that serves blocks
  ## to remote peers.
  ##
  ## NOTE(review): merge damage in the last test of this suite — see the
  ## inline markers.
  var
    peerId: PeerId
    chunker: Chunker
    blockDiscovery: Discovery
    peerStore: PeerCtxStore
    pendingBlocks: PendingBlocksManager
    network: BlockExcNetwork
    engine: BlockExcEngine
    discovery: DiscoveryEngine
    advertiser: Advertiser
    localStore: BlockStore
    peersCtx: seq[BlockExcPeerCtx]
    peers: seq[PeerId]
    blocks: seq[Block]

  setup:
    # Chunk 1 KiB of random data into 256-byte blocks.
    chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256'nb)
    while true:
      let chunk = await chunker.getBytes()
      if chunk.len <= 0:
        break
      blocks.add(Block.new(chunk).tryGet())
    peerId = PeerId.example
    blockDiscovery = Discovery.new()
    peerStore = PeerCtxStore.new()
    pendingBlocks = PendingBlocksManager.new()
    localStore = CacheStore.new()
    network = BlockExcNetwork()
    discovery =
      DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks)
    advertiser = Advertiser.new(localStore, blockDiscovery)
    engine = BlockExcEngine.new(
      localStore, network, discovery, advertiser, peerStore, pendingBlocks
    )
    peersCtx = @[]
    # Register four peers, each with its own context in the peer store.
    for i in 0 .. 3:
      peers.add(PeerId.example)
      peersCtx.add(BlockExcPeerCtx(id: peers[i]))
      peerStore.add(peersCtx[i])

  # FIXME: this is disabled for now: I've dropped block priorities to make
  # my life easier as I try to optimize the protocol, and also because
  # they were not being used anywhere.
  #
  # test "Should send want-blocks in priority order":
  #   proc sendBlocksDelivery(
  #       id: PeerId, blocksDelivery: seq[BlockDelivery]
  #   ) {.async: (raises: [CancelledError]).} =
  #     check blocksDelivery.len == 2
  #     check:
  #       blocksDelivery[1].address == blocks[0].address
  #       blocksDelivery[0].address == blocks[1].address
  #   for blk in blocks:
  #     (await engine.localStore.putBlock(blk)).tryGet()
  #   engine.network.request.sendBlocksDelivery = sendBlocksDelivery
  #   # second block to send by priority
  #   peersCtx[0].peerWants.add(
  #     WantListEntry(
  #       address: blocks[0].address,
  #       priority: 49,
  #       cancel: false,
  #       wantType: WantType.WantBlock,
  #       sendDontHave: false,
  #     )
  #   )
  #   # first block to send by priority
  #   peersCtx[0].peerWants.add(
  #     WantListEntry(
  #       address: blocks[1].address,
  #       priority: 50,
  #       cancel: false,
  #       wantType: WantType.WantBlock,
  #       sendDontHave: false,
  #     )
  #   )
  #   await engine.taskHandler(peersCtx[0])

  test "Should mark outgoing blocks as sent":
    proc sendBlocksDelivery(
      id: PeerId, blocksDelivery: seq[BlockDelivery]
    # NOTE(review): merge damage — the sendBlocksDelivery header above is
    # unclosed (no pragma/body), and the sendPresence proc below plus the
    # `wantList`/`done` references further down belong to a range-presence
    # test from another suite.
    proc sendPresence(
        peerId: PeerId, presence: seq[BlockPresence]
    ) {.async: (raises: [CancelledError]).} =
      let blockAddress = peersCtx[0].wantedBlocks.toSeq[0]
      check peersCtx[0].isBlockSent(blockAddress)
      check presence.len == 1
      check presence[0].kind == BlockPresenceType.HaveRange
      for (start, count) in presence[0].ranges:
        check start < 2
      done.complete()

    for blk in blocks:
      (await engine.localStore.putBlock(blk)).tryGet()
    engine.network.request.sendBlocksDelivery = sendBlocksDelivery
    engine.network =
      BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence))
    peersCtx[0].wantedBlocks.incl(blocks[0].address)
    await engine.wantListHandler(peerId, wantList)
    await done
    await engine.taskHandler(peersCtx[0])
suite "IsIndexInRanges":
  ## Tests for `isIndexInRanges` over seq[(start, count)] range lists, in
  ## both sorted and unsorted modes.
  ##
  ## NOTE(review): merge damage — fragments of a Task Handler test
  ## ("Should not mark blocks for which local look fails as sent") are
  ## interleaved with the tests below; see inline markers.
  test "Empty ranges returns false":
    let ranges: seq[(uint64, uint64)] = @[]
    check not isIndexInRanges(0, ranges)
    check not isIndexInRanges(100, ranges)

  # NOTE(review): stray Task Handler fragment — references peersCtx/blocks,
  # which do not exist in this suite.
  test "Should not mark blocks for which local look fails as sent":
    peersCtx[0].wantedBlocks.incl(blocks[0].address)

  test "Single range - index inside":
    let ranges = @[(10'u64, 5'u64)]
    check isIndexInRanges(10, ranges, sortedRanges = true)
    check isIndexInRanges(12, ranges, sortedRanges = true)
    check isIndexInRanges(14, ranges, sortedRanges = true)
    # NOTE(review): stray Task Handler fragment.
    await engine.taskHandler(peersCtx[0])

  test "Single range - index outside":
    let ranges = @[(10'u64, 5'u64)]
    check not isIndexInRanges(9, ranges, sortedRanges = true)
    check not isIndexInRanges(15, ranges, sortedRanges = true)
    check not isIndexInRanges(100, ranges, sortedRanges = true)
    # NOTE(review): stray Task Handler fragment.
    let blockAddress = peersCtx[0].wantedBlocks.toSeq[0]
    check not peersCtx[0].isBlockSent(blockAddress)

  test "Multiple sorted ranges - index in each":
    let ranges = @[(0'u64, 3'u64), (10'u64, 5'u64), (100'u64, 10'u64)]
    check isIndexInRanges(0, ranges, sortedRanges = true)
    check isIndexInRanges(2, ranges, sortedRanges = true)
    check isIndexInRanges(10, ranges, sortedRanges = true)
    check isIndexInRanges(14, ranges, sortedRanges = true)
    check isIndexInRanges(100, ranges, sortedRanges = true)
    check isIndexInRanges(109, ranges, sortedRanges = true)

  test "Multiple ranges - index in gaps":
    let ranges = @[(0'u64, 3'u64), (10'u64, 5'u64), (100'u64, 10'u64)]
    check not isIndexInRanges(3, ranges, sortedRanges = true)
    check not isIndexInRanges(9, ranges, sortedRanges = true)
    check not isIndexInRanges(15, ranges, sortedRanges = true)
    check not isIndexInRanges(99, ranges, sortedRanges = true)
    check not isIndexInRanges(110, ranges, sortedRanges = true)

  test "Unsorted ranges with sortedRanges=false":
    let ranges = @[(100'u64, 10'u64), (0'u64, 3'u64), (10'u64, 5'u64)]
    check isIndexInRanges(0, ranges, sortedRanges = false)
    check isIndexInRanges(2, ranges, sortedRanges = false)
    check isIndexInRanges(10, ranges, sortedRanges = false)
    check isIndexInRanges(105, ranges, sortedRanges = false)
    check not isIndexInRanges(50, ranges, sortedRanges = false)

  test "Adjacent ranges":
    # Three back-to-back ranges covering [0, 15) with no gaps.
    let ranges = @[(0'u64, 5'u64), (5'u64, 5'u64), (10'u64, 5'u64)]
    for i in 0'u64 ..< 15:
      check isIndexInRanges(i, ranges, sortedRanges = true)
    check not isIndexInRanges(15, ranges, sortedRanges = true)

  test "Large range values":
    let ranges = @[(1_000_000_000'u64, 1_000_000'u64)]
    check isIndexInRanges(1_000_000_000, ranges, sortedRanges = true)
    check isIndexInRanges(1_000_500_000, ranges, sortedRanges = true)
    check not isIndexInRanges(999_999_999, ranges, sortedRanges = true)
    check not isIndexInRanges(1_001_000_000, ranges, sortedRanges = true)

View File

@ -0,0 +1,95 @@
import pkg/unittest2
import pkg/chronos
import pkg/libp2p/peerid
import pkg/storage/blockexchange/engine/peertracker
import ../../examples
suite "PeerInFlightTracker":
  ## Unit tests for the per-peer in-flight future bookkeeping: counting,
  ## lazy cleanup of finished futures, and explicit peer removal.
  var
    reg: PeerInFlightTracker
    alice, bob: PeerId

  setup:
    reg = PeerInFlightTracker.new()
    alice = PeerId.example
    bob = PeerId.example

  test "count returns 0 for unknown peer":
    check reg.count(alice) == 0

  test "count reflects tracked futures":
    for _ in 0 ..< 2:
      reg.track(alice, newFuture[void]())
    check reg.count(alice) == 2

  test "count is per-peer":
    reg.track(alice, newFuture[void]())
    for _ in 0 ..< 2:
      reg.track(bob, newFuture[void]())
    check reg.count(alice) == 1
    check reg.count(bob) == 2

  test "count filters finished futures (lazy cleanup)":
    let
      finished = newFuture[void]()
      stillLive = newFuture[void]()
    reg.track(alice, finished)
    reg.track(alice, stillLive)
    finished.complete()
    # Completed futures must not be counted.
    check reg.count(alice) == 1

  test "count removes peer entry when all futures finish":
    let only = newFuture[void]()
    reg.track(alice, only)
    only.complete()
    check reg.count(alice) == 0
    # The peer's table entry is dropped entirely once empty.
    check alice notin reg.peerInFlight

  test "track accumulates across calls from different owners":
    let
      downloadAFut = newFuture[void]()
      downloadBFut = newFuture[void]()
    reg.track(alice, downloadAFut)
    reg.track(alice, downloadBFut)
    check reg.count(alice) == 2

  test "clearPeer removes all entries for peer":
    for _ in 0 ..< 2:
      reg.track(alice, newFuture[void]())
    reg.track(bob, newFuture[void]())
    reg.clearPeer(alice)
    # Only the cleared peer is affected.
    check reg.count(alice) == 0
    check reg.count(bob) == 1

  test "clearPeer on unknown peer is a no-op":
    reg.clearPeer(alice)
    check reg.count(alice) == 0
suite "PeerInFlightTracker - sweep":
  ## Verifies that `sweep` eagerly compacts finished futures for every
  ## tracked peer and drops peers whose futures are all done.
  var
    reg: PeerInFlightTracker
    alice, bob: PeerId

  setup:
    reg = PeerInFlightTracker.new()
    alice = PeerId.example
    bob = PeerId.example

  test "sweep compacts all peers":
    let
      finishedA = newFuture[void]()
      pendingA = newFuture[void]()
      finishedB = newFuture[void]()
    reg.track(alice, finishedA)
    reg.track(alice, pendingA)
    reg.track(bob, finishedB)
    finishedA.complete()
    finishedB.complete()
    # After the sweep only the still-pending future remains, and bob
    # (whose futures are all finished) disappears entirely.
    waitFor reg.sweep()
    check reg.count(alice) == 1
    check bob notin reg.peerInFlight

View File

@ -0,0 +1,179 @@
import std/[sets, options]
import pkg/unittest2
import pkg/libp2p/cid
import pkg/storage/blockexchange/engine/downloadcontext {.all.}
import pkg/storage/blockexchange/engine/scheduler {.all.}
import ../../examples
import ../../helpers
suite "Random Window Cursor":
  ## Tests for the random window cursor: it must visit every window of a
  ## download exactly once, in a randomized order.
  test "Produces all windows exactly once (full permutation)":
    for totalWindows in [
      1'u64, 2, 3, 5, 7, 10, 16, 17, 31, 32, 63, 64, 100, 127, 128, 255, 256, 1000
    ]:
      let
        windowSize = 100'u64
        totalBlocks = totalWindows * windowSize
      var
        cursor = initRandomWindowCursor(totalBlocks, windowSize)
        seen = initHashSet[uint64]()
      # Record the window index of the initial position, then advance
      # through the remaining totalWindows - 1 positions.
      seen.incl(cursor.currentWindow().start div windowSize)
      for i in 1'u64 ..< totalWindows:
        check cursor.advance()
        let windowIdx = cursor.currentWindow().start div windowSize
        check windowIdx < totalWindows
        # No window may be visited twice.
        check windowIdx notin seen
        seen.incl(windowIdx)
      check seen.len.uint64 == totalWindows
      check cursor.isDone

  test "Different inits produce different permutations":
    let
      windowSize = 100'u64
      totalBlocks = 10000'u64
      totalWindows = totalBlocks div windowSize
    var
      cursor1 = initRandomWindowCursor(totalBlocks, windowSize)
      cursor2 = initRandomWindowCursor(totalBlocks, windowSize)
      same = 0
    if cursor1.currentWindow().start == cursor2.currentWindow().start:
      same += 1
    for i in 1'u64 ..< totalWindows:
      discard cursor1.advance()
      discard cursor2.advance()
      if cursor1.currentWindow().start == cursor2.currentWindow().start:
        same += 1
    # Two independent cursors should rarely agree on position; fewer than
    # half of the 100 steps matching is taken as evidence of randomness.
    check same < 50

  test "Edge case: single window":
    # 50 blocks with window size 100: everything fits into one window.
    var cursor = initRandomWindowCursor(50, 100)
    check cursor.currentWindow().start == 0
    check cursor.currentWindow().count == 50
    check cursor.isDone

  test "Edge case: two windows":
    var
      cursor = initRandomWindowCursor(200, 100)
      seen = initHashSet[uint64]()
    seen.incl(cursor.currentWindow().start)
    check cursor.advance()
    seen.incl(cursor.currentWindow().start)
    # Both window starts (0 and 100) must have been visited.
    check seen.len == 2
    check 0'u64 in seen
    check 100'u64 in seen
    check cursor.isDone

  test "Last window is truncated when totalBlocks not divisible by windowSize":
    # 350 blocks / window size 100: three full windows plus one of 50.
    var
      cursor = initRandomWindowCursor(350, 100)
      foundShort = false
    if cursor.currentWindow().count < 100:
      foundShort = true
    while cursor.advance():
      if cursor.currentWindow().count < 100:
        foundShort = true
        # The only short window must hold exactly the remainder (50).
        check cursor.currentWindow().count == 50
    check foundShort
suite "DownloadContext Random Windows":
  ## Tests for DownloadContext when configured with the random-window
  ## selection policy (spRandomWindow): presence-window setup, window
  ## advancement, and scheduler interaction.
  test "initRandomWindows sets up first window":
    let
      md = testManifestDesc(Cid.example, 65536, 100000)
      ctx = DownloadContext.new(
        DownloadDesc(md: md, count: 100000, selectionPolicy: spRandomWindow)
      )
      (start, count) = ctx.currentPresenceWindow()
    # The initial window must be non-empty and lie within the block range.
    check count > 0
    check start < ctx.totalBlocks
    check start + count <= ctx.totalBlocks

  test "advanceWindow cycles through all windows":
    let
      blockSize = 65536'u32
      windowSize = computeWindowSize(blockSize)
      totalBlocks = windowSize * 5
      md = testManifestDesc(Cid.example, blockSize, totalBlocks.int)
      ctx = DownloadContext.new(
        DownloadDesc(md: md, count: totalBlocks, selectionPolicy: spRandomWindow)
      )
    var windowStarts = initHashSet[uint64]()
    windowStarts.incl(ctx.currentPresenceWindow().start)
    for i in 1 ..< 5:
      # Drain and complete all batches of the current window before the
      # context will allow advancing to the next one.
      while true:
        let batch = ctx.scheduler.take()
        if batch.isNone:
          break
        ctx.scheduler.markComplete(batch.get().start)
      check ctx.needsNextPresenceWindow()
      discard ctx.advancePresenceWindow()
      windowStarts.incl(ctx.currentPresenceWindow().start)
    # All 5 distinct window starts must have been visited, and no further
    # window should be pending.
    check windowStarts.len == 5
    check not ctx.needsNextPresenceWindow()

  test "scheduler.isEmpty returns true when all batches complete":
    let
      md = testManifestDesc(Cid.example, 65536, 100)
      ctx = DownloadContext.new(
        DownloadDesc(md: md, count: 100, selectionPolicy: spRandomWindow)
      )
    while true:
      let batch = ctx.scheduler.take()
      if batch.isNone:
        break
      ctx.scheduler.markComplete(batch.get().start)
    check ctx.scheduler.isEmpty

  test "File smaller than 1 window (single window)":
    let
      blockSize = 65536'u32
      windowSize = computeWindowSize(blockSize)
      totalBlocks = windowSize div 2
      md = testManifestDesc(Cid.example, blockSize, totalBlocks.int)
      ctx = DownloadContext.new(
        DownloadDesc(md: md, count: totalBlocks, selectionPolicy: spRandomWindow)
      )
      (start, count) = ctx.currentPresenceWindow()
    # The single window covers the entire (half-window-sized) file.
    check start == 0
    check count == totalBlocks
    check not ctx.needsNextPresenceWindow()

  test "File exactly N windows":
    let
      blockSize = 65536'u32
      windowSize = computeWindowSize(blockSize)
      totalBlocks = windowSize * 3
      md = testManifestDesc(Cid.example, blockSize, totalBlocks.int)
      ctx = DownloadContext.new(
        DownloadDesc(md: md, count: totalBlocks, selectionPolicy: spRandomWindow)
      )
    var count = 1
    while true:
      # Drain the current window's batches...
      while true:
        let batch = ctx.scheduler.take()
        if batch.isNone:
          break
        ctx.scheduler.markComplete(batch.get().start)
      # ...then advance until no further window is needed.
      if not ctx.needsNextPresenceWindow():
        break
      discard ctx.advancePresenceWindow()
      count += 1
    check count == 3

View File

@ -0,0 +1,520 @@
import std/options
import pkg/unittest2
import pkg/storage/blockexchange/engine/scheduler {.all.}
suite "Scheduler":
  ## Unit tests for the block-batch Scheduler: sequential batch handout
  ## (`take`), completion tracking with a completed watermark, front/back
  ## requeueing, and partial completion that splits a batch into missing
  ## sub-ranges.
  const
    WindowSize = 16384'u64
    Threshold = 0.75

  var scheduler: Scheduler

  setup:
    scheduler = Scheduler.new()

  test "Should initialize with correct parameters":
    scheduler.init(1000, 100, WindowSize, Threshold)
    check scheduler.totalBlockCount() == 1000
    check scheduler.batchSizeCount() == 100
    check scheduler.hasWork() == true
    check scheduler.isEmpty() == false

  test "Should take batches in order":
    scheduler.init(1000, 100, WindowSize, Threshold)
    let batch1 = scheduler.take()
    check batch1.isSome
    check batch1.get.start == 0
    check batch1.get.count == 100
    let batch2 = scheduler.take()
    check batch2.isSome
    check batch2.get.start == 100
    check batch2.get.count == 100

  test "Should handle last batch with fewer blocks":
    # 250 blocks / batch 100: the third batch holds the 50-block remainder.
    scheduler.init(250, 100, WindowSize, Threshold)
    discard scheduler.take()
    discard scheduler.take()
    let lastBatch = scheduler.take()
    check lastBatch.isSome
    check lastBatch.get.start == 200
    check lastBatch.get.count == 50

  test "Should mark batch as complete":
    scheduler.init(300, 100, WindowSize, Threshold)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    scheduler.markComplete(0)
    let next = scheduler.take()
    check next.isSome
    check next.get.start == 100

  test "Should requeue batch at front":
    scheduler.init(500, 100, WindowSize, Threshold)
    let batch1 = scheduler.take()
    check batch1.get.start == 0
    let batch2 = scheduler.take()
    check batch2.get.start == 100
    # A front-requeued batch is handed out before fresh batches.
    scheduler.requeueFront(0, 100)
    let requeued = scheduler.take()
    check requeued.isSome
    check requeued.get.start == 0
    check requeued.get.count == 100

  test "Should requeue batch at back":
    scheduler.init(500, 100, WindowSize, Threshold)
    let
      batch1 = scheduler.take()
      batch2 = scheduler.take()
    scheduler.requeueBack(0, 100)
    scheduler.requeueFront(100, 100)
    # Front-requeued batch (100) comes out before back-requeued batch (0).
    let first = scheduler.take()
    check first.get.start == 100
    let second = scheduler.take()
    check second.get.start == 0

  test "Should handle partialComplete with single missing range":
    scheduler.init(1000, 100, WindowSize, Threshold)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    check batch.get.count == 100
    # First half completed; the missing second half is re-offered.
    let missingRanges = @[(start: 50'u64, count: 50'u64)]
    scheduler.partialComplete(0, missingRanges)
    let next = scheduler.take()
    check next.isSome
    check next.get.start == 50
    check next.get.count == 50

  test "Should handle partialComplete with multiple missing ranges":
    scheduler.init(1000, 100, WindowSize, Threshold)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    let missingRanges =
      @[(start: 25'u64, count: 25'u64), (start: 75'u64, count: 25'u64)]
    scheduler.partialComplete(0, missingRanges)
    # Missing ranges are handed out in order.
    let next1 = scheduler.take()
    check next1.isSome
    check next1.get.start == 25
    check next1.get.count == 25
    let next2 = scheduler.take()
    check next2.isSome
    check next2.get.start == 75
    check next2.get.count == 25

  test "Should handle partialComplete with non-contiguous missing ranges":
    scheduler.init(1000, 256, WindowSize, Threshold)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    check batch.get.count == 256
    let missingRanges =
      @[(start: 101'u64, count: 49'u64), (start: 201'u64, count: 55'u64)]
    scheduler.partialComplete(0, missingRanges)
    let next1 = scheduler.take()
    check next1.isSome
    check next1.get.start == 101
    check next1.get.count == 49
    let next2 = scheduler.take()
    check next2.isSome
    check next2.get.start == 201
    check next2.get.count == 55

  test "Should not skip completed batches after partialComplete":
    scheduler.init(500, 100, WindowSize, Threshold)
    let batch1 = scheduler.take()
    check batch1.get.start == 0
    scheduler.markComplete(0)
    let batch2 = scheduler.take()
    check batch2.get.start == 100
    let missingRanges = @[(start: 150'u64, count: 50'u64)]
    scheduler.partialComplete(100, missingRanges)
    let next = scheduler.take()
    check next.isSome
    check next.get.start == 150
    check next.get.count == 50

  test "Should become empty after all batches complete":
    scheduler.init(200, 100, WindowSize, Threshold)
    let batch1 = scheduler.take()
    scheduler.markComplete(batch1.get.start)
    let batch2 = scheduler.take()
    scheduler.markComplete(batch2.get.start)
    check scheduler.isEmpty() == true
    check scheduler.hasWork() == false

  test "Should handle out-of-order completion":
    scheduler.init(500, 100, WindowSize, Threshold)
    let
      batch0 = scheduler.take()
      batch1 = scheduler.take()
      batch2 = scheduler.take()
    check batch0.get.start == 0
    check batch1.get.start == 100
    check batch2.get.start == 200
    # Completing in a different order than taken must not confuse `take`.
    scheduler.markComplete(200)
    scheduler.markComplete(0)
    scheduler.markComplete(100)
    let next = scheduler.take()
    check next.isSome
    check next.get.start == 300

  test "Should initialize with range":
    # Range init: blocks [500, 700) remain; [0, 500) is already complete.
    scheduler.initRange(500, 200, 100, WindowSize, Threshold)
    check scheduler.totalBlockCount() == 700
    check scheduler.batchSizeCount() == 100
    check scheduler.completedWatermark() == 500
    let batch1 = scheduler.take()
    check batch1.isSome
    check batch1.get.start == 500
    check batch1.get.count == 100
    let batch2 = scheduler.take()
    check batch2.isSome
    check batch2.get.start == 600
    check batch2.get.count == 100

  test "Should add specific batches":
    scheduler.add(100, 50)
    scheduler.add(300, 75)
    check scheduler.totalBlockCount() == 375
    check scheduler.batchSizeCount() == 50
    let batch1 = scheduler.take()
    check batch1.isSome
    check batch1.get.start == 100
    check batch1.get.count == 50
    let batch2 = scheduler.take()
    check batch2.isSome
    check batch2.get.start == 300
    check batch2.get.count == 75

  test "Should clear scheduler":
    scheduler.init(500, 100, WindowSize, Threshold)
    discard scheduler.take()
    discard scheduler.take()
    scheduler.requeueFront(0, 100)
    # clear() resets all counters and queues to the pristine state.
    scheduler.clear()
    check scheduler.hasWork() == false
    check scheduler.isEmpty() == true
    check scheduler.requeuedCount() == 0
    check scheduler.totalBlockCount() == 0
    check scheduler.batchSizeCount() == 0
    let batch = scheduler.take()
    check batch.isNone

  test "Should return pending batches":
    scheduler.init(500, 100, WindowSize, Threshold)
    check scheduler.pending().len == 0
    discard scheduler.take()
    scheduler.requeueFront(0, 100)
    let pending = scheduler.pending()
    check pending.len == 1
    check pending[0].start == 0
    check pending[0].count == 100

  test "Should return correct requeuedCount":
    scheduler.init(500, 100, WindowSize, Threshold)
    check scheduler.requeuedCount() == 0
    discard scheduler.take()
    discard scheduler.take()
    scheduler.requeueFront(0, 100)
    scheduler.requeueBack(100, 100)
    check scheduler.requeuedCount() == 2

  test "Should return none when exhausted":
    scheduler.init(200, 100, WindowSize, Threshold)
    let
      b1 = scheduler.take()
      b2 = scheduler.take()
    check b1.isSome
    check b2.isSome
    let b3 = scheduler.take()
    check b3.isNone

  test "Should handle single block":
    scheduler.init(1, 100, WindowSize, Threshold)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    check batch.get.count == 1
    scheduler.markComplete(0)
    check scheduler.isEmpty() == true

  test "Should handle batch size larger than total":
    scheduler.init(50, 100, WindowSize, Threshold)
    let batch = scheduler.take()
    check batch.isSome
    check batch.get.start == 0
    check batch.get.count == 50
    scheduler.markComplete(0)
    check scheduler.isEmpty() == true

  test "Should handle zero blocks":
    scheduler.init(0, 100, WindowSize, Threshold)
    check scheduler.hasWork() == false
    check scheduler.isEmpty() == true
    let batch = scheduler.take()
    check batch.isNone

  test "Should ignore requeue of completed batch":
    scheduler.init(300, 100, WindowSize, Threshold)
    let batch = scheduler.take()
    scheduler.markComplete(batch.get.start)
    # Requeueing an already-completed batch must be a no-op.
    scheduler.requeueFront(0, 100)
    scheduler.requeueBack(0, 100)
    check scheduler.requeuedCount() == 0

  test "Should track in-flight batches":
    scheduler.init(300, 100, WindowSize, Threshold)
    let batch = scheduler.take()
    check batch.isSome
    let batch2 = scheduler.take()
    check batch2.isSome
    check batch2.get.start == 100
    scheduler.markComplete(0)
    scheduler.requeueFront(100, 100)
    let batch3 = scheduler.take()
    check batch3.isSome
    check batch3.get.start == 100

  test "Should skip completed batches in requeued":
    scheduler.init(500, 100, WindowSize, Threshold)
    discard scheduler.take()
    scheduler.requeueBack(0, 100)
    discard scheduler.take()
    scheduler.markComplete(0)
    scheduler.requeueBack(0, 100)
    # Batch 0 is complete, so the next take yields the following batch.
    let next = scheduler.take()
    check next.isSome
    check next.get.start == 100

  test "Watermark advances after all sub-ranges of partial batch complete":
    # The watermark only moves past a partially-completed batch once every
    # one of its missing sub-ranges has been individually completed.
    scheduler.init(16, 8, WindowSize, Threshold)
    let batch = scheduler.take()
    check batch.get.start == 0
    check batch.get.count == 8
    let missingRanges = @[
      (start: 1'u64, count: 1'u64),
      (start: 3'u64, count: 1'u64),
      (start: 5'u64, count: 1'u64),
      (start: 7'u64, count: 1'u64),
    ]
    scheduler.partialComplete(0, missingRanges)
    check scheduler.completedWatermark() == 0
    let sub1 = scheduler.take()
    check sub1.get.start == 1
    scheduler.markComplete(1)
    check scheduler.completedWatermark() == 0
    let sub2 = scheduler.take()
    check sub2.get.start == 3
    scheduler.markComplete(3)
    check scheduler.completedWatermark() == 0
    let sub3 = scheduler.take()
    check sub3.get.start == 5
    scheduler.markComplete(5)
    check scheduler.completedWatermark() == 0
    let sub4 = scheduler.take()
    check sub4.get.start == 7
    scheduler.markComplete(7)
    # All sub-ranges done: the watermark jumps past the whole batch.
    check scheduler.completedWatermark() == 8

  test "Watermark merges OOO after partial batch completes":
    scheduler.init(24, 8, WindowSize, Threshold)
    let
      batch0 = scheduler.take()
      batch1 = scheduler.take()
      batch2 = scheduler.take()
    check batch0.get.start == 0
    check batch1.get.start == 8
    check batch2.get.start == 16
    # Later batches complete first; the watermark stays at 0 until the
    # first batch's missing sub-range is done, then merges to 24.
    scheduler.markComplete(8)
    scheduler.markComplete(16)
    check scheduler.completedWatermark() == 0
    scheduler.partialComplete(0, @[(start: 3'u64, count: 1'u64)])
    check scheduler.completedWatermark() == 0
    let sub = scheduler.take()
    check sub.get.start == 3
    scheduler.markComplete(3)
    check scheduler.completedWatermark() == 24
    check scheduler.isEmpty() == true

  test "Nested partials, requeues, OOO merge, multiple partial batches":
    ## Stress scenario combining partial completion of two batches,
    ## nested partial completion of a sub-range, a front requeue and
    ## out-of-order completion; the watermark must only advance once every
    ## outstanding range is accounted for.
    scheduler.init(40, 8, WindowSize, Threshold)
    let
      b0 = scheduler.take()
      b1 = scheduler.take()
      b2 = scheduler.take()
      b3 = scheduler.take()
      b4 = scheduler.take()
    check b0.get.start == 0
    check b4.get.start == 32
    scheduler.markComplete(32)
    check scheduler.completedWatermark() == 0
    scheduler.markComplete(16)
    check scheduler.completedWatermark() == 0
    scheduler.partialComplete(0, @[(start: 2'u64, count: 2'u64)])
    check scheduler.completedWatermark() == 0
    scheduler.partialComplete(
      8, @[(start: 10'u64, count: 3'u64), (start: 13'u64, count: 3'u64)]
    )
    check scheduler.completedWatermark() == 0
    scheduler.markComplete(24)
    check scheduler.completedWatermark() == 0
    let sub1a = scheduler.take()
    check sub1a.get.start == 10
    check sub1a.get.count == 3
    let sub1b = scheduler.take()
    check sub1b.get.start == 13
    check sub1b.get.count == 3
    let sub0a = scheduler.take()
    check sub0a.get.start == 2
    check sub0a.get.count == 2
    scheduler.requeueFront(2, 2)
    check scheduler.completedWatermark() == 0
    scheduler.markComplete(13)
    check scheduler.completedWatermark() == 0
    # Nested partial: sub-range [10,13) itself completes only partially.
    scheduler.partialComplete(10, @[(start: 11'u64, count: 2'u64)])
    check scheduler.completedWatermark() == 0
    let sub1c = scheduler.take()
    check sub1c.get.start == 11
    check sub1c.get.count == 2
    let sub0b = scheduler.take()
    check sub0b.get.start == 2
    scheduler.markComplete(2)
    check scheduler.completedWatermark() == 8
    scheduler.markComplete(11)
    # Everything is now complete; the watermark merges to the full 40.
    check scheduler.completedWatermark() == 40
    check scheduler.isEmpty() == true
    check scheduler.hasWork() == false

  test "BlockBatch batchEnd":
    let batch: BlockBatch = (start: 100'u64, count: 50'u64)
    check batch.batchEnd == 150

  test "BlockBatch contains":
    # contains is inclusive of start, exclusive of end.
    let batch: BlockBatch = (start: 100'u64, count: 50'u64)
    check batch.contains(100) == true
    check batch.contains(149) == true
    check batch.contains(99) == false
    check batch.contains(150) == false

  test "BlockBatch merge":
    let
      batch1: BlockBatch = (start: 100'u64, count: 50'u64)
      batch2: BlockBatch = (start: 140'u64, count: 30'u64)
      batch3: BlockBatch = (start: 200'u64, count: 20'u64)
    # Overlapping batches merge into one covering both...
    let merged1 = merge(batch1, batch2)
    check merged1.isSome
    check merged1.get.start == 100
    check merged1.get.count == 70
    # ...disjoint batches do not merge.
    let merged2 = merge(batch1, batch3)
    check merged2.isNone

View File

@ -0,0 +1,430 @@
import std/[options, tables]
import pkg/unittest2
import pkg/chronos
import pkg/libp2p/cid
import pkg/libp2p/peerid
import pkg/storage/blockexchange/engine/swarm
import pkg/storage/blockexchange/engine/peertracker
import pkg/storage/blockexchange/peers/peercontext
import pkg/storage/blockexchange/peers/peerstats
import pkg/storage/blockexchange/utils
import pkg/storage/storagetypes
import ../../examples
const
  # Block size used throughout these availability tests.
  TestBlockSize = DefaultBlockSize.uint32
  # Total number of bytes in one batch at TestBlockSize.
  TestBatchBytes = computeBatchSize(TestBlockSize).uint64 * TestBlockSize.uint64
suite "BlockAvailability":
  ## Tests for the BlockAvailability variants — unknown, complete,
  ## range-based and bitmap-based — and their query operations
  ## (hasBlock / hasRange / hasAnyInRange) plus merging.
  test "unknown availability":
    # Unknown availability answers negatively to every query.
    let avail = BlockAvailability.unknown()
    check avail.kind == bakUnknown
    check avail.hasBlock(0) == false
    check avail.hasBlock(100) == false
    check avail.hasRange(0, 10) == false
    check avail.hasAnyInRange(0, 10) == false

  test "complete availability":
    # Complete availability answers positively to every query.
    let avail = BlockAvailability.complete()
    check avail.kind == bakComplete
    check avail.hasBlock(0) == true
    check avail.hasBlock(100) == true
    check avail.hasBlock(uint64.high) == true
    check avail.hasRange(0, 1000) == true
    check avail.hasAnyInRange(0, 1000) == true

  test "ranges availability - hasBlock":
    # Ranges cover [10, 30) and [50, 60).
    let avail = BlockAvailability.fromRanges(
      @[(start: 10'u64, count: 20'u64), (start: 50'u64, count: 10'u64)]
    )
    check avail.kind == bakRanges
    check avail.hasBlock(10) == true
    check avail.hasBlock(29) == true
    check avail.hasBlock(30) == false
    check avail.hasBlock(50) == true
    check avail.hasBlock(59) == true
    check avail.hasBlock(60) == false
    check avail.hasBlock(0) == false
    check avail.hasBlock(9) == false
    check avail.hasBlock(35) == false

  test "ranges availability - hasRange":
    # hasRange requires the queried range to be fully covered.
    let avail = BlockAvailability.fromRanges(
      @[(start: 10'u64, count: 20'u64), (start: 50'u64, count: 10'u64)]
    )
    check avail.hasRange(10, 20) == true
    check avail.hasRange(15, 10) == true
    check avail.hasRange(10, 21) == false
    check avail.hasRange(25, 10) == false
    check avail.hasRange(50, 10) == true
    check avail.hasRange(55, 5) == true
    check avail.hasRange(25, 30) == false

  test "ranges availability - hasAnyInRange":
    # hasAnyInRange requires at least one covered block in the query.
    let avail = BlockAvailability.fromRanges(
      @[(start: 10'u64, count: 20'u64), (start: 50'u64, count: 10'u64)]
    )
    check avail.hasAnyInRange(5, 10) == true
    check avail.hasAnyInRange(25, 10) == true
    check avail.hasAnyInRange(45, 10) == true
    check avail.hasAnyInRange(30, 20) == false
    check avail.hasAnyInRange(0, 5) == false
    check avail.hasAnyInRange(100, 10) == false

  test "bitmap availability - hasBlock":
    # 0x55 = 0b01010101: even-indexed bits set (LSB-first indexing).
    let avail = BlockAvailability.fromBitmap(@[0x55'u8], 8)
    check avail.kind == bakBitmap
    check avail.hasBlock(0) == true
    check avail.hasBlock(1) == false
    check avail.hasBlock(2) == true
    check avail.hasBlock(3) == false
    check avail.hasBlock(4) == true
    check avail.hasBlock(5) == false
    check avail.hasBlock(6) == true
    check avail.hasBlock(7) == false
    # Queries past the declared bit count are negative.
    check avail.hasBlock(8) == false
    check avail.hasBlock(100) == false

  test "bitmap availability - hasRange":
    # 0xF0 = 0b11110000: bits 4..7 set.
    let avail = BlockAvailability.fromBitmap(@[0xF0'u8], 8)
    check avail.hasRange(4, 4) == true
    check avail.hasRange(4, 2) == true
    check avail.hasRange(0, 4) == false
    check avail.hasRange(2, 4) == false

  test "bitmap availability - hasAnyInRange":
    let avail = BlockAvailability.fromBitmap(@[0xF0'u8], 8)
    check avail.hasAnyInRange(0, 8) == true
    check avail.hasAnyInRange(0, 4) == false
    check avail.hasAnyInRange(3, 2) == true
    check avail.hasAnyInRange(6, 4) == true

  test "merge unknown with complete":
    # Merging with complete dominates, regardless of argument order.
    let
      unknown = BlockAvailability.unknown()
      complete = BlockAvailability.complete()
    check unknown.merge(complete).kind == bakComplete
    check complete.merge(unknown).kind == bakComplete
test "merge unknown with ranges":
let
unknown = BlockAvailability.unknown()
ranges = BlockAvailability.fromRanges(@[(start: 10'u64, count: 20'u64)])
merged = unknown.merge(ranges)
check merged.kind == bakRanges
check merged.hasBlock(15) == true
test "merge ranges with ranges":
let
r1 = BlockAvailability.fromRanges(@[(start: 0'u64, count: 10'u64)])
r2 = BlockAvailability.fromRanges(@[(start: 20'u64, count: 10'u64)])
merged = r1.merge(r2)
check merged.kind == bakRanges
check merged.hasBlock(5) == true
check merged.hasBlock(25) == true
check merged.hasBlock(15) == false
test "merge overlapping ranges":
let
r1 = BlockAvailability.fromRanges(@[(start: 0'u64, count: 15'u64)])
r2 = BlockAvailability.fromRanges(@[(start: 10'u64, count: 15'u64)])
merged = r1.merge(r2)
check merged.kind == bakRanges
check merged.ranges.len == 1
check merged.ranges[0].start == 0
check merged.ranges[0].count == 25
test "merge bitmap with ranges converts bitmap to ranges":
let
bitmap = BlockAvailability.fromBitmap(@[0x0F'u8], 8)
ranges = BlockAvailability.fromRanges(@[(start: 6'u64, count: 2'u64)])
merged = bitmap.merge(ranges)
check merged.kind == bakRanges
check merged.ranges.len == 2
check merged.ranges[0] == (start: 0'u64, count: 4'u64)
check merged.ranges[1] == (start: 6'u64, count: 2'u64)
suite "SwarmPeer":
  # Per-peer bookkeeping: liveness timestamp, availability merging, and
  # the failure counter.

  test "touch updates lastSeen":
    let swarmPeer = SwarmPeer.new(BlockAvailability.unknown())
    let previouslySeen = swarmPeer.lastSeen
    swarmPeer.touch()
    # touch() may land in the same clock tick, hence >= rather than >.
    check swarmPeer.lastSeen >= previouslySeen

  test "updateAvailability merges":
    let swarmPeer =
      SwarmPeer.new(BlockAvailability.fromRanges(@[(start: 0'u64, count: 10'u64)]))
    swarmPeer.updateAvailability(
      BlockAvailability.fromRanges(@[(start: 20'u64, count: 10'u64)])
    )
    # Both the original and the newly reported ranges are visible...
    check swarmPeer.availability.hasBlock(5)
    check swarmPeer.availability.hasBlock(25)
    # ...but nothing in the gap between them.
    check not swarmPeer.availability.hasBlock(15)

  test "recordFailure and resetFailures":
    let swarmPeer = SwarmPeer.new(BlockAvailability.unknown())
    check swarmPeer.failureCount == 0
    swarmPeer.recordFailure()
    check swarmPeer.failureCount == 1
    swarmPeer.recordFailure()
    check swarmPeer.failureCount == 2
    # A reset drops the counter back to zero.
    swarmPeer.resetFailures()
    check swarmPeer.failureCount == 0
suite "Swarm":
  # Swarm membership: add/remove/ban peers, availability updates, failure
  # accounting, and the deltaMin/deltaTarget/deltaMax health signal.
  var swarm: Swarm
  setup:
    # Fresh swarm (default config) per test.
    swarm = Swarm.new()
  test "addPeer and getPeer":
    let peerId = PeerId.example
    check swarm.addPeer(peerId, BlockAvailability.complete()) == true
    let peerOpt = swarm.getPeer(peerId)
    check peerOpt.isSome
    check peerOpt.get().availability.kind == bakComplete
  test "addPeer respects deltaMax":
    # With deltaMax = 2 the third addPeer is rejected.
    let config =
      SwarmConfig(deltaMin: 1, deltaMax: 2, deltaTarget: 2, maxPeerFailures: 3)
    swarm = Swarm.new(config)
    check swarm.addPeer(PeerId.example, BlockAvailability.complete()) == true
    check swarm.addPeer(PeerId.example, BlockAvailability.complete()) == true
    check swarm.addPeer(PeerId.example, BlockAvailability.complete()) == false
    check swarm.peerCount() == 2
  test "removePeer":
    let peerId = PeerId.example
    discard swarm.addPeer(peerId, BlockAvailability.complete())
    # removePeer hands back the removed peer and forgets it.
    let removed = swarm.removePeer(peerId)
    check removed.isSome
    check swarm.getPeer(peerId).isNone
  test "banPeer prevents re-adding":
    let peerId = PeerId.example
    discard swarm.addPeer(peerId, BlockAvailability.complete())
    swarm.banPeer(peerId)
    # Banning evicts the peer and blocks subsequent addPeer calls.
    check swarm.getPeer(peerId).isNone
    check swarm.addPeer(peerId, BlockAvailability.complete()) == false
  test "updatePeerAvailability":
    let peerId = PeerId.example
    discard swarm.addPeer(
      peerId, BlockAvailability.fromRanges(@[(start: 0'u64, count: 10'u64)])
    )
    swarm.updatePeerAvailability(
      peerId, BlockAvailability.fromRanges(@[(start: 20'u64, count: 10'u64)])
    )
    # The update merges with (rather than replaces) the known ranges.
    let peer = swarm.getPeer(peerId).get()
    check peer.availability.hasBlock(5) == true
    check peer.availability.hasBlock(25) == true
  test "recordPeerFailure returns true when max reached":
    let config =
      SwarmConfig(deltaMin: 1, deltaMax: 10, deltaTarget: 5, maxPeerFailures: 2)
    swarm = Swarm.new(config)
    let peerId = PeerId.example
    discard swarm.addPeer(peerId, BlockAvailability.complete())
    # Returns true only when the count reaches maxPeerFailures (2).
    check swarm.recordPeerFailure(peerId) == false
    check swarm.recordPeerFailure(peerId) == true
  test "peersWithRange":
    let
      peer1 = PeerId.example
      peer2 = PeerId.example
    discard swarm.addPeer(peer1, BlockAvailability.complete())
    discard swarm.addPeer(
      peer2, BlockAvailability.fromRanges(@[(start: 0'u64, count: 100'u64)])
    )
    # Both peers fully cover [0, 50)...
    let peersForRange = swarm.peersWithRange(0, 50)
    check peersForRange.len == 2
    # ...but only the complete peer covers [0, 150).
    let peersForLargeRange = swarm.peersWithRange(0, 150)
    check peersForLargeRange.len == 1
  test "peersWithAnyInRange":
    let
      peer1 = PeerId.example
      peer2 = PeerId.example
    discard swarm.addPeer(
      peer1, BlockAvailability.fromRanges(@[(start: 0'u64, count: 50'u64)])
    )
    discard swarm.addPeer(
      peer2, BlockAvailability.fromRanges(@[(start: 100'u64, count: 50'u64)])
    )
    let peers1 = swarm.peersWithAnyInRange(25, 50)
    check peers1.len == 1
    let peers2 = swarm.peersWithAnyInRange(75, 50)
    check peers2.len == 1
    # [60, 90) falls entirely in the gap between the two peers' ranges.
    let peers3 = swarm.peersWithAnyInRange(60, 30)
    check peers3.len == 0
  test "peersNeeded":
    let config =
      SwarmConfig(deltaMin: 2, deltaMax: 10, deltaTarget: 5, maxPeerFailures: 3)
    swarm = Swarm.new(config)
    # 0-1 peers: below min; 2-4: below target; 5: healthy.
    check swarm.peersNeeded() == shBelowMin
    discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
    check swarm.peersNeeded() == shBelowMin
    discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
    check swarm.peersNeeded() == shBelowTarget
    discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
    discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
    discard swarm.addPeer(PeerId.example, BlockAvailability.complete())
    check swarm.peersNeeded() == shHealthy
suite "BDP Peer Selection":
  # selectByBDP picks a download peer based on throughput estimates,
  # in-flight load and timeout penalties.
  var peerCtxs: seq[PeerContext]
  setup:
    # Five fresh peer contexts with no recorded stats.
    peerCtxs = @[]
    for i in 0 ..< 5:
      let ctx = PeerContext.new(PeerId.example)
      peerCtxs.add(ctx)
  test "Should return none for empty peers":
    var
      emptyTracker = PeerInFlightTracker.new()
      emptyPenalties = initTable[PeerId, float]()
    let res = selectByBDP(@[], TestBatchBytes, emptyTracker, emptyPenalties)
    check res.isNone
  test "Should return single peer":
    var
      emptyTracker = PeerInFlightTracker.new()
      emptyPenalties = initTable[PeerId, float]()
    let res = selectByBDP(@[peerCtxs[0]], TestBatchBytes, emptyTracker, emptyPenalties)
    check res.isSome
    check res.get == peerCtxs[0]
  test "Should prefer untried peers (round-robin)":
    # No peer has throughput data yet; selection must still succeed.
    for peer in peerCtxs:
      check peer.stats.throughputBps().isNone
    var
      emptyTracker = PeerInFlightTracker.new()
      emptyPenalties = initTable[PeerId, float]()
    let res = selectByBDP(peerCtxs, TestBatchBytes, emptyTracker, emptyPenalties)
    check res.isSome
  test "Should select peer with capacity":
    peerCtxs[0].stats.recordRequest(1000, 65536)
    peerCtxs[1].stats.recordRequest(1000, 65536)
    var
      tracker = PeerInFlightTracker.new()
      emptyPenalties = initTable[PeerId, float]()
    # Load peer 1 with 10 in-flight requests; selection should still work.
    for i in 0 ..< 10:
      tracker.track(peerCtxs[1].id, newFuture[void]())
    let res = selectByBDP(peerCtxs, TestBatchBytes, tracker, emptyPenalties)
    check res.isSome
  test "Should deprioritize peer with timeout penalty":
    # Two samples separated by MinThroughputDuration give both peers a
    # measurable throughput.
    peerCtxs[0].stats.recordRequest(1000, 65536)
    peerCtxs[1].stats.recordRequest(1000, 65536)
    waitFor sleepAsync(MinThroughputDuration)
    peerCtxs[0].stats.recordRequest(1000, 65536)
    peerCtxs[1].stats.recordRequest(1000, 65536)
    check peerCtxs[0].stats.throughputBps().isSome
    check peerCtxs[1].stats.throughputBps().isSome
    var
      emptyTracker = PeerInFlightTracker.new()
      penalties = initTable[PeerId, float]()
    penalties[peerCtxs[0].id] = 1.0 * TimeoutPenaltyWeight
    # explorationProb = 0 makes the selection deterministic for the test.
    let res = selectByBDP(
      @[peerCtxs[0], peerCtxs[1]],
      TestBatchBytes,
      emptyTracker,
      penalties,
      explorationProb = 0.0,
    )
    check res.isSome
    check res.get == peerCtxs[1]
  test "Should still select penalized peer when only option":
    peerCtxs[0].stats.recordRequest(1000, 65536)
    waitFor sleepAsync(MinThroughputDuration)
    peerCtxs[0].stats.recordRequest(1000, 65536)
    var
      emptyTracker = PeerInFlightTracker.new()
      penalties = initTable[PeerId, float]()
    # Even a heavy penalty must not exclude the only candidate.
    penalties[peerCtxs[0].id] = 3.0 * TimeoutPenaltyWeight
    let res = selectByBDP(@[peerCtxs[0]], TestBatchBytes, emptyTracker, penalties)
    check res.isSome
    check res.get == peerCtxs[0]
  test "Should prefer peer with fewer timeouts":
    peerCtxs[0].stats.recordRequest(1000, 65536)
    peerCtxs[1].stats.recordRequest(1000, 65536)
    waitFor sleepAsync(MinThroughputDuration)
    peerCtxs[0].stats.recordRequest(1000, 65536)
    peerCtxs[1].stats.recordRequest(1000, 65536)
    var
      emptyTracker = PeerInFlightTracker.new()
      penalties = initTable[PeerId, float]()
    # Peer 0 carries the heavier penalty, so peer 1 must win.
    penalties[peerCtxs[0].id] = 2.0 * TimeoutPenaltyWeight
    penalties[peerCtxs[1].id] = 1.0 * TimeoutPenaltyWeight
    let res = selectByBDP(
      @[peerCtxs[0], peerCtxs[1]],
      TestBatchBytes,
      emptyTracker,
      penalties,
      explorationProb = 0.0,
    )
    check res.isSome
    check res.get == peerCtxs[1]

View File

@ -1,31 +0,0 @@
import pkg/chronos
import pkg/storage/blockexchange/protobuf/presence
import ../../../asynctest
import ../../examples
import ../../helpers
suite "block presence protobuf messages":
  # NOTE(review): this file is deleted by this change set; it exercises the
  # legacy have/dontHave Presence API. The replacement suite (under
  # protocol/presence) uses BlockPresenceType.HaveRange/DontHave instead.
  let
    cid = Cid.example
    # Legacy address shape: a bare block CID rather than treeCid + index.
    address = BlockAddress(leaf: false, cid: cid)
    presence = Presence(address: address, have: true)
    message = PresenceMessage.init(presence)
  test "encodes have/donthave":
    # Shadow the suite-level value so mutations start from a copy.
    var presence = presence
    presence.have = true
    check PresenceMessage.init(presence).`type` == Have
    presence.have = false
    check PresenceMessage.init(presence).`type` == DontHave
  test "decodes CID":
    # `.?` (questionable) safely projects a field out of the Option result.
    check Presence.init(message) .? address == address.some
  test "decodes have/donthave":
    var message = message
    message.`type` = BlockPresenceType.Have
    check Presence.init(message) .? have == true.some
    message.`type` = BlockPresenceType.DontHave
    check Presence.init(message) .? have == false.some

View File

@ -0,0 +1,275 @@
import pkg/unittest2
import pkg/storage/blockexchange/protocol/message
import ../../examples
import ../../helpers
suite "BlockAddress protobuf encoding":
  # Round-trips a BlockAddress through a nested protobuf field and checks
  # that both components survive.

  test "Should encode and decode block address":
    let rootCid = Cid.example
    let original = BlockAddress(treeCid: rootCid, index: 42)
    # Serialize the address as field 1 of an enclosing message.
    var writer = initProtoBuffer()
    writer.write(1, original)
    writer.finish()
    # Pull the nested message back out and decode it.
    var nested: ProtoBuffer
    check writer.getField(1, nested).isOk
    let roundTripped = BlockAddress.decode(nested)
    check roundTripped.isOk
    check roundTripped.get.treeCid == rootCid
    check roundTripped.get.index == 42

  test "Should encode and decode block address with index 0":
    # Index 0 is a valid address and must round-trip as well.
    let rootCid = Cid.example
    let original = BlockAddress(treeCid: rootCid, index: 0)
    var writer = initProtoBuffer()
    writer.write(1, original)
    writer.finish()
    var nested: ProtoBuffer
    check writer.getField(1, nested).isOk
    let roundTripped = BlockAddress.decode(nested)
    check roundTripped.isOk
    check roundTripped.get.treeCid == rootCid
    check roundTripped.get.index == 0
suite "WantListEntry protobuf encoding":
  # Round-trips WantListEntry values through a nested protobuf field.
  test "Should encode and decode WantListEntry":
    let
      treeCid = Cid.example
      entry = WantListEntry(
        address: BlockAddress(treeCid: treeCid, index: 10),
        priority: 5,
        cancel: false,
        wantType: WantType.WantHave,
        sendDontHave: true,
        rangeCount: 100,
      )
    var buffer = initProtoBuffer()
    buffer.write(1, entry)
    buffer.finish()
    var decoded: ProtoBuffer
    check buffer.getField(1, decoded).isOk
    let res = WantListEntry.decode(decoded)
    check res.isOk
    # Every field must survive the round trip unchanged.
    check res.get.address.treeCid == treeCid
    check res.get.address.index == 10
    check res.get.priority == 5
    check res.get.cancel == false
    check res.get.wantType == WantType.WantHave
    check res.get.sendDontHave == true
    check res.get.rangeCount == 100
  test "Should handle WantListEntry with cancel flag":
    # cancel = true together with default-valued fields must round-trip.
    let
      blockCid = Cid.example
      entry = WantListEntry(
        address: BlockAddress(treeCid: blockCid, index: 0),
        priority: 1,
        cancel: true,
        wantType: WantType.WantHave,
        sendDontHave: false,
        rangeCount: 0,
      )
    var buffer = initProtoBuffer()
    buffer.write(1, entry)
    buffer.finish()
    var decoded: ProtoBuffer
    check buffer.getField(1, decoded).isOk
    let res = WantListEntry.decode(decoded)
    check res.isOk
    check res.get.cancel == true
suite "WantList protobuf encoding":
  # Round-trips whole want lists (including the `full` flag) through a
  # nested protobuf field.
  test "Should encode and decode empty WantList":
    let wantList = WantList(entries: @[], full: false)
    var buffer = initProtoBuffer()
    buffer.write(1, wantList)
    buffer.finish()
    var decoded: ProtoBuffer
    check buffer.getField(1, decoded).isOk
    let res = WantList.decode(decoded)
    check res.isOk
    check res.get.entries.len == 0
    check res.get.full == false
  test "Should encode and decode WantList with entries":
    let
      treeCid = Cid.example
      wantList = WantList(
        entries: @[
          WantListEntry(
            address: BlockAddress(treeCid: treeCid, index: 0),
            priority: 1,
            cancel: false,
            wantType: WantType.WantHave,
            sendDontHave: false,
            rangeCount: 10,
          ),
          WantListEntry(
            address: BlockAddress(treeCid: treeCid, index: 1),
            priority: 2,
            cancel: false,
            wantType: WantType.WantHave,
            sendDontHave: true,
            rangeCount: 0,
          ),
        ],
        full: true,
      )
    var buffer = initProtoBuffer()
    buffer.write(1, wantList)
    buffer.finish()
    var decoded: ProtoBuffer
    check buffer.getField(1, decoded).isOk
    let res = WantList.decode(decoded)
    check res.isOk
    # Entry order and per-entry fields must be preserved.
    check res.get.entries.len == 2
    check res.get.entries[0].rangeCount == 10
    check res.get.entries[1].sendDontHave == true
    check res.get.full == true
suite "BlockPresence protobuf encoding":
  # Round-trips BlockPresence for each presence kind: DontHave, HaveRange
  # (with range payloads) and Complete.
  test "Should encode and decode BlockPresence with DontHave":
    let
      treeCid = Cid.example
      presence = BlockPresence(
        address: BlockAddress(treeCid: treeCid, index: 0),
        kind: BlockPresenceType.DontHave,
        ranges: @[],
      )
    var buffer = initProtoBuffer()
    buffer.write(1, presence)
    buffer.finish()
    var decoded: ProtoBuffer
    check buffer.getField(1, decoded).isOk
    let res = BlockPresence.decode(decoded)
    check res.isOk
    check res.get.kind == BlockPresenceType.DontHave
    check res.get.ranges.len == 0
  test "Should encode and decode BlockPresence with HaveRange":
    # HaveRange carries explicit (start, count) ranges that must round-trip
    # in order and unmodified.
    let
      treeCid = Cid.example
      presence = BlockPresence(
        address: BlockAddress(treeCid: treeCid, index: 0),
        kind: BlockPresenceType.HaveRange,
        ranges: @[(start: 0'u64, count: 100'u64), (start: 200'u64, count: 50'u64)],
      )
    var buffer = initProtoBuffer()
    buffer.write(1, presence)
    buffer.finish()
    var decoded: ProtoBuffer
    check buffer.getField(1, decoded).isOk
    let res = BlockPresence.decode(decoded)
    check res.isOk
    check res.get.kind == BlockPresenceType.HaveRange
    check res.get.ranges.len == 2
    check res.get.ranges[0].start == 0
    check res.get.ranges[0].count == 100
    check res.get.ranges[1].start == 200
    check res.get.ranges[1].count == 50
  test "Should encode and decode BlockPresence with Complete":
    let
      treeCid = Cid.example
      presence = BlockPresence(
        address: BlockAddress(treeCid: treeCid, index: 0),
        kind: BlockPresenceType.Complete,
        ranges: @[],
      )
    var buffer = initProtoBuffer()
    buffer.write(1, presence)
    buffer.finish()
    var decoded: ProtoBuffer
    check buffer.getField(1, decoded).isOk
    let res = BlockPresence.decode(decoded)
    check res.isOk
    check res.get.kind == BlockPresenceType.Complete
suite "Full Message protobuf encoding":
  # End-to-end protobufEncode/protobufDecode round trips of the top-level
  # Message (want list + block presences).
  test "Should encode and decode empty Message":
    let
      msg = Message(wantList: WantList(entries: @[], full: false), blockPresences: @[])
      encoded = msg.protobufEncode()
      decoded = Message.protobufDecode(encoded)
    check decoded.isOk
    check decoded.get.wantList.entries.len == 0
    check decoded.get.blockPresences.len == 0
  test "Should encode and decode Message with WantList":
    let
      treeCid = Cid.example
      msg = Message(
        wantList: WantList(
          entries: @[
            WantListEntry(
              address: BlockAddress(treeCid: treeCid, index: 0),
              priority: 1,
              cancel: false,
              wantType: WantType.WantHave,
              sendDontHave: false,
              rangeCount: 100,
            )
          ],
          full: false,
        ),
        blockPresences: @[],
      )
      encoded = msg.protobufEncode()
      decoded = Message.protobufDecode(encoded)
    check decoded.isOk
    check decoded.get.wantList.entries.len == 1
    check decoded.get.wantList.entries[0].rangeCount == 100
  test "Should encode and decode Message with BlockPresences":
    let
      treeCid = Cid.example
      msg = Message(
        wantList: WantList(entries: @[], full: false),
        blockPresences: @[
          BlockPresence(
            address: BlockAddress(treeCid: treeCid, index: 0),
            kind: BlockPresenceType.HaveRange,
            ranges: @[(start: 0'u64, count: 500'u64)],
          )
        ],
      )
      encoded = msg.protobufEncode()
      decoded = Message.protobufDecode(encoded)
    check decoded.isOk
    check decoded.get.blockPresences.len == 1
    check decoded.get.blockPresences[0].kind == BlockPresenceType.HaveRange
    check decoded.get.blockPresences[0].ranges.len == 1
    check decoded.get.blockPresences[0].ranges[0].count == 500

View File

@ -0,0 +1,32 @@
import pkg/chronos
import pkg/storage/blockexchange/protocol/presence
import ../../../asynctest
import ../../examples
import ../../helpers
suite "Block presence protobuf messages":
  # Presence <-> PresenceMessage conversions for the range-based API
  # (BlockPresenceType.HaveRange / DontHave).
  let
    cid = Cid.example
    address = BlockAddress(treeCid: cid, index: 0)
    presence =
      Presence(address: address, have: true, presenceType: BlockPresenceType.HaveRange)
    message = PresenceMessage.init(presence)
  test "encodes have/donthave":
    # Shadow the suite-level value so mutations start from a copy.
    var presence = presence
    presence.presenceType = BlockPresenceType.HaveRange
    check PresenceMessage.init(presence).kind == BlockPresenceType.HaveRange
    presence.presenceType = BlockPresenceType.DontHave
    check PresenceMessage.init(presence).kind == BlockPresenceType.DontHave
  test "decodes CID":
    # `.?` (questionable) safely projects a field out of the Option result.
    check Presence.init(message) .? address == address.some
  test "decodes have/donthave":
    var message = message
    message.kind = BlockPresenceType.HaveRange
    check Presence.init(message) .? have == true.some
    message.kind = BlockPresenceType.DontHave
    check Presence.init(message) .? have == false.some

View File

@ -0,0 +1,878 @@
import std/options
import pkg/chronos
import pkg/stew/byteutils
import pkg/libp2p/peerid
import pkg/libp2p/cid
import pkg/storage/blocktype as bt
import pkg/storage/blockexchange
import pkg/storage/blockexchange/engine/downloadcontext {.all.}
import pkg/storage/blockexchange/engine/activedownload {.all.}
import pkg/storage/blockexchange/engine/downloadmanager {.all.}
import pkg/storage/blockexchange/engine/scheduler {.all.}
import pkg/storage/blockexchange/engine/swarm
import pkg/storage/storagetypes
import ../helpers
import ../examples
import ../../asynctest
const
WindowSize = 16384'u64
Threshold = 0.75
suite "DownloadManager - Want Handles":
  # Want handles are per-address futures, resolved when the block arrives
  # and dropped when cancelled.
  test "Should add want handle":
    let
      downloadManager = DownloadManager.new()
      blk = bt.Block.new("Hello".toBytes).tryGet
      address = BlockAddress.init(blk.cid, 0)
      md = testManifestDesc(Cid.example, DefaultBlockSize.uint32, 1)
      desc = DownloadDesc(md: md, startIndex: address.index.uint64, count: 1)
      download = downloadManager.startDownload(desc)
    discard download.getWantHandle(address)
    # `in` reports whether the download tracks a handle for the address.
    check address in download
  test "Should resolve want handle":
    let
      downloadManager = DownloadManager.new()
      blk = bt.Block.new("Hello".toBytes).tryGet
      address = BlockAddress.init(blk.cid, 0)
      md = testManifestDesc(Cid.example, DefaultBlockSize.uint32, 1)
      desc = DownloadDesc(md: md, startIndex: address.index.uint64, count: 1)
      download = downloadManager.startDownload(desc)
      handle = download.getWantHandle(address)
    check address in download
    # Completing the handle resolves the awaiting future with the block.
    discard download.completeWantHandle(address, some(blk))
    let resolved = (await handle).tryGet
    check resolved == blk
  test "Should cancel want handle":
    let
      downloadManager = DownloadManager.new()
      blk = bt.Block.new("Hello".toBytes).tryGet
      address = BlockAddress.init(blk.cid, 0)
      md = testManifestDesc(Cid.example, DefaultBlockSize.uint32, 1)
      desc = DownloadDesc(md: md, startIndex: address.index.uint64, count: 1)
      download = downloadManager.startDownload(desc)
      handle = download.getWantHandle(address)
    check address in download
    # Cancelling the future also drops the tracked handle.
    await handle.cancelAndWait()
    check address notin download
  test "Should handle retry counters":
    let
      dm = DownloadManager.new(3)
      blk = bt.Block.new("Hello".toBytes).tryGet
      address = BlockAddress.init(blk.cid, 0)
      md = testManifestDesc(Cid.example, DefaultBlockSize.uint32, 1)
      desc = DownloadDesc(md: md, startIndex: address.index.uint64, count: 1)
      download = dm.startDownload(desc)
    discard download.getWantHandle(address)
    # The counter starts at the manager's configured value (3) and counts
    # down to exhaustion.
    check download.retries(address) == 3
    download.decRetries(address)
    check download.retries(address) == 2
    download.decRetries(address)
    check download.retries(address) == 1
    download.decRetries(address)
    check download.retries(address) == 0
    check download.retriesExhausted(address)
asyncchecksuite "DownloadManager - Download Lifecycle":
  # Starting, duplicating, cancelling and re-starting downloads; each
  # download carries independent block/handle state even for the same CID.
  test "Should start new download":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
    discard dm.startDownload(desc)
  test "Should allow multiple downloads for same CID":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      download1 = dm.startDownload(desc)
      download2 = dm.startDownload(desc)
    # Same tree CID, but each startDownload yields a distinct download id.
    check download1.id != download2.id
    check download1.treeCid == download2.treeCid
  test "Multiple downloads for same CID have independent block state":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      download1 = dm.startDownload(desc)
      download2 = dm.startDownload(desc)
      address = BlockAddress(treeCid: md.manifest.treeCid, index: 0)
      handle1 = download1.getWantHandle(address)
    # The handle exists only in the download that requested it.
    check address in download1
    check address notin download2
    let blk = bt.Block.new("test data".toBytes).tryGet()
    discard download1.completeWantHandle(address, some(blk))
    let res = await handle1
    check res.isOk
    # Completing in download1 leaves download2 untouched.
    check address notin download2
  test "Cancel one download for same CID while other continues":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      download1 = dm.startDownload(desc)
      download2 = dm.startDownload(desc)
      address = BlockAddress(treeCid: md.manifest.treeCid, index: 0)
    discard download1.getWantHandle(address)
    let handle2 = download2.getWantHandle(address)
    dm.cancelDownload(download1)
    check download1.cancelled == true
    check download2.cancelled == false
    # download2 can still complete its handle after download1 is gone.
    let blk = bt.Block.new("test data".toBytes).tryGet()
    discard download2.completeWantHandle(address, some(blk))
    let res = await handle2
    check res.isOk
    check dm.getDownload(download2.id, md.manifest.treeCid).isSome
    check dm.getDownload(download1.id, md.manifest.treeCid).isNone
  test "Should start range download":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 150)
      desc = DownloadDesc(md: md, startIndex: 50, count: 100)
      download = dm.startDownload(desc)
    check download.ctx.totalBlocks == 150 # 50 + 100
  test "Should start download with missing blocks":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 1000)
      desc = DownloadDesc(md: md, count: 1000)
      missingBlocks = @[10'u64, 11, 12, 50, 51, 100]
      download = dm.startDownload(desc, missingBlocks)
    # The scheduler is seeded with work for the missing blocks.
    check download.ctx.scheduler.hasWork() == true
  test "Should release download":
    # NOTE(review): identical body to "Should cancel download" below — both
    # call cancelDownload. If a separate release API exists, this test
    # probably meant to exercise it; confirm and deduplicate.
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      treeCid = md.manifest.treeCid
    discard dm.startDownload(desc)
    dm.cancelDownload(treeCid)
    check dm.getDownload(treeCid).isNone
  test "Should cancel download":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      treeCid = md.manifest.treeCid
    discard dm.startDownload(desc)
    dm.cancelDownload(treeCid)
    check dm.getDownload(treeCid).isNone
  test "Should return none for non-existent download":
    let
      dm = DownloadManager.new()
      treeCid = Cid.example
    check dm.getDownload(treeCid).isNone
  test "Should set cancelled flag when download is cancelled":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      treeCid = md.manifest.treeCid
    let downloadBefore = dm.startDownload(desc)
    check downloadBefore.cancelled == false
    dm.cancelDownload(treeCid)
    # The manager forgets the download, and retained refs see cancelled.
    check dm.getDownload(treeCid).isNone
    check downloadBefore.cancelled == true
  test "Should allow new download for same CID after cancellation":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      oldDownload = dm.startDownload(desc)
      treeCid = md.manifest.treeCid
    dm.cancelDownload(treeCid)
    check oldDownload.cancelled == true
    # A fresh download for the same CID starts clean.
    let newDownload = dm.startDownload(desc)
    check newDownload.cancelled == false
    check newDownload != oldDownload
    check oldDownload.cancelled == true
  test "Should set cancelled flag when released":
    # NOTE(review): also calls cancelDownload — see the note on
    # "Should release download" above.
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      downloadRef = dm.startDownload(desc)
      treeCid = md.manifest.treeCid
    check downloadRef.cancelled == false
    dm.cancelDownload(treeCid)
    check dm.getDownload(treeCid).isNone
    check downloadRef.cancelled == true
suite "DownloadManager - Batch Management":
  # Batch lifecycle: fetch from scheduler, mark in flight, complete,
  # requeue (front/back) and partial completion with missing ranges.
  test "Should get next batch":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 1000)
      desc = DownloadDesc(md: md, count: 1000)
      download = dm.startDownload(desc)
      batch = dm.getNextBatch(download)
    # The first batch starts at block 0.
    check batch.isSome
    check batch.get.start == 0
  test "Should mark batch in flight":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 1000)
      desc = DownloadDesc(md: md, count: 1000)
      peerId = PeerId.example
      download = dm.startDownload(desc)
      batch = dm.getNextBatch(download)
    check batch.isSome
    download.markBatchInFlight(batch.get.start, batch.get.count, 0, peerId)
    # pendingBatches is keyed by the batch's start index.
    check download.pendingBatches.len == 1
    check batch.get.start in download.pendingBatches
  test "Should complete batch":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      peerId = PeerId.example
      download = dm.startDownload(desc)
      batch = dm.getNextBatch(download)
    check batch.isSome
    download.markBatchInFlight(batch.get.start, batch.get.count, 0, peerId)
    download.completeBatch(batch.get.start, 0, 0)
    # Completion removes the batch from the pending set.
    check download.pendingBatches.len == 0
  test "Should requeue batch at back":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 1000)
      desc = DownloadDesc(md: md, count: 1000)
      peerId = PeerId.example
      download = dm.startDownload(desc)
      batch1 = dm.getNextBatch(download)
    download.markBatchInFlight(batch1.get.start, batch1.get.count, 0, peerId)
    let batch2 = dm.getNextBatch(download)
    download.markBatchInFlight(batch2.get.start, batch2.get.count, 0, peerId)
    # Requeueing batch1 leaves only batch2 pending and counts a requeue.
    download.requeueBatch(batch1.get.start, batch1.get.count, front = false)
    check download.pendingBatches.len == 1
    check download.ctx.scheduler.requeuedCount() == 1
  test "Should requeue batch at front":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 1000)
      desc = DownloadDesc(md: md, count: 1000)
      peerId = PeerId.example
      download = dm.startDownload(desc)
      batch1 = dm.getNextBatch(download)
    download.markBatchInFlight(batch1.get.start, batch1.get.count, 0, peerId)
    download.requeueBatch(batch1.get.start, batch1.get.count, front = true)
    # front = true makes the requeued batch the next one handed out.
    let nextBatch = dm.getNextBatch(download)
    check nextBatch.isSome
    check nextBatch.get.start == batch1.get.start
  test "Should handle partial batch completion":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 1000)
      desc = DownloadDesc(md: md, count: 1000)
      peerId = PeerId.example
      download = dm.startDownload(desc)
      batch = dm.getNextBatch(download)
    check batch.isSome
    download.markBatchInFlight(batch.get.start, batch.get.count, 0, peerId)
    # Report the second half of the batch as missing; it gets requeued.
    let missingRanges = @[(start: 50'u64, count: 50'u64)]
    download.partialCompleteBatch(batch.get.start, batch.get.count, 0, missingRanges, 0)
    check download.ctx.scheduler.requeuedCount() >= 1
  test "Should count local blocks on partial completion":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 1000)
      desc = DownloadDesc(md: md, count: 1000)
      peerId = PeerId.example
      download = dm.startDownload(desc)
      batch = dm.getNextBatch(download)
    check batch.isSome
    # 3 blocks local, peer delivered 2, rest is missing
    download.markBatchInFlight(batch.get.start, batch.get.count, 3, peerId)
    let missingRanges = @[(start: batch.get.start + 5, count: batch.get.count - 5)]
    download.partialCompleteBatch(
      batch.get.start, batch.get.count, 2, missingRanges, 2'u64 * 65536
    )
    # received should include both local blocks (3) and peer-delivered (2)
    check download.ctx.received == 5
    check download.ctx.bytesReceived == 2'u64 * 65536
suite "DownloadManager - Download Status":
  # Status queries: completion, remaining work and pending batch counts.
  test "Should check if download is complete":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 10)
      desc = DownloadDesc(md: md, count: 10)
      download = dm.startDownload(desc)
    check download.isDownloadComplete() == false
    # Completion is driven by the received-block counter reaching `count`.
    download.ctx.received = 10
    check download.isDownloadComplete() == true
  test "Should check if work remains":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 1000)
      desc = DownloadDesc(md: md, count: 1000)
      download = dm.startDownload(desc)
    # A freshly started download always has batches left to hand out.
    check download.hasWorkRemaining() == true
  test "Should return pending batch count":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 1000)
      desc = DownloadDesc(md: md, count: 1000)
      peerId = PeerId.example
      download = dm.startDownload(desc)
    check download.pendingBatchCount() == 0
    let batch = dm.getNextBatch(download)
    download.markBatchInFlight(batch.get.start, batch.get.count, 0, peerId)
    check download.pendingBatchCount() == 1
suite "DownloadManager - Peer Management":
  # Per-download swarm access, peer-failure handling, and availability
  # updates routed through the download.
  test "Should handle peer failure":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 1000)
      desc = DownloadDesc(md: md, count: 1000)
      peerId = PeerId.example
      download = dm.startDownload(desc)
      batch1 = dm.getNextBatch(download)
    download.markBatchInFlight(batch1.get.start, batch1.get.count, 0, peerId)
    let batch2 = dm.getNextBatch(download)
    download.markBatchInFlight(batch2.get.start, batch2.get.count, 0, peerId)
    check download.pendingBatchCount() == 2
    # A peer failure requeues every batch that peer had in flight.
    download.handlePeerFailure(peerId)
    check download.pendingBatchCount() == 0
    check download.ctx.scheduler.requeuedCount() == 2
  test "Should get swarm":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      download = dm.startDownload(desc)
      swarm = download.getSwarm()
    check swarm != nil
  test "Should update peer availability - add new peer":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      peerId = PeerId.example
      availability = BlockAvailability.complete()
      download = dm.startDownload(desc)
    # An availability update for an unknown peer adds it to the swarm.
    download.updatePeerAvailability(peerId, availability)
    let
      swarm = download.getSwarm()
      peer = swarm.getPeer(peerId)
    check peer.isSome
    check peer.get.availability.kind == bakComplete
  test "Should update peer availability - update existing peer":
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      peerId = PeerId.example
      download = dm.startDownload(desc)
    download.updatePeerAvailability(peerId, BlockAvailability.unknown())
    let peerBefore = download.getSwarm().getPeer(peerId)
    check peerBefore.get.availability.kind == bakUnknown
    # A second update upgrades the stored availability in place.
    download.updatePeerAvailability(peerId, BlockAvailability.complete())
    let peerAfter = download.getSwarm().getPeer(peerId)
    check peerAfter.get.availability.kind == bakComplete
suite "DownloadManager - Retry Management":
  # Covers per-block retry counters, exhaustion, and range queries.

  test "Should decrement block retries":
    ## Each decrement lowers the block's retry budget by one; while the
    ## budget is positive no block is reported as exhausted.
    let
      dm = DownloadManager.new(retries = 5)
      blk = bt.Block.new("Hello".toBytes).tryGet
      address = BlockAddress.init(blk.cid, 0)
      md = testManifestDesc(Cid.example, DefaultBlockSize.uint32, 1)
      desc = DownloadDesc(md: md, startIndex: address.index.uint64, count: 1)
      download = dm.startDownload(desc)
    discard download.getWantHandle(address)
    check download.retries(address) == 5
    let exhausted = download.decrementBlockRetries(@[address])
    check exhausted.len == 0
    check download.retries(address) == 4

  test "Should return exhausted blocks":
    ## With a budget of 2, the second decrement drops the counter past the
    ## limit and the block is returned in the exhausted list.
    let
      dm = DownloadManager.new(retries = 2)
      blk = bt.Block.new("Hello".toBytes).tryGet
      address = BlockAddress.init(blk.cid, 0)
      md = testManifestDesc(Cid.example, DefaultBlockSize.uint32, 1)
      desc = DownloadDesc(md: md, startIndex: address.index.uint64, count: 1)
      download = dm.startDownload(desc)
    discard download.getWantHandle(address)
    discard download.decrementBlockRetries(@[address])
    check download.retries(address) == 1
    let exhausted = download.decrementBlockRetries(@[address])
    check exhausted.len == 1
    check address in exhausted

  test "Should fail exhausted blocks":
    ## Failing an exhausted block marks it exhausted and removes its
    ## want-handle from the download.
    let
      dm = DownloadManager.new(retries = 1)
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      address = BlockAddress(treeCid: md.manifest.treeCid, index: 0)
      download = dm.startDownload(desc)
    discard download.getWantHandle(address)
    discard download.decrementBlockRetries(@[address])
    download.failExhaustedBlocks(@[address])
    check download.isBlockExhausted(address) == true
    check address notin download

  test "Should get block addresses for range":
    ## Only addresses with registered want-handles are returned for a
    ## range query, even when the range is wider (5 handles, range 0..10).
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      download = dm.startDownload(desc)
      treeCid = md.manifest.treeCid
    for i in 0'u64 ..< 5:
      let address = BlockAddress(treeCid: treeCid, index: i.int)
      discard download.getWantHandle(address)
    let addresses = download.getBlockAddressesForRange(0, 10)
    check addresses.len == 5
suite "DownloadContext - Basics":
  ## Exercises construction and simple bookkeeping of a DownloadContext.

  test "Should create download context":
    let
      manifestDesc = testManifestDesc(Cid.example, 65536, 1000)
      context = DownloadContext.new(DownloadDesc(md: manifestDesc, count: 1000))
    # A fresh context carries the manifest geometry with zeroed counters.
    check context.blockSize == 65536
    check context.totalBlocks == 1000
    check context.received == 0
    check context.bytesReceived == 0

  test "Should report not complete initially":
    let
      manifestDesc = testManifestDesc(Cid.example, 65536, 100)
      context = DownloadContext.new(DownloadDesc(md: manifestDesc, count: 100))
    check not context.isComplete()

  test "Should report complete when all received":
    let
      manifestDesc = testManifestDesc(Cid.example, 65536, 100)
      context = DownloadContext.new(DownloadDesc(md: manifestDesc, count: 100))
    context.received = 100
    check context.isComplete()

  test "Should return progress":
    let
      manifestDesc = testManifestDesc(Cid.example, 65536, 100)
      context = DownloadContext.new(DownloadDesc(md: manifestDesc, count: 100))
      expectedBytes = 50'u64 * 65536
    context.received = 50
    context.bytesReceived = expectedBytes
    # The progress snapshot mirrors the raw counters.
    let snapshot = context.progress()
    check snapshot.blocksCompleted == 50
    check snapshot.totalBlocks == 100
    check snapshot.bytesTransferred == expectedBytes

  test "Should return remaining blocks":
    let
      manifestDesc = testManifestDesc(Cid.example, 65536, 100)
      context = DownloadContext.new(DownloadDesc(md: manifestDesc, count: 100))
    # Remaining count tracks the received counter exactly.
    check context.remainingBlocks() == 100
    context.received = 60
    check context.remainingBlocks() == 40
    context.received = 100
    check context.remainingBlocks() == 0

  test "Should init scheduler with missing blocks":
    let
      manifestDesc = testManifestDesc(Cid.example, 65536, 1000)
      context = DownloadContext.new(DownloadDesc(md: manifestDesc, count: 1000))
      sparseIndices = @[10'u64, 11, 12, 50, 51, 100]
    # Seeding from a sparse index list leaves the scheduler with work.
    context.scheduler.initFromIndices(sparseIndices, 256, WindowSize, Threshold)
    check context.scheduler.hasWork()

  test "Should mark batch received":
    let
      manifestDesc = testManifestDesc(Cid.example, 65536, 100)
      context = DownloadContext.new(DownloadDesc(md: manifestDesc, count: 100))
      receivedBytes = 10'u64 * 65536
    context.markBatchReceived(0, 10, receivedBytes)
    check context.received == 10
    check context.bytesReceived == receivedBytes
suite "DownloadContext - Windowed Presence":
  # Covers presence-window sizing, advancement, and trimming of peer
  # availability ranges below the completion watermark.

  test "Should compute presence window size":
    ## Window size is (1 GiB / blockSize) blocks, clamped to at least 1.
    check computeWindowSize(65536) == 1024'u64 * 1024 * 1024 div 65536
    check computeWindowSize(1024) == 1024'u64 * 1024 * 1024 div 1024
    check computeWindowSize(2'u32 * 1024 * 1024 * 1024) >= 1'u64

  test "Should initialize presence window":
    ## The initial window starts at block 0 and is non-empty.
    let
      md = testManifestDesc(Cid.example, 65536, 100000)
      ctx = DownloadContext.new(DownloadDesc(md: md, count: 100000))
      window = ctx.currentPresenceWindow()
    check window.start == 0
    check window.count > 0

  test "Should advance presence window":
    ## Advancing yields a window that begins exactly where the previous
    ## one ended and extends past it.
    let
      md = testManifestDesc(Cid.example, 65536, 100000)
      ctx = DownloadContext.new(DownloadDesc(md: md, count: 100000))
      oldWindow = ctx.currentPresenceWindow()
      oldEnd = oldWindow.start + oldWindow.count
      advancedWindow = ctx.advancePresenceWindow()
    check advancedWindow.start == oldEnd
    check advancedWindow.start + advancedWindow.count > oldEnd

  test "Should check if needs next presence window":
    ## Drains the scheduler through the current window, then marks the
    ## first 75% of it complete; once the watermark crosses that
    ## threshold the next window is requested (if more blocks remain).
    let
      md = testManifestDesc(Cid.example, 65536, 100000)
      ctx = DownloadContext.new(DownloadDesc(md: md, count: 100000))
    ctx.scheduler.init(ctx.totalBlocks, 256, WindowSize, Threshold)
    check ctx.needsNextPresenceWindow() == false
    let
      window = ctx.currentPresenceWindow()
      windowEnd = window.start + window.count
      threshold = (windowEnd.float * 0.75).uint64
    # Take every batch in the window (batch size 256)...
    var pos: uint64 = 0
    while pos < windowEnd:
      discard ctx.scheduler.take()
      pos += 256
    # ...then complete batches up to the 75% threshold.
    pos = 0
    while pos <= threshold:
      ctx.scheduler.markComplete(pos)
      pos += 256
    if windowEnd < ctx.totalBlocks:
      check ctx.needsNextPresenceWindow() == true

  test "Should not need next window when at last window":
    let
      md = testManifestDesc(Cid.example, 65536, 100)
      ctx = DownloadContext.new(DownloadDesc(md: md, count: 100))
    # Small total, fits in one window
    ctx.scheduler.init(ctx.totalBlocks, 256, WindowSize, Threshold)
    discard ctx.scheduler.take()
    ctx.scheduler.markComplete(0)
    check ctx.needsNextPresenceWindow() == false

  test "Should trim ranges entirely below watermark":
    ## After completing batches 0 and 256, the watermark sits above the
    ## (0, 400) range, so trimming removes it and keeps only (2000, 500).
    let
      md = testManifestDesc(Cid.example, 65536, 100000)
      ctx = DownloadContext.new(DownloadDesc(md: md, count: 100000))
      peerId = PeerId.example
      ranges = @[(start: 0'u64, count: 400'u64), (start: 2000'u64, count: 500'u64)]
    discard ctx.swarm.addPeer(peerId, BlockAvailability.fromRanges(ranges))
    ctx.scheduler.init(ctx.totalBlocks, 256, WindowSize, Threshold)
    discard ctx.scheduler.take()
    ctx.scheduler.markComplete(0)
    discard ctx.scheduler.take()
    ctx.scheduler.markComplete(256)
    ctx.trimPresenceBeforeWatermark()
    let peer = ctx.swarm.getPeer(peerId)
    check peer.isSome
    check peer.get.availability.kind == bakRanges
    check peer.get.availability.ranges.len == 1
    check peer.get.availability.ranges[0].start == 2000
    check peer.get.availability.ranges[0].count == 500

  test "Should keep ranges spanning the watermark intact":
    ## A range (0, 1000) straddling the watermark is not shortened or
    ## split by trimming.
    let
      md = testManifestDesc(Cid.example, 65536, 100000)
      ctx = DownloadContext.new(DownloadDesc(md: md, count: 100000))
      peerId = PeerId.example
      ranges = @[(start: 0'u64, count: 1000'u64)]
    discard ctx.swarm.addPeer(peerId, BlockAvailability.fromRanges(ranges))
    ctx.scheduler.init(ctx.totalBlocks, 256, WindowSize, Threshold)
    discard ctx.scheduler.take()
    ctx.scheduler.markComplete(0)
    discard ctx.scheduler.take()
    ctx.scheduler.markComplete(256)
    ctx.trimPresenceBeforeWatermark()
    let peer = ctx.swarm.getPeer(peerId)
    check peer.isSome
    check peer.get.availability.kind == bakRanges
    check peer.get.availability.ranges.len == 1
    check peer.get.availability.ranges[0].start == 0
    check peer.get.availability.ranges[0].count == 1000

  test "Should not trim bakComplete peers":
    ## Peers advertising complete availability have no ranges to trim and
    ## keep their bakComplete kind after trimming.
    let
      md = testManifestDesc(Cid.example, 65536, 100000)
      ctx = DownloadContext.new(DownloadDesc(md: md, count: 100000))
      peerId = PeerId.example
    discard ctx.swarm.addPeer(peerId, BlockAvailability.complete())
    ctx.scheduler.init(ctx.totalBlocks, 256, WindowSize, Threshold)
    discard ctx.scheduler.take()
    ctx.scheduler.markComplete(0)
    discard ctx.scheduler.take()
    ctx.scheduler.markComplete(256)
    ctx.trimPresenceBeforeWatermark()
    let peer = ctx.swarm.getPeer(peerId)
    check peer.isSome
    check peer.get.availability.kind == bakComplete
suite "DownloadManager - Completion Future":
  # Covers resolution of the per-download completion future on success,
  # exhaustion, cancellation, and double-completion attempts.
  # NOTE(review): tests use `await`, so this suite presumably runs under an
  # async-aware suite macro — confirm against the surrounding file.

  test "Should complete batch locally":
    ## Completing the only batch locally drains the scheduler, counts all
    ## blocks as received with zero bytes transferred, and finishes the
    ## download.
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 10)
      desc = DownloadDesc(md: md, count: 10)
      download = dm.startDownload(desc)
      batch = dm.getNextBatch(download)
    check batch.isSome
    download.completeBatchLocal(batch.get.start, batch.get.count)
    check download.ctx.scheduler.isEmpty()
    check download.ctx.received == 10
    # Local completion transfers no bytes over the network.
    check download.ctx.bytesReceived == 0
    check download.pendingBatches.len == 0
    check download.ctx.isComplete()

  test "Should resolve completion future on success":
    ## The completion future resolves (without error) once every block is
    ## accounted for, and waitForComplete returns ok.
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 10)
      desc = DownloadDesc(md: md, count: 10)
      download = dm.startDownload(desc)
    check not download.completionFuture.finished
    let batch = dm.getNextBatch(download)
    check batch.isSome
    download.completeBatchLocal(batch.get.start, batch.get.count)
    check download.completionFuture.finished
    check not download.completionFuture.failed
    let res = await download.waitForComplete()
    check res.isOk
  test "Should resolve completion future with error on exhausted blocks":
    ## When every block exhausts its retries, the future still resolves
    ## (not fails) and waitForComplete yields a RetriesExhaustedError.
    let
      dm = DownloadManager.new(retries = 1)
      md = testManifestDesc(Cid.example, 65536, 10)
      desc = DownloadDesc(md: md, count: 10)
      download = dm.startDownload(desc)
      treeCid = md.manifest.treeCid
    var addresses: seq[BlockAddress] = @[]
    for i in 0'u64 ..< 10:
      let address = BlockAddress(treeCid: treeCid, index: i.int)
      discard download.getWantHandle(address)
      addresses.add(address)
    discard download.decrementBlockRetries(addresses)
    download.failExhaustedBlocks(addresses)
    check download.completionFuture.finished
    check not download.completionFuture.failed
    let res = await download.waitForComplete()
    check res.isErr
    check res.error of RetriesExhaustedError

  test "Should fail completion future on cancel":
    ## Cancelling a download fails (rather than resolves) its future.
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 100)
      desc = DownloadDesc(md: md, count: 100)
      download = dm.startDownload(desc)
      treeCid = md.manifest.treeCid
    check not download.completionFuture.finished
    dm.cancelDownload(treeCid)
    check download.completionFuture.finished
    check download.completionFuture.failed

  test "Should not double-complete completion future":
    ## Signalling an error after the future already resolved successfully
    ## must be a no-op: the result stays ok.
    let
      dm = DownloadManager.new()
      md = testManifestDesc(Cid.example, 65536, 10)
      desc = DownloadDesc(md: md, count: 10)
      download = dm.startDownload(desc)
      batch = dm.getNextBatch(download)
    check batch.isSome
    download.completeBatchLocal(batch.get.start, batch.get.count)
    check download.completionFuture.finished
    check not download.completionFuture.failed
    let result1 = await download.waitForComplete()
    check result1.isOk
    let error = (ref RetriesExhaustedError)(msg: "test error")
    download.signalCompletionIfDone(error)
    check not download.completionFuture.failed
    let result2 = await download.waitForComplete()
    check result2.isOk

  test "Should propagate error through waitForComplete async":
    ## A waiter obtained before failure observes the exhaustion error once
    ## all blocks run out of retries.
    let
      dm = DownloadManager.new(retries = 1)
      md = testManifestDesc(Cid.example, 65536, 10)
      desc = DownloadDesc(md: md, count: 10)
      download = dm.startDownload(desc)
      waiter = download.waitForComplete()
      treeCid = md.manifest.treeCid
    check not waiter.finished
    var addresses: seq[BlockAddress] = @[]
    for i in 0'u64 ..< 10:
      let address = BlockAddress(treeCid: treeCid, index: i.int)
      discard download.getWantHandle(address)
      addresses.add(address)
    discard download.decrementBlockRetries(addresses)
    download.failExhaustedBlocks(addresses)
    let res = await waiter
    check res.isErr
    check res.error of RetriesExhaustedError

View File

@ -1,5 +1,8 @@
import ./engine/testengine
import ./engine/testblockexc
import ./engine/testadvertiser
import ./engine/testscheduler
import ./engine/testswarm
import ./engine/testpeertracker
{.warning[UnusedImport]: off.}

View File

@ -1,5 +1,4 @@
import std/sequtils
import std/tables
import std/[sequtils, tables]
import pkg/chronos
@ -7,6 +6,7 @@ import pkg/storage/rng
import pkg/storage/chunker
import pkg/storage/blocktype as bt
import pkg/storage/blockexchange
import pkg/storage/blockexchange/protocol/wantblocks
import ../../asynctest
import ../examples
@ -45,13 +45,13 @@ asyncchecksuite "Network - Handlers":
discard await networkPeer.connect()
test "Want List handler":
let treeCid = Cid.example
proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
for b in blocks:
check b.address in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.address)]
for entry in wantList.entries:
check entry.address.treeCid == treeCid
check entry.wantType == WantType.WantHave
check entry.priority == 1
check entry.cancel == true
@ -62,35 +62,24 @@ asyncchecksuite "Network - Handlers":
network.handlers.onWantList = wantListHandler
let wantList =
makeWantList(blocks.mapIt(it.cid), 1, true, WantType.WantHave, true, true)
makeWantList(treeCid, blocks.len, 1, true, WantType.WantHave, true, true)
let msg = Message(wantlist: wantList)
await buffer.pushData(lenPrefix(protobufEncode(msg)))
await done.wait(500.millis)
test "Blocks Handler":
proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: []).} =
check blocks == blocksDelivery.mapIt(it.blk)
done.complete()
network.handlers.onBlocksDelivery = blocksDeliveryHandler
let msg =
Message(payload: blocks.mapIt(BlockDelivery(blk: it, address: it.address)))
await buffer.pushData(lenPrefix(protobufEncode(msg)))
await buffer.pushData(frameProtobufMessage(protobufEncode(msg)))
await done.wait(500.millis)
test "Presence Handler":
let
treeCid = Cid.example
addresses = (0 ..< blocks.len).mapIt(BlockAddress(treeCid: treeCid, index: it))
proc presenceHandler(
peer: PeerId, presence: seq[BlockPresence]
) {.async: (raises: []).} =
for b in blocks:
check:
b.address in presence
check presence.len == blocks.len
for p in presence:
check p.address.treeCid == treeCid
done.complete()
@ -98,9 +87,9 @@ asyncchecksuite "Network - Handlers":
let msg = Message(
blockPresences:
blocks.mapIt(BlockPresence(address: it.address, type: BlockPresenceType.Have))
addresses.mapIt(BlockPresence(address: it, kind: BlockPresenceType.HaveRange))
)
await buffer.pushData(lenPrefix(protobufEncode(msg)))
await buffer.pushData(frameProtobufMessage(protobufEncode(msg)))
await done.wait(500.millis)
@ -139,13 +128,15 @@ asyncchecksuite "Network - Senders":
await allFuturesThrowing(switch1.stop(), switch2.stop())
test "Send want list":
let
treeCid = Cid.example
addresses = (0 ..< blocks.len).mapIt(BlockAddress(treeCid: treeCid, index: it))
proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
for b in blocks:
check b.address in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.address)]
for entry in wantList.entries:
check entry.address.treeCid == treeCid
check entry.wantType == WantType.WantHave
check entry.priority == 1
check entry.cancel == true
@ -155,38 +146,22 @@ asyncchecksuite "Network - Senders":
network2.handlers.onWantList = wantListHandler
await network1.sendWantList(
switch2.peerInfo.peerId,
blocks.mapIt(it.address),
1,
true,
WantType.WantHave,
true,
true,
)
await done.wait(500.millis)
test "send blocks":
proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: []).} =
check blocks == blocksDelivery.mapIt(it.blk)
done.complete()
network2.handlers.onBlocksDelivery = blocksDeliveryHandler
await network1.sendBlocksDelivery(
switch2.peerInfo.peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address))
switch2.peerInfo.peerId, addresses, 1, true, WantType.WantHave, true, true
)
await done.wait(500.millis)
test "send presence":
let
treeCid = Cid.example
addresses = (0 ..< blocks.len).mapIt(BlockAddress(treeCid: treeCid, index: it))
proc presenceHandler(
peer: PeerId, precense: seq[BlockPresence]
) {.async: (raises: []).} =
for b in blocks:
check:
b.address in precense
check precense.len == blocks.len
for p in precense:
check p.address.treeCid == treeCid
done.complete()
@ -194,7 +169,7 @@ asyncchecksuite "Network - Senders":
await network1.sendBlockPresence(
switch2.peerInfo.peerId,
blocks.mapIt(BlockPresence(address: it.address, type: BlockPresenceType.Have)),
addresses.mapIt(BlockPresence(address: it, kind: BlockPresenceType.HaveRange)),
)
await done.wait(500.millis)

View File

@ -1,24 +1,28 @@
import std/sugar
import std/sequtils
import std/options
import pkg/unittest2
import pkg/libp2p
import pkg/storage/blockexchange/peers
import pkg/storage/blockexchange/protobuf/blockexc
import pkg/storage/blockexchange/protobuf/presence
import pkg/storage/blockexchange/peers/peerstats
import pkg/storage/blockexchange/utils
import pkg/storage/storagetypes
import ../helpers
import ../examples
const
TestBlockSize = DefaultBlockSize.uint32
TestBatchBytes = computeBatchSize(TestBlockSize).uint64 * TestBlockSize.uint64
suite "Peer Context Store":
var
store: PeerCtxStore
peerCtx: BlockExcPeerCtx
store: PeerContextStore
peerCtx: PeerContext
setup:
store = PeerCtxStore.new()
peerCtx = BlockExcPeerCtx.example
store = PeerContextStore.new()
peerCtx = PeerContext.example
store.add(peerCtx)
test "Should add peer":
@ -31,78 +35,127 @@ suite "Peer Context Store":
test "Should get peer":
check store.get(peerCtx.id) == peerCtx
suite "Peer Context Store Peer Selection":
var
store: PeerCtxStore
peerCtxs: seq[BlockExcPeerCtx]
addresses: seq[BlockAddress]
test "Should return nil for unknown peer":
let unknownId = PeerId.example
check store.get(unknownId) == nil
setup:
store = PeerCtxStore.new()
addresses = collect(newSeq):
for i in 0 ..< 10:
BlockAddress(leaf: false, cid: Cid.example)
test "Should return correct length":
check store.len == 1
peerCtxs = collect(newSeq):
for i in 0 ..< 10:
BlockExcPeerCtx.example
let peer2 = PeerContext.new(PeerId.example)
store.add(peer2)
check store.len == 2
for p in peerCtxs:
store.add(p)
store.remove(peer2.id)
check store.len == 1
teardown:
store = nil
addresses = @[]
peerCtxs = @[]
test "Should return peer IDs":
let peer2 = PeerContext.new(PeerId.example)
let peer3 = PeerContext.new(PeerId.example)
store.add(peer2)
store.add(peer3)
test "Should select peers that have Cid":
peerCtxs[0].blocks = collect(initTable):
for i, a in addresses:
{a: Presence(address: a)}
let ids = store.peerIds
check ids.len == 3
check peerCtx.id in ids
check peer2.id in ids
check peer3.id in ids
peerCtxs[5].blocks = collect(initTable):
for i, a in addresses:
{a: Presence(address: a)}
test "Should iterate over peers":
let peer2 = PeerContext.new(PeerId.example)
let peer3 = PeerContext.new(PeerId.example)
store.add(peer2)
store.add(peer3)
let peers = store.peersHave(addresses[0])
var seenPeers: seq[PeerId]
for peer in store:
seenPeers.add(peer.id)
check peers.len == 2
check peerCtxs[0] in peers
check peerCtxs[5] in peers
check seenPeers.len == 3
check peerCtx.id in seenPeers
check peer2.id in seenPeers
check peer3.id in seenPeers
test "Should select peers that want Cid":
let entries = addresses.mapIt(
WantListEntry(
address: it,
priority: 1,
cancel: false,
wantType: WantType.WantBlock,
sendDontHave: false,
)
)
test "Should replace peer with same ID":
let newPeerCtx = PeerContext.new(peerCtx.id)
store.add(newPeerCtx)
for address in addresses:
peerCtxs[0].wantedBlocks.incl(address)
peerCtxs[5].wantedBlocks.incl(address)
check store.len == 1 # Still only one peer
check store.get(peerCtx.id) == newPeerCtx # New context replaces old
let peers = store.peersWant(addresses[4])
test "Should handle contains check":
check peerCtx.id in store
let unknownId = PeerId.example
check unknownId notin store
check peers.len == 2
check peerCtxs[0] in peers
check peerCtxs[5] in peers
test "Should be empty initially":
let newStore = PeerContextStore.new()
check newStore.len == 0
check newStore.peerIds.len == 0
test "Should return peers with and without block":
let address = addresses[2]
test "Should check contains in array":
let peers = @[peerCtx]
check peerCtx.id in peers
peerCtxs[1].blocks[address] = Presence(address: address)
peerCtxs[2].blocks[address] = Presence(address: address)
let unknownId = PeerId.example
check unknownId notin peers
let peers = store.getPeersForBlock(address)
suite "PeerContext":
test "Should create new PeerContext":
let
peerId = PeerId.example
ctx = PeerContext.new(peerId)
for i, pc in peerCtxs:
if i == 1 or i == 2:
check pc in peers.with
check pc notin peers.without
else:
check pc notin peers.with
check pc in peers.without
check ctx.id == peerId
check ctx.stats.throughputBps().isNone
test "Should compute optimal pipeline depth without stats":
let
ctx = PeerContext.new(PeerId.example)
depth = ctx.optimalPipelineDepth(TestBatchBytes)
check depth == DefaultRequestsPerPeer
suite "PeerPerfStats":
test "Should create new stats":
var stats = PeerPerfStats.new()
check stats.throughputBps().isNone
check stats.avgRttMicros().isNone
check stats.sampleCount() == 0
test "Should record requests":
var stats = PeerPerfStats.new()
stats.recordRequest(1000, 65536)
check stats.sampleCount() == 1
test "Should compute average RTT":
var stats = PeerPerfStats.new()
stats.recordRequest(1000, 65536)
stats.recordRequest(2000, 65536)
stats.recordRequest(3000, 65536)
let avgRtt = stats.avgRttMicros()
check avgRtt.isSome
check avgRtt.get == 2000
test "Should limit RTT samples":
var stats = PeerPerfStats.new()
for i in 1 .. RttSampleCount + 5:
stats.recordRequest(i.uint64 * 100, 1024)
check stats.sampleCount() == RttSampleCount
test "Should reset stats":
var stats = PeerPerfStats.new()
stats.recordRequest(1000, 65536)
check stats.sampleCount() == 1
stats.reset()
check stats.sampleCount() == 0
check stats.throughputBps().isNone
check stats.avgRttMicros().isNone
test "Should compute batch size":
check computeBatchSize(65536) > 0
check computeBatchSize(1024) > computeBatchSize(65536)

View File

@ -1,86 +0,0 @@
import std/sequtils
import std/algorithm
import pkg/chronos
import pkg/stew/byteutils
import pkg/storage/blocktype as bt
import pkg/storage/blockexchange
import ../helpers
import ../../asynctest
suite "Pending Blocks":
test "Should add want handle":
let
pendingBlocks = PendingBlocksManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
discard pendingBlocks.getWantHandle(blk.cid)
check blk.cid in pendingBlocks
test "Should resolve want handle":
let
pendingBlocks = PendingBlocksManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
handle = pendingBlocks.getWantHandle(blk.cid)
check blk.cid in pendingBlocks
pendingBlocks.resolve(@[blk].mapIt(BlockDelivery(blk: it, address: it.address)))
await sleepAsync(0.millis)
# trigger the event loop, otherwise the block finishes before poll runs
let resolved = await handle
check resolved == blk
check blk.cid notin pendingBlocks
test "Should cancel want handle":
let
pendingBlocks = PendingBlocksManager.new()
blk = bt.Block.new("Hello".toBytes).tryGet
handle = pendingBlocks.getWantHandle(blk.cid)
check blk.cid in pendingBlocks
await handle.cancelAndWait()
check blk.cid notin pendingBlocks
test "Should get wants list":
let
pendingBlocks = PendingBlocksManager.new()
blks = (0 .. 9).mapIt(bt.Block.new(("Hello " & $it).toBytes).tryGet)
discard blks.mapIt(pendingBlocks.getWantHandle(it.cid))
check:
blks.mapIt($it.cid).sorted(cmp[string]) ==
toSeq(pendingBlocks.wantListBlockCids).mapIt($it).sorted(cmp[string])
test "Should get want handles list":
let
pendingBlocks = PendingBlocksManager.new()
blks = (0 .. 9).mapIt(bt.Block.new(("Hello " & $it).toBytes).tryGet)
handles = blks.mapIt(pendingBlocks.getWantHandle(it.cid))
wantHandles = toSeq(pendingBlocks.wantHandles)
check wantHandles.len == handles.len
pendingBlocks.resolve(blks.mapIt(BlockDelivery(blk: it, address: it.address)))
check:
(await allFinished(wantHandles)).mapIt($it.read.cid).sorted(cmp[string]) ==
(await allFinished(handles)).mapIt($it.read.cid).sorted(cmp[string])
test "Should handle retry counters":
let
pendingBlocks = PendingBlocksManager.new(3)
blk = bt.Block.new("Hello".toBytes).tryGet
address = BlockAddress.init(blk.cid)
handle = pendingBlocks.getWantHandle(blk.cid)
check pendingBlocks.retries(address) == 3
pendingBlocks.decRetries(address)
check pendingBlocks.retries(address) == 2
pendingBlocks.decRetries(address)
check pendingBlocks.retries(address) == 1
pendingBlocks.decRetries(address)
check pendingBlocks.retries(address) == 0
check pendingBlocks.retriesExhausted(address)

View File

@ -1,3 +1,4 @@
import ./protobuf/testpresence
import ./protocol/testpresence
import ./protocol/testmessage
{.warning[UnusedImport]: off.}

View File

@ -1,5 +1,5 @@
import std/random
import std/sequtils
import std/[random, sequtils]
import pkg/libp2p
import pkg/stint
import pkg/storage/rng
@ -19,15 +19,14 @@ proc example*(_: type PeerId): PeerId =
let key = PrivateKey.random(Rng.instance[]).get
PeerId.init(key.getPublicKey().get).get
proc example*(_: type BlockExcPeerCtx): BlockExcPeerCtx =
BlockExcPeerCtx(id: PeerId.example)
proc example*(_: type PeerContext): PeerContext =
PeerContext(id: PeerId.example)
proc example*(_: type Cid): Cid =
bt.Block.example.cid
proc example*(_: type BlockAddress): BlockAddress =
let cid = Cid.example
BlockAddress.init(cid)
BlockAddress.init(Cid.example, 0)
proc example*(_: type Manifest): Manifest =
Manifest.new(

View File

@ -9,8 +9,10 @@ import pkg/storage/manifest
import pkg/storage/merkletree
import pkg/storage/blockexchange
import pkg/storage/rng
import pkg/storage/units
import pkg/storage/utils
import ./examples
import ./helpers/nodeutils
import ./helpers/datasetutils
import ./helpers/randomchunker
@ -25,11 +27,8 @@ export
export libp2p except setup, eventually
# NOTE: The meaning of equality for blocks
# is changed here, because blocks are now `ref`
# types. This is only in tests!!!
func `==`*(a, b: Block): bool =
(a.cid == b.cid) and (a.data == b.data)
(a.cid == b.cid) and (a.data[] == b.data[])
proc calcEcBlocksCount*(blocksCount: int, ecK, ecM: int): int =
let
@ -43,14 +42,15 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] =
##
let vbytes = PB.toBytes(msg.len().uint64)
var buf = newSeqUninitialized[byte](msg.len() + vbytes.len)
var buf = newSeqUninit[byte](msg.len() + vbytes.len)
buf[0 ..< vbytes.len] = vbytes.toOpenArray()
buf[vbytes.len ..< buf.len] = msg
return buf
proc makeWantList*(
cids: seq[Cid],
treeCid: Cid,
count: int,
priority: int = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
@ -58,9 +58,9 @@ proc makeWantList*(
sendDontHave: bool = false,
): WantList =
WantList(
entries: cids.mapIt(
entries: (0 ..< count).mapIt(
WantListEntry(
address: BlockAddress(leaf: false, cid: it),
address: BlockAddress(treeCid: treeCid, index: it),
priority: priority.int32,
cancel: cancel,
wantType: wantType,
@ -70,25 +70,39 @@ proc makeWantList*(
full: full,
)
proc testManifestDesc*(
treeCid: Cid, blockSize: uint32, blocksCount: int
): ManifestDescriptor =
let manifest = Manifest.new(
treeCid = treeCid,
blockSize = blockSize.NBytes,
datasetSize = (blockSize.int * blocksCount).NBytes,
)
ManifestDescriptor(manifest: manifest, manifestCid: Cid.example)
proc storeDataGetManifest*(
store: BlockStore, blocks: seq[Block]
): Future[Manifest] {.async.} =
): Future[ManifestDescriptor] {.async.} =
for blk in blocks:
(await store.putBlock(blk)).tryGet()
let
(_, tree, manifest) = makeDataset(blocks).tryGet()
(_, tree, manifest, manifestCid) = makeDataset(blocks).tryGet()
treeCid = tree.rootCid.tryGet()
manifestBlock =
Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet()
(await store.putBlock(manifestBlock)).tryGet()
for i in 0 ..< tree.leavesCount:
let proof = tree.getProof(i).tryGet()
(await store.putCidAndProof(treeCid, i, blocks[i].cid, proof)).tryGet()
return manifest
return ManifestDescriptor(manifest: manifest, manifestCid: manifestCid)
proc storeDataGetManifest*(
store: BlockStore, chunker: Chunker
): Future[Manifest] {.async.} =
): Future[ManifestDescriptor] {.async.} =
var blocks = newSeq[Block]()
while (let chunk = await chunker.getBytes(); chunk.len > 0):
@ -112,13 +126,13 @@ proc corruptBlocks*(
blk = (await store.getBlock(manifest.treeCid, i)).tryGet()
bytePos: seq[int]
doAssert bytes < blk.data.len
doAssert bytes < blk.data[].len
while bytePos.len <= bytes:
let ii = Rng.instance.rand(blk.data.len - 1)
let ii = Rng.instance.rand(blk.data[].len - 1)
if bytePos.find(ii) >= 0:
continue
bytePos.add(ii)
blk.data[ii] = byte 0
blk.data[][ii] = byte 0
return pos

View File

@ -1,6 +1,7 @@
import std/random
import pkg/chronos
import pkg/libp2p/cid
import pkg/storage/blocktype as bt
import pkg/storage/merkletree
import pkg/storage/manifest
@ -9,7 +10,12 @@ import pkg/storage/rng
import ./randomchunker
type TestDataset* =
tuple[blocks: seq[Block], tree: StorageMerkleTree, manifest: Manifest]
tuple[
blocks: seq[Block], tree: StorageMerkleTree, manifest: Manifest, manifestCid: Cid
]
proc manifestDesc*(ds: TestDataset): ManifestDescriptor =
ManifestDescriptor(manifest: ds.manifest, manifestCid: ds.manifestCid)
proc makeRandomBlock*(size: NBytes): Block =
let bytes = newSeqWith(size.int, rand(uint8))
@ -33,8 +39,8 @@ proc makeDataset*(blocks: seq[Block]): ?!TestDataset =
return failure("Blocks list was empty")
let
datasetSize = blocks.mapIt(it.data.len).foldl(a + b)
blockSize = blocks.mapIt(it.data.len).foldl(max(a, b))
datasetSize = blocks.mapIt(it.data[].len).foldl(a + b)
blockSize = blocks.mapIt(it.data[].len).foldl(max(a, b))
tree = ?StorageMerkleTree.init(blocks.mapIt(it.cid))
treeCid = ?tree.rootCid
manifest = Manifest.new(
@ -42,5 +48,6 @@ proc makeDataset*(blocks: seq[Block]): ?!TestDataset =
blockSize = NBytes(blockSize),
datasetSize = NBytes(datasetSize),
)
manifestBlock = ?Block.new(?manifest.encode(), codec = ManifestCodec)
return success((blocks, tree, manifest))
return success((blocks, tree, manifest, manifestBlock.cid))

Some files were not shown because too many files have changed in this diff Show More