Merge branch 'master' into feat/sampling-primitives

Dmitriy Ryajov 2025-03-20 20:12:17 -06:00 committed by GitHub
commit 3852415f39
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
118 changed files with 2445 additions and 1557 deletions


@@ -89,7 +89,7 @@ runs:
   - name: Install gcc 14 on Linux
     # We don't want to install gcc 14 for coverage (Ubuntu 20.04)
-    if: ${{ inputs.os == 'linux' && !inputs.coverage }}
+    if: ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
     shell: ${{ inputs.shell }} {0}
     run: |
       # Add GCC-14 to alternatives
@@ -202,7 +202,7 @@ runs:
   - name: Restore Nim toolchain binaries from cache
     id: nim-cache
     uses: actions/cache@v4
-    if: ${{ !inputs.coverage }}
+    if: ${{ inputs.coverage != 'true' }}
     with:
       path: NimBinaries
       key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}


@@ -20,10 +20,10 @@ jobs:
         uses: fabiocaccamo/create-matrix-action@v5
         with:
           matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
   build:
     needs: matrix

.gitmodules (vendored), 10 lines changed

@@ -221,3 +221,13 @@
 [submodule "vendor/nph"]
   path = vendor/nph
   url = https://github.com/arnetheduck/nph.git
+[submodule "vendor/nim-quic"]
+  path = vendor/nim-quic
+  url = https://github.com/vacp2p/nim-quic.git
+  ignore = untracked
+  branch = master
+[submodule "vendor/nim-ngtcp2"]
+  path = vendor/nim-ngtcp2
+  url = https://github.com/vacp2p/nim-ngtcp2.git
+  ignore = untracked
+  branch = master


@@ -59,8 +59,8 @@ Feel free to dive in, contributions are welcomed! Open an issue or submit PRs.
 ### Linting and formatting
-`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is requrired to adhere to its styling.
+`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling.
 If you are setting up fresh setup, in order to get `nph` run `make build-nph`.
 In order to format files run `make nph/<file/folder you want to format>`.
-If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior commiting them.
+If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them.
 If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension you can enable "Format On Save" (eq. the `nim.formatOnSave` property) that will format the files using `nph`.


@ -41,80 +41,86 @@ type Advertiser* = ref object of RootObj
advertiserRunning*: bool # Indicates if discovery is running advertiserRunning*: bool # Indicates if discovery is running
concurrentAdvReqs: int # Concurrent advertise requests concurrentAdvReqs: int # Concurrent advertise requests
advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle advertiseLocalStoreLoop*: Future[void].Raising([]) # Advertise loop task handle
advertiseQueue*: AsyncQueue[Cid] # Advertise queue advertiseQueue*: AsyncQueue[Cid] # Advertise queue
trackedFutures*: TrackedFutures # Advertise tasks futures trackedFutures*: TrackedFutures # Advertise tasks futures
advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} = proc addCidToQueue(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
if cid notin b.advertiseQueue: if cid notin b.advertiseQueue:
await b.advertiseQueue.put(cid) await b.advertiseQueue.put(cid)
trace "Advertising", cid trace "Advertising", cid
proc advertiseBlock(b: Advertiser, cid: Cid) {.async.} = proc advertiseBlock(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
without isM =? cid.isManifest, err: without isM =? cid.isManifest, err:
warn "Unable to determine if cid is manifest" warn "Unable to determine if cid is manifest"
return return
if isM: try:
without blk =? await b.localStore.getBlock(cid), err: if isM:
error "Error retrieving manifest block", cid, err = err.msg without blk =? await b.localStore.getBlock(cid), err:
return error "Error retrieving manifest block", cid, err = err.msg
return
without manifest =? Manifest.decode(blk), err: without manifest =? Manifest.decode(blk), err:
error "Unable to decode as manifest", err = err.msg error "Unable to decode as manifest", err = err.msg
return return
# announce manifest cid and tree cid # announce manifest cid and tree cid
await b.addCidToQueue(cid) await b.addCidToQueue(cid)
await b.addCidToQueue(manifest.treeCid) await b.addCidToQueue(manifest.treeCid)
except CancelledError as exc:
trace "Cancelled advertise block", cid
raise exc
except CatchableError as e:
error "failed to advertise block", cid, error = e.msgDetail
proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} = proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
while b.advertiserRunning: try:
try: while b.advertiserRunning:
if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest): try:
trace "Advertiser begins iterating blocks..." if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
for c in cids: trace "Advertiser begins iterating blocks..."
if cid =? await c: for c in cids:
await b.advertiseBlock(cid) if cid =? await c:
trace "Advertiser iterating blocks finished." await b.advertiseBlock(cid)
trace "Advertiser iterating blocks finished."
except CatchableError as e:
error "Error in advertise local store loop", error = e.msgDetail
raiseAssert("Unexpected exception in advertiseLocalStoreLoop")
await sleepAsync(b.advertiseLocalStoreLoopSleep) await sleepAsync(b.advertiseLocalStoreLoopSleep)
except CancelledError: except CancelledError:
break # do not propagate as advertiseLocalStoreLoop was asyncSpawned warn "Cancelled advertise local store loop"
except CatchableError as e:
error "failed to advertise blocks in local store", error = e.msgDetail
info "Exiting advertise task loop" info "Exiting advertise task loop"
proc processQueueLoop(b: Advertiser) {.async: (raises: []).} = proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
while b.advertiserRunning: try:
try: while b.advertiserRunning:
let cid = await b.advertiseQueue.get() let cid = await b.advertiseQueue.get()
if cid in b.inFlightAdvReqs: if cid in b.inFlightAdvReqs:
continue continue
try: let request = b.discovery.provide(cid)
let request = b.discovery.provide(cid) b.inFlightAdvReqs[cid] = request
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
b.inFlightAdvReqs[cid] = request defer:
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
await request
finally:
b.inFlightAdvReqs.del(cid) b.inFlightAdvReqs.del(cid)
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64) codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
except CancelledError:
trace "Advertise task cancelled" await request
return except CancelledError:
except CatchableError as exc: warn "Cancelled advertise task runner"
warn "Exception in advertise task runner", exc = exc.msg
info "Exiting advertise task runner" info "Exiting advertise task runner"
proc start*(b: Advertiser) {.async.} = proc start*(b: Advertiser) {.async: (raises: []).} =
## Start the advertiser ## Start the advertiser
## ##
@ -134,13 +140,11 @@ proc start*(b: Advertiser) {.async.} =
for i in 0 ..< b.concurrentAdvReqs: for i in 0 ..< b.concurrentAdvReqs:
let fut = b.processQueueLoop() let fut = b.processQueueLoop()
b.trackedFutures.track(fut) b.trackedFutures.track(fut)
asyncSpawn fut
b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b) b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b)
b.trackedFutures.track(b.advertiseLocalStoreLoop) b.trackedFutures.track(b.advertiseLocalStoreLoop)
asyncSpawn b.advertiseLocalStoreLoop
proc stop*(b: Advertiser) {.async.} = proc stop*(b: Advertiser) {.async: (raises: []).} =
## Stop the advertiser ## Stop the advertiser
## ##
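
The advertiser loops above move from bare {.async.} procs that were asyncSpawn'ed to procs annotated {.async: (raises: []).}, stored as Future[void].Raising([]) and tracked instead of spawned. A minimal sketch of that loop shape, assuming chronos v4; the names ExampleService and runLoop are illustrative, not part of the codebase:

import pkg/chronos

type ExampleService = ref object
  running: bool
  loopFut: Future[void].Raising([])

proc runLoop(self: ExampleService) {.async: (raises: []).} =
  try:
    while self.running:
      # one unit of work per iteration would go here
      await sleepAsync(100.millis)
  except CancelledError:
    discard # swallow cancellation so nothing escapes a raises: [] proc

proc start(self: ExampleService) =
  self.running = true
  self.loopFut = self.runLoop() # keep the handle instead of asyncSpawn

proc stop(self: ExampleService) {.async: (raises: []).} =
  self.running = false
  if not self.loopFut.isNil:
    try:
      await self.loopFut.cancelAndWait()
    except CancelledError:
      discard

when isMainModule:
  let svc = ExampleService()
  svc.start()
  waitFor sleepAsync(10.millis)
  waitFor svc.stop()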


@ -48,7 +48,7 @@ type DiscoveryEngine* = ref object of RootObj
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
discEngineRunning*: bool # Indicates if discovery is running discEngineRunning*: bool # Indicates if discovery is running
concurrentDiscReqs: int # Concurrent discovery requests concurrentDiscReqs: int # Concurrent discovery requests
discoveryLoop*: Future[void] # Discovery loop task handle discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Max number of peers with block minPeersPerBlock*: int # Max number of peers with block
@ -57,30 +57,21 @@ type DiscoveryEngine* = ref object of RootObj
# Inflight discovery requests # Inflight discovery requests
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} = proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
while b.discEngineRunning: try:
for cid in toSeq(b.pendingBlocks.wantListBlockCids): while b.discEngineRunning:
try: for cid in toSeq(b.pendingBlocks.wantListBlockCids):
await b.discoveryQueue.put(cid) await b.discoveryQueue.put(cid)
except CancelledError:
trace "Discovery loop cancelled"
return
except CatchableError as exc:
warn "Exception in discovery loop", exc = exc.msg
try:
logScope:
sleep = b.discoveryLoopSleep
wanted = b.pendingBlocks.len
await sleepAsync(b.discoveryLoopSleep) await sleepAsync(b.discoveryLoopSleep)
except CancelledError: except CancelledError:
discard # do not propagate as discoveryQueueLoop was asyncSpawned trace "Discovery loop cancelled"
proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} = proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
## Run discovery tasks ## Run discovery tasks
## ##
while b.discEngineRunning: try:
try: while b.discEngineRunning:
let cid = await b.discoveryQueue.get() let cid = await b.discoveryQueue.get()
if cid in b.inFlightDiscReqs: if cid in b.inFlightDiscReqs:
@ -90,35 +81,28 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
let haves = b.peers.peersHave(cid) let haves = b.peers.peersHave(cid)
if haves.len < b.minPeersPerBlock: if haves.len < b.minPeersPerBlock:
try: let request = b.discovery.find(cid)
let request = b.discovery.find(cid).wait(DefaultDiscoveryTimeout) b.inFlightDiscReqs[cid] = request
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
b.inFlightDiscReqs[cid] = request defer:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64) codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
let peers = await request
if (await request.withTimeout(DefaultDiscoveryTimeout)) and
peers =? (await request).catch:
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data))) let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
for i, f in dialed: for i, f in dialed:
if f.failed: if f.failed:
await b.discovery.removeProvider(peers[i].data.peerId) await b.discovery.removeProvider(peers[i].data.peerId)
finally: except CancelledError:
b.inFlightDiscReqs.del(cid) trace "Discovery task cancelled"
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64) return
except CancelledError:
trace "Discovery task cancelled"
return
except CatchableError as exc:
warn "Exception in discovery task runner", exc = exc.msg
except Exception as e:
# Raised by b.discovery.removeProvider somehow...
# This should not be catchable, and we should never get here. Therefore,
# raise a Defect.
raiseAssert "Exception when removing provider"
info "Exiting discovery task runner" info "Exiting discovery task runner"
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} = proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) =
for cid in cids: for cid in cids:
if cid notin b.discoveryQueue: if cid notin b.discoveryQueue:
try: try:
@ -126,11 +110,11 @@ proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
except CatchableError as exc: except CatchableError as exc:
warn "Exception queueing discovery request", exc = exc.msg warn "Exception queueing discovery request", exc = exc.msg
proc start*(b: DiscoveryEngine) {.async.} = proc start*(b: DiscoveryEngine) {.async: (raises: []).} =
## Start the discengine task ## Start the discengine task
## ##
trace "Discovery engine start" trace "Discovery engine starting"
if b.discEngineRunning: if b.discEngineRunning:
warn "Starting discovery engine twice" warn "Starting discovery engine twice"
@ -140,12 +124,13 @@ proc start*(b: DiscoveryEngine) {.async.} =
for i in 0 ..< b.concurrentDiscReqs: for i in 0 ..< b.concurrentDiscReqs:
let fut = b.discoveryTaskLoop() let fut = b.discoveryTaskLoop()
b.trackedFutures.track(fut) b.trackedFutures.track(fut)
asyncSpawn fut
b.discoveryLoop = b.discoveryQueueLoop() b.discoveryLoop = b.discoveryQueueLoop()
b.trackedFutures.track(b.discoveryLoop) b.trackedFutures.track(b.discoveryLoop)
proc stop*(b: DiscoveryEngine) {.async.} = trace "Discovery engine started"
proc stop*(b: DiscoveryEngine) {.async: (raises: []).} =
## Stop the discovery engine ## Stop the discovery engine
## ##
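
The reworked discoveryTaskLoop registers each find request in inFlightDiscReqs, relies on defer so the table is cleaned up on every exit path, and guards the wait with withTimeout instead of wait. Roughly the same bookkeeping in a self-contained sketch, assuming chronos; Lookup and findProviders are illustrative stand-ins for the real discovery types:

import std/tables
import pkg/chronos

type Lookup = ref object
  inFlight: Table[string, Future[seq[string]]]

proc findProviders(cid: string): Future[seq[string]] {.async.} =
  ## Stand-in for the real DHT lookup; always returns no providers.
  return @[]

proc lookupOnce(self: Lookup, cid: string) {.async.} =
  if cid in self.inFlight:
    return # a request for this cid is already running
  let request = findProviders(cid)
  self.inFlight[cid] = request
  defer:
    # runs on success, timeout and cancellation alike
    self.inFlight.del(cid)
  if await request.withTimeout(1.minutes):
    let providers = await request
    echo "found ", providers.len, " providers for ", cid

when isMainModule:
  waitFor Lookup().lookupOnce("some-cid")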


@ -93,12 +93,15 @@ type
price*: UInt256 price*: UInt256
# attach task scheduler to engine # attach task scheduler to engine
proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe.} = proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, raises: [].} =
self.taskQueue.pushOrUpdateNoWait(task).isOk() if self.taskQueue.pushOrUpdateNoWait(task).isOk():
trace "Task scheduled for peer", peer = task.id
else:
warn "Unable to schedule task for peer", peer = task.id
proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).}
proc start*(self: BlockExcEngine) {.async.} = proc start*(self: BlockExcEngine) {.async: (raises: []).} =
## Start the blockexc task ## Start the blockexc task
## ##
@ -115,7 +118,7 @@ proc start*(self: BlockExcEngine) {.async.} =
let fut = self.blockexcTaskRunner() let fut = self.blockexcTaskRunner()
self.trackedFutures.track(fut) self.trackedFutures.track(fut)
proc stop*(self: BlockExcEngine) {.async.} = proc stop*(self: BlockExcEngine) {.async: (raises: []).} =
## Stop the blockexc blockexc ## Stop the blockexc blockexc
## ##
@ -135,7 +138,7 @@ proc stop*(self: BlockExcEngine) {.async.} =
proc sendWantHave( proc sendWantHave(
self: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx] self: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx]
): Future[void] {.async.} = ): Future[void] {.async: (raises: [CancelledError]).} =
for p in peers: for p in peers:
let toAsk = addresses.filterIt(it notin p.peerHave) let toAsk = addresses.filterIt(it notin p.peerHave)
trace "Sending wantHave request", toAsk, peer = p.id trace "Sending wantHave request", toAsk, peer = p.id
@ -144,7 +147,7 @@ proc sendWantHave(
proc sendWantBlock( proc sendWantBlock(
self: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx self: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx
): Future[void] {.async.} = ): Future[void] {.async: (raises: [CancelledError]).} =
trace "Sending wantBlock request to", addresses, peer = blockPeer.id trace "Sending wantBlock request to", addresses, peer = blockPeer.id
await self.network.request.sendWantList( await self.network.request.sendWantList(
blockPeer.id, addresses, wantType = WantType.WantBlock blockPeer.id, addresses, wantType = WantType.WantBlock
@ -229,7 +232,7 @@ proc requestBlock*(
proc blockPresenceHandler*( proc blockPresenceHandler*(
self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence] self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
) {.async.} = ) {.async: (raises: []).} =
trace "Received block presence from peer", peer, blocks = blocks.mapIt($it) trace "Received block presence from peer", peer, blocks = blocks.mapIt($it)
let let
peerCtx = self.peers.get(peer) peerCtx = self.peers.get(peer)
@ -249,20 +252,23 @@ proc blockPresenceHandler*(
if dontWantCids.len > 0: if dontWantCids.len > 0:
peerCtx.cleanPresence(dontWantCids) peerCtx.cleanPresence(dontWantCids)
let ourWantCids = ourWantList.filter do(address: BlockAddress) -> bool: let ourWantCids = ourWantList.filterIt(
if address in peerHave and not self.pendingBlocks.retriesExhausted(address) and it in peerHave and not self.pendingBlocks.retriesExhausted(it) and
not self.pendingBlocks.isInFlight(address): not self.pendingBlocks.isInFlight(it)
self.pendingBlocks.setInFlight(address, true) )
self.pendingBlocks.decRetries(address)
true for address in ourWantCids:
else: self.pendingBlocks.setInFlight(address, true)
false self.pendingBlocks.decRetries(address)
if ourWantCids.len > 0: if ourWantCids.len > 0:
trace "Peer has blocks in our wantList", peer, wants = ourWantCids trace "Peer has blocks in our wantList", peer, wants = ourWantCids
await self.sendWantBlock(ourWantCids, peerCtx) if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption:
warn "Failed to send wantBlock to peer", peer, err = err.msg
proc scheduleTasks(self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = proc scheduleTasks(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
let cids = blocksDelivery.mapIt(it.blk.cid) let cids = blocksDelivery.mapIt(it.blk.cid)
# schedule any new peers to provide blocks to # schedule any new peers to provide blocks to
@ -271,15 +277,21 @@ proc scheduleTasks(self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.a
# schedule a peer if it wants at least one cid # schedule a peer if it wants at least one cid
# and we have it in our local store # and we have it in our local store
if c in p.peerWantsCids: if c in p.peerWantsCids:
if await (c in self.localStore): try:
if self.scheduleTask(p): if await (c in self.localStore):
trace "Task scheduled for peer", peer = p.id # TODO: the try/except should go away once blockstore tracks exceptions
else: self.scheduleTask(p)
warn "Unable to schedule task for peer", peer = p.id break
except CancelledError as exc:
warn "Checking local store canceled", cid = c, err = exc.msg
return
except CatchableError as exc:
error "Error checking local store for cid", cid = c, err = exc.msg
raiseAssert "Unexpected error checking local store for cid"
break # do next peer proc cancelBlocks(
self: BlockExcEngine, addrs: seq[BlockAddress]
proc cancelBlocks(self: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} = ) {.async: (raises: [CancelledError]).} =
## Tells neighboring peers that we're no longer interested in a block. ## Tells neighboring peers that we're no longer interested in a block.
## ##
@ -289,35 +301,43 @@ proc cancelBlocks(self: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} =
trace "Sending block request cancellations to peers", trace "Sending block request cancellations to peers",
addrs, peers = self.peers.peerIds addrs, peers = self.peers.peerIds
proc mapPeers(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} = proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} =
let blocks = addrs.filter do(a: BlockAddress) -> bool: await self.network.request.sendWantCancellations(
a in peerCtx.blocks peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx)
)
if blocks.len > 0: return peerCtx
trace "Sending block request cancellations to peer", peer = peerCtx.id, blocks
await self.network.request.sendWantCancellations( try:
peer = peerCtx.id, addresses = blocks let (succeededFuts, failedFuts) = await allFinishedFailed(
toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map(
processPeer
) )
)
(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx):
peerCtx.cleanPresence(addrs) peerCtx.cleanPresence(addrs)
peerCtx
let failed = (await allFinished(map(toSeq(self.peers.peers.values), mapPeers))).filterIt( if failedFuts.len > 0:
it.failed warn "Failed to send block request cancellations to peers", peers = failedFuts.len
) else:
trace "Block request cancellations sent to peers", peers = self.peers.len
if failed.len > 0: except CancelledError as exc:
warn "Failed to send block request cancellations to peers", peers = failed.len warn "Error sending block request cancellations", error = exc.msg
else: raise exc
trace "Block request cancellations sent to peers", peers = self.peers.len except CatchableError as exc:
warn "Error sending block request cancellations", error = exc.msg
proc resolveBlocks*( proc resolveBlocks*(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery] self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async.} = ) {.async: (raises: [CancelledError]).} =
self.pendingBlocks.resolve(blocksDelivery) self.pendingBlocks.resolve(blocksDelivery)
await self.scheduleTasks(blocksDelivery) await self.scheduleTasks(blocksDelivery)
await self.cancelBlocks(blocksDelivery.mapIt(it.address)) await self.cancelBlocks(blocksDelivery.mapIt(it.address))
proc resolveBlocks*(self: BlockExcEngine, blocks: seq[Block]) {.async.} = proc resolveBlocks*(
self: BlockExcEngine, blocks: seq[Block]
) {.async: (raises: [CancelledError]).} =
await self.resolveBlocks( await self.resolveBlocks(
blocks.mapIt( blocks.mapIt(
BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)) BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid))
@ -326,7 +346,7 @@ proc resolveBlocks*(self: BlockExcEngine, blocks: seq[Block]) {.async.} =
proc payForBlocks( proc payForBlocks(
self: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery] self: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery]
) {.async.} = ) {.async: (raises: [CancelledError]).} =
let let
sendPayment = self.network.request.sendPayment sendPayment = self.network.request.sendPayment
price = peer.price(blocksDelivery.mapIt(it.address)) price = peer.price(blocksDelivery.mapIt(it.address))
@ -367,7 +387,7 @@ proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void =
proc blocksDeliveryHandler*( proc blocksDeliveryHandler*(
self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery] self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async.} = ) {.async: (raises: []).} =
trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address)) trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address))
var validatedBlocksDelivery: seq[BlockDelivery] var validatedBlocksDelivery: seq[BlockDelivery]
@ -376,41 +396,47 @@ proc blocksDeliveryHandler*(
peer = peer peer = peer
address = bd.address address = bd.address
if err =? self.validateBlockDelivery(bd).errorOption: try:
warn "Block validation failed", msg = err.msg if err =? self.validateBlockDelivery(bd).errorOption:
continue warn "Block validation failed", msg = err.msg
if err =? (await self.localStore.putBlock(bd.blk)).errorOption:
error "Unable to store block", err = err.msg
continue
if bd.address.leaf:
without proof =? bd.proof:
error "Proof expected for a leaf block delivery"
continue continue
if err =? (
await self.localStore.putCidAndProof( if err =? (await self.localStore.putBlock(bd.blk)).errorOption:
bd.address.treeCid, bd.address.index, bd.blk.cid, proof error "Unable to store block", err = err.msg
)
).errorOption:
error "Unable to store proof and cid for a block"
continue continue
if bd.address.leaf:
without proof =? bd.proof:
warn "Proof expected for a leaf block delivery"
continue
if err =? (
await self.localStore.putCidAndProof(
bd.address.treeCid, bd.address.index, bd.blk.cid, proof
)
).errorOption:
warn "Unable to store proof and cid for a block"
continue
except CatchableError as exc:
warn "Error handling block delivery", error = exc.msg
continue
validatedBlocksDelivery.add(bd) validatedBlocksDelivery.add(bd)
await self.resolveBlocks(validatedBlocksDelivery)
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64) codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
let peerCtx = self.peers.get(peer) let peerCtx = self.peers.get(peer)
if peerCtx != nil: if peerCtx != nil:
await self.payForBlocks(peerCtx, blocksDelivery) if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption:
## shouldn't we remove them from the want-list instead of this: warn "Error paying for blocks", err = err.msg
peerCtx.cleanPresence(blocksDelivery.mapIt(it.address)) return
if err =? catch(await self.resolveBlocks(validatedBlocksDelivery)).errorOption:
warn "Error resolving blocks", err = err.msg
return
proc wantListHandler*( proc wantListHandler*(
self: BlockExcEngine, peer: PeerId, wantList: WantList self: BlockExcEngine, peer: PeerId, wantList: WantList
) {.async.} = ) {.async: (raises: []).} =
trace "Received want list from peer", peer, wantList = wantList.entries.len trace "Received want list from peer", peer, wantList = wantList.entries.len
let peerCtx = self.peers.get(peer) let peerCtx = self.peers.get(peer)
@ -422,68 +448,81 @@ proc wantListHandler*(
presence: seq[BlockPresence] presence: seq[BlockPresence]
schedulePeer = false schedulePeer = false
for e in wantList.entries: try:
let idx = peerCtx.peerWants.findIt(it.address == e.address) for e in wantList.entries:
let idx = peerCtx.peerWants.findIt(it.address == e.address)
logScope: logScope:
peer = peerCtx.id peer = peerCtx.id
address = e.address address = e.address
wantType = $e.wantType wantType = $e.wantType
if idx < 0: # Adding new entry to peer wants if idx < 0: # Adding new entry to peer wants
let let
have = await e.address in self.localStore have =
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) try:
await e.address in self.localStore
except CatchableError as exc:
# TODO: should not be necessary once we have proper exception tracking on the BlockStore interface
false
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if e.cancel: if e.cancel:
trace "Received cancelation for untracked block, skipping", address = e.address trace "Received cancelation for untracked block, skipping",
continue address = e.address
continue
trace "Processing want list entry", wantList = $e trace "Processing want list entry", wantList = $e
case e.wantType case e.wantType
of WantType.WantHave: of WantType.WantHave:
if have: if have:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.Have, price: price
)
)
else:
if e.sendDontHave:
presence.add( presence.add(
BlockPresence( BlockPresence(
address: e.address, `type`: BlockPresenceType.DontHave, price: price address: e.address, `type`: BlockPresenceType.Have, price: price
) )
) )
else:
if e.sendDontHave:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.DontHave, price: price
)
)
codex_block_exchange_want_have_lists_received.inc() codex_block_exchange_want_have_lists_received.inc()
of WantType.WantBlock: of WantType.WantBlock:
peerCtx.peerWants.add(e) peerCtx.peerWants.add(e)
schedulePeer = true
codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants
# peer doesn't want this block anymore
if e.cancel:
trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
trace "Canceled block request", address = e.address, len = peerCtx.peerWants.len
else:
if e.wantType == WantType.WantBlock:
schedulePeer = true schedulePeer = true
# peer might want to ask for the same cid with codex_block_exchange_want_block_lists_received.inc()
# different want params else: # Updating existing entry in peer wants
trace "Updating want for block", address = e.address # peer doesn't want this block anymore
peerCtx.peerWants[idx] = e # update entry if e.cancel:
trace "Updated block request", address = e.address, len = peerCtx.peerWants.len trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
trace "Canceled block request",
address = e.address, len = peerCtx.peerWants.len
else:
if e.wantType == WantType.WantBlock:
schedulePeer = true
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
trace "Updated block request",
address = e.address, len = peerCtx.peerWants.len
if presence.len > 0: if presence.len > 0:
trace "Sending presence to remote", items = presence.mapIt($it).join(",") trace "Sending presence to remote", items = presence.mapIt($it).join(",")
await self.network.request.sendPresence(peer, presence) await self.network.request.sendPresence(peer, presence)
if schedulePeer and not self.scheduleTask(peerCtx): if schedulePeer:
warn "Unable to schedule task for peer", peer self.scheduleTask(peerCtx)
except CancelledError as exc: #TODO: replace with CancelledError
warn "Error processing want list", error = exc.msg
proc accountHandler*(self: BlockExcEngine, peer: PeerId, account: Account) {.async.} = proc accountHandler*(
self: BlockExcEngine, peer: PeerId, account: Account
) {.async: (raises: []).} =
let context = self.peers.get(peer) let context = self.peers.get(peer)
if context.isNil: if context.isNil:
return return
@ -492,7 +531,7 @@ proc accountHandler*(self: BlockExcEngine, peer: PeerId, account: Account) {.asy
proc paymentHandler*( proc paymentHandler*(
self: BlockExcEngine, peer: PeerId, payment: SignedState self: BlockExcEngine, peer: PeerId, payment: SignedState
) {.async.} = ) {.async: (raises: []).} =
trace "Handling payments", peer trace "Handling payments", peer
without context =? self.peers.get(peer).option and account =? context.account: without context =? self.peers.get(peer).option and account =? context.account:
@ -505,7 +544,9 @@ proc paymentHandler*(
else: else:
context.paymentChannel = self.wallet.acceptChannel(payment).option context.paymentChannel = self.wallet.acceptChannel(payment).option
proc setupPeer*(self: BlockExcEngine, peer: PeerId) {.async.} = proc setupPeer*(
self: BlockExcEngine, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Perform initial setup, such as want ## Perform initial setup, such as want
## list exchange ## list exchange
## ##
@ -524,9 +565,10 @@ proc setupPeer*(self: BlockExcEngine, peer: PeerId) {.async.} =
await self.network.request.sendWantList(peer, cids, full = true) await self.network.request.sendWantList(peer, cids, full = true)
if address =? self.pricing .? address: if address =? self.pricing .? address:
trace "Sending account to peer", peer
await self.network.request.sendAccount(peer, Account(address: address)) await self.network.request.sendAccount(peer, Account(address: address))
proc dropPeer*(self: BlockExcEngine, peer: PeerId) = proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} =
## Cleanup disconnected peer ## Cleanup disconnected peer
## ##
@ -535,7 +577,9 @@ proc dropPeer*(self: BlockExcEngine, peer: PeerId) =
# drop the peer from the peers table # drop the peer from the peers table
self.peers.remove(peer) self.peers.remove(peer)
proc taskHandler*(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = proc taskHandler*(
self: BlockExcEngine, task: BlockExcPeerCtx
) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} =
# Send to the peer blocks he wants to get, # Send to the peer blocks he wants to get,
# if they present in our local store # if they present in our local store
@ -572,8 +616,11 @@ proc taskHandler*(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.}
let let
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup)) blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
blocksDelivery = blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt:
blocksDeliveryFut.filterIt(it.completed and it.read.isOk).mapIt(it.read.get) if bd =? it.value:
bd
else:
raiseAssert "Unexpected error in local lookup"
# All the wants that failed local lookup must be set to not-in-flight again. # All the wants that failed local lookup must be set to not-in-flight again.
let let
@ -595,15 +642,12 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
## ##
trace "Starting blockexc task runner" trace "Starting blockexc task runner"
while self.blockexcRunning: try:
try: while self.blockexcRunning:
let peerCtx = await self.taskQueue.pop() let peerCtx = await self.taskQueue.pop()
await self.taskHandler(peerCtx) await self.taskHandler(peerCtx)
except CancelledError: except CatchableError as exc:
break # do not propagate as blockexcTaskRunner was asyncSpawned error "error running block exchange task", error = exc.msg
except CatchableError as e:
error "error running block exchange task", error = e.msgDetail
info "Exiting blockexc task runner" info "Exiting blockexc task runner"
@ -634,7 +678,9 @@ proc new*(
advertiser: advertiser, advertiser: advertiser,
) )
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined: if event.kind == PeerEventKind.Joined:
await self.setupPeer(peerId) await self.setupPeer(peerId)
else: else:
@ -644,23 +690,29 @@ proc new*(
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} = proc blockWantListHandler(
peer: PeerId, wantList: WantList
): Future[void] {.async: (raises: []).} =
self.wantListHandler(peer, wantList) self.wantListHandler(peer, wantList)
proc blockPresenceHandler( proc blockPresenceHandler(
peer: PeerId, presence: seq[BlockPresence] peer: PeerId, presence: seq[BlockPresence]
): Future[void] {.gcsafe.} = ): Future[void] {.async: (raises: []).} =
self.blockPresenceHandler(peer, presence) self.blockPresenceHandler(peer, presence)
proc blocksDeliveryHandler( proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery] peer: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.gcsafe.} = ): Future[void] {.async: (raises: []).} =
self.blocksDeliveryHandler(peer, blocksDelivery) self.blocksDeliveryHandler(peer, blocksDelivery)
proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} = proc accountHandler(
peer: PeerId, account: Account
): Future[void] {.async: (raises: []).} =
self.accountHandler(peer, account) self.accountHandler(peer, account)
proc paymentHandler(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} = proc paymentHandler(
peer: PeerId, payment: SignedState
): Future[void] {.async: (raises: []).} =
self.paymentHandler(peer, payment) self.paymentHandler(peer, payment)
network.handlers = BlockExcHandlers( network.handlers = BlockExcHandlers(


@@ -7,6 +7,8 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
+{.push raises: [].}
 import std/math
 import pkg/nitro
 import pkg/questionable/results
@@ -15,9 +17,6 @@ import ../peers
 export nitro
 export results
-push:
-  {.upraises: [].}
 const ChainId* = 0.u256 # invalid chain id for now
 const Asset* = EthAddress.zero # invalid ERC20 asset address for now
 const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals


@ -35,13 +35,15 @@ const
DefaultMaxInflight* = 100 DefaultMaxInflight* = 100
type type
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} WantListHandler* =
proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).}
BlocksDeliveryHandler* = BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.} proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).}
BlockPresenceHandler* = BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.} proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).}
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} PaymentHandler* =
proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).}
BlockExcHandlers* = object BlockExcHandlers* = object
onWantList*: WantListHandler onWantList*: WantListHandler
@ -58,15 +60,20 @@ type
wantType: WantType = WantType.WantHave, wantType: WantType = WantType.WantHave,
full: bool = false, full: bool = false,
sendDontHave: bool = false, sendDontHave: bool = false,
): Future[void] {.gcsafe.} ) {.async: (raises: [CancelledError]).}
WantCancellationSender* = WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {.
proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} async: (raises: [CancelledError])
BlocksDeliverySender* = .}
proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.
PresenceSender* = async: (raises: [CancelledError])
proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} .}
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} async: (raises: [CancelledError])
.}
AccountSender* =
proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).}
PaymentSender* =
proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).}
BlockExcRequest* = object BlockExcRequest* = object
sendWantList*: WantListSender sendWantList*: WantListSender
@ -98,7 +105,9 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =
return b.peerId == peer return b.peerId == peer
proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = proc send*(
b: BlockExcNetwork, id: PeerId, msg: pb.Message
) {.async: (raises: [CancelledError]).} =
## Send message to peer ## Send message to peer
## ##
@ -106,8 +115,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
trace "Unable to send, peer not found", peerId = id trace "Unable to send, peer not found", peerId = id
return return
let peer = b.peers[id]
try: try:
let peer = b.peers[id]
await b.inflightSema.acquire() await b.inflightSema.acquire()
await peer.send(msg) await peer.send(msg)
except CancelledError as error: except CancelledError as error:
@ -117,7 +127,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
finally: finally:
b.inflightSema.release() b.inflightSema.release()
proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} = proc handleWantList(
b: BlockExcNetwork, peer: NetworkPeer, list: WantList
) {.async: (raises: []).} =
## Handle incoming want list ## Handle incoming want list
## ##
@ -133,7 +145,7 @@ proc sendWantList*(
wantType: WantType = WantType.WantHave, wantType: WantType = WantType.WantHave,
full: bool = false, full: bool = false,
sendDontHave: bool = false, sendDontHave: bool = false,
): Future[void] = ) {.async: (raw: true, raises: [CancelledError]).} =
## Send a want message to peer ## Send a want message to peer
## ##
@ -154,14 +166,14 @@ proc sendWantList*(
proc sendWantCancellations*( proc sendWantCancellations*(
b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress] b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async.} = ): Future[void] {.async: (raises: [CancelledError]).} =
## Informs a remote peer that we're no longer interested in a set of blocks ## Informs a remote peer that we're no longer interested in a set of blocks
## ##
await b.sendWantList(id = id, addresses = addresses, cancel = true) await b.sendWantList(id = id, addresses = addresses, cancel = true)
proc handleBlocksDelivery( proc handleBlocksDelivery(
b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery] b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
) {.async.} = ) {.async: (raises: []).} =
## Handle incoming blocks ## Handle incoming blocks
## ##
@ -170,7 +182,7 @@ proc handleBlocksDelivery(
proc sendBlocksDelivery*( proc sendBlocksDelivery*(
b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery] b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] = ) {.async: (raw: true, raises: [CancelledError]).} =
## Send blocks to remote ## Send blocks to remote
## ##
@ -178,7 +190,7 @@ proc sendBlocksDelivery*(
proc handleBlockPresence( proc handleBlockPresence(
b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence] b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
) {.async.} = ) {.async: (raises: []).} =
## Handle block presence ## Handle block presence
## ##
@ -187,7 +199,7 @@ proc handleBlockPresence(
proc sendBlockPresence*( proc sendBlockPresence*(
b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence] b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence]
): Future[void] = ) {.async: (raw: true, raises: [CancelledError]).} =
## Send presence to remote ## Send presence to remote
## ##
@ -195,20 +207,24 @@ proc sendBlockPresence*(
proc handleAccount( proc handleAccount(
network: BlockExcNetwork, peer: NetworkPeer, account: Account network: BlockExcNetwork, peer: NetworkPeer, account: Account
) {.async.} = ) {.async: (raises: []).} =
## Handle account info ## Handle account info
## ##
if not network.handlers.onAccount.isNil: if not network.handlers.onAccount.isNil:
await network.handlers.onAccount(peer.id, account) await network.handlers.onAccount(peer.id, account)
proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] = proc sendAccount*(
b: BlockExcNetwork, id: PeerId, account: Account
) {.async: (raw: true, raises: [CancelledError]).} =
## Send account info to remote ## Send account info to remote
## ##
b.send(id, Message(account: AccountMessage.init(account))) b.send(id, Message(account: AccountMessage.init(account)))
proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] = proc sendPayment*(
b: BlockExcNetwork, id: PeerId, payment: SignedState
) {.async: (raw: true, raises: [CancelledError]).} =
## Send payment to remote ## Send payment to remote
## ##
@ -216,7 +232,7 @@ proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[
proc handlePayment( proc handlePayment(
network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
) {.async.} = ) {.async: (raises: []).} =
## Handle payment ## Handle payment
## ##
@ -225,7 +241,7 @@ proc handlePayment(
proc rpcHandler( proc rpcHandler(
b: BlockExcNetwork, peer: NetworkPeer, msg: Message b: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: [CatchableError]).} = ) {.async: (raises: []).} =
## handle rpc messages ## handle rpc messages
## ##
if msg.wantList.entries.len > 0: if msg.wantList.entries.len > 0:
@ -250,7 +266,9 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
if peer in b.peers: if peer in b.peers:
return b.peers.getOrDefault(peer, nil) return b.peers.getOrDefault(peer, nil)
var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} = var getConn: ConnProvider = proc(): Future[Connection] {.
async: (raises: [CancelledError])
.} =
try: try:
trace "Getting new connection stream", peer trace "Getting new connection stream", peer
return await b.switch.dial(peer, Codec) return await b.switch.dial(peer, Codec)
@ -262,9 +280,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
if not isNil(b.getConn): if not isNil(b.getConn):
getConn = b.getConn getConn = b.getConn
let rpcHandler = proc( let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
p: NetworkPeer, msg: Message
) {.async: (raises: [CatchableError]).} =
await b.rpcHandler(p, msg) await b.rpcHandler(p, msg)
# create new pubsub peer # create new pubsub peer
@ -307,7 +323,9 @@ method init*(self: BlockExcNetwork) =
## Perform protocol initialization ## Perform protocol initialization
## ##
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined: if event.kind == PeerEventKind.Joined:
self.setupPeer(peerId) self.setupPeer(peerId)
else: else:
@ -316,7 +334,9 @@ method init*(self: BlockExcNetwork) =
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc handler(conn: Connection, proto: string) {.async.} = proc handler(
conn: Connection, proto: string
): Future[void] {.async: (raises: [CancelledError]).} =
let peerId = conn.peerId let peerId = conn.peerId
let blockexcPeer = self.getOrCreatePeer(peerId) let blockexcPeer = self.getOrCreatePeer(peerId)
await blockexcPeer.readLoop(conn) # attach read loop await blockexcPeer.readLoop(conn) # attach read loop
@ -353,26 +373,32 @@ proc new*(
wantType: WantType = WantType.WantHave, wantType: WantType = WantType.WantHave,
full: bool = false, full: bool = false,
sendDontHave: bool = false, sendDontHave: bool = false,
): Future[void] {.gcsafe.} = ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave) self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)
proc sendWantCancellations( proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress] id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.gcsafe.} = ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantCancellations(id, addresses) self.sendWantCancellations(id, addresses)
proc sendBlocksDelivery( proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery] id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.gcsafe.} = ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlocksDelivery(id, blocksDelivery) self.sendBlocksDelivery(id, blocksDelivery)
proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} = proc sendPresence(
id: PeerId, presence: seq[BlockPresence]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlockPresence(id, presence) self.sendBlockPresence(id, presence)
proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} = proc sendAccount(
id: PeerId, account: Account
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendAccount(id, account) self.sendAccount(id, account)
proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} = proc sendPayment(
id: PeerId, payment: SignedState
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendPayment(id, payment) self.sendPayment(id, payment)
self.request = BlockExcRequest( self.request = BlockExcRequest(


@@ -7,9 +7,7 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
-import pkg/upraises
-push:
-  {.upraises: [].}
+{.push raises: [].}
 import pkg/chronos
 import pkg/libp2p
@@ -18,6 +16,7 @@ import ../protobuf/blockexc
 import ../protobuf/message
 import ../../errors
 import ../../logutils
+import ../../utils/trackedfutures
 logScope:
   topics = "codex blockexcnetworkpeer"
@ -25,11 +24,10 @@ logScope:
const DefaultYieldInterval = 50.millis const DefaultYieldInterval = 50.millis
type type
ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.} ConnProvider* =
proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).}
RPCHandler* = proc( RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).}
peer: NetworkPeer, msg: Message
): Future[void].Raising(CatchableError) {.gcsafe.}
NetworkPeer* = ref object of RootObj NetworkPeer* = ref object of RootObj
id*: PeerId id*: PeerId
@ -37,55 +35,60 @@ type
sendConn: Connection sendConn: Connection
getConn: ConnProvider getConn: ConnProvider
yieldInterval*: Duration = DefaultYieldInterval yieldInterval*: Duration = DefaultYieldInterval
trackedFutures: TrackedFutures
proc connected*(b: NetworkPeer): bool = proc connected*(self: NetworkPeer): bool =
not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof) not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof)
proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} = proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
if isNil(conn): if isNil(conn):
trace "No connection to read from", peer = b.id trace "No connection to read from", peer = self.id
return return
trace "Attaching read loop", peer = b.id, connId = conn.oid trace "Attaching read loop", peer = self.id, connId = conn.oid
try: try:
var nextYield = Moment.now() + b.yieldInterval var nextYield = Moment.now() + self.yieldInterval
while not conn.atEof or not conn.closed: while not conn.atEof or not conn.closed:
if Moment.now() > nextYield: if Moment.now() > nextYield:
nextYield = Moment.now() + b.yieldInterval nextYield = Moment.now() + self.yieldInterval
trace "Yielding in read loop", trace "Yielding in read loop",
peer = b.id, nextYield = nextYield, interval = b.yieldInterval peer = self.id, nextYield = nextYield, interval = self.yieldInterval
await sleepAsync(10.millis) await sleepAsync(10.millis)
let let
data = await conn.readLp(MaxMessageSize.int) data = await conn.readLp(MaxMessageSize.int)
msg = Message.protobufDecode(data).mapFailure().tryGet() msg = Message.protobufDecode(data).mapFailure().tryGet()
trace "Received message", peer = b.id, connId = conn.oid trace "Received message", peer = self.id, connId = conn.oid
await b.handler(b, msg) await self.handler(self, msg)
except CancelledError: except CancelledError:
trace "Read loop cancelled" trace "Read loop cancelled"
except CatchableError as err: except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg warn "Exception in blockexc read loop", msg = err.msg
finally: finally:
trace "Detaching read loop", peer = b.id, connId = conn.oid trace "Detaching read loop", peer = self.id, connId = conn.oid
await conn.close() await conn.close()
proc connect*(b: NetworkPeer): Future[Connection] {.async.} = proc connect*(
if b.connected: self: NetworkPeer
trace "Already connected", peer = b.id, connId = b.sendConn.oid ): Future[Connection] {.async: (raises: [CancelledError]).} =
return b.sendConn if self.connected:
trace "Already connected", peer = self.id, connId = self.sendConn.oid
return self.sendConn
b.sendConn = await b.getConn() self.sendConn = await self.getConn()
asyncSpawn b.readLoop(b.sendConn) self.trackedFutures.track(self.readLoop(self.sendConn))
return b.sendConn return self.sendConn
proc send*(b: NetworkPeer, msg: Message) {.async.} = proc send*(
let conn = await b.connect() self: NetworkPeer, msg: Message
) {.async: (raises: [CancelledError, LPStreamError]).} =
let conn = await self.connect()
if isNil(conn): if isNil(conn):
warn "Unable to get send connection for peer message not sent", peer = b.id warn "Unable to get send connection for peer message not sent", peer = self.id
return return
trace "Sending message", peer = b.id, connId = conn.oid trace "Sending message", peer = self.id, connId = conn.oid
await conn.writeLp(protobufEncode(msg)) await conn.writeLp(protobufEncode(msg))
func new*( func new*(
@ -96,4 +99,9 @@ func new*(
): NetworkPeer = ): NetworkPeer =
doAssert(not isNil(connProvider), "should supply connection provider") doAssert(not isNil(connProvider), "should supply connection provider")
NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler) NetworkPeer(
id: peer,
getConn: connProvider,
handler: rpcHandler,
trackedFutures: TrackedFutures(),
)
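
ConnProvider and RPCHandler (like the handler types in network.nim) now carry their raises list in the proc type itself, so any proc or closure assigned to them has to declare a matching annotation. A small self-contained sketch of that contract, assuming chronos v4; MessageHandler and logMessage are illustrative names:

import pkg/chronos

type
  # a callback type that promises to raise nothing
  MessageHandler = proc(msg: string) {.gcsafe, async: (raises: []).}

proc logMessage(msg: string) {.async: (raises: []).} =
  # the body may not leak exceptions: handle or swallow them here
  try:
    echo "received: ", msg
  except CatchableError:
    discard

when isMainModule:
  let handler: MessageHandler = logMessage
  waitFor handler("hello")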


@@ -7,16 +7,13 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
+{.push raises: [].}
 import std/sequtils
 import std/tables
 import std/algorithm
 import std/sequtils
-import pkg/upraises
-push:
-  {.upraises: [].}
 import pkg/chronos
 import pkg/libp2p


@@ -97,7 +97,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
   pb.write(field, ipb)
 
 proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
-  var ipb = initProtoBuffer(maxSize = MaxBlockSize)
+  var ipb = initProtoBuffer()
   ipb.write(1, value.blk.cid.data.buffer)
   ipb.write(2, value.blk.data)
   ipb.write(3, value.address)
@@ -128,7 +128,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
   pb.write(field, ipb)
 
 proc protobufEncode*(value: Message): seq[byte] =
-  var ipb = initProtoBuffer(maxSize = MaxMessageSize)
+  var ipb = initProtoBuffer()
   ipb.write(1, value.wantList)
   for v in value.payload:
     ipb.write(3, v)
@@ -254,16 +254,14 @@ proc decode*(
 proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
   var
     value = Message()
-    pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
+    pb = initProtoBuffer(msg)
     ipb: ProtoBuffer
     sublist: seq[seq[byte]]
   if ?pb.getField(1, ipb):
     value.wantList = ?WantList.decode(ipb)
   if ?pb.getRepeatedField(3, sublist):
     for item in sublist:
-      value.payload.add(
-        ?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))
-      )
+      value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
   if ?pb.getRepeatedField(4, sublist):
     for item in sublist:
       value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
@ -1,8 +1,9 @@
{.push raises: [].}
import pkg/stew/byteutils import pkg/stew/byteutils
import pkg/stint import pkg/stint
import pkg/nitro import pkg/nitro
import pkg/questionable import pkg/questionable
import pkg/upraises
import ./blockexc import ./blockexc
export AccountMessage export AccountMessage
@ -11,9 +12,6 @@ export StateChannelUpdate
export stint export stint
export nitro export nitro
push:
{.upraises: [].}
type Account* = object type Account* = object
address*: EthAddress address*: EthAddress
@ -1,8 +1,9 @@
{.push raises: [].}
import libp2p import libp2p
import pkg/stint import pkg/stint
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/upraises
import ./blockexc import ./blockexc
import ../../blocktype import ../../blocktype
@ -11,9 +12,6 @@ export questionable
export stint export stint
export BlockPresenceType export BlockPresenceType
upraises.push:
{.upraises: [].}
type type
PresenceMessage* = blockexc.BlockPresence PresenceMessage* = blockexc.BlockPresence
Presence* = object Presence* = object
@ -28,8 +28,11 @@ const DefaultChunkSize* = DefaultBlockSize
type type
# default reader type # default reader type
ChunkerError* = object of CatchableError
ChunkBuffer* = ptr UncheckedArray[byte] ChunkBuffer* = ptr UncheckedArray[byte]
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.gcsafe, raises: [Defect].} Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
gcsafe, async: (raises: [ChunkerError, CancelledError])
.}
# Reader that splits input data into fixed-size chunks # Reader that splits input data into fixed-size chunks
Chunker* = ref object Chunker* = ref object
@ -74,7 +77,7 @@ proc new*(
proc reader( proc reader(
data: ChunkBuffer, len: int data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} = ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
var res = 0 var res = 0
try: try:
while res < len: while res < len:
@ -85,7 +88,7 @@ proc new*(
raise error raise error
except LPStreamError as error: except LPStreamError as error:
error "LPStream error", err = error.msg error "LPStream error", err = error.msg
raise error raise newException(ChunkerError, "LPStream error", error)
except CatchableError as exc: except CatchableError as exc:
error "CatchableError exception", exc = exc.msg error "CatchableError exception", exc = exc.msg
raise newException(Defect, exc.msg) raise newException(Defect, exc.msg)
@ -102,7 +105,7 @@ proc new*(
proc reader( proc reader(
data: ChunkBuffer, len: int data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} = ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
var total = 0 var total = 0
try: try:
while total < len: while total < len:
@ -134,6 +134,10 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
if config.simulateProofFailures > 0: if config.simulateProofFailures > 0:
warn "Proof failure simulation is not enabled for this build! Configuration ignored" warn "Proof failure simulation is not enabled for this build! Configuration ignored"
if error =? (await market.loadConfig()).errorOption:
fatal "Cannot load market configuration", error = error.msg
quit QuitFailure
let purchasing = Purchasing.new(market, clock) let purchasing = Purchasing.new(market, clock)
let sales = Sales.new(market, clock, repo, proofFailures) let sales = Sales.new(market, clock, repo, proofFailures)
client = some ClientInteractions.new(clock, purchasing) client = some ClientInteractions.new(clock, purchasing)
@ -173,14 +177,20 @@ proc start*(s: CodexServer) {.async.} =
proc stop*(s: CodexServer) {.async.} = proc stop*(s: CodexServer) {.async.} =
notice "Stopping codex node" notice "Stopping codex node"
await allFuturesThrowing( let res = await noCancel allFinishedFailed(
s.restServer.stop(), @[
s.codexNode.switch.stop(), s.restServer.stop(),
s.codexNode.stop(), s.codexNode.switch.stop(),
s.repoStore.stop(), s.codexNode.stop(),
s.maintenance.stop(), s.repoStore.stop(),
s.maintenance.stop(),
]
) )
if res.failure.len > 0:
error "Failed to stop codex node", failures = res.failure.len
raiseAssert "Failed to stop codex node"
proc new*( proc new*(
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
): CodexServer = ): CodexServer =
@ -5,6 +5,7 @@ import pkg/chronos
import pkg/stint import pkg/stint
import ../clock import ../clock
import ../conf import ../conf
import ../utils/trackedfutures
export clock export clock
@ -18,9 +19,12 @@ type OnChainClock* = ref object of Clock
blockNumber: UInt256 blockNumber: UInt256
started: bool started: bool
newBlock: AsyncEvent newBlock: AsyncEvent
trackedFutures: TrackedFutures
proc new*(_: type OnChainClock, provider: Provider): OnChainClock = proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
OnChainClock(provider: provider, newBlock: newAsyncEvent()) OnChainClock(
provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
)
proc update(clock: OnChainClock, blck: Block) = proc update(clock: OnChainClock, blck: Block) =
if number =? blck.number and number > clock.blockNumber: if number =? blck.number and number > clock.blockNumber:
@ -32,15 +36,12 @@ proc update(clock: OnChainClock, blck: Block) =
blockTime = blck.timestamp, blockNumber = number, offset = clock.offset blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
clock.newBlock.fire() clock.newBlock.fire()
proc update(clock: OnChainClock) {.async.} = proc update(clock: OnChainClock) {.async: (raises: []).} =
try: try:
if latest =? (await clock.provider.getBlock(BlockTag.latest)): if latest =? (await clock.provider.getBlock(BlockTag.latest)):
clock.update(latest) clock.update(latest)
except CancelledError as error:
raise error
except CatchableError as error: except CatchableError as error:
debug "error updating clock: ", error = error.msg debug "error updating clock: ", error = error.msg
discard
method start*(clock: OnChainClock) {.async.} = method start*(clock: OnChainClock) {.async.} =
if clock.started: if clock.started:
@ -52,7 +53,7 @@ method start*(clock: OnChainClock) {.async.} =
return return
# ignore block parameter; hardhat may call this with pending blocks # ignore block parameter; hardhat may call this with pending blocks
asyncSpawn clock.update() clock.trackedFutures.track(clock.update())
await clock.update() await clock.update()
@ -64,6 +65,7 @@ method stop*(clock: OnChainClock) {.async.} =
return return
await clock.subscription.unsubscribe() await clock.subscription.unsubscribe()
await clock.trackedFutures.cancelTracked()
clock.started = false clock.started = false
method now*(clock: OnChainClock): SecondsSince1970 = method now*(clock: OnChainClock): SecondsSince1970 =
@ -1,3 +1,4 @@
import std/strformat
import std/strutils import std/strutils
import pkg/ethers import pkg/ethers
import pkg/upraises import pkg/upraises
@ -49,130 +50,173 @@ func new*(
proc raiseMarketError(message: string) {.raises: [MarketError].} = proc raiseMarketError(message: string) {.raises: [MarketError].} =
raise newException(MarketError, message) raise newException(MarketError, message)
template convertEthersError(body) = func prefixWith(suffix, prefix: string, separator = ": "): string =
if prefix.len > 0:
return &"{prefix}{separator}{suffix}"
else:
return suffix
template convertEthersError(msg: string = "", body) =
try: try:
body body
except EthersError as error: except EthersError as error:
raiseMarketError(error.msgDetail) raiseMarketError(error.msgDetail.prefixWith(msg))
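For reference, msg is prepended to whatever detail the EthersError carries; with the default empty msg the original detail is passed through unchanged. A small illustration, assuming the prefixWith helper above is in scope (the error text is made up):

echo "insufficient allowance".prefixWith("Failed to approve funds")
# -> Failed to approve funds: insufficient allowance
echo "insufficient allowance".prefixWith("")
# -> insufficient allowance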
proc config(market: OnChainMarket): Future[MarketplaceConfig] {.async.} = proc config(
market: OnChainMarket
): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
without resolvedConfig =? market.configuration: without resolvedConfig =? market.configuration:
let fetchedConfig = await market.contract.configuration() if err =? (await market.loadConfig()).errorOption:
market.configuration = some fetchedConfig raiseMarketError(err.msg)
return fetchedConfig
without config =? market.configuration:
raiseMarketError("Failed to access to config from the Marketplace contract")
return config
return resolvedConfig return resolvedConfig
proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} = proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
debug "Approving tokens", amount debug "Approving tokens", amount
convertEthersError: convertEthersError("Failed to approve funds"):
let tokenAddress = await market.contract.token() let tokenAddress = await market.contract.token()
let token = Erc20Token.new(tokenAddress, market.signer) let token = Erc20Token.new(tokenAddress, market.signer)
discard await token.increaseAllowance(market.contract.address(), amount).confirm(1) discard await token.increaseAllowance(market.contract.address(), amount).confirm(1)
method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} = method loadConfig*(
market: OnChainMarket
): Future[?!void] {.async: (raises: [CancelledError]).} =
try:
without config =? market.configuration:
let fetchedConfig = await market.contract.configuration()
market.configuration = some fetchedConfig
return success()
except EthersError as err:
return failure newException(
MarketError,
"Failed to fetch the config from the Marketplace contract: " & err.msg,
)
method getZkeyHash*(
market: OnChainMarket
): Future[?string] {.async: (raises: [CancelledError, MarketError]).} =
let config = await market.config() let config = await market.config()
return some config.proofs.zkeyHash return some config.proofs.zkeyHash
method getSigner*(market: OnChainMarket): Future[Address] {.async.} = method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
convertEthersError: convertEthersError("Failed to get signer address"):
return await market.signer.getAddress() return await market.signer.getAddress()
method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} = method periodicity*(
convertEthersError: market: OnChainMarket
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config() let config = await market.config()
let period = config.proofs.period let period = config.proofs.period
return Periodicity(seconds: period) return Periodicity(seconds: period)
method proofTimeout*(market: OnChainMarket): Future[uint64] {.async.} = method proofTimeout*(
convertEthersError: market: OnChainMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config() let config = await market.config()
return config.proofs.timeout return config.proofs.timeout
method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} = method repairRewardPercentage*(
convertEthersError: market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config() let config = await market.config()
return config.collateral.repairRewardPercentage return config.collateral.repairRewardPercentage
method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} = method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
convertEthersError: convertEthersError("Failed to get Marketplace config"):
let config = await market.config() let config = await market.config()
return config.requestDurationLimit return config.requestDurationLimit
method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} = method proofDowntime*(
convertEthersError: market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config() let config = await market.config()
return config.proofs.downtime return config.proofs.downtime
method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} = method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
convertEthersError: convertEthersError("Failed to get slot pointer"):
let overrides = CallOverrides(blockTag: some BlockTag.pending) let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getPointer(slotId, overrides) return await market.contract.getPointer(slotId, overrides)
method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} = method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
convertEthersError: convertEthersError("Failed to get my requests"):
return await market.contract.myRequests return await market.contract.myRequests
method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} = method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
convertEthersError: convertEthersError("Failed to get my slots"):
let slots = await market.contract.mySlots() let slots = await market.contract.mySlots()
debug "Fetched my slots", numSlots = len(slots) debug "Fetched my slots", numSlots = len(slots)
return slots return slots
method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} = method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} =
convertEthersError: convertEthersError("Failed to request storage"):
debug "Requesting storage" debug "Requesting storage"
await market.approveFunds(request.totalPrice()) await market.approveFunds(request.totalPrice())
discard await market.contract.requestStorage(request).confirm(1) discard await market.contract.requestStorage(request).confirm(1)
method getRequest*( method getRequest*(
market: OnChainMarket, id: RequestId market: OnChainMarket, id: RequestId
): Future[?StorageRequest] {.async.} = ): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
let key = $id try:
let key = $id
if market.requestCache.contains(key): if key in market.requestCache:
return some market.requestCache[key] return some market.requestCache[key]
convertEthersError: let request = await market.contract.getRequest(id)
try: market.requestCache[key] = request
let request = await market.contract.getRequest(id) return some request
market.requestCache[key] = request except Marketplace_UnknownRequest, KeyError:
return some request warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
except Marketplace_UnknownRequest: return none StorageRequest
return none StorageRequest except EthersError as e:
error "Cannot retrieve the request", error = e.msg
return none StorageRequest
method requestState*( method requestState*(
market: OnChainMarket, requestId: RequestId market: OnChainMarket, requestId: RequestId
): Future[?RequestState] {.async.} = ): Future[?RequestState] {.async.} =
convertEthersError: convertEthersError("Failed to get request state"):
try: try:
let overrides = CallOverrides(blockTag: some BlockTag.pending) let overrides = CallOverrides(blockTag: some BlockTag.pending)
return some await market.contract.requestState(requestId, overrides) return some await market.contract.requestState(requestId, overrides)
except Marketplace_UnknownRequest: except Marketplace_UnknownRequest:
return none RequestState return none RequestState
method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} = method slotState*(
convertEthersError: market: OnChainMarket, slotId: SlotId
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
let overrides = CallOverrides(blockTag: some BlockTag.pending) let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.slotState(slotId, overrides) return await market.contract.slotState(slotId, overrides)
method getRequestEnd*( method getRequestEnd*(
market: OnChainMarket, id: RequestId market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} = ): Future[SecondsSince1970] {.async.} =
convertEthersError: convertEthersError("Failed to get request end"):
return await market.contract.requestEnd(id) return await market.contract.requestEnd(id)
method requestExpiresAt*( method requestExpiresAt*(
market: OnChainMarket, id: RequestId market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} = ): Future[SecondsSince1970] {.async.} =
convertEthersError: convertEthersError("Failed to get request expiry"):
return await market.contract.requestExpiry(id) return await market.contract.requestExpiry(id)
method getHost( method getHost(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64 market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.async.} = ): Future[?Address] {.async.} =
convertEthersError: convertEthersError("Failed to get slot's host"):
let slotId = slotId(requestId, slotIndex) let slotId = slotId(requestId, slotIndex)
let address = await market.contract.getHost(slotId) let address = await market.contract.getHost(slotId)
if address != Address.default: if address != Address.default:
@ -183,11 +227,11 @@ method getHost(
method currentCollateral*( method currentCollateral*(
market: OnChainMarket, slotId: SlotId market: OnChainMarket, slotId: SlotId
): Future[UInt256] {.async.} = ): Future[UInt256] {.async.} =
convertEthersError: convertEthersError("Failed to get slot's current collateral"):
return await market.contract.currentCollateral(slotId) return await market.contract.currentCollateral(slotId)
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} = method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
convertEthersError: convertEthersError("Failed to get active slot"):
try: try:
return some await market.contract.getActiveSlot(slotId) return some await market.contract.getActiveSlot(slotId)
except Marketplace_SlotIsFree: except Marketplace_SlotIsFree:
@ -200,18 +244,24 @@ method fillSlot(
proof: Groth16Proof, proof: Groth16Proof,
collateral: UInt256, collateral: UInt256,
) {.async.} = ) {.async.} =
convertEthersError: convertEthersError("Failed to fill slot"):
logScope: logScope:
requestId requestId
slotIndex slotIndex
await market.approveFunds(collateral) try:
trace "calling fillSlot on contract" await market.approveFunds(collateral)
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1) trace "calling fillSlot on contract"
trace "fillSlot transaction completed" discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1)
trace "fillSlot transaction completed"
except Marketplace_SlotNotFree as parent:
raise newException(
SlotStateMismatchError, "Failed to fill slot because the slot is not free",
parent,
)
method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
convertEthersError: convertEthersError("Failed to free slot"):
var freeSlot: Future[Confirmable] var freeSlot: Future[Confirmable]
if rewardRecipient =? market.rewardRecipient: if rewardRecipient =? market.rewardRecipient:
# If --reward-recipient specified, use it as the reward recipient, and use # If --reward-recipient specified, use it as the reward recipient, and use
@ -230,11 +280,11 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
discard await freeSlot.confirm(1) discard await freeSlot.confirm(1)
method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} = method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} =
convertEthersError: convertEthersError("Failed to withdraw funds"):
discard await market.contract.withdrawFunds(requestId).confirm(1) discard await market.contract.withdrawFunds(requestId).confirm(1)
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError: convertEthersError("Failed to get proof requirement"):
try: try:
let overrides = CallOverrides(blockTag: some BlockTag.pending) let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.isProofRequired(id, overrides) return await market.contract.isProofRequired(id, overrides)
@ -242,7 +292,7 @@ method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async
return false return false
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError: convertEthersError("Failed to get future proof requirement"):
try: try:
let overrides = CallOverrides(blockTag: some BlockTag.pending) let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.willProofBeRequired(id, overrides) return await market.contract.willProofBeRequired(id, overrides)
@ -252,18 +302,18 @@ method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.a
method getChallenge*( method getChallenge*(
market: OnChainMarket, id: SlotId market: OnChainMarket, id: SlotId
): Future[ProofChallenge] {.async.} = ): Future[ProofChallenge] {.async.} =
convertEthersError: convertEthersError("Failed to get proof challenge"):
let overrides = CallOverrides(blockTag: some BlockTag.pending) let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getChallenge(id, overrides) return await market.contract.getChallenge(id, overrides)
method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} = method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} =
convertEthersError: convertEthersError("Failed to submit proof"):
discard await market.contract.submitProof(id, proof).confirm(1) discard await market.contract.submitProof(id, proof).confirm(1)
method markProofAsMissing*( method markProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period market: OnChainMarket, id: SlotId, period: Period
) {.async.} = ) {.async.} =
convertEthersError: convertEthersError("Failed to mark proof as missing"):
discard await market.contract.markProofAsMissing(id, period).confirm(1) discard await market.contract.markProofAsMissing(id, period).confirm(1)
method canProofBeMarkedAsMissing*( method canProofBeMarkedAsMissing*(
@ -282,20 +332,26 @@ method canProofBeMarkedAsMissing*(
method reserveSlot*( method reserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64 market: OnChainMarket, requestId: RequestId, slotIndex: uint64
) {.async.} = ) {.async.} =
convertEthersError: convertEthersError("Failed to reserve slot"):
discard await market.contract try:
.reserveSlot( discard await market.contract
requestId, .reserveSlot(
slotIndex, requestId,
# reserveSlot runs out of gas for unknown reason, but 100k gas covers it slotIndex,
TransactionOverrides(gasLimit: some 100000.u256), # reserveSlot runs out of gas for unknown reason, but 100k gas covers it
) TransactionOverrides(gasLimit: some 100000.u256),
.confirm(1) )
.confirm(1)
except SlotReservations_ReservationNotAllowed:
raise newException(
SlotReservationNotAllowedError,
"Failed to reserve slot because reservation is not allowed",
)
method canReserveSlot*( method canReserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64 market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async.} = ): Future[bool] {.async.} =
convertEthersError: convertEthersError("Unable to determine if slot can be reserved"):
return await market.contract.canReserveSlot(requestId, slotIndex) return await market.contract.canReserveSlot(requestId, slotIndex)
method subscribeRequests*( method subscribeRequests*(
@ -308,7 +364,7 @@ method subscribeRequests*(
callback(event.requestId, event.ask, event.expiry) callback(event.requestId, event.ask, event.expiry)
convertEthersError: convertEthersError("Failed to subscribe to StorageRequested events"):
let subscription = await market.contract.subscribe(StorageRequested, onEvent) let subscription = await market.contract.subscribe(StorageRequested, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription) return OnChainMarketSubscription(eventSubscription: subscription)
@ -322,7 +378,7 @@ method subscribeSlotFilled*(
callback(event.requestId, event.slotIndex) callback(event.requestId, event.slotIndex)
convertEthersError: convertEthersError("Failed to subscribe to SlotFilled events"):
let subscription = await market.contract.subscribe(SlotFilled, onEvent) let subscription = await market.contract.subscribe(SlotFilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription) return OnChainMarketSubscription(eventSubscription: subscription)
@ -336,7 +392,7 @@ method subscribeSlotFilled*(
if eventRequestId == requestId and eventSlotIndex == slotIndex: if eventRequestId == requestId and eventSlotIndex == slotIndex:
callback(requestId, slotIndex) callback(requestId, slotIndex)
convertEthersError: convertEthersError("Failed to subscribe to SlotFilled events"):
return await market.subscribeSlotFilled(onSlotFilled) return await market.subscribeSlotFilled(onSlotFilled)
method subscribeSlotFreed*( method subscribeSlotFreed*(
@ -349,7 +405,7 @@ method subscribeSlotFreed*(
callback(event.requestId, event.slotIndex) callback(event.requestId, event.slotIndex)
convertEthersError: convertEthersError("Failed to subscribe to SlotFreed events"):
let subscription = await market.contract.subscribe(SlotFreed, onEvent) let subscription = await market.contract.subscribe(SlotFreed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription) return OnChainMarketSubscription(eventSubscription: subscription)
@ -364,7 +420,7 @@ method subscribeSlotReservationsFull*(
callback(event.requestId, event.slotIndex) callback(event.requestId, event.slotIndex)
convertEthersError: convertEthersError("Failed to subscribe to SlotReservationsFull events"):
let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent) let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription) return OnChainMarketSubscription(eventSubscription: subscription)
@ -378,7 +434,7 @@ method subscribeFulfillment(
callback(event.requestId) callback(event.requestId)
convertEthersError: convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription) return OnChainMarketSubscription(eventSubscription: subscription)
@ -393,7 +449,7 @@ method subscribeFulfillment(
if event.requestId == requestId: if event.requestId == requestId:
callback(event.requestId) callback(event.requestId)
convertEthersError: convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription) return OnChainMarketSubscription(eventSubscription: subscription)
@ -407,7 +463,7 @@ method subscribeRequestCancelled*(
callback(event.requestId) callback(event.requestId)
convertEthersError: convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent) let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription) return OnChainMarketSubscription(eventSubscription: subscription)
@ -422,7 +478,7 @@ method subscribeRequestCancelled*(
if event.requestId == requestId: if event.requestId == requestId:
callback(event.requestId) callback(event.requestId)
convertEthersError: convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent) let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription) return OnChainMarketSubscription(eventSubscription: subscription)
@ -436,7 +492,7 @@ method subscribeRequestFailed*(
callback(event.requestId) callback(event.requestId)
convertEthersError: convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent) let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription) return OnChainMarketSubscription(eventSubscription: subscription)
@ -451,7 +507,7 @@ method subscribeRequestFailed*(
if event.requestId == requestId: if event.requestId == requestId:
callback(event.requestId) callback(event.requestId)
convertEthersError: convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent) let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription) return OnChainMarketSubscription(eventSubscription: subscription)
@ -465,7 +521,7 @@ method subscribeProofSubmission*(
callback(event.id) callback(event.id)
convertEthersError: convertEthersError("Failed to subscribe to ProofSubmitted events"):
let subscription = await market.contract.subscribe(ProofSubmitted, onEvent) let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription) return OnChainMarketSubscription(eventSubscription: subscription)
@ -475,13 +531,13 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
method queryPastSlotFilledEvents*( method queryPastSlotFilledEvents*(
market: OnChainMarket, fromBlock: BlockTag market: OnChainMarket, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.async.} = ): Future[seq[SlotFilled]] {.async.} =
convertEthersError: convertEthersError("Failed to get past SlotFilled events from block"):
return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest) return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)
method queryPastSlotFilledEvents*( method queryPastSlotFilledEvents*(
market: OnChainMarket, blocksAgo: int market: OnChainMarket, blocksAgo: int
): Future[seq[SlotFilled]] {.async.} = ): Future[seq[SlotFilled]] {.async.} =
convertEthersError: convertEthersError("Failed to get past SlotFilled events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastSlotFilledEvents(fromBlock) return await market.queryPastSlotFilledEvents(fromBlock)
@ -489,21 +545,58 @@ method queryPastSlotFilledEvents*(
method queryPastSlotFilledEvents*( method queryPastSlotFilledEvents*(
market: OnChainMarket, fromTime: SecondsSince1970 market: OnChainMarket, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.async.} = ): Future[seq[SlotFilled]] {.async.} =
convertEthersError: convertEthersError("Failed to get past SlotFilled events from time"):
let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime) let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock)) return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))
method queryPastStorageRequestedEvents*( method queryPastStorageRequestedEvents*(
market: OnChainMarket, fromBlock: BlockTag market: OnChainMarket, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.async.} = ): Future[seq[StorageRequested]] {.async.} =
convertEthersError: convertEthersError("Failed to get past StorageRequested events from block"):
return return
await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest) await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)
method queryPastStorageRequestedEvents*( method queryPastStorageRequestedEvents*(
market: OnChainMarket, blocksAgo: int market: OnChainMarket, blocksAgo: int
): Future[seq[StorageRequested]] {.async.} = ): Future[seq[StorageRequested]] {.async.} =
convertEthersError: convertEthersError("Failed to get past StorageRequested events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastStorageRequestedEvents(fromBlock) return await market.queryPastStorageRequestedEvents(fromBlock)
method slotCollateral*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
let slotid = slotId(requestId, slotIndex)
try:
let slotState = await market.slotState(slotid)
without request =? await market.getRequest(requestId):
return failure newException(
MarketError, "Failure calculating the slotCollateral, cannot get the request"
)
return market.slotCollateral(request.ask.collateralPerSlot, slotState)
except MarketError as error:
error "Error when trying to calculate the slotCollateral", error = error.msg
return failure error
method slotCollateral*(
market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.raises: [].} =
if slotState == SlotState.Repair:
without repairRewardPercentage =?
market.configuration .? collateral .? repairRewardPercentage:
return failure newException(
MarketError,
"Failure calculating the slotCollateral, cannot get the reward percentage",
)
return success (
collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(
100.u256
)
)
return success(collateralPerSlot)
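As a worked example of the repair branch (numbers are illustrative only): with a collateral of 100 tokens per slot and a repairRewardPercentage of 10, filling a slot in the Repair state requires 100 - (100 * 10) div 100 = 90 tokens, while every other slot state requires the full 100.

# Illustrative only, assuming stint's u256 helpers are in scope
let collateralPerSlot = 100.u256
let repairRewardPercentage = 10'u8
let repairCollateral =
  collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(100.u256)
assert repairCollateral == 90.u256  # other slot states keep the full collateral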
@ -53,6 +53,7 @@ type
Proofs_ProofAlreadyMarkedMissing* = object of SolidityError Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
Proofs_InvalidProbability* = object of SolidityError Proofs_InvalidProbability* = object of SolidityError
Periods_InvalidSecondsPerPeriod* = object of SolidityError Periods_InvalidSecondsPerPeriod* = object of SolidityError
SlotReservations_ReservationNotAllowed* = object of SolidityError
proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.} proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc token*(marketplace: Marketplace): Address {.contract, view.} proc token*(marketplace: Marketplace): Address {.contract, view.}
@ -7,6 +7,8 @@
## This file may not be copied, modified, or distributed except according to ## This file may not be copied, modified, or distributed except according to
## those terms. ## those terms.
{.push raises: [].}
import std/algorithm import std/algorithm
import std/sequtils import std/sequtils
@ -54,70 +56,122 @@ proc toNodeId*(host: ca.Address): NodeId =
readUintBE[256](keccak256.digest(host.toArray).data) readUintBE[256](keccak256.digest(host.toArray).data)
proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} = proc findPeer*(
d: Discovery, peerId: PeerId
): Future[?PeerRecord] {.async: (raises: [CancelledError]).} =
trace "protocol.resolve..." trace "protocol.resolve..."
## Find peer using the given Discovery object ## Find peer using the given Discovery object
## ##
let node = await d.protocol.resolve(toNodeId(peerId))
return try:
if node.isSome(): let node = await d.protocol.resolve(toNodeId(peerId))
node.get().record.data.some
else:
PeerRecord.none
method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} = return
if node.isSome():
node.get().record.data.some
else:
PeerRecord.none
except CancelledError as exc:
warn "Error finding peer", peerId = peerId, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding peer", peerId = peerId, exc = exc.msg
return PeerRecord.none
method find*(
d: Discovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
## Find block providers ## Find block providers
## ##
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
warn "Error finding providers for block", cid, error = error.msg
return providers.filterIt(not (it.data.peerId == d.peerId)) try:
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure,
error:
warn "Error finding providers for block", cid, error = error.msg
method provide*(d: Discovery, cid: Cid) {.async, base.} = return providers.filterIt(not (it.data.peerId == d.peerId))
except CancelledError as exc:
warn "Error finding providers for block", cid, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding providers for block", cid, exc = exc.msg
method provide*(d: Discovery, cid: Cid) {.async: (raises: [CancelledError]), base.} =
## Provide a block Cid ## Provide a block Cid
## ##
let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get) try:
let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)
if nodes.len <= 0: if nodes.len <= 0:
warn "Couldn't provide to any nodes!" warn "Couldn't provide to any nodes!"
except CancelledError as exc:
warn "Error providing block", cid, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error providing block", cid, exc = exc.msg
method find*( method find*(
d: Discovery, host: ca.Address d: Discovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async, base.} = ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
## Find host providers ## Find host providers
## ##
trace "Finding providers for host", host = $host try:
without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure, trace "Finding providers for host", host = $host
error: without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
trace "Error finding providers for host", host = $host, exc = error.msg error:
return trace "Error finding providers for host", host = $host, exc = error.msg
return
if providers.len <= 0: if providers.len <= 0:
trace "No providers found", host = $host trace "No providers found", host = $host
return return
providers.sort do(a, b: SignedPeerRecord) -> int: providers.sort do(a, b: SignedPeerRecord) -> int:
system.cmp[uint64](a.data.seqNo, b.data.seqNo) system.cmp[uint64](a.data.seqNo, b.data.seqNo)
return providers return providers
except CancelledError as exc:
warn "Error finding providers for host", host = $host, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding providers for host", host = $host, exc = exc.msg
method provide*(d: Discovery, host: ca.Address) {.async, base.} = method provide*(
d: Discovery, host: ca.Address
) {.async: (raises: [CancelledError]), base.} =
## Provide hosts ## Provide hosts
## ##
trace "Providing host", host = $host try:
let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get) trace "Providing host", host = $host
if nodes.len > 0: let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
trace "Provided to nodes", nodes = nodes.len if nodes.len > 0:
trace "Provided to nodes", nodes = nodes.len
except CancelledError as exc:
warn "Error providing host", host = $host, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error providing host", host = $host, exc = exc.msg
method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} = method removeProvider*(
d: Discovery, peerId: PeerId
): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} =
## Remove provider from providers table ## Remove provider from providers table
## ##
trace "Removing provider", peerId trace "Removing provider", peerId
d.protocol.removeProvidersLocal(peerId) try:
await d.protocol.removeProvidersLocal(peerId)
except CancelledError as exc:
warn "Error removing provider", peerId = peerId, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error removing provider", peerId = peerId, exc = exc.msg
except Exception as exc: # Something in discv5 is raising Exception
warn "Error removing provider", peerId = peerId, exc = exc.msg
raiseAssert("Unexpected Exception in removeProvider")
proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) = proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
## Update providers record ## Update providers record
@ -125,7 +179,7 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
d.announceAddrs = @addrs d.announceAddrs = @addrs
trace "Updating announce record", addrs = d.announceAddrs info "Updating announce record", addrs = d.announceAddrs
d.providerRecord = SignedPeerRecord d.providerRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, d.announceAddrs)) .init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
.expect("Should construct signed record").some .expect("Should construct signed record").some
@ -137,7 +191,7 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
## Update providers record ## Update providers record
## ##
trace "Updating Dht record", addrs = addrs info "Updating Dht record", addrs = addrs
d.dhtRecord = SignedPeerRecord d.dhtRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, @addrs)) .init(d.key, PeerRecord.init(d.peerId, @addrs))
.expect("Should construct signed record").some .expect("Should construct signed record").some
@ -145,12 +199,18 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
if not d.protocol.isNil: if not d.protocol.isNil:
d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR") d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR")
proc start*(d: Discovery) {.async.} = proc start*(d: Discovery) {.async: (raises: []).} =
d.protocol.open() try:
await d.protocol.start() d.protocol.open()
await d.protocol.start()
except CatchableError as exc:
error "Error starting discovery", exc = exc.msg
proc stop*(d: Discovery) {.async.} = proc stop*(d: Discovery) {.async: (raises: []).} =
await d.protocol.closeWait() try:
await noCancel d.protocol.closeWait()
except CatchableError as exc:
error "Error stopping discovery", exc = exc.msg
proc new*( proc new*(
T: type Discovery, T: type Discovery,
@ -310,10 +310,10 @@ proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
else: else:
task[].success.store(true) task[].success.store(true)
proc encodeAsync*( proc asyncEncode*(
self: Erasure, self: Erasure,
blockSize, blocksLen, parityLen: int, blockSize, blocksLen, parityLen: int,
data: ref seq[seq[byte]], blocks: ref seq[seq[byte]],
parity: ptr UncheckedArray[ptr UncheckedArray[byte]], parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} = ): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new(): without threadPtr =? ThreadSignalPtr.new():
@ -322,21 +322,18 @@ proc encodeAsync*(
defer: defer:
threadPtr.close().expect("closing once works") threadPtr.close().expect("closing once works")
var blockData = createDoubleArray(blocksLen, blockSize) var data = makeUncheckedArray(blocks)
for i in 0 ..< data[].len:
copyMem(blockData[i], addr data[i][0], blockSize)
defer: defer:
freeDoubleArray(blockData, blocksLen) dealloc(data)
## Create an encode task with block data ## Create an encode task with block data
var task = EncodeTask( var task = EncodeTask(
erasure: addr self, erasure: addr self,
blockSize: blockSize, blockSize: blockSize,
blocksLen: blocksLen, blocksLen: blocksLen,
parityLen: parityLen, parityLen: parityLen,
blocks: blockData, blocks: data,
parity: parity, parity: parity,
signal: threadPtr, signal: threadPtr,
) )
@ -348,18 +345,13 @@ proc encodeAsync*(
self.taskPool.spawn leopardEncodeTask(self.taskPool, t) self.taskPool.spawn leopardEncodeTask(self.taskPool, t)
let threadFut = threadPtr.wait() let threadFut = threadPtr.wait()
try: if joinErr =? catch(await threadFut.join()).errorOption:
await threadFut.join() if err =? catch(await noCancel threadFut).errorOption:
except CatchableError as exc: return failure(err)
try: if joinErr of CancelledError:
await threadFut raise (ref CancelledError) joinErr
except AsyncError as asyncExc: else:
return failure(asyncExc.msg) return failure(joinErr)
finally:
if exc of CancelledError:
raise (ref CancelledError) exc
else:
return failure(exc.msg)
if not t.success.load(): if not t.success.load():
return failure("Leopard encoding failed") return failure("Leopard encoding failed")
@ -409,7 +401,7 @@ proc encodeData(
try: try:
if err =? ( if err =? (
await self.encodeAsync( await self.asyncEncode(
manifest.blockSize.int, params.ecK, params.ecM, data, parity manifest.blockSize.int, params.ecK, params.ecM, data, parity
) )
).errorOption: ).errorOption:
@ -489,6 +481,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen) task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer: defer:
decoder.release() decoder.release()
discard task[].signal.fireSync()
if ( if (
let res = decoder.decode( let res = decoder.decode(
@ -506,9 +499,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
else: else:
task[].success.store(true) task[].success.store(true)
discard task[].signal.fireSync() proc asyncDecode*(
proc decodeAsync*(
self: Erasure, self: Erasure,
blockSize, blocksLen, parityLen: int, blockSize, blocksLen, parityLen: int,
blocks, parity: ref seq[seq[byte]], blocks, parity: ref seq[seq[byte]],
@ -521,33 +512,21 @@ proc decodeAsync*(
threadPtr.close().expect("closing once works") threadPtr.close().expect("closing once works")
var var
blocksData = createDoubleArray(blocksLen, blockSize) blockData = makeUncheckedArray(blocks)
parityData = createDoubleArray(parityLen, blockSize) parityData = makeUncheckedArray(parity)
for i in 0 ..< blocks[].len:
if blocks[i].len > 0:
copyMem(blocksData[i], addr blocks[i][0], blockSize)
else:
blocksData[i] = nil
for i in 0 ..< parity[].len:
if parity[i].len > 0:
copyMem(parityData[i], addr parity[i][0], blockSize)
else:
parityData[i] = nil
defer: defer:
freeDoubleArray(blocksData, blocksLen) dealloc(blockData)
freeDoubleArray(parityData, parityLen) dealloc(parityData)
## Create a decode task with block data ## Create a decode task with block data
var task = DecodeTask( var task = DecodeTask(
erasure: addr self, erasure: addr self,
blockSize: blockSize, blockSize: blockSize,
blocksLen: blocksLen, blocksLen: blocksLen,
parityLen: parityLen, parityLen: parityLen,
recoveredLen: blocksLen, recoveredLen: blocksLen,
blocks: blocksData, blocks: blockData,
parity: parityData, parity: parityData,
recovered: recovered, recovered: recovered,
signal: threadPtr, signal: threadPtr,
@ -560,18 +539,13 @@ proc decodeAsync*(
self.taskPool.spawn leopardDecodeTask(self.taskPool, t) self.taskPool.spawn leopardDecodeTask(self.taskPool, t)
let threadFut = threadPtr.wait() let threadFut = threadPtr.wait()
try: if joinErr =? catch(await threadFut.join()).errorOption:
await threadFut.join() if err =? catch(await noCancel threadFut).errorOption:
except CatchableError as exc: return failure(err)
try: if joinErr of CancelledError:
await threadFut raise (ref CancelledError) joinErr
except AsyncError as asyncExc: else:
return failure(asyncExc.msg) return failure(joinErr)
finally:
if exc of CancelledError:
raise (ref CancelledError) exc
else:
return failure(exc.msg)
if not t.success.load(): if not t.success.load():
return failure("Leopard encoding failed") return failure("Leopard encoding failed")
@ -627,7 +601,7 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
trace "Erasure decoding data" trace "Erasure decoding data"
try: try:
if err =? ( if err =? (
await self.decodeAsync( await self.asyncDecode(
encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
) )
).errorOption: ).errorOption:
@ -19,6 +19,8 @@ type
CodexError* = object of CatchableError # base codex error CodexError* = object of CatchableError # base codex error
CodexResult*[T] = Result[T, ref CodexError] CodexResult*[T] = Result[T, ref CodexError]
FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]]
template mapFailure*[T, V, E]( template mapFailure*[T, V, E](
exp: Result[T, V], exc: typedesc[E] exp: Result[T, V], exc: typedesc[E]
): Result[T, ref CatchableError] = ): Result[T, ref CatchableError] =
@ -40,35 +42,18 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
else: else:
T.failure("Option is None") T.failure("Option is None")
# allFuturesThrowing was moved to the tests in libp2p proc allFinishedFailed*[T](futs: seq[Future[T]]): Future[FinishedFailed[T]] {.async.} =
proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] = ## Check if all futures have finished or failed
var futs: seq[Future[T]] ##
for fut in args: ## TODO: wip, not sure if we want this - at the minimum,
futs &= fut ## we should probably avoid the async transform
proc call() {.async.} =
var first: ref CatchableError = nil
futs = await allFinished(futs)
for fut in futs:
if fut.failed:
let err = fut.readError()
if err of Defect:
raise err
else:
if err of CancelledError:
raise err
if isNil(first):
first = err
if not isNil(first):
raise first
return call() var res: FinishedFailed[T] = (@[], @[])
await allFutures(futs)
for f in futs:
if f.failed:
res.failure.add f
else:
res.success.add f
proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} = return res
try:
await allFuturesThrowing(fut)
except CancelledError as exc:
raise exc
except CatchableError as exc:
return failure(exc.msg)
return success()
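A minimal usage sketch of allFinishedFailed, mirroring how the stop path above consumes the result (the futures and log fields are illustrative, and chronicles logging is assumed as elsewhere in the codebase):

proc exampleShutdown(futs: seq[Future[void]]) {.async.} =
  let res = await allFinishedFailed(futs)
  if res.failure.len > 0:
    warn "Some futures failed", failed = res.failure.len
  else:
    trace "All futures completed", completed = res.success.len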
@ -18,6 +18,8 @@ export periods
type type
Market* = ref object of RootObj Market* = ref object of RootObj
MarketError* = object of CodexError MarketError* = object of CodexError
SlotStateMismatchError* = object of MarketError
SlotReservationNotAllowedError* = object of MarketError
Subscription* = ref object of RootObj Subscription* = ref object of RootObj
OnRequest* = OnRequest* =
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].} proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}
@ -62,25 +64,40 @@ type
ProofSubmitted* = object of MarketplaceEvent ProofSubmitted* = object of MarketplaceEvent
id*: SlotId id*: SlotId
method getZkeyHash*(market: Market): Future[?string] {.base, async.} = method loadConfig*(
market: Market
): Future[?!void] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method getZkeyHash*(
market: Market
): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented") raiseAssert("not implemented")
method getSigner*(market: Market): Future[Address] {.base, async.} = method getSigner*(market: Market): Future[Address] {.base, async.} =
raiseAssert("not implemented") raiseAssert("not implemented")
method periodicity*(market: Market): Future[Periodicity] {.base, async.} = method periodicity*(
market: Market
): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented") raiseAssert("not implemented")
method proofTimeout*(market: Market): Future[uint64] {.base, async.} = method proofTimeout*(
market: Market
): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented") raiseAssert("not implemented")
method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} = method repairRewardPercentage*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented") raiseAssert("not implemented")
method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} = method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} =
raiseAssert("not implemented") raiseAssert("not implemented")
method proofDowntime*(market: Market): Future[uint8] {.base, async.} = method proofDowntime*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented") raiseAssert("not implemented")
method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} = method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
@ -102,7 +119,7 @@ method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
method getRequest*( method getRequest*(
market: Market, id: RequestId market: Market, id: RequestId
): Future[?StorageRequest] {.base, async.} = ): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented") raiseAssert("not implemented")
method requestState*( method requestState*(
@ -110,7 +127,9 @@ method requestState*(
): Future[?RequestState] {.base, async.} = ): Future[?RequestState] {.base, async.} =
raiseAssert("not implemented") raiseAssert("not implemented")
method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} = method slotState*(
market: Market, slotId: SlotId
): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented") raiseAssert("not implemented")
method getRequestEnd*( method getRequestEnd*(
@ -270,3 +289,13 @@ method queryPastStorageRequestedEvents*(
market: Market, blocksAgo: int market: Market, blocksAgo: int
): Future[seq[StorageRequested]] {.base, async.} = ): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented") raiseAssert("not implemented")
method slotCollateral*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method slotCollateral*(
market: Market, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.base, gcsafe, raises: [].} =
raiseAssert("not implemented")
@ -27,11 +27,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint
const MaxMerkleProofSize = 1.MiBs.uint const MaxMerkleProofSize = 1.MiBs.uint
proc encode*(self: CodexTree): seq[byte] = proc encode*(self: CodexTree): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize) var pb = initProtoBuffer()
pb.write(1, self.mcodec.uint64) pb.write(1, self.mcodec.uint64)
pb.write(2, self.leavesCount.uint64) pb.write(2, self.leavesCount.uint64)
for node in self.nodes: for node in self.nodes:
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize) var nodesPb = initProtoBuffer()
nodesPb.write(1, node) nodesPb.write(1, node)
nodesPb.finish() nodesPb.finish()
pb.write(3, nodesPb) pb.write(3, nodesPb)
@ -40,7 +40,7 @@ proc encode*(self: CodexTree): seq[byte] =
pb.buffer pb.buffer
proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree = proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize) var pb = initProtoBuffer(data)
var mcodecCode: uint64 var mcodecCode: uint64
var leavesCount: uint64 var leavesCount: uint64
discard ?pb.getField(1, mcodecCode).mapFailure discard ?pb.getField(1, mcodecCode).mapFailure
@ -63,13 +63,13 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
CodexTree.fromNodes(mcodec, nodes, leavesCount.int) CodexTree.fromNodes(mcodec, nodes, leavesCount.int)
proc encode*(self: CodexProof): seq[byte] = proc encode*(self: CodexProof): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleProofSize) var pb = initProtoBuffer()
pb.write(1, self.mcodec.uint64) pb.write(1, self.mcodec.uint64)
pb.write(2, self.index.uint64) pb.write(2, self.index.uint64)
pb.write(3, self.nleaves.uint64) pb.write(3, self.nleaves.uint64)
for node in self.path: for node in self.path:
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize) var nodesPb = initProtoBuffer()
nodesPb.write(1, node) nodesPb.write(1, node)
nodesPb.finish() nodesPb.finish()
pb.write(4, nodesPb) pb.write(4, nodesPb)
@ -78,7 +78,7 @@ proc encode*(self: CodexProof): seq[byte] =
pb.buffer pb.buffer
proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof = proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize) var pb = initProtoBuffer(data)
var mcodecCode: uint64 var mcodecCode: uint64
var index: uint64 var index: uint64
var nleaves: uint64 var nleaves: uint64

View File

@ -153,7 +153,11 @@ proc updateExpiry*(
let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt( let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt(
self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry) self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry)
) )
await allFuturesThrowing(ensuringFutures)
let res = await allFinishedFailed(ensuringFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
except CancelledError as exc: except CancelledError as exc:
raise exc raise exc
except CatchableError as exc: except CatchableError as exc:
@ -186,8 +190,10 @@ proc fetchBatched*(
if not (await address in self.networkStore) or fetchLocal: if not (await address in self.networkStore) or fetchLocal:
self.networkStore.getBlock(address) self.networkStore.getBlock(address)
if blocksErr =? (await allFutureResult(blocks)).errorOption: let res = await allFinishedFailed(blocks)
return failure(blocksErr) if res.failure.len > 0:
trace "Some blocks failed to fetch", len = res.failure.len
return failure("Some blocks failed to fetch (" & $res.failure.len & " )")
if not onBatch.isNil and if not onBatch.isNil and
batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption: batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption:
@ -213,6 +219,30 @@ proc fetchBatched*(
let iter = Iter[int].new(0 ..< manifest.blocksCount) let iter = Iter[int].new(0 ..< manifest.blocksCount)
self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal) self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal)
proc fetchDatasetAsync*(
self: CodexNodeRef, manifest: Manifest, fetchLocal = true
): Future[void] {.async: (raises: []).} =
## Asynchronously fetch a dataset in the background.
## This task will be tracked and cleaned up on node shutdown.
##
try:
if err =? (
await self.fetchBatched(
manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal
)
).errorOption:
error "Unable to fetch blocks", err = err.msg
except CancelledError as exc:
trace "Cancelled fetching blocks", exc = exc.msg
except CatchableError as exc:
error "Error fetching blocks", exc = exc.msg
proc fetchDatasetAsyncTask*(self: CodexNodeRef, manifest: Manifest) =
## Start fetching a dataset in the background.
## The task will be tracked and cleaned up on node shutdown.
##
self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} = proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} =
## Streams the contents of a single block. ## Streams the contents of a single block.
## ##
@ -223,36 +253,27 @@ proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async
without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err: without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
return failure(err) return failure(err)
proc streamOneBlock(): Future[void] {.async.} = proc streamOneBlock(): Future[void] {.async: (raises: []).} =
try: try:
defer:
await stream.pushEof()
await stream.pushData(blk.data) await stream.pushData(blk.data)
except CatchableError as exc: except CatchableError as exc:
trace "Unable to send block", cid, exc = exc.msg trace "Unable to send block", cid, exc = exc.msg
discard
finally:
await stream.pushEof()
self.trackedFutures.track(streamOneBlock()) self.trackedFutures.track(streamOneBlock())
LPStream(stream).success LPStream(stream).success
proc streamEntireDataset( proc streamEntireDataset(
self: CodexNodeRef, self: CodexNodeRef, manifest: Manifest, manifestCid: Cid
manifest: Manifest,
manifestCid: Cid,
prefetchBatch = DefaultFetchBatch,
): Future[?!LPStream] {.async.} = ): Future[?!LPStream] {.async.} =
## Streams the contents of the entire dataset described by the manifest. ## Streams the contents of the entire dataset described by the manifest.
## Background jobs (erasure decoding and prefetching) will be cancelled when
## the stream is closed.
## ##
trace "Retrieving blocks from manifest", manifestCid trace "Retrieving blocks from manifest", manifestCid
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
var jobs: seq[Future[void]]
if manifest.protected: if manifest.protected:
# Retrieve, decode and save to the local store all EС groups # Retrieve, decode and save to the local store all EС groups
proc erasureJob(): Future[void] {.async.} = proc erasureJob(): Future[void] {.async: (raises: []).} =
try: try:
# Spawn an erasure decoding job # Spawn an erasure decoding job
let erasure = Erasure.new( let erasure = Erasure.new(
@ -260,36 +281,17 @@ proc streamEntireDataset(
) )
without _ =? (await erasure.decode(manifest)), error: without _ =? (await erasure.decode(manifest)), error:
error "Unable to erasure decode manifest", manifestCid, exc = error.msg error "Unable to erasure decode manifest", manifestCid, exc = error.msg
except CancelledError:
trace "Erasure job cancelled", manifestCid
except CatchableError as exc: except CatchableError as exc:
trace "Error erasure decoding manifest", manifestCid, exc = exc.msg trace "Error erasure decoding manifest", manifestCid, exc = exc.msg
jobs.add(erasureJob()) self.trackedFutures.track(erasureJob())
proc prefetch(): Future[void] {.async.} = self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
try: # prefetch task should not fetch from local store
if err =?
(await self.fetchBatched(manifest, prefetchBatch, fetchLocal = false)).errorOption:
error "Unable to fetch blocks", err = err.msg
except CancelledError:
trace "Prefetch job cancelled"
except CatchableError as exc:
error "Error fetching blocks", exc = exc.msg
jobs.add(prefetch())
# Monitor stream completion and cancel background jobs when done
proc monitorStream() {.async.} =
try:
await stream.join()
finally:
await allFutures(jobs.mapIt(it.cancelAndWait))
self.trackedFutures.track(monitorStream())
# Retrieve all blocks of the dataset sequentially from the local store or network
trace "Creating store stream for manifest", manifestCid trace "Creating store stream for manifest", manifestCid
stream.success LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success
proc retrieve*( proc retrieve*(
self: CodexNodeRef, cid: Cid, local: bool = true self: CodexNodeRef, cid: Cid, local: bool = true
@ -591,7 +593,11 @@ proc requestStorage*(
success purchase.id success purchase.id
proc onStore( proc onStore(
self: CodexNodeRef, request: StorageRequest, slotIdx: uint64, blocksCb: BlocksCb self: CodexNodeRef,
request: StorageRequest,
slotIdx: uint64,
blocksCb: BlocksCb,
isRepairing: bool = false,
): Future[?!void] {.async.} = ): Future[?!void] {.async.} =
## store data in local storage ## store data in local storage
## ##
@ -604,6 +610,10 @@ proc onStore(
trace "Received a request to store a slot" trace "Received a request to store a slot"
# TODO: Use the isRepairing to manage the slot download.
# If isRepairing is true, the slot has to be repaired before
# being downloaded.
without manifest =? (await self.fetchManifest(cid)), err: without manifest =? (await self.fetchManifest(cid)), err:
trace "Unable to fetch manifest for cid", cid, err = err.msg trace "Unable to fetch manifest for cid", cid, err = err.msg
return failure(err) return failure(err)
@ -624,8 +634,11 @@ proc onStore(
let ensureExpiryFutures = let ensureExpiryFutures =
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970)) blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970))
if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption:
return failure(updateExpiryErr) let res = await allFinishedFailed(ensureExpiryFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption: if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption:
trace "Unable to process blocks", err = err.msg trace "Unable to process blocks", err = err.msg
@ -745,9 +758,12 @@ proc start*(self: CodexNodeRef) {.async.} =
if hostContracts =? self.contracts.host: if hostContracts =? self.contracts.host:
hostContracts.sales.onStore = proc( hostContracts.sales.onStore = proc(
request: StorageRequest, slot: uint64, onBatch: BatchProc request: StorageRequest,
slot: uint64,
onBatch: BatchProc,
isRepairing: bool = false,
): Future[?!void] = ): Future[?!void] =
self.onStore(request, slot, onBatch) self.onStore(request, slot, onBatch, isRepairing)
hostContracts.sales.onExpiryUpdate = proc( hostContracts.sales.onExpiryUpdate = proc(
rootCid: Cid, expiry: SecondsSince1970 rootCid: Cid, expiry: SecondsSince1970
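A self-contained chronos sketch of the background-task pattern that fetchDatasetAsync and fetchDatasetAsyncTask introduce above: the task is declared as raising nothing, logs and swallows its own failures, and is handed to a tracker so it can be cancelled at shutdown. All names here, and the seq standing in for TrackedFutures, are illustrative only.

import pkg/chronos

type TrackedFuture = Future[void].Raising([])

proc backgroundFetch(name: string): Future[void] {.async: (raises: []).} =
  try:
    await sleepAsync(10.millis) # stand-in for the real batched block fetch
    echo "fetched ", name
  except CancelledError:
    echo "fetch cancelled: ", name # swallowed: nobody awaits this task directly
  except CatchableError as exc:
    echo "fetch failed: ", name, " ", exc.msg

when isMainModule:
  var tracked: seq[TrackedFuture] # stand-in for trackedFutures.track(fut)
  let fut = backgroundFetch("dataset")
  tracked.add fut
  waitFor fut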

View File

@ -65,9 +65,15 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} =
return %RestContentList.init(content) return %RestContentList.init(content)
proc isPending(resp: HttpResponseRef): bool =
## Checks that an HttpResponseRef object is still pending; i.e.,
## that no body has yet been sent. This helps us guard against calling
## sendBody(resp: HttpResponseRef, ...) twice, which is illegal.
return resp.getResponseState() == HttpResponseState.Empty
proc retrieveCid( proc retrieveCid(
node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef
): Future[RestApiResponse] {.async.} = ): Future[void] {.async: (raises: [CancelledError, HttpWriteError]).} =
## Download a file from the node in a streaming ## Download a file from the node in a streaming
## manner ## manner
## ##
@ -79,16 +85,21 @@ proc retrieveCid(
without stream =? (await node.retrieve(cid, local)), error: without stream =? (await node.retrieve(cid, local)), error:
if error of BlockNotFoundError: if error of BlockNotFoundError:
resp.status = Http404 resp.status = Http404
return await resp.sendBody("") await resp.sendBody(
"The requested CID could not be retrieved (" & error.msg & ")."
)
return
else: else:
resp.status = Http500 resp.status = Http500
return await resp.sendBody(error.msg) await resp.sendBody(error.msg)
return
# It is ok to fetch again the manifest because it will hit the cache # It is ok to fetch again the manifest because it will hit the cache
without manifest =? (await node.fetchManifest(cid)), err: without manifest =? (await node.fetchManifest(cid)), err:
error "Failed to fetch manifest", err = err.msg error "Failed to fetch manifest", err = err.msg
resp.status = Http404 resp.status = Http404
return await resp.sendBody(err.msg) await resp.sendBody(err.msg)
return
if manifest.mimetype.isSome: if manifest.mimetype.isSome:
resp.setHeader("Content-Type", manifest.mimetype.get()) resp.setHeader("Content-Type", manifest.mimetype.get())
@ -103,7 +114,14 @@ proc retrieveCid(
else: else:
resp.setHeader("Content-Disposition", "attachment") resp.setHeader("Content-Disposition", "attachment")
await resp.prepareChunked() # For erasure-coded datasets, we need to return the _original_ length; i.e.,
# the length of the non-erasure-coded dataset, as that's what we will be
# returning to the client.
let contentLength =
if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize
resp.setHeader("Content-Length", $(contentLength.int))
await resp.prepare(HttpResponseStreamType.Plain)
while not stream.atEof: while not stream.atEof:
var var
@ -116,13 +134,16 @@ proc retrieveCid(
bytes += buff.len bytes += buff.len
await resp.sendChunk(addr buff[0], buff.len) await resp.send(addr buff[0], buff.len)
await resp.finish() await resp.finish()
codex_api_downloads.inc() codex_api_downloads.inc()
except CancelledError as exc:
raise exc
except CatchableError as exc: except CatchableError as exc:
warn "Error streaming blocks", exc = exc.msg warn "Error streaming blocks", exc = exc.msg
resp.status = Http500 resp.status = Http500
return await resp.sendBody("") if resp.isPending():
await resp.sendBody(exc.msg)
finally: finally:
info "Sent bytes", cid = cid, bytes info "Sent bytes", cid = cid, bytes
if not stream.isNil: if not stream.isNil:
@ -299,15 +320,8 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
error "Failed to fetch manifest", err = err.msg error "Failed to fetch manifest", err = err.msg
return RestApiResponse.error(Http404, err.msg, headers = headers) return RestApiResponse.error(Http404, err.msg, headers = headers)
proc fetchDatasetAsync(): Future[void] {.async.} = # Start fetching the dataset in the background
try: node.fetchDatasetAsyncTask(manifest)
if err =? (await node.fetchBatched(manifest)).errorOption:
error "Unable to fetch dataset", cid = cid.get(), err = err.msg
except CatchableError as exc:
error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg
discard
asyncSpawn fetchDatasetAsync()
let json = %formatManifest(cid.get(), manifest) let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json") return RestApiResponse.response($json, contentType = "application/json")
@ -328,6 +342,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.setCorsHeaders("GET", corsOrigin) resp.setCorsHeaders("GET", corsOrigin)
resp.setHeader("Access-Control-Headers", "X-Requested-With") resp.setHeader("Access-Control-Headers", "X-Requested-With")
resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition")
await node.retrieveCid(cid.get(), local = false, resp = resp) await node.retrieveCid(cid.get(), local = false, resp = resp)
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do( router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do(
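A tiny worked illustration of the Content-Length choice above, pulled out into a standalone helper; the parameter names stand in for the manifest fields and are not part of any real API.

proc exampleContentLength(
    protected: bool, datasetSize, originalDatasetSize: uint64
): uint64 =
  # erasure-coded (protected) datasets advertise the original, pre-encoding size,
  # since that is what the client actually receives on the stream
  if protected: originalDatasetSize else: datasetSize

assert exampleContentLength(true, 1536, 1024) == 1024
assert exampleContentLength(false, 1536, 1024) == 1536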

View File

@ -157,13 +157,28 @@ proc cleanUp(
# Re-add items back into the queue to prevent small availabilities from # Re-add items back into the queue to prevent small availabilities from
# draining the queue. Seen items will be ordered last. # draining the queue. Seen items will be ordered last.
if reprocessSlot and request =? data.request: if reprocessSlot and request =? data.request:
let queue = sales.context.slotQueue try:
var seenItem = SlotQueueItem.init( without collateral =?
data.requestId, data.slotIndex.uint16, data.ask, request.expiry, seen = true await sales.context.market.slotCollateral(data.requestId, data.slotIndex), err:
) error "Failed to re-add item back to the slot queue: unable to calculate collateral",
trace "pushing ignored item to queue, marked as seen" error = err.msg
if err =? queue.push(seenItem).errorOption: return
error "failed to readd slot to queue", errorType = $(type err), error = err.msg
let queue = sales.context.slotQueue
var seenItem = SlotQueueItem.init(
data.requestId,
data.slotIndex.uint16,
data.ask,
request.expiry,
seen = true,
collateral = collateral,
)
trace "pushing ignored item to queue, marked as seen"
if err =? queue.push(seenItem).errorOption:
error "failed to readd slot to queue", errorType = $(type err), error = err.msg
except MarketError as e:
error "Failed to re-add item back to the slot queue.", error = e.msg
return
await sales.remove(agent) await sales.remove(agent)
@ -270,7 +285,7 @@ proc load*(sales: Sales) {.async.} =
agent.start(SaleUnknown()) agent.start(SaleUnknown())
sales.agents.add agent sales.agents.add agent
proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} = proc OnAvailabilitySaved(sales: Sales, availability: Availability) {.async.} =
## When availabilities are modified or added, the queue should be unpaused if ## When availabilities are modified or added, the queue should be unpaused if
## it was paused and any slots in the queue should have their `seen` flag ## it was paused and any slots in the queue should have their `seen` flag
## cleared. ## cleared.
@ -283,7 +298,7 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
proc onStorageRequested( proc onStorageRequested(
sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64 sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64
) = ) {.raises: [].} =
logScope: logScope:
topics = "marketplace sales onStorageRequested" topics = "marketplace sales onStorageRequested"
requestId requestId
@ -294,7 +309,14 @@ proc onStorageRequested(
trace "storage requested, adding slots to queue" trace "storage requested, adding slots to queue"
without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err: let market = sales.context.market
without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free),
err:
error "Request failure, unable to calculate collateral", error = err.msg
return
without items =? SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err:
if err of SlotsOutOfRangeError: if err of SlotsOutOfRangeError:
warn "Too many slots, cannot add to queue" warn "Too many slots, cannot add to queue"
else: else:
@ -324,39 +346,54 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
let market = context.market let market = context.market
let queue = context.slotQueue let queue = context.slotQueue
if slotIndex > uint16.high.uint64: try:
error "Cannot cast slot index to uint16, value = ", slotIndex without request =? (await market.getRequest(requestId)), err:
return error "unknown request in contract", error = err.msgDetail
return
# first attempt to populate request using existing metadata in queue # Take the repairing state into consideration to calculate the collateral.
without var found =? queue.populateItem(requestId, slotIndex.uint16): # This is particularly needed because it will affect the priority in the queue
trace "no existing request metadata, getting request info from contract" # and we want to give the user the ability to tweak the parameters.
# if there's no existing slot for that request, retrieve the request # Adding the repairing state directly in the queue priority calculation
# from the contract. # would not allow this flexibility.
try: without collateral =?
without request =? await market.getRequest(requestId): market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
error "unknown request in contract" error "Failed to add freed slot to queue: unable to calculate collateral",
return error = err.msg
return
found = SlotQueueItem.init(request, slotIndex.uint16) if slotIndex > uint16.high.uint64:
except CancelledError: error "Cannot cast slot index to uint16, value = ", slotIndex
discard # do not propagate as addSlotToQueue was asyncSpawned return
except CatchableError as e:
error "failed to get request from contract and add slots to queue",
error = e.msgDetail
if err =? queue.push(found).errorOption: without slotQueueItem =?
error "failed to push slot items to queue", error = err.msgDetail SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch,
err:
warn "Too many slots, cannot add to queue", error = err.msgDetail
return
if err =? queue.push(slotQueueItem).errorOption:
if err of SlotQueueItemExistsError:
error "Failed to push item to queue becaue it already exists",
error = err.msgDetail
elif err of QueueNotRunningError:
warn "Failed to push item to queue becaue queue is not running",
error = err.msgDetail
except CatchableError as e:
warn "Failed to add slot to queue", error = e.msg
# We could get rid of this by adding the storage ask in the SlotFreed event,
# so we would not need to call getRequest to get the collateralPerSlot.
let fut = addSlotToQueue() let fut = addSlotToQueue()
sales.trackedFutures.track(fut) sales.trackedFutures.track(fut)
asyncSpawn fut
proc subscribeRequested(sales: Sales) {.async.} = proc subscribeRequested(sales: Sales) {.async.} =
let context = sales.context let context = sales.context
let market = context.market let market = context.market
proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: uint64) = proc onStorageRequested(
requestId: RequestId, ask: StorageAsk, expiry: uint64
) {.raises: [].} =
sales.onStorageRequested(requestId, ask, expiry) sales.onStorageRequested(requestId, ask, expiry)
try: try:
@ -488,16 +525,18 @@ proc startSlotQueue(sales: Sales) =
let slotQueue = sales.context.slotQueue let slotQueue = sales.context.slotQueue
let reservations = sales.context.reservations let reservations = sales.context.reservations
slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = slotQueue.onProcessSlot = proc(
item: SlotQueueItem, done: Future[void]
) {.async: (raises: []).} =
trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
sales.processSlot(item, done) sales.processSlot(item, done)
slotQueue.start() slotQueue.start()
proc onAvailabilityAdded(availability: Availability) {.async.} = proc OnAvailabilitySaved(availability: Availability) {.async.} =
await sales.onAvailabilityAdded(availability) await sales.OnAvailabilitySaved(availability)
reservations.onAvailabilityAdded = onAvailabilityAdded reservations.OnAvailabilitySaved = OnAvailabilitySaved
proc subscribe(sales: Sales) {.async.} = proc subscribe(sales: Sales) {.async.} =
await sales.subscribeRequested() await sales.subscribeRequested()
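A self-contained sketch of the collateral discount the comments above refer to: when a freed slot is in the repair state, the collateral demanded from the next provider is reduced, which in turn raises the slot's priority in the queue. The percentage, the types, and the proc name are assumptions; only the idea is taken from the surrounding code.

import pkg/stint

type ExampleSlotState = enum
  Free
  Repair

proc exampleSlotCollateral(
    collateralPerSlot: UInt256,
    state: ExampleSlotState,
    repairRewardPercentage = 10.u256,
): UInt256 =
  if state == Repair:
    # repairers post less collateral; the discount mirrors the repair reward
    collateralPerSlot - (collateralPerSlot * repairRewardPercentage) div 100.u256
  else:
    collateralPerSlot

assert exampleSlotCollateral(100.u256, Repair) == 90.u256
assert exampleSlotCollateral(100.u256, Free) == 100.u256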

View File

@ -82,11 +82,11 @@ type
availabilityLock: AsyncLock availabilityLock: AsyncLock
# Lock for protecting assertions of availability's sizes when searching for matching availability # Lock for protecting assertions of availability's sizes when searching for matching availability
repo: RepoStore repo: RepoStore
onAvailabilityAdded: ?OnAvailabilityAdded OnAvailabilitySaved: ?OnAvailabilitySaved
GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.} GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
IterDispose* = proc(): Future[?!void] {.gcsafe, closure.} IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
OnAvailabilityAdded* = OnAvailabilitySaved* =
proc(availability: Availability): Future[void] {.upraises: [], gcsafe.} proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
StorableIter* = ref object StorableIter* = ref object
finished*: bool finished*: bool
@ -189,10 +189,10 @@ logutils.formatIt(LogFormat.textLines, SomeStorableId):
logutils.formatIt(LogFormat.json, SomeStorableId): logutils.formatIt(LogFormat.json, SomeStorableId):
it.to0xHexLog it.to0xHexLog
proc `onAvailabilityAdded=`*( proc `OnAvailabilitySaved=`*(
self: Reservations, onAvailabilityAdded: OnAvailabilityAdded self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved
) = ) =
self.onAvailabilityAdded = some onAvailabilityAdded self.OnAvailabilitySaved = some OnAvailabilitySaved
func key*(id: AvailabilityId): ?!Key = func key*(id: AvailabilityId): ?!Key =
## sales / reservations / <availabilityId> ## sales / reservations / <availabilityId>
@ -268,18 +268,18 @@ proc updateAvailability(
trace "Creating new Availability" trace "Creating new Availability"
let res = await self.updateImpl(obj) let res = await self.updateImpl(obj)
# inform subscribers that Availability has been added # inform subscribers that Availability has been added
if onAvailabilityAdded =? self.onAvailabilityAdded: if OnAvailabilitySaved =? self.OnAvailabilitySaved:
# when chronos v4 is implemented, and OnAvailabilityAdded is annotated # when chronos v4 is implemented, and OnAvailabilitySaved is annotated
# with async:(raises:[]), we can remove this try/catch as we know, with # with async:(raises:[]), we can remove this try/catch as we know, with
# certainty, that nothing will be raised # certainty, that nothing will be raised
try: try:
await onAvailabilityAdded(obj) await OnAvailabilitySaved(obj)
except CancelledError as e: except CancelledError as e:
raise e raise e
except CatchableError as e: except CatchableError as e:
# we don't have any insight into types of exceptions that # we don't have any insight into types of exceptions that
# `onAvailabilityAdded` can raise because it is caller-defined # `OnAvailabilitySaved` can raise because it is caller-defined
warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg
return res return res
else: else:
return failure(err) return failure(err)
@ -300,21 +300,23 @@ proc updateAvailability(
let res = await self.updateImpl(obj) let res = await self.updateImpl(obj)
if oldAvailability.freeSize < obj.freeSize: # availability added if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or
oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or
oldAvailability.totalCollateral < obj.totalCollateral: # availability updated
# inform subscribers that Availability has been modified (with increased # inform subscribers that Availability has been modified (with increased
# size) # size)
if onAvailabilityAdded =? self.onAvailabilityAdded: if OnAvailabilitySaved =? self.OnAvailabilitySaved:
# when chronos v4 is implemented, and OnAvailabilityAdded is annotated # when chronos v4 is implemented, and OnAvailabilitySaved is annotated
# with async:(raises:[]), we can remove this try/catch as we know, with # with async:(raises:[]), we can remove this try/catch as we know, with
# certainty, that nothing will be raised # certainty, that nothing will be raised
try: try:
await onAvailabilityAdded(obj) await OnAvailabilitySaved(obj)
except CancelledError as e: except CancelledError as e:
raise e raise e
except CatchableError as e: except CatchableError as e:
# we don't have any insight into types of exceptions that # we don't have any insight into types of exceptions that
# `onAvailabilityAdded` can raise because it is caller-defined # `OnAvailabilitySaved` can raise because it is caller-defined
warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg
return res return res

View File

@ -103,7 +103,6 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
error "Error while waiting for expiry to lapse", error = e.msgDetail error "Error while waiting for expiry to lapse", error = e.msgDetail
data.cancelled = onCancelled() data.cancelled = onCancelled()
asyncSpawn data.cancelled
method onFulfilled*( method onFulfilled*(
agent: SalesAgent, requestId: RequestId agent: SalesAgent, requestId: RequestId

View File

@ -26,7 +26,7 @@ type
BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].} BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].}
OnStore* = proc( OnStore* = proc(
request: StorageRequest, slot: uint64, blocksCb: BlocksCb request: StorageRequest, slot: uint64, blocksCb: BlocksCb, isRepairing: bool
): Future[?!void] {.gcsafe, upraises: [].} ): Future[?!void] {.gcsafe, upraises: [].}
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {. OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
gcsafe, upraises: [] gcsafe, upraises: []

View File

@ -3,7 +3,6 @@ import std/tables
import pkg/chronos import pkg/chronos
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
import pkg/upraises
import ../errors import ../errors
import ../clock import ../clock
import ../logutils import ../logutils
@ -17,8 +16,9 @@ logScope:
topics = "marketplace slotqueue" topics = "marketplace slotqueue"
type type
OnProcessSlot* = OnProcessSlot* = proc(item: SlotQueueItem, done: Future[void]): Future[void] {.
proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].} gcsafe, async: (raises: [])
.}
# Non-ref obj copies value when assigned, preventing accidental modification # Non-ref obj copies value when assigned, preventing accidental modification
# of values which could cause an incorrect order (eg # of values which could cause an incorrect order (eg
@ -26,7 +26,7 @@ type
# but the heap invariant would no longer be honoured. When non-ref, the # but the heap invariant would no longer be honoured. When non-ref, the
# compiler can ensure that statement will fail). # compiler can ensure that statement will fail).
SlotQueueWorker = object SlotQueueWorker = object
doneProcessing*: Future[void] doneProcessing*: Future[void].Raising([])
SlotQueueItem* = object SlotQueueItem* = object
requestId: RequestId requestId: RequestId
@ -34,7 +34,7 @@ type
slotSize: uint64 slotSize: uint64
duration: uint64 duration: uint64
pricePerBytePerSecond: UInt256 pricePerBytePerSecond: UInt256
collateralPerByte: UInt256 collateral: UInt256 # Collateral computed
expiry: uint64 expiry: uint64
seen: bool seen: bool
@ -76,9 +76,6 @@ proc profitability(item: SlotQueueItem): UInt256 =
slotSize: item.slotSize, slotSize: item.slotSize,
).pricePerSlot ).pricePerSlot
proc collateralPerSlot(item: SlotQueueItem): UInt256 =
StorageAsk(collateralPerByte: item.collateralPerByte, slotSize: item.slotSize).collateralPerSlot
proc `<`*(a, b: SlotQueueItem): bool = proc `<`*(a, b: SlotQueueItem): bool =
# for A to have a higher priority than B (in a min queue), A must be less than # for A to have a higher priority than B (in a min queue), A must be less than
# B. # B.
@ -95,8 +92,8 @@ proc `<`*(a, b: SlotQueueItem): bool =
scoreA.addIf(a.profitability > b.profitability, 3) scoreA.addIf(a.profitability > b.profitability, 3)
scoreB.addIf(a.profitability < b.profitability, 3) scoreB.addIf(a.profitability < b.profitability, 3)
scoreA.addIf(a.collateralPerSlot < b.collateralPerSlot, 2) scoreA.addIf(a.collateral < b.collateral, 2)
scoreB.addIf(a.collateralPerSlot > b.collateralPerSlot, 2) scoreB.addIf(a.collateral > b.collateral, 2)
scoreA.addIf(a.expiry > b.expiry, 1) scoreA.addIf(a.expiry > b.expiry, 1)
scoreB.addIf(a.expiry < b.expiry, 1) scoreB.addIf(a.expiry < b.expiry, 1)
@ -129,7 +126,17 @@ proc new*(
# `newAsyncQueue` procedure # `newAsyncQueue` procedure
proc init(_: type SlotQueueWorker): SlotQueueWorker = proc init(_: type SlotQueueWorker): SlotQueueWorker =
SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing")) let workerFut = Future[void].Raising([]).init(
"slotqueue.worker.processing", {FutureFlag.OwnCancelSchedule}
)
workerFut.cancelCallback = proc(data: pointer) {.raises: [].} =
# this is equivalent to try: ... except CatchableError: ...
if not workerFut.finished:
workerFut.complete()
trace "Cancelling `SlotQueue` worker processing future"
SlotQueueWorker(doneProcessing: workerFut)
proc init*( proc init*(
_: type SlotQueueItem, _: type SlotQueueItem,
@ -137,6 +144,7 @@ proc init*(
slotIndex: uint16, slotIndex: uint16,
ask: StorageAsk, ask: StorageAsk,
expiry: uint64, expiry: uint64,
collateral: UInt256,
seen = false, seen = false,
): SlotQueueItem = ): SlotQueueItem =
SlotQueueItem( SlotQueueItem(
@ -145,25 +153,32 @@ proc init*(
slotSize: ask.slotSize, slotSize: ask.slotSize,
duration: ask.duration, duration: ask.duration,
pricePerBytePerSecond: ask.pricePerBytePerSecond, pricePerBytePerSecond: ask.pricePerBytePerSecond,
collateralPerByte: ask.collateralPerByte, collateral: collateral,
expiry: expiry, expiry: expiry,
seen: seen, seen: seen,
) )
proc init*( proc init*(
_: type SlotQueueItem, request: StorageRequest, slotIndex: uint16 _: type SlotQueueItem,
request: StorageRequest,
slotIndex: uint16,
collateral: UInt256,
): SlotQueueItem = ): SlotQueueItem =
SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry) SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry, collateral)
proc init*( proc init*(
_: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: uint64 _: type SlotQueueItem,
): seq[SlotQueueItem] = requestId: RequestId,
ask: StorageAsk,
expiry: uint64,
collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
if not ask.slots.inRange: if not ask.slots.inRange:
raise newException(SlotsOutOfRangeError, "Too many slots") raise newException(SlotsOutOfRangeError, "Too many slots")
var i = 0'u16 var i = 0'u16
proc initSlotQueueItem(): SlotQueueItem = proc initSlotQueueItem(): SlotQueueItem =
let item = SlotQueueItem.init(requestId, i, ask, expiry) let item = SlotQueueItem.init(requestId, i, ask, expiry, collateral)
inc i inc i
return item return item
@ -171,8 +186,10 @@ proc init*(
Rng.instance.shuffle(items) Rng.instance.shuffle(items)
return items return items
proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] = proc init*(
return SlotQueueItem.init(request.id, request.ask, request.expiry) _: type SlotQueueItem, request: StorageRequest, collateral: UInt256
): seq[SlotQueueItem] =
return SlotQueueItem.init(request.id, request.ask, request.expiry, collateral)
proc inRange*(val: SomeUnsignedInt): bool = proc inRange*(val: SomeUnsignedInt): bool =
val.uint16 in SlotQueueSize.low .. SlotQueueSize.high val.uint16 in SlotQueueSize.low .. SlotQueueSize.high
@ -234,25 +251,7 @@ proc unpause*(self: SlotQueue) =
# set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait() # set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait()
self.unpaused.fire() self.unpaused.fire()
proc populateItem*( proc push*(self: SlotQueue, item: SlotQueueItem): ?!void {.raises: [].} =
self: SlotQueue, requestId: RequestId, slotIndex: uint16
): ?SlotQueueItem =
trace "populate item, items in queue", len = self.queue.len
for item in self.queue.items:
trace "populate item search", itemRequestId = item.requestId, requestId
if item.requestId == requestId:
return some SlotQueueItem(
requestId: requestId,
slotIndex: slotIndex,
slotSize: item.slotSize,
duration: item.duration,
pricePerBytePerSecond: item.pricePerBytePerSecond,
collateralPerByte: item.collateralPerByte,
expiry: item.expiry,
)
return none SlotQueueItem
proc push*(self: SlotQueue, item: SlotQueueItem): ?!void =
logScope: logScope:
requestId = item.requestId requestId = item.requestId
slotIndex = item.slotIndex slotIndex = item.slotIndex
@ -430,7 +429,6 @@ proc run(self: SlotQueue) {.async: (raises: []).} =
let fut = self.dispatch(worker, item) let fut = self.dispatch(worker, item)
self.trackedFutures.track(fut) self.trackedFutures.track(fut)
asyncSpawn fut
await sleepAsync(1.millis) # poll await sleepAsync(1.millis) # poll
except CancelledError: except CancelledError:
@ -458,7 +456,6 @@ proc start*(self: SlotQueue) =
let fut = self.run() let fut = self.run()
self.trackedFutures.track(fut) self.trackedFutures.track(fut)
asyncSpawn fut
proc stop*(self: SlotQueue) {.async.} = proc stop*(self: SlotQueue) {.async.} =
if not self.running: if not self.running:
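A stand-alone sketch of the weighted ordering used by the `<` comparison above, with plain integers in place of the real SlotQueueItem fields. The weights (3 for profitability, 2 for the computed collateral, 1 for expiry) come from the hunk; the final step, where a higher score means higher priority and therefore "less than" in the min queue, is assumed from the surrounding comments.

type ExampleItem = object
  profitability: int
  collateral: int
  expiry: int

proc addIf(score: var int, condition: bool, weight: int) =
  if condition:
    score += weight

proc `<`(a, b: ExampleItem): bool =
  var scoreA, scoreB = 0
  scoreA.addIf(a.profitability > b.profitability, 3)
  scoreB.addIf(a.profitability < b.profitability, 3)
  scoreA.addIf(a.collateral < b.collateral, 2) # cheaper collateral ranks higher
  scoreB.addIf(a.collateral > b.collateral, 2)
  scoreA.addIf(a.expiry > b.expiry, 1) # later expiry leaves more time to fill
  scoreB.addIf(a.expiry < b.expiry, 1)
  scoreA > scoreB # higher score wins the min queue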

View File

@ -67,8 +67,11 @@ method run*(
return await reservations.release(reservation.id, reservation.availabilityId, bytes) return await reservations.release(reservation.id, reservation.availabilityId, bytes)
try: try:
let slotId = slotId(request.id, data.slotIndex)
let isRepairing = (await context.market.slotState(slotId)) == SlotState.Repair
trace "Starting download" trace "Starting download"
if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption: if err =? (await onStore(request, data.slotIndex, onBlocks, isRepairing)).errorOption:
return some State(SaleErrored(error: err, reprocessSlot: false)) return some State(SaleErrored(error: err, reprocessSlot: false))
trace "Download complete" trace "Download complete"

View File

@ -30,6 +30,7 @@ method run*(
): Future[?State] {.async: (raises: []).} = ): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market let market = SalesAgent(machine).context.market
without (request =? data.request): without (request =? data.request):
raiseAssert "Request not set" raiseAssert "Request not set"
@ -38,28 +39,20 @@ method run*(
slotIndex = data.slotIndex slotIndex = data.slotIndex
try: try:
let slotState = await market.slotState(slotId(data.requestId, data.slotIndex)) without collateral =? await market.slotCollateral(data.requestId, data.slotIndex),
let requestedCollateral = request.ask.collateralPerSlot err:
var collateral: UInt256 error "Failure attempting to fill slot: unable to calculate collateral",
error = err.msg
if slotState == SlotState.Repair: return some State(SaleErrored(error: err))
# When repairing the node gets "discount" on the collateral that it needs to
let repairRewardPercentage = (await market.repairRewardPercentage).u256
collateral =
requestedCollateral -
((requestedCollateral * repairRewardPercentage)).div(100.u256)
else:
collateral = requestedCollateral
debug "Filling slot" debug "Filling slot"
try: try:
await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
except SlotStateMismatchError as e:
debug "Slot is already filled, ignoring slot"
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
except MarketError as e: except MarketError as e:
if e.msg.contains "Slot is not free": return some State(SaleErrored(error: e))
debug "Slot is already filled, ignoring slot"
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
else:
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the SaleState # other CatchableErrors are handled "automatically" by the SaleState
return some State(SaleFilled()) return some State(SaleFilled())

View File

@ -44,12 +44,11 @@ method run*(
try: try:
trace "Reserving slot" trace "Reserving slot"
await market.reserveSlot(data.requestId, data.slotIndex) await market.reserveSlot(data.requestId, data.slotIndex)
except SlotReservationNotAllowedError as e:
debug "Slot cannot be reserved, ignoring", error = e.msg
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
except MarketError as e: except MarketError as e:
if e.msg.contains "SlotReservations_ReservationNotAllowed": return some State(SaleErrored(error: e))
debug "Slot cannot be reserved, ignoring", error = e.msg
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
else:
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the SaleState # other CatchableErrors are handled "automatically" by the SaleState
trace "Slot successfully reserved" trace "Slot successfully reserved"

View File

@ -315,13 +315,15 @@ proc new*[T, H](
cellSize = cellSize cellSize = cellSize
if (manifest.blocksCount mod manifest.numSlots) != 0: if (manifest.blocksCount mod manifest.numSlots) != 0:
trace "Number of blocks must be divisable by number of slots." const msg = "Number of blocks must be divisible by number of slots."
return failure("Number of blocks must be divisable by number of slots.") trace msg
return failure(msg)
let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize
if (manifest.blockSize mod cellSize) != 0.NBytes: if (manifest.blockSize mod cellSize) != 0.NBytes:
trace "Block size must be divisable by cell size." const msg = "Block size must be divisible by cell size."
return failure("Block size must be divisable by cell size.") trace msg
return failure(msg)
let let
numSlotBlocks = manifest.numSlotBlocks numSlotBlocks = manifest.numSlotBlocks
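A quick numeric illustration of the two divisibility checks above, with made-up values: 12 blocks spread over 3 slots and 64 KiB blocks with 2 KiB cells both divide evenly, and a remainder in either division is exactly what the builder now rejects with the corrected messages.

let
  blocksCount = 12
  numSlots = 3
  blockSize = 64 * 1024
  cellSize = 2 * 1024

assert blocksCount mod numSlots == 0 # 4 blocks per slot
assert blockSize mod cellSize == 0 # 32 cells per block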

View File

@ -38,7 +38,9 @@ type
AnyProof* = CircomProof AnyProof* = CircomProof
AnySampler* = Poseidon2Sampler AnySampler* = Poseidon2Sampler
# add any other generic type here, eg. Poseidon2Sampler | ReinforceConcreteSampler
AnyBuilder* = Poseidon2Builder AnyBuilder* = Poseidon2Builder
# add any other generic type here, eg. Poseidon2Builder | ReinforceConcreteBuilder
AnyProofInputs* = ProofInputs[Poseidon2Hash] AnyProofInputs* = ProofInputs[Poseidon2Hash]
Prover* = ref object of RootObj Prover* = ref object of RootObj

View File

@ -57,6 +57,8 @@ template withExceptions(body: untyped) =
raise newLPStreamEOFError() raise newLPStreamEOFError()
except AsyncStreamError as exc: except AsyncStreamError as exc:
raise newException(LPStreamError, exc.msg) raise newException(LPStreamError, exc.msg)
except CatchableError as exc:
raise newException(Defect, "Unexpected error in AsyncStreamWrapper", exc)
method readOnce*( method readOnce*(
self: AsyncStreamWrapper, pbytes: pointer, nbytes: int self: AsyncStreamWrapper, pbytes: pointer, nbytes: int
@ -74,11 +76,13 @@ method readOnce*(
proc completeWrite( proc completeWrite(
self: AsyncStreamWrapper, fut: Future[void], msgLen: int self: AsyncStreamWrapper, fut: Future[void], msgLen: int
): Future[void] {.async.} = ): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
withExceptions: withExceptions:
await fut await fut
method write*(self: AsyncStreamWrapper, msg: seq[byte]): Future[void] = method write*(
self: AsyncStreamWrapper, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
# Avoid a copy of msg being kept in the closure created by `{.async.}` as this # Avoid a copy of msg being kept in the closure created by `{.async.}` as this
# drives up memory usage # drives up memory usage

View File

@ -67,13 +67,9 @@ method atEof*(self: StoreStream): bool =
self.offset >= self.size self.offset >= self.size
type LPStreamReadError* = object of LPStreamError type LPStreamReadError* = object of LPStreamError
par*: ref CatchableError
proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError = proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError =
var w = newException(LPStreamReadError, "Read stream failed") newException(LPStreamReadError, "Read stream failed", p)
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
w.par = p
result = w
method readOnce*( method readOnce*(
self: StoreStream, pbytes: pointer, nbytes: int self: StoreStream, pbytes: pointer, nbytes: int

View File

@ -23,3 +23,16 @@ proc freeDoubleArray*(
# Free outer array # Free outer array
if not arr.isNil: if not arr.isNil:
deallocShared(arr) deallocShared(arr)
proc makeUncheckedArray*(
data: ref seq[seq[byte]]
): ptr UncheckedArray[ptr UncheckedArray[byte]] =
result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](alloc0(
sizeof(ptr UncheckedArray[byte]) * data[].len
))
for i, blk in data[]:
if blk.len > 0:
result[i] = cast[ptr UncheckedArray[byte]](addr blk[0])
else:
result[i] = nil

View File

@ -74,7 +74,6 @@ proc scheduler(machine: Machine) {.async: (raises: []).} =
debug "enter state", state = fromState & " => " & $machine.state debug "enter state", state = fromState & " => " & $machine.state
running = machine.run(machine.state) running = machine.run(machine.state)
machine.trackedFutures.track(running) machine.trackedFutures.track(running)
asyncSpawn running
except CancelledError: except CancelledError:
break # do not propagate bc it is asyncSpawned break # do not propagate bc it is asyncSpawned
@ -88,7 +87,6 @@ proc start*(machine: Machine, initialState: State) =
machine.started = true machine.started = true
let fut = machine.scheduler() let fut = machine.scheduler()
machine.trackedFutures.track(fut) machine.trackedFutures.track(fut)
asyncSpawn fut
machine.schedule(Event.transition(machine.state, initialState)) machine.schedule(Event.transition(machine.state, initialState))
proc stop*(machine: Machine) {.async.} = proc stop*(machine: Machine) {.async.} =

View File

@ -50,7 +50,6 @@ method start*(
timer.callback = callback timer.callback = callback
timer.interval = interval timer.interval = interval
timer.loopFuture = timerLoop(timer) timer.loopFuture = timerLoop(timer)
asyncSpawn timer.loopFuture
method stop*(timer: Timer) {.async, base.} = method stop*(timer: Timer) {.async, base.} =
if timer.loopFuture != nil and not timer.loopFuture.finished: if timer.loopFuture != nil and not timer.loopFuture.finished:

View File

@ -5,9 +5,11 @@ import ../logutils
{.push raises: [].} {.push raises: [].}
type TrackedFutures* = ref object type
futures: Table[uint, FutureBase] TrackedFuture = Future[void].Raising([])
cancelling: bool TrackedFutures* = ref object
futures: Table[uint, TrackedFuture]
cancelling: bool
logScope: logScope:
topics = "trackable futures" topics = "trackable futures"
@ -15,15 +17,18 @@ logScope:
proc len*(self: TrackedFutures): int = proc len*(self: TrackedFutures): int =
self.futures.len self.futures.len
proc removeFuture(self: TrackedFutures, future: FutureBase) = proc removeFuture(self: TrackedFutures, future: TrackedFuture) =
if not self.cancelling and not future.isNil: if not self.cancelling and not future.isNil:
self.futures.del(future.id) self.futures.del(future.id)
proc track*[T](self: TrackedFutures, fut: Future[T]) = proc track*(self: TrackedFutures, fut: TrackedFuture) =
if self.cancelling: if self.cancelling:
return return
self.futures[fut.id] = FutureBase(fut) if fut.finished:
return
self.futures[fut.id] = fut
proc cb(udata: pointer) = proc cb(udata: pointer) =
self.removeFuture(fut) self.removeFuture(fut)
@ -33,13 +38,8 @@ proc track*[T](self: TrackedFutures, fut: Future[T]) =
proc cancelTracked*(self: TrackedFutures) {.async: (raises: []).} = proc cancelTracked*(self: TrackedFutures) {.async: (raises: []).} =
self.cancelling = true self.cancelling = true
trace "cancelling tracked futures" trace "cancelling tracked futures", len = self.futures.len
let cancellations = self.futures.values.toSeq.mapIt(it.cancelAndWait())
var cancellations: seq[FutureBase]
for future in self.futures.values:
if not future.isNil and not future.finished:
cancellations.add future.cancelAndWait()
await noCancel allFutures cancellations await noCancel allFutures cancellations
self.futures.clear() self.futures.clear()
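A minimal, self-contained version of the tracking scheme above, using only chronos and the standard library; the Tracker type and proc names are stand-ins. Futures are keyed by their chronos id, remove themselves on completion through a callback, and whatever is still pending is cancelled in one sweep at shutdown.

import std/[tables, sequtils]
import pkg/chronos

type
  TrackedFuture = Future[void].Raising([])
  Tracker = ref object
    futures: Table[uint, TrackedFuture]

proc track(self: Tracker, fut: TrackedFuture) =
  if fut.finished:
    return
  self.futures[fut.id] = fut
  proc cb(udata: pointer) =
    self.futures.del(fut.id)
  fut.addCallback(cb)

proc cancelTracked(self: Tracker) {.async: (raises: []).} =
  let cancellations = self.futures.values.toSeq.mapIt(it.cancelAndWait())
  await noCancel allFutures cancellations
  self.futures.clear()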

View File

@ -142,7 +142,6 @@ proc start*(validation: Validation) {.async.} =
await validation.subscribeSlotFilled() await validation.subscribeSlotFilled()
await validation.restoreHistoricalState() await validation.restoreHistoricalState()
validation.running = validation.run() validation.running = validation.run()
asyncSpawn validation.running
proc stop*(validation: Validation) {.async.} = proc stop*(validation: Validation) {.async.} =
if not validation.running.isNil and not validation.running.finished: if not validation.running.isNil and not validation.running.finished:

View File

@ -56,7 +56,7 @@ in pkgs.gcc13Stdenv.mkDerivation rec {
fakeCargo fakeCargo
]; ];
# Disable CPU optmizations that make binary not portable. # Disable CPU optimizations that make binary not portable.
NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}"; NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}";
# Avoid Nim cache permission errors. # Avoid Nim cache permission errors.
XDG_CACHE_HOME = "/tmp"; XDG_CACHE_HOME = "/tmp";

View File

@ -1,3 +1,3 @@
import pkg/asynctest/chronos/unittest import pkg/asynctest/chronos/unittest2
export unittest export unittest2

View File

@ -84,12 +84,12 @@ asyncchecksuite "Block Advertising and Discovery":
blockDiscovery.publishBlockProvideHandler = proc( blockDiscovery.publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[void] {.async, gcsafe.} = ): Future[void] {.async: (raises: [CancelledError]).} =
return return
blockDiscovery.findBlockProvidersHandler = proc( blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async.} = ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
await engine.resolveBlocks(blocks.filterIt(it.cid == cid)) await engine.resolveBlocks(blocks.filterIt(it.cid == cid))
await allFuturesThrowing(allFinished(pendingBlocks)) await allFuturesThrowing(allFinished(pendingBlocks))
@ -97,17 +97,17 @@ asyncchecksuite "Block Advertising and Discovery":
await engine.stop() await engine.stop()
test "Should advertise trees": test "Should advertise trees":
let let cids = @[manifest.treeCid]
cids = @[manifest.treeCid] var advertised = initTable.collect:
advertised = initTable.collect: for cid in cids:
for cid in cids: {cid: newFuture[void]()}
{cid: newFuture[void]()}
blockDiscovery.publishBlockProvideHandler = proc( blockDiscovery.publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
) {.async.} = ) {.async: (raises: [CancelledError]).} =
if cid in advertised and not advertised[cid].finished(): advertised.withValue(cid, fut):
advertised[cid].complete() if not fut[].finished:
fut[].complete()
await engine.start() await engine.start()
await allFuturesThrowing(allFinished(toSeq(advertised.values))) await allFuturesThrowing(allFinished(toSeq(advertised.values)))
@ -118,7 +118,7 @@ asyncchecksuite "Block Advertising and Discovery":
blockDiscovery.publishBlockProvideHandler = proc( blockDiscovery.publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
) {.async.} = ) {.async: (raises: [CancelledError]).} =
check: check:
cid notin blockCids cid notin blockCids
@ -138,7 +138,7 @@ asyncchecksuite "Block Advertising and Discovery":
blockDiscovery.findBlockProvidersHandler = proc( blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] = ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check false check false
await engine.start() await engine.start()
@ -221,17 +221,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[void] {.async.} = ) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[1].peerInfo.signedPeerRecord advertised[cid] = switch[1].peerInfo.signedPeerRecord
MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[void] {.async.} = ) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[2].peerInfo.signedPeerRecord advertised[cid] = switch[2].peerInfo.signedPeerRecord
MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[void] {.async.} = ) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[3].peerInfo.signedPeerRecord advertised[cid] = switch[3].peerInfo.signedPeerRecord
discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid)
@ -266,23 +266,21 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async.} = ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
if cid in advertised: advertised.withValue(cid, val):
result.add(advertised[cid]) result.add(val[])
let futs = collect(newSeq): let futs = collect(newSeq):
for m in mBlocks[0 .. 2]: for m in mBlocks[0 .. 2]:
blockexc[0].engine.requestBlock(m.cid) blockexc[0].engine.requestBlock(m.cid)
await allFuturesThrowing( await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds)
switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds)
)
.wait(10.seconds)
await allFutures(futs).wait(10.seconds) await allFutures(futs).wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds)
.wait(10.seconds) await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds)
test "E2E - Should advertise and discover blocks with peers already connected": test "E2E - Should advertise and discover blocks with peers already connected":
# Distribute the blocks amongst 1..3 # Distribute the blocks amongst 1..3
@ -292,17 +290,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[void] {.async.} = ) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[1].peerInfo.signedPeerRecord advertised[cid] = switch[1].peerInfo.signedPeerRecord
MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[void] {.async.} = ) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[2].peerInfo.signedPeerRecord advertised[cid] = switch[2].peerInfo.signedPeerRecord
MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[void] {.async.} = ) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[3].peerInfo.signedPeerRecord advertised[cid] = switch[3].peerInfo.signedPeerRecord
discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid)
@ -337,18 +335,16 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async.} = ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
if cid in advertised: advertised.withValue(cid, val):
return @[advertised[cid]] return @[val[]]
let futs = mBlocks[0 .. 2].mapIt(blockexc[0].engine.requestBlock(it.cid)) let futs = mBlocks[0 .. 2].mapIt(blockexc[0].engine.requestBlock(it.cid))
await allFuturesThrowing( await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds)
switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds)
)
.wait(10.seconds)
await allFutures(futs).wait(10.seconds) await allFutures(futs).wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds)
.wait(10.seconds) await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds)

View File

@ -68,7 +68,7 @@ asyncchecksuite "Test Discovery Engine":
blockDiscovery.findBlockProvidersHandler = proc( blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
pendingBlocks.resolve( pendingBlocks.resolve(
blocks.filterIt(it.cid == cid).mapIt( blocks.filterIt(it.cid == cid).mapIt(
BlockDelivery(blk: it, address: it.address) BlockDelivery(blk: it, address: it.address)
@ -94,7 +94,7 @@ asyncchecksuite "Test Discovery Engine":
blockDiscovery.findBlockProvidersHandler = proc( blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check cid == blocks[0].cid check cid == blocks[0].cid
if not want.finished: if not want.finished:
want.complete() want.complete()
@ -122,7 +122,7 @@ asyncchecksuite "Test Discovery Engine":
var pendingCids = newSeq[Cid]() var pendingCids = newSeq[Cid]()
blockDiscovery.findBlockProvidersHandler = proc( blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check cid in pendingCids check cid in pendingCids
pendingCids.keepItIf(it != cid) pendingCids.keepItIf(it != cid)
check peerStore.len < minPeers check peerStore.len < minPeers
@ -159,12 +159,12 @@ asyncchecksuite "Test Discovery Engine":
discoveryLoopSleep = 100.millis, discoveryLoopSleep = 100.millis,
concurrentDiscReqs = 2, concurrentDiscReqs = 2,
) )
reqs = newFuture[void]() reqs = Future[void].Raising([CancelledError]).init()
count = 0 count = 0
blockDiscovery.findBlockProvidersHandler = proc( blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.gcsafe, async.} = ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check cid == blocks[0].cid check cid == blocks[0].cid
if count > 0: if count > 0:
check false check false

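Editor's note: the `reqs` future above is built with chronos' typed-raises constructor. A minimal sketch of that construction, assuming the chronos v4 `Raising` API used elsewhere in this diff (the names `demo` and `reqs` are illustrative):

import pkg/chronos

proc demo() {.async.} =
  # a future whose only allowed failure is cancellation; a handler elsewhere
  # would normally complete it, here it is completed inline for brevity
  let reqs = Future[void].Raising([CancelledError]).init()
  reqs.complete()
  await reqs

waitFor demo()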
View File

@ -34,7 +34,7 @@ asyncchecksuite "Advertiser":
advertised = newSeq[Cid]() advertised = newSeq[Cid]()
blockDiscovery.publishBlockProvideHandler = proc( blockDiscovery.publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid d: MockDiscovery, cid: Cid
) {.async, gcsafe.} = ) {.async: (raises: [CancelledError]), gcsafe.} =
advertised.add(cid) advertised.add(cid)
advertiser = Advertiser.new(localStore, blockDiscovery) advertiser = Advertiser.new(localStore, blockDiscovery)

View File

@ -22,7 +22,7 @@ import ../../examples
const NopSendWantCancellationsProc = proc( const NopSendWantCancellationsProc = proc(
id: PeerId, addresses: seq[BlockAddress] id: PeerId, addresses: seq[BlockAddress]
) {.gcsafe, async.} = ) {.async: (raises: [CancelledError]).} =
discard discard
asyncchecksuite "NetworkStore engine basic": asyncchecksuite "NetworkStore engine basic":
@ -66,20 +66,17 @@ asyncchecksuite "NetworkStore engine basic":
wantType: WantType = WantType.WantHave, wantType: WantType = WantType.WantHave,
full: bool = false, full: bool = false,
sendDontHave: bool = false, sendDontHave: bool = false,
) {.gcsafe, async.} = ) {.async: (raises: [CancelledError]).} =
check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt($it.cid).sorted check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt($it.cid).sorted
done.complete() done.complete()
let let
network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList)) network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList))
localStore = CacheStore.new(blocks.mapIt(it)) localStore = CacheStore.new(blocks.mapIt(it))
discovery = DiscoveryEngine.new( discovery = DiscoveryEngine.new(
localStore, peerStore, network, blockDiscovery, pendingBlocks localStore, peerStore, network, blockDiscovery, pendingBlocks
) )
advertiser = Advertiser.new(localStore, blockDiscovery) advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new( engine = BlockExcEngine.new(
localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks
) )
@ -93,7 +90,9 @@ asyncchecksuite "NetworkStore engine basic":
test "Should send account to new peers": test "Should send account to new peers":
let pricing = Pricing.example let pricing = Pricing.example
proc sendAccount(peer: PeerId, account: Account) {.gcsafe, async.} = proc sendAccount(
peer: PeerId, account: Account
) {.async: (raises: [CancelledError]).} =
check account.address == pricing.address check account.address == pricing.address
done.complete() done.complete()
@ -186,7 +185,9 @@ asyncchecksuite "NetworkStore engine handlers":
done = newFuture[void]() done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid)) wantList = makeWantList(blocks.mapIt(it.cid))
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) check presence.mapIt(it.address) == wantList.entries.mapIt(it.address)
done.complete() done.complete()
@ -203,7 +204,9 @@ asyncchecksuite "NetworkStore engine handlers":
done = newFuture[void]() done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true) wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true)
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) check presence.mapIt(it.address) == wantList.entries.mapIt(it.address)
for p in presence: for p in presence:
check: check:
@ -222,7 +225,9 @@ asyncchecksuite "NetworkStore engine handlers":
done = newFuture[void]() done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true) wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true)
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
for p in presence: for p in presence:
if p.address.cidOrTreeCid != blocks[0].cid and if p.address.cidOrTreeCid != blocks[0].cid and
p.address.cidOrTreeCid != blocks[1].cid: p.address.cidOrTreeCid != blocks[1].cid:
@ -266,19 +271,21 @@ asyncchecksuite "NetworkStore engine handlers":
peerContext.account = account.some peerContext.account = account.some
peerContext.blocks = blocks.mapIt( peerContext.blocks = blocks.mapIt(
(it.address, Presence(address: it.address, price: rand(uint16).u256)) (it.address, Presence(address: it.address, price: rand(uint16).u256, have: true))
).toTable ).toTable
engine.network = BlockExcNetwork( engine.network = BlockExcNetwork(
request: BlockExcRequest( request: BlockExcRequest(
sendPayment: proc(receiver: PeerId, payment: SignedState) {.gcsafe, async.} = sendPayment: proc(
receiver: PeerId, payment: SignedState
) {.async: (raises: [CancelledError]).} =
let let
amount = blocks.mapIt(peerContext.blocks[it.address].price).foldl(a + b) amount =
blocks.mapIt(peerContext.blocks[it.address].catch.get.price).foldl(a + b)
balances = !payment.state.outcome.balances(Asset) balances = !payment.state.outcome.balances(Asset)
check receiver == peerId check receiver == peerId
check balances[account.address.toDestination] == amount check balances[account.address.toDestination].catch.get == amount
done.complete(), done.complete(),
# Install NOP for want list cancellations so they don't cause a crash # Install NOP for want list cancellations so they don't cause a crash
@ -286,10 +293,12 @@ asyncchecksuite "NetworkStore engine handlers":
) )
) )
let requestedBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.address))
await engine.blocksDeliveryHandler( await engine.blocksDeliveryHandler(
peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address)) peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address))
) )
await done.wait(100.millis) await done.wait(100.millis)
await allFuturesThrowing(requestedBlocks).wait(100.millis)
test "Should handle block presence": test "Should handle block presence":
var handles: var handles:
@ -303,7 +312,7 @@ asyncchecksuite "NetworkStore engine handlers":
wantType: WantType = WantType.WantHave, wantType: WantType = WantType.WantHave,
full: bool = false, full: bool = false,
sendDontHave: bool = false, sendDontHave: bool = false,
) {.gcsafe, async.} = ) {.async: (raises: [CancelledError]).} =
engine.pendingBlocks.resolve( engine.pendingBlocks.resolve(
blocks.filterIt(it.address in addresses).mapIt( blocks.filterIt(it.address in addresses).mapIt(
BlockDelivery(blk: it, address: it.address) BlockDelivery(blk: it, address: it.address)
@ -340,9 +349,9 @@ asyncchecksuite "NetworkStore engine handlers":
proc sendWantCancellations( proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress] id: PeerId, addresses: seq[BlockAddress]
) {.gcsafe, async.} = ) {.async: (raises: [CancelledError]).} =
for address in addresses: for address in addresses:
cancellations[address].complete() cancellations[address].catch.expect("address should exist").complete()
engine.network = BlockExcNetwork( engine.network = BlockExcNetwork(
request: BlockExcRequest(sendWantCancellations: sendWantCancellations) request: BlockExcRequest(sendWantCancellations: sendWantCancellations)
@ -416,7 +425,7 @@ asyncchecksuite "Block Download":
wantType: WantType = WantType.WantHave, wantType: WantType = WantType.WantHave,
full: bool = false, full: bool = false,
sendDontHave: bool = false, sendDontHave: bool = false,
) {.gcsafe, async.} = ) {.async: (raises: [CancelledError]).} =
check wantType == WantHave check wantType == WantHave
check not engine.pendingBlocks.isInFlight(address) check not engine.pendingBlocks.isInFlight(address)
check engine.pendingBlocks.retries(address) == retries check engine.pendingBlocks.retries(address) == retries
@ -433,7 +442,7 @@ asyncchecksuite "Block Download":
discard (await pending).tryGet() discard (await pending).tryGet()
test "Should retry block request": test "Should retry block request":
let var
address = BlockAddress.init(blocks[0].cid) address = BlockAddress.init(blocks[0].cid)
steps = newAsyncEvent() steps = newAsyncEvent()
@ -445,7 +454,7 @@ asyncchecksuite "Block Download":
wantType: WantType = WantType.WantHave, wantType: WantType = WantType.WantHave,
full: bool = false, full: bool = false,
sendDontHave: bool = false, sendDontHave: bool = false,
) {.gcsafe, async.} = ) {.async: (raises: [CancelledError]).} =
case wantType case wantType
of WantHave: of WantHave:
check engine.pendingBlocks.isInFlight(address) == false check engine.pendingBlocks.isInFlight(address) == false
@ -467,7 +476,7 @@ asyncchecksuite "Block Download":
let pending = engine.requestBlock(address) let pending = engine.requestBlock(address)
await steps.wait() await steps.wait()
# add blocks presence # add blocks presence
peerCtx.blocks = blocks.mapIt( peerCtx.blocks = blocks.mapIt(
(it.address, Presence(address: it.address, have: true, price: UInt256.example)) (it.address, Presence(address: it.address, have: true, price: UInt256.example))
).toTable ).toTable
@ -493,7 +502,7 @@ asyncchecksuite "Block Download":
wantType: WantType = WantType.WantHave, wantType: WantType = WantType.WantHave,
full: bool = false, full: bool = false,
sendDontHave: bool = false, sendDontHave: bool = false,
) {.gcsafe, async.} = ) {.async: (raises: [CancelledError]).} =
done.complete() done.complete()
engine.pendingBlocks.blockRetries = 10 engine.pendingBlocks.blockRetries = 10
@ -573,7 +582,7 @@ asyncchecksuite "Task Handler":
test "Should send want-blocks in priority order": test "Should send want-blocks in priority order":
proc sendBlocksDelivery( proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery] id: PeerId, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, async.} = ) {.async: (raises: [CancelledError]).} =
check blocksDelivery.len == 2 check blocksDelivery.len == 2
check: check:
blocksDelivery[1].address == blocks[0].address blocksDelivery[1].address == blocks[0].address
@ -610,7 +619,7 @@ asyncchecksuite "Task Handler":
test "Should set in-flight for outgoing blocks": test "Should set in-flight for outgoing blocks":
proc sendBlocksDelivery( proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery] id: PeerId, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, async.} = ) {.async: (raises: [CancelledError]).} =
check peersCtx[0].peerWants[0].inFlight check peersCtx[0].peerWants[0].inFlight
for blk in blocks: for blk in blocks:
@ -649,7 +658,9 @@ asyncchecksuite "Task Handler":
let missing = @[Block.new("missing".toBytes).tryGet()] let missing = @[Block.new("missing".toBytes).tryGet()]
let price = (!engine.pricing).price let price = (!engine.pricing).price
proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = proc sendPresence(
id: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
check presence.mapIt(!Presence.init(it)) == check presence.mapIt(!Presence.init(it)) ==
@[ @[
Presence(address: present[0].address, have: true, price: price), Presence(address: present[0].address, have: true, price: price),

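Editor's note: the payment assertions above use questionable's `catch` to turn expressions that may raise (table indexing, balance lookups) into a Result before unwrapping, which keeps the raises-free callbacks honest. A small illustrative sketch with a hypothetical table (`prices`):

import std/tables
import pkg/questionable/results

let prices = {"a": 1}.toTable
# `catch` evaluates the indexing expression and converts a raised KeyError
# into a Result error; `get` then unwraps the successful value
let price = prices["a"].catch.get
doAssert price == 1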
View File

@ -1,10 +1,10 @@
import std/unittest import pkg/unittest2
import pkg/codex/stores import pkg/codex/stores
import ../../examples import ../../examples
import ../../helpers import ../../helpers
checksuite "engine payments": suite "Engine payments":
let address = EthAddress.example let address = EthAddress.example
let amount = 42.u256 let amount = 42.u256

View File

@ -6,7 +6,7 @@ import ../../../asynctest
import ../../examples import ../../examples
import ../../helpers import ../../helpers
checksuite "account protobuf messages": suite "account protobuf messages":
let account = Account(address: EthAddress.example) let account = Account(address: EthAddress.example)
let message = AccountMessage.init(account) let message = AccountMessage.init(account)
@ -21,7 +21,7 @@ checksuite "account protobuf messages":
incorrect.address.del(0) incorrect.address.del(0)
check Account.init(incorrect).isNone check Account.init(incorrect).isNone
checksuite "channel update messages": suite "channel update messages":
let state = SignedState.example let state = SignedState.example
let update = StateChannelUpdate.init(state) let update = StateChannelUpdate.init(state)

View File

@ -6,7 +6,7 @@ import ../../../asynctest
import ../../examples import ../../examples
import ../../helpers import ../../helpers
checksuite "block presence protobuf messages": suite "block presence protobuf messages":
let let
cid = Cid.example cid = Cid.example
address = BlockAddress(leaf: false, cid: cid) address = BlockAddress(leaf: false, cid: cid)

View File

@ -26,7 +26,7 @@ asyncchecksuite "Network - Handlers":
blocks: seq[bt.Block] blocks: seq[bt.Block]
done: Future[void] done: Future[void]
proc getConn(): Future[Connection] {.async.} = proc getConn(): Future[Connection] {.async: (raises: [CancelledError]).} =
return Connection(buffer) return Connection(buffer)
setup: setup:
@ -45,7 +45,7 @@ asyncchecksuite "Network - Handlers":
discard await networkPeer.connect() discard await networkPeer.connect()
test "Want List handler": test "Want List handler":
proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} = proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
# check that we got the correct amount of entries # check that we got the correct amount of entries
check wantList.entries.len == 4 check wantList.entries.len == 4
@ -72,7 +72,7 @@ asyncchecksuite "Network - Handlers":
test "Blocks Handler": test "Blocks Handler":
proc blocksDeliveryHandler( proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery] peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, async.} = ) {.async: (raises: []).} =
check blocks == blocksDelivery.mapIt(it.blk) check blocks == blocksDelivery.mapIt(it.blk)
done.complete() done.complete()
@ -85,7 +85,9 @@ asyncchecksuite "Network - Handlers":
await done.wait(500.millis) await done.wait(500.millis)
test "Presence Handler": test "Presence Handler":
proc presenceHandler(peer: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = proc presenceHandler(
peer: PeerId, presence: seq[BlockPresence]
) {.async: (raises: []).} =
for b in blocks: for b in blocks:
check: check:
b.address in presence b.address in presence
@ -105,7 +107,7 @@ asyncchecksuite "Network - Handlers":
test "Handles account messages": test "Handles account messages":
let account = Account(address: EthAddress.example) let account = Account(address: EthAddress.example)
proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} = proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} =
check received == account check received == account
done.complete() done.complete()
@ -119,7 +121,7 @@ asyncchecksuite "Network - Handlers":
test "Handles payment messages": test "Handles payment messages":
let payment = SignedState.example let payment = SignedState.example
proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} = proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} =
check received == payment check received == payment
done.complete() done.complete()
@ -165,7 +167,7 @@ asyncchecksuite "Network - Senders":
await allFuturesThrowing(switch1.stop(), switch2.stop()) await allFuturesThrowing(switch1.stop(), switch2.stop())
test "Send want list": test "Send want list":
proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} = proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
# check that we got the correct amount of entries # check that we got the correct amount of entries
check wantList.entries.len == 4 check wantList.entries.len == 4
@ -195,7 +197,7 @@ asyncchecksuite "Network - Senders":
test "send blocks": test "send blocks":
proc blocksDeliveryHandler( proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery] peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, async.} = ) {.async: (raises: []).} =
check blocks == blocksDelivery.mapIt(it.blk) check blocks == blocksDelivery.mapIt(it.blk)
done.complete() done.complete()
@ -207,7 +209,9 @@ asyncchecksuite "Network - Senders":
await done.wait(500.millis) await done.wait(500.millis)
test "send presence": test "send presence":
proc presenceHandler(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async.} = proc presenceHandler(
peer: PeerId, precense: seq[BlockPresence]
) {.async: (raises: []).} =
for b in blocks: for b in blocks:
check: check:
b.address in precense b.address in precense
@ -226,7 +230,7 @@ asyncchecksuite "Network - Senders":
test "send account": test "send account":
let account = Account(address: EthAddress.example) let account = Account(address: EthAddress.example)
proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} = proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} =
check received == account check received == account
done.complete() done.complete()
@ -238,7 +242,7 @@ asyncchecksuite "Network - Senders":
test "send payment": test "send payment":
let payment = SignedState.example let payment = SignedState.example
proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} = proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} =
check received == payment check received == payment
done.complete() done.complete()
@ -276,7 +280,7 @@ asyncchecksuite "Network - Test Limits":
let account = Account(address: EthAddress.example) let account = Account(address: EthAddress.example)
network2.handlers.onAccount = proc( network2.handlers.onAccount = proc(
peer: PeerId, received: Account peer: PeerId, received: Account
) {.gcsafe, async.} = ) {.async: (raises: []).} =
check false check false
let fut = network1.send( let fut = network1.send(

View File

@ -1,7 +1,7 @@
import std/sugar import std/sugar
import std/sequtils import std/sequtils
import std/unittest
import pkg/unittest2
import pkg/libp2p import pkg/libp2p
import pkg/codex/blockexchange/peers import pkg/codex/blockexchange/peers
@ -11,7 +11,7 @@ import pkg/codex/blockexchange/protobuf/presence
import ../helpers import ../helpers
import ../examples import ../examples
checksuite "Peer Context Store": suite "Peer Context Store":
var var
store: PeerCtxStore store: PeerCtxStore
peerCtx: BlockExcPeerCtx peerCtx: BlockExcPeerCtx
@ -31,7 +31,7 @@ checksuite "Peer Context Store":
test "Should get peer": test "Should get peer":
check store.get(peerCtx.id) == peerCtx check store.get(peerCtx.id) == peerCtx
checksuite "Peer Context Store Peer Selection": suite "Peer Context Store Peer Selection":
var var
store: PeerCtxStore store: PeerCtxStore
peerCtxs: seq[BlockExcPeerCtx] peerCtxs: seq[BlockExcPeerCtx]

View File

@ -10,7 +10,7 @@ import pkg/codex/blockexchange
import ../helpers import ../helpers
import ../../asynctest import ../../asynctest
checksuite "Pending Blocks": suite "Pending Blocks":
test "Should add want handle": test "Should add want handle":
let let
pendingBlocks = PendingBlocksManager.new() pendingBlocks = PendingBlocksManager.new()

View File

@ -21,7 +21,7 @@ proc new*(
var consumed = 0 var consumed = 0
proc reader( proc reader(
data: ChunkBuffer, len: int data: ChunkBuffer, len: int
): Future[int] {.async, gcsafe, raises: [Defect].} = ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
if consumed >= dataset.len: if consumed >= dataset.len:
return 0 return 0

View File

@ -14,29 +14,42 @@ import pkg/codex/discovery
import pkg/contractabi/address as ca import pkg/contractabi/address as ca
type MockDiscovery* = ref object of Discovery type MockDiscovery* = ref object of Discovery
findBlockProvidersHandler*: findBlockProvidersHandler*: proc(
proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.gcsafe.} d: MockDiscovery, cid: Cid
publishBlockProvideHandler*: proc(d: MockDiscovery, cid: Cid): Future[void] {.gcsafe.} ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).}
findHostProvidersHandler*:
proc(d: MockDiscovery, host: ca.Address): Future[seq[SignedPeerRecord]] {.gcsafe.} publishBlockProvideHandler*:
publishHostProvideHandler*: proc(d: MockDiscovery, cid: Cid): Future[void] {.async: (raises: [CancelledError]).}
proc(d: MockDiscovery, host: ca.Address): Future[void] {.gcsafe.}
findHostProvidersHandler*: proc(
d: MockDiscovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).}
publishHostProvideHandler*: proc(d: MockDiscovery, host: ca.Address): Future[void] {.
async: (raises: [CancelledError])
.}
proc new*(T: type MockDiscovery): MockDiscovery = proc new*(T: type MockDiscovery): MockDiscovery =
MockDiscovery() MockDiscovery()
proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} = proc findPeer*(
d: Discovery, peerId: PeerId
): Future[?PeerRecord] {.async: (raises: [CancelledError]).} =
## mock find a peer - always return none ## mock find a peer - always return none
## ##
return none(PeerRecord) return none(PeerRecord)
method find*(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = method find*(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
if isNil(d.findBlockProvidersHandler): if isNil(d.findBlockProvidersHandler):
return return
return await d.findBlockProvidersHandler(d, cid) return await d.findBlockProvidersHandler(d, cid)
method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} = method provide*(
d: MockDiscovery, cid: Cid
): Future[void] {.async: (raises: [CancelledError]).} =
if isNil(d.publishBlockProvideHandler): if isNil(d.publishBlockProvideHandler):
return return
@ -44,13 +57,15 @@ method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
method find*( method find*(
d: MockDiscovery, host: ca.Address d: MockDiscovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async.} = ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
if isNil(d.findHostProvidersHandler): if isNil(d.findHostProvidersHandler):
return return
return await d.findHostProvidersHandler(d, host) return await d.findHostProvidersHandler(d, host)
method provide*(d: MockDiscovery, host: ca.Address): Future[void] {.async.} = method provide*(
d: MockDiscovery, host: ca.Address
): Future[void] {.async: (raises: [CancelledError]).} =
if isNil(d.publishHostProvideHandler): if isNil(d.publishHostProvideHandler):
return return

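Editor's note: the MockDiscovery handler fields above now carry the same `{.async: (raises: [CancelledError]).}` annotation as the methods that invoke them, so a handler that could raise anything else should be rejected by the compiler. A reduced sketch of that pattern, with hypothetical names (`Mock`, `onPing`, `ping`):

import pkg/chronos

type Mock = ref object
  # handler field with a typed raises list matching its caller
  onPing: proc(id: int): Future[void] {.async: (raises: [CancelledError]).}

proc ping(m: Mock, id: int) {.async: (raises: [CancelledError]).} =
  if not isNil(m.onPing):
    await m.onPing(id)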
View File

@ -46,7 +46,8 @@ type
subscriptions: Subscriptions subscriptions: Subscriptions
config*: MarketplaceConfig config*: MarketplaceConfig
canReserveSlot*: bool canReserveSlot*: bool
reserveSlotThrowError*: ?(ref MarketError) errorOnReserveSlot*: ?(ref MarketError)
errorOnFillSlot*: ?(ref CatchableError)
clock: ?Clock clock: ?Clock
Fulfillment* = object Fulfillment* = object
@ -138,22 +139,35 @@ proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket =
signer: Address.example, config: config, canReserveSlot: true, clock: clock signer: Address.example, config: config, canReserveSlot: true, clock: clock
) )
method loadConfig*(
market: MockMarket
): Future[?!void] {.async: (raises: [CancelledError]).} =
discard
method getSigner*(market: MockMarket): Future[Address] {.async.} = method getSigner*(market: MockMarket): Future[Address] {.async.} =
return market.signer return market.signer
method periodicity*(mock: MockMarket): Future[Periodicity] {.async.} = method periodicity*(
mock: MockMarket
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
return Periodicity(seconds: mock.config.proofs.period) return Periodicity(seconds: mock.config.proofs.period)
method proofTimeout*(market: MockMarket): Future[uint64] {.async.} = method proofTimeout*(
market: MockMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
return market.config.proofs.timeout return market.config.proofs.timeout
method requestDurationLimit*(market: MockMarket): Future[uint64] {.async.} = method requestDurationLimit*(market: MockMarket): Future[uint64] {.async.} =
return market.config.requestDurationLimit return market.config.requestDurationLimit
method proofDowntime*(market: MockMarket): Future[uint8] {.async.} = method proofDowntime*(
market: MockMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
return market.config.proofs.downtime return market.config.proofs.downtime
method repairRewardPercentage*(market: MockMarket): Future[uint8] {.async.} = method repairRewardPercentage*(
market: MockMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
return market.config.collateral.repairRewardPercentage return market.config.collateral.repairRewardPercentage
method getPointer*(market: MockMarket, slotId: SlotId): Future[uint8] {.async.} = method getPointer*(market: MockMarket, slotId: SlotId): Future[uint8] {.async.} =
@ -173,7 +187,7 @@ method mySlots*(market: MockMarket): Future[seq[SlotId]] {.async.} =
method getRequest*( method getRequest*(
market: MockMarket, id: RequestId market: MockMarket, id: RequestId
): Future[?StorageRequest] {.async.} = ): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
for request in market.requested: for request in market.requested:
if request.id == id: if request.id == id:
return some request return some request
@ -191,10 +205,16 @@ method requestState*(
): Future[?RequestState] {.async.} = ): Future[?RequestState] {.async.} =
return market.requestState .? [requestId] return market.requestState .? [requestId]
method slotState*(market: MockMarket, slotId: SlotId): Future[SlotState] {.async.} = method slotState*(
if not market.slotState.hasKey(slotId): market: MockMarket, slotId: SlotId
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
if slotId notin market.slotState:
return SlotState.Free return SlotState.Free
return market.slotState[slotId]
try:
return market.slotState[slotId]
except KeyError as e:
raiseAssert "SlotId not found in known slots (MockMarket.slotState)"
method getRequestEnd*( method getRequestEnd*(
market: MockMarket, id: RequestId market: MockMarket, id: RequestId
@ -270,6 +290,9 @@ proc fillSlot*(
host: Address, host: Address,
collateral = 0.u256, collateral = 0.u256,
) = ) =
if error =? market.errorOnFillSlot:
raise error
let slot = MockSlot( let slot = MockSlot(
requestId: requestId, requestId: requestId,
slotIndex: slotIndex, slotIndex: slotIndex,
@ -351,7 +374,7 @@ method canProofBeMarkedAsMissing*(
method reserveSlot*( method reserveSlot*(
market: MockMarket, requestId: RequestId, slotIndex: uint64 market: MockMarket, requestId: RequestId, slotIndex: uint64
) {.async.} = ) {.async.} =
if error =? market.reserveSlotThrowError: if error =? market.errorOnReserveSlot:
raise error raise error
method canReserveSlot*( method canReserveSlot*(
@ -362,8 +385,19 @@ method canReserveSlot*(
func setCanReserveSlot*(market: MockMarket, canReserveSlot: bool) = func setCanReserveSlot*(market: MockMarket, canReserveSlot: bool) =
market.canReserveSlot = canReserveSlot market.canReserveSlot = canReserveSlot
func setReserveSlotThrowError*(market: MockMarket, error: ?(ref MarketError)) = func setErrorOnReserveSlot*(market: MockMarket, error: ref MarketError) =
market.reserveSlotThrowError = error market.errorOnReserveSlot =
if error.isNil:
none (ref MarketError)
else:
some error
func setErrorOnFillSlot*(market: MockMarket, error: ref CatchableError) =
market.errorOnFillSlot =
if error.isNil:
none (ref CatchableError)
else:
some error
method subscribeRequests*( method subscribeRequests*(
market: MockMarket, callback: OnRequest market: MockMarket, callback: OnRequest
@ -534,3 +568,33 @@ method unsubscribe*(subscription: ProofSubmittedSubscription) {.async.} =
method unsubscribe*(subscription: SlotReservationsFullSubscription) {.async.} = method unsubscribe*(subscription: SlotReservationsFullSubscription) {.async.} =
subscription.market.subscriptions.onSlotReservationsFull.keepItIf(it != subscription) subscription.market.subscriptions.onSlotReservationsFull.keepItIf(it != subscription)
method slotCollateral*(
market: MockMarket, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
let slotid = slotId(requestId, slotIndex)
try:
let state = await slotState(market, slotid)
without request =? await market.getRequest(requestId):
return failure newException(
MarketError, "Failure calculating the slotCollateral, cannot get the request"
)
return market.slotCollateral(request.ask.collateralPerSlot, state)
except MarketError as error:
error "Error when trying to calculate the slotCollateral", error = error.msg
return failure error
method slotCollateral*(
market: MockMarket, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.raises: [].} =
if slotState == SlotState.Repair:
let repairRewardPercentage = market.config.collateral.repairRewardPercentage.u256
return success (
collateralPerSlot - (collateralPerSlot * repairRewardPercentage).div(100.u256)
)
return success collateralPerSlot

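Editor's note: `setErrorOnReserveSlot` and `setErrorOnFillSlot` above take a plain `ref` (or nil to clear) and store it as a questionable Option, which `reserveSlot`/`fillSlot` then unwrap with `=?` to decide whether to raise. A reduced sketch of that error-injection pattern, with hypothetical names (`Mock`, `doWork`):

import pkg/questionable

type Mock = ref object
  errorOnDoWork: ?(ref CatchableError)

func setErrorOnDoWork(m: Mock, error: ref CatchableError) =
  # nil clears the injected error, anything else is wrapped in an Option
  m.errorOnDoWork =
    if error.isNil:
      none (ref CatchableError)
    else:
      some error

proc doWork(m: Mock) =
  # the mocked operation raises the injected error, if any, before doing work
  if error =? m.errorOnDoWork:
    raise error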
View File

@ -7,7 +7,7 @@ type MockSlotQueueItem* = object
slotSize*: uint64 slotSize*: uint64
duration*: uint64 duration*: uint64
pricePerBytePerSecond*: UInt256 pricePerBytePerSecond*: UInt256
collateralPerByte*: UInt256 collateral*: UInt256
expiry*: uint64 expiry*: uint64
seen*: bool seen*: bool
@ -19,8 +19,8 @@ proc toSlotQueueItem*(item: MockSlotQueueItem): SlotQueueItem =
slotSize: item.slotSize, slotSize: item.slotSize,
duration: item.duration, duration: item.duration,
pricePerBytePerSecond: item.pricePerBytePerSecond, pricePerBytePerSecond: item.pricePerBytePerSecond,
collateralPerByte: item.collateralPerByte,
), ),
expiry = item.expiry, expiry = item.expiry,
seen = item.seen, seen = item.seen,
collateral = item.collateral,
) )

View File

@ -26,7 +26,7 @@ proc new*(
var consumed = 0 var consumed = 0
proc reader( proc reader(
data: ChunkBuffer, len: int data: ChunkBuffer, len: int
): Future[int] {.async, gcsafe, raises: [Defect].} = ): Future[int] {.async: (raises: [ChunkerError, CancelledError]), gcsafe.} =
var alpha = toSeq(byte('A') .. byte('z')) var alpha = toSeq(byte('A') .. byte('z'))
if consumed >= size: if consumed >= size:

View File

@ -1,4 +1,4 @@
import std/unittest import pkg/unittest2
import pkg/codex/merkletree import pkg/codex/merkletree

View File

@ -1,4 +1,4 @@
import std/unittest import pkg/unittest2
import pkg/questionable/results import pkg/questionable/results
import pkg/stew/byteutils import pkg/stew/byteutils
@ -18,7 +18,7 @@ const data = [
"00000000000000000000000000000009".toBytes, "00000000000000000000000000000010".toBytes, "00000000000000000000000000000009".toBytes, "00000000000000000000000000000010".toBytes,
] ]
checksuite "merkletree - coders": suite "merkletree - coders":
test "encoding and decoding a tree yields the same tree": test "encoding and decoding a tree yields the same tree":
let let
tree = CodexTree.init(Sha256HashCodec, data).tryGet() tree = CodexTree.init(Sha256HashCodec, data).tryGet()

View File

@ -1,6 +1,6 @@
import std/unittest
import std/sequtils import std/sequtils
import pkg/unittest2
import pkg/questionable/results import pkg/questionable/results
import pkg/stew/byteutils import pkg/stew/byteutils
import pkg/libp2p import pkg/libp2p

View File

@ -1,7 +1,7 @@
import std/unittest
import std/sequtils import std/sequtils
import std/random import std/random
import pkg/unittest2
import pkg/poseidon2 import pkg/poseidon2
import pkg/poseidon2/sponge import pkg/poseidon2/sponge

View File

@ -1,6 +1,6 @@
import std/unittest
import std/sequtils import std/sequtils
import pkg/unittest2
import pkg/poseidon2 import pkg/poseidon2
import pkg/poseidon2/io import pkg/poseidon2/io
import pkg/questionable/results import pkg/questionable/results

View File

@ -125,7 +125,7 @@ asyncchecksuite "Test Node - Host contracts":
fetchedBytes += blk.data.len.uint fetchedBytes += blk.data.len.uint
return success() return success()
(await onStore(request, 1.uint64, onBlocks)).tryGet() (await onStore(request, 1.uint64, onBlocks, isRepairing = false)).tryGet()
check fetchedBytes == 12 * DefaultBlockSize.uint check fetchedBytes == 12 * DefaultBlockSize.uint
let indexer = verifiable.protectedStrategy.init( let indexer = verifiable.protectedStrategy.init(

View File

@ -1,4 +1,4 @@
import std/unittest import pkg/unittest2
import pkg/questionable import pkg/questionable
import pkg/codex/contracts/requests import pkg/codex/contracts/requests
import pkg/codex/sales/states/cancelled import pkg/codex/sales/states/cancelled
@ -8,7 +8,7 @@ import pkg/codex/sales/states/filled
import ../../examples import ../../examples
import ../../helpers import ../../helpers
checksuite "sales state 'downloading'": suite "sales state 'downloading'":
let request = StorageRequest.example let request = StorageRequest.example
let slotIndex = request.ask.slots div 2 let slotIndex = request.ask.slots div 2
var state: SaleDownloading var state: SaleDownloading

View File

@ -14,7 +14,7 @@ import ../../helpers/mockmarket
import ../../examples import ../../examples
import ../../helpers import ../../helpers
checksuite "sales state 'filled'": suite "sales state 'filled'":
let request = StorageRequest.example let request = StorageRequest.example
let slotIndex = request.ask.slots div 2 let slotIndex = request.ask.slots div 2

View File

@ -1,18 +1,31 @@
import std/unittest
import pkg/questionable import pkg/questionable
import pkg/codex/contracts/requests import pkg/codex/contracts/requests
import pkg/codex/sales/states/filling import pkg/codex/sales/states/filling
import pkg/codex/sales/states/cancelled import pkg/codex/sales/states/cancelled
import pkg/codex/sales/states/failed import pkg/codex/sales/states/failed
import pkg/codex/sales/states/ignored
import pkg/codex/sales/states/errored
import pkg/codex/sales/salesagent
import pkg/codex/sales/salescontext
import ../../../asynctest
import ../../examples import ../../examples
import ../../helpers import ../../helpers
import ../../helpers/mockmarket
import ../../helpers/mockclock
checksuite "sales state 'filling'": suite "sales state 'filling'":
let request = StorageRequest.example let request = StorageRequest.example
let slotIndex = request.ask.slots div 2 let slotIndex = request.ask.slots div 2
var state: SaleFilling var state: SaleFilling
var market: MockMarket
var clock: MockClock
var agent: SalesAgent
setup: setup:
clock = MockClock.new()
market = MockMarket.new()
let context = SalesContext(market: market, clock: clock)
agent = newSalesAgent(context, request.id, slotIndex, request.some)
state = SaleFilling.new() state = SaleFilling.new()
test "switches to cancelled state when request expires": test "switches to cancelled state when request expires":
@ -22,3 +35,28 @@ checksuite "sales state 'filling'":
test "switches to failed state when request fails": test "switches to failed state when request fails":
let next = state.onFailed(request) let next = state.onFailed(request)
check !next of SaleFailed check !next of SaleFailed
test "run switches to ignored when slot is not free":
let error = newException(
SlotStateMismatchError, "Failed to fill slot because the slot is not free"
)
market.setErrorOnFillSlot(error)
market.requested.add(request)
market.slotState[request.slotId(slotIndex)] = SlotState.Filled
let next = !(await state.run(agent))
check next of SaleIgnored
check SaleIgnored(next).reprocessSlot == false
check SaleIgnored(next).returnBytes
test "run switches to errored with other error ":
let error = newException(MarketError, "some error")
market.setErrorOnFillSlot(error)
market.requested.add(request)
market.slotState[request.slotId(slotIndex)] = SlotState.Filled
let next = !(await state.run(agent))
check next of SaleErrored
let errored = SaleErrored(next)
check errored.error == error

View File

@ -54,15 +54,16 @@ asyncchecksuite "sales state 'SlotReserving'":
test "run switches to errored when slot reservation errors": test "run switches to errored when slot reservation errors":
let error = newException(MarketError, "some error") let error = newException(MarketError, "some error")
market.setReserveSlotThrowError(some error) market.setErrorOnReserveSlot(error)
let next = !(await state.run(agent)) let next = !(await state.run(agent))
check next of SaleErrored check next of SaleErrored
let errored = SaleErrored(next) let errored = SaleErrored(next)
check errored.error == error check errored.error == error
test "catches reservation not allowed error": test "run switches to ignored when reservation is not allowed":
let error = newException(MarketError, "SlotReservations_ReservationNotAllowed") let error =
market.setReserveSlotThrowError(some error) newException(SlotReservationNotAllowedError, "Reservation is not allowed")
market.setErrorOnReserveSlot(error)
let next = !(await state.run(agent)) let next = !(await state.run(agent))
check next of SaleIgnored check next of SaleIgnored
check SaleIgnored(next).reprocessSlot == false check SaleIgnored(next).reprocessSlot == false

View File

@ -14,7 +14,7 @@ import ../../helpers/mockmarket
import ../../examples import ../../examples
import ../../helpers import ../../helpers
checksuite "sales state 'unknown'": suite "sales state 'unknown'":
let request = StorageRequest.example let request = StorageRequest.example
let slotIndex = request.ask.slots div 2 let slotIndex = request.ask.slots div 2
let slotId = slotId(request.id, slotIndex) let slotId = slotId(request.id, slotIndex)

View File

@ -283,35 +283,95 @@ asyncchecksuite "Reservations module":
check updated.isErr check updated.isErr
check updated.error of NotExistsError check updated.error of NotExistsError
test "onAvailabilityAdded called when availability is created": test "OnAvailabilitySaved called when availability is created":
var added: Availability var added: Availability
reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
added = a added = a
let availability = createAvailability() let availability = createAvailability()
check added == availability check added == availability
test "onAvailabilityAdded called when availability size is increased": test "OnAvailabilitySaved called when availability size is increased":
var availability = createAvailability() var availability = createAvailability()
var added: Availability var added: Availability
reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
added = a added = a
availability.freeSize += 1 availability.freeSize += 1
discard await reservations.update(availability) discard await reservations.update(availability)
check added == availability check added == availability
test "onAvailabilityAdded is not called when availability size is decreased": test "OnAvailabilitySaved is not called when availability size is decreased":
var availability = createAvailability() var availability = createAvailability()
var called = false var called = false
reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
called = true called = true
availability.freeSize -= 1 availability.freeSize -= 1
discard await reservations.update(availability) discard await reservations.update(availability)
check not called check not called
test "OnAvailabilitySaved called when availability duration is increased":
var availability = createAvailability()
var added: Availability
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
added = a
availability.duration += 1
discard await reservations.update(availability)
check added == availability
test "OnAvailabilitySaved is not called when availability duration is decreased":
var availability = createAvailability()
var called = false
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
called = true
availability.duration -= 1
discard await reservations.update(availability)
check not called
test "OnAvailabilitySaved called when availability minPricePerBytePerSecond is increased":
var availability = createAvailability()
var added: Availability
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
added = a
availability.minPricePerBytePerSecond += 1.u256
discard await reservations.update(availability)
check added == availability
test "OnAvailabilitySaved is not called when availability minPricePerBytePerSecond is decreased":
var availability = createAvailability()
var called = false
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
called = true
availability.minPricePerBytePerSecond -= 1.u256
discard await reservations.update(availability)
check not called
test "OnAvailabilitySaved called when availability totalCollateral is increased":
var availability = createAvailability()
var added: Availability
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
added = a
availability.totalCollateral = availability.totalCollateral + 1.u256
discard await reservations.update(availability)
check added == availability
test "OnAvailabilitySaved is not called when availability totalCollateral is decreased":
var availability = createAvailability()
var called = false
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
called = true
availability.totalCollateral = availability.totalCollateral - 1.u256
discard await reservations.update(availability)
check not called
test "availabilities can be found": test "availabilities can be found":
let availability = createAvailability() let availability = createAvailability()

View File

@ -62,7 +62,7 @@ asyncchecksuite "Sales - start":
sales = Sales.new(market, clock, repo) sales = Sales.new(market, clock, repo)
reservations = sales.context.reservations reservations = sales.context.reservations
sales.onStore = proc( sales.onStore = proc(
request: StorageRequest, slot: uint64, onBatch: BatchProc request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
): Future[?!void] {.async.} = ): Future[?!void] {.async.} =
return success() return success()
@ -181,7 +181,7 @@ asyncchecksuite "Sales":
sales = Sales.new(market, clock, repo) sales = Sales.new(market, clock, repo)
reservations = sales.context.reservations reservations = sales.context.reservations
sales.onStore = proc( sales.onStore = proc(
request: StorageRequest, slot: uint64, onBatch: BatchProc request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
): Future[?!void] {.async.} = ): Future[?!void] {.async.} =
return success() return success()
@ -229,17 +229,24 @@ asyncchecksuite "Sales":
availability = a.get # update id availability = a.get # update id
proc notProcessed(itemsProcessed: seq[SlotQueueItem], request: StorageRequest): bool = proc notProcessed(itemsProcessed: seq[SlotQueueItem], request: StorageRequest): bool =
let items = SlotQueueItem.init(request) let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
for i in 0 ..< items.len: for i in 0 ..< items.len:
if itemsProcessed.contains(items[i]): if itemsProcessed.contains(items[i]):
return false return false
return true return true
proc addRequestToSaturatedQueue(): Future[StorageRequest] {.async.} = proc addRequestToSaturatedQueue(): Future[StorageRequest] {.async.} =
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = queue.onProcessSlot = proc(
await sleepAsync(10.millis) item: SlotQueueItem, done: Future[void]
itemsProcessed.add item ) {.async: (raises: []).} =
done.complete() try:
await sleepAsync(10.millis)
itemsProcessed.add item
except CancelledError as exc:
checkpoint(exc.msg)
finally:
if not done.finished:
done.complete()
var request1 = StorageRequest.example var request1 = StorageRequest.example
request1.ask.collateralPerByte = request.ask.collateralPerByte + 1 request1.ask.collateralPerByte = request.ask.collateralPerByte + 1
@ -261,12 +268,15 @@ asyncchecksuite "Sales":
waitFor run() waitFor run()
test "processes all request's slots once StorageRequested emitted": test "processes all request's slots once StorageRequested emitted":
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = queue.onProcessSlot = proc(
item: SlotQueueItem, done: Future[void]
) {.async: (raises: []).} =
itemsProcessed.add item itemsProcessed.add item
done.complete() if not done.finished:
done.complete()
createAvailability() createAvailability()
await market.requestStorage(request) await market.requestStorage(request)
let items = SlotQueueItem.init(request) let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
check eventually items.allIt(itemsProcessed.contains(it)) check eventually items.allIt(itemsProcessed.contains(it))
test "removes slots from slot queue once RequestCancelled emitted": test "removes slots from slot queue once RequestCancelled emitted":
@ -287,30 +297,42 @@ asyncchecksuite "Sales":
test "removes slot index from slot queue once SlotFilled emitted": test "removes slot index from slot queue once SlotFilled emitted":
let request1 = await addRequestToSaturatedQueue() let request1 = await addRequestToSaturatedQueue()
market.emitSlotFilled(request1.id, 1.uint64) market.emitSlotFilled(request1.id, 1.uint64)
let expected = SlotQueueItem.init(request1, 1'u16) let expected =
SlotQueueItem.init(request1, 1'u16, collateral = request1.ask.collateralPerSlot)
check always (not itemsProcessed.contains(expected)) check always (not itemsProcessed.contains(expected))
test "removes slot index from slot queue once SlotReservationsFull emitted": test "removes slot index from slot queue once SlotReservationsFull emitted":
let request1 = await addRequestToSaturatedQueue() let request1 = await addRequestToSaturatedQueue()
market.emitSlotReservationsFull(request1.id, 1.uint64) market.emitSlotReservationsFull(request1.id, 1.uint64)
let expected = SlotQueueItem.init(request1, 1'u16) let expected =
SlotQueueItem.init(request1, 1'u16, collateral = request1.ask.collateralPerSlot)
check always (not itemsProcessed.contains(expected)) check always (not itemsProcessed.contains(expected))
test "adds slot index to slot queue once SlotFreed emitted": test "adds slot index to slot queue once SlotFreed emitted":
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = queue.onProcessSlot = proc(
item: SlotQueueItem, done: Future[void]
) {.async: (raises: []).} =
itemsProcessed.add item itemsProcessed.add item
done.complete() if not done.finished:
done.complete()
createAvailability() createAvailability()
market.requested.add request # "contract" must be able to return request market.requested.add request # "contract" must be able to return request
market.emitSlotFreed(request.id, 2.uint64) market.emitSlotFreed(request.id, 2.uint64)
let expected = SlotQueueItem.init(request, 2.uint16) without collateralPerSlot =? await market.slotCollateral(request.id, 2.uint64),
error:
fail()
let expected =
SlotQueueItem.init(request, 2.uint16, collateral = request.ask.collateralPerSlot)
check eventually itemsProcessed.contains(expected) check eventually itemsProcessed.contains(expected)
test "items in queue are readded (and marked seen) once ignored": test "items in queue are readded (and marked seen) once ignored":
await market.requestStorage(request) await market.requestStorage(request)
let items = SlotQueueItem.init(request) let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
check eventually queue.len > 0 check eventually queue.len > 0
# queue starts paused, allow items to be added to the queue # queue starts paused, allow items to be added to the queue
check eventually queue.paused check eventually queue.paused
@ -331,7 +353,7 @@ asyncchecksuite "Sales":
test "queue is paused once availability is insufficient to service slots in queue": test "queue is paused once availability is insufficient to service slots in queue":
createAvailability() # enough to fill a single slot createAvailability() # enough to fill a single slot
await market.requestStorage(request) await market.requestStorage(request)
let items = SlotQueueItem.init(request) let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
check eventually queue.len > 0 check eventually queue.len > 0
# queue starts paused, allow items to be added to the queue # queue starts paused, allow items to be added to the queue
check eventually queue.paused check eventually queue.paused
@ -348,7 +370,7 @@ asyncchecksuite "Sales":
test "availability size is reduced by request slot size when fully downloaded": test "availability size is reduced by request slot size when fully downloaded":
sales.onStore = proc( sales.onStore = proc(
request: StorageRequest, slot: uint64, onBatch: BatchProc request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
): Future[?!void] {.async.} = ): Future[?!void] {.async.} =
let blk = bt.Block.new(@[1.byte]).get let blk = bt.Block.new(@[1.byte]).get
await onBatch(blk.repeat(request.ask.slotSize.int)) await onBatch(blk.repeat(request.ask.slotSize.int))
@ -361,7 +383,7 @@ asyncchecksuite "Sales":
test "non-downloaded bytes are returned to availability once finished": test "non-downloaded bytes are returned to availability once finished":
var slotIndex = 0.uint64 var slotIndex = 0.uint64
sales.onStore = proc( sales.onStore = proc(
request: StorageRequest, slot: uint64, onBatch: BatchProc request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
): Future[?!void] {.async.} = ): Future[?!void] {.async.} =
slotIndex = slot slotIndex = slot
let blk = bt.Block.new(@[1.byte]).get let blk = bt.Block.new(@[1.byte]).get
@ -421,7 +443,7 @@ asyncchecksuite "Sales":
var storingRequest: StorageRequest var storingRequest: StorageRequest
var storingSlot: uint64 var storingSlot: uint64
sales.onStore = proc( sales.onStore = proc(
request: StorageRequest, slot: uint64, onBatch: BatchProc request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
): Future[?!void] {.async.} = ): Future[?!void] {.async.} =
storingRequest = request storingRequest = request
storingSlot = slot storingSlot = slot
@ -434,7 +456,7 @@ asyncchecksuite "Sales":
test "makes storage available again when data retrieval fails": test "makes storage available again when data retrieval fails":
let error = newException(IOError, "data retrieval failed") let error = newException(IOError, "data retrieval failed")
sales.onStore = proc( sales.onStore = proc(
request: StorageRequest, slot: uint64, onBatch: BatchProc request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
): Future[?!void] {.async.} = ): Future[?!void] {.async.} =
return failure(error) return failure(error)
createAvailability() createAvailability()
@ -503,7 +525,7 @@ asyncchecksuite "Sales":
test "makes storage available again when other host fills the slot": test "makes storage available again when other host fills the slot":
let otherHost = Address.example let otherHost = Address.example
sales.onStore = proc( sales.onStore = proc(
request: StorageRequest, slot: uint64, onBatch: BatchProc request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
): Future[?!void] {.async.} = ): Future[?!void] {.async.} =
await sleepAsync(chronos.hours(1)) await sleepAsync(chronos.hours(1))
return success() return success()
@ -519,7 +541,7 @@ asyncchecksuite "Sales":
let origSize = availability.freeSize let origSize = availability.freeSize
sales.onStore = proc( sales.onStore = proc(
request: StorageRequest, slot: uint64, onBatch: BatchProc request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
): Future[?!void] {.async.} = ): Future[?!void] {.async.} =
await sleepAsync(chronos.hours(1)) await sleepAsync(chronos.hours(1))
return success() return success()
@ -544,7 +566,7 @@ asyncchecksuite "Sales":
let origSize = availability.freeSize let origSize = availability.freeSize
sales.onStore = proc( sales.onStore = proc(
request: StorageRequest, slot: uint64, onBatch: BatchProc request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
): Future[?!void] {.async.} = ): Future[?!void] {.async.} =
await sleepAsync(chronos.hours(1)) await sleepAsync(chronos.hours(1))
return success() return success()

View File
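Editor's note: the onProcessSlot callbacks above all take the same shape once declared `{.async: (raises: []).}`: awaited work goes inside try/except and the `done` future is completed exactly once in `finally`. A minimal sketch of that shape (the standalone `onProcessSlot` below is illustrative, not the queue's actual wiring):

import pkg/chronos

proc onProcessSlot(done: Future[void]) {.async: (raises: []).} =
  try:
    # simulated work; any CancelledError from the await is swallowed so the
    # callback keeps its empty raises list
    await sleepAsync(10.millis)
  except CancelledError:
    discard
  finally:
    if not done.finished:
      done.complete()

A worker would hand its completion future to such a callback and await it afterwards, relying on the finally branch to guarantee progress even when the work is cancelled.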

@ -50,12 +50,19 @@ suite "Slot queue start/stop":
suite "Slot queue workers": suite "Slot queue workers":
var queue: SlotQueue var queue: SlotQueue
proc onProcessSlot(item: SlotQueueItem, doneProcessing: Future[void]) {.async.} = proc onProcessSlot(
await sleepAsync(1000.millis) item: SlotQueueItem, doneProcessing: Future[void]
) {.async: (raises: []).} =
# this is not illustrative of the realistic scenario as the # this is not illustrative of the realistic scenario as the
# `doneProcessing` future would be passed to another context before being # `doneProcessing` future would be passed to another context before being
# completed and therefore is not as simple as making the callback async # completed and therefore is not as simple as making the callback async
doneProcessing.complete() try:
await sleepAsync(1000.millis)
except CatchableError as exc:
checkpoint(exc.msg)
finally:
if not doneProcessing.finished:
doneProcessing.complete()
setup: setup:
let request = StorageRequest.example let request = StorageRequest.example
@ -89,9 +96,14 @@ suite "Slot queue workers":
check eventually queue.activeWorkers == 3 check eventually queue.activeWorkers == 3
test "discards workers once processing completed": test "discards workers once processing completed":
proc processSlot(item: SlotQueueItem, done: Future[void]) {.async.} = proc processSlot(item: SlotQueueItem, done: Future[void]) {.async: (raises: []).} =
await sleepAsync(1.millis) try:
done.complete() await sleepAsync(1.millis)
except CatchableError as exc:
checkpoint(exc.msg)
finally:
if not done.finished:
done.complete()
queue.onProcessSlot = processSlot queue.onProcessSlot = processSlot
@ -114,11 +126,19 @@ suite "Slot queue":
proc newSlotQueue(maxSize, maxWorkers: int, processSlotDelay = 1.millis) = proc newSlotQueue(maxSize, maxWorkers: int, processSlotDelay = 1.millis) =
queue = SlotQueue.new(maxWorkers, maxSize.uint16) queue = SlotQueue.new(maxWorkers, maxSize.uint16)
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = queue.onProcessSlot = proc(
await sleepAsync(processSlotDelay) item: SlotQueueItem, done: Future[void]
onProcessSlotCalled = true ) {.async: (raises: []).} =
onProcessSlotCalledWith.add (item.requestId, item.slotIndex) try:
done.complete() await sleepAsync(processSlotDelay)
except CatchableError as exc:
checkpoint(exc.msg)
finally:
onProcessSlotCalled = true
onProcessSlotCalledWith.add (item.requestId, item.slotIndex)
if not done.finished:
done.complete()
queue.start() queue.start()
setup: setup:
@ -159,8 +179,10 @@ suite "Slot queue":
requestB.ask.collateralPerByte = 1.u256 requestB.ask.collateralPerByte = 1.u256
requestB.expiry = 1000.uint64 requestB.expiry = 1000.uint64
let itemA = SlotQueueItem.init(requestA, 0) let itemA =
let itemB = SlotQueueItem.init(requestB, 0) SlotQueueItem.init(requestA, 0, collateral = requestA.ask.collateralPerSlot)
let itemB =
SlotQueueItem.init(requestB, 0, collateral = requestB.ask.collateralPerSlot)
check itemB < itemA # B higher priority than A check itemB < itemA # B higher priority than A
check itemA > itemB check itemA > itemB
@ -172,7 +194,7 @@ suite "Slot queue":
slotSize: 1.uint64, slotSize: 1.uint64,
duration: 1.uint64, duration: 1.uint64,
pricePerBytePerSecond: 2.u256, # profitability is higher (good) pricePerBytePerSecond: 2.u256, # profitability is higher (good)
collateralPerByte: 1.u256, collateral: 1.u256,
expiry: 1.uint64, expiry: 1.uint64,
seen: true, # seen (bad), more weight than profitability seen: true, # seen (bad), more weight than profitability
) )
@ -182,7 +204,7 @@ suite "Slot queue":
slotSize: 1.uint64, slotSize: 1.uint64,
duration: 1.uint64, duration: 1.uint64,
pricePerBytePerSecond: 1.u256, # profitability is lower (bad) pricePerBytePerSecond: 1.u256, # profitability is lower (bad)
collateralPerByte: 1.u256, collateral: 1.u256,
expiry: 1.uint64, expiry: 1.uint64,
seen: false, # not seen (good) seen: false, # not seen (good)
) )
@ -197,7 +219,7 @@ suite "Slot queue":
slotSize: 1.uint64, slotSize: 1.uint64,
duration: 1.uint64, duration: 1.uint64,
pricePerBytePerSecond: 1.u256, # reward is lower (bad) pricePerBytePerSecond: 1.u256, # reward is lower (bad)
collateralPerByte: 1.u256, # collateral is lower (good) collateral: 1.u256, # collateral is lower (good)
expiry: 1.uint64, expiry: 1.uint64,
seen: false, seen: false,
) )
@ -208,7 +230,7 @@ suite "Slot queue":
duration: 1.uint64, duration: 1.uint64,
pricePerBytePerSecond: 2.u256, pricePerBytePerSecond: 2.u256,
# reward is higher (good), more weight than collateral # reward is higher (good), more weight than collateral
collateralPerByte: 2.u256, # collateral is higher (bad) collateral: 2.u256, # collateral is higher (bad)
expiry: 1.uint64, expiry: 1.uint64,
seen: false, seen: false,
) )
@ -223,7 +245,7 @@ suite "Slot queue":
slotSize: 1.uint64, slotSize: 1.uint64,
duration: 1.uint64, duration: 1.uint64,
pricePerBytePerSecond: 1.u256, pricePerBytePerSecond: 1.u256,
collateralPerByte: 2.u256, # collateral is higher (bad) collateral: 2.u256, # collateral is higher (bad)
expiry: 2.uint64, # expiry is longer (good) expiry: 2.uint64, # expiry is longer (good)
seen: false, seen: false,
) )
@ -233,7 +255,7 @@ suite "Slot queue":
slotSize: 1.uint64, slotSize: 1.uint64,
duration: 1.uint64, duration: 1.uint64,
pricePerBytePerSecond: 1.u256, pricePerBytePerSecond: 1.u256,
collateralPerByte: 1.u256, # collateral is lower (good), more weight than expiry collateral: 1.u256, # collateral is lower (good), more weight than expiry
expiry: 1.uint64, # expiry is shorter (bad) expiry: 1.uint64, # expiry is shorter (bad)
seen: false, seen: false,
) )
@ -248,7 +270,7 @@ suite "Slot queue":
slotSize: 1.uint64, # slotSize is smaller (good) slotSize: 1.uint64, # slotSize is smaller (good)
duration: 1.uint64, duration: 1.uint64,
pricePerBytePerSecond: 1.u256, pricePerBytePerSecond: 1.u256,
collateralPerByte: 1.u256, collateral: 1.u256,
expiry: 1.uint64, # expiry is shorter (bad) expiry: 1.uint64, # expiry is shorter (bad)
seen: false, seen: false,
) )
@ -258,7 +280,7 @@ suite "Slot queue":
slotSize: 2.uint64, # slotSize is larger (bad) slotSize: 2.uint64, # slotSize is larger (bad)
duration: 1.uint64, duration: 1.uint64,
pricePerBytePerSecond: 1.u256, pricePerBytePerSecond: 1.u256,
collateralPerByte: 1.u256, collateral: 1.u256,
expiry: 2.uint64, # expiry is longer (good), more weight than slotSize expiry: 2.uint64, # expiry is longer (good), more weight than slotSize
seen: false, seen: false,
) )
@ -273,7 +295,7 @@ suite "Slot queue":
slotSize: 2.uint64, # slotSize is larger (bad) slotSize: 2.uint64, # slotSize is larger (bad)
duration: 1.uint64, duration: 1.uint64,
pricePerBytePerSecond: 1.u256, pricePerBytePerSecond: 1.u256,
collateralPerByte: 1.u256, collateral: 1.u256,
expiry: 1.uint64, # expiry is shorter (bad) expiry: 1.uint64, # expiry is shorter (bad)
seen: false, seen: false,
) )
@ -283,7 +305,7 @@ suite "Slot queue":
slotSize: 1.uint64, # slotSize is smaller (good) slotSize: 1.uint64, # slotSize is smaller (good)
duration: 1.uint64, duration: 1.uint64,
pricePerBytePerSecond: 1.u256, pricePerBytePerSecond: 1.u256,
collateralPerByte: 1.u256, collateral: 1.u256,
expiry: 1.uint64, expiry: 1.uint64,
seen: false, seen: false,
) )
@ -292,11 +314,16 @@ suite "Slot queue":
test "expands available all possible slot indices on init": test "expands available all possible slot indices on init":
let request = StorageRequest.example let request = StorageRequest.example
let items = SlotQueueItem.init(request) let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
check items.len.uint64 == request.ask.slots check items.len.uint64 == request.ask.slots
var checked = 0 var checked = 0
for slotIndex in 0'u16 ..< request.ask.slots.uint16: for slotIndex in 0'u16 ..< request.ask.slots.uint16:
check items.anyIt(it == SlotQueueItem.init(request, slotIndex)) check items.anyIt(
it ==
SlotQueueItem.init(
request, slotIndex, collateral = request.ask.collateralPerSlot
)
)
inc checked inc checked
check checked == items.len check checked == items.len
@ -322,34 +349,17 @@ suite "Slot queue":
check isOk queue.push(item3) check isOk queue.push(item3)
check isOk queue.push(item4) check isOk queue.push(item4)
test "populates item with exisiting request metadata":
newSlotQueue(maxSize = 8, maxWorkers = 1, processSlotDelay = 10.millis)
let request0 = StorageRequest.example
var request1 = StorageRequest.example
request1.ask.collateralPerByte += 1.u256
let items0 = SlotQueueItem.init(request0)
let items1 = SlotQueueItem.init(request1)
check queue.push(items0).isOk
check queue.push(items1).isOk
let populated = !queue.populateItem(request1.id, 12'u16)
check populated.requestId == request1.id
check populated.slotIndex == 12'u16
check populated.slotSize == request1.ask.slotSize
check populated.duration == request1.ask.duration
check populated.pricePerBytePerSecond == request1.ask.pricePerBytePerSecond
check populated.collateralPerByte == request1.ask.collateralPerByte
test "does not find exisiting request metadata":
newSlotQueue(maxSize = 2, maxWorkers = 2)
let item = SlotQueueItem.example
check queue.populateItem(item.requestId, 12'u16).isNone
test "can support uint16.high slots": test "can support uint16.high slots":
var request = StorageRequest.example var request = StorageRequest.example
let maxUInt16 = uint16.high let maxUInt16 = uint16.high
let uint64Slots = uint64(maxUInt16) let uint64Slots = uint64(maxUInt16)
request.ask.slots = uint64Slots request.ask.slots = uint64Slots
let items = SlotQueueItem.init(request.id, request.ask, request.expiry) let items = SlotQueueItem.init(
request.id,
request.ask,
request.expiry,
collateral = request.ask.collateralPerSlot,
)
check items.len.uint16 == maxUInt16 check items.len.uint16 == maxUInt16
test "cannot support greater than uint16.high slots": test "cannot support greater than uint16.high slots":
@ -358,7 +368,12 @@ suite "Slot queue":
let uint64Slots = uint64(int32Slots) let uint64Slots = uint64(int32Slots)
request.ask.slots = uint64Slots request.ask.slots = uint64Slots
expect SlotsOutOfRangeError: expect SlotsOutOfRangeError:
discard SlotQueueItem.init(request.id, request.ask, request.expiry) discard SlotQueueItem.init(
request.id,
request.ask,
request.expiry,
collateral = request.ask.collateralPerSlot,
)
test "cannot push duplicate items": test "cannot push duplicate items":
newSlotQueue(maxSize = 6, maxWorkers = 1, processSlotDelay = 15.millis) newSlotQueue(maxSize = 6, maxWorkers = 1, processSlotDelay = 15.millis)
@ -399,8 +414,10 @@ suite "Slot queue":
let request0 = StorageRequest.example let request0 = StorageRequest.example
var request1 = StorageRequest.example var request1 = StorageRequest.example
request1.ask.collateralPerByte += 1.u256 request1.ask.collateralPerByte += 1.u256
let items0 = SlotQueueItem.init(request0) let items0 =
let items1 = SlotQueueItem.init(request1) SlotQueueItem.init(request0, collateral = request0.ask.collateralPerSlot)
let items1 =
SlotQueueItem.init(request1, collateral = request1.ask.collateralPerSlot)
check queue.push(items0).isOk check queue.push(items0).isOk
check queue.push(items1).isOk check queue.push(items1).isOk
let last = items1[items1.high] let last = items1[items1.high]
@ -413,8 +430,10 @@ suite "Slot queue":
let request0 = StorageRequest.example let request0 = StorageRequest.example
var request1 = StorageRequest.example var request1 = StorageRequest.example
request1.ask.collateralPerByte += 1.u256 request1.ask.collateralPerByte += 1.u256
let items0 = SlotQueueItem.init(request0) let items0 =
let items1 = SlotQueueItem.init(request1) SlotQueueItem.init(request0, collateral = request0.ask.collateralPerSlot)
let items1 =
SlotQueueItem.init(request1, collateral = request1.ask.collateralPerSlot)
check queue.push(items0).isOk check queue.push(items0).isOk
check queue.push(items1).isOk check queue.push(items1).isOk
queue.delete(request1.id) queue.delete(request1.id)
@ -433,42 +452,56 @@ suite "Slot queue":
request3.ask.collateralPerByte = request2.ask.collateralPerByte + 1 request3.ask.collateralPerByte = request2.ask.collateralPerByte + 1
request4.ask.collateralPerByte = request3.ask.collateralPerByte + 1 request4.ask.collateralPerByte = request3.ask.collateralPerByte + 1
request5.ask.collateralPerByte = request4.ask.collateralPerByte + 1 request5.ask.collateralPerByte = request4.ask.collateralPerByte + 1
let item0 = SlotQueueItem.init(request0, 0) let item0 =
let item1 = SlotQueueItem.init(request1, 0) SlotQueueItem.init(request0, 0, collateral = request0.ask.collateralPerSlot)
let item2 = SlotQueueItem.init(request2, 0) let item1 =
let item3 = SlotQueueItem.init(request3, 0) SlotQueueItem.init(request1, 0, collateral = request1.ask.collateralPerSlot)
let item4 = SlotQueueItem.init(request4, 0) let item2 =
let item5 = SlotQueueItem.init(request5, 0) SlotQueueItem.init(request2, 0, collateral = request2.ask.collateralPerSlot)
let item3 =
SlotQueueItem.init(request3, 0, collateral = request3.ask.collateralPerSlot)
let item4 =
SlotQueueItem.init(request4, 0, collateral = request4.ask.collateralPerSlot)
let item5 =
SlotQueueItem.init(request5, 0, collateral = request5.ask.collateralPerSlot)
check queue.contains(item5) == false check queue.contains(item5) == false
check queue.push(@[item0, item1, item2, item3, item4, item5]).isOk check queue.push(@[item0, item1, item2, item3, item4, item5]).isOk
check queue.contains(item5) check queue.contains(item5)
test "sorts items by profitability descending (higher pricePerBytePerSecond == higher priority == goes first in the list)": test "sorts items by profitability descending (higher pricePerBytePerSecond == higher priority == goes first in the list)":
var request = StorageRequest.example var request = StorageRequest.example
let item0 = SlotQueueItem.init(request, 0) let item0 =
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
request.ask.pricePerBytePerSecond += 1.u256 request.ask.pricePerBytePerSecond += 1.u256
let item1 = SlotQueueItem.init(request, 1) let item1 =
SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot)
check item1 < item0 check item1 < item0
test "sorts items by collateral ascending (higher required collateralPerByte = lower priority == comes later in the list)": test "sorts items by collateral ascending (higher required collateral = lower priority == comes later in the list)":
var request = StorageRequest.example var request = StorageRequest.example
let item0 = SlotQueueItem.init(request, 0) let item0 =
request.ask.collateralPerByte += 1.u256 SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
let item1 = SlotQueueItem.init(request, 1) let item1 = SlotQueueItem.init(
request, 1, collateral = request.ask.collateralPerSlot + 1.u256
)
check item1 > item0 check item1 > item0
test "sorts items by expiry descending (longer expiry = higher priority)": test "sorts items by expiry descending (longer expiry = higher priority)":
var request = StorageRequest.example var request = StorageRequest.example
let item0 = SlotQueueItem.init(request, 0) let item0 =
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
request.expiry += 1 request.expiry += 1
let item1 = SlotQueueItem.init(request, 1) let item1 =
SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot)
check item1 < item0 check item1 < item0
test "sorts items by slot size descending (bigger dataset = higher profitability = higher priority)": test "sorts items by slot size descending (bigger dataset = higher profitability = higher priority)":
var request = StorageRequest.example var request = StorageRequest.example
let item0 = SlotQueueItem.init(request, 0) let item0 =
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
request.ask.slotSize += 1 request.ask.slotSize += 1
let item1 = SlotQueueItem.init(request, 1) let item1 =
SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot)
check item1 < item0 check item1 < item0
test "should call callback once an item is added": test "should call callback once an item is added":
@ -489,13 +522,17 @@ suite "Slot queue":
# sleeping after push allows the slotqueue loop to iterate, # sleeping after push allows the slotqueue loop to iterate,
# calling the callback for each pushed/updated item # calling the callback for each pushed/updated item
var request = StorageRequest.example var request = StorageRequest.example
let item0 = SlotQueueItem.init(request, 0) let item0 =
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
request.ask.pricePerBytePerSecond += 1.u256 request.ask.pricePerBytePerSecond += 1.u256
let item1 = SlotQueueItem.init(request, 1) let item1 =
SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot)
request.ask.pricePerBytePerSecond += 1.u256 request.ask.pricePerBytePerSecond += 1.u256
let item2 = SlotQueueItem.init(request, 2) let item2 =
SlotQueueItem.init(request, 2, collateral = request.ask.collateralPerSlot)
request.ask.pricePerBytePerSecond += 1.u256 request.ask.pricePerBytePerSecond += 1.u256
let item3 = SlotQueueItem.init(request, 3) let item3 =
SlotQueueItem.init(request, 3, collateral = request.ask.collateralPerSlot)
check queue.push(item0).isOk check queue.push(item0).isOk
await sleepAsync(1.millis) await sleepAsync(1.millis)
@ -520,13 +557,17 @@ suite "Slot queue":
# sleeping after push allows the slotqueue loop to iterate, # sleeping after push allows the slotqueue loop to iterate,
# calling the callback for each pushed/updated item # calling the callback for each pushed/updated item
var request = StorageRequest.example var request = StorageRequest.example
let item0 = SlotQueueItem.init(request, 0) let item0 =
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
request.ask.pricePerBytePerSecond += 1.u256 request.ask.pricePerBytePerSecond += 1.u256
let item1 = SlotQueueItem.init(request, 1) let item1 =
SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot)
request.ask.pricePerBytePerSecond += 1.u256 request.ask.pricePerBytePerSecond += 1.u256
let item2 = SlotQueueItem.init(request, 2) let item2 =
SlotQueueItem.init(request, 2, collateral = request.ask.collateralPerSlot)
request.ask.pricePerBytePerSecond += 1.u256 request.ask.pricePerBytePerSecond += 1.u256
let item3 = SlotQueueItem.init(request, 3) let item3 =
SlotQueueItem.init(request, 3, collateral = request.ask.collateralPerSlot)
check queue.push(item0).isOk check queue.push(item0).isOk
check queue.push(item1).isOk check queue.push(item1).isOk
@ -550,7 +591,7 @@ suite "Slot queue":
queue.pause queue.pause
let request = StorageRequest.example let request = StorageRequest.example
var items = SlotQueueItem.init(request) var items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
check queue.push(items).isOk check queue.push(items).isOk
# check all items processed # check all items processed
check eventually queue.len == 0 check eventually queue.len == 0
@ -558,8 +599,14 @@ suite "Slot queue":
test "pushing seen item does not unpause queue": test "pushing seen item does not unpause queue":
newSlotQueue(maxSize = 4, maxWorkers = 4) newSlotQueue(maxSize = 4, maxWorkers = 4)
let request = StorageRequest.example let request = StorageRequest.example
let item0 = let item0 = SlotQueueItem.init(
SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true) request.id,
0'u16,
request.ask,
request.expiry,
request.ask.collateralPerSlot,
seen = true,
)
check queue.paused check queue.paused
check queue.push(item0).isOk check queue.push(item0).isOk
check queue.paused check queue.paused
@ -567,8 +614,14 @@ suite "Slot queue":
test "paused queue waits for unpause before continuing processing": test "paused queue waits for unpause before continuing processing":
newSlotQueue(maxSize = 4, maxWorkers = 4) newSlotQueue(maxSize = 4, maxWorkers = 4)
let request = StorageRequest.example let request = StorageRequest.example
let item = let item = SlotQueueItem.init(
SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = false) request.id,
1'u16,
request.ask,
request.expiry,
request.ask.collateralPerSlot,
seen = false,
)
check queue.paused check queue.paused
# push causes unpause # push causes unpause
check queue.push(item).isOk check queue.push(item).isOk
@ -579,10 +632,22 @@ suite "Slot queue":
test "processing a 'seen' item pauses the queue": test "processing a 'seen' item pauses the queue":
newSlotQueue(maxSize = 4, maxWorkers = 4) newSlotQueue(maxSize = 4, maxWorkers = 4)
let request = StorageRequest.example let request = StorageRequest.example
let unseen = let unseen = SlotQueueItem.init(
SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false) request.id,
let seen = 0'u16,
SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) request.ask,
request.expiry,
request.ask.collateralPerSlot,
seen = false,
)
let seen = SlotQueueItem.init(
request.id,
1'u16,
request.ask,
request.expiry,
request.ask.collateralPerSlot,
seen = true,
)
# push causes unpause # push causes unpause
check queue.push(unseen).isSuccess check queue.push(unseen).isSuccess
# check all items processed # check all items processed
@ -595,10 +660,22 @@ suite "Slot queue":
test "processing a 'seen' item does not decrease the number of workers": test "processing a 'seen' item does not decrease the number of workers":
newSlotQueue(maxSize = 4, maxWorkers = 4) newSlotQueue(maxSize = 4, maxWorkers = 4)
let request = StorageRequest.example let request = StorageRequest.example
let unseen = let unseen = SlotQueueItem.init(
SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false) request.id,
let seen = 0'u16,
SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) request.ask,
request.expiry,
request.ask.collateralPerSlot,
seen = false,
)
let seen = SlotQueueItem.init(
request.id,
1'u16,
request.ask,
request.expiry,
request.ask.collateralPerSlot,
seen = true,
)
# push seen item to ensure that queue is pausing # push seen item to ensure that queue is pausing
check queue.push(seen).isSuccess check queue.push(seen).isSuccess
# unpause and pause a number of times # unpause and pause a number of times
@ -615,10 +692,22 @@ suite "Slot queue":
test "item 'seen' flags can be cleared": test "item 'seen' flags can be cleared":
newSlotQueue(maxSize = 4, maxWorkers = 1) newSlotQueue(maxSize = 4, maxWorkers = 1)
let request = StorageRequest.example let request = StorageRequest.example
let item0 = let item0 = SlotQueueItem.init(
SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true) request.id,
let item1 = 0'u16,
SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) request.ask,
request.expiry,
request.ask.collateralPerSlot,
seen = true,
)
let item1 = SlotQueueItem.init(
request.id,
1'u16,
request.ask,
request.expiry,
request.ask.collateralPerSlot,
seen = true,
)
check queue.push(item0).isOk check queue.push(item0).isOk
check queue.push(item1).isOk check queue.push(item1).isOk
check queue[0].seen check queue[0].seen

View File

@ -133,7 +133,7 @@ suite "Slot builder":
check: check:
Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg ==
"Number of blocks must be divisable by number of slots." "Number of blocks must be divisible by number of slots."
test "Block size must be divisable by cell size": test "Block size must be divisable by cell size":
let mismatchManifest = Manifest.new( let mismatchManifest = Manifest.new(
@ -151,7 +151,7 @@ suite "Slot builder":
check: check:
Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg ==
"Block size must be divisable by cell size." "Block size must be divisible by cell size."
test "Should build correct slot builder": test "Should build correct slot builder":
builder = builder =

View File

@ -1,6 +1,6 @@
import std/unittest
import std/random import std/random
import pkg/unittest2
import pkg/stew/objects import pkg/stew/objects
import pkg/questionable import pkg/questionable
import pkg/questionable/results import pkg/questionable/results
@ -11,7 +11,7 @@ import pkg/codex/stores/repostore/coders
import ../../helpers import ../../helpers
checksuite "Test coders": suite "Test coders":
proc rand(T: type NBytes): T = proc rand(T: type NBytes): T =
rand(Natural).NBytes rand(Natural).NBytes

View File

@ -11,7 +11,7 @@ import ./commonstoretests
import ../../asynctest import ../../asynctest
import ../helpers import ../helpers
checksuite "Cache Store": suite "Cache Store":
var var
newBlock, newBlock1, newBlock2, newBlock3: Block newBlock, newBlock1, newBlock2, newBlock3: Block
store: CacheStore store: CacheStore

View File

@ -36,7 +36,7 @@ proc createManifestCid(): ?!Cid =
let cid = ?Cid.init(version, codec, hash).mapFailure let cid = ?Cid.init(version, codec, hash).mapFailure
return success cid return success cid
checksuite "KeyUtils": suite "KeyUtils":
test "makePrefixKey should create block key": test "makePrefixKey should create block key":
let length = 6 let length = 6
let cid = Cid.example let cid = Cid.example

View File

@ -21,7 +21,7 @@ import ../examples
import codex/stores/maintenance import codex/stores/maintenance
checksuite "BlockMaintainer": suite "BlockMaintainer":
var mockRepoStore: MockRepoStore var mockRepoStore: MockRepoStore
var interval: Duration var interval: Duration
var mockTimer: MockTimer var mockTimer: MockTimer

View File

@ -24,7 +24,7 @@ import ../helpers/mockclock
import ../examples import ../examples
import ./commonstoretests import ./commonstoretests
checksuite "Test RepoStore start/stop": suite "Test RepoStore start/stop":
var var
repoDs: Datastore repoDs: Datastore
metaDs: Datastore metaDs: Datastore

View File

@ -22,7 +22,7 @@ proc toSortedSeq[T](h: AsyncHeapQueue[T], queueType = QueueType.Min): seq[T] =
while tmp.len > 0: while tmp.len > 0:
result.add(popNoWait(tmp).tryGet()) result.add(popNoWait(tmp).tryGet())
checksuite "Synchronous tests": suite "Synchronous tests":
test "Test pushNoWait - Min": test "Test pushNoWait - Min":
var heap = newAsyncHeapQueue[int]() var heap = newAsyncHeapQueue[int]()
let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]

View File

@ -27,7 +27,7 @@ asyncchecksuite "Chunking":
let contents = [1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0] let contents = [1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0]
proc reader( proc reader(
data: ChunkBuffer, len: int data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} = ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
let read = min(contents.len - offset, len) let read = min(contents.len - offset, len)
if read == 0: if read == 0:
return 0 return 0
@ -97,8 +97,13 @@ asyncchecksuite "Chunking":
discard (await chunker.getBytes()) discard (await chunker.getBytes())
test "stream should forward LPStreamError": test "stream should forward LPStreamError":
expect LPStreamError: try:
await raiseStreamException(newException(LPStreamError, "test error")) await raiseStreamException(newException(LPStreamError, "test error"))
except ChunkerError as exc:
check exc.parent of LPStreamError
except CatchableError as exc:
checkpoint("Unexpected error: " & exc.msg)
fail()
test "stream should catch LPStreamEOFError": test "stream should catch LPStreamEOFError":
await raiseStreamException(newException(LPStreamEOFError, "test error")) await raiseStreamException(newException(LPStreamEOFError, "test error"))
@ -106,7 +111,3 @@ asyncchecksuite "Chunking":
test "stream should forward CancelledError": test "stream should forward CancelledError":
expect CancelledError: expect CancelledError:
await raiseStreamException(newException(CancelledError, "test error")) await raiseStreamException(newException(CancelledError, "test error"))
test "stream should forward LPStreamError":
expect LPStreamError:
await raiseStreamException(newException(LPStreamError, "test error"))
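With the reader callback now declared as `raises: [ChunkerError, CancelledError]`, lower-level stream failures surface as a `ChunkerError` wrapping the original exception in `parent`, which is what the reworked LPStreamError test verifies. A minimal sketch of the calling pattern, assuming a `chunker` built over a libp2p stream as in this suite:
try:
  discard await chunker.getBytes()
except ChunkerError as exc:
  # the original stream error is preserved on `parent`
  if exc.parent of LPStreamError:
    checkpoint("stream failed: " & exc.parent.msg)
except CancelledError as exc:
  raise exc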

View File

@ -1,9 +1,9 @@
import std/unittest import pkg/unittest2
import codex/clock import codex/clock
import ./helpers import ./helpers
checksuite "Clock": suite "Clock":
proc testConversion(seconds: SecondsSince1970) = proc testConversion(seconds: SecondsSince1970) =
let asBytes = seconds.toBytes let asBytes = seconds.toBytes

View File

@ -228,7 +228,7 @@ suite "Erasure encode/decode":
discard (await erasure.decode(encoded)).tryGet() discard (await erasure.decode(encoded)).tryGet()
test "Should concurrently encode/decode multiple datasets": test "Should concurrently encode/decode multiple datasets":
const iterations = 2 const iterations = 5
let let
datasetSize = 1.MiBs datasetSize = 1.MiBs
@ -335,18 +335,18 @@ suite "Erasure encode/decode":
for i in 0 ..< parityLen: for i in 0 ..< parityLen:
paritySeq[i] = cast[seq[byte]](parity[i]) paritySeq[i] = cast[seq[byte]](parity[i])
# call encodeAsync to get the parity # call asyncEncode to get the parity
let encFut = let encFut =
await erasure.encodeAsync(BlockSize.int, blocksLen, parityLen, data, parity) await erasure.asyncEncode(BlockSize.int, blocksLen, parityLen, data, parity)
check encFut.isOk check encFut.isOk
let decFut = await erasure.decodeAsync( let decFut = await erasure.asyncDecode(
BlockSize.int, blocksLen, parityLen, data, paritySeq, recovered BlockSize.int, blocksLen, parityLen, data, paritySeq, recovered
) )
check decFut.isOk check decFut.isOk
# call encodeAsync and cancel the task # call asyncEncode and cancel the task
let encodeFut = erasure.encodeAsync( let encodeFut = erasure.asyncEncode(
BlockSize.int, blocksLen, parityLen, data, cancelledTaskParity BlockSize.int, blocksLen, parityLen, data, cancelledTaskParity
) )
encodeFut.cancel() encodeFut.cancel()
@ -359,8 +359,8 @@ suite "Erasure encode/decode":
for i in 0 ..< parityLen: for i in 0 ..< parityLen:
check equalMem(parity[i], cancelledTaskParity[i], BlockSize.int) check equalMem(parity[i], cancelledTaskParity[i], BlockSize.int)
# call decodeAsync and cancel the task # call asyncDecode and cancel the task
let decodeFut = erasure.decodeAsync( let decodeFut = erasure.asyncDecode(
BlockSize.int, blocksLen, parityLen, data, paritySeq, cancelledTaskRecovered BlockSize.int, blocksLen, parityLen, data, paritySeq, cancelledTaskRecovered
) )
decodeFut.cancel() decodeFut.cancel()

View File

@ -1,6 +1,7 @@
import std/options import std/options
import std/strutils import std/strutils
import std/unittest
import pkg/unittest2
import pkg/codex/blocktype import pkg/codex/blocktype
import pkg/codex/conf import pkg/codex/conf
import pkg/codex/contracts/requests import pkg/codex/contracts/requests

View File

@ -13,7 +13,7 @@ import ../asynctest
import ./helpers import ./helpers
import ./examples import ./examples
checksuite "Manifest": suite "Manifest":
let let
manifest = manifest =
Manifest.new(treeCid = Cid.example, blockSize = 1.MiBs, datasetSize = 100.MiBs) Manifest.new(treeCid = Cid.example, blockSize = 1.MiBs, datasetSize = 100.MiBs)

View File

@ -116,7 +116,7 @@ asyncchecksuite "Purchasing":
await purchase.wait() await purchase.wait()
check market.withdrawn == @[request.id] check market.withdrawn == @[request.id]
checksuite "Purchasing state machine": suite "Purchasing state machine":
var purchasing: Purchasing var purchasing: Purchasing
var market: MockMarket var market: MockMarket
var clock: MockClock var clock: MockClock

View File

@ -1,10 +1,10 @@
import std/times import std/times
import std/unittest
import codex/systemclock import pkg/unittest2
import pkg/codex/systemclock
import ./helpers import ./helpers
checksuite "SystemClock": suite "SystemClock":
test "Should get now": test "Should get now":
let clock = SystemClock.new() let clock = SystemClock.new()

View File

@ -7,7 +7,7 @@ import pkg/codex/utils/iter
import ../../asynctest import ../../asynctest
import ../helpers import ../helpers
checksuite "Test Iter": suite "Test Iter":
test "Should be finished": test "Should be finished":
let iter = Iter[int].empty() let iter = Iter[int].empty()

View File

@ -1,12 +1,14 @@
import std/unittest
import std/os import std/os
import codex/utils/keyutils
import pkg/unittest2
import pkg/codex/utils/keyutils
import ../helpers import ../helpers
when defined(windows): when defined(windows):
import stew/windows/acl import stew/windows/acl
checksuite "keyutils": suite "keyutils":
let path = getTempDir() / "CodexTest" let path = getTempDir() / "CodexTest"
setup: setup:

View File

@ -1,8 +1,9 @@
import std/unittest import pkg/unittest2
import codex/utils/options import pkg/codex/utils/options
import ../helpers import ../helpers
checksuite "optional casts": suite "optional casts":
test "casting value to same type works": test "casting value to same type works":
check 42 as int == some 42 check 42 as int == some 42
@ -31,7 +32,7 @@ checksuite "optional casts":
check 42.some as string == string.none check 42.some as string == string.none
check int.none as int == int.none check int.none as int == int.none
checksuite "Optionalize": suite "Optionalize":
test "does not except non-object types": test "does not except non-object types":
static: static:
doAssert not compiles(Optionalize(int)) doAssert not compiles(Optionalize(int))

View File

@ -17,47 +17,71 @@ asyncchecksuite "tracked futures":
check module.trackedFutures.len == 0 check module.trackedFutures.len == 0
test "tracks unfinished futures": test "tracks unfinished futures":
let fut = newFuture[void]("test") let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
module.trackedFutures.track(fut) module.trackedFutures.track(fut)
check module.trackedFutures.len == 1 check module.trackedFutures.len == 1
test "does not track completed futures": test "does not track completed futures":
let fut = newFuture[void]("test") let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.complete() fut.complete()
module.trackedFutures.track(fut) module.trackedFutures.track(fut)
check eventually module.trackedFutures.len == 0 check module.trackedFutures.len == 0
test "does not track failed futures":
let fut = newFuture[void]("test")
fut.fail((ref CatchableError)(msg: "some error"))
module.trackedFutures.track(fut)
check eventually module.trackedFutures.len == 0
test "does not track cancelled futures": test "does not track cancelled futures":
let fut = newFuture[void]("test") let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.cancelCallback = proc(data: pointer) =
fut.cancelAndSchedule() # manually schedule the cancel
await fut.cancelAndWait() await fut.cancelAndWait()
module.trackedFutures.track(fut) module.trackedFutures.track(fut)
check eventually module.trackedFutures.len == 0 check eventually module.trackedFutures.len == 0
test "removes tracked future when finished": test "removes tracked future when finished":
let fut = newFuture[void]("test") let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
module.trackedFutures.track(fut) module.trackedFutures.track(fut)
check module.trackedFutures.len == 1
fut.complete() fut.complete()
check eventually module.trackedFutures.len == 0 check eventually module.trackedFutures.len == 0
test "removes tracked future when cancelled": test "removes tracked future when cancelled":
let fut = newFuture[void]("test") let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.cancelCallback = proc(data: pointer) =
fut.cancelAndSchedule() # manually schedule the cancel
module.trackedFutures.track(fut) module.trackedFutures.track(fut)
check module.trackedFutures.len == 1
await fut.cancelAndWait()
check eventually module.trackedFutures.len == 0
test "completed and removes future on cancel":
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.cancelCallback = proc(data: pointer) =
fut.complete()
module.trackedFutures.track(fut)
check module.trackedFutures.len == 1
await fut.cancelAndWait() await fut.cancelAndWait()
check eventually module.trackedFutures.len == 0 check eventually module.trackedFutures.len == 0
test "cancels and removes all tracked futures": test "cancels and removes all tracked futures":
let fut1 = newFuture[void]("test1") let fut1 = Future[void].Raising([]).init("test1", {FutureFlag.OwnCancelSchedule})
let fut2 = newFuture[void]("test2") fut1.cancelCallback = proc(data: pointer) =
let fut3 = newFuture[void]("test3") fut1.cancelAndSchedule() # manually schedule the cancel
let fut2 = Future[void].Raising([]).init("test2", {FutureFlag.OwnCancelSchedule})
fut2.cancelCallback = proc(data: pointer) =
fut2.cancelAndSchedule() # manually schedule the cancel
let fut3 = Future[void].Raising([]).init("test3", {FutureFlag.OwnCancelSchedule})
fut3.cancelCallback = proc(data: pointer) =
fut3.cancelAndSchedule() # manually schedule the cancel
module.trackedFutures.track(fut1) module.trackedFutures.track(fut1)
check module.trackedFutures.len == 1
module.trackedFutures.track(fut2) module.trackedFutures.track(fut2)
check module.trackedFutures.len == 2
module.trackedFutures.track(fut3) module.trackedFutures.track(fut3)
check module.trackedFutures.len == 3
await module.trackedFutures.cancelTracked() await module.trackedFutures.cancelTracked()
check eventually fut1.cancelled check eventually fut1.cancelled
check eventually fut2.cancelled check eventually fut2.cancelled
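These tests now build raw chronos futures with `OwnCancelSchedule`, so cancellation is not scheduled automatically; each test wires a `cancelCallback` that either re-schedules the cancel or completes the future outright. A condensed sketch of the pattern, assuming `module.trackedFutures` from this suite's setup:
let fut = Future[void].Raising([]).init("sketch", {FutureFlag.OwnCancelSchedule})
fut.cancelCallback = proc(data: pointer) =
  fut.cancelAndSchedule() # with OwnCancelSchedule the cancel must be scheduled by hand
module.trackedFutures.track(fut)
await fut.cancelAndWait()
check eventually module.trackedFutures.len == 0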

View File

@ -1,4 +1,4 @@
import std/unittest import pkg/unittest2
import pkg/codex/utils import pkg/codex/utils

View File

@ -598,6 +598,37 @@ ethersuite "On-Chain Market":
check endBalanceHost == (startBalanceHost + request.ask.collateralPerSlot) check endBalanceHost == (startBalanceHost + request.ask.collateralPerSlot)
check endBalanceReward == (startBalanceReward + expectedPayout) check endBalanceReward == (startBalanceReward + expectedPayout)
test "returns the collateral when the slot is not being repaired":
await market.requestStorage(request)
await market.reserveSlot(request.id, 0.uint64)
await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot)
let slotId = request.slotId(0.uint64)
without collateral =? await market.slotCollateral(request.id, 0.uint64), error:
fail()
check collateral == request.ask.collateralPerSlot
test "calculates correctly the collateral when the slot is being repaired":
# Ensure that the config is loaded and repairRewardPercentage is available
discard await market.repairRewardPercentage()
await market.requestStorage(request)
await market.reserveSlot(request.id, 0.uint64)
await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot)
await market.freeSlot(slotId(request.id, 0.uint64))
let slotId = request.slotId(0.uint64)
without collateral =? await market.slotCollateral(request.id, 0.uint64), error:
fail()
# slotCollateral
# repairRewardPercentage = 10
# expected collateral = slotCollateral - slotCollateral * 0.1
check collateral ==
request.ask.collateralPerSlot - (request.ask.collateralPerSlot * 10).div(100.u256)
test "the request is added in cache after the fist access": test "the request is added in cache after the fist access":
await market.requestStorage(request) await market.requestStorage(request)
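The repair test above encodes the discount applied when a freed slot is re-filled: with a `repairRewardPercentage` of 10, the repairing host posts the per-slot collateral minus 10% of it. A worked sketch of the same integer arithmetic, assuming UInt256 operands as in the check (names are illustrative):
proc repairedCollateral(collateralPerSlot, repairRewardPercentage: UInt256): UInt256 =
  # e.g. collateralPerSlot = 100, percentage = 10  =>  100 - (100 * 10) div 100 = 90
  collateralPerSlot - (collateralPerSlot * repairRewardPercentage).div(100.u256)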

View File

@ -72,7 +72,9 @@ proc example*(_: type Slot): Slot =
proc example*(_: type SlotQueueItem): SlotQueueItem = proc example*(_: type SlotQueueItem): SlotQueueItem =
let request = StorageRequest.example let request = StorageRequest.example
let slot = Slot.example let slot = Slot.example
SlotQueueItem.init(request, slot.slotIndex.uint16) SlotQueueItem.init(
request, slot.slotIndex.uint16, collateral = request.ask.collateralPerSlot
)
proc example(_: type G1Point): G1Point = proc example(_: type G1Point): G1Point =
G1Point(x: UInt256.example, y: UInt256.example) G1Point(x: UInt256.example, y: UInt256.example)

View File

@ -2,4 +2,36 @@ import helpers/multisetup
import helpers/trackers import helpers/trackers
import helpers/templeveldb import helpers/templeveldb
import std/sequtils, chronos
export multisetup, trackers, templeveldb export multisetup, trackers, templeveldb
### taken from libp2p errorhelpers.nim
proc allFuturesThrowing*(args: varargs[FutureBase]): Future[void] =
# This proc is only meant for use in tests / not suitable for general use.
# - Swallowing errors arbitrarily instead of aggregating them is bad design
# - It raises `CatchableError` instead of the union of the `futs` errors,
# inflating the caller's `raises` list unnecessarily. `macro` could fix it
let futs = @args
(
proc() {.async: (raises: [CatchableError]).} =
await allFutures(futs)
var firstErr: ref CatchableError
for fut in futs:
if fut.failed:
let err = fut.error()
if err of CancelledError:
raise err
if firstErr == nil:
firstErr = err
if firstErr != nil:
raise firstErr
)()
proc allFuturesThrowing*[T](futs: varargs[Future[T]]): Future[void] =
allFuturesThrowing(futs.mapIt(FutureBase(it)))
proc allFuturesThrowing*[T, E]( # https://github.com/nim-lang/Nim/issues/23432
futs: varargs[InternalRaisesFuture[T, E]]
): Future[void] =
allFuturesThrowing(futs.mapIt(FutureBase(it)))
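As the comment notes, the helper re-raises the first failure (or a cancellation) only after all futures have settled, rather than aggregating errors. A minimal usage sketch for an async test, where `a` and `b` stand in for any futures under test:
proc a() {.async.} =
  await sleepAsync(1.millis)

proc b() {.async.} =
  raise newException(CatchableError, "boom")

# waits for both to settle, then re-raises b's error
await allFuturesThrowing(a(), b())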

View File

@ -1,5 +1,5 @@
import pkg/codex/streams/storestream import pkg/codex/streams/storestream
import std/unittest import pkg/unittest2
# From libp2p/tests/helpers # From libp2p/tests/helpers
const trackerNames = [StoreStreamTrackerName] const trackerNames = [StoreStreamTrackerName]

View File

@ -4,115 +4,216 @@ import std/strutils
from pkg/libp2p import Cid, `$`, init from pkg/libp2p import Cid, `$`, init
import pkg/stint import pkg/stint
import pkg/questionable/results import pkg/questionable/results
import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient] import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient, httptable]
import pkg/codex/logutils import pkg/codex/logutils
import pkg/codex/rest/json import pkg/codex/rest/json
import pkg/codex/purchasing import pkg/codex/purchasing
import pkg/codex/errors import pkg/codex/errors
import pkg/codex/sales/reservations import pkg/codex/sales/reservations
export purchasing export purchasing, httptable, httpclient
type CodexClient* = ref object type CodexClient* = ref object
http: HttpClient
baseurl: string baseurl: string
session: HttpSessionRef session: HttpSessionRef
type CodexClientError* = object of CatchableError
const HttpClientTimeoutMs = 60 * 1000
proc new*(_: type CodexClient, baseurl: string): CodexClient = proc new*(_: type CodexClient, baseurl: string): CodexClient =
CodexClient( CodexClient(session: HttpSessionRef.new(), baseurl: baseurl)
http: newHttpClient(timeout = HttpClientTimeoutMs),
baseurl: baseurl,
session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline}),
)
proc info*(client: CodexClient): ?!JsonNode = proc close*(self: CodexClient): Future[void] {.async: (raises: []).} =
let url = client.baseurl & "/debug/info" await self.session.closeWait()
JsonNode.parse(client.http.getContent(url))
proc setLogLevel*(client: CodexClient, level: string) = proc request(
let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level self: CodexClient,
let headers = newHttpHeaders({"Content-Type": "text/plain"}) httpMethod: httputils.HttpMethod,
let response = client.http.request(url, httpMethod = HttpPost, headers = headers) url: string,
assert response.status == "200 OK" body: openArray[char] = [],
headers: openArray[HttpHeaderTuple] = [],
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
HttpClientRequestRef
.new(
self.session,
url,
httpMethod,
version = HttpVersion11,
flags = {},
maxResponseHeadersSize = HttpMaxHeadersSize,
headers = headers,
body = body.toOpenArrayByte(0, len(body) - 1),
).get
.send()
proc upload*(client: CodexClient, contents: string): ?!Cid = proc post(
let response = client.http.post(client.baseurl & "/data", contents) self: CodexClient,
assert response.status == "200 OK" url: string,
Cid.init(response.body).mapFailure body: string = "",
headers: seq[HttpHeaderTuple] = @[],
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodPost, url, headers = headers, body = body)
proc upload*(client: CodexClient, bytes: seq[byte]): ?!Cid = proc get(
client.upload(string.fromBytes(bytes)) self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodGet, url, headers = headers)
proc download*(client: CodexClient, cid: Cid, local = false): ?!string = proc delete(
let response = client.http.get( self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") ): Future[HttpClientResponseRef] {.
) async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodDelete, url, headers = headers)
if response.status != "200 OK": proc patch(
return failure(response.status) self: CodexClient,
url: string,
body: string = "",
headers: seq[HttpHeaderTuple] = @[],
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodPatch, url, headers = headers, body = body)
success response.body proc body*(
response: HttpClientResponseRef
): Future[string] {.async: (raises: [CancelledError, HttpError]).} =
return bytesToString (await response.getBodyBytes())
proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string = proc getContent(
let response = client.http.get(client.baseurl & "/data/" & $cid & "/network/manifest") client: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
): Future[string] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.get(url, headers)
return await response.body
if response.status != "200 OK": proc info*(
return failure(response.status) client: CodexClient
): Future[?!JsonNode] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.get(client.baseurl & "/debug/info")
return JsonNode.parse(await response.body)
success response.body proc setLogLevel*(
client: CodexClient, level: string
): Future[void] {.async: (raises: [CancelledError, HttpError]).} =
let
url = client.baseurl & "/debug/chronicles/loglevel?level=" & level
headers = @[("Content-Type", "text/plain")]
response = await client.post(url, headers = headers, body = "")
assert response.status == 200
proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = proc uploadRaw*(
let response = client.http.post(client.baseurl & "/data/" & $cid & "/network") client: CodexClient, contents: string, headers: seq[HttpHeaderTuple] = @[]
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return client.post(client.baseurl & "/data", body = contents, headers = headers)
if response.status != "200 OK": proc upload*(
return failure(response.status) client: CodexClient, contents: string
): Future[?!Cid] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.uploadRaw(contents)
assert response.status == 200
Cid.init(await response.body).mapFailure
success response.body proc upload*(
client: CodexClient, bytes: seq[byte]
): Future[?!Cid] {.async: (raw: true).} =
return client.upload(string.fromBytes(bytes))
proc downloadRaw*(
client: CodexClient, cid: string, local = false
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return
client.get(client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"))
proc downloadBytes*( proc downloadBytes*(
client: CodexClient, cid: Cid, local = false client: CodexClient, cid: Cid, local = false
): Future[?!seq[byte]] {.async.} = ): Future[?!seq[byte]] {.async: (raises: [CancelledError, HttpError]).} =
let uri = let response = await client.downloadRaw($cid, local = local)
parseUri(client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream"))
let (status, bytes) = await client.session.fetch(uri) if response.status != 200:
return failure($response.status)
if status != 200: success await response.getBodyBytes()
return failure("fetch failed with status " & $status)
success bytes proc download*(
client: CodexClient, cid: Cid, local = false
): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
without response =? await client.downloadBytes(cid, local = local), err:
return failure(err)
return success bytesToString(response)
proc delete*(client: CodexClient, cid: Cid): ?!void = proc downloadNoStream*(
let client: CodexClient, cid: Cid
url = client.baseurl & "/data/" & $cid ): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
response = client.http.delete(url) let response = await client.post(client.baseurl & "/data/" & $cid & "/network")
if response.status != "204 No Content": if response.status != 200:
return failure(response.status) return failure($response.status)
success await response.body
proc downloadManifestOnly*(
client: CodexClient, cid: Cid
): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
let response =
await client.get(client.baseurl & "/data/" & $cid & "/network/manifest")
if response.status != 200:
return failure($response.status)
success await response.body
proc deleteRaw*(
client: CodexClient, cid: string
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return client.delete(client.baseurl & "/data/" & cid)
proc delete*(
client: CodexClient, cid: Cid
): Future[?!void] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.deleteRaw($cid)
if response.status != 204:
return failure($response.status)
success() success()
proc list*(client: CodexClient): ?!RestContentList = proc listRaw*(
let url = client.baseurl & "/data" client: CodexClient
let response = client.http.get(url) ): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return client.get(client.baseurl & "/data")
if response.status != "200 OK": proc list*(
return failure(response.status) client: CodexClient
): Future[?!RestContentList] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.listRaw()
RestContentList.fromJson(response.body) if response.status != 200:
return failure($response.status)
proc space*(client: CodexClient): ?!RestRepoStore = RestContentList.fromJson(await response.body)
proc space*(
client: CodexClient
): Future[?!RestRepoStore] {.async: (raises: [CancelledError, HttpError]).} =
let url = client.baseurl & "/space" let url = client.baseurl & "/space"
let response = client.http.get(url) let response = await client.get(url)
if response.status != "200 OK": if response.status != 200:
return failure(response.status) return failure($response.status)
RestRepoStore.fromJson(response.body) RestRepoStore.fromJson(await response.body)
proc requestStorageRaw*( proc requestStorageRaw*(
client: CodexClient, client: CodexClient,
@ -124,7 +225,9 @@ proc requestStorageRaw*(
expiry: uint64 = 0, expiry: uint64 = 0,
nodes: uint = 3, nodes: uint = 3,
tolerance: uint = 1, tolerance: uint = 1,
): Response = ): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
## Call request storage REST endpoint ## Call request storage REST endpoint
## ##
let url = client.baseurl & "/storage/request/" & $cid let url = client.baseurl & "/storage/request/" & $cid
@ -141,7 +244,7 @@ proc requestStorageRaw*(
if expiry != 0: if expiry != 0:
json["expiry"] = %($expiry) json["expiry"] = %($expiry)
return client.http.post(url, $json) return client.post(url, $json)
proc requestStorage*( proc requestStorage*(
client: CodexClient, client: CodexClient,
@ -153,43 +256,45 @@ proc requestStorage*(
collateralPerByte: UInt256, collateralPerByte: UInt256,
nodes: uint = 3, nodes: uint = 3,
tolerance: uint = 1, tolerance: uint = 1,
): ?!PurchaseId = ): Future[?!PurchaseId] {.async: (raises: [CancelledError, HttpError]).} =
## Call request storage REST endpoint ## Call request storage REST endpoint
## ##
let response = client.requestStorageRaw( let
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, response = await client.requestStorageRaw(
nodes, tolerance, cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
) nodes, tolerance,
if response.status != "200 OK": )
doAssert(false, response.body) body = await response.body
PurchaseId.fromHex(response.body).catch
proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase = if response.status != 200:
doAssert(false, body)
PurchaseId.fromHex(body).catch
proc getPurchase*(
client: CodexClient, purchaseId: PurchaseId
): Future[?!RestPurchase] {.async: (raises: [CancelledError, HttpError]).} =
let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex
try: try:
let body = client.http.getContent(url) let body = await client.getContent(url)
return RestPurchase.fromJson(body) return RestPurchase.fromJson(body)
except CatchableError as e: except CatchableError as e:
return failure e.msg return failure e.msg
proc getSalesAgent*(client: CodexClient, slotId: SlotId): ?!RestSalesAgent = proc getSalesAgent*(
client: CodexClient, slotId: SlotId
): Future[?!RestSalesAgent] {.async: (raises: [CancelledError, HttpError]).} =
let url = client.baseurl & "/sales/slots/" & slotId.toHex let url = client.baseurl & "/sales/slots/" & slotId.toHex
try: try:
let body = client.http.getContent(url) let body = await client.getContent(url)
return RestSalesAgent.fromJson(body) return RestSalesAgent.fromJson(body)
except CatchableError as e: except CatchableError as e:
return failure e.msg return failure e.msg
proc getSlots*(client: CodexClient): ?!seq[Slot] =
let url = client.baseurl & "/sales/slots"
let body = client.http.getContent(url)
seq[Slot].fromJson(body)
proc postAvailability*( proc postAvailability*(
client: CodexClient, client: CodexClient,
totalSize, duration: uint64, totalSize, duration: uint64,
minPricePerBytePerSecond, totalCollateral: UInt256, minPricePerBytePerSecond, totalCollateral: UInt256,
): ?!Availability = ): Future[?!Availability] {.async: (raises: [CancelledError, HttpError]).} =
## Post sales availability endpoint ## Post sales availability endpoint
## ##
let url = client.baseurl & "/sales/availability" let url = client.baseurl & "/sales/availability"
@ -200,17 +305,21 @@ proc postAvailability*(
"minPricePerBytePerSecond": minPricePerBytePerSecond, "minPricePerBytePerSecond": minPricePerBytePerSecond,
"totalCollateral": totalCollateral, "totalCollateral": totalCollateral,
} }
let response = client.http.post(url, $json) let response = await client.post(url, $json)
doAssert response.status == "201 Created", let body = await response.body
"expected 201 Created, got " & response.status & ", body: " & response.body
Availability.fromJson(response.body) doAssert response.status == 201,
"expected 201 Created, got " & $response.status & ", body: " & body
Availability.fromJson(body)
proc patchAvailabilityRaw*( proc patchAvailabilityRaw*(
client: CodexClient, client: CodexClient,
availabilityId: AvailabilityId, availabilityId: AvailabilityId,
totalSize, freeSize, duration: ?uint64 = uint64.none, totalSize, freeSize, duration: ?uint64 = uint64.none,
minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none,
): Response = ): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
## Updates availability ## Updates availability
## ##
let url = client.baseurl & "/sales/availability/" & $availabilityId let url = client.baseurl & "/sales/availability/" & $availabilityId
@ -233,68 +342,50 @@ proc patchAvailabilityRaw*(
if totalCollateral =? totalCollateral: if totalCollateral =? totalCollateral:
json["totalCollateral"] = %totalCollateral json["totalCollateral"] = %totalCollateral
client.http.patch(url, $json) client.patch(url, $json)
proc patchAvailability*( proc patchAvailability*(
client: CodexClient, client: CodexClient,
availabilityId: AvailabilityId, availabilityId: AvailabilityId,
totalSize, duration: ?uint64 = uint64.none, totalSize, duration: ?uint64 = uint64.none,
minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none,
): void = ): Future[void] {.async: (raises: [CancelledError, HttpError]).} =
let response = client.patchAvailabilityRaw( let response = await client.patchAvailabilityRaw(
availabilityId, availabilityId,
totalSize = totalSize, totalSize = totalSize,
duration = duration, duration = duration,
minPricePerBytePerSecond = minPricePerBytePerSecond, minPricePerBytePerSecond = minPricePerBytePerSecond,
totalCollateral = totalCollateral, totalCollateral = totalCollateral,
) )
doAssert response.status == "200 OK", "expected 200 OK, got " & response.status doAssert response.status == 200, "expected 200 OK, got " & $response.status
proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = proc getAvailabilities*(
client: CodexClient
): Future[?!seq[Availability]] {.async: (raises: [CancelledError, HttpError]).} =
## Call sales availability REST endpoint ## Call sales availability REST endpoint
let url = client.baseurl & "/sales/availability" let url = client.baseurl & "/sales/availability"
let body = client.http.getContent(url) let body = await client.getContent(url)
seq[Availability].fromJson(body) seq[Availability].fromJson(body)
proc getAvailabilityReservations*( proc getAvailabilityReservations*(
client: CodexClient, availabilityId: AvailabilityId client: CodexClient, availabilityId: AvailabilityId
): ?!seq[Reservation] = ): Future[?!seq[Reservation]] {.async: (raises: [CancelledError, HttpError]).} =
## Retrieves Availability's Reservations ## Retrieves Availability's Reservations
let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations" let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations"
let body = client.http.getContent(url) let body = await client.getContent(url)
seq[Reservation].fromJson(body) seq[Reservation].fromJson(body)
proc close*(client: CodexClient) = proc purchaseStateIs*(
client.http.close() client: CodexClient, id: PurchaseId, state: string
): Future[bool] {.async: (raises: [CancelledError, HttpError]).} =
(await client.getPurchase(id)).option .? state == some state
proc restart*(client: CodexClient) = proc saleStateIs*(
client.http.close() client: CodexClient, id: SlotId, state: string
client.http = newHttpClient(timeout = HttpClientTimeoutMs) ): Future[bool] {.async: (raises: [CancelledError, HttpError]).} =
(await client.getSalesAgent(id)).option .? state == some state
proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool = proc requestId*(
client.getPurchase(id).option .? state == some state client: CodexClient, id: PurchaseId
): Future[?RequestId] {.async: (raises: [CancelledError, HttpError]).} =
proc saleStateIs*(client: CodexClient, id: SlotId, state: string): bool = return (await client.getPurchase(id)).option .? requestId
client.getSalesAgent(id).option .? state == some state
proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId =
return client.getPurchase(id).option .? requestId
proc uploadRaw*(
client: CodexClient, contents: string, headers = newHttpHeaders()
): Response =
return client.http.request(
client.baseurl & "/data", body = contents, httpMethod = HttpPost, headers = headers
)
proc listRaw*(client: CodexClient): Response =
return client.http.request(client.baseurl & "/data", httpMethod = HttpGet)
proc downloadRaw*(client: CodexClient, cid: string, local = false): Response =
return client.http.request(
client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"),
httpMethod = HttpGet,
)
proc deleteRaw*(client: CodexClient, cid: string): Response =
return client.http.request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete)

View File

@ -68,7 +68,7 @@ method stop*(node: CodexProcess) {.async.} =
trace "stopping codex client" trace "stopping codex client"
if client =? node.client: if client =? node.client:
client.close() await client.close()
node.client = none CodexClient node.client = none CodexClient
method removeDataDir*(node: CodexProcess) = method removeDataDir*(node: CodexProcess) =

View File

@ -60,13 +60,13 @@ template marketplacesuite*(name: string, body: untyped) =
duration: uint64, duration: uint64,
collateralPerByte: UInt256, collateralPerByte: UInt256,
minPricePerBytePerSecond: UInt256, minPricePerBytePerSecond: UInt256,
) = ): Future[void] {.async: (raises: [CancelledError, HttpError, ConfigurationError]).} =
let totalCollateral = datasetSize.u256 * collateralPerByte let totalCollateral = datasetSize.u256 * collateralPerByte
# post availability to each provider # post availability to each provider
for i in 0 ..< providers().len: for i in 0 ..< providers().len:
let provider = providers()[i].client let provider = providers()[i].client
discard provider.postAvailability( discard await provider.postAvailability(
totalSize = datasetSize, totalSize = datasetSize,
duration = duration.uint64, duration = duration.uint64,
minPricePerBytePerSecond = minPricePerBytePerSecond, minPricePerBytePerSecond = minPricePerBytePerSecond,
@ -83,16 +83,18 @@ template marketplacesuite*(name: string, body: untyped) =
expiry: uint64 = 4.periods, expiry: uint64 = 4.periods,
nodes = providers().len, nodes = providers().len,
tolerance = 0, tolerance = 0,
): Future[PurchaseId] {.async.} = ): Future[PurchaseId] {.async: (raises: [CancelledError, HttpError]).} =
let id = client.requestStorage( let id = (
cid, await client.requestStorage(
expiry = expiry, cid,
duration = duration, expiry = expiry,
proofProbability = proofProbability, duration = duration,
collateralPerByte = collateralPerByte, proofProbability = proofProbability,
pricePerBytePerSecond = pricePerBytePerSecond, collateralPerByte = collateralPerByte,
nodes = nodes.uint, pricePerBytePerSecond = pricePerBytePerSecond,
tolerance = tolerance.uint, nodes = nodes.uint,
tolerance = tolerance.uint,
)
).get ).get
return id return id

Some files were not shown because too many files have changed in this diff