diff --git a/.github/actions/nimbus-build-system/action.yml b/.github/actions/nimbus-build-system/action.yml index 219966db..5d1917e3 100644 --- a/.github/actions/nimbus-build-system/action.yml +++ b/.github/actions/nimbus-build-system/action.yml @@ -89,7 +89,7 @@ runs: - name: Install gcc 14 on Linux # We don't want to install gcc 14 for coverage (Ubuntu 20.04) - if : ${{ inputs.os == 'linux' && !inputs.coverage }} + if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }} shell: ${{ inputs.shell }} {0} run: | # Add GCC-14 to alternatives @@ -202,7 +202,7 @@ runs: - name: Restore Nim toolchain binaries from cache id: nim-cache uses: actions/cache@v4 - if : ${{ !inputs.coverage }} + if : ${{ inputs.coverage != 'true' }} with: path: NimBinaries key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }} diff --git a/.github/workflows/docker-reusable.yml b/.github/workflows/docker-reusable.yml index f0e46d95..7d937f78 100644 --- a/.github/workflows/docker-reusable.yml +++ b/.github/workflows/docker-reusable.yml @@ -94,11 +94,11 @@ jobs: - target: os: linux arch: amd64 - builder: ubuntu-22.04 + builder: ubuntu-24.04 - target: os: linux arch: arm64 - builder: ubuntu-22.04-arm + builder: ubuntu-24.04-arm name: Build ${{ matrix.target.os }}/${{ matrix.target.arch }} runs-on: ${{ matrix.builder }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 50b14d05..4232ff0f 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -2,17 +2,17 @@ name: OpenAPI on: push: - branches: - - 'master' + tags: + - "v*.*.*" paths: - - 'openapi.yaml' - - '.github/workflows/docs.yml' + - "openapi.yaml" + - ".github/workflows/docs.yml" pull_request: branches: - - '**' + - "**" paths: - - 'openapi.yaml' - - '.github/workflows/docs.yml' + - "openapi.yaml" + - ".github/workflows/docs.yml" # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages permissions: @@ -40,7 +40,7 
@@ jobs: deploy: name: Deploy runs-on: ubuntu-latest - if: github.ref == 'refs/heads/master' + if: startsWith(github.ref, 'refs/tags/') steps: - name: Checkout uses: actions/checkout@v4 diff --git a/.github/workflows/nim-matrix.yml b/.github/workflows/nim-matrix.yml index 4d86d3bb..71129574 100644 --- a/.github/workflows/nim-matrix.yml +++ b/.github/workflows/nim-matrix.yml @@ -20,10 +20,10 @@ jobs: uses: fabiocaccamo/create-matrix-action@v5 with: matrix: | - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} build: needs: matrix diff --git a/.gitignore b/.gitignore index 0e1f27db..f6292dda 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,5 @@ docker/prometheus-data .DS_Store nim.cfg tests/integration/logs + +data/ diff --git a/.gitmodules b/.gitmodules index ece88749..5cc2bfab 100644 --- 
a/.gitmodules +++ b/.gitmodules @@ -221,3 +221,13 @@ [submodule "vendor/nph"] path = vendor/nph url = https://github.com/arnetheduck/nph.git +[submodule "vendor/nim-quic"] + path = vendor/nim-quic + url = https://github.com/vacp2p/nim-quic.git + ignore = untracked + branch = master +[submodule "vendor/nim-ngtcp2"] + path = vendor/nim-ngtcp2 + url = https://github.com/vacp2p/nim-ngtcp2.git + ignore = untracked + branch = master diff --git a/Makefile b/Makefile index 3dfe8e7e..29d6c11d 100644 --- a/Makefile +++ b/Makefile @@ -229,6 +229,11 @@ nph/%: build-nph echo -e $(FORMAT_MSG) "nph/$*" && \ $(NPH) $* +format: + $(NPH) *.nim + $(NPH) codex/ + $(NPH) tests/ + clean-nph: rm -f $(NPH) diff --git a/README.md b/README.md index d073057f..2a15051f 100644 --- a/README.md +++ b/README.md @@ -59,8 +59,8 @@ Feel free to dive in, contributions are welcomed! Open an issue or submit PRs. ### Linting and formatting -`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is requrired to adhere to its styling. +`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling. If you are setting up fresh setup, in order to get `nph` run `make build-nph`. In order to format files run `make nph/`. -If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior commiting them. +If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them. If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension you can enable "Format On Save" (eq. the `nim.formatOnSave` property) that will format the files using `nph`. 
\ No newline at end of file diff --git a/build.nims b/build.nims index aa090e71..baf21e03 100644 --- a/build.nims +++ b/build.nims @@ -4,7 +4,6 @@ import std/os except commandLineParams ### Helper functions proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = - if not dirExists "build": mkDir "build" @@ -14,13 +13,15 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = for param in commandLineParams(): extra_params &= " " & param else: - for i in 2.. 0: + self.pendingBlocks.setInFlight(address, true) + await self.sendWantBlock(@[address], peers.with.randomPeer) + else: + self.pendingBlocks.setInFlight(address, false) + if peers.without.len > 0: + await self.sendWantHave(@[address], peers.without) + self.discovery.queueFindBlocksReq(@[address.cidOrTreeCid]) + + await (handle or sleepAsync(self.pendingBlocks.retryInterval)) + self.pendingBlocks.decRetries(address) + + if handle.finished: + trace "Handle for block finished", failed = handle.failed + break except CancelledError as exc: - trace "Block handle cancelled", address, peerId + trace "Block download cancelled" + if not handle.finished: + await handle.cancelAndWait() except CatchableError as exc: - warn "Error block handle, disconnecting peer", address, exc = exc.msg, peerId - - # TODO: really, this is just a quick and dirty way of - # preventing hitting the same "bad" peer every time, however, - # we might as well discover this on or next iteration, so - # it doesn't mean that we're never talking to this peer again. 
- # TODO: we need a lot more work around peer selection and - # prioritization - - # drop unresponsive peer - await b.network.switch.disconnect(peerId) - b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid]) - -proc pickPseudoRandom( - address: BlockAddress, peers: seq[BlockExcPeerCtx] -): BlockExcPeerCtx = - return peers[hash(address) mod peers.len] + warn "Error downloadloading block", exc = exc.msg + if not handle.finished: + handle.fail(exc) + finally: + self.pendingBlocks.setInFlight(address, false) proc requestBlock*( - b: BlockExcEngine, address: BlockAddress -): Future[?!Block] {.async.} = - let blockFuture = b.pendingBlocks.getWantHandle(address, b.blockFetchTimeout) + self: BlockExcEngine, address: BlockAddress +): Future[?!Block] {.async: (raises: [CancelledError]).} = + if address notin self.pendingBlocks: + self.trackedFutures.track(self.downloadInternal(address)) - if not b.pendingBlocks.isInFlight(address): - let peers = b.peers.getPeersForBlock(address) - - if peers.with.len == 0: - b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid]) - else: - let selected = pickPseudoRandom(address, peers.with) - asyncSpawn b.monitorBlockHandle(blockFuture, address, selected.id) - b.pendingBlocks.setInFlight(address) - await b.sendWantBlock(@[address], selected) - - await b.sendWantHave(@[address], peers.without) - - # Don't let timeouts bubble up. We can't be too broad here or we break - # cancellations. 
try: - success await blockFuture - except AsyncTimeoutError as err: + let handle = self.pendingBlocks.getWantHandle(address) + success await handle + except CancelledError as err: + warn "Block request cancelled", address + raise err + except CatchableError as err: + error "Block request failed", address, err = err.msg failure err -proc requestBlock*(b: BlockExcEngine, cid: Cid): Future[?!Block] = - b.requestBlock(BlockAddress.init(cid)) +proc requestBlock*( + self: BlockExcEngine, cid: Cid +): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} = + self.requestBlock(BlockAddress.init(cid)) proc blockPresenceHandler*( - b: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence] -) {.async.} = + self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence] +) {.async: (raises: []).} = + trace "Received block presence from peer", peer, blocks = blocks.mapIt($it) let - peerCtx = b.peers.get(peer) - wantList = toSeq(b.pendingBlocks.wantList) + peerCtx = self.peers.get(peer) + ourWantList = toSeq(self.pendingBlocks.wantList) if peerCtx.isNil: return @@ -228,82 +247,116 @@ proc blockPresenceHandler*( let peerHave = peerCtx.peerHave - dontWantCids = peerHave.filterIt(it notin wantList) + dontWantCids = peerHave.filterIt(it notin ourWantList) if dontWantCids.len > 0: peerCtx.cleanPresence(dontWantCids) - let wantCids = wantList.filterIt(it in peerHave) - - if wantCids.len > 0: - trace "Peer has blocks in our wantList", peer, wants = wantCids - await b.sendWantBlock(wantCids, peerCtx) - - # if none of the connected peers report our wants in their have list, - # fire up discovery - b.discovery.queueFindBlocksReq( - toSeq(b.pendingBlocks.wantListCids).filter do(cid: Cid) -> bool: - not b.peers.anyIt(cid in it.peerHaveCids) + let ourWantCids = ourWantList.filterIt( + it in peerHave and not self.pendingBlocks.retriesExhausted(it) and + not self.pendingBlocks.isInFlight(it) ) -proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = + 
for address in ourWantCids: + self.pendingBlocks.setInFlight(address, true) + self.pendingBlocks.decRetries(address) + + if ourWantCids.len > 0: + trace "Peer has blocks in our wantList", peer, wants = ourWantCids + if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption: + warn "Failed to send wantBlock to peer", peer, err = err.msg + +proc scheduleTasks( + self: BlockExcEngine, blocksDelivery: seq[BlockDelivery] +) {.async: (raises: [CancelledError]).} = let cids = blocksDelivery.mapIt(it.blk.cid) # schedule any new peers to provide blocks to - for p in b.peers: + for p in self.peers: for c in cids: # for each cid # schedule a peer if it wants at least one cid # and we have it in our local store if c in p.peerWantsCids: - if await (c in b.localStore): - if b.scheduleTask(p): - trace "Task scheduled for peer", peer = p.id - else: - warn "Unable to schedule task for peer", peer = p.id + try: + if await (c in self.localStore): + # TODO: the try/except should go away once blockstore tracks exceptions + self.scheduleTask(p) + break + except CancelledError as exc: + warn "Checking local store canceled", cid = c, err = exc.msg + return + except CatchableError as exc: + error "Error checking local store for cid", cid = c, err = exc.msg + raiseAssert "Unexpected error checking local store for cid" - break # do next peer - -proc cancelBlocks(b: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} = +proc cancelBlocks( + self: BlockExcEngine, addrs: seq[BlockAddress] +) {.async: (raises: [CancelledError]).} = ## Tells neighboring peers that we're no longer interested in a block. 
- trace "Sending block request cancellations to peers", - addrs, peers = b.peers.mapIt($it.id) + ## - let failed = ( - await allFinished( - b.peers.mapIt( - b.network.request.sendWantCancellations(peer = it.id, addresses = addrs) + if self.peers.len == 0: + return + + trace "Sending block request cancellations to peers", + addrs, peers = self.peers.peerIds + + proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} = + await self.network.request.sendWantCancellations( + peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx) + ) + + return peerCtx + + try: + let (succeededFuts, failedFuts) = await allFinishedFailed( + toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map( + processPeer ) ) - ).filterIt(it.failed) - if failed.len > 0: - warn "Failed to send block request cancellations to peers", peers = failed.len + (await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx): + peerCtx.cleanPresence(addrs) -proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = - b.pendingBlocks.resolve(blocksDelivery) - await b.scheduleTasks(blocksDelivery) - await b.cancelBlocks(blocksDelivery.mapIt(it.address)) + if failedFuts.len > 0: + warn "Failed to send block request cancellations to peers", peers = failedFuts.len + else: + trace "Block request cancellations sent to peers", peers = self.peers.len + except CancelledError as exc: + warn "Error sending block request cancellations", error = exc.msg + raise exc + except CatchableError as exc: + warn "Error sending block request cancellations", error = exc.msg -proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} = - await b.resolveBlocks( +proc resolveBlocks*( + self: BlockExcEngine, blocksDelivery: seq[BlockDelivery] +) {.async: (raises: [CancelledError]).} = + self.pendingBlocks.resolve(blocksDelivery) + await self.scheduleTasks(blocksDelivery) + await self.cancelBlocks(blocksDelivery.mapIt(it.address)) + 
+proc resolveBlocks*( + self: BlockExcEngine, blocks: seq[Block] +) {.async: (raises: [CancelledError]).} = + await self.resolveBlocks( blocks.mapIt( BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)) ) ) proc payForBlocks( - engine: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery] -) {.async.} = + self: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery] +) {.async: (raises: [CancelledError]).} = let - sendPayment = engine.network.request.sendPayment + sendPayment = self.network.request.sendPayment price = peer.price(blocksDelivery.mapIt(it.address)) - if payment =? engine.wallet.pay(peer, price): + if payment =? self.wallet.pay(peer, price): trace "Sending payment for blocks", price, len = blocksDelivery.len await sendPayment(peer.id, payment) -proc validateBlockDelivery(b: BlockExcEngine, bd: BlockDelivery): ?!void = - if bd.address notin b.pendingBlocks: +proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void = + if bd.address notin self.pendingBlocks: return failure("Received block is not currently a pending block") if bd.address.leaf: @@ -333,8 +386,8 @@ proc validateBlockDelivery(b: BlockExcEngine, bd: BlockDelivery): ?!void = return success() proc blocksDeliveryHandler*( - b: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery] -) {.async.} = + self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery] +) {.async: (raises: []).} = trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address)) var validatedBlocksDelivery: seq[BlockDelivery] @@ -343,40 +396,50 @@ proc blocksDeliveryHandler*( peer = peer address = bd.address - if err =? b.validateBlockDelivery(bd).errorOption: - warn "Block validation failed", msg = err.msg - continue - - if err =? (await b.localStore.putBlock(bd.blk)).errorOption: - error "Unable to store block", err = err.msg - continue - - if bd.address.leaf: - without proof =? 
bd.proof: - error "Proof expected for a leaf block delivery" + try: + if err =? self.validateBlockDelivery(bd).errorOption: + warn "Block validation failed", msg = err.msg continue - if err =? ( - await b.localStore.putCidAndProof( - bd.address.treeCid, bd.address.index, bd.blk.cid, proof - ) - ).errorOption: - error "Unable to store proof and cid for a block" + + if err =? (await self.localStore.putBlock(bd.blk)).errorOption: + error "Unable to store block", err = err.msg continue + if bd.address.leaf: + without proof =? bd.proof: + warn "Proof expected for a leaf block delivery" + continue + if err =? ( + await self.localStore.putCidAndProof( + bd.address.treeCid, bd.address.index, bd.blk.cid, proof + ) + ).errorOption: + warn "Unable to store proof and cid for a block" + continue + except CatchableError as exc: + warn "Error handling block delivery", error = exc.msg + continue + validatedBlocksDelivery.add(bd) - await b.resolveBlocks(validatedBlocksDelivery) codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64) - let peerCtx = b.peers.get(peer) - + let peerCtx = self.peers.get(peer) if peerCtx != nil: - await b.payForBlocks(peerCtx, blocksDelivery) - ## shouldn't we remove them from the want-list instead of this: - peerCtx.cleanPresence(blocksDelivery.mapIt(it.address)) + if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption: + warn "Error paying for blocks", err = err.msg + return -proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.async.} = - let peerCtx = b.peers.get(peer) + if err =? 
catch(await self.resolveBlocks(validatedBlocksDelivery)).errorOption: + warn "Error resolving blocks", err = err.msg + return + +proc wantListHandler*( + self: BlockExcEngine, peer: PeerId, wantList: WantList +) {.async: (raises: []).} = + trace "Received want list from peer", peer, wantList = wantList.entries.len + + let peerCtx = self.peers.get(peer) if peerCtx.isNil: return @@ -385,113 +448,138 @@ proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.asy presence: seq[BlockPresence] schedulePeer = false - for e in wantList.entries: - let idx = peerCtx.peerWants.findIt(it.address == e.address) + try: + for e in wantList.entries: + let idx = peerCtx.peerWants.findIt(it.address == e.address) - logScope: - peer = peerCtx.id - address = e.address - wantType = $e.wantType + logScope: + peer = peerCtx.id + address = e.address + wantType = $e.wantType - if idx < 0: # Adding new entry to peer wants - let - have = await e.address in b.localStore - price = @(b.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) + if idx < 0: # Adding new entry to peer wants + let + have = + try: + await e.address in self.localStore + except CatchableError as exc: + # TODO: should not be necessary once we have proper exception tracking on the BlockStore interface + false + price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) - case e.wantType: - of WantType.WantHave: - if have: - presence.add( - BlockPresence( - address: e.address, `type`: BlockPresenceType.Have, price: price - ) - ) - else: - if e.sendDontHave: + if e.cancel: + trace "Received cancelation for untracked block, skipping", + address = e.address + continue + + trace "Processing want list entry", wantList = $e + case e.wantType + of WantType.WantHave: + if have: presence.add( BlockPresence( - address: e.address, `type`: BlockPresenceType.DontHave, price: price + address: e.address, `type`: BlockPresenceType.Have, price: price ) ) + else: + if e.sendDontHave: + presence.add( + 
BlockPresence( + address: e.address, `type`: BlockPresenceType.DontHave, price: price + ) + ) + + codex_block_exchange_want_have_lists_received.inc() + of WantType.WantBlock: peerCtx.peerWants.add(e) + schedulePeer = true + codex_block_exchange_want_block_lists_received.inc() + else: # Updating existing entry in peer wants + # peer doesn't want this block anymore + if e.cancel: + trace "Canceling want for block", address = e.address + peerCtx.peerWants.del(idx) + trace "Canceled block request", + address = e.address, len = peerCtx.peerWants.len + else: + if e.wantType == WantType.WantBlock: + schedulePeer = true + # peer might want to ask for the same cid with + # different want params + trace "Updating want for block", address = e.address + peerCtx.peerWants[idx] = e # update entry + trace "Updated block request", + address = e.address, len = peerCtx.peerWants.len - codex_block_exchange_want_have_lists_received.inc() - of WantType.WantBlock: - peerCtx.peerWants.add(e) - schedulePeer = true - codex_block_exchange_want_block_lists_received.inc() - else: # Updating existing entry in peer wants - # peer doesn't want this block anymore - if e.cancel: - trace "Canceling want for block", address = e.address - peerCtx.peerWants.del(idx) - else: - # peer might want to ask for the same cid with - # different want params - trace "Updating want for block", address = e.address - peerCtx.peerWants[idx] = e # update entry + if presence.len > 0: + trace "Sending presence to remote", items = presence.mapIt($it).join(",") + await self.network.request.sendPresence(peer, presence) - if presence.len > 0: - trace "Sending presence to remote", items = presence.mapIt($it).join(",") - await b.network.request.sendPresence(peer, presence) + if schedulePeer: + self.scheduleTask(peerCtx) + except CancelledError as exc: #TODO: replace with CancelledError + warn "Error processing want list", error = exc.msg - if schedulePeer: - if not b.scheduleTask(peerCtx): - warn "Unable to schedule task for 
peer", peer - -proc accountHandler*(engine: BlockExcEngine, peer: PeerId, account: Account) {.async.} = - let context = engine.peers.get(peer) +proc accountHandler*( + self: BlockExcEngine, peer: PeerId, account: Account +) {.async: (raises: []).} = + let context = self.peers.get(peer) if context.isNil: return context.account = account.some proc paymentHandler*( - engine: BlockExcEngine, peer: PeerId, payment: SignedState -) {.async.} = + self: BlockExcEngine, peer: PeerId, payment: SignedState +) {.async: (raises: []).} = trace "Handling payments", peer - without context =? engine.peers.get(peer).option and account =? context.account: + without context =? self.peers.get(peer).option and account =? context.account: trace "No context or account for peer", peer return if channel =? context.paymentChannel: let sender = account.address - discard engine.wallet.acceptPayment(channel, Asset, sender, payment) + discard self.wallet.acceptPayment(channel, Asset, sender, payment) else: - context.paymentChannel = engine.wallet.acceptChannel(payment).option + context.paymentChannel = self.wallet.acceptChannel(payment).option -proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} = +proc setupPeer*( + self: BlockExcEngine, peer: PeerId +) {.async: (raises: [CancelledError]).} = ## Perform initial setup, such as want ## list exchange ## trace "Setting up peer", peer - if peer notin b.peers: + if peer notin self.peers: trace "Setting up new peer", peer - b.peers.add(BlockExcPeerCtx(id: peer)) - trace "Added peer", peers = b.peers.len + self.peers.add(BlockExcPeerCtx(id: peer)) + trace "Added peer", peers = self.peers.len # broadcast our want list, the other peer will do the same - if b.pendingBlocks.wantListLen > 0: + if self.pendingBlocks.wantListLen > 0: trace "Sending our want list to a peer", peer - let cids = toSeq(b.pendingBlocks.wantList) - await b.network.request.sendWantList(peer, cids, full = true) + let cids = toSeq(self.pendingBlocks.wantList) + await 
self.network.request.sendWantList(peer, cids, full = true) - if address =? b.pricing .? address: - await b.network.request.sendAccount(peer, Account(address: address)) + if address =? self.pricing .? address: + trace "Sending account to peer", peer + await self.network.request.sendAccount(peer, Account(address: address)) -proc dropPeer*(b: BlockExcEngine, peer: PeerId) = +proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} = ## Cleanup disconnected peer ## trace "Dropping peer", peer # drop the peer from the peers table - b.peers.remove(peer) + self.peers.remove(peer) -proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = +proc taskHandler*( + self: BlockExcEngine, task: BlockExcPeerCtx +) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} = # Send to the peer blocks he wants to get, # if they present in our local store @@ -514,22 +602,25 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} = if e.address.leaf: - (await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map( + (await self.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map( (blkAndProof: (Block, CodexProof)) => BlockDelivery( address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some ) ) else: - (await b.localStore.getBlock(e.address)).map( + (await self.localStore.getBlock(e.address)).map( (blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none) ) let blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup)) - blocksDelivery = - blocksDeliveryFut.filterIt(it.completed and it.read.isOk).mapIt(it.read.get) + blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt: + if bd =? it.value: + bd + else: + raiseAssert "Unexpected error in local lookup" # All the wants that failed local lookup must be set to not-in-flight again. 
let @@ -540,26 +631,23 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = if blocksDelivery.len > 0: trace "Sending blocks to peer", peer = task.id, blocks = (blocksDelivery.mapIt(it.address)) - await b.network.request.sendBlocksDelivery(task.id, blocksDelivery) + await self.network.request.sendBlocksDelivery(task.id, blocksDelivery) codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64) task.peerWants.keepItIf(it.address notin successAddresses) -proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).} = +proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} = ## process tasks ## trace "Starting blockexc task runner" - while b.blockexcRunning: - try: - let peerCtx = await b.taskQueue.pop() - - await b.taskHandler(peerCtx) - except CancelledError: - break # do not propagate as blockexcTaskRunner was asyncSpawned - except CatchableError as e: - error "error running block exchange task", error = e.msgDetail + try: + while self.blockexcRunning: + let peerCtx = await self.taskQueue.pop() + await self.taskHandler(peerCtx) + except CatchableError as exc: + error "error running block exchange task", error = exc.msg info "Exiting blockexc task runner" @@ -573,55 +661,59 @@ proc new*( peerStore: PeerCtxStore, pendingBlocks: PendingBlocksManager, concurrentTasks = DefaultConcurrentTasks, - peersPerRequest = DefaultMaxPeersPerRequest, - blockFetchTimeout = DefaultBlockTimeout, ): BlockExcEngine = ## Create new block exchange engine instance ## - let engine = BlockExcEngine( + let self = BlockExcEngine( localStore: localStore, peers: peerStore, pendingBlocks: pendingBlocks, - peersPerRequest: peersPerRequest, network: network, wallet: wallet, concurrentTasks: concurrentTasks, - trackedFutures: TrackedFutures.new(), + trackedFutures: TrackedFutures(), taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize), discovery: discovery, advertiser: advertiser, - blockFetchTimeout: blockFetchTimeout, ) - proc 
peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = + proc peerEventHandler( + peerId: PeerId, event: PeerEvent + ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = if event.kind == PeerEventKind.Joined: - await engine.setupPeer(peerId) + await self.setupPeer(peerId) else: - engine.dropPeer(peerId) + self.dropPeer(peerId) if not isNil(network.switch): network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) - proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} = - engine.wantListHandler(peer, wantList) + proc blockWantListHandler( + peer: PeerId, wantList: WantList + ): Future[void] {.async: (raises: []).} = + self.wantListHandler(peer, wantList) proc blockPresenceHandler( peer: PeerId, presence: seq[BlockPresence] - ): Future[void] {.gcsafe.} = - engine.blockPresenceHandler(peer, presence) + ): Future[void] {.async: (raises: []).} = + self.blockPresenceHandler(peer, presence) proc blocksDeliveryHandler( peer: PeerId, blocksDelivery: seq[BlockDelivery] - ): Future[void] {.gcsafe.} = - engine.blocksDeliveryHandler(peer, blocksDelivery) + ): Future[void] {.async: (raises: []).} = + self.blocksDeliveryHandler(peer, blocksDelivery) - proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} = - engine.accountHandler(peer, account) + proc accountHandler( + peer: PeerId, account: Account + ): Future[void] {.async: (raises: []).} = + self.accountHandler(peer, account) - proc paymentHandler(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} = - engine.paymentHandler(peer, payment) + proc paymentHandler( + peer: PeerId, payment: SignedState + ): Future[void] {.async: (raises: []).} = + self.paymentHandler(peer, payment) network.handlers = BlockExcHandlers( onWantList: blockWantListHandler, @@ -631,4 +723,4 @@ proc new*( onPayment: paymentHandler, ) - return engine + return self diff --git 
a/codex/blockexchange/engine/payments.nim b/codex/blockexchange/engine/payments.nim index 88953976..260a3005 100644 --- a/codex/blockexchange/engine/payments.nim +++ b/codex/blockexchange/engine/payments.nim @@ -7,6 +7,8 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. +{.push raises: [].} + import std/math import pkg/nitro import pkg/questionable/results @@ -15,9 +17,6 @@ import ../peers export nitro export results -push: - {.upraises: [].} - const ChainId* = 0.u256 # invalid chain id for now const Asset* = EthAddress.zero # invalid ERC20 asset address for now const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals diff --git a/codex/blockexchange/engine/pendingblocks.nim b/codex/blockexchange/engine/pendingblocks.nim index 3b69e2d2..f169f744 100644 --- a/codex/blockexchange/engine/pendingblocks.nim +++ b/codex/blockexchange/engine/pendingblocks.nim @@ -7,13 +7,11 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. 
+{.push raises: [].} + import std/tables import std/monotimes - -import pkg/upraises - -push: - {.upraises: [].} +import std/strutils import pkg/chronos import pkg/libp2p @@ -34,66 +32,76 @@ declareGauge( codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us" ) -const DefaultBlockTimeout* = 10.minutes +const + DefaultBlockRetries* = 3000 + DefaultRetryInterval* = 500.millis type + RetriesExhaustedError* = object of CatchableError + BlockHandle* = Future[Block].Raising([CancelledError, RetriesExhaustedError]) + BlockReq* = object - handle*: Future[Block] + handle*: BlockHandle inFlight*: bool + blockRetries*: int startTime*: int64 PendingBlocksManager* = ref object of RootObj + blockRetries*: int = DefaultBlockRetries + retryInterval*: Duration = DefaultRetryInterval blocks*: Table[BlockAddress, BlockReq] # pending Block requests proc updatePendingBlockGauge(p: PendingBlocksManager) = codex_block_exchange_pending_block_requests.set(p.blocks.len.int64) proc getWantHandle*( - p: PendingBlocksManager, - address: BlockAddress, - timeout = DefaultBlockTimeout, - inFlight = false, -): Future[Block] {.async.} = + self: PendingBlocksManager, address: BlockAddress, inFlight = false +): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} = ## Add an event for a block ## - try: - if address notin p.blocks: - p.blocks[address] = BlockReq( - handle: newFuture[Block]("pendingBlocks.getWantHandle"), - inFlight: inFlight, - startTime: getMonoTime().ticks, - ) + self.blocks.withValue(address, blk): + return blk[].handle + do: + let blk = BlockReq( + handle: newFuture[Block]("pendingBlocks.getWantHandle"), + inFlight: inFlight, + blockRetries: self.blockRetries, + startTime: getMonoTime().ticks, + ) + self.blocks[address] = blk + let handle = blk.handle - p.updatePendingBlockGauge() - return await p.blocks[address].handle.wait(timeout) - except CancelledError as exc: - trace "Blocks cancelled", exc = exc.msg, address - 
raise exc - except CatchableError as exc: - error "Pending WANT failed or expired", exc = exc.msg - # no need to cancel, it is already cancelled by wait() - raise exc - finally: - p.blocks.del(address) - p.updatePendingBlockGauge() + proc cleanUpBlock(data: pointer) {.raises: [].} = + self.blocks.del(address) + self.updatePendingBlockGauge() + + handle.addCallback(cleanUpBlock) + handle.cancelCallback = proc(data: pointer) {.raises: [].} = + if not handle.finished: + handle.removeCallback(cleanUpBlock) + cleanUpBlock(nil) + + self.updatePendingBlockGauge() + return handle proc getWantHandle*( - p: PendingBlocksManager, cid: Cid, timeout = DefaultBlockTimeout, inFlight = false -): Future[Block] = - p.getWantHandle(BlockAddress.init(cid), timeout, inFlight) + self: PendingBlocksManager, cid: Cid, inFlight = false +): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} = + self.getWantHandle(BlockAddress.init(cid), inFlight) proc resolve*( - p: PendingBlocksManager, blocksDelivery: seq[BlockDelivery] + self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery] ) {.gcsafe, raises: [].} = ## Resolve pending blocks ## for bd in blocksDelivery: - p.blocks.withValue(bd.address, blockReq): - if not blockReq.handle.finished: + self.blocks.withValue(bd.address, blockReq): + if not blockReq[].handle.finished: + trace "Resolving pending block", address = bd.address let - startTime = blockReq.startTime + startTime = blockReq[].startTime stopTime = getMonoTime().ticks retrievalDurationUs = (stopTime - startTime) div 1000 @@ -106,52 +114,70 @@ proc resolve*( else: trace "Block handle already finished", address = bd.address -proc setInFlight*(p: PendingBlocksManager, address: BlockAddress, inFlight = true) = +func retries*(self: PendingBlocksManager, address: BlockAddress): int = + self.blocks.withValue(address, pending): + result = pending[].blockRetries + do: + result = 0 + +func decRetries*(self: PendingBlocksManager, address: BlockAddress) 
= + self.blocks.withValue(address, pending): + pending[].blockRetries -= 1 + +func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool = + self.blocks.withValue(address, pending): + result = pending[].blockRetries <= 0 + +func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) = ## Set inflight status for a block ## - p.blocks.withValue(address, pending): + self.blocks.withValue(address, pending): pending[].inFlight = inFlight -proc isInFlight*(p: PendingBlocksManager, address: BlockAddress): bool = +func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool = ## Check if a block is in flight ## - p.blocks.withValue(address, pending): + self.blocks.withValue(address, pending): result = pending[].inFlight -proc contains*(p: PendingBlocksManager, cid: Cid): bool = - BlockAddress.init(cid) in p.blocks +func contains*(self: PendingBlocksManager, cid: Cid): bool = + BlockAddress.init(cid) in self.blocks -proc contains*(p: PendingBlocksManager, address: BlockAddress): bool = - address in p.blocks +func contains*(self: PendingBlocksManager, address: BlockAddress): bool = + address in self.blocks -iterator wantList*(p: PendingBlocksManager): BlockAddress = - for a in p.blocks.keys: +iterator wantList*(self: PendingBlocksManager): BlockAddress = + for a in self.blocks.keys: yield a -iterator wantListBlockCids*(p: PendingBlocksManager): Cid = - for a in p.blocks.keys: +iterator wantListBlockCids*(self: PendingBlocksManager): Cid = + for a in self.blocks.keys: if not a.leaf: yield a.cid -iterator wantListCids*(p: PendingBlocksManager): Cid = +iterator wantListCids*(self: PendingBlocksManager): Cid = var yieldedCids = initHashSet[Cid]() - for a in p.blocks.keys: + for a in self.blocks.keys: let cid = a.cidOrTreeCid if cid notin yieldedCids: yieldedCids.incl(cid) yield cid -iterator wantHandles*(p: PendingBlocksManager): Future[Block] = - for v in p.blocks.values: +iterator wantHandles*(self: 
PendingBlocksManager): Future[Block] = + for v in self.blocks.values: yield v.handle -proc wantListLen*(p: PendingBlocksManager): int = - p.blocks.len +proc wantListLen*(self: PendingBlocksManager): int = + self.blocks.len -func len*(p: PendingBlocksManager): int = - p.blocks.len +func len*(self: PendingBlocksManager): int = + self.blocks.len -func new*(T: type PendingBlocksManager): PendingBlocksManager = - PendingBlocksManager() +func new*( + T: type PendingBlocksManager, + retries = DefaultBlockRetries, + interval = DefaultRetryInterval, +): PendingBlocksManager = + PendingBlocksManager(blockRetries: retries, retryInterval: interval) diff --git a/codex/blockexchange/network/network.nim b/codex/blockexchange/network/network.nim index ecb72890..d4754110 100644 --- a/codex/blockexchange/network/network.nim +++ b/codex/blockexchange/network/network.nim @@ -21,26 +21,29 @@ import ../../blocktype as bt import ../../logutils import ../protobuf/blockexc as pb import ../protobuf/payments +import ../../utils/trackedfutures import ./networkpeer -export network, payments +export networkpeer, payments logScope: topics = "codex blockexcnetwork" const Codec* = "/codex/blockexc/1.0.0" - MaxInflight* = 100 + DefaultMaxInflight* = 100 type - WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} + WantListHandler* = + proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).} BlocksDeliveryHandler* = - proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.} + proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).} BlockPresenceHandler* = - proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.} - AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} - PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} + proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).} + AccountHandler* = proc(peer: PeerId, 
account: Account) {.gcsafe, async: (raises: []).} + PaymentHandler* = + proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).} BlockExcHandlers* = object onWantList*: WantListHandler @@ -57,15 +60,20 @@ type wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ): Future[void] {.gcsafe.} - WantCancellationSender* = - proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} - BlocksDeliverySender* = - proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} - PresenceSender* = - proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} - AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} - PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} + ) {.async: (raises: [CancelledError]).} + WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {. + async: (raises: [CancelledError]) + .} + BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {. + async: (raises: [CancelledError]) + .} + PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {. 
+ async: (raises: [CancelledError]) + .} + AccountSender* = + proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).} + PaymentSender* = + proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).} BlockExcRequest* = object sendWantList*: WantListSender @@ -82,6 +90,8 @@ type request*: BlockExcRequest getConn: ConnProvider inflightSema: AsyncSemaphore + maxInflight: int = DefaultMaxInflight + trackedFutures*: TrackedFutures = TrackedFutures() proc peerId*(b: BlockExcNetwork): PeerId = ## Return peer id @@ -95,7 +105,9 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool = return b.peerId == peer -proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = +proc send*( + b: BlockExcNetwork, id: PeerId, msg: pb.Message +) {.async: (raises: [CancelledError]).} = ## Send message to peer ## @@ -103,8 +115,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = trace "Unable to send, peer not found", peerId = id return - let peer = b.peers[id] try: + let peer = b.peers[id] + await b.inflightSema.acquire() await peer.send(msg) except CancelledError as error: @@ -114,7 +127,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = finally: b.inflightSema.release() -proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} = +proc handleWantList( + b: BlockExcNetwork, peer: NetworkPeer, list: WantList +) {.async: (raises: []).} = ## Handle incoming want list ## @@ -130,7 +145,7 @@ proc sendWantList*( wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, -): Future[void] = +) {.async: (raw: true, raises: [CancelledError]).} = ## Send a want message to peer ## @@ -151,14 +166,14 @@ proc sendWantList*( proc sendWantCancellations*( b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress] -): Future[void] {.async.} = +): Future[void] {.async: (raises: [CancelledError]).} = ## Informs a remote peer that we're no longer 
interested in a set of blocks ## await b.sendWantList(id = id, addresses = addresses, cancel = true) proc handleBlocksDelivery( b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery] -) {.async.} = +) {.async: (raises: []).} = ## Handle incoming blocks ## @@ -167,7 +182,7 @@ proc handleBlocksDelivery( proc sendBlocksDelivery*( b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery] -): Future[void] = +) {.async: (raw: true, raises: [CancelledError]).} = ## Send blocks to remote ## @@ -175,7 +190,7 @@ proc sendBlocksDelivery*( proc handleBlockPresence( b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence] -) {.async.} = +) {.async: (raises: []).} = ## Handle block presence ## @@ -184,7 +199,7 @@ proc handleBlockPresence( proc sendBlockPresence*( b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence] -): Future[void] = +) {.async: (raw: true, raises: [CancelledError]).} = ## Send presence to remote ## @@ -192,20 +207,24 @@ proc sendBlockPresence*( proc handleAccount( network: BlockExcNetwork, peer: NetworkPeer, account: Account -) {.async.} = +) {.async: (raises: []).} = ## Handle account info ## if not network.handlers.onAccount.isNil: await network.handlers.onAccount(peer.id, account) -proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] = +proc sendAccount*( + b: BlockExcNetwork, id: PeerId, account: Account +) {.async: (raw: true, raises: [CancelledError]).} = ## Send account info to remote ## b.send(id, Message(account: AccountMessage.init(account))) -proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] = +proc sendPayment*( + b: BlockExcNetwork, id: PeerId, payment: SignedState +) {.async: (raw: true, raises: [CancelledError]).} = ## Send payment to remote ## @@ -213,30 +232,32 @@ proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[ proc handlePayment( network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState -) 
{.async.} = +) {.async: (raises: []).} = ## Handle payment ## if not network.handlers.onPayment.isNil: await network.handlers.onPayment(peer.id, payment) -proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.raises: [].} = +proc rpcHandler( + b: BlockExcNetwork, peer: NetworkPeer, msg: Message +) {.async: (raises: []).} = ## handle rpc messages ## if msg.wantList.entries.len > 0: - asyncSpawn b.handleWantList(peer, msg.wantList) + b.trackedFutures.track(b.handleWantList(peer, msg.wantList)) if msg.payload.len > 0: - asyncSpawn b.handleBlocksDelivery(peer, msg.payload) + b.trackedFutures.track(b.handleBlocksDelivery(peer, msg.payload)) if msg.blockPresences.len > 0: - asyncSpawn b.handleBlockPresence(peer, msg.blockPresences) + b.trackedFutures.track(b.handleBlockPresence(peer, msg.blockPresences)) if account =? Account.init(msg.account): - asyncSpawn b.handleAccount(peer, account) + b.trackedFutures.track(b.handleAccount(peer, account)) if payment =? SignedState.init(msg.payment): - asyncSpawn b.handlePayment(peer, payment) + b.trackedFutures.track(b.handlePayment(peer, payment)) proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = ## Creates or retrieves a BlockExcNetwork Peer @@ -245,8 +266,11 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = if peer in b.peers: return b.peers.getOrDefault(peer, nil) - var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} = + var getConn: ConnProvider = proc(): Future[Connection] {. 
+ async: (raises: [CancelledError]) + .} = try: + trace "Getting new connection stream", peer return await b.switch.dial(peer, Codec) except CancelledError as error: raise error @@ -256,8 +280,8 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = if not isNil(b.getConn): getConn = b.getConn - let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async.} = - b.rpcHandler(p, msg) + let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} = + await b.rpcHandler(p, msg) # create new pubsub peer let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler) @@ -282,48 +306,65 @@ proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} = trace "Skipping dialing self", peer = peer.peerId return + if peer.peerId in b.peers: + trace "Already connected to peer", peer = peer.peerId + return + await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address)) proc dropPeer*(b: BlockExcNetwork, peer: PeerId) = ## Cleanup disconnected peer ## + trace "Dropping peer", peer b.peers.del(peer) -method init*(b: BlockExcNetwork) = +method init*(self: BlockExcNetwork) = ## Perform protocol initialization ## - proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = + proc peerEventHandler( + peerId: PeerId, event: PeerEvent + ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = if event.kind == PeerEventKind.Joined: - b.setupPeer(peerId) + self.setupPeer(peerId) else: - b.dropPeer(peerId) + self.dropPeer(peerId) - b.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) - b.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) + self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) + self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) - proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} = + proc handler( + conn: Connection, proto: string + ): Future[void] {.async: (raises: [CancelledError]).} = let peerId = conn.peerId - let blockexcPeer 
= b.getOrCreatePeer(peerId) + let blockexcPeer = self.getOrCreatePeer(peerId) await blockexcPeer.readLoop(conn) # attach read loop - b.handler = handle - b.codec = Codec + self.handler = handler + self.codec = Codec + +proc stop*(self: BlockExcNetwork) {.async: (raises: []).} = + await self.trackedFutures.cancelTracked() proc new*( T: type BlockExcNetwork, switch: Switch, connProvider: ConnProvider = nil, - maxInflight = MaxInflight, + maxInflight = DefaultMaxInflight, ): BlockExcNetwork = ## Create a new BlockExcNetwork instance ## let self = BlockExcNetwork( - switch: switch, getConn: connProvider, inflightSema: newAsyncSemaphore(maxInflight) + switch: switch, + getConn: connProvider, + inflightSema: newAsyncSemaphore(maxInflight), + maxInflight: maxInflight, ) + self.maxIncomingStreams = self.maxInflight + proc sendWantList( id: PeerId, cids: seq[BlockAddress], @@ -332,26 +373,32 @@ proc new*( wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave) proc sendWantCancellations( id: PeerId, addresses: seq[BlockAddress] - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendWantCancellations(id, addresses) proc sendBlocksDelivery( id: PeerId, blocksDelivery: seq[BlockDelivery] - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendBlocksDelivery(id, blocksDelivery) - proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} = + proc sendPresence( + id: PeerId, presence: seq[BlockPresence] + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendBlockPresence(id, presence) - proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} = + proc sendAccount( + id: PeerId, account: Account + ): 
Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendAccount(id, account) - proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} = + proc sendPayment( + id: PeerId, payment: SignedState + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendPayment(id, payment) self.request = BlockExcRequest( diff --git a/codex/blockexchange/network/networkpeer.nim b/codex/blockexchange/network/networkpeer.nim index 90c538ea..66c39294 100644 --- a/codex/blockexchange/network/networkpeer.nim +++ b/codex/blockexchange/network/networkpeer.nim @@ -7,9 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import pkg/upraises -push: - {.upraises: [].} +{.push raises: [].} import pkg/chronos import pkg/libp2p @@ -18,67 +16,81 @@ import ../protobuf/blockexc import ../protobuf/message import ../../errors import ../../logutils +import ../../utils/trackedfutures logScope: topics = "codex blockexcnetworkpeer" -type - ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.} +const DefaultYieldInterval = 50.millis - RPCHandler* = proc(peer: NetworkPeer, msg: Message): Future[void] {.gcsafe.} +type + ConnProvider* = + proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).} + + RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).} NetworkPeer* = ref object of RootObj id*: PeerId handler*: RPCHandler sendConn: Connection getConn: ConnProvider + yieldInterval*: Duration = DefaultYieldInterval + trackedFutures: TrackedFutures -proc connected*(b: NetworkPeer): bool = - not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof) +proc connected*(self: NetworkPeer): bool = + not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof) -proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} = +proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} = if isNil(conn): + trace "No 
connection to read from", peer = self.id return + trace "Attaching read loop", peer = self.id, connId = conn.oid try: + var nextYield = Moment.now() + self.yieldInterval while not conn.atEof or not conn.closed: + if Moment.now() > nextYield: + nextYield = Moment.now() + self.yieldInterval + trace "Yielding in read loop", + peer = self.id, nextYield = nextYield, interval = self.yieldInterval + await sleepAsync(10.millis) + let data = await conn.readLp(MaxMessageSize.int) msg = Message.protobufDecode(data).mapFailure().tryGet() - await b.handler(b, msg) + trace "Received message", peer = self.id, connId = conn.oid + await self.handler(self, msg) except CancelledError: trace "Read loop cancelled" except CatchableError as err: warn "Exception in blockexc read loop", msg = err.msg finally: + trace "Detaching read loop", peer = self.id, connId = conn.oid await conn.close() -proc connect*(b: NetworkPeer): Future[Connection] {.async.} = - if b.connected: - return b.sendConn +proc connect*( + self: NetworkPeer +): Future[Connection] {.async: (raises: [CancelledError]).} = + if self.connected: + trace "Already connected", peer = self.id, connId = self.sendConn.oid + return self.sendConn - b.sendConn = await b.getConn() - asyncSpawn b.readLoop(b.sendConn) - return b.sendConn + self.sendConn = await self.getConn() + self.trackedFutures.track(self.readLoop(self.sendConn)) + return self.sendConn -proc send*(b: NetworkPeer, msg: Message) {.async.} = - let conn = await b.connect() +proc send*( + self: NetworkPeer, msg: Message +) {.async: (raises: [CancelledError, LPStreamError]).} = + let conn = await self.connect() if isNil(conn): - warn "Unable to get send connection for peer message not sent", peer = b.id + warn "Unable to get send connection for peer message not sent", peer = self.id return + trace "Sending message", peer = self.id, connId = conn.oid await conn.writeLp(protobufEncode(msg)) -proc broadcast*(b: NetworkPeer, msg: Message) = - proc sendAwaiter() {.async.} = - 
try: - await b.send(msg) - except CatchableError as exc: - warn "Exception broadcasting message to peer", peer = b.id, exc = exc.msg - - asyncSpawn sendAwaiter() - func new*( T: type NetworkPeer, peer: PeerId, @@ -87,4 +99,9 @@ func new*( ): NetworkPeer = doAssert(not isNil(connProvider), "should supply connection provider") - NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler) + NetworkPeer( + id: peer, + getConn: connProvider, + handler: rpcHandler, + trackedFutures: TrackedFutures(), + ) diff --git a/codex/blockexchange/peers/peerctxstore.nim b/codex/blockexchange/peers/peerctxstore.nim index 7cf167b4..ce2506a8 100644 --- a/codex/blockexchange/peers/peerctxstore.nim +++ b/codex/blockexchange/peers/peerctxstore.nim @@ -7,14 +7,12 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. +{.push raises: [].} + import std/sequtils import std/tables import std/algorithm - -import pkg/upraises - -push: - {.upraises: [].} +import std/sequtils import pkg/chronos import pkg/libp2p @@ -33,9 +31,7 @@ type PeerCtxStore* = ref object of RootObj peers*: OrderedTable[PeerId, BlockExcPeerCtx] - PeersForBlock* = object of RootObj - with*: seq[BlockExcPeerCtx] - without*: seq[BlockExcPeerCtx] + PeersForBlock* = tuple[with: seq[BlockExcPeerCtx], without: seq[BlockExcPeerCtx]] iterator items*(self: PeerCtxStore): BlockExcPeerCtx = for p in self.peers.values: @@ -47,6 +43,9 @@ proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool = a.anyIt(it.id == b) +func peerIds*(self: PeerCtxStore): seq[PeerId] = + toSeq(self.peers.keys) + func contains*(self: PeerCtxStore, peerId: PeerId): bool = peerId in self.peers @@ -75,7 +74,7 @@ func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid)) proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock = - var res = PeersForBlock() + var res: PeersForBlock = (@[], @[]) 
for peer in self: if peer.peerHave.anyIt(it == address): res.with.add(peer) diff --git a/codex/blockexchange/protobuf/message.nim b/codex/blockexchange/protobuf/message.nim index 73cb60f1..4db89729 100644 --- a/codex/blockexchange/protobuf/message.nim +++ b/codex/blockexchange/protobuf/message.nim @@ -97,7 +97,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) = pb.write(field, ipb) proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) = - var ipb = initProtoBuffer(maxSize = MaxBlockSize) + var ipb = initProtoBuffer() ipb.write(1, value.blk.cid.data.buffer) ipb.write(2, value.blk.data) ipb.write(3, value.address) @@ -128,7 +128,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) = pb.write(field, ipb) proc protobufEncode*(value: Message): seq[byte] = - var ipb = initProtoBuffer(maxSize = MaxMessageSize) + var ipb = initProtoBuffer() ipb.write(1, value.wantList) for v in value.payload: ipb.write(3, v) @@ -254,16 +254,14 @@ proc decode*( proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] = var value = Message() - pb = initProtoBuffer(msg, maxSize = MaxMessageSize) + pb = initProtoBuffer(msg) ipb: ProtoBuffer sublist: seq[seq[byte]] if ?pb.getField(1, ipb): value.wantList = ?WantList.decode(ipb) if ?pb.getRepeatedField(3, sublist): for item in sublist: - value.payload.add( - ?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)) - ) + value.payload.add(?BlockDelivery.decode(initProtoBuffer(item))) if ?pb.getRepeatedField(4, sublist): for item in sublist: value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item))) diff --git a/codex/blockexchange/protobuf/payments.nim b/codex/blockexchange/protobuf/payments.nim index 5d010a81..885562c4 100644 --- a/codex/blockexchange/protobuf/payments.nim +++ b/codex/blockexchange/protobuf/payments.nim @@ -1,8 +1,9 @@ +{.push raises: [].} + import pkg/stew/byteutils import pkg/stint import pkg/nitro import pkg/questionable -import 
pkg/upraises import ./blockexc export AccountMessage @@ -11,9 +12,6 @@ export StateChannelUpdate export stint export nitro -push: - {.upraises: [].} - type Account* = object address*: EthAddress diff --git a/codex/blockexchange/protobuf/presence.nim b/codex/blockexchange/protobuf/presence.nim index d941746d..3b24a570 100644 --- a/codex/blockexchange/protobuf/presence.nim +++ b/codex/blockexchange/protobuf/presence.nim @@ -1,8 +1,9 @@ +{.push raises: [].} + import libp2p import pkg/stint import pkg/questionable import pkg/questionable/results -import pkg/upraises import ./blockexc import ../../blocktype @@ -11,9 +12,6 @@ export questionable export stint export BlockPresenceType -upraises.push: - {.upraises: [].} - type PresenceMessage* = blockexc.BlockPresence Presence* = object diff --git a/codex/chunker.nim b/codex/chunker.nim index f735aa4b..908dd0c0 100644 --- a/codex/chunker.nim +++ b/codex/chunker.nim @@ -28,8 +28,11 @@ const DefaultChunkSize* = DefaultBlockSize type # default reader type + ChunkerError* = object of CatchableError ChunkBuffer* = ptr UncheckedArray[byte] - Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.gcsafe, raises: [Defect].} + Reader* = proc(data: ChunkBuffer, len: int): Future[int] {. 
+ gcsafe, async: (raises: [ChunkerError, CancelledError]) + .} # Reader that splits input data into fixed-size chunks Chunker* = ref object @@ -74,7 +77,7 @@ proc new*( proc reader( data: ChunkBuffer, len: int - ): Future[int] {.gcsafe, async, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = var res = 0 try: while res < len: @@ -85,7 +88,7 @@ proc new*( raise error except LPStreamError as error: error "LPStream error", err = error.msg - raise error + raise newException(ChunkerError, "LPStream error", error) except CatchableError as exc: error "CatchableError exception", exc = exc.msg raise newException(Defect, exc.msg) @@ -102,7 +105,7 @@ proc new*( proc reader( data: ChunkBuffer, len: int - ): Future[int] {.gcsafe, async, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = var total = 0 try: while total < len: diff --git a/codex/clock.nim b/codex/clock.nim index 98db22f7..c02e04aa 100644 --- a/codex/clock.nim +++ b/codex/clock.nim @@ -40,5 +40,8 @@ proc toSecondsSince1970*(bytes: seq[byte]): SecondsSince1970 = let asUint = uint64.fromBytes(bytes) cast[int64](asUint) +proc toSecondsSince1970*(num: uint64): SecondsSince1970 = + cast[int64](num) + proc toSecondsSince1970*(bigint: UInt256): SecondsSince1970 = bigint.truncate(int64) diff --git a/codex/codex.nim b/codex/codex.nim index 13985254..391a94fc 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -11,8 +11,10 @@ import std/sequtils import std/strutils import std/os import std/tables +import std/cpuinfo import pkg/chronos +import pkg/taskpools import pkg/presto import pkg/libp2p import pkg/confutils @@ -107,7 +109,9 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} = quit QuitFailure let marketplace = Marketplace.new(marketplaceAddress, signer) - let market = OnChainMarket.new(marketplace, config.rewardRecipient) + let market = OnChainMarket.new( + marketplace, config.rewardRecipient, 
config.marketplaceRequestCacheSize + ) let clock = OnChainClock.new(provider) var client: ?ClientInteractions @@ -130,6 +134,10 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} = if config.simulateProofFailures > 0: warn "Proof failure simulation is not enabled for this build! Configuration ignored" + if error =? (await market.loadConfig()).errorOption: + fatal "Cannot load market configuration", error = error.msg + quit QuitFailure + let purchasing = Purchasing.new(market, clock) let sales = Sales.new(market, clock, repo, proofFailures) client = some ClientInteractions.new(clock, purchasing) @@ -169,14 +177,20 @@ proc start*(s: CodexServer) {.async.} = proc stop*(s: CodexServer) {.async.} = notice "Stopping codex node" - await allFuturesThrowing( - s.restServer.stop(), - s.codexNode.switch.stop(), - s.codexNode.stop(), - s.repoStore.stop(), - s.maintenance.stop(), + let res = await noCancel allFinishedFailed( + @[ + s.restServer.stop(), + s.codexNode.switch.stop(), + s.codexNode.stop(), + s.repoStore.stop(), + s.maintenance.stop(), + ] ) + if res.failure.len > 0: + error "Failed to stop codex node", failures = res.failure.len + raiseAssert "Failed to stop codex node" + proc new*( T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey ): CodexServer = @@ -194,7 +208,18 @@ proc new*( .withTcpTransport({ServerFlags.ReuseAddr}) .build() - var cache: CacheStore = nil + var + cache: CacheStore = nil + taskpool: Taskpool + + try: + if config.numThreads == ThreadCount(0): + taskpool = Taskpool.new(numThreads = min(countProcessors(), 16)) + else: + taskpool = Taskpool.new(numThreads = int(config.numThreads)) + info "Threadpool started", numThreads = taskpool.numThreads + except CatchableError as exc: + raiseAssert("Failure in taskpool initialization:" & exc.msg) if config.cacheSize > 0'nb: cache = CacheStore.new(cacheSize = config.cacheSize) @@ -286,6 +311,7 @@ proc new*( engine = engine, discovery = discovery, prover = prover, + taskPool 
= taskpool, ) restServer = RestServerRef @@ -295,7 +321,7 @@ proc new*( bufferSize = (1024 * 64), maxRequestBodySize = int.high, ) - .expect("Should start rest server!") + .expect("Should create rest server!") switch.mount(network) diff --git a/codex/conf.nim b/codex/conf.nim index 6d47f8f4..986a53d6 100644 --- a/codex/conf.nim +++ b/codex/conf.nim @@ -44,14 +44,19 @@ import ./utils import ./nat import ./utils/natutils +from ./contracts/config import DefaultRequestCacheSize from ./validationconfig import MaxSlots, ValidationGroups export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig export ValidationGroups, MaxSlots export - DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockMaintenanceInterval, - DefaultNumberOfBlocksToMaintainPerInterval + DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval, + DefaultRequestCacheSize + +type ThreadCount* = distinct Natural + +proc `==`*(a, b: ThreadCount): bool {.borrow.} proc defaultDataDir*(): string = let dataDir = @@ -71,6 +76,7 @@ const DefaultDataDir* = defaultDataDir() DefaultCircuitDir* = defaultDataDir() / "circuits" + DefaultThreadCount* = ThreadCount(0) type StartUpCmd* {.pure.} = enum @@ -184,6 +190,13 @@ type name: "max-peers" .}: int + numThreads* {. + desc: + "Number of worker threads (\"0\" = use as many threads as there are CPU cores available)", + defaultValue: DefaultThreadCount, + name: "num-threads" + .}: ThreadCount + agentString* {. defaultValue: "Codex", desc: "Node agent string which is used as identifier in network", @@ -238,15 +251,15 @@ type desc: "Time interval in seconds - determines frequency of block " & "maintenance cycle: how often blocks are checked " & "for expiration and cleanup", - defaultValue: DefaultBlockMaintenanceInterval, - defaultValueDesc: $DefaultBlockMaintenanceInterval, + defaultValue: DefaultBlockInterval, + defaultValueDesc: $DefaultBlockInterval, name: "block-mi" .}: Duration blockMaintenanceNumberOfBlocks* {. 
desc: "Number of blocks to check every maintenance cycle", - defaultValue: DefaultNumberOfBlocksToMaintainPerInterval, - defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval, + defaultValue: DefaultNumBlocksPerInterval, + defaultValueDesc: $DefaultNumBlocksPerInterval, name: "block-mn" .}: int @@ -347,6 +360,16 @@ type name: "reward-recipient" .}: Option[EthAddress] + marketplaceRequestCacheSize* {. + desc: + "Maximum number of StorageRequests kept in memory." & + "Reduces fetching of StorageRequest data from the contract.", + defaultValue: DefaultRequestCacheSize, + defaultValueDesc: $DefaultRequestCacheSize, + name: "request-cache-size", + hidden + .}: uint16 + case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd of PersistenceCmd.prover: circuitDir* {. @@ -482,6 +505,13 @@ proc parseCmdArg*( quit QuitFailure ma +proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} = + let count = parseInt(input) + if count != 0 and count < 2: + warn "Invalid number of threads", input = input + quit QuitFailure + ThreadCount(count) + proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = var res: SignedPeerRecord try: @@ -579,6 +609,15 @@ proc readValue*( quit QuitFailure val = NBytes(value) +proc readValue*( + r: var TomlReader, val: var ThreadCount +) {.upraises: [SerializationError, IOError].} = + var str = r.readValue(string) + try: + val = parseCmdArg(ThreadCount, str) + except CatchableError as err: + raise newException(SerializationError, err.msg) + proc readValue*( r: var TomlReader, val: var Duration ) {.upraises: [SerializationError, IOError].} = @@ -609,6 +648,9 @@ proc completeCmdArg*(T: type NBytes, val: string): seq[string] = proc completeCmdArg*(T: type Duration, val: string): seq[string] = discard +proc completeCmdArg*(T: type ThreadCount, val: string): seq[string] = + discard + # silly chronicles, colors is a compile-time property proc stripAnsi*(v: string): string = var diff --git 
a/codex/contracts/clock.nim b/codex/contracts/clock.nim index b5bf7ebb..b7863539 100644 --- a/codex/contracts/clock.nim +++ b/codex/contracts/clock.nim @@ -5,6 +5,7 @@ import pkg/chronos import pkg/stint import ../clock import ../conf +import ../utils/trackedfutures export clock @@ -18,9 +19,12 @@ type OnChainClock* = ref object of Clock blockNumber: UInt256 started: bool newBlock: AsyncEvent + trackedFutures: TrackedFutures proc new*(_: type OnChainClock, provider: Provider): OnChainClock = - OnChainClock(provider: provider, newBlock: newAsyncEvent()) + OnChainClock( + provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures() + ) proc update(clock: OnChainClock, blck: Block) = if number =? blck.number and number > clock.blockNumber: @@ -32,15 +36,12 @@ proc update(clock: OnChainClock, blck: Block) = blockTime = blck.timestamp, blockNumber = number, offset = clock.offset clock.newBlock.fire() -proc update(clock: OnChainClock) {.async.} = +proc update(clock: OnChainClock) {.async: (raises: []).} = try: if latest =? 
(await clock.provider.getBlock(BlockTag.latest)): clock.update(latest) - except CancelledError as error: - raise error except CatchableError as error: debug "error updating clock: ", error = error.msg - discard method start*(clock: OnChainClock) {.async.} = if clock.started: @@ -52,7 +53,7 @@ method start*(clock: OnChainClock) {.async.} = return # ignore block parameter; hardhat may call this with pending blocks - asyncSpawn clock.update() + clock.trackedFutures.track(clock.update()) await clock.update() @@ -64,6 +65,7 @@ method stop*(clock: OnChainClock) {.async.} = return await clock.subscription.unsubscribe() + await clock.trackedFutures.cancelTracked() clock.started = false method now*(clock: OnChainClock): SecondsSince1970 = diff --git a/codex/contracts/config.nim b/codex/contracts/config.nim index 87cd1f2a..3c31c8b5 100644 --- a/codex/contracts/config.nim +++ b/codex/contracts/config.nim @@ -4,47 +4,66 @@ import pkg/questionable/results export contractabi +const DefaultRequestCacheSize* = 128.uint16 + type MarketplaceConfig* = object collateral*: CollateralConfig proofs*: ProofConfig + reservations*: SlotReservationsConfig + requestDurationLimit*: uint64 CollateralConfig* = object repairRewardPercentage*: uint8 # percentage of remaining collateral slot has after it has been freed maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value - slashCriterion*: uint16 # amount of proofs missed that lead to slashing slashPercentage*: uint8 # percentage of the collateral that is slashed + validatorRewardPercentage*: uint8 + # percentage of the slashed amount going to the validators ProofConfig* = object - period*: UInt256 # proofs requirements are calculated per period (in seconds) - timeout*: UInt256 # mark proofs as missing before the timeout (in seconds) + period*: uint64 # proofs requirements are calculated per period (in seconds) + timeout*: uint64 # mark proofs as missing before the timeout (in seconds) downtime*: uint8 # ignore this 
much recent blocks for proof requirements + downtimeProduct*: uint8 zkeyHash*: string # hash of the zkey file which is linked to the verifier # Ensures the pointer does not remain in downtime for many consecutive # periods. For each period increase, move the pointer `pointerProduct` # blocks. Should be a prime number to ensure there are no cycles. - downtimeProduct*: uint8 + + SlotReservationsConfig* = object + maxReservations*: uint8 func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig = ProofConfig( period: tupl[0], timeout: tupl[1], downtime: tupl[2], - zkeyHash: tupl[3], - downtimeProduct: tupl[4], + downtimeProduct: tupl[3], + zkeyHash: tupl[4], ) +func fromTuple(_: type SlotReservationsConfig, tupl: tuple): SlotReservationsConfig = + SlotReservationsConfig(maxReservations: tupl[0]) + func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig = CollateralConfig( repairRewardPercentage: tupl[0], maxNumberOfSlashes: tupl[1], - slashCriterion: tupl[2], - slashPercentage: tupl[3], + slashPercentage: tupl[2], + validatorRewardPercentage: tupl[3], ) func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig = - MarketplaceConfig(collateral: tupl[0], proofs: tupl[1]) + MarketplaceConfig( + collateral: tupl[0], + proofs: tupl[1], + reservations: tupl[2], + requestDurationLimit: tupl[3], + ) + +func solidityType*(_: type SlotReservationsConfig): string = + solidityType(SlotReservationsConfig.fieldTypes) func solidityType*(_: type ProofConfig): string = solidityType(ProofConfig.fieldTypes) @@ -53,7 +72,10 @@ func solidityType*(_: type CollateralConfig): string = solidityType(CollateralConfig.fieldTypes) func solidityType*(_: type MarketplaceConfig): string = - solidityType(CollateralConfig.fieldTypes) + solidityType(MarketplaceConfig.fieldTypes) + +func encode*(encoder: var AbiEncoder, slot: SlotReservationsConfig) = + encoder.write(slot.fieldValues) func encode*(encoder: var AbiEncoder, slot: ProofConfig) = 
encoder.write(slot.fieldValues) @@ -68,6 +90,10 @@ func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T = let tupl = ?decoder.read(ProofConfig.fieldTypes) success ProofConfig.fromTuple(tupl) +func decode*(decoder: var AbiDecoder, T: type SlotReservationsConfig): ?!T = + let tupl = ?decoder.read(SlotReservationsConfig.fieldTypes) + success SlotReservationsConfig.fromTuple(tupl) + func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T = let tupl = ?decoder.read(CollateralConfig.fieldTypes) success CollateralConfig.fromTuple(tupl) diff --git a/codex/contracts/deployment.nim b/codex/contracts/deployment.nim index c4e59b80..cc125d18 100644 --- a/codex/contracts/deployment.nim +++ b/codex/contracts/deployment.nim @@ -18,9 +18,9 @@ const knownAddresses = { # Taiko Alpha-3 Testnet "167005": {"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable, - # Codex Testnet - Nov 25 2024 18:41:29 PM (+00:00 UTC) + # Codex Testnet - Feb 25 2025 07:24:19 AM (+00:00 UTC) "789987": - {"Marketplace": Address.init("0xAB03b6a58C5262f530D54146DA2a552B1C0F7648")}.toTable, + {"Marketplace": Address.init("0xfFaF679D5Cbfdd5Dbc9Be61C616ed115DFb597ed")}.toTable, }.toTable proc getKnownAddress(T: type, chainId: UInt256): ?Address = diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 35557050..8b235876 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -1,7 +1,9 @@ +import std/strformat import std/strutils import pkg/ethers import pkg/upraises import pkg/questionable +import pkg/lrucache import ../utils/exceptions import ../logutils import ../market @@ -20,6 +22,7 @@ type signer: Signer rewardRecipient: ?Address configuration: ?MarketplaceConfig + requestCache: LruCache[string, StorageRequest] MarketSubscription = market.Subscription EventSubscription = ethers.Subscription @@ -27,128 +30,199 @@ type eventSubscription: EventSubscription func new*( - _: type OnChainMarket, contract: Marketplace, 
rewardRecipient = Address.none + _: type OnChainMarket, + contract: Marketplace, + rewardRecipient = Address.none, + requestCacheSize: uint16 = DefaultRequestCacheSize, ): OnChainMarket = without signer =? contract.signer: raiseAssert("Marketplace contract should have a signer") - OnChainMarket(contract: contract, signer: signer, rewardRecipient: rewardRecipient) + var requestCache = newLruCache[string, StorageRequest](int(requestCacheSize)) + + OnChainMarket( + contract: contract, + signer: signer, + rewardRecipient: rewardRecipient, + requestCache: requestCache, + ) proc raiseMarketError(message: string) {.raises: [MarketError].} = raise newException(MarketError, message) -template convertEthersError(body) = +func prefixWith(suffix, prefix: string, separator = ": "): string = + if prefix.len > 0: + return &"{prefix}{separator}{suffix}" + else: + return suffix + +template convertEthersError(msg: string = "", body) = try: body except EthersError as error: - raiseMarketError(error.msgDetail) + raiseMarketError(error.msgDetail.prefixWith(msg)) -proc config(market: OnChainMarket): Future[MarketplaceConfig] {.async.} = +proc config( + market: OnChainMarket +): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} = without resolvedConfig =? market.configuration: - let fetchedConfig = await market.contract.configuration() - market.configuration = some fetchedConfig - return fetchedConfig + if err =? (await market.loadConfig()).errorOption: + raiseMarketError(err.msg) + + without config =? 
market.configuration: + raiseMarketError("Failed to access to config from the Marketplace contract") + + return config return resolvedConfig -proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} = +proc approveFunds( + market: OnChainMarket, amount: UInt256 +) {.async: (raises: [CancelledError, MarketError]).} = debug "Approving tokens", amount - convertEthersError: + convertEthersError("Failed to approve funds"): let tokenAddress = await market.contract.token() let token = Erc20Token.new(tokenAddress, market.signer) discard await token.increaseAllowance(market.contract.address(), amount).confirm(1) -method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} = +method loadConfig*( + market: OnChainMarket +): Future[?!void] {.async: (raises: [CancelledError]).} = + try: + without config =? market.configuration: + let fetchedConfig = await market.contract.configuration() + + market.configuration = some fetchedConfig + + return success() + except EthersError as err: + return failure newException( + MarketError, + "Failed to fetch the config from the Marketplace contract: " & err.msg, + ) + +method getZkeyHash*( + market: OnChainMarket +): Future[?string] {.async: (raises: [CancelledError, MarketError]).} = let config = await market.config() return some config.proofs.zkeyHash -method getSigner*(market: OnChainMarket): Future[Address] {.async.} = - convertEthersError: +method getSigner*( + market: OnChainMarket +): Future[Address] {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to get signer address"): return await market.signer.getAddress() -method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} = - convertEthersError: +method periodicity*( + market: OnChainMarket +): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to get Marketplace config"): let config = await market.config() let period = config.proofs.period return Periodicity(seconds: 
period) -method proofTimeout*(market: OnChainMarket): Future[UInt256] {.async.} = - convertEthersError: +method proofTimeout*( + market: OnChainMarket +): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to get Marketplace config"): let config = await market.config() return config.proofs.timeout -method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} = - convertEthersError: - let config = await market.contract.configuration() +method repairRewardPercentage*( + market: OnChainMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to get Marketplace config"): + let config = await market.config() return config.collateral.repairRewardPercentage -method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} = - convertEthersError: +method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} = + convertEthersError("Failed to get Marketplace config"): + let config = await market.config() + return config.requestDurationLimit + +method proofDowntime*( + market: OnChainMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to get Marketplace config"): let config = await market.config() return config.proofs.downtime method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} = - convertEthersError: + convertEthersError("Failed to get slot pointer"): let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.getPointer(slotId, overrides) method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} = - convertEthersError: + convertEthersError("Failed to get my requests"): return await market.contract.myRequests method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} = - convertEthersError: + convertEthersError("Failed to get my slots"): let slots = await market.contract.mySlots() debug "Fetched 
my slots", numSlots = len(slots) return slots -method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} = - convertEthersError: +method requestStorage( + market: OnChainMarket, request: StorageRequest +) {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to request storage"): debug "Requesting storage" await market.approveFunds(request.totalPrice()) discard await market.contract.requestStorage(request).confirm(1) method getRequest*( market: OnChainMarket, id: RequestId -): Future[?StorageRequest] {.async.} = - convertEthersError: - try: - return some await market.contract.getRequest(id) - except Marketplace_UnknownRequest: - return none StorageRequest +): Future[?StorageRequest] {.async: (raises: [CancelledError]).} = + try: + let key = $id + + if key in market.requestCache: + return some market.requestCache[key] + + let request = await market.contract.getRequest(id) + market.requestCache[key] = request + return some request + except Marketplace_UnknownRequest, KeyError: + warn "Cannot retrieve the request", error = getCurrentExceptionMsg() + return none StorageRequest + except EthersError as e: + error "Cannot retrieve the request", error = e.msg + return none StorageRequest method requestState*( market: OnChainMarket, requestId: RequestId ): Future[?RequestState] {.async.} = - convertEthersError: + convertEthersError("Failed to get request state"): try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return some await market.contract.requestState(requestId, overrides) except Marketplace_UnknownRequest: return none RequestState -method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} = - convertEthersError: +method slotState*( + market: OnChainMarket, slotId: SlotId +): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to fetch the slot state from the Marketplace contract"): let overrides = CallOverrides(blockTag: 
some BlockTag.pending) return await market.contract.slotState(slotId, overrides) method getRequestEnd*( market: OnChainMarket, id: RequestId ): Future[SecondsSince1970] {.async.} = - convertEthersError: + convertEthersError("Failed to get request end"): return await market.contract.requestEnd(id) method requestExpiresAt*( market: OnChainMarket, id: RequestId ): Future[SecondsSince1970] {.async.} = - convertEthersError: + convertEthersError("Failed to get request expiry"): return await market.contract.requestExpiry(id) method getHost( - market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 -): Future[?Address] {.async.} = - convertEthersError: + market: OnChainMarket, requestId: RequestId, slotIndex: uint64 +): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to get slot's host"): let slotId = slotId(requestId, slotIndex) let address = await market.contract.getHost(slotId) if address != Address.default: @@ -158,12 +232,12 @@ method getHost( method currentCollateral*( market: OnChainMarket, slotId: SlotId -): Future[UInt256] {.async.} = - convertEthersError: +): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} = + convertEthersError("Failed to get slot's current collateral"): return await market.contract.currentCollateral(slotId) method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} = - convertEthersError: + convertEthersError("Failed to get active slot"): try: return some await market.contract.getActiveSlot(slotId) except Marketplace_SlotIsFree: @@ -172,45 +246,60 @@ method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.as method fillSlot( market: OnChainMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, -) {.async.} = - convertEthersError: +) {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to fill slot"): logScope: requestId slotIndex - 
await market.approveFunds(collateral) - trace "calling fillSlot on contract" - discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1) - trace "fillSlot transaction completed" + try: + await market.approveFunds(collateral) + trace "calling fillSlot on contract" + discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1) + trace "fillSlot transaction completed" + except Marketplace_SlotNotFree as parent: + raise newException( + SlotStateMismatchError, "Failed to fill slot because the slot is not free", + parent, + ) -method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = - convertEthersError: - var freeSlot: Future[Confirmable] - if rewardRecipient =? market.rewardRecipient: - # If --reward-recipient specified, use it as the reward recipient, and use - # the SP's address as the collateral recipient - let collateralRecipient = await market.getSigner() - freeSlot = market.contract.freeSlot( - slotId, - rewardRecipient, # --reward-recipient - collateralRecipient, - ) # SP's address - else: - # Otherwise, use the SP's address as both the reward and collateral - # recipient (the contract will use msg.sender for both) - freeSlot = market.contract.freeSlot(slotId) +method freeSlot*( + market: OnChainMarket, slotId: SlotId +) {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to free slot"): + try: + var freeSlot: Future[Confirmable] + if rewardRecipient =? 
market.rewardRecipient: + # If --reward-recipient specified, use it as the reward recipient, and use + # the SP's address as the collateral recipient + let collateralRecipient = await market.getSigner() + freeSlot = market.contract.freeSlot( + slotId, + rewardRecipient, # --reward-recipient + collateralRecipient, + ) # SP's address + else: + # Otherwise, use the SP's address as both the reward and collateral + # recipient (the contract will use msg.sender for both) + freeSlot = market.contract.freeSlot(slotId) - discard await freeSlot.confirm(1) + discard await freeSlot.confirm(1) + except Marketplace_SlotIsFree as parent: + raise newException( + SlotStateMismatchError, "Failed to free slot, slot is already free", parent + ) -method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} = - convertEthersError: +method withdrawFunds( + market: OnChainMarket, requestId: RequestId +) {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to withdraw funds"): discard await market.contract.withdrawFunds(requestId).confirm(1) method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = - convertEthersError: + convertEthersError("Failed to get proof requirement"): try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.isProofRequired(id, overrides) @@ -218,7 +307,7 @@ method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async return false method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = - convertEthersError: + convertEthersError("Failed to get future proof requirement"): try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.willProofBeRequired(id, overrides) @@ -228,18 +317,20 @@ method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.a method getChallenge*( market: OnChainMarket, id: SlotId ): Future[ProofChallenge] {.async.} = - 
convertEthersError: + convertEthersError("Failed to get proof challenge"): let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.getChallenge(id, overrides) -method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} = - convertEthersError: +method submitProof*( + market: OnChainMarket, id: SlotId, proof: Groth16Proof +) {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to submit proof"): discard await market.contract.submitProof(id, proof).confirm(1) method markProofAsMissing*( market: OnChainMarket, id: SlotId, period: Period -) {.async.} = - convertEthersError: +) {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to mark proof as missing"): discard await market.contract.markProofAsMissing(id, period).confirm(1) method canProofBeMarkedAsMissing*( @@ -256,22 +347,28 @@ method canProofBeMarkedAsMissing*( return false method reserveSlot*( - market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 -) {.async.} = - convertEthersError: - discard await market.contract - .reserveSlot( - requestId, - slotIndex, - # reserveSlot runs out of gas for unknown reason, but 100k gas covers it - TransactionOverrides(gasLimit: some 100000.u256), - ) - .confirm(1) + market: OnChainMarket, requestId: RequestId, slotIndex: uint64 +) {.async: (raises: [CancelledError, MarketError]).} = + convertEthersError("Failed to reserve slot"): + try: + discard await market.contract + .reserveSlot( + requestId, + slotIndex, + # reserveSlot runs out of gas for unknown reason, but 100k gas covers it + TransactionOverrides(gasLimit: some 100000.u256), + ) + .confirm(1) + except SlotReservations_ReservationNotAllowed: + raise newException( + SlotReservationNotAllowedError, + "Failed to reserve slot because reservation is not allowed", + ) method canReserveSlot*( - market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 + market: OnChainMarket, requestId: 
RequestId, slotIndex: uint64 ): Future[bool] {.async.} = - convertEthersError: + convertEthersError("Unable to determine if slot can be reserved"): return await market.contract.canReserveSlot(requestId, slotIndex) method subscribeRequests*( @@ -284,7 +381,7 @@ method subscribeRequests*( callback(event.requestId, event.ask, event.expiry) - convertEthersError: + convertEthersError("Failed to subscribe to StorageRequested events"): let subscription = await market.contract.subscribe(StorageRequested, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -298,21 +395,21 @@ method subscribeSlotFilled*( callback(event.requestId, event.slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotFilled events"): let subscription = await market.contract.subscribe(SlotFilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) method subscribeSlotFilled*( market: OnChainMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, callback: OnSlotFilled, ): Future[MarketSubscription] {.async.} = - proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) = + proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: uint64) = if eventRequestId == requestId and eventSlotIndex == slotIndex: callback(requestId, slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotFilled events"): return await market.subscribeSlotFilled(onSlotFilled) method subscribeSlotFreed*( @@ -325,7 +422,7 @@ method subscribeSlotFreed*( callback(event.requestId, event.slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotFreed events"): let subscription = await market.contract.subscribe(SlotFreed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -340,7 +437,7 @@ method subscribeSlotReservationsFull*( callback(event.requestId, event.slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotReservationsFull 
events"): let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -354,7 +451,7 @@ method subscribeFulfillment( callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFulfilled events"): let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -369,7 +466,7 @@ method subscribeFulfillment( if event.requestId == requestId: callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFulfilled events"): let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -383,7 +480,7 @@ method subscribeRequestCancelled*( callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestCancelled events"): let subscription = await market.contract.subscribe(RequestCancelled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -398,7 +495,7 @@ method subscribeRequestCancelled*( if event.requestId == requestId: callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestCancelled events"): let subscription = await market.contract.subscribe(RequestCancelled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -412,7 +509,7 @@ method subscribeRequestFailed*( callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFailed events"): let subscription = await market.contract.subscribe(RequestFailed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -427,7 +524,7 @@ method subscribeRequestFailed*( if event.requestId == requestId: callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFailed events"): 
let subscription = await market.contract.subscribe(RequestFailed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -441,7 +538,7 @@ method subscribeProofSubmission*( callback(event.id) - convertEthersError: + convertEthersError("Failed to subscribe to ProofSubmitted events"): let subscription = await market.contract.subscribe(ProofSubmitted, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -451,13 +548,13 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} = method queryPastSlotFilledEvents*( market: OnChainMarket, fromBlock: BlockTag ): Future[seq[SlotFilled]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past SlotFilled events from block"): return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest) method queryPastSlotFilledEvents*( market: OnChainMarket, blocksAgo: int ): Future[seq[SlotFilled]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past SlotFilled events"): let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) return await market.queryPastSlotFilledEvents(fromBlock) @@ -465,21 +562,58 @@ method queryPastSlotFilledEvents*( method queryPastSlotFilledEvents*( market: OnChainMarket, fromTime: SecondsSince1970 ): Future[seq[SlotFilled]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past SlotFilled events from time"): let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime) return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock)) method queryPastStorageRequestedEvents*( market: OnChainMarket, fromBlock: BlockTag ): Future[seq[StorageRequested]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past StorageRequested events from block"): return await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest) method queryPastStorageRequestedEvents*( market: OnChainMarket, blocksAgo: int ): 
Future[seq[StorageRequested]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past StorageRequested events"): let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) return await market.queryPastStorageRequestedEvents(fromBlock) + +method slotCollateral*( + market: OnChainMarket, requestId: RequestId, slotIndex: uint64 +): Future[?!UInt256] {.async: (raises: [CancelledError]).} = + let slotid = slotId(requestId, slotIndex) + + try: + let slotState = await market.slotState(slotid) + + without request =? await market.getRequest(requestId): + return failure newException( + MarketError, "Failure calculating the slotCollateral, cannot get the request" + ) + + return market.slotCollateral(request.ask.collateralPerSlot, slotState) + except MarketError as error: + error "Error when trying to calculate the slotCollateral", error = error.msg + return failure error + +method slotCollateral*( + market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState +): ?!UInt256 {.raises: [].} = + if slotState == SlotState.Repair: + without repairRewardPercentage =? + market.configuration .? collateral .? 
repairRewardPercentage: + return failure newException( + MarketError, + "Failure calculating the slotCollateral, cannot get the reward percentage", + ) + + return success ( + collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div( + 100.u256 + ) + ) + + return success(collateralPerSlot) diff --git a/codex/contracts/marketplace.nim b/codex/contracts/marketplace.nim index 87fd1e47..11eca5be 100644 --- a/codex/contracts/marketplace.nim +++ b/codex/contracts/marketplace.nim @@ -42,6 +42,7 @@ type Marketplace_InsufficientCollateral* = object of SolidityError Marketplace_InsufficientReward* = object of SolidityError Marketplace_InvalidCid* = object of SolidityError + Marketplace_DurationExceedsLimit* = object of SolidityError Proofs_InsufficientBlockHeight* = object of SolidityError Proofs_InvalidProof* = object of SolidityError Proofs_ProofAlreadySubmitted* = object of SolidityError @@ -50,8 +51,8 @@ type Proofs_ProofNotMissing* = object of SolidityError Proofs_ProofNotRequired* = object of SolidityError Proofs_ProofAlreadyMarkedMissing* = object of SolidityError - Proofs_InvalidProbability* = object of SolidityError Periods_InvalidSecondsPerPeriod* = object of SolidityError + SlotReservations_ReservationNotAllowed* = object of SolidityError proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.} proc token*(marketplace: Marketplace): Address {.contract, view.} @@ -59,10 +60,6 @@ proc currentCollateral*( marketplace: Marketplace, id: SlotId ): UInt256 {.contract, view.} -proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.} -proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.} -proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.} - proc requestStorage*( marketplace: Marketplace, request: StorageRequest ): Confirmable {. 
@@ -70,15 +67,14 @@ proc requestStorage*( errors: [ Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists, Marketplace_InvalidExpiry, Marketplace_InsufficientSlots, - Marketplace_InvalidMaxSlotLoss, + Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration, + Marketplace_InsufficientProofProbability, Marketplace_InsufficientCollateral, + Marketplace_InsufficientReward, Marketplace_InvalidCid, ] .} proc fillSlot*( - marketplace: Marketplace, - requestId: RequestId, - slotIndex: UInt256, - proof: Groth16Proof, + marketplace: Marketplace, requestId: RequestId, slotIndex: uint64, proof: Groth16Proof ): Confirmable {. contract, errors: [ @@ -154,9 +150,6 @@ proc requestExpiry*( marketplace: Marketplace, requestId: RequestId ): SecondsSince1970 {.contract, view.} -proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.} - -proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.} proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.} proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.} proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.} @@ -175,7 +168,7 @@ proc submitProof*( .} proc markProofAsMissing*( - marketplace: Marketplace, id: SlotId, period: UInt256 + marketplace: Marketplace, id: SlotId, period: uint64 ): Confirmable {. 
contract, errors: [ @@ -186,9 +179,9 @@ proc markProofAsMissing*( .} proc reserveSlot*( - marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256 + marketplace: Marketplace, requestId: RequestId, slotIndex: uint64 ): Confirmable {.contract.} proc canReserveSlot*( - marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256 + marketplace: Marketplace, requestId: RequestId, slotIndex: uint64 ): bool {.contract, view.} diff --git a/codex/contracts/provider.nim b/codex/contracts/provider.nim index b7fc5602..b1576bb0 100644 --- a/codex/contracts/provider.nim +++ b/codex/contracts/provider.nim @@ -14,7 +14,7 @@ proc raiseProviderError(message: string) {.raises: [ProviderError].} = proc blockNumberAndTimestamp*( provider: Provider, blockTag: BlockTag -): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} = +): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError, CancelledError]).} = without latestBlock =? await provider.getBlock(blockTag): raiseProviderError("Could not get latest block") @@ -25,7 +25,7 @@ proc blockNumberAndTimestamp*( proc binarySearchFindClosestBlock( provider: Provider, epochTime: int, low: UInt256, high: UInt256 -): Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low)) let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high)) if abs(lowTimestamp.truncate(int) - epochTime) < @@ -39,7 +39,7 @@ proc binarySearchBlockNumberForEpoch( epochTime: UInt256, latestBlockNumber: UInt256, earliestBlockNumber: UInt256, -): Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = var low = earliestBlockNumber var high = latestBlockNumber @@ -65,7 +65,7 @@ proc binarySearchBlockNumberForEpoch( proc blockNumberForEpoch*( provider: Provider, epochTime: SecondsSince1970 -): 
Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = let epochTimeUInt256 = epochTime.u256 let (latestBlockNumber, latestBlockTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.latest) @@ -118,6 +118,6 @@ proc blockNumberForEpoch*( proc pastBlockTag*( provider: Provider, blocksAgo: int -): Future[BlockTag] {.async: (raises: [ProviderError]).} = +): Future[BlockTag] {.async: (raises: [ProviderError, CancelledError]).} = let head = await provider.getBlockNumber() return BlockTag.init(head - blocksAgo.abs.u256) diff --git a/codex/contracts/requests.nim b/codex/contracts/requests.nim index 48947602..2b3811c3 100644 --- a/codex/contracts/requests.nim +++ b/codex/contracts/requests.nim @@ -6,8 +6,11 @@ import pkg/nimcrypto import pkg/ethers/fields import pkg/questionable/results import pkg/stew/byteutils +import pkg/libp2p/[cid, multicodec] import ../logutils import ../utils/json +import ../clock +from ../errors import mapFailure export contractabi @@ -16,25 +19,25 @@ type client* {.serialize.}: Address ask* {.serialize.}: StorageAsk content* {.serialize.}: StorageContent - expiry* {.serialize.}: UInt256 + expiry* {.serialize.}: uint64 nonce*: Nonce StorageAsk* = object - slots* {.serialize.}: uint64 - slotSize* {.serialize.}: UInt256 - duration* {.serialize.}: UInt256 proofProbability* {.serialize.}: UInt256 pricePerBytePerSecond* {.serialize.}: UInt256 collateralPerByte* {.serialize.}: UInt256 + slots* {.serialize.}: uint64 + slotSize* {.serialize.}: uint64 + duration* {.serialize.}: uint64 maxSlotLoss* {.serialize.}: uint64 StorageContent* = object - cid* {.serialize.}: string + cid* {.serialize.}: Cid merkleRoot*: array[32, byte] Slot* = object request* {.serialize.}: StorageRequest - slotIndex* {.serialize.}: UInt256 + slotIndex* {.serialize.}: uint64 SlotId* = distinct array[32, byte] RequestId* = distinct array[32, byte] @@ -108,18 +111,21 @@ func fromTuple(_: type Slot, tupl: 
tuple): Slot = func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk = StorageAsk( - slots: tupl[0], - slotSize: tupl[1], - duration: tupl[2], - proofProbability: tupl[3], - pricePerBytePerSecond: tupl[4], - collateralPerByte: tupl[5], + proofProbability: tupl[0], + pricePerBytePerSecond: tupl[1], + collateralPerByte: tupl[2], + slots: tupl[3], + slotSize: tupl[4], + duration: tupl[5], maxSlotLoss: tupl[6], ) func fromTuple(_: type StorageContent, tupl: tuple): StorageContent = StorageContent(cid: tupl[0], merkleRoot: tupl[1]) +func solidityType*(_: type Cid): string = + solidityType(seq[byte]) + func solidityType*(_: type StorageContent): string = solidityType(StorageContent.fieldTypes) @@ -129,6 +135,10 @@ func solidityType*(_: type StorageAsk): string = func solidityType*(_: type StorageRequest): string = solidityType(StorageRequest.fieldTypes) +# Note: it seems to be ok to ignore the vbuffer offset for now +func encode*(encoder: var AbiEncoder, cid: Cid) = + encoder.write(cid.data.buffer) + func encode*(encoder: var AbiEncoder, content: StorageContent) = encoder.write(content.fieldValues) @@ -141,8 +151,12 @@ func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) = func encode*(encoder: var AbiEncoder, request: StorageRequest) = encoder.write(request.fieldValues) -func encode*(encoder: var AbiEncoder, request: Slot) = - encoder.write(request.fieldValues) +func encode*(encoder: var AbiEncoder, slot: Slot) = + encoder.write(slot.fieldValues) + +func decode*(decoder: var AbiDecoder, T: type Cid): ?!T = + let data = ?decoder.read(seq[byte]) + Cid.init(data).mapFailure func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T = let tupl = ?decoder.read(StorageContent.fieldTypes) @@ -164,21 +178,21 @@ func id*(request: StorageRequest): RequestId = let encoding = AbiEncoder.encode((request,)) RequestId(keccak256.digest(encoding).data) -func slotId*(requestId: RequestId, slotIndex: UInt256): SlotId = +func slotId*(requestId: RequestId, 
slotIndex: uint64): SlotId = let encoding = AbiEncoder.encode((requestId, slotIndex)) SlotId(keccak256.digest(encoding).data) -func slotId*(request: StorageRequest, slotIndex: UInt256): SlotId = +func slotId*(request: StorageRequest, slotIndex: uint64): SlotId = slotId(request.id, slotIndex) func id*(slot: Slot): SlotId = slotId(slot.request, slot.slotIndex) func pricePerSlotPerSecond*(ask: StorageAsk): UInt256 = - ask.pricePerBytePerSecond * ask.slotSize + ask.pricePerBytePerSecond * ask.slotSize.u256 func pricePerSlot*(ask: StorageAsk): UInt256 = - ask.duration * ask.pricePerSlotPerSecond + ask.duration.u256 * ask.pricePerSlotPerSecond func totalPrice*(ask: StorageAsk): UInt256 = ask.slots.u256 * ask.pricePerSlot @@ -187,7 +201,7 @@ func totalPrice*(request: StorageRequest): UInt256 = request.ask.totalPrice func collateralPerSlot*(ask: StorageAsk): UInt256 = - ask.collateralPerByte * ask.slotSize + ask.collateralPerByte * ask.slotSize.u256 -func size*(ask: StorageAsk): UInt256 = - ask.slots.u256 * ask.slotSize +func size*(ask: StorageAsk): uint64 = + ask.slots * ask.slotSize diff --git a/codex/discovery.nim b/codex/discovery.nim index 9aa8c7d8..4a211c20 100644 --- a/codex/discovery.nim +++ b/codex/discovery.nim @@ -7,6 +7,8 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. +{.push raises: [].} + import std/algorithm import std/sequtils @@ -54,70 +56,122 @@ proc toNodeId*(host: ca.Address): NodeId = readUintBE[256](keccak256.digest(host.toArray).data) -proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} = +proc findPeer*( + d: Discovery, peerId: PeerId +): Future[?PeerRecord] {.async: (raises: [CancelledError]).} = trace "protocol.resolve..." 
## Find peer using the given Discovery object ## - let node = await d.protocol.resolve(toNodeId(peerId)) - return - if node.isSome(): - node.get().record.data.some - else: - PeerRecord.none + try: + let node = await d.protocol.resolve(toNodeId(peerId)) -method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} = + return + if node.isSome(): + node.get().record.data.some + else: + PeerRecord.none + except CancelledError as exc: + warn "Error finding peer", peerId = peerId, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error finding peer", peerId = peerId, exc = exc.msg + + return PeerRecord.none + +method find*( + d: Discovery, cid: Cid +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} = ## Find block providers ## - without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error: - warn "Error finding providers for block", cid, error = error.msg - return providers.filterIt(not (it.data.peerId == d.peerId)) + try: + without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, + error: + warn "Error finding providers for block", cid, error = error.msg -method provide*(d: Discovery, cid: Cid) {.async, base.} = + return providers.filterIt(not (it.data.peerId == d.peerId)) + except CancelledError as exc: + warn "Error finding providers for block", cid, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error finding providers for block", cid, exc = exc.msg + +method provide*(d: Discovery, cid: Cid) {.async: (raises: [CancelledError]), base.} = ## Provide a block Cid ## - let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get) + try: + let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get) - if nodes.len <= 0: - warn "Couldn't provide to any nodes!" + if nodes.len <= 0: + warn "Couldn't provide to any nodes!" 
+ except CancelledError as exc: + warn "Error providing block", cid, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error providing block", cid, exc = exc.msg method find*( d: Discovery, host: ca.Address -): Future[seq[SignedPeerRecord]] {.async, base.} = +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} = ## Find host providers ## - trace "Finding providers for host", host = $host - without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure, - error: - trace "Error finding providers for host", host = $host, exc = error.msg - return + try: + trace "Finding providers for host", host = $host + without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure, + error: + trace "Error finding providers for host", host = $host, exc = error.msg + return - if providers.len <= 0: - trace "No providers found", host = $host - return + if providers.len <= 0: + trace "No providers found", host = $host + return - providers.sort do(a, b: SignedPeerRecord) -> int: - system.cmp[uint64](a.data.seqNo, b.data.seqNo) + providers.sort do(a, b: SignedPeerRecord) -> int: + system.cmp[uint64](a.data.seqNo, b.data.seqNo) - return providers + return providers + except CancelledError as exc: + warn "Error finding providers for host", host = $host, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error finding providers for host", host = $host, exc = exc.msg -method provide*(d: Discovery, host: ca.Address) {.async, base.} = +method provide*( + d: Discovery, host: ca.Address +) {.async: (raises: [CancelledError]), base.} = ## Provide hosts ## - trace "Providing host", host = $host - let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get) - if nodes.len > 0: - trace "Provided to nodes", nodes = nodes.len + try: + trace "Providing host", host = $host + let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get) + if nodes.len > 0: + trace 
"Provided to nodes", nodes = nodes.len + except CancelledError as exc: + warn "Error providing host", host = $host, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error providing host", host = $host, exc = exc.msg -method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} = +method removeProvider*( + d: Discovery, peerId: PeerId +): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} = ## Remove provider from providers table ## trace "Removing provider", peerId - d.protocol.removeProvidersLocal(peerId) + try: + await d.protocol.removeProvidersLocal(peerId) + except CancelledError as exc: + warn "Error removing provider", peerId = peerId, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error removing provider", peerId = peerId, exc = exc.msg + except Exception as exc: # Something in discv5 is raising Exception + warn "Error removing provider", peerId = peerId, exc = exc.msg + raiseAssert("Unexpected Exception in removeProvider") proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) = ## Update providers record @@ -125,7 +179,7 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) = d.announceAddrs = @addrs - trace "Updating announce record", addrs = d.announceAddrs + info "Updating announce record", addrs = d.announceAddrs d.providerRecord = SignedPeerRecord .init(d.key, PeerRecord.init(d.peerId, d.announceAddrs)) .expect("Should construct signed record").some @@ -137,7 +191,7 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) = ## Update providers record ## - trace "Updating Dht record", addrs = addrs + info "Updating Dht record", addrs = addrs d.dhtRecord = SignedPeerRecord .init(d.key, PeerRecord.init(d.peerId, @addrs)) .expect("Should construct signed record").some @@ -145,12 +199,18 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) = if not d.protocol.isNil: 
d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR") -proc start*(d: Discovery) {.async.} = - d.protocol.open() - await d.protocol.start() +proc start*(d: Discovery) {.async: (raises: []).} = + try: + d.protocol.open() + await d.protocol.start() + except CatchableError as exc: + error "Error starting discovery", exc = exc.msg -proc stop*(d: Discovery) {.async.} = - await d.protocol.closeWait() +proc stop*(d: Discovery) {.async: (raises: []).} = + try: + await noCancel d.protocol.closeWait() + except CatchableError as exc: + error "Error stopping discovery", exc = exc.msg proc new*( T: type Discovery, diff --git a/codex/erasure/backend.nim b/codex/erasure/backend.nim index a6dd8b8c..32009829 100644 --- a/codex/erasure/backend.nim +++ b/codex/erasure/backend.nim @@ -29,14 +29,18 @@ method release*(self: ErasureBackend) {.base, gcsafe.} = raiseAssert("not implemented!") method encode*( - self: EncoderBackend, buffers, parity: var openArray[seq[byte]] + self: EncoderBackend, + buffers, parity: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen: int, ): Result[void, cstring] {.base, gcsafe.} = ## encode buffers using a backend ## raiseAssert("not implemented!") method decode*( - self: DecoderBackend, buffers, parity, recovered: var openArray[seq[byte]] + self: DecoderBackend, + buffers, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen, recoveredLen: int, ): Result[void, cstring] {.base, gcsafe.} = ## decode buffers using a backend ## diff --git a/codex/erasure/backends/leopard.nim b/codex/erasure/backends/leopard.nim index c9f9db40..a0016570 100644 --- a/codex/erasure/backends/leopard.nim +++ b/codex/erasure/backends/leopard.nim @@ -10,7 +10,7 @@ import std/options import pkg/leopard -import pkg/stew/results +import pkg/results import ../backend @@ -22,11 +22,13 @@ type decoder*: Option[LeoDecoder] method encode*( - self: LeoEncoderBackend, data, parity: var openArray[seq[byte]] + self: LeoEncoderBackend, 
+ data, parity: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen: int, ): Result[void, cstring] = ## Encode data using Leopard backend - if parity.len == 0: + if parityLen == 0: return ok() var encoder = @@ -36,10 +38,12 @@ method encode*( else: self.encoder.get() - encoder.encode(data, parity) + encoder.encode(data, parity, dataLen, parityLen) method decode*( - self: LeoDecoderBackend, data, parity, recovered: var openArray[seq[byte]] + self: LeoDecoderBackend, + data, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen, recoveredLen: int, ): Result[void, cstring] = ## Decode data using given Leopard backend @@ -50,7 +54,7 @@ method decode*( else: self.decoder.get() - decoder.decode(data, parity, recovered) + decoder.decode(data, parity, recovered, dataLen, parityLen, recoveredLen) method release*(self: LeoEncoderBackend) = if self.encoder.isSome: diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim index aacd187a..884969d0 100644 --- a/codex/erasure/erasure.nim +++ b/codex/erasure/erasure.nim @@ -12,12 +12,14 @@ import pkg/upraises push: {.upraises: [].} -import std/sequtils -import std/sugar +import std/[sugar, atomics, sequtils] import pkg/chronos +import pkg/chronos/threadsync +import pkg/chronicles import pkg/libp2p/[multicodec, cid, multihash] import pkg/libp2p/protobuf/minprotobuf +import pkg/taskpools import ../logutils import ../manifest @@ -28,6 +30,7 @@ import ../utils import ../utils/asynciter import ../indexingstrategy import ../errors +import ../utils/arrayutils import pkg/stew/byteutils @@ -68,6 +71,7 @@ type proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.} Erasure* = ref object + taskPool: Taskpool encoderProvider*: EncoderProvider decoderProvider*: DecoderProvider store*: BlockStore @@ -87,6 +91,24 @@ type # provided. 
minSize*: NBytes + EncodeTask = object + success: Atomic[bool] + erasure: ptr Erasure + blocks: ptr UncheckedArray[ptr UncheckedArray[byte]] + parity: ptr UncheckedArray[ptr UncheckedArray[byte]] + blockSize, blocksLen, parityLen: int + signal: ThreadSignalPtr + + DecodeTask = object + success: Atomic[bool] + erasure: ptr Erasure + blocks: ptr UncheckedArray[ptr UncheckedArray[byte]] + parity: ptr UncheckedArray[ptr UncheckedArray[byte]] + recovered: ptr UncheckedArray[ptr UncheckedArray[byte]] + blockSize, blocksLen: int + parityLen, recoveredLen: int + signal: ThreadSignalPtr + func indexToPos(steps, idx, step: int): int {.inline.} = ## Convert an index to a position in the encoded ## dataset @@ -269,6 +291,73 @@ proc init*( strategy: strategy, ) +proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} = + # Task suitable for running in taskpools - look, no GC! + let encoder = + task[].erasure.encoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen) + defer: + encoder.release() + discard task[].signal.fireSync() + + if ( + let res = + encoder.encode(task[].blocks, task[].parity, task[].blocksLen, task[].parityLen) + res.isErr + ): + warn "Error from leopard encoder backend!", error = $res.error + + task[].success.store(false) + else: + task[].success.store(true) + +proc asyncEncode*( + self: Erasure, + blockSize, blocksLen, parityLen: int, + blocks: ref seq[seq[byte]], + parity: ptr UncheckedArray[ptr UncheckedArray[byte]], +): Future[?!void] {.async: (raises: [CancelledError]).} = + without threadPtr =? 
ThreadSignalPtr.new(): + return failure("Unable to create thread signal") + + defer: + threadPtr.close().expect("closing once works") + + var data = makeUncheckedArray(blocks) + + defer: + dealloc(data) + + ## Create an ecode task with block data + var task = EncodeTask( + erasure: addr self, + blockSize: blockSize, + blocksLen: blocksLen, + parityLen: parityLen, + blocks: data, + parity: parity, + signal: threadPtr, + ) + + let t = addr task + + doAssert self.taskPool.numThreads > 1, + "Must have at least one separate thread or signal will never be fired" + self.taskPool.spawn leopardEncodeTask(self.taskPool, t) + let threadFut = threadPtr.wait() + + if joinErr =? catch(await threadFut.join()).errorOption: + if err =? catch(await noCancel threadFut).errorOption: + return failure(err) + if joinErr of CancelledError: + raise (ref CancelledError) joinErr + else: + return failure(joinErr) + + if not t.success.load(): + return failure("Leopard encoding failed") + + success() + proc encodeData( self: Erasure, manifest: Manifest, params: EncodingParams ): Future[?!Manifest] {.async.} = @@ -276,7 +365,6 @@ proc encodeData( ## ## `manifest` - the manifest to encode ## - logScope: steps = params.steps rounded_blocks = params.rounded @@ -286,7 +374,6 @@ proc encodeData( var cids = seq[Cid].new() - encoder = self.encoderProvider(manifest.blockSize.int, params.ecK, params.ecM) emptyBlock = newSeq[byte](manifest.blockSize.int) cids[].setLen(params.blocksCount) @@ -296,8 +383,7 @@ proc encodeData( # TODO: Don't allocate a new seq every time, allocate once and zero out var data = seq[seq[byte]].new() # number of blocks to encode - parityData = - newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int)) + parity = createDoubleArray(params.ecM, manifest.blockSize.int) data[].setLen(params.ecK) # TODO: this is a tight blocking loop so we sleep here to allow @@ -311,15 +397,25 @@ proc encodeData( trace "Unable to prepare data", error = err.msg return failure(err) - 
trace "Erasure coding data", data = data[].len, parity = parityData.len + trace "Erasure coding data", data = data[].len - if (let res = encoder.encode(data[], parityData); res.isErr): - trace "Unable to encode manifest!", error = $res.error - return failure($res.error) + try: + if err =? ( + await self.asyncEncode( + manifest.blockSize.int, params.ecK, params.ecM, data, parity + ) + ).errorOption: + return failure(err) + except CancelledError as exc: + raise exc + finally: + freeDoubleArray(parity, params.ecM) var idx = params.rounded + step for j in 0 ..< params.ecM: - without blk =? bt.Block.new(parityData[j]), error: + var innerPtr: ptr UncheckedArray[byte] = parity[][j] + without blk =? bt.Block.new(innerPtr.toOpenArray(0, manifest.blockSize.int - 1)), + error: trace "Unable to create parity block", err = error.msg return failure(error) @@ -356,8 +452,6 @@ proc encodeData( except CatchableError as exc: trace "Erasure coding encoding error", exc = exc.msg return failure(exc) - finally: - encoder.release() proc encode*( self: Erasure, @@ -381,6 +475,83 @@ proc encode*( return success encodedManifest +proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} = + # Task suitable for running in taskpools - look, no GC! + let decoder = + task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen) + defer: + decoder.release() + discard task[].signal.fireSync() + + if ( + let res = decoder.decode( + task[].blocks, + task[].parity, + task[].recovered, + task[].blocksLen, + task[].parityLen, + task[].recoveredLen, + ) + res.isErr + ): + warn "Error from leopard decoder backend!", error = $res.error + task[].success.store(false) + else: + task[].success.store(true) + +proc asyncDecode*( + self: Erasure, + blockSize, blocksLen, parityLen: int, + blocks, parity: ref seq[seq[byte]], + recovered: ptr UncheckedArray[ptr UncheckedArray[byte]], +): Future[?!void] {.async: (raises: [CancelledError]).} = + without threadPtr =? 
ThreadSignalPtr.new(): + return failure("Unable to create thread signal") + + defer: + threadPtr.close().expect("closing once works") + + var + blockData = makeUncheckedArray(blocks) + parityData = makeUncheckedArray(parity) + + defer: + dealloc(blockData) + dealloc(parityData) + + ## Create an decode task with block data + var task = DecodeTask( + erasure: addr self, + blockSize: blockSize, + blocksLen: blocksLen, + parityLen: parityLen, + recoveredLen: blocksLen, + blocks: blockData, + parity: parityData, + recovered: recovered, + signal: threadPtr, + ) + + # Hold the task pointer until the signal is received + let t = addr task + doAssert self.taskPool.numThreads > 1, + "Must have at least one separate thread or signal will never be fired" + self.taskPool.spawn leopardDecodeTask(self.taskPool, t) + let threadFut = threadPtr.wait() + + if joinErr =? catch(await threadFut.join()).errorOption: + if err =? catch(await noCancel threadFut).errorOption: + return failure(err) + if joinErr of CancelledError: + raise (ref CancelledError) joinErr + else: + return failure(joinErr) + + if not t.success.load(): + return failure("Leopard encoding failed") + + success() + proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = ## Decode a protected manifest into it's original ## manifest @@ -388,7 +559,6 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = ## `encoded` - the encoded (protected) manifest to ## be recovered ## - logScope: steps = encoded.steps rounded_blocks = encoded.rounded @@ -411,8 +581,7 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = var data = seq[seq[byte]].new() parityData = seq[seq[byte]].new() - recovered = - newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int)) + recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int) data[].setLen(encoded.ecK) # set len to K parityData[].setLen(encoded.ecM) # set len to M @@ -430,15 +599,26 @@ proc 
decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = continue trace "Erasure decoding data" - - if (let err = decoder.decode(data[], parityData[], recovered); err.isErr): - trace "Unable to decode data!", err = $err.error - return failure($err.error) + try: + if err =? ( + await self.asyncDecode( + encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered + ) + ).errorOption: + return failure(err) + except CancelledError as exc: + raise exc + finally: + freeDoubleArray(recovered, encoded.ecK) for i in 0 ..< encoded.ecK: let idx = i * encoded.steps + step if data[i].len <= 0 and not cids[idx].isEmpty: - without blk =? bt.Block.new(recovered[i]), error: + var innerPtr: ptr UncheckedArray[byte] = recovered[][i] + + without blk =? bt.Block.new( + innerPtr.toOpenArray(0, encoded.blockSize.int - 1) + ), error: trace "Unable to create block!", exc = error.msg return failure(error) @@ -490,10 +670,13 @@ proc new*( store: BlockStore, encoderProvider: EncoderProvider, decoderProvider: DecoderProvider, + taskPool: Taskpool, ): Erasure = ## Create a new Erasure instance for encoding and decoding manifests ## - Erasure( - store: store, encoderProvider: encoderProvider, decoderProvider: decoderProvider + store: store, + encoderProvider: encoderProvider, + decoderProvider: decoderProvider, + taskPool: taskPool, ) diff --git a/codex/errors.nim b/codex/errors.nim index f7c2fa6b..fadf7299 100644 --- a/codex/errors.nim +++ b/codex/errors.nim @@ -9,7 +9,7 @@ import std/options -import pkg/stew/results +import pkg/results import pkg/chronos import pkg/questionable/results @@ -19,6 +19,8 @@ type CodexError* = object of CatchableError # base codex error CodexResult*[T] = Result[T, ref CodexError] + FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]] + template mapFailure*[T, V, E]( exp: Result[T, V], exc: typedesc[E] ): Result[T, ref CatchableError] = @@ -40,35 +42,18 @@ func toFailure*[T](exp: Option[T]): Result[T, ref 
CatchableError] {.inline.} = else: T.failure("Option is None") -# allFuturesThrowing was moved to the tests in libp2p -proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] = - var futs: seq[Future[T]] - for fut in args: - futs &= fut - proc call() {.async.} = - var first: ref CatchableError = nil - futs = await allFinished(futs) - for fut in futs: - if fut.failed: - let err = fut.readError() - if err of Defect: - raise err - else: - if err of CancelledError: - raise err - if isNil(first): - first = err - if not isNil(first): - raise first +proc allFinishedFailed*[T](futs: seq[Future[T]]): Future[FinishedFailed[T]] {.async.} = + ## Check if all futures have finished or failed + ## + ## TODO: wip, not sure if we want this - at the minimum, + ## we should probably avoid the async transform - return call() + var res: FinishedFailed[T] = (@[], @[]) + await allFutures(futs) + for f in futs: + if f.failed: + res.failure.add f + else: + res.success.add f -proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} = - try: - await allFuturesThrowing(fut) - except CancelledError as exc: - raise exc - except CatchableError as exc: - return failure(exc.msg) - - return success() + return res diff --git a/codex/logutils.nim b/codex/logutils.nim index b37f6952..e9604aba 100644 --- a/codex/logutils.nim +++ b/codex/logutils.nim @@ -152,7 +152,7 @@ proc formatTextLineSeq*(val: seq[string]): string = template formatIt*(format: LogFormat, T: typedesc, body: untyped) = # Provides formatters for logging with Chronicles for the given type and # `LogFormat`. - # NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overriddden + # NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overridden # since the base `setProperty` is generic using `auto` and conflicts with # providing a generic `seq` and `Option` override. 
when format == LogFormat.json: diff --git a/codex/manifest/coders.nim b/codex/manifest/coders.nim index 0c461e45..30e0c7ca 100644 --- a/codex/manifest/coders.nim +++ b/codex/manifest/coders.nim @@ -63,7 +63,6 @@ proc encode*(manifest: Manifest): ?!seq[byte] = # optional ErasureInfo erasure = 7; # erasure coding info # optional filename: ?string = 8; # original filename # optional mimetype: ?string = 9; # original mimetype - # optional uploadedAt: ?int64 = 10; # original uploadedAt # } # ``` # @@ -102,9 +101,6 @@ proc encode*(manifest: Manifest): ?!seq[byte] = if manifest.mimetype.isSome: header.write(9, manifest.mimetype.get()) - if manifest.uploadedAt.isSome: - header.write(10, manifest.uploadedAt.get().uint64) - pbNode.write(1, header) # set the treeCid as the data field pbNode.finish() @@ -135,7 +131,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = verifiableStrategy: uint32 filename: string mimetype: string - uploadedAt: uint64 # Decode `Header` message if pbNode.getField(1, pbHeader).isErr: @@ -169,9 +164,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = if pbHeader.getField(9, mimetype).isErr: return failure("Unable to decode `mimetype` from manifest!") - if pbHeader.getField(10, uploadedAt).isErr: - return failure("Unable to decode `uploadedAt` from manifest!") - let protected = pbErasureInfo.buffer.len > 0 var verifiable = false if protected: @@ -211,7 +203,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = var filenameOption = if filename.len == 0: string.none else: filename.some var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some - var uploadedAtOption = if uploadedAt == 0: int64.none else: uploadedAt.int64.some let self = if protected: @@ -229,7 +220,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = strategy = StrategyType(protectedStrategy), filename = filenameOption, mimetype = mimetypeOption, - uploadedAt = uploadedAtOption, ) else: 
Manifest.new( @@ -241,7 +231,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = codec = codec.MultiCodec, filename = filenameOption, mimetype = mimetypeOption, - uploadedAt = uploadedAtOption, ) ?self.verify() diff --git a/codex/manifest/manifest.nim b/codex/manifest/manifest.nim index 6e0d1b80..cbb0bace 100644 --- a/codex/manifest/manifest.nim +++ b/codex/manifest/manifest.nim @@ -38,7 +38,6 @@ type Manifest* = ref object of RootObj version: CidVersion # Cid version filename {.serialize.}: ?string # The filename of the content uploaded (optional) mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional) - uploadedAt {.serialize.}: ?int64 # The UTC creation timestamp in seconds case protected {.serialize.}: bool # Protected datasets have erasure coded info of true: ecK: int # Number of blocks to encode @@ -131,8 +130,6 @@ func filename*(self: Manifest): ?string = func mimetype*(self: Manifest): ?string = self.mimetype -func uploadedAt*(self: Manifest): ?int64 = - self.uploadedAt ############################################################ # Operations on block list ############################################################ @@ -165,14 +162,11 @@ func verify*(self: Manifest): ?!void = return success() -func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} = - self.treeCid.success - func `==`*(a, b: Manifest): bool = (a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and (a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and (a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and - (a.mimetype == b.mimetype) and (a.uploadedAt == b.uploadedAt) and ( + (a.mimetype == b.mimetype) and ( if a.protected: (a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and (a.originalDatasetSize == b.originalDatasetSize) and @@ -202,9 +196,6 @@ func `$`*(self: Manifest): string = if self.mimetype.isSome: result &= ", 
mimetype: " & $self.mimetype - if self.uploadedAt.isSome: - result &= ", uploadedAt: " & $self.uploadedAt - result &= ( if self.protected: ", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " & @@ -236,7 +227,6 @@ func new*( protected = false, filename: ?string = string.none, mimetype: ?string = string.none, - uploadedAt: ?int64 = int64.none, ): Manifest = T( treeCid: treeCid, @@ -248,7 +238,6 @@ func new*( protected: protected, filename: filename, mimetype: mimetype, - uploadedAt: uploadedAt, ) func new*( @@ -278,7 +267,6 @@ func new*( protectedStrategy: strategy, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt, ) func new*(T: type Manifest, manifest: Manifest): Manifest = @@ -296,7 +284,6 @@ func new*(T: type Manifest, manifest: Manifest): Manifest = protected: false, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt, ) func new*( @@ -314,7 +301,6 @@ func new*( strategy = SteppedStrategy, filename: ?string = string.none, mimetype: ?string = string.none, - uploadedAt: ?int64 = int64.none, ): Manifest = Manifest( treeCid: treeCid, @@ -331,7 +317,6 @@ func new*( protectedStrategy: strategy, filename: filename, mimetype: mimetype, - uploadedAt: uploadedAt, ) func new*( @@ -374,7 +359,6 @@ func new*( verifiableStrategy: strategy, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt, ) func new*(T: type Manifest, data: openArray[byte]): ?!Manifest = diff --git a/codex/market.nim b/codex/market.nim index bc325cd9..31c0687f 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -18,15 +18,16 @@ export periods type Market* = ref object of RootObj MarketError* = object of CodexError + SlotStateMismatchError* = object of MarketError + SlotReservationNotAllowedError* = object of MarketError Subscription* = ref object of RootObj OnRequest* = - proc(id: RequestId, ask: StorageAsk, expiry: UInt256) {.gcsafe, upraises: [].} + proc(id: 
RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].} OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].} - OnSlotFilled* = - proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} - OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} + OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} OnSlotReservationsFull* = - proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} + proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].} OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].} OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].} @@ -37,19 +38,19 @@ type StorageRequested* = object of MarketplaceEvent requestId*: RequestId ask*: StorageAsk - expiry*: UInt256 + expiry*: uint64 SlotFilled* = object of MarketplaceEvent requestId* {.indexed.}: RequestId - slotIndex*: UInt256 + slotIndex*: uint64 SlotFreed* = object of MarketplaceEvent requestId* {.indexed.}: RequestId - slotIndex*: UInt256 + slotIndex*: uint64 SlotReservationsFull* = object of MarketplaceEvent requestId* {.indexed.}: RequestId - slotIndex*: UInt256 + slotIndex*: uint64 RequestFulfilled* = object of MarketplaceEvent requestId* {.indexed.}: RequestId @@ -63,22 +64,42 @@ type ProofSubmitted* = object of MarketplaceEvent id*: SlotId -method getZkeyHash*(market: Market): Future[?string] {.base, async.} = +method loadConfig*( + market: Market +): Future[?!void] {.base, async: (raises: [CancelledError]).} = raiseAssert("not implemented") -method getSigner*(market: Market): Future[Address] {.base, async.} = +method getZkeyHash*( + market: Market +): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method 
periodicity*(market: Market): Future[Periodicity] {.base, async.} = +method getSigner*( + market: Market +): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method proofTimeout*(market: Market): Future[UInt256] {.base, async.} = +method periodicity*( + market: Market +): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} = +method proofTimeout*( + market: Market +): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method proofDowntime*(market: Market): Future[uint8] {.base, async.} = +method repairRewardPercentage*( + market: Market +): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} = + raiseAssert("not implemented") + +method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} = + raiseAssert("not implemented") + +method proofDowntime*( + market: Market +): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} = @@ -89,7 +110,9 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} = let pntr = await market.getPointer(slotId) return pntr < downtime -method requestStorage*(market: Market, request: StorageRequest) {.base, async.} = +method requestStorage*( + market: Market, request: StorageRequest +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} = @@ -100,7 +123,7 @@ method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} = method getRequest*( market: Market, id: RequestId -): Future[?StorageRequest] {.base, async.} = +): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} = 
raiseAssert("not implemented") method requestState*( @@ -108,7 +131,9 @@ method requestState*( ): Future[?RequestState] {.base, async.} = raiseAssert("not implemented") -method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} = +method slotState*( + market: Market, slotId: SlotId +): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method getRequestEnd*( @@ -122,13 +147,13 @@ method requestExpiresAt*( raiseAssert("not implemented") method getHost*( - market: Market, requestId: RequestId, slotIndex: UInt256 -): Future[?Address] {.base, async.} = + market: Market, requestId: RequestId, slotIndex: uint64 +): Future[?Address] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method currentCollateral*( market: Market, slotId: SlotId -): Future[UInt256] {.base, async.} = +): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} = raiseAssert("not implemented") method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} = @@ -137,16 +162,20 @@ method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, asy method fillSlot*( market: Market, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, -) {.base, async.} = +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method freeSlot*(market: Market, slotId: SlotId) {.base, async.} = +method freeSlot*( + market: Market, slotId: SlotId +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method withdrawFunds*(market: Market, requestId: RequestId) {.base, async.} = +method withdrawFunds*( + market: Market, requestId: RequestId +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method subscribeRequests*( @@ -165,10 +194,14 @@ method getChallenge*( ): 
Future[ProofChallenge] {.base, async.} = raiseAssert("not implemented") -method submitProof*(market: Market, id: SlotId, proof: Groth16Proof) {.base, async.} = +method submitProof*( + market: Market, id: SlotId, proof: Groth16Proof +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method markProofAsMissing*(market: Market, id: SlotId, period: Period) {.base, async.} = +method markProofAsMissing*( + market: Market, id: SlotId, period: Period +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method canProofBeMarkedAsMissing*( @@ -177,12 +210,12 @@ method canProofBeMarkedAsMissing*( raiseAssert("not implemented") method reserveSlot*( - market: Market, requestId: RequestId, slotIndex: UInt256 -) {.base, async.} = + market: Market, requestId: RequestId, slotIndex: uint64 +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method canReserveSlot*( - market: Market, requestId: RequestId, slotIndex: UInt256 + market: Market, requestId: RequestId, slotIndex: uint64 ): Future[bool] {.base, async.} = raiseAssert("not implemented") @@ -202,7 +235,7 @@ method subscribeSlotFilled*( raiseAssert("not implemented") method subscribeSlotFilled*( - market: Market, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled + market: Market, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled ): Future[Subscription] {.base, async.} = raiseAssert("not implemented") @@ -268,3 +301,13 @@ method queryPastStorageRequestedEvents*( market: Market, blocksAgo: int ): Future[seq[StorageRequested]] {.base, async.} = raiseAssert("not implemented") + +method slotCollateral*( + market: Market, requestId: RequestId, slotIndex: uint64 +): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} = + raiseAssert("not implemented") + +method slotCollateral*( + market: Market, collateralPerSlot: UInt256, slotState: SlotState +): ?!UInt256 {.base, gcsafe, 
raises: [].} = + raiseAssert("not implemented") diff --git a/codex/merkletree/codex/coders.nim b/codex/merkletree/codex/coders.nim index b8209991..1d50707c 100644 --- a/codex/merkletree/codex/coders.nim +++ b/codex/merkletree/codex/coders.nim @@ -27,11 +27,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint const MaxMerkleProofSize = 1.MiBs.uint proc encode*(self: CodexTree): seq[byte] = - var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize) + var pb = initProtoBuffer() pb.write(1, self.mcodec.uint64) pb.write(2, self.leavesCount.uint64) for node in self.nodes: - var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize) + var nodesPb = initProtoBuffer() nodesPb.write(1, node) nodesPb.finish() pb.write(3, nodesPb) @@ -40,7 +40,7 @@ proc encode*(self: CodexTree): seq[byte] = pb.buffer proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree = - var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize) + var pb = initProtoBuffer(data) var mcodecCode: uint64 var leavesCount: uint64 discard ?pb.getField(1, mcodecCode).mapFailure @@ -63,13 +63,13 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree = CodexTree.fromNodes(mcodec, nodes, leavesCount.int) proc encode*(self: CodexProof): seq[byte] = - var pb = initProtoBuffer(maxSize = MaxMerkleProofSize) + var pb = initProtoBuffer() pb.write(1, self.mcodec.uint64) pb.write(2, self.index.uint64) pb.write(3, self.nleaves.uint64) for node in self.path: - var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize) + var nodesPb = initProtoBuffer() nodesPb.write(1, node) nodesPb.finish() pb.write(4, nodesPb) @@ -78,7 +78,7 @@ proc encode*(self: CodexProof): seq[byte] = pb.buffer proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof = - var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize) + var pb = initProtoBuffer(data) var mcodecCode: uint64 var index: uint64 var nleaves: uint64 diff --git a/codex/node.nim b/codex/node.nim index ee2a2b46..9932deb6 100644 --- a/codex/node.nim +++ b/codex/node.nim 
@@ -15,6 +15,7 @@ import std/strformat import std/sugar import times +import pkg/taskpools import pkg/questionable import pkg/questionable/results import pkg/chronos @@ -44,13 +45,14 @@ import ./utils import ./errors import ./logutils import ./utils/asynciter +import ./utils/trackedfutures export logutils logScope: topics = "codex node" -const FetchBatch = 200 +const DefaultFetchBatch = 10 type Contracts* = @@ -70,6 +72,8 @@ type contracts*: Contracts clock*: Clock storage*: Contracts + taskpool: Taskpool + trackedFutures: TrackedFutures CodexNodeRef* = ref CodexNode @@ -149,7 +153,11 @@ proc updateExpiry*( let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt( self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry) ) - await allFuturesThrowing(ensuringFutures) + + let res = await allFinishedFailed(ensuringFutures) + if res.failure.len > 0: + trace "Some blocks failed to update expiry", len = res.failure.len + return failure("Some blocks failed to update expiry (" & $res.failure.len & " )") except CancelledError as exc: raise exc except CatchableError as exc: @@ -161,8 +169,9 @@ proc fetchBatched*( self: CodexNodeRef, cid: Cid, iter: Iter[int], - batchSize = FetchBatch, + batchSize = DefaultFetchBatch, onBatch: BatchProc = nil, + fetchLocal = true, ): Future[?!void] {.async, gcsafe.} = ## Fetch blocks in batches of `batchSize` ## @@ -177,30 +186,62 @@ proc fetchBatched*( let blocks = collect: for i in 0 ..< batchSize: if not iter.finished: - self.networkStore.getBlock(BlockAddress.init(cid, iter.next())) + let address = BlockAddress.init(cid, iter.next()) + if not (await address in self.networkStore) or fetchLocal: + self.networkStore.getBlock(address) - if blocksErr =? 
(await allFutureResult(blocks)).errorOption: - return failure(blocksErr) + let res = await allFinishedFailed(blocks) + if res.failure.len > 0: + trace "Some blocks failed to fetch", len = res.failure.len + return failure("Some blocks failed to fetch (" & $res.failure.len & " )") if not onBatch.isNil and batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption: return failure(batchErr) + await sleepAsync(1.millis) + success() proc fetchBatched*( self: CodexNodeRef, manifest: Manifest, - batchSize = FetchBatch, + batchSize = DefaultFetchBatch, onBatch: BatchProc = nil, + fetchLocal = true, ): Future[?!void] = ## Fetch manifest in batches of `batchSize` ## - trace "Fetching blocks in batches of", size = batchSize + trace "Fetching blocks in batches of", + size = batchSize, blocksCount = manifest.blocksCount let iter = Iter[int].new(0 ..< manifest.blocksCount) - self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch) + self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal) + +proc fetchDatasetAsync*( + self: CodexNodeRef, manifest: Manifest, fetchLocal = true +): Future[void] {.async: (raises: []).} = + ## Asynchronously fetch a dataset in the background. + ## This task will be tracked and cleaned up on node shutdown. + ## + try: + if err =? ( + await self.fetchBatched( + manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal + ) + ).errorOption: + error "Unable to fetch blocks", err = err.msg + except CancelledError as exc: + trace "Cancelled fetching blocks", exc = exc.msg + except CatchableError as exc: + error "Error fetching blocks", exc = exc.msg + +proc fetchDatasetAsyncTask*(self: CodexNodeRef, manifest: Manifest) = + ## Start fetching a dataset in the background. + ## The task will be tracked and cleaned up on node shutdown. 
+ ## + self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false)) proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} = ## Streams the contents of a single block. @@ -212,16 +253,15 @@ proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err: return failure(err) - proc streamOneBlock(): Future[void] {.async.} = + proc streamOneBlock(): Future[void] {.async: (raises: []).} = try: + defer: + await stream.pushEof() await stream.pushData(blk.data) except CatchableError as exc: trace "Unable to send block", cid, exc = exc.msg - discard - finally: - await stream.pushEof() - asyncSpawn streamOneBlock() + self.trackedFutures.track(streamOneBlock()) LPStream(stream).success proc streamEntireDataset( @@ -231,24 +271,40 @@ proc streamEntireDataset( ## trace "Retrieving blocks from manifest", manifestCid + var jobs: seq[Future[void]] + let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false)) if manifest.protected: # Retrieve, decode and save to the local store all EС groups - proc erasureJob(): Future[?!void] {.async.} = - # Spawn an erasure decoding job - let erasure = - Erasure.new(self.networkStore, leoEncoderProvider, leoDecoderProvider) - without _ =? (await erasure.decode(manifest)), error: - error "Unable to erasure decode manifest", manifestCid, exc = error.msg - return failure(error) + proc erasureJob(): Future[void] {.async: (raises: []).} = + try: + # Spawn an erasure decoding job + let erasure = Erasure.new( + self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool + ) + without _ =? (await erasure.decode(manifest)), error: + error "Unable to erasure decode manifest", manifestCid, exc = error.msg + except CatchableError as exc: + trace "Error erasure decoding manifest", manifestCid, exc = exc.msg - return success() + jobs.add(erasureJob()) - if err =? 
(await erasureJob()).errorOption: - return failure(err) + jobs.add(self.fetchDatasetAsync(manifest)) + + # Monitor stream completion and cancel background jobs when done + proc monitorStream() {.async: (raises: []).} = + try: + await stream.join() + except CatchableError as exc: + warn "Stream failed", exc = exc.msg + finally: + await noCancel allFutures(jobs.mapIt(it.cancelAndWait)) + + self.trackedFutures.track(monitorStream()) # Retrieve all blocks of the dataset sequentially from the local store or network trace "Creating store stream for manifest", manifestCid - LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success + + stream.success proc retrieve*( self: CodexNodeRef, cid: Cid, local: bool = true @@ -267,6 +323,65 @@ proc retrieve*( await self.streamEntireDataset(manifest, cid) +proc deleteSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} = + if err =? (await self.networkStore.delBlock(cid)).errorOption: + error "Error deleting block", cid, err = err.msg + return failure(err) + + trace "Deleted block", cid + return success() + +proc deleteEntireDataset(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} = + # Deletion is a strictly local operation + var store = self.networkStore.localStore + + if not (await cid in store): + # As per the contract for delete*, an absent dataset is not an error. + return success() + + without manifestBlock =? await store.getBlock(cid), err: + return failure(err) + + without manifest =? Manifest.decode(manifestBlock), err: + return failure(err) + + let runtimeQuota = initDuration(milliseconds = 100) + var lastIdle = getTime() + for i in 0 ..< manifest.blocksCount: + if (getTime() - lastIdle) >= runtimeQuota: + await idleAsync() + lastIdle = getTime() + + if err =? (await store.delBlock(manifest.treeCid, i)).errorOption: + # The contract for delBlock is fuzzy, but we assume that if the block is + # simply missing we won't get an error. 
This is a best effort operation and + # can simply be retried. + error "Failed to delete block within dataset", index = i, err = err.msg + return failure(err) + + if err =? (await store.delBlock(cid)).errorOption: + error "Error deleting manifest block", err = err.msg + + success() + +proc delete*( + self: CodexNodeRef, cid: Cid +): Future[?!void] {.async: (raises: [CatchableError]).} = + ## Deletes a whole dataset, if Cid is a Manifest Cid, or a single block, if Cid a block Cid, + ## from the underlying block store. This is a strictly local operation. + ## + ## Missing blocks in dataset deletes are ignored. + ## + + without isManifest =? cid.isManifest, err: + trace "Bad content type for CID:", cid = cid, err = err.msg + return failure(err) + + if not isManifest: + return await self.deleteSingleBlock(cid) + + await self.deleteEntireDataset(cid) + proc store*( self: CodexNodeRef, stream: LPStream, @@ -332,7 +447,6 @@ proc store*( codec = dataCodec, filename = filename, mimetype = mimetype, - uploadedAt = now().utc.toTime.toUnix.some, ) without manifestBlk =? await self.storeManifest(manifest), err: @@ -369,13 +483,13 @@ proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} = proc setupRequest( self: CodexNodeRef, cid: Cid, - duration: UInt256, + duration: uint64, proofProbability: UInt256, nodes: uint, tolerance: uint, pricePerBytePerSecond: UInt256, collateralPerByte: UInt256, - expiry: UInt256, + expiry: uint64, ): Future[?!StorageRequest] {.async.} = ## Setup slots for a given dataset ## @@ -403,8 +517,9 @@ proc setupRequest( return failure error # Erasure code the dataset according to provided parameters - let erasure = - Erasure.new(self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider) + let erasure = Erasure.new( + self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskpool + ) without encoded =? 
(await erasure.encode(manifest, ecK, ecM)), error: trace "Unable to erasure code dataset" @@ -432,17 +547,14 @@ proc setupRequest( request = StorageRequest( ask: StorageAsk( slots: verifiable.numSlots.uint64, - slotSize: builder.slotBytes.uint.u256, + slotSize: builder.slotBytes.uint64, duration: duration, proofProbability: proofProbability, pricePerBytePerSecond: pricePerBytePerSecond, collateralPerByte: collateralPerByte, maxSlotLoss: tolerance, ), - content: StorageContent( - cid: $manifestBlk.cid, # TODO: why string? - merkleRoot: verifyRoot, - ), + content: StorageContent(cid: manifestBlk.cid, merkleRoot: verifyRoot), expiry: expiry, ) @@ -452,13 +564,13 @@ proc setupRequest( proc requestStorage*( self: CodexNodeRef, cid: Cid, - duration: UInt256, + duration: uint64, proofProbability: UInt256, nodes: uint, tolerance: uint, pricePerBytePerSecond: UInt256, collateralPerByte: UInt256, - expiry: UInt256, + expiry: uint64, ): Future[?!PurchaseId] {.async.} = ## Initiate a request for storage sequence, this might ## be a multistep procedure. @@ -472,7 +584,7 @@ proc requestStorage*( pricePerBytePerSecond = pricePerBytePerSecond proofProbability = proofProbability collateralPerByte = collateralPerByte - expiry = expiry.truncate(int64) + expiry = expiry now = self.clock.now trace "Received a request for storage!" @@ -494,20 +606,26 @@ proc requestStorage*( success purchase.id proc onStore( - self: CodexNodeRef, request: StorageRequest, slotIdx: UInt256, blocksCb: BlocksCb + self: CodexNodeRef, + request: StorageRequest, + slotIdx: uint64, + blocksCb: BlocksCb, + isRepairing: bool = false, ): Future[?!void] {.async.} = ## store data in local storage ## + let cid = request.content.cid + logScope: - cid = request.content.cid + cid = $cid slotIdx = slotIdx trace "Received a request to store a slot" - without cid =? 
Cid.init(request.content.cid).mapFailure, err: - trace "Unable to parse Cid", cid - return failure(err) + # TODO: Use the isRepairing to manage the slot download. + # If isRepairing is true, the slot has to be repaired before + # being downloaded. without manifest =? (await self.fetchManifest(cid)), err: trace "Unable to fetch manifest for cid", cid, err = err.msg @@ -518,11 +636,9 @@ proc onStore( trace "Unable to create slots builder", err = err.msg return failure(err) - let - slotIdx = slotIdx.truncate(int) - expiry = request.expiry.toSecondsSince1970 + let expiry = request.expiry - if slotIdx > manifest.slotRoots.high: + if slotIdx > manifest.slotRoots.high.uint64: trace "Slot index not in manifest", slotIdx return failure(newException(CodexError, "Slot index not in manifest")) @@ -530,9 +646,12 @@ proc onStore( trace "Updating expiry for blocks", blocks = blocks.len let ensureExpiryFutures = - blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry)) - if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption: - return failure(updateExpiryErr) + blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970)) + + let res = await allFinishedFailed(ensureExpiryFutures) + if res.failure.len > 0: + trace "Some blocks failed to update expiry", len = res.failure.len + return failure("Some blocks failed to update expiry (" & $res.failure.len & " )") if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption: trace "Unable to process blocks", err = err.msg @@ -546,7 +665,11 @@ proc onStore( trace "Unable to create indexing strategy from protected manifest", err = err.msg return failure(err) - without blksIter =? indexer.getIndicies(slotIdx).catch, err: + if slotIdx > int.high.uint64: + error "Cannot cast slot index to int", slotIndex = slotIdx + return + + without blksIter =? 
indexer.getIndicies(slotIdx.int).catch, err: trace "Unable to get indicies from strategy", err = err.msg return failure(err) @@ -556,13 +679,13 @@ proc onStore( trace "Unable to fetch blocks", err = err.msg return failure(err) - without slotRoot =? (await builder.buildSlot(slotIdx.Natural)), err: + without slotRoot =? (await builder.buildSlot(slotIdx.int)), err: trace "Unable to build slot", err = err.msg return failure(err) trace "Slot successfully retrieved and reconstructed" - if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx.int]: + if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]: trace "Slot root mismatch", manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid() return failure(newException(CodexError, "Slot root mismatch")) @@ -578,8 +701,8 @@ proc onProve( ## let - cidStr = slot.request.content.cid - slotIdx = slot.slotIndex.truncate(Natural) + cidStr = $slot.request.content.cid + slotIdx = slot.slotIndex logScope: cid = cidStr @@ -600,7 +723,8 @@ proc onProve( return failure(err) when defined(verify_circuit): - without (inputs, proof) =? await prover.prove(slotIdx, manifest, challenge), err: + without (inputs, proof) =? await prover.prove(slotIdx.int, manifest, challenge), + err: error "Unable to generate proof", err = err.msg return failure(err) @@ -614,7 +738,7 @@ proc onProve( trace "Proof verified successfully" else: - without (_, proof) =? await prover.prove(slotIdx, manifest, challenge), err: + without (_, proof) =? await prover.prove(slotIdx.int, manifest, challenge), err: error "Unable to generate proof", err = err.msg return failure(err) @@ -627,16 +751,11 @@ proc onProve( failure "Prover not enabled" proc onExpiryUpdate( - self: CodexNodeRef, rootCid: string, expiry: SecondsSince1970 + self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = - without cid =? 
Cid.init(rootCid): - trace "Unable to parse Cid", cid - let error = newException(CodexError, "Unable to parse Cid") - return failure(error) + return await self.updateExpiry(rootCid, expiry) - return await self.updateExpiry(cid, expiry) - -proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: UInt256) = +proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: uint64) = # TODO: remove data from local storage discard @@ -652,16 +771,19 @@ proc start*(self: CodexNodeRef) {.async.} = if hostContracts =? self.contracts.host: hostContracts.sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, + slot: uint64, + onBatch: BatchProc, + isRepairing: bool = false, ): Future[?!void] = - self.onStore(request, slot, onBatch) + self.onStore(request, slot, onBatch, isRepairing) hostContracts.sales.onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] = self.onExpiryUpdate(rootCid, expiry) - hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: UInt256) = + hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: uint64) = # TODO: remove data from local storage self.onClear(request, slotIndex) @@ -703,6 +825,11 @@ proc start*(self: CodexNodeRef) {.async.} = proc stop*(self: CodexNodeRef) {.async.} = trace "Stopping node" + if not self.taskpool.isNil: + self.taskpool.shutdown() + + await self.trackedFutures.cancelTracked() + if not self.engine.isNil: await self.engine.stop() @@ -730,6 +857,7 @@ proc new*( networkStore: NetworkStore, engine: BlockExcEngine, discovery: Discovery, + taskpool: Taskpool, prover = Prover.none, contracts = Contracts.default, ): CodexNodeRef = @@ -742,5 +870,7 @@ proc new*( engine: engine, prover: prover, discovery: discovery, + taskPool: taskpool, contracts: contracts, + trackedFutures: TrackedFutures(), ) diff --git a/codex/periods.nim b/codex/periods.nim index 
429931ee..cbb860e2 100644 --- a/codex/periods.nim +++ b/codex/periods.nim @@ -2,10 +2,10 @@ import pkg/stint type Periodicity* = object - seconds*: UInt256 + seconds*: uint64 - Period* = UInt256 - Timestamp* = UInt256 + Period* = uint64 + Timestamp* = uint64 func periodOf*(periodicity: Periodicity, timestamp: Timestamp): Period = timestamp div periodicity.seconds diff --git a/codex/purchasing.nim b/codex/purchasing.nim index 4ab84405..25a35137 100644 --- a/codex/purchasing.nim +++ b/codex/purchasing.nim @@ -14,7 +14,7 @@ export purchase type Purchasing* = ref object - market: Market + market*: Market clock: Clock purchases: Table[PurchaseId, Purchase] proofProbability*: UInt256 diff --git a/codex/purchasing/states/cancelled.nim b/codex/purchasing/states/cancelled.nim index 760dc81a..5aeeceac 100644 --- a/codex/purchasing/states/cancelled.nim +++ b/codex/purchasing/states/cancelled.nim @@ -1,25 +1,35 @@ import pkg/metrics import ../../logutils +import ../../utils/exceptions import ../statemachine -import ./errorhandling +import ./error declareCounter(codex_purchases_cancelled, "codex purchases cancelled") logScope: topics = "marketplace purchases cancelled" -type PurchaseCancelled* = ref object of ErrorHandlingState +type PurchaseCancelled* = ref object of PurchaseState method `$`*(state: PurchaseCancelled): string = "cancelled" -method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseCancelled, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_cancelled.inc() let purchase = Purchase(machine) - warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId - await purchase.market.withdrawFunds(purchase.requestId) + try: + warn "Request cancelled, withdrawing remaining funds", + requestId = purchase.requestId + await purchase.market.withdrawFunds(purchase.requestId) - let error = newException(Timeout, "Purchase cancelled due to timeout") - 
purchase.future.fail(error) + let error = newException(Timeout, "Purchase cancelled due to timeout") + purchase.future.fail(error) + except CancelledError as e: + trace "PurchaseCancelled.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseCancelled.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/purchasing/states/error.nim b/codex/purchasing/states/error.nim index d7017b38..afa9f54f 100644 --- a/codex/purchasing/states/error.nim +++ b/codex/purchasing/states/error.nim @@ -14,7 +14,9 @@ type PurchaseErrored* = ref object of PurchaseState method `$`*(state: PurchaseErrored): string = "errored" -method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseErrored, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_error.inc() let purchase = Purchase(machine) diff --git a/codex/purchasing/states/errorhandling.nim b/codex/purchasing/states/errorhandling.nim deleted file mode 100644 index 8ef91ba6..00000000 --- a/codex/purchasing/states/errorhandling.nim +++ /dev/null @@ -1,8 +0,0 @@ -import pkg/questionable -import ../statemachine -import ./error - -type ErrorHandlingState* = ref object of PurchaseState - -method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State = - some State(PurchaseErrored(error: error)) diff --git a/codex/purchasing/states/failed.nim b/codex/purchasing/states/failed.nim index 5a126a73..1f6be74f 100644 --- a/codex/purchasing/states/failed.nim +++ b/codex/purchasing/states/failed.nim @@ -1,6 +1,7 @@ import pkg/metrics import ../statemachine import ../../logutils +import ../../utils/exceptions import ./error declareCounter(codex_purchases_failed, "codex purchases failed") @@ -10,11 +11,20 @@ type PurchaseFailed* = ref object of PurchaseState method `$`*(state: PurchaseFailed): string = "failed" -method run*(state: PurchaseFailed, machine: Machine): Future[?State] 
{.async.} = +method run*( + state: PurchaseFailed, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_failed.inc() let purchase = Purchase(machine) - warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId - await purchase.market.withdrawFunds(purchase.requestId) + + try: + warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId + await purchase.market.withdrawFunds(purchase.requestId) + except CancelledError as e: + trace "PurchaseFailed.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseFailed.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) let error = newException(PurchaseError, "Purchase failed") return some State(PurchaseErrored(error: error)) diff --git a/codex/purchasing/states/finished.nim b/codex/purchasing/states/finished.nim index 6cf5ffcc..bb7a726d 100644 --- a/codex/purchasing/states/finished.nim +++ b/codex/purchasing/states/finished.nim @@ -1,7 +1,9 @@ import pkg/metrics import ../statemachine +import ../../utils/exceptions import ../../logutils +import ./error declareCounter(codex_purchases_finished, "codex purchases finished") @@ -13,10 +15,19 @@ type PurchaseFinished* = ref object of PurchaseState method `$`*(state: PurchaseFinished): string = "finished" -method run*(state: PurchaseFinished, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseFinished, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_finished.inc() let purchase = Purchase(machine) - info "Purchase finished, withdrawing remaining funds", requestId = purchase.requestId - await purchase.market.withdrawFunds(purchase.requestId) + try: + info "Purchase finished, withdrawing remaining funds", + requestId = purchase.requestId + await purchase.market.withdrawFunds(purchase.requestId) - purchase.future.complete() + purchase.future.complete() + except CancelledError as e: + trace 
"PurchaseFinished.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseFinished.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/purchasing/states/pending.nim b/codex/purchasing/states/pending.nim index 4852f266..1472a63e 100644 --- a/codex/purchasing/states/pending.nim +++ b/codex/purchasing/states/pending.nim @@ -1,18 +1,28 @@ import pkg/metrics +import ../../logutils +import ../../utils/exceptions import ../statemachine -import ./errorhandling import ./submitted +import ./error declareCounter(codex_purchases_pending, "codex purchases pending") -type PurchasePending* = ref object of ErrorHandlingState +type PurchasePending* = ref object of PurchaseState method `$`*(state: PurchasePending): string = "pending" -method run*(state: PurchasePending, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchasePending, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_pending.inc() let purchase = Purchase(machine) - let request = !purchase.request - await purchase.market.requestStorage(request) - return some State(PurchaseSubmitted()) + try: + let request = !purchase.request + await purchase.market.requestStorage(request) + return some State(PurchaseSubmitted()) + except CancelledError as e: + trace "PurchasePending.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchasePending.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/purchasing/states/started.nim b/codex/purchasing/states/started.nim index 083e64c8..e93d7013 100644 --- a/codex/purchasing/states/started.nim +++ b/codex/purchasing/states/started.nim @@ -1,22 +1,25 @@ import pkg/metrics import ../../logutils +import ../../utils/exceptions import ../statemachine -import ./errorhandling import ./finished import ./failed +import ./error declareCounter(codex_purchases_started, "codex purchases 
started") logScope: topics = "marketplace purchases started" -type PurchaseStarted* = ref object of ErrorHandlingState +type PurchaseStarted* = ref object of PurchaseState method `$`*(state: PurchaseStarted): string = "started" -method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseStarted, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_started.inc() let purchase = Purchase(machine) @@ -28,15 +31,24 @@ method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} proc callback(_: RequestId) = failed.complete() - let subscription = await market.subscribeRequestFailed(purchase.requestId, callback) + var ended: Future[void] + try: + let subscription = await market.subscribeRequestFailed(purchase.requestId, callback) - # Ensure that we're past the request end by waiting an additional second - let ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1) - let fut = await one(ended, failed) - await subscription.unsubscribe() - if fut.id == failed.id: + # Ensure that we're past the request end by waiting an additional second + ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1) + let fut = await one(ended, failed) + await subscription.unsubscribe() + if fut.id == failed.id: + ended.cancelSoon() + return some State(PurchaseFailed()) + else: + failed.cancelSoon() + return some State(PurchaseFinished()) + except CancelledError as e: ended.cancelSoon() - return some State(PurchaseFailed()) - else: failed.cancelSoon() - return some State(PurchaseFinished()) + trace "PurchaseStarted.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseStarted.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/purchasing/states/submitted.nim b/codex/purchasing/states/submitted.nim index 1cf65b1f..dd3669e4 100644 --- a/codex/purchasing/states/submitted.nim 
+++ b/codex/purchasing/states/submitted.nim @@ -1,22 +1,25 @@ import pkg/metrics import ../../logutils +import ../../utils/exceptions import ../statemachine -import ./errorhandling import ./started import ./cancelled +import ./error logScope: topics = "marketplace purchases submitted" declareCounter(codex_purchases_submitted, "codex purchases submitted") -type PurchaseSubmitted* = ref object of ErrorHandlingState +type PurchaseSubmitted* = ref object of PurchaseState method `$`*(state: PurchaseSubmitted): string = "submitted" -method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseSubmitted, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_submitted.inc() let purchase = Purchase(machine) let request = !purchase.request @@ -44,5 +47,10 @@ method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async. await wait().withTimeout() except Timeout: return some State(PurchaseCancelled()) + except CancelledError as e: + trace "PurchaseSubmitted.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseSubmitted.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) return some State(PurchaseStarted()) diff --git a/codex/purchasing/states/unknown.nim b/codex/purchasing/states/unknown.nim index 54e09942..8c2bff48 100644 --- a/codex/purchasing/states/unknown.nim +++ b/codex/purchasing/states/unknown.nim @@ -1,34 +1,44 @@ import pkg/metrics +import ../../utils/exceptions +import ../../logutils import ../statemachine -import ./errorhandling import ./submitted import ./started import ./cancelled import ./finished import ./failed +import ./error declareCounter(codex_purchases_unknown, "codex purchases unknown") -type PurchaseUnknown* = ref object of ErrorHandlingState +type PurchaseUnknown* = ref object of PurchaseState method `$`*(state: PurchaseUnknown): string = "unknown" -method run*(state: PurchaseUnknown, 
machine: Machine): Future[?State] {.async.} = - codex_purchases_unknown.inc() - let purchase = Purchase(machine) - if (request =? await purchase.market.getRequest(purchase.requestId)) and - (requestState =? await purchase.market.requestState(purchase.requestId)): - purchase.request = some request +method run*( + state: PurchaseUnknown, machine: Machine +): Future[?State] {.async: (raises: []).} = + try: + codex_purchases_unknown.inc() + let purchase = Purchase(machine) + if (request =? await purchase.market.getRequest(purchase.requestId)) and + (requestState =? await purchase.market.requestState(purchase.requestId)): + purchase.request = some request - case requestState - of RequestState.New: - return some State(PurchaseSubmitted()) - of RequestState.Started: - return some State(PurchaseStarted()) - of RequestState.Cancelled: - return some State(PurchaseCancelled()) - of RequestState.Finished: - return some State(PurchaseFinished()) - of RequestState.Failed: - return some State(PurchaseFailed()) + case requestState + of RequestState.New: + return some State(PurchaseSubmitted()) + of RequestState.Started: + return some State(PurchaseStarted()) + of RequestState.Cancelled: + return some State(PurchaseCancelled()) + of RequestState.Finished: + return some State(PurchaseFinished()) + of RequestState.Failed: + return some State(PurchaseFailed()) + except CancelledError as e: + trace "PurchaseUnknown.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseUnknown.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index a64d26cf..ee493e03 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -13,8 +13,8 @@ push: {.upraises: [].} import std/sequtils -import mimetypes -import os +import std/mimetypes +import std/os import pkg/questionable import pkg/questionable/results @@ -65,9 +65,15 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] 
{.async.} = return %RestContentList.init(content) +proc isPending(resp: HttpResponseRef): bool = + ## Checks that an HttpResponseRef object is still pending; i.e., + ## that no body has yet been sent. This helps us guard against calling + ## sendBody(resp: HttpResponseRef, ...) twice, which is illegal. + return resp.getResponseState() == HttpResponseState.Empty + proc retrieveCid( node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef -): Future[RestApiResponse] {.async.} = +): Future[void] {.async: (raises: [CancelledError, HttpWriteError]).} = ## Download a file from the node in a streaming ## manner ## @@ -79,16 +85,21 @@ proc retrieveCid( without stream =? (await node.retrieve(cid, local)), error: if error of BlockNotFoundError: resp.status = Http404 - return await resp.sendBody("") + await resp.sendBody( + "The requested CID could not be retrieved (" & error.msg & ")." + ) + return else: resp.status = Http500 - return await resp.sendBody(error.msg) + await resp.sendBody(error.msg) + return # It is ok to fetch again the manifest because it will hit the cache without manifest =? (await node.fetchManifest(cid)), err: error "Failed to fetch manifest", err = err.msg resp.status = Http404 - return await resp.sendBody(err.msg) + await resp.sendBody(err.msg) + return if manifest.mimetype.isSome: resp.setHeader("Content-Type", manifest.mimetype.get()) @@ -103,7 +114,14 @@ proc retrieveCid( else: resp.setHeader("Content-Disposition", "attachment") - await resp.prepareChunked() + # For erasure-coded datasets, we need to return the _original_ length; i.e., + # the length of the non-erasure-coded dataset, as that's what we will be + # returning to the client. 
+ let contentLength = + if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize + resp.setHeader("Content-Length", $(contentLength.int)) + + await resp.prepare(HttpResponseStreamType.Plain) while not stream.atEof: var @@ -116,13 +134,16 @@ proc retrieveCid( bytes += buff.len - await resp.sendChunk(addr buff[0], buff.len) + await resp.send(addr buff[0], buff.len) await resp.finish() codex_api_downloads.inc() + except CancelledError as exc: + raise exc except CatchableError as exc: - warn "Excepting streaming blocks", exc = exc.msg + warn "Error streaming blocks", exc = exc.msg resp.status = Http500 - return await resp.sendBody("") + if resp.isPending(): + await resp.sendBody(exc.msg) finally: info "Sent bytes", cid = cid, bytes if not stream.isNil: @@ -238,6 +259,15 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute let json = await formatManifestBlocks(node) return RestApiResponse.response($json, contentType = "application/json") + router.api(MethodOptions, "/api/codex/v1/data/{cid}") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("GET,DELETE", corsOrigin) + + resp.status = Http204 + await resp.sendBody("") + router.api(MethodGet, "/api/codex/v1/data/{cid}") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: @@ -254,6 +284,27 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute await node.retrieveCid(cid.get(), local = true, resp = resp) + router.api(MethodDelete, "/api/codex/v1/data/{cid}") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + ## Deletes either a single block or an entire dataset + ## from the local node. Does nothing and returns 200 + ## if the dataset is not locally available. + ## + var headers = buildCorsHeaders("DELETE", allowedOrigin) + + if cid.isErr: + return RestApiResponse.error(Http400, $cid.error(), headers = headers) + + if err =? 
(await node.delete(cid.get())).errorOption: + return RestApiResponse.error(Http500, err.msg, headers = headers) + + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("DELETE", corsOrigin) + + resp.status = Http204 + await resp.sendBody("") + router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: @@ -269,15 +320,8 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute error "Failed to fetch manifest", err = err.msg return RestApiResponse.error(Http404, err.msg, headers = headers) - proc fetchDatasetAsync(): Future[void] {.async.} = - try: - if err =? (await node.fetchBatched(manifest)).errorOption: - error "Unable to fetch dataset", cid = cid.get(), err = err.msg - except CatchableError as exc: - error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg - discard - - asyncSpawn fetchDatasetAsync() + # Start fetching the dataset in the background + node.fetchDatasetAsyncTask(manifest) let json = %formatManifest(cid.get(), manifest) return RestApiResponse.response($json, contentType = "application/json") @@ -298,6 +342,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute resp.setCorsHeaders("GET", corsOrigin) resp.setHeader("Access-Control-Headers", "X-Requested-With") + resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition") await node.retrieveCid(cid.get(), local = false, resp = resp) router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do( @@ -430,19 +475,28 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = if restAv.totalSize == 0: return RestApiResponse.error( - Http400, "Total size must be larger then zero", headers = headers + Http422, "Total size must be larger then zero", headers = headers ) - if not reservations.hasAvailable(restAv.totalSize.truncate(uint)): + if not reservations.hasAvailable(restAv.totalSize): return RestApiResponse.error(Http422, "Not enough storage 
quota", headers = headers) without availability =? ( await reservations.createAvailability( - restAv.totalSize, restAv.duration, restAv.minPricePerBytePerSecond, + restAv.totalSize, + restAv.duration, + restAv.minPricePerBytePerSecond, restAv.totalCollateral, + enabled = restAv.enabled |? true, + until = restAv.until |? 0, ) ), error: + if error of CancelledError: + raise error + if error of UntilOutOfBoundsError: + return RestApiResponse.error(Http422, error.msg) + return RestApiResponse.error(Http500, error.msg, headers = headers) return RestApiResponse.response( @@ -479,6 +533,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = ## tokens) to be matched against the request's pricePerBytePerSecond ## totalCollateral - total collateral (in amount of ## tokens) that can be distributed among matching requests + try: without contracts =? node.contracts.host: return RestApiResponse.error(Http503, "Persistence is not enabled") @@ -503,17 +558,23 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = return RestApiResponse.error(Http500, error.msg) if isSome restAv.freeSize: - return RestApiResponse.error(Http400, "Updating freeSize is not allowed") + return RestApiResponse.error(Http422, "Updating freeSize is not allowed") if size =? 
restAv.totalSize: + if size == 0: + return RestApiResponse.error(Http422, "Total size must be larger then zero") + # we don't allow lowering the totalSize bellow currently utilized size if size < (availability.totalSize - availability.freeSize): return RestApiResponse.error( - Http400, + Http422, "New totalSize must be larger then current totalSize - freeSize, which is currently: " & $(availability.totalSize - availability.freeSize), ) + if not reservations.hasAvailable(size): + return RestApiResponse.error(Http422, "Not enough storage quota") + availability.freeSize += size - availability.totalSize availability.totalSize = size @@ -526,10 +587,21 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = if totalCollateral =? restAv.totalCollateral: availability.totalCollateral = totalCollateral - if err =? (await reservations.update(availability)).errorOption: - return RestApiResponse.error(Http500, err.msg) + if until =? restAv.until: + availability.until = until - return RestApiResponse.response(Http200) + if enabled =? restAv.enabled: + availability.enabled = enabled + + if err =? (await reservations.update(availability)).errorOption: + if err of CancelledError: + raise err + if err of UntilOutOfBoundsError: + return RestApiResponse.error(Http422, err.msg) + else: + return RestApiResponse.error(Http500, err.msg) + + return RestApiResponse.response(Http204) except CatchableError as exc: trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500) @@ -607,18 +679,52 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = without params =? 
StorageRequestParams.fromJson(body), error: return RestApiResponse.error(Http400, error.msg, headers = headers) + let expiry = params.expiry + + if expiry <= 0 or expiry >= params.duration: + return RestApiResponse.error( + Http422, + "Expiry must be greater than zero and less than the request's duration", + headers = headers, + ) + + if params.proofProbability <= 0: + return RestApiResponse.error( + Http422, "Proof probability must be greater than zero", headers = headers + ) + + if params.collateralPerByte <= 0: + return RestApiResponse.error( + Http422, "Collateral per byte must be greater than zero", headers = headers + ) + + if params.pricePerBytePerSecond <= 0: + return RestApiResponse.error( + Http422, + "Price per byte per second must be greater than zero", + headers = headers, + ) + + let requestDurationLimit = await contracts.purchasing.market.requestDurationLimit + if params.duration > requestDurationLimit: + return RestApiResponse.error( + Http422, + "Duration exceeds limit of " & $requestDurationLimit & " seconds", + headers = headers, + ) + let nodes = params.nodes |? 3 let tolerance = params.tolerance |? 1 if tolerance == 0: return RestApiResponse.error( - Http400, "Tolerance needs to be bigger then zero", headers = headers + Http422, "Tolerance needs to be bigger then zero", headers = headers ) # prevent underflow if tolerance > nodes: return RestApiResponse.error( - Http400, + Http422, "Invalid parameters: `tolerance` cannot be greater than `nodes`", headers = headers, ) @@ -629,21 +735,11 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = # ensure leopard constrainst of 1 < K ≥ M if ecK <= 1 or ecK < ecM: return RestApiResponse.error( - Http400, + Http422, "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`", headers = headers, ) - without expiry =? 
params.expiry: - return RestApiResponse.error(Http400, "Expiry required", headers = headers) - - if expiry <= 0 or expiry >= params.duration: - return RestApiResponse.error( - Http400, - "Expiry needs value bigger then zero and smaller then the request's duration", - headers = headers, - ) - without purchaseId =? await node.requestStorage( cid, params.duration, params.proofProbability, nodes, tolerance, @@ -651,7 +747,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = ), error: if error of InsufficientBlocksError: return RestApiResponse.error( - Http400, + Http422, "Dataset too small for erasure parameters, need at least " & $(ref InsufficientBlocksError)(error).minSize.int & " bytes", headers = headers, diff --git a/codex/rest/coders.nim b/codex/rest/coders.nim index 1c997ccf..319ce3d6 100644 --- a/codex/rest/coders.nim +++ b/codex/rest/coders.nim @@ -14,7 +14,7 @@ import pkg/chronos import pkg/libp2p import pkg/stew/base10 import pkg/stew/byteutils -import pkg/stew/results +import pkg/results import pkg/stint import ../sales diff --git a/codex/rest/json.nim b/codex/rest/json.nim index 9bc7664e..1b9459c1 100644 --- a/codex/rest/json.nim +++ b/codex/rest/json.nim @@ -13,11 +13,11 @@ export json type StorageRequestParams* = object - duration* {.serialize.}: UInt256 + duration* {.serialize.}: uint64 proofProbability* {.serialize.}: UInt256 pricePerBytePerSecond* {.serialize.}: UInt256 collateralPerByte* {.serialize.}: UInt256 - expiry* {.serialize.}: ?UInt256 + expiry* {.serialize.}: uint64 nodes* {.serialize.}: ?uint tolerance* {.serialize.}: ?uint @@ -28,16 +28,18 @@ type error* {.serialize.}: ?string RestAvailability* = object - totalSize* {.serialize.}: UInt256 - duration* {.serialize.}: UInt256 + totalSize* {.serialize.}: uint64 + duration* {.serialize.}: uint64 minPricePerBytePerSecond* {.serialize.}: UInt256 totalCollateral* {.serialize.}: UInt256 - freeSize* {.serialize.}: ?UInt256 + freeSize* {.serialize.}: ?uint64 + enabled* 
{.serialize.}: ?bool + until* {.serialize.}: ?SecondsSince1970 RestSalesAgent* = object state* {.serialize.}: string requestId* {.serialize.}: RequestId - slotIndex* {.serialize.}: UInt256 + slotIndex* {.serialize.}: uint64 request* {.serialize.}: ?StorageRequest reservation* {.serialize.}: ?Reservation diff --git a/codex/rng.nim b/codex/rng.nim index 9d82156e..866d65f8 100644 --- a/codex/rng.nim +++ b/codex/rng.nim @@ -55,6 +55,15 @@ proc sample*[T]( break +proc sample*[T]( + rng: Rng, sample: openArray[T], limit: int +): seq[T] {.raises: [Defect, RngSampleError].} = + if limit > sample.len: + raise newException(RngSampleError, "Limit cannot be larger than sample!") + + for _ in 0 ..< min(sample.len, limit): + result.add(rng.sample(sample, result)) + proc shuffle*[T](rng: Rng, a: var openArray[T]) = for i in countdown(a.high, 1): let j = rng.rand(i) diff --git a/codex/sales.nim b/codex/sales.nim index 4bf2d13c..37e2c06a 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -113,7 +113,6 @@ proc remove(sales: Sales, agent: SalesAgent) {.async.} = proc cleanUp( sales: Sales, agent: SalesAgent, - returnBytes: bool, reprocessSlot: bool, returnedCollateral: ?UInt256, processing: Future[void], @@ -132,7 +131,7 @@ proc cleanUp( # if reservation for the SalesAgent was not created, then it means # that the cleanUp was called before the sales process really started, so # there are not really any bytes to be returned - if returnBytes and request =? data.request and reservation =? data.reservation: + if request =? data.request and reservation =? data.reservation: if returnErr =? 
( await sales.context.reservations.returnBytesToAvailability( reservation.availabilityId, reservation.id, request.ask.slotSize @@ -150,20 +149,35 @@ proc cleanUp( ).errorOption: error "failure deleting reservation", error = deleteErr.msg + if data.slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16", slotIndex = data.slotIndex + return + # Re-add items back into the queue to prevent small availabilities from # draining the queue. Seen items will be ordered last. if reprocessSlot and request =? data.request: - let queue = sales.context.slotQueue - var seenItem = SlotQueueItem.init( - data.requestId, - data.slotIndex.truncate(uint16), - data.ask, - request.expiry, - seen = true, - ) - trace "pushing ignored item to queue, marked as seen" - if err =? queue.push(seenItem).errorOption: - error "failed to readd slot to queue", errorType = $(type err), error = err.msg + try: + without collateral =? + await sales.context.market.slotCollateral(data.requestId, data.slotIndex), err: + error "Failed to re-add item back to the slot queue: unable to calculate collateral", + error = err.msg + return + + let queue = sales.context.slotQueue + var seenItem = SlotQueueItem.init( + data.requestId, + data.slotIndex.uint16, + data.ask, + request.expiry, + seen = true, + collateral = collateral, + ) + trace "pushing ignored item to queue, marked as seen" + if err =? queue.push(seenItem).errorOption: + error "failed to readd slot to queue", errorType = $(type err), error = err.msg + except MarketError as e: + error "Failed to re-add item back to the slot queue.", error = e.msg + return await sales.remove(agent) @@ -172,7 +186,7 @@ proc cleanUp( processing.complete() proc filled( - sales: Sales, request: StorageRequest, slotIndex: UInt256, processing: Future[void] + sales: Sales, request: StorageRequest, slotIndex: uint64, processing: Future[void] ) = if onSale =? 
sales.context.onSale: onSale(request, slotIndex) @@ -184,16 +198,15 @@ proc filled( proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) = debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex - let agent = newSalesAgent( - sales.context, item.requestId, item.slotIndex.u256, none StorageRequest - ) + let agent = + newSalesAgent(sales.context, item.requestId, item.slotIndex, none StorageRequest) agent.onCleanUp = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = - await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done) + await sales.cleanUp(agent, reprocessSlot, returnedCollateral, done) - agent.onFilled = some proc(request: StorageRequest, slotIndex: UInt256) = + agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) = sales.filled(request, slotIndex, done) agent.start(SalePreparing()) @@ -257,12 +270,12 @@ proc load*(sales: Sales) {.async.} = newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request) agent.onCleanUp = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = # since workers are not being dispatched, this future has not been created # by a worker. 
Create a dummy one here so we can call sales.cleanUp let done: Future[void] = nil - await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done) + await sales.cleanUp(agent, reprocessSlot, returnedCollateral, done) # There is no need to assign agent.onFilled as slots loaded from `mySlots` # are inherently already filled and so assigning agent.onFilled would be @@ -271,7 +284,9 @@ proc load*(sales: Sales) {.async.} = agent.start(SaleUnknown()) sales.agents.add agent -proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} = +proc OnAvailabilitySaved( + sales: Sales, availability: Availability +) {.async: (raises: []).} = ## When availabilities are modified or added, the queue should be unpaused if ## it was paused and any slots in the queue should have their `seen` flag ## cleared. @@ -283,8 +298,8 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} = queue.unpause() proc onStorageRequested( - sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: UInt256 -) = + sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64 +) {.raises: [].} = logScope: topics = "marketplace sales onStorageRequested" requestId @@ -295,7 +310,14 @@ proc onStorageRequested( trace "storage requested, adding slots to queue" - without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err: + let market = sales.context.market + + without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free), + err: + error "Request failure, unable to calculate collateral", error = err.msg + return + + without items =? 
SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err: if err of SlotsOutOfRangeError: warn "Too many slots, cannot add to queue" else: @@ -312,7 +334,7 @@ proc onStorageRequested( else: warn "Error adding request to SlotQueue", error = err.msg -proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) = +proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) = logScope: topics = "marketplace sales onSlotFreed" requestId @@ -325,35 +347,54 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) = let market = context.market let queue = context.slotQueue - # first attempt to populate request using existing slot metadata in queue - without var found =? queue.populateItem(requestId, slotIndex.truncate(uint16)): - trace "no existing request metadata, getting request info from contract" - # if there's no existing slot for that request, retrieve the request - # from the contract. - try: - without request =? await market.getRequest(requestId): - error "unknown request in contract" - return + try: + without request =? (await market.getRequest(requestId)), err: + error "unknown request in contract", error = err.msgDetail + return - found = SlotQueueItem.init(request, slotIndex.truncate(uint16)) - except CancelledError: - discard # do not propagate as addSlotToQueue was asyncSpawned - except CatchableError as e: - error "failed to get request from contract and add slots to queue", - error = e.msgDetail + # Take the repairing state into consideration to calculate the collateral. + # This is particularly needed because it will affect the priority in the queue + # and we want to give the user the ability to tweak the parameters. + # Adding the repairing state directly in the queue priority calculation + # would not allow this flexibility. + without collateral =? 
+ market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err: + error "Failed to add freed slot to queue: unable to calculate collateral", + error = err.msg + return - if err =? queue.push(found).errorOption: - error "failed to push slot items to queue", error = err.msgDetail + if slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16, value = ", slotIndex + return + without slotQueueItem =? + SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch, + err: + warn "Too many slots, cannot add to queue", error = err.msgDetail + return + + if err =? queue.push(slotQueueItem).errorOption: + if err of SlotQueueItemExistsError: + error "Failed to push item to queue because it already exists", + error = err.msgDetail + elif err of QueueNotRunningError: + warn "Failed to push item to queue because queue is not running", + error = err.msgDetail + except CancelledError as e: + trace "sales.addSlotToQueue was cancelled" + + # We could get rid of this by adding the storage ask in the SlotFreed event, + # so we would not need to call getRequest to get the collateralPerSlot. 
let fut = addSlotToQueue() sales.trackedFutures.track(fut) - asyncSpawn fut proc subscribeRequested(sales: Sales) {.async.} = let context = sales.context let market = context.market - proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: UInt256) = + proc onStorageRequested( + requestId: RequestId, ask: StorageAsk, expiry: uint64 + ) {.raises: [].} = sales.onStorageRequested(requestId, ask, expiry) try: @@ -426,9 +467,13 @@ proc subscribeSlotFilled(sales: Sales) {.async.} = let market = context.market let queue = context.slotQueue - proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + proc onSlotFilled(requestId: RequestId, slotIndex: uint64) = + if slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16, value = ", slotIndex + return + trace "slot filled, removing from slot queue", requestId, slotIndex - queue.delete(requestId, slotIndex.truncate(uint16)) + queue.delete(requestId, slotIndex.uint16) for agent in sales.agents: agent.onSlotFilled(requestId, slotIndex) @@ -445,7 +490,7 @@ proc subscribeSlotFreed(sales: Sales) {.async.} = let context = sales.context let market = context.market - proc onSlotFreed(requestId: RequestId, slotIndex: UInt256) = + proc onSlotFreed(requestId: RequestId, slotIndex: uint64) = sales.onSlotFreed(requestId, slotIndex) try: @@ -461,9 +506,13 @@ proc subscribeSlotReservationsFull(sales: Sales) {.async.} = let market = context.market let queue = context.slotQueue - proc onSlotReservationsFull(requestId: RequestId, slotIndex: UInt256) = + proc onSlotReservationsFull(requestId: RequestId, slotIndex: uint64) = + if slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16, value = ", slotIndex + return + trace "reservations for slot full, removing from slot queue", requestId, slotIndex - queue.delete(requestId, slotIndex.truncate(uint16)) + queue.delete(requestId, slotIndex.uint16) try: let sub = await market.subscribeSlotReservationsFull(onSlotReservationsFull) @@ -477,16 
+526,19 @@ proc startSlotQueue(sales: Sales) = let slotQueue = sales.context.slotQueue let reservations = sales.context.reservations - slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = + slotQueue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex sales.processSlot(item, done) slotQueue.start() - proc onAvailabilityAdded(availability: Availability) {.async.} = - await sales.onAvailabilityAdded(availability) + proc OnAvailabilitySaved(availability: Availability) {.async: (raises: []).} = + if availability.enabled: + await sales.OnAvailabilitySaved(availability) - reservations.onAvailabilityAdded = onAvailabilityAdded + reservations.OnAvailabilitySaved = OnAvailabilitySaved proc subscribe(sales: Sales) {.async.} = await sales.subscribeRequested() diff --git a/codex/sales/reservations.nim b/codex/sales/reservations.nim index 4f48e057..b717cc1c 100644 --- a/codex/sales/reservations.nim +++ b/codex/sales/reservations.nim @@ -35,6 +35,7 @@ import std/sequtils import std/sugar import std/typetraits import std/sequtils +import std/times import pkg/chronos import pkg/datastore import pkg/nimcrypto @@ -64,30 +65,41 @@ type SomeStorableId = AvailabilityId | ReservationId Availability* = ref object id* {.serialize.}: AvailabilityId - totalSize* {.serialize.}: UInt256 - freeSize* {.serialize.}: UInt256 - duration* {.serialize.}: UInt256 + totalSize* {.serialize.}: uint64 + freeSize* {.serialize.}: uint64 + duration* {.serialize.}: uint64 minPricePerBytePerSecond* {.serialize.}: UInt256 totalCollateral {.serialize.}: UInt256 totalRemainingCollateral* {.serialize.}: UInt256 + # If set to false, the availability will not accept new slots. + # If enabled, it will not impact any existing slots that are already being hosted. 
+ enabled* {.serialize.}: bool + # Specifies the latest timestamp after which the availability will no longer host any slots. + # If set to 0, there will be no restrictions. + until* {.serialize.}: SecondsSince1970 Reservation* = ref object id* {.serialize.}: ReservationId availabilityId* {.serialize.}: AvailabilityId - size* {.serialize.}: UInt256 + size* {.serialize.}: uint64 requestId* {.serialize.}: RequestId - slotIndex* {.serialize.}: UInt256 + slotIndex* {.serialize.}: uint64 + validUntil* {.serialize.}: SecondsSince1970 Reservations* = ref object of RootObj availabilityLock: AsyncLock # Lock for protecting assertions of availability's sizes when searching for matching availability repo: RepoStore - onAvailabilityAdded: ?OnAvailabilityAdded + OnAvailabilitySaved: ?OnAvailabilitySaved - GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.} - IterDispose* = proc(): Future[?!void] {.gcsafe, closure.} - OnAvailabilityAdded* = - proc(availability: Availability): Future[void] {.upraises: [], gcsafe.} + GetNext* = proc(): Future[?seq[byte]] {. + upraises: [], gcsafe, async: (raises: [CancelledError]), closure + .} + IterDispose* = + proc(): Future[?!void] {.gcsafe, async: (raises: [CancelledError]), closure.} + OnAvailabilitySaved* = proc(availability: Availability): Future[void] {. 
+ upraises: [], gcsafe, async: (raises: []) + .} StorableIter* = ref object finished*: bool next*: GetNext @@ -102,13 +114,20 @@ type SerializationError* = object of ReservationsError UpdateFailedError* = object of ReservationsError BytesOutOfBoundsError* = object of ReservationsError + UntilOutOfBoundsError* = object of ReservationsError const SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module ReservationsKey = (SalesKey / "reservations").tryGet proc hash*(x: AvailabilityId): Hash {.borrow.} -proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} +proc all*( + self: Reservations, T: type SomeStorableObject +): Future[?!seq[T]] {.async: (raises: [CancelledError]).} + +proc all*( + self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId +): Future[?!seq[T]] {.async: (raises: [CancelledError]).} template withLock(lock, body) = try: @@ -123,11 +142,13 @@ proc new*(T: type Reservations, repo: RepoStore): Reservations = proc init*( _: type Availability, - totalSize: UInt256, - freeSize: UInt256, - duration: UInt256, + totalSize: uint64, + freeSize: uint64, + duration: uint64, minPricePerBytePerSecond: UInt256, totalCollateral: UInt256, + enabled: bool, + until: SecondsSince1970, ): Availability = var id: array[32, byte] doAssert randomBytes(id) == 32 @@ -139,6 +160,8 @@ proc init*( minPricePerBytePerSecond: minPricePerBytePerSecond, totalCollateral: totalCollateral, totalRemainingCollateral: totalCollateral, + enabled: enabled, + until: until, ) func totalCollateral*(self: Availability): UInt256 {.inline.} = @@ -151,9 +174,10 @@ proc `totalCollateral=`*(self: Availability, value: UInt256) {.inline.} = proc init*( _: type Reservation, availabilityId: AvailabilityId, - size: UInt256, + size: uint64, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, + validUntil: SecondsSince1970, ): Reservation = var id: array[32, byte] doAssert randomBytes(id) == 32 @@ -163,6 +187,7 @@ proc init*( 
size: size, requestId: requestId, slotIndex: slotIndex, + validUntil: validUntil, ) func toArray(id: SomeStorableId): array[32, byte] = @@ -189,10 +214,10 @@ logutils.formatIt(LogFormat.textLines, SomeStorableId): logutils.formatIt(LogFormat.json, SomeStorableId): it.to0xHexLog -proc `onAvailabilityAdded=`*( - self: Reservations, onAvailabilityAdded: OnAvailabilityAdded +proc `OnAvailabilitySaved=`*( + self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved ) = - self.onAvailabilityAdded = some onAvailabilityAdded + self.OnAvailabilitySaved = some OnAvailabilitySaved func key*(id: AvailabilityId): ?!Key = ## sales / reservations / @@ -206,7 +231,7 @@ func key*(availability: Availability): ?!Key = return availability.id.key func maxCollateralPerByte*(availability: Availability): UInt256 = - return availability.totalRemainingCollateral div availability.freeSize + return availability.totalRemainingCollateral div availability.freeSize.stuint(256) func key*(reservation: Reservation): ?!Key = return key(reservation.id, reservation.availabilityId) @@ -217,11 +242,19 @@ func available*(self: Reservations): uint = func hasAvailable*(self: Reservations, bytes: uint): bool = self.repo.available(bytes.NBytes) -proc exists*(self: Reservations, key: Key): Future[bool] {.async.} = +proc exists*( + self: Reservations, key: Key +): Future[bool] {.async: (raises: [CancelledError]).} = let exists = await self.repo.metaDs.ds.contains(key) return exists -proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} = +iterator items(self: StorableIter): Future[?seq[byte]] = + while not self.finished: + yield self.next() + +proc getImpl( + self: Reservations, key: Key +): Future[?!seq[byte]] {.async: (raises: [CancelledError]).} = if not await self.exists(key): let err = newException(NotExistsError, "object with key " & $key & " does not exist") @@ -234,7 +267,7 @@ proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} = proc get*( self: Reservations, 
key: Key, T: type SomeStorableObject -): Future[?!T] {.async.} = +): Future[?!T] {.async: (raises: [CancelledError]).} = without serialized =? await self.getImpl(key), error: return failure(error) @@ -243,7 +276,9 @@ proc get*( return success obj -proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.async.} = +proc updateImpl( + self: Reservations, obj: SomeStorableObject +): Future[?!void] {.async: (raises: [CancelledError]).} = trace "updating " & $(obj.type), id = obj.id without key =? obj.key, error: @@ -256,10 +291,15 @@ proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.a proc updateAvailability( self: Reservations, obj: Availability -): Future[?!void] {.async.} = +): Future[?!void] {.async: (raises: [CancelledError]).} = logScope: availabilityId = obj.id + if obj.until < 0: + let error = + newException(UntilOutOfBoundsError, "Cannot set until to a negative value") + return failure(error) + without key =? obj.key, error: return failure(error) @@ -268,66 +308,65 @@ proc updateAvailability( trace "Creating new Availability" let res = await self.updateImpl(obj) # inform subscribers that Availability has been added - if onAvailabilityAdded =? self.onAvailabilityAdded: - # when chronos v4 is implemented, and OnAvailabilityAdded is annotated - # with async:(raises:[]), we can remove this try/catch as we know, with - # certainty, that nothing will be raised - try: - await onAvailabilityAdded(obj) - except CancelledError as e: - raise e - except CatchableError as e: - # we don't have any insight into types of exceptions that - # `onAvailabilityAdded` can raise because it is caller-defined - warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg + if OnAvailabilitySaved =? self.OnAvailabilitySaved: + await OnAvailabilitySaved(obj) return res else: return failure(err) + if obj.until > 0: + without allReservations =? 
await self.all(Reservation, obj.id), error: + error.msg = "Error updating reservation: " & error.msg + return failure(error) + + let requestEnds = allReservations.mapIt(it.validUntil) + + if requestEnds.len > 0 and requestEnds.max > obj.until: + let error = newException( + UntilOutOfBoundsError, + "Until parameter must be greater or equal to the longest currently hosted slot", + ) + return failure(error) + # Sizing of the availability changed, we need to adjust the repo reservation accordingly if oldAvailability.totalSize != obj.totalSize: trace "totalSize changed, updating repo reservation" if oldAvailability.totalSize < obj.totalSize: # storage added if reserveErr =? ( - await self.repo.reserve( - (obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes - ) + await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).NBytes) ).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) elif oldAvailability.totalSize > obj.totalSize: # storage removed if reserveErr =? ( - await self.repo.release( - (oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes - ) + await self.repo.release((oldAvailability.totalSize - obj.totalSize).NBytes) ).errorOption: return failure(reserveErr.toErr(ReleaseFailedError)) let res = await self.updateImpl(obj) - if oldAvailability.freeSize < obj.freeSize: # availability added + if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or + oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or + oldAvailability.totalCollateral < obj.totalCollateral: # availability updated # inform subscribers that Availability has been modified (with increased # size) - if onAvailabilityAdded =? 
self.onAvailabilityAdded: - # when chronos v4 is implemented, and OnAvailabilityAdded is annotated - # with async:(raises:[]), we can remove this try/catch as we know, with - # certainty, that nothing will be raised - try: - await onAvailabilityAdded(obj) - except CancelledError as e: - raise e - except CatchableError as e: - # we don't have any insight into types of exceptions that - # `onAvailabilityAdded` can raise because it is caller-defined - warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg - + if OnAvailabilitySaved =? self.OnAvailabilitySaved: + await OnAvailabilitySaved(obj) return res -proc update*(self: Reservations, obj: Reservation): Future[?!void] {.async.} = +proc update*( + self: Reservations, obj: Reservation +): Future[?!void] {.async: (raises: [CancelledError]).} = return await self.updateImpl(obj) -proc update*(self: Reservations, obj: Availability): Future[?!void] {.async.} = - withLock(self.availabilityLock): - return await self.updateAvailability(obj) +proc update*( + self: Reservations, obj: Availability +): Future[?!void] {.async: (raises: [CancelledError]).} = + try: + withLock(self.availabilityLock): + return await self.updateAvailability(obj) + except AsyncLockError as e: + error "Lock error when trying to update the availability", err = e.msg + return failure(e) proc delete(self: Reservations, key: Key): Future[?!void] {.async.} = trace "deleting object", key @@ -361,7 +400,7 @@ proc deleteReservation*( else: return failure(error) - if reservation.size > 0.u256: + if reservation.size > 0.uint64: trace "returning remaining reservation bytes to availability", size = reservation.size @@ -389,17 +428,25 @@ proc deleteReservation*( proc createAvailability*( self: Reservations, - size: UInt256, - duration: UInt256, + size: uint64, + duration: uint64, minPricePerBytePerSecond: UInt256, totalCollateral: UInt256, + enabled: bool, + until: SecondsSince1970, ): Future[?!Availability] {.async.} = trace "creating 
availability", - size, duration, minPricePerBytePerSecond, totalCollateral + size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until - let availability = - Availability.init(size, size, duration, minPricePerBytePerSecond, totalCollateral) - let bytes = availability.freeSize.truncate(uint) + if until < 0: + let error = + newException(UntilOutOfBoundsError, "Cannot set until to a negative value") + return failure(error) + + let availability = Availability.init( + size, size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until + ) + let bytes = availability.freeSize if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) @@ -418,10 +465,11 @@ proc createAvailability*( method createReservation*( self: Reservations, availabilityId: AvailabilityId, - slotSize: UInt256, + slotSize: uint64, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, collateralPerByte: UInt256, + validUntil: SecondsSince1970, ): Future[?!Reservation] {.async, base.} = withLock(self.availabilityLock): without availabilityKey =? availabilityId.key, error: @@ -438,9 +486,11 @@ method createReservation*( ) return failure(error) - trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex + trace "Creating reservation", + availabilityId, slotSize, requestId, slotIndex, validUntil = validUntil - let reservation = Reservation.init(availabilityId, slotSize, requestId, slotIndex) + let reservation = + Reservation.init(availabilityId, slotSize, requestId, slotIndex, validUntil) if createResErr =? 
(await self.update(reservation)).errorOption: return failure(createResErr) @@ -450,7 +500,7 @@ method createReservation*( availability.freeSize -= slotSize # adjust the remaining totalRemainingCollateral - availability.totalRemainingCollateral -= slotSize * collateralPerByte + availability.totalRemainingCollateral -= slotSize.u256 * collateralPerByte # update availability with reduced size trace "Updating availability with reduced size" @@ -475,7 +525,7 @@ proc returnBytesToAvailability*( self: Reservations, availabilityId: AvailabilityId, reservationId: ReservationId, - bytes: UInt256, + bytes: uint64, ): Future[?!void] {.async.} = logScope: reservationId @@ -502,8 +552,7 @@ proc returnBytesToAvailability*( # First lets see if we can re-reserve the bytes, if the Repo's quota # is depleted then we will fail-fast as there is nothing to be done atm. - if reserveErr =? - (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + if reserveErr =? (await self.repo.reserve(bytesToBeReturned.NBytes)).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) without availabilityKey =? availabilityId.key, error: @@ -517,8 +566,7 @@ proc returnBytesToAvailability*( # Update availability with returned size if updateErr =? (await self.updateAvailability(availability)).errorOption: trace "Rolling back returning bytes" - if rollbackErr =? - (await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + if rollbackErr =? (await self.repo.release(bytesToBeReturned.NBytes)).errorOption: rollbackErr.parent = updateErr return failure(rollbackErr) @@ -531,7 +579,7 @@ proc release*( reservationId: ReservationId, availabilityId: AvailabilityId, bytes: uint, -): Future[?!void] {.async.} = +): Future[?!void] {.async: (raises: [CancelledError]).} = logScope: topics = "release" bytes @@ -546,7 +594,7 @@ proc release*( without var reservation =? 
(await self.get(key, Reservation)), error: return failure(error) - if reservation.size < bytes.u256: + if reservation.size < bytes: let error = newException( BytesOutOfBoundsError, "trying to release an amount of bytes that is greater than the total size of the Reservation", @@ -556,7 +604,7 @@ proc release*( if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption: return failure(releaseErr.toErr(ReleaseFailedError)) - reservation.size -= bytes.u256 + reservation.size -= bytes # persist partially used Reservation with updated size if err =? (await self.update(reservation)).errorOption: @@ -569,13 +617,9 @@ proc release*( return success() -iterator items(self: StorableIter): Future[?seq[byte]] = - while not self.finished: - yield self.next() - proc storables( self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey -): Future[?!StorableIter] {.async.} = +): Future[?!StorableIter] {.async: (raises: [CancelledError]).} = var iter = StorableIter() let query = Query.init(queryKey) when T is Availability: @@ -593,7 +637,7 @@ proc storables( return failure(error) # /sales/reservations - proc next(): Future[?seq[byte]] {.async.} = + proc next(): Future[?seq[byte]] {.async: (raises: [CancelledError]).} = await idleAsync() iter.finished = results.finished if not results.finished and res =? (await results.next()) and res.data.len > 0 and @@ -602,7 +646,7 @@ proc storables( return none seq[byte] - proc dispose(): Future[?!void] {.async.} = + proc dispose(): Future[?!void] {.async: (raises: [CancelledError]).} = return await results.dispose() iter.next = next @@ -611,39 +655,49 @@ proc storables( proc allImpl( self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey -): Future[?!seq[T]] {.async.} = +): Future[?!seq[T]] {.async: (raises: [CancelledError]).} = var ret: seq[T] = @[] without storables =? (await self.storables(T, queryKey)), error: return failure(error) for storable in storables.items: - without bytes =? 
(await storable): - continue + try: + without bytes =? (await storable): + continue - without obj =? T.fromJson(bytes), error: - error "json deserialization error", - json = string.fromBytes(bytes), error = error.msg - continue + without obj =? T.fromJson(bytes), error: + error "json deserialization error", + json = string.fromBytes(bytes), error = error.msg + continue - ret.add obj + ret.add obj + except CancelledError as err: + raise err + except CatchableError as err: + error "Error when retrieving storable", error = err.msg + continue return success(ret) -proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} = +proc all*( + self: Reservations, T: type SomeStorableObject +): Future[?!seq[T]] {.async: (raises: [CancelledError]).} = return await self.allImpl(T) proc all*( self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId -): Future[?!seq[T]] {.async.} = - without key =? (ReservationsKey / $availabilityId): +): Future[?!seq[T]] {.async: (raises: [CancelledError]).} = + without key =? key(availabilityId): return failure("no key") return await self.allImpl(T, key) proc findAvailability*( self: Reservations, - size, duration, pricePerBytePerSecond, collateralPerByte: UInt256, + size, duration: uint64, + pricePerBytePerSecond, collateralPerByte: UInt256, + validUntil: SecondsSince1970, ): Future[?Availability] {.async.} = without storables =? (await self.storables(Availability)), e: error "failed to get all storables", error = e.msg @@ -651,11 +705,14 @@ proc findAvailability*( for item in storables.items: if bytes =? (await item) and availability =? 
Availability.fromJson(bytes): - if size <= availability.freeSize and duration <= availability.duration and + if availability.enabled and size <= availability.freeSize and + duration <= availability.duration and collateralPerByte <= availability.maxCollateralPerByte and - pricePerBytePerSecond >= availability.minPricePerBytePerSecond: + pricePerBytePerSecond >= availability.minPricePerBytePerSecond and + (availability.until == 0 or availability.until >= validUntil): trace "availability matched", id = availability.id, + enabled = availability.enabled, size, availFreeSize = availability.freeSize, duration, @@ -663,7 +720,8 @@ proc findAvailability*( pricePerBytePerSecond, availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond, collateralPerByte, - availMaxCollateralPerByte = availability.maxCollateralPerByte + availMaxCollateralPerByte = availability.maxCollateralPerByte, + until = availability.until # TODO: As soon as we're on ARC-ORC, we can use destructors # to automatically dispose our iterators when they fall out of scope. 
@@ -675,6 +733,7 @@ proc findAvailability*( trace "availability did not match", id = availability.id, + enabled = availability.enabled, size, availFreeSize = availability.freeSize, duration, @@ -682,4 +741,5 @@ proc findAvailability*( pricePerBytePerSecond, availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond, collateralPerByte, - availMaxCollateralPerByte = availability.maxCollateralPerByte + availMaxCollateralPerByte = availability.maxCollateralPerByte, + until = availability.until diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index 8a8e5dc0..61f3a9d3 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -6,6 +6,7 @@ import pkg/upraises import ../contracts/requests import ../errors import ../logutils +import ../utils/exceptions import ./statemachine import ./salescontext import ./salesdata @@ -26,9 +27,9 @@ type onFilled*: ?OnFilled OnCleanUp* = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ): Future[void] {.gcsafe, upraises: [].} - OnFilled* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].} SalesAgentError = object of CodexError AllSlotsFilledError* = object of SalesAgentError @@ -39,7 +40,7 @@ func `==`*(a, b: SalesAgent): bool = proc newSalesAgent*( context: SalesContext, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, request: ?StorageRequest, ): SalesAgent = var agent = SalesAgent.new() @@ -68,41 +69,47 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} = let data = agent.data let clock = agent.context.clock - proc onCancelled() {.async.} = + proc onCancelled() {.async: (raises: []).} = without request =? 
data.request: return - let market = agent.context.market - let expiry = await market.requestExpiresAt(data.requestId) + try: + let market = agent.context.market + let expiry = await market.requestExpiresAt(data.requestId) - while true: - let deadline = max(clock.now, expiry) + 1 - trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline - await clock.waitUntil(deadline) + while true: + let deadline = max(clock.now, expiry) + 1 + trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline + await clock.waitUntil(deadline) - without state =? await agent.retrieveRequestState(): - error "Uknown request", requestId = data.requestId - return + without state =? await agent.retrieveRequestState(): + error "Unknown request", requestId = data.requestId + return - case state - of New: - discard - of RequestState.Cancelled: - agent.schedule(cancelledEvent(request)) - break - of RequestState.Started, RequestState.Finished, RequestState.Failed: - break + case state + of New: + discard + of RequestState.Cancelled: + agent.schedule(cancelledEvent(request)) + break + of RequestState.Started, RequestState.Finished, RequestState.Failed: + break - debug "The request is not yet canceled, even though it should be. Waiting for some more time.", - currentState = state, now = clock.now + debug "The request is not yet canceled, even though it should be. 
Waiting for some more time.", + currentState = state, now = clock.now + except CancelledError: + trace "Waiting for expiry to lapse was cancelled", requestId = data.requestId + except CatchableError as e: + error "Error while waiting for expiry to lapse", error = e.msgDetail data.cancelled = onCancelled() method onFulfilled*( agent: SalesAgent, requestId: RequestId ) {.base, gcsafe, upraises: [].} = - if agent.data.requestId == requestId and not agent.data.cancelled.isNil: - agent.data.cancelled.cancelSoon() + let cancelled = agent.data.cancelled + if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished: + cancelled.cancelSoon() method onFailed*( agent: SalesAgent, requestId: RequestId @@ -113,7 +120,7 @@ method onFailed*( agent.schedule(failedEvent(request)) method onSlotFilled*( - agent: SalesAgent, requestId: RequestId, slotIndex: UInt256 + agent: SalesAgent, requestId: RequestId, slotIndex: uint64 ) {.base, gcsafe, upraises: [].} = if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex: agent.schedule(slotFilledEvent(requestId, slotIndex)) diff --git a/codex/sales/salescontext.nim b/codex/sales/salescontext.nim index bb0b5dc9..af940a4b 100644 --- a/codex/sales/salescontext.nim +++ b/codex/sales/salescontext.nim @@ -1,6 +1,7 @@ import pkg/questionable import pkg/questionable/results import pkg/upraises +import pkg/libp2p/cid import ../market import ../clock @@ -25,13 +26,13 @@ type BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].} OnStore* = proc( - request: StorageRequest, slot: UInt256, blocksCb: BlocksCb + request: StorageRequest, slot: uint64, blocksCb: BlocksCb, isRepairing: bool ): Future[?!void] {.gcsafe, upraises: [].} OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {. gcsafe, upraises: [] .} - OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {. 
+ OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {. gcsafe, upraises: [] .} - OnClear* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} - OnSale* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].} + OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].} diff --git a/codex/sales/salesdata.nim b/codex/sales/salesdata.nim index 995c7a4b..de8eccb5 100644 --- a/codex/sales/salesdata.nim +++ b/codex/sales/salesdata.nim @@ -7,6 +7,6 @@ type SalesData* = ref object requestId*: RequestId ask*: StorageAsk request*: ?StorageRequest - slotIndex*: UInt256 + slotIndex*: uint64 cancelled*: Future[void] reservation*: ?Reservation diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim index 332ec9e0..60700d44 100644 --- a/codex/sales/slotqueue.nim +++ b/codex/sales/slotqueue.nim @@ -3,8 +3,8 @@ import std/tables import pkg/chronos import pkg/questionable import pkg/questionable/results -import pkg/upraises import ../errors +import ../clock import ../logutils import ../rng import ../utils @@ -16,8 +16,9 @@ logScope: topics = "marketplace slotqueue" type - OnProcessSlot* = - proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].} + OnProcessSlot* = proc(item: SlotQueueItem, done: Future[void]): Future[void] {. + gcsafe, async: (raises: []) + .} # Non-ref obj copies value when assigned, preventing accidental modification # of values which could cause an incorrect order (eg @@ -25,16 +26,16 @@ type # but the heap invariant would no longer be honoured. When non-ref, the # compiler can ensure that statement will fail). 
SlotQueueWorker = object - doneProcessing*: Future[void] + doneProcessing*: Future[void].Raising([]) SlotQueueItem* = object requestId: RequestId slotIndex: uint16 - slotSize: UInt256 - duration: UInt256 + slotSize: uint64 + duration: uint64 pricePerBytePerSecond: UInt256 - collateralPerByte: UInt256 - expiry: UInt256 + collateral: UInt256 # Collateral computed + expiry: uint64 seen: bool # don't need to -1 to prevent overflow when adding 1 (to always allow push) @@ -75,9 +76,6 @@ proc profitability(item: SlotQueueItem): UInt256 = slotSize: item.slotSize, ).pricePerSlot -proc collateralPerSlot(item: SlotQueueItem): UInt256 = - StorageAsk(collateralPerByte: item.collateralPerByte, slotSize: item.slotSize).collateralPerSlot - proc `<`*(a, b: SlotQueueItem): bool = # for A to have a higher priority than B (in a min queue), A must be less than # B. @@ -94,8 +92,8 @@ proc `<`*(a, b: SlotQueueItem): bool = scoreA.addIf(a.profitability > b.profitability, 3) scoreB.addIf(a.profitability < b.profitability, 3) - scoreA.addIf(a.collateralPerSlot < b.collateralPerSlot, 2) - scoreB.addIf(a.collateralPerSlot > b.collateralPerSlot, 2) + scoreA.addIf(a.collateral < b.collateral, 2) + scoreB.addIf(a.collateral > b.collateral, 2) scoreA.addIf(a.expiry > b.expiry, 1) scoreB.addIf(a.expiry < b.expiry, 1) @@ -128,14 +126,25 @@ proc new*( # `newAsyncQueue` procedure proc init(_: type SlotQueueWorker): SlotQueueWorker = - SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing")) + let workerFut = Future[void].Raising([]).init( + "slotqueue.worker.processing", {FutureFlag.OwnCancelSchedule} + ) + + workerFut.cancelCallback = proc(data: pointer) {.raises: [].} = + # this is equivalent to try: ... except CatchableError: ... 
+ if not workerFut.finished: + workerFut.complete() + trace "Cancelling `SlotQueue` worker processing future" + + SlotQueueWorker(doneProcessing: workerFut) proc init*( _: type SlotQueueItem, requestId: RequestId, slotIndex: uint16, ask: StorageAsk, - expiry: UInt256, + expiry: uint64, + collateral: UInt256, seen = false, ): SlotQueueItem = SlotQueueItem( @@ -144,25 +153,32 @@ proc init*( slotSize: ask.slotSize, duration: ask.duration, pricePerBytePerSecond: ask.pricePerBytePerSecond, - collateralPerByte: ask.collateralPerByte, + collateral: collateral, expiry: expiry, seen: seen, ) proc init*( - _: type SlotQueueItem, request: StorageRequest, slotIndex: uint16 + _: type SlotQueueItem, + request: StorageRequest, + slotIndex: uint16, + collateral: UInt256, ): SlotQueueItem = - SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry) + SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry, collateral) proc init*( - _: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: UInt256 -): seq[SlotQueueItem] = + _: type SlotQueueItem, + requestId: RequestId, + ask: StorageAsk, + expiry: uint64, + collateral: UInt256, +): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} = if not ask.slots.inRange: raise newException(SlotsOutOfRangeError, "Too many slots") var i = 0'u16 proc initSlotQueueItem(): SlotQueueItem = - let item = SlotQueueItem.init(requestId, i, ask, expiry) + let item = SlotQueueItem.init(requestId, i, ask, expiry, collateral) inc i return item @@ -170,8 +186,10 @@ proc init*( Rng.instance.shuffle(items) return items -proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] = - return SlotQueueItem.init(request.id, request.ask, request.expiry) +proc init*( + _: type SlotQueueItem, request: StorageRequest, collateral: UInt256 +): seq[SlotQueueItem] = + return SlotQueueItem.init(request.id, request.ask, request.expiry, collateral) proc inRange*(val: SomeUnsignedInt): bool = val.uint16 in 
SlotQueueSize.low .. SlotQueueSize.high @@ -182,10 +200,10 @@ proc requestId*(self: SlotQueueItem): RequestId = proc slotIndex*(self: SlotQueueItem): uint16 = self.slotIndex -proc slotSize*(self: SlotQueueItem): UInt256 = +proc slotSize*(self: SlotQueueItem): uint64 = self.slotSize -proc duration*(self: SlotQueueItem): UInt256 = +proc duration*(self: SlotQueueItem): uint64 = self.duration proc pricePerBytePerSecond*(self: SlotQueueItem): UInt256 = @@ -233,25 +251,7 @@ proc unpause*(self: SlotQueue) = # set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait() self.unpaused.fire() -proc populateItem*( - self: SlotQueue, requestId: RequestId, slotIndex: uint16 -): ?SlotQueueItem = - trace "populate item, items in queue", len = self.queue.len - for item in self.queue.items: - trace "populate item search", itemRequestId = item.requestId, requestId - if item.requestId == requestId: - return some SlotQueueItem( - requestId: requestId, - slotIndex: slotIndex, - slotSize: item.slotSize, - duration: item.duration, - pricePerBytePerSecond: item.pricePerBytePerSecond, - collateralPerByte: item.collateralPerByte, - expiry: item.expiry, - ) - return none SlotQueueItem - -proc push*(self: SlotQueue, item: SlotQueueItem): ?!void = +proc push*(self: SlotQueue, item: SlotQueueItem): ?!void {.raises: [].} = logScope: requestId = item.requestId slotIndex = item.slotIndex @@ -429,7 +429,6 @@ proc run(self: SlotQueue) {.async: (raises: []).} = let fut = self.dispatch(worker, item) self.trackedFutures.track(fut) - asyncSpawn fut await sleepAsync(1.millis) # poll except CancelledError: @@ -457,7 +456,6 @@ proc start*(self: SlotQueue) = let fut = self.run() self.trackedFutures.track(fut) - asyncSpawn fut proc stop*(self: SlotQueue) {.async.} = if not self.running: diff --git a/codex/sales/statemachine.nim b/codex/sales/statemachine.nim index 6d3c7101..ec770ece 100644 --- a/codex/sales/statemachine.nim +++ b/codex/sales/statemachine.nim @@ -25,7 +25,7 @@ method onFailed*( 
discard method onSlotFilled*( - state: SaleState, requestId: RequestId, slotIndex: UInt256 + state: SaleState, requestId: RequestId, slotIndex: uint64 ): ?State {.base, upraises: [].} = discard @@ -37,6 +37,6 @@ proc failedEvent*(request: StorageRequest): Event = return proc(state: State): ?State = SaleState(state).onFailed(request) -proc slotFilledEvent*(requestId: RequestId, slotIndex: UInt256): Event = +proc slotFilledEvent*(requestId: RequestId, slotIndex: uint64): Event = return proc(state: State): ?State = SaleState(state).onSlotFilled(requestId, slotIndex) diff --git a/codex/sales/states/cancelled.nim b/codex/sales/states/cancelled.nim index 3bb92a2c..f3c755a3 100644 --- a/codex/sales/states/cancelled.nim +++ b/codex/sales/states/cancelled.nim @@ -1,17 +1,28 @@ import ../../logutils +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling +import ./errored logScope: topics = "marketplace sales cancelled" -type SaleCancelled* = ref object of ErrorHandlingState +type SaleCancelled* = ref object of SaleState method `$`*(state: SaleCancelled): string = "SaleCancelled" -method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} = +proc slotIsFilledByMe( + market: Market, requestId: RequestId, slotIndex: uint64 +): Future[bool] {.async: (raises: [CancelledError, MarketError]).} = + let host = await market.getHost(requestId, slotIndex) + let me = await market.getSigner() + + return host == me.some + +method run*( + state: SaleCancelled, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let market = agent.context.market @@ -19,21 +30,33 @@ method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} = without request =? 
data.request: raiseAssert "no sale request" - let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Collecting collateral and partial payout", - requestId = data.requestId, slotIndex = data.slotIndex - let currentCollateral = await market.currentCollateral(slot.id) - await market.freeSlot(slot.id) + try: + var returnedCollateral = UInt256.none - if onClear =? agent.context.onClear and request =? data.request: - onClear(request, data.slotIndex) + if await slotIsFilledByMe(market, data.requestId, data.slotIndex): + debug "Collecting collateral and partial payout", + requestId = data.requestId, slotIndex = data.slotIndex - if onCleanUp =? agent.onCleanUp: - await onCleanUp( - returnBytes = true, - reprocessSlot = false, - returnedCollateral = some currentCollateral, - ) + let slot = Slot(request: request, slotIndex: data.slotIndex) + let currentCollateral = await market.currentCollateral(slot.id) - warn "Sale cancelled due to timeout", - requestId = data.requestId, slotIndex = data.slotIndex + try: + await market.freeSlot(slot.id) + except SlotStateMismatchError as e: + warn "Failed to free slot because slot is already free", error = e.msg + + returnedCollateral = currentCollateral.some + + if onClear =? agent.context.onClear and request =? data.request: + onClear(request, data.slotIndex) + + if onCleanUp =? 
agent.onCleanUp: + await onCleanUp(reprocessSlot = false, returnedCollateral = returnedCollateral) + + warn "Sale cancelled due to timeout", + requestId = data.requestId, slotIndex = data.slotIndex + except CancelledError as e: + trace "SaleCancelled.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleCancelled.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/downloading.nim b/codex/sales/states/downloading.nim index f6ced6be..7cf304d3 100644 --- a/codex/sales/states/downloading.nim +++ b/codex/sales/states/downloading.nim @@ -4,16 +4,16 @@ import pkg/questionable/results import ../../blocktype as bt import ../../logutils import ../../market +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./cancelled import ./failed import ./filled import ./initialproving import ./errored -type SaleDownloading* = ref object of ErrorHandlingState +type SaleDownloading* = ref object of SaleState logScope: topics = "marketplace sales downloading" @@ -28,11 +28,13 @@ method onFailed*(state: SaleDownloading, request: StorageRequest): ?State = return some State(SaleFailed()) method onSlotFilled*( - state: SaleDownloading, requestId: RequestId, slotIndex: UInt256 + state: SaleDownloading, requestId: RequestId, slotIndex: uint64 ): ?State = return some State(SaleFilled()) -method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleDownloading, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context @@ -64,9 +66,18 @@ method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.} trace "Releasing batch of bytes written to disk", bytes return await reservations.release(reservation.id, reservation.availabilityId, bytes) - trace "Starting download" - if err =? 
(await onStore(request, data.slotIndex, onBlocks)).errorOption: - return some State(SaleErrored(error: err, reprocessSlot: false)) + try: + let slotId = slotId(request.id, data.slotIndex) + let isRepairing = (await context.market.slotState(slotId)) == SlotState.Repair - trace "Download complete" - return some State(SaleInitialProving()) + trace "Starting download" + if err =? (await onStore(request, data.slotIndex, onBlocks, isRepairing)).errorOption: + return some State(SaleErrored(error: err, reprocessSlot: false)) + + trace "Download complete" + return some State(SaleInitialProving()) + except CancelledError as e: + trace "SaleDownloading.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleDownloading.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/errored.nim b/codex/sales/states/errored.nim index b85b7930..95848fd3 100644 --- a/codex/sales/states/errored.nim +++ b/codex/sales/states/errored.nim @@ -17,10 +17,9 @@ type SaleErrored* = ref object of SaleState method `$`*(state: SaleErrored): string = "SaleErrored" -method onError*(state: SaleState, err: ref CatchableError): ?State {.upraises: [].} = - error "error during SaleErrored run", error = err.msg - -method run*(state: SaleErrored, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleErrored, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context @@ -30,8 +29,13 @@ method run*(state: SaleErrored, machine: Machine): Future[?State] {.async.} = requestId = data.requestId, slotIndex = data.slotIndex - if onClear =? context.onClear and request =? data.request: - onClear(request, data.slotIndex) + try: + if onClear =? context.onClear and request =? data.request: + onClear(request, data.slotIndex) - if onCleanUp =? 
agent.onCleanUp: - await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot) + if onCleanUp =? agent.onCleanUp: + await onCleanUp(reprocessSlot = state.reprocessSlot) + except CancelledError as e: + trace "SaleErrored.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleErrored.run", error = e.msgDetail diff --git a/codex/sales/states/errorhandling.nim b/codex/sales/states/errorhandling.nim deleted file mode 100644 index 2ee399ef..00000000 --- a/codex/sales/states/errorhandling.nim +++ /dev/null @@ -1,8 +0,0 @@ -import pkg/questionable -import ../statemachine -import ./errored - -type ErrorHandlingState* = ref object of SaleState - -method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State = - some State(SaleErrored(error: error)) diff --git a/codex/sales/states/failed.nim b/codex/sales/states/failed.nim index 6103765c..f1490d20 100644 --- a/codex/sales/states/failed.nim +++ b/codex/sales/states/failed.nim @@ -1,30 +1,40 @@ import ../../logutils +import ../../utils/exceptions +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./errored logScope: topics = "marketplace sales failed" type - SaleFailed* = ref object of ErrorHandlingState + SaleFailed* = ref object of SaleState SaleFailedError* = object of SaleError method `$`*(state: SaleFailed): string = "SaleFailed" -method run*(state: SaleFailed, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFailed, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let market = SalesAgent(machine).context.market without request =? 
data.request: raiseAssert "no sale request" - let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Removing slot from mySlots", - requestId = data.requestId, slotIndex = data.slotIndex - await market.freeSlot(slot.id) + try: + let slot = Slot(request: request, slotIndex: data.slotIndex) + debug "Removing slot from mySlots", + requestId = data.requestId, slotIndex = data.slotIndex - let error = newException(SaleFailedError, "Sale failed") - return some State(SaleErrored(error: error)) + await market.freeSlot(slot.id) + + let error = newException(SaleFailedError, "Sale failed") + return some State(SaleErrored(error: error)) + except CancelledError as e: + trace "SaleFailed.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFailed.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/filled.nim b/codex/sales/states/filled.nim index 9e7d9906..b0fc65c9 100644 --- a/codex/sales/states/filled.nim +++ b/codex/sales/states/filled.nim @@ -3,9 +3,9 @@ import pkg/questionable/results import ../../conf import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./errored import ./cancelled import ./failed @@ -18,7 +18,7 @@ logScope: topics = "marketplace sales filled" type - SaleFilled* = ref object of ErrorHandlingState + SaleFilled* = ref object of SaleState HostMismatchError* = object of CatchableError method onCancelled*(state: SaleFilled, request: StorageRequest): ?State = @@ -30,40 +30,48 @@ method onFailed*(state: SaleFilled, request: StorageRequest): ?State = method `$`*(state: SaleFilled): string = "SaleFilled" -method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFilled, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context - let market = context.market - 
let host = await market.getHost(data.requestId, data.slotIndex) - let me = await market.getSigner() - if host == me.some: - info "Slot succesfully filled", - requestId = data.requestId, slotIndex = data.slotIndex + try: + let host = await market.getHost(data.requestId, data.slotIndex) + let me = await market.getSigner() - without request =? data.request: - raiseAssert "no sale request" + if host == me.some: + info "Slot succesfully filled", + requestId = data.requestId, slotIndex = data.slotIndex - if onFilled =? agent.onFilled: - onFilled(request, data.slotIndex) + without request =? data.request: + raiseAssert "no sale request" - without onExpiryUpdate =? context.onExpiryUpdate: - raiseAssert "onExpiryUpdate callback not set" + if onFilled =? agent.onFilled: + onFilled(request, data.slotIndex) - let requestEnd = await market.getRequestEnd(data.requestId) - if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption: - return some State(SaleErrored(error: err)) + without onExpiryUpdate =? context.onExpiryUpdate: + raiseAssert "onExpiryUpdate callback not set" - when codex_enable_proof_failures: - if context.simulateProofFailures > 0: - info "Proving with failure rate", rate = context.simulateProofFailures - return some State( - SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures) - ) + let requestEnd = await market.getRequestEnd(data.requestId) + if err =? 
(await onExpiryUpdate(request.content.cid, requestEnd)).errorOption: + return some State(SaleErrored(error: err)) - return some State(SaleProving()) - else: - let error = newException(HostMismatchError, "Slot filled by other host") - return some State(SaleErrored(error: error)) + when codex_enable_proof_failures: + if context.simulateProofFailures > 0: + info "Proving with failure rate", rate = context.simulateProofFailures + return some State( + SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures) + ) + + return some State(SaleProving()) + else: + let error = newException(HostMismatchError, "Slot filled by other host") + return some State(SaleErrored(error: error)) + except CancelledError as e: + trace "SaleFilled.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFilled.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim index 1934fc12..1b76150a 100644 --- a/codex/sales/states/filling.nim +++ b/codex/sales/states/filling.nim @@ -1,9 +1,9 @@ import pkg/stint import ../../logutils import ../../market +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./filled import ./cancelled import ./failed @@ -13,7 +13,7 @@ import ./errored logScope: topics = "marketplace sales filling" -type SaleFilling* = ref object of ErrorHandlingState +type SaleFilling* = ref object of SaleState proof*: Groth16Proof method `$`*(state: SaleFilling): string = @@ -25,9 +25,12 @@ method onCancelled*(state: SaleFilling, request: StorageRequest): ?State = method onFailed*(state: SaleFilling, request: StorageRequest): ?State = return some State(SaleFailed()) -method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFilling, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let market = 
SalesAgent(machine).context.market + without (request =? data.request): raiseAssert "Request not set" @@ -35,28 +38,26 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = requestId = data.requestId slotIndex = data.slotIndex - let slotState = await market.slotState(slotId(data.requestId, data.slotIndex)) - let requestedCollateral = request.ask.collateralPerSlot - var collateral: UInt256 - - if slotState == SlotState.Repair: - # When repairing the node gets "discount" on the collateral that it needs to - let repairRewardPercentage = (await market.repairRewardPercentage).u256 - collateral = - requestedCollateral - - ((requestedCollateral * repairRewardPercentage)).div(100.u256) - else: - collateral = requestedCollateral - - debug "Filling slot" try: - await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) - except MarketError as e: - if e.msg.contains "Slot is not free": - debug "Slot is already filled, ignoring slot" - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) - else: - return some State(SaleErrored(error: e)) - # other CatchableErrors are handled "automatically" by the ErrorHandlingState + without collateral =? 
await market.slotCollateral(data.requestId, data.slotIndex), + err: + error "Failure attempting to fill slot: unable to calculate collateral", + error = err.msg + return some State(SaleErrored(error: err)) - return some State(SaleFilled()) + debug "Filling slot" + try: + await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) + except SlotStateMismatchError as e: + debug "Slot is already filled, ignoring slot" + return some State(SaleIgnored(reprocessSlot: false)) + except MarketError as e: + return some State(SaleErrored(error: e)) + # other CatchableErrors are handled "automatically" by the SaleState + + return some State(SaleFilled()) + except CancelledError as e: + trace "SaleFilling.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFilling.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/finished.nim b/codex/sales/states/finished.nim index 151300d0..16e66d27 100644 --- a/codex/sales/states/finished.nim +++ b/codex/sales/states/finished.nim @@ -1,16 +1,17 @@ import pkg/chronos import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./cancelled import ./failed +import ./errored logScope: topics = "marketplace sales finished" -type SaleFinished* = ref object of ErrorHandlingState +type SaleFinished* = ref object of SaleState returnedCollateral*: ?UInt256 method `$`*(state: SaleFinished): string = @@ -22,7 +23,9 @@ method onCancelled*(state: SaleFinished, request: StorageRequest): ?State = method onFailed*(state: SaleFinished, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFinished, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data @@ -32,5 +35,14 @@ method run*(state: SaleFinished, 
machine: Machine): Future[?State] {.async.} = info "Slot finished and paid out", requestId = data.requestId, slotIndex = data.slotIndex - if onCleanUp =? agent.onCleanUp: - await onCleanUp(returnedCollateral = state.returnedCollateral) + try: + if onClear =? agent.context.onClear: + onClear(request, data.slotIndex) + + if onCleanUp =? agent.onCleanUp: + await onCleanUp(returnedCollateral = state.returnedCollateral) + except CancelledError as e: + trace "SaleFilled.run onCleanUp was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFilled.run in onCleanUp callback", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/ignored.nim b/codex/sales/states/ignored.nim index b915bff5..7f2ae5b1 100644 --- a/codex/sales/states/ignored.nim +++ b/codex/sales/states/ignored.nim @@ -1,9 +1,10 @@ import pkg/chronos import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling +import ./errored logScope: topics = "marketplace sales ignored" @@ -11,17 +12,22 @@ logScope: # Ignored slots could mean there was no availability or that the slot could # not be reserved. -type SaleIgnored* = ref object of ErrorHandlingState +type SaleIgnored* = ref object of SaleState reprocessSlot*: bool # readd slot to queue with `seen` flag - returnBytes*: bool # return unreleased bytes from Reservation to Availability method `$`*(state: SaleIgnored): string = "SaleIgnored" -method run*(state: SaleIgnored, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleIgnored, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) - if onCleanUp =? agent.onCleanUp: - await onCleanUp( - reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes - ) + try: + if onCleanUp =? 
agent.onCleanUp: + await onCleanUp(reprocessSlot = state.reprocessSlot) + except CancelledError as e: + trace "SaleIgnored.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleIgnored.run in onCleanUp", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/initialproving.nim b/codex/sales/states/initialproving.nim index bc9ce6b6..57e8cc2c 100644 --- a/codex/sales/states/initialproving.nim +++ b/codex/sales/states/initialproving.nim @@ -1,9 +1,9 @@ import pkg/questionable/results import ../../clock import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./filling import ./cancelled import ./errored @@ -12,7 +12,7 @@ import ./failed logScope: topics = "marketplace sales initial-proving" -type SaleInitialProving* = ref object of ErrorHandlingState +type SaleInitialProving* = ref object of SaleState method `$`*(state: SaleInitialProving): string = "SaleInitialProving" @@ -25,9 +25,9 @@ method onFailed*(state: SaleInitialProving, request: StorageRequest): ?State = proc waitUntilNextPeriod(clock: Clock, periodicity: Periodicity) {.async.} = trace "Waiting until next period" - let period = periodicity.periodOf(clock.now().u256) - let periodEnd = periodicity.periodEnd(period).truncate(int64) - await clock.waitUntil(periodEnd + 1) + let period = periodicity.periodOf(clock.now().Timestamp) + let periodEnd = periodicity.periodEnd(period) + await clock.waitUntil((periodEnd + 1).toSecondsSince1970) proc waitForStableChallenge(market: Market, clock: Clock, slotId: SlotId) {.async.} = let periodicity = await market.periodicity() @@ -36,7 +36,9 @@ proc waitForStableChallenge(market: Market, clock: Clock, slotId: SlotId) {.asyn while (await market.getPointer(slotId)) > (256 - downtime): await clock.waitUntilNextPeriod(periodicity) -method run*(state: SaleInitialProving, machine: Machine): Future[?State] {.async.} = +method run*( 
+ state: SaleInitialProving, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let context = SalesAgent(machine).context let market = context.market @@ -48,16 +50,22 @@ method run*(state: SaleInitialProving, machine: Machine): Future[?State] {.async without onProve =? context.onProve: raiseAssert "onProve callback not set" - debug "Waiting for a proof challenge that is valid for the entire period" - let slot = Slot(request: request, slotIndex: data.slotIndex) - await waitForStableChallenge(market, clock, slot.id) + try: + debug "Waiting for a proof challenge that is valid for the entire period" + let slot = Slot(request: request, slotIndex: data.slotIndex) + await waitForStableChallenge(market, clock, slot.id) - debug "Generating initial proof", requestId = data.requestId - let challenge = await context.market.getChallenge(slot.id) - without proof =? (await onProve(slot, challenge)), err: - error "Failed to generate initial proof", error = err.msg - return some State(SaleErrored(error: err)) + debug "Generating initial proof", requestId = data.requestId + let challenge = await context.market.getChallenge(slot.id) + without proof =? 
(await onProve(slot, challenge)), err: + error "Failed to generate initial proof", error = err.msg + return some State(SaleErrored(error: err)) - debug "Finished proof calculation", requestId = data.requestId + debug "Finished proof calculation", requestId = data.requestId - return some State(SaleFilling(proof: proof)) + return some State(SaleFilling(proof: proof)) + except CancelledError as e: + trace "SaleInitialProving.run onCleanUp was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleInitialProving.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/payout.nim b/codex/sales/states/payout.nim index 9ce36613..e808307d 100644 --- a/codex/sales/states/payout.nim +++ b/codex/sales/states/payout.nim @@ -1,16 +1,17 @@ import ../../logutils import ../../market +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./cancelled import ./failed import ./finished +import ./errored logScope: topics = "marketplace sales payout" -type SalePayout* = ref object of ErrorHandlingState +type SalePayout* = ref object of SaleState method `$`*(state: SalePayout): string = "SalePayout" @@ -21,17 +22,25 @@ method onCancelled*(state: SalePayout, request: StorageRequest): ?State = method onFailed*(state: SalePayout, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SalePayout, machine: Machine): Future[?State] {.async.} = +method run*( + state: SalePayout, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let market = SalesAgent(machine).context.market without request =? 
data.request: raiseAssert "no sale request" - let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Collecting finished slot's reward", - requestId = data.requestId, slotIndex = data.slotIndex - let currentCollateral = await market.currentCollateral(slot.id) - await market.freeSlot(slot.id) + try: + let slot = Slot(request: request, slotIndex: data.slotIndex) + debug "Collecting finished slot's reward", + requestId = data.requestId, slotIndex = data.slotIndex + let currentCollateral = await market.currentCollateral(slot.id) + await market.freeSlot(slot.id) - return some State(SaleFinished(returnedCollateral: some currentCollateral)) + return some State(SaleFinished(returnedCollateral: some currentCollateral)) + except CancelledError as e: + trace "SalePayout.run onCleanUp was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SalePayout.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/preparing.nim b/codex/sales/states/preparing.nim index bdde1249..a3aee4c9 100644 --- a/codex/sales/states/preparing.nim +++ b/codex/sales/states/preparing.nim @@ -4,9 +4,9 @@ import pkg/metrics import ../../logutils import ../../market +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./cancelled import ./failed import ./filled @@ -18,7 +18,7 @@ declareCounter( codex_reservations_availability_mismatch, "codex reservations availability_mismatch" ) -type SalePreparing* = ref object of ErrorHandlingState +type SalePreparing* = ref object of SaleState logScope: topics = "marketplace sales preparing" @@ -33,66 +33,76 @@ method onFailed*(state: SalePreparing, request: StorageRequest): ?State = return some State(SaleFailed()) method onSlotFilled*( - state: SalePreparing, requestId: RequestId, slotIndex: UInt256 + state: SalePreparing, requestId: RequestId, slotIndex: uint64 ): ?State = return some State(SaleFilled()) -method run*(state: 
SalePreparing, machine: Machine): Future[?State] {.async.} = +method run*( + state: SalePreparing, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context let market = context.market let reservations = context.reservations - await agent.retrieveRequest() - await agent.subscribe() + try: + await agent.retrieveRequest() + await agent.subscribe() - without request =? data.request: - raiseAssert "no sale request" + without request =? data.request: + raiseAssert "no sale request" - let slotId = slotId(data.requestId, data.slotIndex) - let state = await market.slotState(slotId) - if state != SlotState.Free and state != SlotState.Repair: - return some State(SaleIgnored(reprocessSlot: false, returnBytes: false)) + let slotId = slotId(data.requestId, data.slotIndex) + let state = await market.slotState(slotId) + if state != SlotState.Free and state != SlotState.Repair: + return some State(SaleIgnored(reprocessSlot: false)) - # TODO: Once implemented, check to ensure the host is allowed to fill the slot, - # due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal) + # TODO: Once implemented, check to ensure the host is allowed to fill the slot, + # due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal) - logScope: - slotIndex = data.slotIndex - slotSize = request.ask.slotSize - duration = request.ask.duration - pricePerBytePerSecond = request.ask.pricePerBytePerSecond - collateralPerByte = request.ask.collateralPerByte + logScope: + slotIndex = data.slotIndex + slotSize = request.ask.slotSize + duration = request.ask.duration + pricePerBytePerSecond = request.ask.pricePerBytePerSecond + collateralPerByte = request.ask.collateralPerByte - without availability =? 
- await reservations.findAvailability( - request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond, - request.ask.collateralPerByte, - ): - debug "No availability found for request, ignoring" + let requestEnd = await market.getRequestEnd(data.requestId) - return some State(SaleIgnored(reprocessSlot: true)) + without availability =? + await reservations.findAvailability( + request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond, + request.ask.collateralPerByte, requestEnd, + ): + debug "No availability found for request, ignoring" - info "Availability found for request, creating reservation" - - without reservation =? - await reservations.createReservation( - availability.id, request.ask.slotSize, request.id, data.slotIndex, - request.ask.collateralPerByte, - ), error: - trace "Creation of reservation failed" - # Race condition: - # reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it. - # Should createReservation fail because there's no space, we proceed to SaleIgnored. - if error of BytesOutOfBoundsError: - # Lets monitor how often this happen and if it is often we can make it more inteligent to handle it - codex_reservations_availability_mismatch.inc() return some State(SaleIgnored(reprocessSlot: true)) - return some State(SaleErrored(error: error)) + info "Availability found for request, creating reservation" - trace "Reservation created succesfully" + without reservation =? + await reservations.createReservation( + availability.id, request.ask.slotSize, request.id, data.slotIndex, + request.ask.collateralPerByte, requestEnd, + ), error: + trace "Creation of reservation failed" + # Race condition: + # reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it. 
+ # Should createReservation fail because there's no space, we proceed to SaleIgnored. + if error of BytesOutOfBoundsError: + # Lets monitor how often this happen and if it is often we can make it more inteligent to handle it + codex_reservations_availability_mismatch.inc() + return some State(SaleIgnored(reprocessSlot: true)) - data.reservation = some reservation - return some State(SaleSlotReserving()) + return some State(SaleErrored(error: error)) + + trace "Reservation created successfully" + + data.reservation = some reservation + return some State(SaleSlotReserving()) + except CancelledError as e: + trace "SalePreparing.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SalePreparing.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/proving.nim b/codex/sales/states/proving.nim index 0ee2ed60..690e9136 100644 --- a/codex/sales/states/proving.nim +++ b/codex/sales/states/proving.nim @@ -6,7 +6,6 @@ import ../../utils/exceptions import ../statemachine import ../salesagent import ../salescontext -import ./errorhandling import ./cancelled import ./failed import ./errored @@ -18,7 +17,7 @@ logScope: type SlotFreedError* = object of CatchableError SlotNotFilledError* = object of CatchableError - SaleProving* = ref object of ErrorHandlingState + SaleProving* = ref object of SaleState loop: Future[void] method prove*( @@ -47,7 +46,7 @@ proc proveLoop( market: Market, clock: Clock, request: StorageRequest, - slotIndex: UInt256, + slotIndex: uint64, onProve: OnProve, ) {.async.} = let slot = Slot(request: request, slotIndex: slotIndex) @@ -61,12 +60,12 @@ proc proveLoop( proc getCurrentPeriod(): Future[Period] {.async.} = let periodicity = await market.periodicity() - return periodicity.periodOf(clock.now().u256) + return periodicity.periodOf(clock.now().Timestamp) proc waitUntilPeriod(period: Period) {.async.} = let periodicity = await market.periodicity() # Ensure that we're 
past the period boundary by waiting an additional second - await clock.waitUntil(periodicity.periodStart(period).truncate(int64) + 1) + await clock.waitUntil((periodicity.periodStart(period) + 1).toSecondsSince1970) while true: let currentPeriod = await getCurrentPeriod() @@ -113,7 +112,9 @@ method onFailed*(state: SaleProving, request: StorageRequest): ?State = # state change return some State(SaleFailed()) -method run*(state: SaleProving, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleProving, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let context = SalesAgent(machine).context @@ -129,27 +130,37 @@ method run*(state: SaleProving, machine: Machine): Future[?State] {.async.} = without clock =? context.clock: raiseAssert("clock not set") - debug "Start proving", requestId = data.requestId, slotIndex = data.slotIndex try: - let loop = state.proveLoop(market, clock, request, data.slotIndex, onProve) - state.loop = loop - await loop - except CancelledError: - discard + debug "Start proving", requestId = data.requestId, slotIndex = data.slotIndex + try: + let loop = state.proveLoop(market, clock, request, data.slotIndex, onProve) + state.loop = loop + await loop + except CancelledError as e: + trace "proving loop cancelled" + discard + except CatchableError as e: + error "Proving failed", + msg = e.msg, typ = $(type e), stack = e.getStackTrace(), error = e.msgDetail + return some State(SaleErrored(error: e)) + finally: + # Cleanup of the proving loop + debug "Stopping proving.", requestId = data.requestId, slotIndex = data.slotIndex + + if not state.loop.isNil: + if not state.loop.finished: + try: + await state.loop.cancelAndWait() + except CancelledError: + discard + except CatchableError as e: + error "Error during cancellation of proving loop", msg = e.msg + + state.loop = nil + + return some State(SalePayout()) + except CancelledError as e: + trace "SaleProving.run was
cancelled", error = e.msgDetail except CatchableError as e: - error "Proving failed", msg = e.msg + error "Error during SaleProving.run", error = e.msgDetail return some State(SaleErrored(error: e)) - finally: - # Cleanup of the proving loop - debug "Stopping proving.", requestId = data.requestId, slotIndex = data.slotIndex - - if not state.loop.isNil: - if not state.loop.finished: - try: - await state.loop.cancelAndWait() - except CatchableError as e: - error "Error during cancellation of proving loop", msg = e.msg - - state.loop = nil - - return some State(SalePayout()) diff --git a/codex/sales/states/provingsimulated.nim b/codex/sales/states/provingsimulated.nim index e60169bc..b8a3e9ce 100644 --- a/codex/sales/states/provingsimulated.nim +++ b/codex/sales/states/provingsimulated.nim @@ -4,12 +4,14 @@ when codex_enable_proof_failures: import pkg/stint import pkg/ethers + import ../../contracts/marketplace import ../../contracts/requests import ../../logutils import ../../market import ../../utils/exceptions import ../salescontext import ./proving + import ./errored logScope: topics = "marketplace sales simulated-proving" @@ -18,7 +20,7 @@ when codex_enable_proof_failures: failEveryNProofs*: int proofCount: int - proc onSubmitProofError(error: ref CatchableError, period: UInt256, slotId: SlotId) = + proc onSubmitProofError(error: ref CatchableError, period: Period, slotId: SlotId) = error "Submitting invalid proof failed", period, slotId, msg = error.msgDetail method prove*( @@ -29,22 +31,27 @@ when codex_enable_proof_failures: market: Market, currentPeriod: Period, ) {.async.} = - trace "Processing proving in simulated mode" - state.proofCount += 1 - if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0: - state.proofCount = 0 + try: + trace "Processing proving in simulated mode" + state.proofCount += 1 + if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0: + state.proofCount = 0 - try: - warn 
"Submitting INVALID proof", period = currentPeriod, slotId = slot.id - await market.submitProof(slot.id, Groth16Proof.default) - except MarketError as e: - if not e.msg.contains("Invalid proof"): + try: + warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id + await market.submitProof(slot.id, Groth16Proof.default) + except Proofs_InvalidProof as e: + discard # expected + except CancelledError as error: + raise error + except CatchableError as e: onSubmitProofError(e, currentPeriod, slot.id) - except CancelledError as error: - raise error - except CatchableError as e: - onSubmitProofError(e, currentPeriod, slot.id) - else: - await procCall SaleProving(state).prove( - slot, challenge, onProve, market, currentPeriod - ) + else: + await procCall SaleProving(state).prove( + slot, challenge, onProve, market, currentPeriod + ) + except CancelledError as e: + trace "Submitting INVALID proof cancelled", error = e.msgDetail + raise e + except CatchableError as e: + error "Submitting INVALID proof failed", error = e.msgDetail diff --git a/codex/sales/states/slotreserving.nim b/codex/sales/states/slotreserving.nim index 38b7fa76..780dadfc 100644 --- a/codex/sales/states/slotreserving.nim +++ b/codex/sales/states/slotreserving.nim @@ -3,16 +3,16 @@ import pkg/metrics import ../../logutils import ../../market +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./cancelled import ./failed import ./ignored import ./downloading import ./errored -type SaleSlotReserving* = ref object of ErrorHandlingState +type SaleSlotReserving* = ref object of SaleState logScope: topics = "marketplace sales reserving" @@ -26,7 +26,9 @@ method onCancelled*(state: SaleSlotReserving, request: StorageRequest): ?State = method onFailed*(state: SaleSlotReserving, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async.} = +method run*( + 
state: SaleSlotReserving, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context @@ -36,23 +38,28 @@ method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async. requestId = data.requestId slotIndex = data.slotIndex - let canReserve = await market.canReserveSlot(data.requestId, data.slotIndex) - if canReserve: - try: - trace "Reserving slot" - await market.reserveSlot(data.requestId, data.slotIndex) - except MarketError as e: - if e.msg.contains "SlotReservations_ReservationNotAllowed": + try: + let canReserve = await market.canReserveSlot(data.requestId, data.slotIndex) + if canReserve: + try: + trace "Reserving slot" + await market.reserveSlot(data.requestId, data.slotIndex) + except SlotReservationNotAllowedError as e: debug "Slot cannot be reserved, ignoring", error = e.msg - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) - else: + return some State(SaleIgnored(reprocessSlot: false)) + except MarketError as e: return some State(SaleErrored(error: e)) - # other CatchableErrors are handled "automatically" by the ErrorHandlingState + # other CatchableErrors are handled "automatically" by the SaleState - trace "Slot successfully reserved" - return some State(SaleDownloading()) - else: - # do not re-add this slot to the queue, and return bytes from Reservation to - # the Availability - debug "Slot cannot be reserved, ignoring" - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + trace "Slot successfully reserved" + return some State(SaleDownloading()) + else: + # do not re-add this slot to the queue, and return bytes from Reservation to + # the Availability + debug "Slot cannot be reserved, ignoring" + return some State(SaleIgnored(reprocessSlot: false)) + except CancelledError as e: + trace "SaleSlotReserving.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during 
SaleSlotReserving.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/unknown.nim b/codex/sales/states/unknown.nim index 3034129a..d182d744 100644 --- a/codex/sales/states/unknown.nim +++ b/codex/sales/states/unknown.nim @@ -1,4 +1,5 @@ import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent import ./filled @@ -26,34 +27,42 @@ method onCancelled*(state: SaleUnknown, request: StorageRequest): ?State = method onFailed*(state: SaleUnknown, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleUnknown, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let market = agent.context.market - await agent.retrieveRequest() - await agent.subscribe() + try: + await agent.retrieveRequest() + await agent.subscribe() - let slotId = slotId(data.requestId, data.slotIndex) - let slotState = await market.slotState(slotId) + let slotId = slotId(data.requestId, data.slotIndex) + let slotState = await market.slotState(slotId) - case slotState - of SlotState.Free: - let error = - newException(UnexpectedSlotError, "Slot state on chain should not be 'free'") - return some State(SaleErrored(error: error)) - of SlotState.Filled: - return some State(SaleFilled()) - of SlotState.Finished: - return some State(SalePayout()) - of SlotState.Paid: - return some State(SaleFinished()) - of SlotState.Failed: - return some State(SaleFailed()) - of SlotState.Cancelled: - return some State(SaleCancelled()) - of SlotState.Repair: - let error = newException( - SlotFreedError, "Slot was forcible freed and host was removed from its hosting" - ) - return some State(SaleErrored(error: error)) + case slotState + of SlotState.Free: + let error = + newException(UnexpectedSlotError, "Slot state on chain should not be 'free'") + return some 
State(SaleErrored(error: error)) + of SlotState.Filled: + return some State(SaleFilled()) + of SlotState.Finished: + return some State(SalePayout()) + of SlotState.Paid: + return some State(SaleFinished()) + of SlotState.Failed: + return some State(SaleFailed()) + of SlotState.Cancelled: + return some State(SaleCancelled()) + of SlotState.Repair: + let error = newException( + SlotFreedError, "Slot was forcible freed and host was removed from its hosting" + ) + return some State(SaleErrored(error: error)) + except CancelledError as e: + trace "SaleUnknown.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleUnknown.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/slots/builder/builder.nim b/codex/slots/builder/builder.nim index 30332f1c..1ea57a0f 100644 --- a/codex/slots/builder/builder.nim +++ b/codex/slots/builder/builder.nim @@ -315,13 +315,15 @@ proc new*[T, H]( cellSize = cellSize if (manifest.blocksCount mod manifest.numSlots) != 0: - trace "Number of blocks must be divisable by number of slots." - return failure("Number of blocks must be divisable by number of slots.") + const msg = "Number of blocks must be divisible by number of slots." + trace msg + return failure(msg) let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize if (manifest.blockSize mod cellSize) != 0.NBytes: - trace "Block size must be divisable by cell size." - return failure("Block size must be divisable by cell size.") + const msg = "Block size must be divisible by cell size." + trace msg + return failure(msg) let numSlotBlocks = manifest.numSlotBlocks diff --git a/codex/slots/proofs/prover.nim b/codex/slots/proofs/prover.nim index 36fc0a05..b1aa77c0 100644 --- a/codex/slots/proofs/prover.nim +++ b/codex/slots/proofs/prover.nim @@ -38,7 +38,9 @@ type AnyProof* = CircomProof AnySampler* = Poseidon2Sampler + # add any other generic type here, eg. 
Poseidon2Sampler | ReinforceConcreteSampler AnyBuilder* = Poseidon2Builder + # add any other generic type here, eg. Poseidon2Builder | ReinforceConcreteBuilder AnyProofInputs* = ProofInputs[Poseidon2Hash] Prover* = ref object of RootObj diff --git a/codex/stores/maintenance.nim b/codex/stores/maintenance.nim index e7ce1bdf..cced5da9 100644 --- a/codex/stores/maintenance.nim +++ b/codex/stores/maintenance.nim @@ -22,8 +22,8 @@ import ../logutils import ../systemclock const - DefaultBlockMaintenanceInterval* = 10.minutes - DefaultNumberOfBlocksToMaintainPerInterval* = 1000 + DefaultBlockInterval* = 10.minutes + DefaultNumBlocksPerInterval* = 1000 type BlockMaintainer* = ref object of RootObj repoStore: RepoStore diff --git a/codex/stores/networkstore.nim b/codex/stores/networkstore.nim index faee36e1..f94bca33 100644 --- a/codex/stores/networkstore.nim +++ b/codex/stores/networkstore.nim @@ -137,6 +137,14 @@ method hasBlock*(self: NetworkStore, cid: Cid): Future[?!bool] {.async.} = trace "Checking network store for block existence", cid return await self.localStore.hasBlock(cid) +method hasBlock*( + self: NetworkStore, tree: Cid, index: Natural +): Future[?!bool] {.async.} = + ## Check if the block exists in the blockstore + ## + trace "Checking network store for block existence", tree, index + return await self.localStore.hasBlock(tree, index) + method close*(self: NetworkStore): Future[void] {.async.} = ## Close the underlying local blockstore ## diff --git a/codex/stores/repostore/operations.nim b/codex/stores/repostore/operations.nim index dcacbd62..cc488240 100644 --- a/codex/stores/repostore/operations.nim +++ b/codex/stores/repostore/operations.nim @@ -57,6 +57,17 @@ proc putLeafMetadata*( (md.some, res), ) +proc delLeafMetadata*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!void] {.async.} = + without key =? createBlockCidAndProofMetadataKey(treeCid, index), err: + return failure(err) + + if err =? 
(await self.metaDs.delete(key)).errorOption: + return failure(err) + + success() + proc getLeafMetadata*( self: RepoStore, treeCid: Cid, index: Natural ): Future[?!LeafMetadata] {.async.} = @@ -94,7 +105,7 @@ proc updateQuotaUsage*( minusUsed: NBytes = 0.NBytes, plusReserved: NBytes = 0.NBytes, minusReserved: NBytes = 0.NBytes, -): Future[?!void] {.async.} = +): Future[?!void] {.async: (raises: [CancelledError]).} = await self.metaDs.modify( QuotaUsedKey, proc(maybeCurrUsage: ?QuotaUsage): Future[?QuotaUsage] {.async.} = @@ -205,9 +216,6 @@ proc storeBlock*( proc tryDeleteBlock*( self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.low ): Future[?!DeleteResult] {.async.} = - if cid.isEmpty: - return success(DeleteResult(kind: InUse)) - without metaKey =? createBlockExpirationMetadataKey(cid), err: return failure(err) diff --git a/codex/stores/repostore/store.nim b/codex/stores/repostore/store.nim index 1137f3e4..f600043b 100644 --- a/codex/stores/repostore/store.nim +++ b/codex/stores/repostore/store.nim @@ -213,13 +213,13 @@ method putBlock*( return success() -method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = - ## Delete a block from the blockstore when block refCount is 0 or block is expired - ## - +proc delBlockInternal(self: RepoStore, cid: Cid): Future[?!DeleteResultKind] {.async.} = logScope: cid = cid + if cid.isEmpty: + return success(Deleted) + trace "Attempting to delete a block" without res =? await self.tryDeleteBlock(cid, self.clock.now()), err: @@ -232,12 +232,28 @@ method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = if err =? 
(await self.updateQuotaUsage(minusUsed = res.released)).errorOption: return failure(err) - elif res.kind == InUse: - trace "Block in use, refCount > 0 and not expired" - else: - trace "Block not found in store" - return success() + success(res.kind) + +method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = + ## Delete a block from the blockstore when block refCount is 0 or block is expired + ## + + logScope: + cid = cid + + without outcome =? await self.delBlockInternal(cid), err: + return failure(err) + + case outcome + of InUse: + failure("Directly deleting a block that is part of a dataset is not allowed.") + of NotFound: + trace "Block not found, ignoring" + success() + of Deleted: + trace "Block already deleted" + success() method delBlock*( self: RepoStore, treeCid: Cid, index: Natural @@ -248,12 +264,19 @@ method delBlock*( else: return failure(err) + if err =? (await self.delLeafMetadata(treeCid, index)).errorOption: + error "Failed to delete leaf metadata, block will remain on disk.", err = err.msg + return failure(err) + if err =? (await self.updateBlockMetadata(leafMd.blkCid, minusRefCount = 1)).errorOption: if not (err of BlockNotFoundError): return failure(err) - await self.delBlock(leafMd.blkCid) # safe delete, only if refCount == 0 + without _ =? await self.delBlockInternal(leafMd.blkCid), err: + return failure(err) + + success() method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} = ## Check if the block exists in the blockstore @@ -322,6 +345,18 @@ proc createBlockExpirationQuery(maxNumber: int, offset: int): ?!Query = let queryKey = ?createBlockExpirationMetadataQueryKey() success Query.init(queryKey, offset = offset, limit = maxNumber) +proc blockRefCount*(self: RepoStore, cid: Cid): Future[?!Natural] {.async.} = + ## Returns the reference count for a block. If the count is zero; + ## this means the block is eligible for garbage collection. + ## + without key =? 
createBlockExpirationMetadataKey(cid), err: + return failure(err) + + without md =? await get[BlockMetadata](self.metaDs, key), err: + return failure(err) + + return success(md.refCount) + method getBlockExpirations*( self: RepoStore, maxNumber: int, offset: int ): Future[?!AsyncIter[BlockExpiration]] {.async, base.} = @@ -372,7 +407,9 @@ method close*(self: RepoStore): Future[void] {.async.} = # RepoStore procs ########################################################### -proc reserve*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} = +proc reserve*( + self: RepoStore, bytes: NBytes +): Future[?!void] {.async: (raises: [CancelledError]).} = ## Reserve bytes ## @@ -380,7 +417,9 @@ proc reserve*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} = await self.updateQuotaUsage(plusReserved = bytes) -proc release*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} = +proc release*( + self: RepoStore, bytes: NBytes +): Future[?!void] {.async: (raises: [CancelledError]).} = ## Release bytes ## diff --git a/codex/stores/repostore/types.nim b/codex/stores/repostore/types.nim index 3d455d12..42f528e9 100644 --- a/codex/stores/repostore/types.nim +++ b/codex/stores/repostore/types.nim @@ -21,8 +21,8 @@ import ../../systemclock import ../../units const - DefaultBlockTtl* = 24.hours - DefaultQuotaBytes* = 8.GiBs + DefaultBlockTtl* = 30.days + DefaultQuotaBytes* = 20.GiBs type QuotaNotEnoughError* = object of CodexError diff --git a/codex/streams/asyncstreamwrapper.nim b/codex/streams/asyncstreamwrapper.nim index 6d5e703a..6708816d 100644 --- a/codex/streams/asyncstreamwrapper.nim +++ b/codex/streams/asyncstreamwrapper.nim @@ -57,6 +57,8 @@ template withExceptions(body: untyped) = raise newLPStreamEOFError() except AsyncStreamError as exc: raise newException(LPStreamError, exc.msg) + except CatchableError as exc: + raise newException(Defect, "Unexpected error in AsyncStreamWrapper", exc) method readOnce*( self: AsyncStreamWrapper, pbytes: pointer, nbytes: 
int @@ -74,11 +76,13 @@ method readOnce*( proc completeWrite( self: AsyncStreamWrapper, fut: Future[void], msgLen: int -): Future[void] {.async.} = +): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} = withExceptions: await fut -method write*(self: AsyncStreamWrapper, msg: seq[byte]): Future[void] = +method write*( + self: AsyncStreamWrapper, msg: seq[byte] +): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} = # Avoid a copy of msg being kept in the closure created by `{.async.}` as this # drives up memory usage diff --git a/codex/streams/storestream.nim b/codex/streams/storestream.nim index 85b0e354..64a356de 100644 --- a/codex/streams/storestream.nim +++ b/codex/streams/storestream.nim @@ -67,13 +67,9 @@ method atEof*(self: StoreStream): bool = self.offset >= self.size type LPStreamReadError* = object of LPStreamError - par*: ref CatchableError proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError = - var w = newException(LPStreamReadError, "Read stream failed") - w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg - w.par = p - result = w + newException(LPStreamReadError, "Read stream failed", p) method readOnce*( self: StoreStream, pbytes: pointer, nbytes: int @@ -110,7 +106,7 @@ method readOnce*( raise newLPStreamReadError(error) trace "Reading bytes from store stream", - manifestCid = self.manifest.cid.get(), + manifestCid = self.manifest.treeCid, numBlocks = self.manifest.blocksCount, blockNum, blkCid = blk.cid, diff --git a/codex/utils/arrayutils.nim b/codex/utils/arrayutils.nim new file mode 100644 index 00000000..e36a0cb3 --- /dev/null +++ b/codex/utils/arrayutils.nim @@ -0,0 +1,38 @@ +import std/sequtils + +proc createDoubleArray*( + outerLen, innerLen: int +): ptr UncheckedArray[ptr UncheckedArray[byte]] = + # Allocate outer array + result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](allocShared0( + sizeof(ptr UncheckedArray[byte]) * outerLen + )) + + # Allocate each 
inner array + for i in 0 ..< outerLen: + result[i] = cast[ptr UncheckedArray[byte]](allocShared0(sizeof(byte) * innerLen)) + +proc freeDoubleArray*( + arr: ptr UncheckedArray[ptr UncheckedArray[byte]], outerLen: int +) = + # Free each inner array + for i in 0 ..< outerLen: + if not arr[i].isNil: + deallocShared(arr[i]) + + # Free outer array + if not arr.isNil: + deallocShared(arr) + +proc makeUncheckedArray*( + data: ref seq[seq[byte]] +): ptr UncheckedArray[ptr UncheckedArray[byte]] = + result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](alloc0( + sizeof(ptr UncheckedArray[byte]) * data[].len + )) + + for i, blk in data[]: + if blk.len > 0: + result[i] = cast[ptr UncheckedArray[byte]](addr blk[0]) + else: + result[i] = nil diff --git a/codex/utils/asyncheapqueue.nim b/codex/utils/asyncheapqueue.nim index 1b0dd8bc..bc37c462 100644 --- a/codex/utils/asyncheapqueue.nim +++ b/codex/utils/asyncheapqueue.nim @@ -9,7 +9,7 @@ import std/sequtils import pkg/chronos -import pkg/stew/results +import pkg/results # Based on chronos AsyncHeapQueue and std/heapqueue diff --git a/codex/utils/asyncspawn.nim b/codex/utils/asyncspawn.nim deleted file mode 100644 index 95a9f014..00000000 --- a/codex/utils/asyncspawn.nim +++ /dev/null @@ -1,10 +0,0 @@ -import pkg/chronos - -proc asyncSpawn*(future: Future[void], ignore: type CatchableError) = - proc ignoringError() {.async.} = - try: - await future - except ignore: - discard - - asyncSpawn ignoringError() diff --git a/codex/utils/asyncstatemachine.nim b/codex/utils/asyncstatemachine.nim index 572ae246..eb84378c 100644 --- a/codex/utils/asyncstatemachine.nim +++ b/codex/utils/asyncstatemachine.nim @@ -2,6 +2,7 @@ import pkg/questionable import pkg/chronos import ../logutils import ./trackedfutures +import ./exceptions {.push raises: [].} @@ -46,24 +47,14 @@ proc schedule*(machine: Machine, event: Event) = except AsyncQueueFullError: raiseAssert "unlimited queue is full?!" 
-method run*(state: State, machine: Machine): Future[?State] {.base, async.} = +method run*( + state: State, machine: Machine +): Future[?State] {.base, async: (raises: []).} = discard -method onError*(state: State, error: ref CatchableError): ?State {.base.} = - raise (ref Defect)(msg: "error in state machine: " & error.msg, parent: error) - -proc onError(machine: Machine, error: ref CatchableError): Event = - return proc(state: State): ?State = - state.onError(error) - proc run(machine: Machine, state: State) {.async: (raises: []).} = - try: - if next =? await state.run(machine): - machine.schedule(Event.transition(state, next)) - except CancelledError: - discard # do not propagate - except CatchableError as e: - machine.schedule(machine.onError(e)) + if next =? await state.run(machine): + machine.schedule(Event.transition(state, next)) proc scheduler(machine: Machine) {.async: (raises: []).} = var running: Future[void].Raising([]) @@ -83,7 +74,6 @@ proc scheduler(machine: Machine) {.async: (raises: []).} = debug "enter state", state = fromState & " => " & $machine.state running = machine.run(machine.state) machine.trackedFutures.track(running) - asyncSpawn running except CancelledError: break # do not propagate bc it is asyncSpawned @@ -97,7 +87,6 @@ proc start*(machine: Machine, initialState: State) = machine.started = true let fut = machine.scheduler() machine.trackedFutures.track(fut) - asyncSpawn fut machine.schedule(Event.transition(machine.state, initialState)) proc stop*(machine: Machine) {.async.} = diff --git a/codex/utils/natutils.nim b/codex/utils/natutils.nim index 8a641e95..996d8dd0 100644 --- a/codex/utils/natutils.nim +++ b/codex/utils/natutils.nim @@ -1,7 +1,7 @@ {.push raises: [].} import - std/[tables, hashes], stew/results, stew/shims/net as stewNet, chronos, chronicles + std/[tables, hashes], pkg/results, pkg/stew/shims/net as stewNet, chronos, chronicles import pkg/libp2p diff --git a/codex/utils/timer.nim b/codex/utils/timer.nim index 
0a5a940a..5a9537cf 100644 --- a/codex/utils/timer.nim +++ b/codex/utils/timer.nim @@ -50,7 +50,6 @@ method start*( timer.callback = callback timer.interval = interval timer.loopFuture = timerLoop(timer) - asyncSpawn timer.loopFuture method stop*(timer: Timer) {.async, base.} = if timer.loopFuture != nil and not timer.loopFuture.finished: diff --git a/codex/utils/trackedfutures.nim b/codex/utils/trackedfutures.nim index eb3cc219..34007e08 100644 --- a/codex/utils/trackedfutures.nim +++ b/codex/utils/trackedfutures.nim @@ -5,9 +5,11 @@ import ../logutils {.push raises: [].} -type TrackedFutures* = ref object - futures: Table[uint, FutureBase] - cancelling: bool +type + TrackedFuture = Future[void].Raising([]) + TrackedFutures* = ref object + futures: Table[uint, TrackedFuture] + cancelling: bool logScope: topics = "trackable futures" @@ -15,15 +17,18 @@ logScope: proc len*(self: TrackedFutures): int = self.futures.len -proc removeFuture(self: TrackedFutures, future: FutureBase) = +proc removeFuture(self: TrackedFutures, future: TrackedFuture) = if not self.cancelling and not future.isNil: self.futures.del(future.id) -proc track*[T](self: TrackedFutures, fut: Future[T]) = +proc track*(self: TrackedFutures, fut: TrackedFuture) = if self.cancelling: return - self.futures[fut.id] = FutureBase(fut) + if fut.finished: + return + + self.futures[fut.id] = fut proc cb(udata: pointer) = self.removeFuture(fut) @@ -33,13 +38,8 @@ proc track*[T](self: TrackedFutures, fut: Future[T]) = proc cancelTracked*(self: TrackedFutures) {.async: (raises: []).} = self.cancelling = true - trace "cancelling tracked futures" - - var cancellations: seq[FutureBase] - for future in self.futures.values: - if not future.isNil and not future.finished: - cancellations.add future.cancelAndWait() - + trace "cancelling tracked futures", len = self.futures.len + let cancellations = self.futures.values.toSeq.mapIt(it.cancelAndWait()) await noCancel allFutures cancellations self.futures.clear() diff --git 
a/codex/validation.nim b/codex/validation.nim index 6e3135e4..e6d74840 100644 --- a/codex/validation.nim +++ b/codex/validation.nim @@ -2,6 +2,7 @@ import std/sets import std/sequtils import pkg/chronos import pkg/questionable/results +import pkg/stew/endians2 import ./validationconfig import ./market @@ -19,11 +20,9 @@ type Validation* = ref object subscriptions: seq[Subscription] running: Future[void] periodicity: Periodicity - proofTimeout: UInt256 + proofTimeout: uint64 config: ValidationConfig -const MaxStorageRequestDuration = 30.days - logScope: topics = "codex validator" @@ -35,18 +34,19 @@ proc new*( proc slots*(validation: Validation): seq[SlotId] = validation.slots.toSeq -proc getCurrentPeriod(validation: Validation): UInt256 = - return validation.periodicity.periodOf(validation.clock.now().u256) +proc getCurrentPeriod(validation: Validation): Period = + return validation.periodicity.periodOf(validation.clock.now().Timestamp) proc waitUntilNextPeriod(validation: Validation) {.async.} = let period = validation.getCurrentPeriod() let periodEnd = validation.periodicity.periodEnd(period) trace "Waiting until next period", currentPeriod = period - await validation.clock.waitUntil(periodEnd.truncate(int64) + 1) + await validation.clock.waitUntil((periodEnd + 1).toSecondsSince1970) func groupIndexForSlotId*(slotId: SlotId, validationGroups: ValidationGroups): uint16 = - let slotIdUInt256 = UInt256.fromBytesBE(slotId.toArray) - (slotIdUInt256 mod validationGroups.u256).truncate(uint16) + let a = slotId.toArray + let slotIdInt64 = uint64.fromBytesBE(a) + (slotIdInt64 mod uint64(validationGroups)).uint16 func maxSlotsConstraintRespected(validation: Validation): bool = validation.config.maxSlots == 0 or validation.slots.len < validation.config.maxSlots @@ -57,7 +57,7 @@ func shouldValidateSlot(validation: Validation, slotId: SlotId): bool = groupIndexForSlotId(slotId, validationGroups) == validation.config.groupIndex proc subscribeSlotFilled(validation: Validation) 
{.async.} = - proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + proc onSlotFilled(requestId: RequestId, slotIndex: uint64) = if not validation.maxSlotsConstraintRespected: return let slotId = slotId(requestId, slotIndex) @@ -115,14 +115,13 @@ proc run(validation: Validation) {.async: (raises: []).} = except CatchableError as e: error "Validation failed", msg = e.msg -proc epochForDurationBackFromNow( - validation: Validation, duration: Duration -): SecondsSince1970 = - return validation.clock.now - duration.secs +proc findEpoch(validation: Validation, secondsAgo: uint64): SecondsSince1970 = + return validation.clock.now - secondsAgo.int64 proc restoreHistoricalState(validation: Validation) {.async.} = trace "Restoring historical state..." - let startTimeEpoch = validation.epochForDurationBackFromNow(MaxStorageRequestDuration) + let requestDurationLimit = await validation.market.requestDurationLimit + let startTimeEpoch = validation.findEpoch(secondsAgo = requestDurationLimit) let slotFilledEvents = await validation.market.queryPastSlotFilledEvents(fromTime = startTimeEpoch) for event in slotFilledEvents: @@ -143,7 +142,6 @@ proc start*(validation: Validation) {.async.} = await validation.subscribeSlotFilled() await validation.restoreHistoricalState() validation.running = validation.run() - asyncSpawn validation.running proc stop*(validation: Validation) {.async.} = if not validation.running.isNil and not validation.running.finished: diff --git a/config.nims b/config.nims index 6a4767ad..05a31fff 100644 --- a/config.nims +++ b/config.nims @@ -1,21 +1,24 @@ - include "build.nims" import std/os const currentDir = currentSourcePath()[0 .. ^(len("config.nims") + 1)] when getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and - # BEWARE - # In Nim 1.6, config files are evaluated with a working directory - # matching where the Nim command was invocated. 
This means that we - # must do all file existence checks with full absolute paths: - system.fileExists(currentDir & "nimbus-build-system.paths"): + # BEWARE + # In Nim 1.6, config files are evaluated with a working directory + # matching where the Nim command was invocated. This means that we + # must do all file existence checks with full absolute paths: + system.fileExists(currentDir & "nimbus-build-system.paths"): include "nimbus-build-system.paths" when defined(release): - switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/release/$projectName")) + switch( + "nimcache", joinPath(currentSourcePath.parentDir, "nimcache/release/$projectName") + ) else: - switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/debug/$projectName")) + switch( + "nimcache", joinPath(currentSourcePath.parentDir, "nimcache/debug/$projectName") + ) when defined(limitStackUsage): # This limits stack usage of each individual function to 1MB - the option is @@ -34,7 +37,8 @@ when defined(windows): # increase stack size switch("passL", "-Wl,--stack,8388608") # https://github.com/nim-lang/Nim/issues/4057 - --tlsEmulation:off + --tlsEmulation: + off if defined(i386): # set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM switch("passL", "-Wl,--large-address-aware") @@ -63,30 +67,47 @@ else: # ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes) switch("passC", "-mno-avx512vl") ---tlsEmulation:off ---threads:on ---opt:speed ---excessiveStackTrace:on +--tlsEmulation: + off +--threads: + on +--opt: + speed +--excessiveStackTrace: + on # enable metric collection ---define:metrics +--define: + metrics # for heap-usage-by-instance-type metrics and object base-type strings ---define:nimTypeNames ---styleCheck:usages ---styleCheck:error ---maxLoopIterationsVM:1000000000 ---fieldChecks:on ---warningAsError:"ProveField:on" +--define: + nimTypeNames +--styleCheck: + usages +--styleCheck: + error 
+--maxLoopIterationsVM: + 1000000000 +--fieldChecks: + on +--warningAsError: + "ProveField:on" when (NimMajor, NimMinor) >= (1, 4): - --warning:"ObservableStores:off" - --warning:"LockLevel:off" - --hint:"XCannotRaiseY:off" + --warning: + "ObservableStores:off" + --warning: + "LockLevel:off" + --hint: + "XCannotRaiseY:off" when (NimMajor, NimMinor) >= (1, 6): - --warning:"DotLikeOps:off" + --warning: + "DotLikeOps:off" when (NimMajor, NimMinor, NimPatch) >= (1, 6, 11): - --warning:"BareExcept:off" + --warning: + "BareExcept:off" when (NimMajor, NimMinor) >= (2, 0): - --mm:refc + --mm: + refc switch("define", "withoutPCRE") @@ -94,10 +115,12 @@ switch("define", "withoutPCRE") # "--debugger:native" build. It can be increased with `ulimit -n 1024`. if not defined(macosx): # add debugging symbols and original files and line numbers - --debugger:native + --debugger: + native if not (defined(windows) and defined(i386)) and not defined(disable_libbacktrace): # light-weight stack traces using libbacktrace and libunwind - --define:nimStackTraceOverride + --define: + nimStackTraceOverride switch("import", "libbacktrace") # `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'" diff --git a/nix/default.nix b/nix/default.nix index 691e2af3..b5823f86 100644 --- a/nix/default.nix +++ b/nix/default.nix @@ -56,7 +56,7 @@ in pkgs.gcc13Stdenv.mkDerivation rec { fakeCargo ]; - # Disable CPU optmizations that make binary not portable. + # Disable CPU optimizations that make binary not portable. NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}"; # Avoid Nim cache permission errors. 
XDG_CACHE_HOME = "/tmp"; diff --git a/openapi.yaml b/openapi.yaml index 9d401e8f..8bae1b10 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -163,6 +163,14 @@ components: totalCollateral: type: string description: Total collateral (in amount of tokens) that can be used for matching requests + enabled: + type: boolean + description: Enable the ability to receive sales on this availability. + default: true + until: + type: integer + description: Specifies the latest timestamp, after which the availability will no longer host any slots. If set to 0, there will be no restrictions. + default: 0 SalesAvailabilityREAD: allOf: @@ -239,6 +247,9 @@ components: slotIndex: type: string description: Slot Index as decimal string + validUntil: + type: integer + description: Timestamp after which the reservation will no longer be valid. StorageRequestCreation: type: object @@ -325,6 +336,7 @@ components: - unknown error: type: string + nullable: true description: If Request failed, then here is presented the error message request: $ref: "#/components/schemas/StorageRequest" @@ -371,12 +383,6 @@ components: nullable: true description: "The original mimetype of the uploaded content (optional)" example: image/png - uploadedAt: - type: integer - format: int64 - nullable: true - description: "The UTC upload timestamp in seconds" - example: 1729244192 Space: type: object @@ -709,7 +715,7 @@ paths: "400": description: Invalid data input "422": - description: Not enough node's storage quota available + description: Not enough node's storage quota available or the provided parameters did not pass validation "500": description: Error reserving availability "503": @@ -742,7 +748,7 @@ paths: "404": description: Availability not found "422": - description: Not enough node's storage quota available + description: The provided parameters did not pass validation "500": description: Error reserving availability "503": @@ -805,6 +811,8 @@ paths: type: string "400": description: Invalid or missing 
Request ID + "422": + description: The storage request parameters are not valid "404": description: Request ID not found "503": diff --git a/tests/asynctest.nim b/tests/asynctest.nim index 7c6a4afd..4db8277f 100644 --- a/tests/asynctest.nim +++ b/tests/asynctest.nim @@ -1,3 +1,3 @@ -import pkg/asynctest/chronos/unittest +import pkg/asynctest/chronos/unittest2 -export unittest +export unittest2 diff --git a/tests/codex/blockexchange/discovery/testdiscovery.nim b/tests/codex/blockexchange/discovery/testdiscovery.nim index 88331c3f..c54a1fff 100644 --- a/tests/codex/blockexchange/discovery/testdiscovery.nim +++ b/tests/codex/blockexchange/discovery/testdiscovery.nim @@ -84,30 +84,30 @@ asyncchecksuite "Block Advertising and Discovery": blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async, gcsafe.} = + ): Future[void] {.async: (raises: [CancelledError]).} = return blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = await engine.resolveBlocks(blocks.filterIt(it.cid == cid)) await allFuturesThrowing(allFinished(pendingBlocks)) await engine.stop() - test "Should advertise both manifests and trees": - let - cids = @[manifest.cid.tryGet, manifest.treeCid] - advertised = initTable.collect: - for cid in cids: - {cid: newFuture[void]()} + test "Should advertise trees": + let cids = @[manifest.treeCid] + var advertised = initTable.collect: + for cid in cids: + {cid: newFuture[void]()} blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ) {.async.} = - if cid in advertised and not advertised[cid].finished(): - advertised[cid].complete() + ) {.async: (raises: [CancelledError]).} = + advertised.withValue(cid, fut): + if not fut[].finished: + fut[].complete() await engine.start() await allFuturesThrowing(allFinished(toSeq(advertised.values))) @@ -118,7 +118,7 @@ 
asyncchecksuite "Block Advertising and Discovery": blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ) {.async.} = + ) {.async: (raises: [CancelledError]).} = check: cid notin blockCids @@ -138,7 +138,7 @@ asyncchecksuite "Block Advertising and Discovery": blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check false await engine.start() @@ -221,17 +221,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[1].peerInfo.signedPeerRecord MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[2].peerInfo.signedPeerRecord MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[3].peerInfo.signedPeerRecord discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) @@ -266,23 +266,21 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async.} = - if cid in advertised: - result.add(advertised[cid]) + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = + advertised.withValue(cid, val): + result.add(val[]) let futs = collect(newSeq): for m in mBlocks[0 .. 
2]: blockexc[0].engine.requestBlock(m.cid) - await allFuturesThrowing( - switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) - ) - .wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds) await allFutures(futs).wait(10.seconds) - await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) - .wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds) test "E2E - Should advertise and discover blocks with peers already connected": # Distribute the blocks amongst 1..3 @@ -292,17 +290,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[1].peerInfo.signedPeerRecord MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[2].peerInfo.signedPeerRecord MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[3].peerInfo.signedPeerRecord discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) @@ -337,18 +335,16 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async.} = - if cid in advertised: - return @[advertised[cid]] + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = + advertised.withValue(cid, val): + return 
@[val[]] let futs = mBlocks[0 .. 2].mapIt(blockexc[0].engine.requestBlock(it.cid)) - await allFuturesThrowing( - switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) - ) - .wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds) await allFutures(futs).wait(10.seconds) - await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) - .wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds) diff --git a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim index 904703a0..9efab1a6 100644 --- a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim +++ b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim @@ -68,7 +68,7 @@ asyncchecksuite "Test Discovery Engine": blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = pendingBlocks.resolve( blocks.filterIt(it.cid == cid).mapIt( BlockDelivery(blk: it, address: it.address) @@ -76,7 +76,7 @@ asyncchecksuite "Test Discovery Engine": ) await discoveryEngine.start() - await allFuturesThrowing(allFinished(wants)).wait(1.seconds) + await allFuturesThrowing(allFinished(wants)).wait(100.millis) await discoveryEngine.stop() test "Should queue discovery request": @@ -94,14 +94,14 @@ asyncchecksuite "Test Discovery Engine": blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check cid == blocks[0].cid if not want.finished: want.complete() await discoveryEngine.start() 
discoveryEngine.queueFindBlocksReq(@[blocks[0].cid]) - await want.wait(1.seconds) + await want.wait(100.millis) await discoveryEngine.stop() test "Should not request more than minPeersPerBlock": @@ -122,7 +122,7 @@ asyncchecksuite "Test Discovery Engine": var pendingCids = newSeq[Cid]() blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check cid in pendingCids pendingCids.keepItIf(it != cid) check peerStore.len < minPeers @@ -159,12 +159,12 @@ asyncchecksuite "Test Discovery Engine": discoveryLoopSleep = 100.millis, concurrentDiscReqs = 2, ) - reqs = newFuture[void]() + reqs = Future[void].Raising([CancelledError]).init() count = 0 blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.gcsafe, async.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check cid == blocks[0].cid if count > 0: check false diff --git a/tests/codex/blockexchange/engine/testadvertiser.nim b/tests/codex/blockexchange/engine/testadvertiser.nim index 157564d6..83a70f65 100644 --- a/tests/codex/blockexchange/engine/testadvertiser.nim +++ b/tests/codex/blockexchange/engine/testadvertiser.nim @@ -34,7 +34,7 @@ asyncchecksuite "Advertiser": advertised = newSeq[Cid]() blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ) {.async, gcsafe.} = + ) {.async: (raises: [CancelledError]), gcsafe.} = advertised.add(cid) advertiser = Advertiser.new(localStore, blockDiscovery) diff --git a/tests/codex/blockexchange/engine/testblockexc.nim b/tests/codex/blockexchange/engine/testblockexc.nim index aa15f795..0c250231 100644 --- a/tests/codex/blockexchange/engine/testblockexc.nim +++ b/tests/codex/blockexchange/engine/testblockexc.nim @@ -1,5 +1,6 @@ import std/sequtils import std/algorithm +import std/importutils import pkg/chronos import 
pkg/stew/byteutils @@ -20,7 +21,7 @@ asyncchecksuite "NetworkStore engine - 2 nodes": peerCtx1, peerCtx2: BlockExcPeerCtx pricing1, pricing2: Pricing blocks1, blocks2: seq[bt.Block] - pendingBlocks1, pendingBlocks2: seq[Future[bt.Block]] + pendingBlocks1, pendingBlocks2: seq[BlockHandle] setup: blocks1 = await makeRandomBlocks(datasetSize = 2048, blockSize = 256'nb) @@ -56,7 +57,7 @@ asyncchecksuite "NetworkStore engine - 2 nodes": nodeCmps2.switch.peerInfo.peerId, nodeCmps2.switch.peerInfo.addrs ) - await sleepAsync(1.seconds) # give some time to exchange lists + await sleepAsync(100.millis) # give some time to exchange lists peerCtx2 = nodeCmps1.peerStore.get(nodeCmps2.switch.peerInfo.peerId) peerCtx1 = nodeCmps2.peerStore.get(nodeCmps1.switch.peerInfo.peerId) @@ -75,7 +76,6 @@ asyncchecksuite "NetworkStore engine - 2 nodes": test "Should exchange blocks on connect": await allFuturesThrowing(allFinished(pendingBlocks1)).wait(10.seconds) - await allFuturesThrowing(allFinished(pendingBlocks2)).wait(10.seconds) check: @@ -178,7 +178,7 @@ asyncchecksuite "NetworkStore - multiple nodes": (await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet() await connectNodes(nodes) - await sleepAsync(1.seconds) + await sleepAsync(100.millis) await allFuturesThrowing(allFinished(pendingBlocks)) @@ -203,45 +203,9 @@ asyncchecksuite "NetworkStore - multiple nodes": (await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet() await connectNodes(nodes) - await sleepAsync(1.seconds) + await sleepAsync(100.millis) await allFuturesThrowing(allFinished(pendingBlocks1), allFinished(pendingBlocks2)) check pendingBlocks1.mapIt(it.read) == blocks[0 .. 3] check pendingBlocks2.mapIt(it.read) == blocks[12 .. 
15] - - test "Should actively cancel want-haves if block received from elsewhere": - let - # Peer wanting to download blocks - downloader = nodes[4] - # Bystander peer - gets block request but can't satisfy them - bystander = nodes[3] - # Holder of actual blocks - blockHolder = nodes[1] - - let aBlock = blocks[0] - (await blockHolder.engine.localStore.putBlock(aBlock)).tryGet() - - await connectNodes(@[downloader, bystander]) - # Downloader asks for block... - let blockRequest = downloader.engine.requestBlock(aBlock.cid) - - # ... and bystander learns that downloader wants it, but can't provide it. - check eventually( - bystander.engine.peers - .get(downloader.switch.peerInfo.peerId).peerWants - .filterIt(it.address == aBlock.address).len == 1 - ) - - # As soon as we connect the downloader to the blockHolder, the block should - # propagate to the downloader... - await connectNodes(@[downloader, blockHolder]) - check (await blockRequest).tryGet().cid == aBlock.cid - check (await downloader.engine.localStore.hasBlock(aBlock.cid)).tryGet() - - # ... 
and the bystander should have cancelled the want-have - check eventually( - bystander.engine.peers - .get(downloader.switch.peerInfo.peerId).peerWants - .filterIt(it.address == aBlock.address).len == 0 - ) diff --git a/tests/codex/blockexchange/engine/testengine.nim b/tests/codex/blockexchange/engine/testengine.nim index f7cc8294..0541c119 100644 --- a/tests/codex/blockexchange/engine/testengine.nim +++ b/tests/codex/blockexchange/engine/testengine.nim @@ -20,6 +20,11 @@ import ../../../asynctest import ../../helpers import ../../examples +const NopSendWantCancellationsProc = proc( + id: PeerId, addresses: seq[BlockAddress] +) {.async: (raises: [CancelledError]).} = + discard + asyncchecksuite "NetworkStore engine basic": var rng: Rng @@ -61,20 +66,17 @@ asyncchecksuite "NetworkStore engine basic": wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt($it.cid).sorted done.complete() let network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList)) - localStore = CacheStore.new(blocks.mapIt(it)) discovery = DiscoveryEngine.new( localStore, peerStore, network, blockDiscovery, pendingBlocks ) - advertiser = Advertiser.new(localStore, blockDiscovery) - engine = BlockExcEngine.new( localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks ) @@ -88,7 +90,9 @@ asyncchecksuite "NetworkStore engine basic": test "Should send account to new peers": let pricing = Pricing.example - proc sendAccount(peer: PeerId, account: Account) {.gcsafe, async.} = + proc sendAccount( + peer: PeerId, account: Account + ) {.async: (raises: [CancelledError]).} = check account.address == pricing.address done.complete() @@ -129,11 +133,6 @@ asyncchecksuite "NetworkStore engine handlers": localStore: BlockStore blocks: seq[Block] - const NopSendWantCancellationsProc = proc( - id: PeerId, addresses: 
seq[BlockAddress] - ) {.gcsafe, async.} = - discard - setup: rng = Rng.instance() chunker = RandomChunker.new(rng, size = 1024'nb, chunkSize = 256'nb) @@ -186,7 +185,9 @@ asyncchecksuite "NetworkStore engine handlers": done = newFuture[void]() wantList = makeWantList(blocks.mapIt(it.cid)) - proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence( + peerId: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: [CancelledError]).} = check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) done.complete() @@ -203,7 +204,9 @@ asyncchecksuite "NetworkStore engine handlers": done = newFuture[void]() wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true) - proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence( + peerId: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: [CancelledError]).} = check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) for p in presence: check: @@ -222,7 +225,9 @@ asyncchecksuite "NetworkStore engine handlers": done = newFuture[void]() wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true) - proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence( + peerId: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: [CancelledError]).} = for p in presence: if p.address.cidOrTreeCid != blocks[0].cid and p.address.cidOrTreeCid != blocks[1].cid: @@ -266,19 +271,21 @@ asyncchecksuite "NetworkStore engine handlers": peerContext.account = account.some peerContext.blocks = blocks.mapIt( - (it.address, Presence(address: it.address, price: rand(uint16).u256)) + (it.address, Presence(address: it.address, price: rand(uint16).u256, have: true)) ).toTable engine.network = BlockExcNetwork( request: BlockExcRequest( - sendPayment: proc(receiver: PeerId, payment: SignedState) {.gcsafe, async.} = + sendPayment: proc( + receiver: PeerId, payment: 
SignedState + ) {.async: (raises: [CancelledError]).} = let - amount = blocks.mapIt(peerContext.blocks[it.address].price).foldl(a + b) - + amount = + blocks.mapIt(peerContext.blocks[it.address].catch.get.price).foldl(a + b) balances = !payment.state.outcome.balances(Asset) check receiver == peerId - check balances[account.address.toDestination] == amount + check balances[account.address.toDestination].catch.get == amount done.complete(), # Install NOP for want list cancellations so they don't cause a crash @@ -286,13 +293,16 @@ asyncchecksuite "NetworkStore engine handlers": ) ) + let requestedBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.address)) await engine.blocksDeliveryHandler( peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address)) ) await done.wait(100.millis) + await allFuturesThrowing(requestedBlocks).wait(100.millis) test "Should handle block presence": - var handles: Table[Cid, Future[Block]] + var handles: + Table[Cid, Future[Block].Raising([CancelledError, RetriesExhaustedError])] proc sendWantList( id: PeerId, @@ -302,7 +312,7 @@ asyncchecksuite "NetworkStore engine handlers": wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = engine.pendingBlocks.resolve( blocks.filterIt(it.address in addresses).mapIt( BlockDelivery(blk: it, address: it.address) @@ -333,20 +343,183 @@ asyncchecksuite "NetworkStore engine handlers": blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address)) cancellations = newTable(blocks.mapIt((it.address, newFuture[void]())).toSeq) + peerCtx.blocks = blocks.mapIt( + (it.address, Presence(address: it.address, have: true, price: UInt256.example)) + ).toTable + proc sendWantCancellations( id: PeerId, addresses: seq[BlockAddress] - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = for address in addresses: - cancellations[address].complete() + 
cancellations[address].catch.expect("address should exist").complete() engine.network = BlockExcNetwork( request: BlockExcRequest(sendWantCancellations: sendWantCancellations) ) await engine.blocksDeliveryHandler(peerId, blocksDelivery) - discard await allFinished(pending) + discard await allFinished(pending).wait(100.millis) await allFuturesThrowing(cancellations.values().toSeq) +asyncchecksuite "Block Download": + var + rng: Rng + seckey: PrivateKey + peerId: PeerId + chunker: Chunker + wallet: WalletRef + blockDiscovery: Discovery + peerStore: PeerCtxStore + pendingBlocks: PendingBlocksManager + network: BlockExcNetwork + engine: BlockExcEngine + discovery: DiscoveryEngine + advertiser: Advertiser + peerCtx: BlockExcPeerCtx + localStore: BlockStore + blocks: seq[Block] + + setup: + rng = Rng.instance() + chunker = RandomChunker.new(rng, size = 1024'nb, chunkSize = 256'nb) + + while true: + let chunk = await chunker.getBytes() + if chunk.len <= 0: + break + + blocks.add(Block.new(chunk).tryGet()) + + seckey = PrivateKey.random(rng[]).tryGet() + peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet() + wallet = WalletRef.example + blockDiscovery = Discovery.new() + peerStore = PeerCtxStore.new() + pendingBlocks = PendingBlocksManager.new() + + localStore = CacheStore.new() + network = BlockExcNetwork() + + discovery = + DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) + + advertiser = Advertiser.new(localStore, blockDiscovery) + + engine = BlockExcEngine.new( + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) + + peerCtx = BlockExcPeerCtx(id: peerId) + engine.peers.add(peerCtx) + + test "Should exhaust retries": + var + retries = 2 + address = BlockAddress.init(blocks[0].cid) + + proc sendWantList( + id: PeerId, + addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ) 
{.async: (raises: [CancelledError]).} = + check wantType == WantHave + check not engine.pendingBlocks.isInFlight(address) + check engine.pendingBlocks.retries(address) == retries + retries -= 1 + + engine.pendingBlocks.blockRetries = 2 + engine.pendingBlocks.retryInterval = 10.millis + engine.network = + BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList)) + + let pending = engine.requestBlock(address) + + expect RetriesExhaustedError: + discard (await pending).tryGet() + + test "Should retry block request": + var + address = BlockAddress.init(blocks[0].cid) + steps = newAsyncEvent() + + proc sendWantList( + id: PeerId, + addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ) {.async: (raises: [CancelledError]).} = + case wantType + of WantHave: + check engine.pendingBlocks.isInFlight(address) == false + check engine.pendingBlocks.retriesExhausted(address) == false + steps.fire() + of WantBlock: + check engine.pendingBlocks.isInFlight(address) == true + check engine.pendingBlocks.retriesExhausted(address) == false + steps.fire() + + engine.pendingBlocks.blockRetries = 10 + engine.pendingBlocks.retryInterval = 10.millis + engine.network = BlockExcNetwork( + request: BlockExcRequest( + sendWantList: sendWantList, sendWantCancellations: NopSendWantCancellationsProc + ) + ) + + let pending = engine.requestBlock(address) + await steps.wait() + + # add blocks precense + peerCtx.blocks = blocks.mapIt( + (it.address, Presence(address: it.address, have: true, price: UInt256.example)) + ).toTable + + steps.clear() + await steps.wait() + + await engine.blocksDeliveryHandler( + peerId, @[BlockDelivery(blk: blocks[0], address: address)] + ) + check (await pending).tryGet() == blocks[0] + + test "Should cancel block request": + var + address = BlockAddress.init(blocks[0].cid) + done = newFuture[void]() + + proc sendWantList( + id: PeerId, + 
addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ) {.async: (raises: [CancelledError]).} = + done.complete() + + engine.pendingBlocks.blockRetries = 10 + engine.pendingBlocks.retryInterval = 1.seconds + engine.network = BlockExcNetwork( + request: BlockExcRequest( + sendWantList: sendWantList, sendWantCancellations: NopSendWantCancellationsProc + ) + ) + + let pending = engine.requestBlock(address) + await done.wait(100.millis) + + pending.cancel() + expect CancelledError: + discard (await pending).tryGet() + asyncchecksuite "Task Handler": var rng: Rng @@ -409,7 +582,7 @@ asyncchecksuite "Task Handler": test "Should send want-blocks in priority order": proc sendBlocksDelivery( id: PeerId, blocksDelivery: seq[BlockDelivery] - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = check blocksDelivery.len == 2 check: blocksDelivery[1].address == blocks[0].address @@ -446,7 +619,7 @@ asyncchecksuite "Task Handler": test "Should set in-flight for outgoing blocks": proc sendBlocksDelivery( id: PeerId, blocksDelivery: seq[BlockDelivery] - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = check peersCtx[0].peerWants[0].inFlight for blk in blocks: @@ -485,7 +658,9 @@ asyncchecksuite "Task Handler": let missing = @[Block.new("missing".toBytes).tryGet()] let price = (!engine.pricing).price - proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence( + id: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: [CancelledError]).} = check presence.mapIt(!Presence.init(it)) == @[ Presence(address: present[0].address, have: true, price: price), diff --git a/tests/codex/blockexchange/engine/testpayments.nim b/tests/codex/blockexchange/engine/testpayments.nim index 24d5dab6..e93cc837 100644 --- a/tests/codex/blockexchange/engine/testpayments.nim +++ 
b/tests/codex/blockexchange/engine/testpayments.nim @@ -1,10 +1,10 @@ -import std/unittest +import pkg/unittest2 import pkg/codex/stores import ../../examples import ../../helpers -checksuite "engine payments": +suite "Engine payments": let address = EthAddress.example let amount = 42.u256 diff --git a/tests/codex/blockexchange/protobuf/testpayments.nim b/tests/codex/blockexchange/protobuf/testpayments.nim index d0773d70..3ada0105 100644 --- a/tests/codex/blockexchange/protobuf/testpayments.nim +++ b/tests/codex/blockexchange/protobuf/testpayments.nim @@ -6,7 +6,7 @@ import ../../../asynctest import ../../examples import ../../helpers -checksuite "account protobuf messages": +suite "account protobuf messages": let account = Account(address: EthAddress.example) let message = AccountMessage.init(account) @@ -21,7 +21,7 @@ checksuite "account protobuf messages": incorrect.address.del(0) check Account.init(incorrect).isNone -checksuite "channel update messages": +suite "channel update messages": let state = SignedState.example let update = StateChannelUpdate.init(state) diff --git a/tests/codex/blockexchange/protobuf/testpresence.nim b/tests/codex/blockexchange/protobuf/testpresence.nim index 7e3b94e6..dc048c59 100644 --- a/tests/codex/blockexchange/protobuf/testpresence.nim +++ b/tests/codex/blockexchange/protobuf/testpresence.nim @@ -6,7 +6,7 @@ import ../../../asynctest import ../../examples import ../../helpers -checksuite "block presence protobuf messages": +suite "block presence protobuf messages": let cid = Cid.example address = BlockAddress(leaf: false, cid: cid) diff --git a/tests/codex/blockexchange/testnetwork.nim b/tests/codex/blockexchange/testnetwork.nim index 0fae4ffe..b9a51c9d 100644 --- a/tests/codex/blockexchange/testnetwork.nim +++ b/tests/codex/blockexchange/testnetwork.nim @@ -26,7 +26,7 @@ asyncchecksuite "Network - Handlers": blocks: seq[bt.Block] done: Future[void] - proc getConn(): Future[Connection] {.async.} = + proc getConn(): 
Future[Connection] {.async: (raises: [CancelledError]).} = return Connection(buffer) setup: @@ -45,7 +45,7 @@ asyncchecksuite "Network - Handlers": discard await networkPeer.connect() test "Want List handler": - proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} = + proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} = # check that we got the correct amount of entries check wantList.entries.len == 4 @@ -72,7 +72,7 @@ asyncchecksuite "Network - Handlers": test "Blocks Handler": proc blocksDeliveryHandler( peer: PeerId, blocksDelivery: seq[BlockDelivery] - ) {.gcsafe, async.} = + ) {.async: (raises: []).} = check blocks == blocksDelivery.mapIt(it.blk) done.complete() @@ -85,7 +85,9 @@ asyncchecksuite "Network - Handlers": await done.wait(500.millis) test "Presence Handler": - proc presenceHandler(peer: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc presenceHandler( + peer: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: []).} = for b in blocks: check: b.address in presence @@ -105,7 +107,7 @@ asyncchecksuite "Network - Handlers": test "Handles account messages": let account = Account(address: EthAddress.example) - proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} = + proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} = check received == account done.complete() @@ -119,7 +121,7 @@ asyncchecksuite "Network - Handlers": test "Handles payment messages": let payment = SignedState.example - proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} = + proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} = check received == payment done.complete() @@ -165,7 +167,7 @@ asyncchecksuite "Network - Senders": await allFuturesThrowing(switch1.stop(), switch2.stop()) test "Send want list": - proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} = + proc wantListHandler(peer: PeerId, wantList: 
WantList) {.async: (raises: []).} = # check that we got the correct amount of entries check wantList.entries.len == 4 @@ -195,7 +197,7 @@ asyncchecksuite "Network - Senders": test "send blocks": proc blocksDeliveryHandler( peer: PeerId, blocksDelivery: seq[BlockDelivery] - ) {.gcsafe, async.} = + ) {.async: (raises: []).} = check blocks == blocksDelivery.mapIt(it.blk) done.complete() @@ -207,7 +209,9 @@ asyncchecksuite "Network - Senders": await done.wait(500.millis) test "send presence": - proc presenceHandler(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async.} = + proc presenceHandler( + peer: PeerId, precense: seq[BlockPresence] + ) {.async: (raises: []).} = for b in blocks: check: b.address in precense @@ -226,7 +230,7 @@ asyncchecksuite "Network - Senders": test "send account": let account = Account(address: EthAddress.example) - proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} = + proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} = check received == account done.complete() @@ -238,7 +242,7 @@ asyncchecksuite "Network - Senders": test "send payment": let payment = SignedState.example - proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} = + proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} = check received == payment done.complete() @@ -276,7 +280,7 @@ asyncchecksuite "Network - Test Limits": let account = Account(address: EthAddress.example) network2.handlers.onAccount = proc( peer: PeerId, received: Account - ) {.gcsafe, async.} = + ) {.async: (raises: []).} = check false let fut = network1.send( diff --git a/tests/codex/blockexchange/testpeerctxstore.nim b/tests/codex/blockexchange/testpeerctxstore.nim index 6ea601d1..e2983d10 100644 --- a/tests/codex/blockexchange/testpeerctxstore.nim +++ b/tests/codex/blockexchange/testpeerctxstore.nim @@ -1,7 +1,7 @@ import std/sugar import std/sequtils -import std/unittest +import pkg/unittest2 import pkg/libp2p 
import pkg/codex/blockexchange/peers @@ -11,7 +11,7 @@ import pkg/codex/blockexchange/protobuf/presence import ../helpers import ../examples -checksuite "Peer Context Store": +suite "Peer Context Store": var store: PeerCtxStore peerCtx: BlockExcPeerCtx @@ -31,7 +31,7 @@ checksuite "Peer Context Store": test "Should get peer": check store.get(peerCtx.id) == peerCtx -checksuite "Peer Context Store Peer Selection": +suite "Peer Context Store Peer Selection": var store: PeerCtxStore peerCtxs: seq[BlockExcPeerCtx] diff --git a/tests/codex/blockexchange/testpendingblocks.nim b/tests/codex/blockexchange/testpendingblocks.nim index 45b065c0..af1e6728 100644 --- a/tests/codex/blockexchange/testpendingblocks.nim +++ b/tests/codex/blockexchange/testpendingblocks.nim @@ -10,7 +10,7 @@ import pkg/codex/blockexchange import ../helpers import ../../asynctest -checksuite "Pending Blocks": +suite "Pending Blocks": test "Should add want handle": let pendingBlocks = PendingBlocksManager.new() @@ -28,7 +28,10 @@ checksuite "Pending Blocks": check blk.cid in pendingBlocks pendingBlocks.resolve(@[blk].mapIt(BlockDelivery(blk: it, address: it.address))) - check (await handle) == blk + await sleepAsync(0.millis) + # trigger the event loop, otherwise the block finishes before poll runs + let resolved = await handle + check resolved == blk check blk.cid notin pendingBlocks test "Should cancel want handle": @@ -41,20 +44,6 @@ checksuite "Pending Blocks": await handle.cancelAndWait() check blk.cid notin pendingBlocks - test "Should expire want handle": - let - pendingBlocks = PendingBlocksManager.new() - blk = bt.Block.new("Hello".toBytes).tryGet - handle = pendingBlocks.getWantHandle(blk.cid, 1.millis) - - check blk.cid in pendingBlocks - - await sleepAsync(10.millis) - expect AsyncTimeoutError: - discard await handle - - check blk.cid notin pendingBlocks - test "Should get wants list": let pendingBlocks = PendingBlocksManager.new() @@ -79,3 +68,19 @@ checksuite "Pending Blocks": check: 
(await allFinished(wantHandles)).mapIt($it.read.cid).sorted(cmp[string]) == (await allFinished(handles)).mapIt($it.read.cid).sorted(cmp[string]) + + test "Should handle retry counters": + let + pendingBlocks = PendingBlocksManager.new(3) + blk = bt.Block.new("Hello".toBytes).tryGet + address = BlockAddress.init(blk.cid) + handle = pendingBlocks.getWantHandle(blk.cid) + + check pendingBlocks.retries(address) == 3 + pendingBlocks.decRetries(address) + check pendingBlocks.retries(address) == 2 + pendingBlocks.decRetries(address) + check pendingBlocks.retries(address) == 1 + pendingBlocks.decRetries(address) + check pendingBlocks.retries(address) == 0 + check pendingBlocks.retriesExhausted(address) diff --git a/tests/codex/examples.nim b/tests/codex/examples.nim index 69a85db8..52b8a0b8 100644 --- a/tests/codex/examples.nim +++ b/tests/codex/examples.nim @@ -8,6 +8,7 @@ import pkg/codex/stores import pkg/codex/blocktype as bt import pkg/codex/sales import pkg/codex/merkletree +import pkg/codex/manifest import ../examples export examples @@ -36,8 +37,8 @@ proc example*(_: type SignedState): SignedState = proc example*(_: type Pricing): Pricing = Pricing(address: EthAddress.example, price: uint32.rand.u256) -proc example*(_: type bt.Block): bt.Block = - let length = rand(4096) +proc example*(_: type bt.Block, size: int = 4096): bt.Block = + let length = rand(size) let bytes = newSeqWith(length, rand(uint8)) bt.Block.new(bytes).tryGet() @@ -51,6 +52,15 @@ proc example*(_: type BlockExcPeerCtx): BlockExcPeerCtx = proc example*(_: type Cid): Cid = bt.Block.example.cid +proc example*(_: type Manifest): Manifest = + Manifest.new( + treeCid = Cid.example, + blockSize = 256.NBytes, + datasetSize = 4096.NBytes, + filename = "example.txt".some, + mimetype = "text/plain".some, + ) + proc example*(_: type MultiHash, mcodec = Sha256HashCodec): MultiHash = let bytes = newSeqWith(256, rand(uint8)) MultiHash.digest($mcodec, bytes).tryGet() @@ -58,19 +68,21 @@ proc example*(_: type 
MultiHash, mcodec = Sha256HashCodec): MultiHash = proc example*( _: type Availability, collateralPerByte = uint8.example.u256 ): Availability = - let totalSize = uint16.example.u256 + let totalSize = uint16.example.uint64 Availability.init( totalSize = totalSize, - freeSize = uint16.example.u256, - duration = uint16.example.u256, + freeSize = uint16.example.uint64, + duration = uint16.example.uint64, minPricePerBytePerSecond = uint8.example.u256, - totalCollateral = totalSize * collateralPerByte, + totalCollateral = totalSize.u256 * collateralPerByte, + enabled = true, + until = 0.SecondsSince1970, ) proc example*(_: type Reservation): Reservation = Reservation.init( availabilityId = AvailabilityId(array[32, byte].example), - size = uint16.example.u256, + size = uint16.example.uint64, slotId = SlotId.example, ) diff --git a/tests/codex/helpers.nim b/tests/codex/helpers.nim index 6d7415d3..898dd16e 100644 --- a/tests/codex/helpers.nim +++ b/tests/codex/helpers.nim @@ -85,30 +85,31 @@ proc makeWantList*( ) proc storeDataGetManifest*( - store: BlockStore, chunker: Chunker + store: BlockStore, blocks: seq[Block] ): Future[Manifest] {.async.} = - var cids = newSeq[Cid]() - - while (let chunk = await chunker.getBytes(); chunk.len > 0): - let blk = Block.new(chunk).tryGet() - cids.add(blk.cid) + for blk in blocks: (await store.putBlock(blk)).tryGet() let - tree = CodexTree.init(cids).tryGet() + (manifest, tree) = makeManifestAndTree(blocks).tryGet() treeCid = tree.rootCid.tryGet() - manifest = Manifest.new( - treeCid = treeCid, - blockSize = NBytes(chunker.chunkSize), - datasetSize = NBytes(chunker.offset), - ) for i in 0 ..< tree.leavesCount: let proof = tree.getProof(i).tryGet() - (await store.putCidAndProof(treeCid, i, cids[i], proof)).tryGet() + (await store.putCidAndProof(treeCid, i, blocks[i].cid, proof)).tryGet() return manifest +proc storeDataGetManifest*( + store: BlockStore, chunker: Chunker +): Future[Manifest] {.async.} = + var blocks = newSeq[Block]() + + 
while (let chunk = await chunker.getBytes(); chunk.len > 0): + blocks.add(Block.new(chunk).tryGet()) + + return await storeDataGetManifest(store, blocks) + proc makeRandomBlocks*( datasetSize: int, blockSize: NBytes ): Future[seq[Block]] {.async.} = diff --git a/tests/codex/helpers/mockchunker.nim b/tests/codex/helpers/mockchunker.nim index 0d38cf3b..eb51f7ca 100644 --- a/tests/codex/helpers/mockchunker.nim +++ b/tests/codex/helpers/mockchunker.nim @@ -21,7 +21,7 @@ proc new*( var consumed = 0 proc reader( data: ChunkBuffer, len: int - ): Future[int] {.async, gcsafe, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = if consumed >= dataset.len: return 0 diff --git a/tests/codex/helpers/mockdiscovery.nim b/tests/codex/helpers/mockdiscovery.nim index 42ad76a9..4110c577 100644 --- a/tests/codex/helpers/mockdiscovery.nim +++ b/tests/codex/helpers/mockdiscovery.nim @@ -14,29 +14,42 @@ import pkg/codex/discovery import pkg/contractabi/address as ca type MockDiscovery* = ref object of Discovery - findBlockProvidersHandler*: - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.gcsafe.} - publishBlockProvideHandler*: proc(d: MockDiscovery, cid: Cid): Future[void] {.gcsafe.} - findHostProvidersHandler*: - proc(d: MockDiscovery, host: ca.Address): Future[seq[SignedPeerRecord]] {.gcsafe.} - publishHostProvideHandler*: - proc(d: MockDiscovery, host: ca.Address): Future[void] {.gcsafe.} + findBlockProvidersHandler*: proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} + + publishBlockProvideHandler*: + proc(d: MockDiscovery, cid: Cid): Future[void] {.async: (raises: [CancelledError]).} + + findHostProvidersHandler*: proc( + d: MockDiscovery, host: ca.Address + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} + + publishHostProvideHandler*: proc(d: MockDiscovery, host: ca.Address): Future[void] {. 
+ async: (raises: [CancelledError]) + .} proc new*(T: type MockDiscovery): MockDiscovery = MockDiscovery() -proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} = +proc findPeer*( + d: Discovery, peerId: PeerId +): Future[?PeerRecord] {.async: (raises: [CancelledError]).} = ## mock find a peer - always return none - ## + ## return none(PeerRecord) -method find*(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = +method find*( + d: MockDiscovery, cid: Cid +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = if isNil(d.findBlockProvidersHandler): return return await d.findBlockProvidersHandler(d, cid) -method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} = +method provide*( + d: MockDiscovery, cid: Cid +): Future[void] {.async: (raises: [CancelledError]).} = if isNil(d.publishBlockProvideHandler): return @@ -44,13 +57,15 @@ method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} = method find*( d: MockDiscovery, host: ca.Address -): Future[seq[SignedPeerRecord]] {.async.} = +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = if isNil(d.findHostProvidersHandler): return return await d.findHostProvidersHandler(d, host) -method provide*(d: MockDiscovery, host: ca.Address): Future[void] {.async.} = +method provide*( + d: MockDiscovery, host: ca.Address +): Future[void] {.async: (raises: [CancelledError]).} = if isNil(d.publishHostProvideHandler): return diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index bb0eaaa2..55abeb14 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -8,6 +8,7 @@ import pkg/codex/market import pkg/codex/contracts/requests import pkg/codex/contracts/proofs import pkg/codex/contracts/config +import pkg/questionable/results from pkg/ethers import BlockTag import codex/clock @@ -46,7 +47,10 @@ type subscriptions: Subscriptions config*: MarketplaceConfig 
canReserveSlot*: bool - reserveSlotThrowError*: ?(ref MarketError) + errorOnReserveSlot*: ?(ref MarketError) + errorOnFillSlot*: ?(ref MarketError) + errorOnFreeSlot*: ?(ref MarketError) + errorOnGetHost*: ?(ref MarketError) clock: ?Clock Fulfillment* = object @@ -57,7 +61,7 @@ type MockSlot* = object requestId*: RequestId host*: Address - slotIndex*: UInt256 + slotIndex*: uint64 proof*: Groth16Proof timestamp: ?SecondsSince1970 collateral*: UInt256 @@ -84,7 +88,7 @@ type SlotFilledSubscription* = ref object of Subscription market: MockMarket requestId: ?RequestId - slotIndex: ?UInt256 + slotIndex: ?uint64 callback: OnSlotFilled SlotFreedSubscription* = ref object of Subscription @@ -122,36 +126,61 @@ proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket = collateral: CollateralConfig( repairRewardPercentage: 10, maxNumberOfSlashes: 5, - slashCriterion: 3, slashPercentage: 10, + validatorRewardPercentage: 20, ), proofs: ProofConfig( - period: 10.u256, timeout: 5.u256, downtime: 64.uint8, downtimeProduct: 67.uint8 + period: 10.Period, + timeout: 5.uint64, + downtime: 64.uint8, + downtimeProduct: 67.uint8, ), + reservations: SlotReservationsConfig(maxReservations: 3), + requestDurationLimit: (60 * 60 * 24 * 30).uint64, ) MockMarket( signer: Address.example, config: config, canReserveSlot: true, clock: clock ) -method getSigner*(market: MockMarket): Future[Address] {.async.} = +method loadConfig*( + market: MockMarket +): Future[?!void] {.async: (raises: [CancelledError]).} = + discard + +method getSigner*( + market: MockMarket +): Future[Address] {.async: (raises: [CancelledError, MarketError]).} = return market.signer -method periodicity*(mock: MockMarket): Future[Periodicity] {.async.} = +method periodicity*( + mock: MockMarket +): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} = return Periodicity(seconds: mock.config.proofs.period) -method proofTimeout*(market: MockMarket): Future[UInt256] {.async.} = +method proofTimeout*( 
+ market: MockMarket +): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} = return market.config.proofs.timeout -method proofDowntime*(market: MockMarket): Future[uint8] {.async.} = +method requestDurationLimit*(market: MockMarket): Future[uint64] {.async.} = + return market.config.requestDurationLimit + +method proofDowntime*( + market: MockMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = return market.config.proofs.downtime -method repairRewardPercentage*(market: MockMarket): Future[uint8] {.async.} = +method repairRewardPercentage*( + market: MockMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = return market.config.collateral.repairRewardPercentage method getPointer*(market: MockMarket, slotId: SlotId): Future[uint8] {.async.} = return market.proofPointer -method requestStorage*(market: MockMarket, request: StorageRequest) {.async.} = +method requestStorage*( + market: MockMarket, request: StorageRequest +) {.async: (raises: [CancelledError, MarketError]).} = market.requested.add(request) var subscriptions = market.subscriptions.onRequest for subscription in subscriptions: @@ -165,15 +194,15 @@ method mySlots*(market: MockMarket): Future[seq[SlotId]] {.async.} = method getRequest*( market: MockMarket, id: RequestId -): Future[?StorageRequest] {.async.} = +): Future[?StorageRequest] {.async: (raises: [CancelledError]).} = for request in market.requested: if request.id == id: return some request return none StorageRequest -method getActiveSlot*(market: MockMarket, slotId: SlotId): Future[?Slot] {.async.} = +method getActiveSlot*(market: MockMarket, id: SlotId): Future[?Slot] {.async.} = for slot in market.filled: - if slotId(slot.requestId, slot.slotIndex) == slotId and + if slotId(slot.requestId, slot.slotIndex) == id and request =? 
await market.getRequest(slot.requestId): return some Slot(request: request, slotIndex: slot.slotIndex) return none Slot @@ -183,10 +212,16 @@ method requestState*( ): Future[?RequestState] {.async.} = return market.requestState .? [requestId] -method slotState*(market: MockMarket, slotId: SlotId): Future[SlotState] {.async.} = - if not market.slotState.hasKey(slotId): +method slotState*( + market: MockMarket, slotId: SlotId +): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} = + if slotId notin market.slotState: return SlotState.Free - return market.slotState[slotId] + + try: + return market.slotState[slotId] + except KeyError as e: + raiseAssert "SlotId not found in known slots (MockMarket.slotState)" method getRequestEnd*( market: MockMarket, id: RequestId @@ -199,8 +234,11 @@ method requestExpiresAt*( return market.requestExpiry[id] method getHost*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 -): Future[?Address] {.async.} = + market: MockMarket, requestId: RequestId, slotIndex: uint64 +): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} = + if error =? 
market.errorOnGetHost: + raise error + for slot in market.filled: if slot.requestId == requestId and slot.slotIndex == slotIndex: return some slot.host @@ -208,13 +246,13 @@ method getHost*( method currentCollateral*( market: MockMarket, slotId: SlotId -): Future[UInt256] {.async.} = +): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} = for slot in market.filled: if slotId == slotId(slot.requestId, slot.slotIndex): return slot.collateral return 0.u256 -proc emitSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: UInt256) = +proc emitSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: uint64) = var subscriptions = market.subscriptions.onSlotFilled for subscription in subscriptions: let requestMatches = @@ -224,13 +262,13 @@ proc emitSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: UInt25 if requestMatches and slotMatches: subscription.callback(requestId, slotIndex) -proc emitSlotFreed*(market: MockMarket, requestId: RequestId, slotIndex: UInt256) = +proc emitSlotFreed*(market: MockMarket, requestId: RequestId, slotIndex: uint64) = var subscriptions = market.subscriptions.onSlotFreed for subscription in subscriptions: subscription.callback(requestId, slotIndex) proc emitSlotReservationsFull*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 + market: MockMarket, requestId: RequestId, slotIndex: uint64 ) = var subscriptions = market.subscriptions.onSlotReservationsFull for subscription in subscriptions: @@ -257,11 +295,14 @@ proc emitRequestFailed*(market: MockMarket, requestId: RequestId) = proc fillSlot*( market: MockMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, host: Address, collateral = 0.u256, ) = + if error =? 
market.errorOnFillSlot: + raise error + let slot = MockSlot( requestId: requestId, slotIndex: slotIndex, @@ -277,13 +318,18 @@ proc fillSlot*( method fillSlot*( market: MockMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, -) {.async.} = +) {.async: (raises: [CancelledError, MarketError]).} = market.fillSlot(requestId, slotIndex, proof, market.signer, collateral) -method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} = +method freeSlot*( + market: MockMarket, slotId: SlotId +) {.async: (raises: [CancelledError, MarketError]).} = + if error =? market.errorOnFreeSlot: + raise error + market.freed.add(slotId) for s in market.filled: if slotId(s.requestId, s.slotIndex) == slotId: @@ -291,7 +337,9 @@ method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} = break market.slotState[slotId] = SlotState.Free -method withdrawFunds*(market: MockMarket, requestId: RequestId) {.async.} = +method withdrawFunds*( + market: MockMarket, requestId: RequestId +) {.async: (raises: [CancelledError, MarketError]).} = market.withdrawn.add(requestId) if state =? market.requestState .? 
[requestId] and state == RequestState.Cancelled: @@ -321,12 +369,16 @@ method getChallenge*(mock: MockMarket, id: SlotId): Future[ProofChallenge] {.asy proc setProofEnd*(mock: MockMarket, id: SlotId, proofEnd: UInt256) = mock.proofEnds[id] = proofEnd -method submitProof*(mock: MockMarket, id: SlotId, proof: Groth16Proof) {.async.} = +method submitProof*( + mock: MockMarket, id: SlotId, proof: Groth16Proof +) {.async: (raises: [CancelledError, MarketError]).} = mock.submitted.add(proof) for subscription in mock.subscriptions.onProofSubmitted: subscription.callback(id) -method markProofAsMissing*(market: MockMarket, id: SlotId, period: Period) {.async.} = +method markProofAsMissing*( + market: MockMarket, id: SlotId, period: Period +) {.async: (raises: [CancelledError, MarketError]).} = market.markedAsMissingProofs.add(id) proc setCanProofBeMarkedAsMissing*(mock: MockMarket, id: SlotId, required: bool) = @@ -341,21 +393,46 @@ method canProofBeMarkedAsMissing*( return market.canBeMarkedAsMissing.contains(id) method reserveSlot*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 -) {.async.} = - if error =? market.reserveSlotThrowError: + market: MockMarket, requestId: RequestId, slotIndex: uint64 +) {.async: (raises: [CancelledError, MarketError]).} = + if error =? 
market.errorOnReserveSlot: raise error method canReserveSlot*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 + market: MockMarket, requestId: RequestId, slotIndex: uint64 ): Future[bool] {.async.} = return market.canReserveSlot func setCanReserveSlot*(market: MockMarket, canReserveSlot: bool) = market.canReserveSlot = canReserveSlot -func setReserveSlotThrowError*(market: MockMarket, error: ?(ref MarketError)) = - market.reserveSlotThrowError = error +func setErrorOnReserveSlot*(market: MockMarket, error: ref MarketError) = + market.errorOnReserveSlot = + if error.isNil: + none (ref MarketError) + else: + some error + +func setErrorOnFillSlot*(market: MockMarket, error: ref MarketError) = + market.errorOnFillSlot = + if error.isNil: + none (ref MarketError) + else: + some error + +func setErrorOnFreeSlot*(market: MockMarket, error: ref MarketError) = + market.errorOnFreeSlot = + if error.isNil: + none (ref MarketError) + else: + some error + +func setErrorOnGetHost*(market: MockMarket, error: ref MarketError) = + market.errorOnGetHost = + if error.isNil: + none (ref MarketError) + else: + some error method subscribeRequests*( market: MockMarket, callback: OnRequest @@ -390,7 +467,7 @@ method subscribeSlotFilled*( return subscription method subscribeSlotFilled*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled + market: MockMarket, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled ): Future[Subscription] {.async.} = let subscription = SlotFilledSubscription( market: market, @@ -526,3 +603,33 @@ method unsubscribe*(subscription: ProofSubmittedSubscription) {.async.} = method unsubscribe*(subscription: SlotReservationsFullSubscription) {.async.} = subscription.market.subscriptions.onSlotReservationsFull.keepItIf(it != subscription) + +method slotCollateral*( + market: MockMarket, requestId: RequestId, slotIndex: uint64 +): Future[?!UInt256] {.async: (raises: [CancelledError]).} = + let slotid = 
slotId(requestId, slotIndex) + + try: + let state = await slotState(market, slotid) + + without request =? await market.getRequest(requestId): + return failure newException( + MarketError, "Failure calculating the slotCollateral, cannot get the request" + ) + + return market.slotCollateral(request.ask.collateralPerSlot, state) + except MarketError as error: + error "Error when trying to calculate the slotCollateral", error = error.msg + return failure error + +method slotCollateral*( + market: MockMarket, collateralPerSlot: UInt256, slotState: SlotState +): ?!UInt256 {.raises: [].} = + if slotState == SlotState.Repair: + let repairRewardPercentage = market.config.collateral.repairRewardPercentage.u256 + + return success ( + collateralPerSlot - (collateralPerSlot * repairRewardPercentage).div(100.u256) + ) + + return success collateralPerSlot diff --git a/tests/codex/helpers/mockreservations.nim b/tests/codex/helpers/mockreservations.nim index 060790a8..91ed04ec 100644 --- a/tests/codex/helpers/mockreservations.nim +++ b/tests/codex/helpers/mockreservations.nim @@ -2,6 +2,7 @@ import pkg/chronos import pkg/codex/sales import pkg/codex/stores import pkg/questionable/results +import pkg/codex/clock type MockReservations* = ref object of Reservations createReservationThrowBytesOutOfBoundsError: bool @@ -24,10 +25,11 @@ proc setCreateReservationThrowError*( method createReservation*( self: MockReservations, availabilityId: AvailabilityId, - slotSize: UInt256, + slotSize: uint64, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, collateralPerByte: UInt256, + validUntil: SecondsSince1970, ): Future[?!Reservation] {.async.} = if self.createReservationThrowBytesOutOfBoundsError: let error = newException( @@ -45,4 +47,5 @@ method createReservation*( requestId, slotIndex, collateralPerByte, + validUntil, ) diff --git a/tests/codex/helpers/mocksalesagent.nim b/tests/codex/helpers/mocksalesagent.nim index 8374ae1d..d5de265a 100644 --- 
a/tests/codex/helpers/mocksalesagent.nim +++ b/tests/codex/helpers/mocksalesagent.nim @@ -12,6 +12,6 @@ method onFailed*(agent: SalesAgent, requestId: RequestId) = failedCalled = true method onSlotFilled*( - agent: SalesAgent, requestId: RequestId, slotIndex: UInt256 + agent: SalesAgent, requestId: RequestId, slotIndex: uint64 ) {.base.} = slotFilledCalled = true diff --git a/tests/codex/helpers/mockslotqueueitem.nim b/tests/codex/helpers/mockslotqueueitem.nim index bc0c1047..8657850f 100644 --- a/tests/codex/helpers/mockslotqueueitem.nim +++ b/tests/codex/helpers/mockslotqueueitem.nim @@ -4,11 +4,11 @@ import pkg/codex/sales/slotqueue type MockSlotQueueItem* = object requestId*: RequestId slotIndex*: uint16 - slotSize*: UInt256 - duration*: UInt256 + slotSize*: uint64 + duration*: uint64 pricePerBytePerSecond*: UInt256 - collateralPerByte*: UInt256 - expiry*: UInt256 + collateral*: UInt256 + expiry*: uint64 seen*: bool proc toSlotQueueItem*(item: MockSlotQueueItem): SlotQueueItem = @@ -19,8 +19,8 @@ proc toSlotQueueItem*(item: MockSlotQueueItem): SlotQueueItem = slotSize: item.slotSize, duration: item.duration, pricePerBytePerSecond: item.pricePerBytePerSecond, - collateralPerByte: item.collateralPerByte, ), expiry = item.expiry, seen = item.seen, + collateral = item.collateral, ) diff --git a/tests/codex/helpers/randomchunker.nim b/tests/codex/helpers/randomchunker.nim index b482f67f..cf857595 100644 --- a/tests/codex/helpers/randomchunker.nim +++ b/tests/codex/helpers/randomchunker.nim @@ -26,7 +26,7 @@ proc new*( var consumed = 0 proc reader( data: ChunkBuffer, len: int - ): Future[int] {.async, gcsafe, raises: [Defect].} = + ): Future[int] {.async: (raises: [ChunkerError, CancelledError]), gcsafe.} = var alpha = toSeq(byte('A') .. 
byte('z')) if consumed >= size: diff --git a/tests/codex/merkletree/generictreetests.nim b/tests/codex/merkletree/generictreetests.nim index 0e1f7c9f..6244bc1c 100644 --- a/tests/codex/merkletree/generictreetests.nim +++ b/tests/codex/merkletree/generictreetests.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/codex/merkletree diff --git a/tests/codex/merkletree/testcodexcoders.nim b/tests/codex/merkletree/testcodexcoders.nim index d9544083..6da56844 100644 --- a/tests/codex/merkletree/testcodexcoders.nim +++ b/tests/codex/merkletree/testcodexcoders.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/questionable/results import pkg/stew/byteutils @@ -18,7 +18,7 @@ const data = [ "00000000000000000000000000000009".toBytes, "00000000000000000000000000000010".toBytes, ] -checksuite "merkletree - coders": +suite "merkletree - coders": test "encoding and decoding a tree yields the same tree": let tree = CodexTree.init(Sha256HashCodec, data).tryGet() diff --git a/tests/codex/merkletree/testcodextree.nim b/tests/codex/merkletree/testcodextree.nim index c4713d40..29390c16 100644 --- a/tests/codex/merkletree/testcodextree.nim +++ b/tests/codex/merkletree/testcodextree.nim @@ -1,6 +1,6 @@ -import std/unittest import std/sequtils +import pkg/unittest2 import pkg/questionable/results import pkg/stew/byteutils import pkg/libp2p diff --git a/tests/codex/merkletree/testmerkledigest.nim b/tests/codex/merkletree/testmerkledigest.nim index ccb138da..4cc2d197 100644 --- a/tests/codex/merkletree/testmerkledigest.nim +++ b/tests/codex/merkletree/testmerkledigest.nim @@ -1,7 +1,7 @@ -import std/unittest import std/sequtils import std/random +import pkg/unittest2 import pkg/poseidon2 import pkg/poseidon2/sponge diff --git a/tests/codex/merkletree/testposeidon2tree.nim b/tests/codex/merkletree/testposeidon2tree.nim index f60fdb39..e12751b7 100644 --- a/tests/codex/merkletree/testposeidon2tree.nim +++ 
b/tests/codex/merkletree/testposeidon2tree.nim @@ -1,6 +1,6 @@ -import std/unittest import std/sequtils +import pkg/unittest2 import pkg/poseidon2 import pkg/poseidon2/io import pkg/questionable/results diff --git a/tests/codex/node/helpers.nim b/tests/codex/node/helpers.nim index 0d72b06b..a28a1f37 100644 --- a/tests/codex/node/helpers.nim +++ b/tests/codex/node/helpers.nim @@ -6,6 +6,7 @@ import pkg/chronos import pkg/codex/codextypes import pkg/codex/chunker import pkg/codex/stores +import pkg/taskpools import ../../asynctest @@ -118,10 +119,11 @@ template setupAndTearDown*() {.dirty.} = engine = engine, prover = Prover.none, discovery = blockDiscovery, + taskpool = Taskpool.new(), ) teardown: - close(file) + file.close() await node.stop() await metaTmp.destroyDb() await repoTmp.destroyDb() diff --git a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim index cce6d5bd..73dd8daf 100644 --- a/tests/codex/node/testcontracts.nim +++ b/tests/codex/node/testcontracts.nim @@ -75,10 +75,9 @@ asyncchecksuite "Test Node - Host contracts": let manifestBlock = bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, Taskpool.new) manifestCid = manifestBlock.cid - manifestCidStr = $(manifestCid) (await localStore.putBlock(manifestBlock)).tryGet() @@ -99,7 +98,7 @@ asyncchecksuite "Test Node - Host contracts": expectedExpiry: SecondsSince1970 = clock.now + DefaultBlockTtl.seconds + 11123 expiryUpdateCallback = !sales.onExpiryUpdate - (await expiryUpdateCallback(manifestCidStr, expectedExpiry)).tryGet() + (await expiryUpdateCallback(manifestCid, expectedExpiry)).tryGet() for index in 0 ..< manifest.blocksCount: let @@ -116,8 +115,9 @@ asyncchecksuite "Test Node - Host contracts": test "onStore callback": let onStore = !sales.onStore var request = StorageRequest.example - request.content.cid = 
$verifiableBlock.cid - request.expiry = (getTime() + DefaultBlockTtl.toTimesDuration + 1.hours).toUnix.u256 + request.content.cid = verifiableBlock.cid + request.expiry = + (getTime() + DefaultBlockTtl.toTimesDuration + 1.hours).toUnix.uint64 var fetchedBytes: uint = 0 let onBlocks = proc(blocks: seq[bt.Block]): Future[?!void] {.async.} = @@ -125,7 +125,7 @@ asyncchecksuite "Test Node - Host contracts": fetchedBytes += blk.data.len.uint return success() - (await onStore(request, 1.u256, onBlocks)).tryGet() + (await onStore(request, 1.uint64, onBlocks, isRepairing = false)).tryGet() check fetchedBytes == 12 * DefaultBlockSize.uint let indexer = verifiable.protectedStrategy.init( diff --git a/tests/codex/node/testnode.nim b/tests/codex/node/testnode.nim index e4a9d1f4..511badef 100644 --- a/tests/codex/node/testnode.nim +++ b/tests/codex/node/testnode.nim @@ -12,6 +12,7 @@ import pkg/questionable/results import pkg/stint import pkg/poseidon2 import pkg/poseidon2/io +import pkg/taskpools import pkg/nitro import pkg/codexdht/discv5/protocol as discv5 @@ -37,6 +38,7 @@ import ../examples import ../helpers import ../helpers/mockmarket import ../helpers/mockclock +import ../slots/helpers import ./helpers @@ -62,21 +64,6 @@ asyncchecksuite "Test Node - Basic": check: fetched == manifest - test "Should not lookup non-existing blocks twice": - # https://github.com/codex-storage/nim-codex/issues/699 - let - cstore = CountingStore.new(engine, localStore) - node = CodexNodeRef.new(switch, cstore, engine, blockDiscovery) - missingCid = - Cid.init("zDvZRwzmCvtiyubW9AecnxgLnXK8GrBvpQJBDzToxmzDN6Nrc2CZ").get() - - engine.blockFetchTimeout = timer.milliseconds(100) - - discard await node.retrieve(missingCid, local = false) - - let lookupCount = cstore.lookups.getOrDefault(missingCid) - check lookupCount == 1 - test "Block Batching": let manifest = await storeDataGetManifest(localStore, chunker) @@ -91,17 +78,15 @@ asyncchecksuite "Test Node - Basic": ) ).tryGet() - test "Store and 
retrieve Data Stream": + test "Should store Data Stream": let stream = BufferStream.new() storeFut = node.store(stream) - oddChunkSize = math.trunc(DefaultBlockSize.float / 3.14).NBytes # Let's check that node.store can correctly rechunk these odd chunks - oddChunker = FileChunker.new(file = file, chunkSize = oddChunkSize, pad = false) - # TODO: doesn't work with pad=tue + oddChunker = FileChunker.new(file = file, chunkSize = 1024.NBytes, pad = false) + # don't pad, so `node.store` gets the correct size var original: seq[byte] - try: while (let chunk = await oddChunker.getBytes(); chunk.len > 0): original &= chunk @@ -114,13 +99,35 @@ asyncchecksuite "Test Node - Basic": manifestCid = (await storeFut).tryGet() manifestBlock = (await localStore.getBlock(manifestCid)).tryGet() localManifest = Manifest.decode(manifestBlock).tryGet() - data = await (await node.retrieve(manifestCid)).drain() + var data: seq[byte] + for i in 0 ..< localManifest.blocksCount: + let blk = (await localStore.getBlock(localManifest.treeCid, i)).tryGet() + data &= blk.data + + data.setLen(localManifest.datasetSize.int) # truncate data to original size check: - data.len == localManifest.datasetSize.int data.len == original.len sha256.digest(data) == sha256.digest(original) + test "Should retrieve a Data Stream": + let + manifest = await storeDataGetManifest(localStore, chunker) + manifestBlk = + bt.Block.new(data = manifest.encode().tryGet, codec = ManifestCodec).tryGet() + + (await localStore.putBlock(manifestBlk)).tryGet() + let data = await ((await node.retrieve(manifestBlk.cid)).tryGet()).drain() + + var storedData: seq[byte] + for i in 0 ..< manifest.blocksCount: + let blk = (await localStore.getBlock(manifest.treeCid, i)).tryGet() + storedData &= blk.data + + storedData.setLen(manifest.datasetSize.int) # truncate data to original size + check: + storedData == data + test "Retrieve One Block": let testString = "Block 1" @@ -137,7 +144,8 @@ asyncchecksuite "Test Node - Basic": test "Setup 
purchase request": let - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) + erasure = + Erasure.new(store, leoEncoderProvider, leoDecoderProvider, Taskpool.new()) manifest = await storeDataGetManifest(localStore, chunker) manifestBlock = bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() @@ -154,15 +162,40 @@ asyncchecksuite "Test Node - Basic": cid = manifestBlock.cid, nodes = 5, tolerance = 2, - duration = 100.u256, + duration = 100.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, - expiry = 200.u256, + expiry = 200.uint64, collateralPerByte = 1.u256, ) ).tryGet check: (await verifiableBlock.cid in localStore) == true - request.content.cid == $verifiableBlock.cid + request.content.cid == verifiableBlock.cid request.content.merkleRoot == builder.verifyRoot.get.toBytes + + test "Should delete a single block": + let randomBlock = bt.Block.new("Random block".toBytes).tryGet() + (await localStore.putBlock(randomBlock)).tryGet() + check (await randomBlock.cid in localStore) == true + + (await node.delete(randomBlock.cid)).tryGet() + check (await randomBlock.cid in localStore) == false + + test "Should delete an entire dataset": + let + blocks = await makeRandomBlocks(datasetSize = 2048, blockSize = 256'nb) + manifest = await storeDataGetManifest(localStore, blocks) + manifestBlock = (await store.storeManifest(manifest)).tryGet() + manifestCid = manifestBlock.cid + + check await manifestCid in localStore + for blk in blocks: + check await blk.cid in localStore + + (await node.delete(manifestCid)).tryGet() + + check not await manifestCid in localStore + for blk in blocks: + check not (await blk.cid in localStore) diff --git a/tests/codex/sales/helpers/periods.nim b/tests/codex/sales/helpers/periods.nim index ba1793c2..99716cec 100644 --- a/tests/codex/sales/helpers/periods.nim +++ b/tests/codex/sales/helpers/periods.nim @@ -3,6 +3,6 @@ import ../../helpers/mockclock proc advanceToNextPeriod*(clock: MockClock, 
market: Market) {.async.} = let periodicity = await market.periodicity() - let period = periodicity.periodOf(clock.now().u256) + let period = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(period) - clock.set((periodEnd + 1).truncate(int)) + clock.set(periodEnd.toSecondsSince1970 + 1) diff --git a/tests/codex/sales/states/testcancelled.nim b/tests/codex/sales/states/testcancelled.nim index d2568b98..6eaf1f5a 100644 --- a/tests/codex/sales/states/testcancelled.nim +++ b/tests/codex/sales/states/testcancelled.nim @@ -2,9 +2,11 @@ import pkg/questionable import pkg/chronos import pkg/codex/contracts/requests import pkg/codex/sales/states/cancelled +import pkg/codex/sales/states/errored import pkg/codex/sales/salesagent import pkg/codex/sales/salescontext import pkg/codex/market +from pkg/codex/utils/asyncstatemachine import State import ../../../asynctest import ../../examples @@ -14,7 +16,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'cancelled'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let clock = MockClock.new() let currentCollateral = UInt256.example @@ -22,16 +24,14 @@ asyncchecksuite "sales state 'cancelled'": var market: MockMarket var state: SaleCancelled var agent: SalesAgent - var returnBytesWas = bool.none - var reprocessSlotWas = bool.none - var returnedCollateralValue = UInt256.none + var reprocessSlotWas: ?bool + var returnedCollateralValue: ?UInt256 setup: market = MockMarket.new() let onCleanUp = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = - returnBytesWas = some returnBytes reprocessSlotWas = some reprocessSlot returnedCollateralValue = returnedCollateral @@ -39,8 +39,43 @@ asyncchecksuite "sales state 'cancelled'": agent = newSalesAgent(context, request.id, slotIndex, request.some) agent.onCleanUp 
= onCleanUp state = SaleCancelled.new() + reprocessSlotWas = bool.none + returnedCollateralValue = UInt256.none + teardown: + reprocessSlotWas = bool.none + returnedCollateralValue = UInt256.none - test "calls onCleanUp with returnBytes = false, reprocessSlot = true, and returnedCollateral = currentCollateral": + test "calls onCleanUp with reprocessSlot = true, and returnedCollateral = currentCollateral": + market.fillSlot( + requestId = request.id, + slotIndex = slotIndex, + proof = Groth16Proof.default, + host = await market.getSigner(), + collateral = currentCollateral, + ) + discard await state.run(agent) + check eventually reprocessSlotWas == some false + check eventually returnedCollateralValue == some currentCollateral + + test "completes the cancelled state when free slot error is raised and the collateral is returned when a host is hosting a slot": + market.fillSlot( + requestId = request.id, + slotIndex = slotIndex, + proof = Groth16Proof.default, + host = await market.getSigner(), + collateral = currentCollateral, + ) + + let error = + newException(SlotStateMismatchError, "Failed to free slot, slot is already free") + market.setErrorOnFreeSlot(error) + + let next = await state.run(agent) + check next == none State + check eventually reprocessSlotWas == some false + check eventually returnedCollateralValue == some currentCollateral + + test "completes the cancelled state when free slot error is raised and the collateral is not returned when a host is not hosting a slot": market.fillSlot( requestId = request.id, slotIndex = slotIndex, @@ -48,7 +83,30 @@ asyncchecksuite "sales state 'cancelled'": host = Address.example, collateral = currentCollateral, ) - discard await state.run(agent) - check eventually returnBytesWas == some true + + let error = + newException(SlotStateMismatchError, "Failed to free slot, slot is already free") + market.setErrorOnFreeSlot(error) + + let next = await state.run(agent) + check next == none State check eventually 
reprocessSlotWas == some false - check eventually returnedCollateralValue == some currentCollateral + check eventually returnedCollateralValue == UInt256.none + + test "calls onCleanUp and returns the collateral when an error is raised": + market.fillSlot( + requestId = request.id, + slotIndex = slotIndex, + proof = Groth16Proof.default, + host = Address.example, + collateral = currentCollateral, + ) + + let error = newException(MarketError, "") + market.setErrorOnGetHost(error) + + let next = !(await state.run(agent)) + + check next of SaleErrored + let errored = SaleErrored(next) + check errored.error == error diff --git a/tests/codex/sales/states/testdownloading.nim b/tests/codex/sales/states/testdownloading.nim index e13ac53e..71376fc8 100644 --- a/tests/codex/sales/states/testdownloading.nim +++ b/tests/codex/sales/states/testdownloading.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/questionable import pkg/codex/contracts/requests import pkg/codex/sales/states/cancelled @@ -8,9 +8,9 @@ import pkg/codex/sales/states/filled import ../../examples import ../../helpers -checksuite "sales state 'downloading'": +suite "sales state 'downloading'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var state: SaleDownloading setup: diff --git a/tests/codex/sales/states/testerrored.nim b/tests/codex/sales/states/testerrored.nim index 9c8ee17a..0cc26cf8 100644 --- a/tests/codex/sales/states/testerrored.nim +++ b/tests/codex/sales/states/testerrored.nim @@ -14,20 +14,18 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'errored'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let clock = MockClock.new() var state: SaleErrored var agent: SalesAgent - var returnBytesWas = false var reprocessSlotWas = false setup: let onCleanUp = proc( - returnBytes 
= false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = - returnBytesWas = returnBytes reprocessSlotWas = reprocessSlot let context = SalesContext(market: market, clock: clock) @@ -35,8 +33,7 @@ asyncchecksuite "sales state 'errored'": agent.onCleanUp = onCleanUp state = SaleErrored(error: newException(ValueError, "oh no!")) - test "calls onCleanUp with returnBytes = false and reprocessSlot = true": + test "calls onCleanUp with reprocessSlot = true": state = SaleErrored(error: newException(ValueError, "oh no!"), reprocessSlot: true) discard await state.run(agent) - check eventually returnBytesWas == true check eventually reprocessSlotWas == true diff --git a/tests/codex/sales/states/testfilled.nim b/tests/codex/sales/states/testfilled.nim index f8f77da6..f077b780 100644 --- a/tests/codex/sales/states/testfilled.nim +++ b/tests/codex/sales/states/testfilled.nim @@ -14,9 +14,9 @@ import ../../helpers/mockmarket import ../../examples import ../../helpers -checksuite "sales state 'filled'": +suite "sales state 'filled'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var market: MockMarket var slot: MockSlot @@ -36,7 +36,7 @@ checksuite "sales state 'filled'": market.requestEnds[request.id] = 321 onExpiryUpdatePassedExpiry = -1 let onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = onExpiryUpdatePassedExpiry = expiry return success() diff --git a/tests/codex/sales/states/testfilling.nim b/tests/codex/sales/states/testfilling.nim index f0ce7059..54536a4c 100644 --- a/tests/codex/sales/states/testfilling.nim +++ b/tests/codex/sales/states/testfilling.nim @@ -1,18 +1,31 @@ -import std/unittest import pkg/questionable import pkg/codex/contracts/requests import pkg/codex/sales/states/filling import 
pkg/codex/sales/states/cancelled import pkg/codex/sales/states/failed +import pkg/codex/sales/states/ignored +import pkg/codex/sales/states/errored +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext +import ../../../asynctest import ../../examples import ../../helpers +import ../../helpers/mockmarket +import ../../helpers/mockclock -checksuite "sales state 'filling'": +suite "sales state 'filling'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var state: SaleFilling + var market: MockMarket + var clock: MockClock + var agent: SalesAgent setup: + clock = MockClock.new() + market = MockMarket.new() + let context = SalesContext(market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) state = SaleFilling.new() test "switches to cancelled state when request expires": @@ -22,3 +35,27 @@ checksuite "sales state 'filling'": test "switches to failed state when request fails": let next = state.onFailed(request) check !next of SaleFailed + + test "run switches to ignored when slot is not free": + let error = newException( + SlotStateMismatchError, "Failed to fill slot because the slot is not free" + ) + market.setErrorOnFillSlot(error) + market.requested.add(request) + market.slotState[request.slotId(slotIndex)] = SlotState.Filled + + let next = !(await state.run(agent)) + check next of SaleIgnored + check SaleIgnored(next).reprocessSlot == false + + test "run switches to errored with other error ": + let error = newException(MarketError, "some error") + market.setErrorOnFillSlot(error) + market.requested.add(request) + market.slotState[request.slotId(slotIndex)] = SlotState.Filled + + let next = !(await state.run(agent)) + check next of SaleErrored + + let errored = SaleErrored(next) + check errored.error == error diff --git a/tests/codex/sales/states/testfinished.nim b/tests/codex/sales/states/testfinished.nim index 
4b353014..1648df3a 100644 --- a/tests/codex/sales/states/testfinished.nim +++ b/tests/codex/sales/states/testfinished.nim @@ -15,7 +15,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'finished'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let clock = MockClock.new() let currentCollateral = UInt256.example @@ -23,22 +23,23 @@ asyncchecksuite "sales state 'finished'": var market: MockMarket var state: SaleFinished var agent: SalesAgent - var returnBytesWas = bool.none var reprocessSlotWas = bool.none var returnedCollateralValue = UInt256.none + var saleCleared = bool.none setup: market = MockMarket.new() let onCleanUp = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = - returnBytesWas = some returnBytes reprocessSlotWas = some reprocessSlot returnedCollateralValue = returnedCollateral let context = SalesContext(market: market, clock: clock) agent = newSalesAgent(context, request.id, slotIndex, request.some) agent.onCleanUp = onCleanUp + agent.context.onClear = some proc(request: StorageRequest, idx: uint64) = + saleCleared = some true state = SaleFinished(returnedCollateral: some currentCollateral) test "switches to cancelled state when request expires": @@ -49,8 +50,8 @@ asyncchecksuite "sales state 'finished'": let next = state.onFailed(request) check !next of SaleFailed - test "calls onCleanUp with returnBytes = false, reprocessSlot = true, and returnedCollateral = currentCollateral": + test "calls onCleanUp with reprocessSlot = true, and returnedCollateral = currentCollateral": discard await state.run(agent) - check eventually returnBytesWas == some false check eventually reprocessSlotWas == some false check eventually returnedCollateralValue == some currentCollateral + check eventually saleCleared == some true diff --git 
a/tests/codex/sales/states/testignored.nim b/tests/codex/sales/states/testignored.nim index 1c808e8b..5eea7d16 100644 --- a/tests/codex/sales/states/testignored.nim +++ b/tests/codex/sales/states/testignored.nim @@ -14,20 +14,18 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'ignored'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let clock = MockClock.new() var state: SaleIgnored var agent: SalesAgent - var returnBytesWas = false var reprocessSlotWas = false setup: let onCleanUp = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = - returnBytesWas = returnBytes reprocessSlotWas = reprocessSlot let context = SalesContext(market: market, clock: clock) @@ -36,7 +34,6 @@ asyncchecksuite "sales state 'ignored'": state = SaleIgnored.new() test "calls onCleanUp with values assigned to SaleIgnored": - state = SaleIgnored(reprocessSlot: true, returnBytes: true) + state = SaleIgnored(reprocessSlot: true) discard await state.run(agent) - check eventually returnBytesWas == true check eventually reprocessSlotWas == true diff --git a/tests/codex/sales/states/testinitialproving.nim b/tests/codex/sales/states/testinitialproving.nim index 97331a07..cae0a069 100644 --- a/tests/codex/sales/states/testinitialproving.nim +++ b/tests/codex/sales/states/testinitialproving.nim @@ -20,7 +20,7 @@ import ../helpers/periods asyncchecksuite "sales state 'initialproving'": let proof = Groth16Proof.example let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let clock = MockClock.new() diff --git a/tests/codex/sales/states/testpayout.nim b/tests/codex/sales/states/testpayout.nim index b1748b45..403c663f 100644 --- a/tests/codex/sales/states/testpayout.nim +++ 
b/tests/codex/sales/states/testpayout.nim @@ -15,7 +15,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'payout'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let clock = MockClock.new() let currentCollateral = UInt256.example diff --git a/tests/codex/sales/states/testpreparing.nim b/tests/codex/sales/states/testpreparing.nim index e78ee25e..802489a1 100644 --- a/tests/codex/sales/states/testpreparing.nim +++ b/tests/codex/sales/states/testpreparing.nim @@ -13,6 +13,7 @@ import pkg/codex/sales/salesagent import pkg/codex/sales/salescontext import pkg/codex/sales/reservations import pkg/codex/stores/repostore +import times import ../../../asynctest import ../../helpers import ../../examples @@ -22,7 +23,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'preparing'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let clock = MockClock.new() var agent: SalesAgent @@ -34,11 +35,13 @@ asyncchecksuite "sales state 'preparing'": setup: availability = Availability.init( - totalSize = request.ask.slotSize + 100.u256, - freeSize = request.ask.slotSize + 100.u256, - duration = request.ask.duration + 60.u256, + totalSize = request.ask.slotSize + 100.uint64, + freeSize = request.ask.slotSize + 100.uint64, + duration = request.ask.duration + 60.uint64, minPricePerBytePerSecond = request.ask.pricePerBytePerSecond, totalCollateral = request.ask.collateralPerSlot * request.ask.slots.u256, + enabled = true, + until = 0.SecondsSince1970, ) let repoDs = SQLiteDatastore.new(Memory).tryGet() let metaDs = SQLiteDatastore.new(Memory).tryGet() @@ -52,6 +55,8 @@ asyncchecksuite "sales state 'preparing'": context.reservations = reservations agent = newSalesAgent(context, request.id, slotIndex, request.some) + market.requestEnds[request.id] = clock.now() + 
cast[int64](request.ask.duration) + teardown: await repo.stop() @@ -67,10 +72,14 @@ asyncchecksuite "sales state 'preparing'": let next = state.onSlotFilled(request.id, slotIndex) check !next of SaleFilled - proc createAvailability() {.async.} = + proc createAvailability(enabled = true) {.async.} = let a = await reservations.createAvailability( - availability.totalSize, availability.duration, - availability.minPricePerBytePerSecond, availability.totalCollateral, + availability.totalSize, + availability.duration, + availability.minPricePerBytePerSecond, + availability.totalCollateral, + enabled, + until = 0.SecondsSince1970, ) availability = a.get @@ -79,7 +88,11 @@ asyncchecksuite "sales state 'preparing'": check next of SaleIgnored let ignored = SaleIgnored(next) check ignored.reprocessSlot - check ignored.returnBytes == false + + test "run switches to ignored when availability is not enabled": + await createAvailability(enabled = false) + let next = !(await state.run(agent)) + check next of SaleIgnored test "run switches to slot reserving state after reservation created": await createAvailability() @@ -94,7 +107,6 @@ asyncchecksuite "sales state 'preparing'": check next of SaleIgnored let ignored = SaleIgnored(next) check ignored.reprocessSlot - check ignored.returnBytes == false test "run switches to errored when reserve fails with other error": await createAvailability() diff --git a/tests/codex/sales/states/testproving.nim b/tests/codex/sales/states/testproving.nim index afdeb4d2..6b7e7bd4 100644 --- a/tests/codex/sales/states/testproving.nim +++ b/tests/codex/sales/states/testproving.nim @@ -40,9 +40,9 @@ asyncchecksuite "sales state 'proving'": proc advanceToNextPeriod(market: Market) {.async.} = let periodicity = await market.periodicity() - let current = periodicity.periodOf(clock.now().u256) + let current = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(current) - clock.set(periodEnd.truncate(int64) + 1) + 
clock.set(periodEnd.toSecondsSince1970 + 1) test "switches to cancelled state when request expires": let next = state.onCancelled(request) diff --git a/tests/codex/sales/states/testsimulatedproving.nim b/tests/codex/sales/states/testsimulatedproving.nim index 1fc5331c..c8f4ae1d 100644 --- a/tests/codex/sales/states/testsimulatedproving.nim +++ b/tests/codex/sales/states/testsimulatedproving.nim @@ -56,9 +56,9 @@ asyncchecksuite "sales state 'simulated-proving'": proc advanceToNextPeriod(market: Market) {.async.} = let periodicity = await market.periodicity() - let current = periodicity.periodOf(clock.now().u256) + let current = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(current) - clock.set(periodEnd.truncate(int64) + 1) + clock.set(periodEnd.toSecondsSince1970 + 1) proc waitForProvingRounds(market: Market, rounds: int) {.async.} = var rnds = rounds - 1 # proof round runs prior to advancing diff --git a/tests/codex/sales/states/testslotreserving.nim b/tests/codex/sales/states/testslotreserving.nim index 1fd573fa..b223338a 100644 --- a/tests/codex/sales/states/testslotreserving.nim +++ b/tests/codex/sales/states/testslotreserving.nim @@ -19,7 +19,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'SlotReserving'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var market: MockMarket var clock: MockClock var agent: SalesAgent @@ -54,16 +54,16 @@ asyncchecksuite "sales state 'SlotReserving'": test "run switches to errored when slot reservation errors": let error = newException(MarketError, "some error") - market.setReserveSlotThrowError(some error) + market.setErrorOnReserveSlot(error) let next = !(await state.run(agent)) check next of SaleErrored let errored = SaleErrored(next) check errored.error == error - test "catches reservation not allowed error": - let error = newException(MarketError, "SlotReservations_ReservationNotAllowed") - 
market.setReserveSlotThrowError(some error) + test "run switches to ignored when reservation is not allowed": + let error = + newException(SlotReservationNotAllowedError, "Reservation is not allowed") + market.setErrorOnReserveSlot(error) let next = !(await state.run(agent)) check next of SaleIgnored check SaleIgnored(next).reprocessSlot == false - check SaleIgnored(next).returnBytes diff --git a/tests/codex/sales/states/testunknown.nim b/tests/codex/sales/states/testunknown.nim index 97730f49..98b23224 100644 --- a/tests/codex/sales/states/testunknown.nim +++ b/tests/codex/sales/states/testunknown.nim @@ -14,9 +14,9 @@ import ../../helpers/mockmarket import ../../examples import ../../helpers -checksuite "sales state 'unknown'": +suite "sales state 'unknown'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let slotId = slotId(request.id, slotIndex) var market: MockMarket diff --git a/tests/codex/sales/testreservations.nim b/tests/codex/sales/testreservations.nim index a1c7d1a5..ff5e153c 100644 --- a/tests/codex/sales/testreservations.nim +++ b/tests/codex/sales/testreservations.nim @@ -1,5 +1,5 @@ import std/random - +import std/times import pkg/questionable import pkg/questionable/results import pkg/chronos @@ -8,6 +8,7 @@ import pkg/datastore import pkg/codex/stores import pkg/codex/errors import pkg/codex/sales +import pkg/codex/clock import pkg/codex/utils/json import ../../asynctest @@ -39,19 +40,22 @@ asyncchecksuite "Reservations module": await repoTmp.destroyDb() await metaTmp.destroyDb() - proc createAvailability(): Availability = + proc createAvailability(enabled = true, until = 0.SecondsSince1970): Availability = let example = Availability.example(collateralPerByte) - let totalSize = rand(100000 .. 200000).u256 - let totalCollateral = totalSize * collateralPerByte + let totalSize = rand(100000 .. 
200000).uint64 + let totalCollateral = totalSize.u256 * collateralPerByte let availability = waitFor reservations.createAvailability( - totalSize, example.duration, example.minPricePerBytePerSecond, totalCollateral + totalSize, example.duration, example.minPricePerBytePerSecond, totalCollateral, + enabled, until, ) return availability.get proc createReservation(availability: Availability): Reservation = - let size = rand(1 ..< availability.freeSize.truncate(int)) + let size = rand(1 ..< availability.freeSize.int) + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let reservation = waitFor reservations.createReservation( - availability.id, size.u256, RequestId.example, UInt256.example, 1.u256 + availability.id, size.uint64, RequestId.example, uint64.example, 1.u256, + validUntil, ) return reservation.get @@ -64,8 +68,12 @@ asyncchecksuite "Reservations module": check (await reservations.all(Availability)).get.len == 0 test "generates unique ids for storage availability": - let availability1 = Availability.init(1.u256, 2.u256, 3.u256, 4.u256, 5.u256) - let availability2 = Availability.init(1.u256, 2.u256, 3.u256, 4.u256, 5.u256) + let availability1 = Availability.init( + 1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256, true, 0.SecondsSince1970 + ) + let availability2 = Availability.init( + 1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256, true, 0.SecondsSince1970 + ) check availability1.id != availability2.id test "can reserve available storage": @@ -75,7 +83,7 @@ asyncchecksuite "Reservations module": test "creating availability reserves bytes in repo": let orig = repo.available.uint let availability = createAvailability() - check repo.available.uint == (orig.u256 - availability.freeSize).truncate(uint) + check repo.available.uint == orig - availability.freeSize test "can get all availabilities": let availability1 = createAvailability() @@ -128,20 +136,24 @@ asyncchecksuite "Reservations module": test "cannot create reservation with non-existant availability": let 
availability = Availability.example + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let created = await reservations.createReservation( - availability.id, UInt256.example, RequestId.example, UInt256.example, 1.u256 + availability.id, uint64.example, RequestId.example, uint64.example, 1.u256, + validUntil, ) check created.isErr check created.error of NotExistsError test "cannot create reservation larger than availability size": let availability = createAvailability() + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let created = await reservations.createReservation( availability.id, availability.totalSize + 1, RequestId.example, + uint64.example, UInt256.example, - UInt256.example, + validUntil, ) check created.isErr check created.error of BytesOutOfBoundsError @@ -149,23 +161,26 @@ asyncchecksuite "Reservations module": test "cannot create reservation larger than availability size - concurrency test": proc concurrencyTest(): Future[void] {.async.} = let availability = createAvailability() + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let one = reservations.createReservation( availability.id, availability.totalSize - 1, RequestId.example, + uint64.example, UInt256.example, - UInt256.example, + validUntil, ) let two = reservations.createReservation( - availability.id, availability.totalSize, RequestId.example, UInt256.example, - UInt256.example, + availability.id, availability.totalSize, RequestId.example, uint64.example, + UInt256.example, validUntil, ) let oneResult = await one let twoResult = await two check oneResult.isErr or twoResult.isErr + if oneResult.isErr: check oneResult.error of BytesOutOfBoundsError if twoResult.isErr: @@ -228,7 +243,7 @@ asyncchecksuite "Reservations module": let reservation = createReservation(availability) let orig = availability.freeSize - reservation.size let origQuota = repo.quotaReservedBytes - let returnedBytes = reservation.size + 200.u256 + let returnedBytes = reservation.size + 200.uint64 
check isOk await reservations.returnBytesToAvailability( reservation.availabilityId, reservation.id, returnedBytes @@ -238,7 +253,7 @@ asyncchecksuite "Reservations module": let updated = !(await reservations.get(key, Availability)) check updated.freeSize > orig - check (updated.freeSize - orig) == 200.u256 + check (updated.freeSize - orig) == 200.uint64 check (repo.quotaReservedBytes - origQuota) == 200.NBytes test "update releases quota when lowering size": @@ -259,6 +274,48 @@ asyncchecksuite "Reservations module": check isOk await reservations.update(availability) check (repo.quotaReservedBytes - origQuota) == 100.NBytes + test "create availability set enabled to true by default": + let availability = createAvailability() + check availability.enabled == true + + test "create availability set until to 0 by default": + let availability = createAvailability() + check availability.until == 0.SecondsSince1970 + + test "create availability whith correct values": + var until = getTime().toUnix() + + let availability = createAvailability(enabled = false, until = until) + check availability.enabled == false + check availability.until == until + + test "create an availability fails when trying set until with a negative value": + let totalSize = rand(100000 .. 
200000).uint64 + let example = Availability.example(collateralPerByte) + let totalCollateral = totalSize.u256 * collateralPerByte + + let result = await reservations.createAvailability( + totalSize, + example.duration, + example.minPricePerBytePerSecond, + totalCollateral, + enabled = true, + until = -1.SecondsSince1970, + ) + + check result.isErr + check result.error of UntilOutOfBoundsError + + test "update an availability fails when trying set until with a negative value": + let until = getTime().toUnix() + let availability = createAvailability(until = until) + + availability.until = -1 + + let result = await reservations.update(availability) + check result.isErr + check result.error of UntilOutOfBoundsError + test "reservation can be partially released": let availability = createAvailability() let reservation = createReservation(availability) @@ -271,75 +328,190 @@ asyncchecksuite "Reservations module": let availability = createAvailability() let reservation = createReservation(availability) let updated = await reservations.release( - reservation.id, reservation.availabilityId, (reservation.size + 1).truncate(uint) + reservation.id, reservation.availabilityId, reservation.size + 1 ) check updated.isErr check updated.error of BytesOutOfBoundsError test "cannot release bytes from non-existant reservation": let availability = createAvailability() - let reservation = createReservation(availability) + discard createReservation(availability) let updated = await reservations.release(ReservationId.example, availability.id, 1) check updated.isErr check updated.error of NotExistsError - test "onAvailabilityAdded called when availability is created": + test "OnAvailabilitySaved called when availability is created": var added: Availability - reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc( + a: Availability + ) {.gcsafe, async: (raises: []).} = added = a let availability = createAvailability() check added == 
availability - test "onAvailabilityAdded called when availability size is increased": + test "OnAvailabilitySaved called when availability size is increased": var availability = createAvailability() var added: Availability - reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc( + a: Availability + ) {.gcsafe, async: (raises: []).} = added = a - availability.freeSize += 1.u256 + availability.freeSize += 1 discard await reservations.update(availability) check added == availability - test "onAvailabilityAdded is not called when availability size is decreased": + test "OnAvailabilitySaved is not called when availability size is decreased": var availability = createAvailability() var called = false - reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc( + a: Availability + ) {.gcsafe, async: (raises: []).} = called = true - availability.freeSize -= 1.u256 + availability.freeSize -= 1.uint64 + discard await reservations.update(availability) + + check not called + + test "OnAvailabilitySaved is not called when availability is disabled": + var availability = createAvailability(enabled = false) + var called = false + reservations.OnAvailabilitySaved = proc( + a: Availability + ) {.gcsafe, async: (raises: []).} = + called = true + availability.freeSize -= 1 + discard await reservations.update(availability) + + check not called + + test "OnAvailabilitySaved called when availability duration is increased": + var availability = createAvailability() + var added: Availability + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = + added = a + availability.duration += 1 + discard await reservations.update(availability) + + check added == availability + + test "OnAvailabilitySaved is not called when availability duration is decreased": + var availability = createAvailability() + var called = false + reservations.OnAvailabilitySaved = 
proc(a: Availability) {.async: (raises: []).} = + called = true + availability.duration -= 1 + discard await reservations.update(availability) + + check not called + + test "OnAvailabilitySaved called when availability minPricePerBytePerSecond is increased": + var availability = createAvailability() + var added: Availability + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = + added = a + availability.minPricePerBytePerSecond += 1.u256 + discard await reservations.update(availability) + + check added == availability + + test "OnAvailabilitySaved is not called when availability minPricePerBytePerSecond is decreased": + var availability = createAvailability() + var called = false + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = + called = true + availability.minPricePerBytePerSecond -= 1.u256 + discard await reservations.update(availability) + + check not called + + test "OnAvailabilitySaved called when availability totalCollateral is increased": + var availability = createAvailability() + var added: Availability + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = + added = a + availability.totalCollateral = availability.totalCollateral + 1.u256 + discard await reservations.update(availability) + + check added == availability + + test "OnAvailabilitySaved is not called when availability totalCollateral is decreased": + var availability = createAvailability() + var called = false + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = + called = true + availability.totalCollateral = availability.totalCollateral - 1.u256 discard await reservations.update(availability) check not called test "availabilities can be found": let availability = createAvailability() - + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let found = await reservations.findAvailability( availability.freeSize, availability.duration, - 
availability.minPricePerBytePerSecond, collateralPerByte, + availability.minPricePerBytePerSecond, collateralPerByte, validUntil, ) check found.isSome check found.get == availability + test "does not find an availability when is it disabled": + let availability = createAvailability(enabled = false) + let validUntil = getTime().toUnix() + 30.SecondsSince1970 + let found = await reservations.findAvailability( + availability.freeSize, availability.duration, + availability.minPricePerBytePerSecond, collateralPerByte, validUntil, + ) + + check found.isNone + + test "finds an availability when the until date is after the duration": + let example = Availability.example(collateralPerByte) + let until = getTime().toUnix() + example.duration.SecondsSince1970 + let availability = createAvailability(until = until) + let validUntil = getTime().toUnix() + 30.SecondsSince1970 + let found = await reservations.findAvailability( + availability.freeSize, availability.duration, + availability.minPricePerBytePerSecond, collateralPerByte, validUntil, + ) + + check found.isSome + check found.get == availability + + test "does not find an availability when the until date is before the duration": + let example = Availability.example(collateralPerByte) + let until = getTime().toUnix() + 1.SecondsSince1970 + let availability = createAvailability(until = until) + let validUntil = getTime().toUnix() + 30.SecondsSince1970 + let found = await reservations.findAvailability( + availability.freeSize, availability.duration, + availability.minPricePerBytePerSecond, collateralPerByte, validUntil, + ) + + check found.isNone + test "non-matching availabilities are not found": let availability = createAvailability() - + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let found = await reservations.findAvailability( availability.freeSize + 1, availability.duration, availability.minPricePerBytePerSecond, collateralPerByte, + validUntil, ) check found.isNone test "non-existent availability cannot 
be found": let availability = Availability.example + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let found = await reservations.findAvailability( availability.freeSize, availability.duration, - availability.minPricePerBytePerSecond, collateralPerByte, + availability.minPricePerBytePerSecond, collateralPerByte, validUntil, ) check found.isNone @@ -356,14 +528,16 @@ asyncchecksuite "Reservations module": check reservations.hasAvailable(DefaultQuotaBytes.uint - 1) test "reports quota not available to be reserved": - check not reservations.hasAvailable(DefaultQuotaBytes.uint + 1) + check not reservations.hasAvailable(DefaultQuotaBytes.uint64 + 1) test "fails to create availability with size that is larger than available quota": let created = await reservations.createAvailability( - (DefaultQuotaBytes.uint + 1).u256, - UInt256.example, + DefaultQuotaBytes.uint64 + 1, + uint64.example, UInt256.example, UInt256.example, + enabled = true, + until = 0.SecondsSince1970, ) check created.isErr check created.error of ReserveFailedError diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index 0d441f34..f4d9cbae 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -14,6 +14,7 @@ import pkg/codex/stores/repostore import pkg/codex/blocktype as bt import pkg/codex/node import pkg/codex/utils/asyncstatemachine +import times import ../../asynctest import ../helpers import ../helpers/mockmarket @@ -36,18 +37,21 @@ asyncchecksuite "Sales - start": var repo: RepoStore var queue: SlotQueue var itemsProcessed: seq[SlotQueueItem] + var expiry: SecondsSince1970 setup: request = StorageRequest( ask: StorageAsk( slots: 4, - slotSize: 100.u256, - duration: 60.u256, + slotSize: 100.uint64, + duration: 60.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, ), - content: StorageContent(cid: "some cid"), - expiry: (getTime() + initDuration(hours = 1)).toUnix.u256, + content: StorageContent( + cid: 
Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet + ), + expiry: (getTime() + initDuration(hours = 1)).toUnix.uint64, ) market = MockMarket.new() @@ -59,12 +63,12 @@ asyncchecksuite "Sales - start": sales = Sales.new(market, clock, repo) reservations = sales.context.reservations sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = return success() sales.onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = return success() @@ -74,7 +78,8 @@ asyncchecksuite "Sales - start": ): Future[?!Groth16Proof] {.async.} = return success(proof) itemsProcessed = @[] - request.expiry = (clock.now() + 42).u256 + expiry = (clock.now() + 42) + request.expiry = expiry.uint64 teardown: await sales.stop() @@ -82,7 +87,7 @@ asyncchecksuite "Sales - start": await repoTmp.destroyDb() await metaTmp.destroyDb() - proc fillSlot(slotIdx: UInt256 = 0.u256) {.async.} = + proc fillSlot(slotIdx: uint64 = 0.uint64) {.async.} = let address = await market.getSigner() let slot = MockSlot(requestId: request.id, slotIndex: slotIdx, proof: proof, host: address) @@ -95,16 +100,15 @@ asyncchecksuite "Sales - start": request.ask.slots = 2 market.requested = @[request] market.requestState[request.id] = RequestState.New + market.requestExpiry[request.id] = expiry - let slot0 = - MockSlot(requestId: request.id, slotIndex: 0.u256, proof: proof, host: me) + let slot0 = MockSlot(requestId: request.id, slotIndex: 0, proof: proof, host: me) await fillSlot(slot0.slotIndex) - let slot1 = - MockSlot(requestId: request.id, slotIndex: 1.u256, proof: proof, host: me) + let slot1 = MockSlot(requestId: request.id, slotIndex: 1, proof: proof, host: me) await fillSlot(slot1.slotIndex) - market.activeSlots[me] = @[request.slotId(0.u256), request.slotId(1.u256)] + market.activeSlots[me] 
= @[request.slotId(0), request.slotId(1)] market.requested = @[request] market.activeRequests[me] = @[request.id] @@ -112,10 +116,10 @@ asyncchecksuite "Sales - start": check eventually sales.agents.len == 2 check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.uint64 ) check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.uint64 ) asyncchecksuite "Sales": @@ -124,7 +128,7 @@ asyncchecksuite "Sales": repoTmp = TempLevelDb.new() metaTmp = TempLevelDb.new() - var totalAvailabilitySize: UInt256 + var totalAvailabilitySize: uint64 var minPricePerBytePerSecond: UInt256 var requestedCollateralPerByte: UInt256 var totalCollateral: UInt256 @@ -139,27 +143,31 @@ asyncchecksuite "Sales": var itemsProcessed: seq[SlotQueueItem] setup: - totalAvailabilitySize = 100.u256 + totalAvailabilitySize = 100.uint64 minPricePerBytePerSecond = 1.u256 requestedCollateralPerByte = 1.u256 - totalCollateral = requestedCollateralPerByte * totalAvailabilitySize + totalCollateral = requestedCollateralPerByte * totalAvailabilitySize.stuint(256) availability = Availability.init( totalSize = totalAvailabilitySize, freeSize = totalAvailabilitySize, - duration = 60.u256, + duration = 60.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, + enabled = true, + until = 0.SecondsSince1970, ) request = StorageRequest( ask: StorageAsk( slots: 4, - slotSize: 100.u256, - duration: 60.u256, + slotSize: 100.uint64, + duration: 60.uint64, pricePerBytePerSecond: minPricePerBytePerSecond, collateralPerByte: 1.u256, ), - content: StorageContent(cid: "some cid"), - expiry: (getTime() + initDuration(hours = 1)).toUnix.u256, + content: StorageContent( + cid: Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet 
+ ), + expiry: (getTime() + initDuration(hours = 1)).toUnix.uint64, ) market = MockMarket.new() @@ -176,12 +184,12 @@ asyncchecksuite "Sales": sales = Sales.new(market, clock, repo) reservations = sales.context.reservations sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = return success() sales.onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = return success() @@ -216,25 +224,33 @@ asyncchecksuite "Sales": let key = availability.id.key.get (waitFor reservations.get(key, Availability)).get - proc createAvailability() = + proc createAvailability(enabled = true, until = 0.SecondsSince1970) = let a = waitFor reservations.createAvailability( availability.totalSize, availability.duration, - availability.minPricePerBytePerSecond, availability.totalCollateral, + availability.minPricePerBytePerSecond, availability.totalCollateral, enabled, + until, ) availability = a.get # update id proc notProcessed(itemsProcessed: seq[SlotQueueItem], request: StorageRequest): bool = - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) for i in 0 ..< items.len: if itemsProcessed.contains(items[i]): return false return true proc addRequestToSaturatedQueue(): Future[StorageRequest] {.async.} = - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = - await sleepAsync(10.millis) - itemsProcessed.add item - done.complete() + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = + try: + await sleepAsync(10.millis) + itemsProcessed.add item + except CancelledError as exc: + checkpoint(exc.msg) + finally: + if not done.finished: + done.complete() var request1 = StorageRequest.example request1.ask.collateralPerByte = 
request.ask.collateralPerByte + 1 @@ -256,12 +272,15 @@ asyncchecksuite "Sales": waitFor run() test "processes all request's slots once StorageRequested emitted": - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = itemsProcessed.add item - done.complete() + if not done.finished: + done.complete() createAvailability() await market.requestStorage(request) - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check eventually items.allIt(itemsProcessed.contains(it)) test "removes slots from slot queue once RequestCancelled emitted": @@ -281,31 +300,43 @@ asyncchecksuite "Sales": test "removes slot index from slot queue once SlotFilled emitted": let request1 = await addRequestToSaturatedQueue() - market.emitSlotFilled(request1.id, 1.u256) - let expected = SlotQueueItem.init(request1, 1'u16) + market.emitSlotFilled(request1.id, 1.uint64) + let expected = + SlotQueueItem.init(request1, 1'u16, collateral = request1.ask.collateralPerSlot) check always (not itemsProcessed.contains(expected)) test "removes slot index from slot queue once SlotReservationsFull emitted": let request1 = await addRequestToSaturatedQueue() - market.emitSlotReservationsFull(request1.id, 1.u256) - let expected = SlotQueueItem.init(request1, 1'u16) + market.emitSlotReservationsFull(request1.id, 1.uint64) + let expected = + SlotQueueItem.init(request1, 1'u16, collateral = request1.ask.collateralPerSlot) check always (not itemsProcessed.contains(expected)) test "adds slot index to slot queue once SlotFreed emitted": - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = itemsProcessed.add item - done.complete() + if not done.finished: + done.complete() createAvailability() 
market.requested.add request # "contract" must be able to return request - market.emitSlotFreed(request.id, 2.u256) - let expected = SlotQueueItem.init(request, 2.uint16) + market.emitSlotFreed(request.id, 2.uint64) + + without collateralPerSlot =? await market.slotCollateral(request.id, 2.uint64), + error: + fail() + + let expected = + SlotQueueItem.init(request, 2.uint16, collateral = request.ask.collateralPerSlot) + check eventually itemsProcessed.contains(expected) test "items in queue are readded (and marked seen) once ignored": await market.requestStorage(request) - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check eventually queue.len > 0 # queue starts paused, allow items to be added to the queue check eventually queue.paused @@ -326,7 +357,7 @@ asyncchecksuite "Sales": test "queue is paused once availability is insufficient to service slots in queue": createAvailability() # enough to fill a single slot await market.requestStorage(request) - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check eventually queue.len > 0 # queue starts paused, allow items to be added to the queue check eventually queue.paused @@ -343,27 +374,27 @@ asyncchecksuite "Sales": test "availability size is reduced by request slot size when fully downloaded": sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = let blk = bt.Block.new(@[1.byte]).get - await onBatch(blk.repeat(request.ask.slotSize.truncate(int))) + await onBatch(blk.repeat(request.ask.slotSize.int)) createAvailability() await market.requestStorage(request) check eventually getAvailability().freeSize == availability.freeSize - request.ask.slotSize - test "non-downloaded bytes are returned to availability once finished": - var 
slotIndex = 0.u256 + test "bytes are returned to availability once finished": + var slotIndex = 0.uint64 sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = slotIndex = slot let blk = bt.Block.new(@[1.byte]).get - await onBatch(@[blk]) + await onBatch(blk.repeat(request.ask.slotSize)) let sold = newFuture[void]() - sales.onSale = proc(request: StorageRequest, slotIndex: UInt256) = + sales.onSale = proc(request: StorageRequest, slotIndex: uint64) = sold.complete() createAvailability() @@ -374,9 +405,9 @@ asyncchecksuite "Sales": # complete request market.slotState[request.slotId(slotIndex)] = SlotState.Finished - clock.advance(request.ask.duration.truncate(int64)) + clock.advance(request.ask.duration.int64) - check eventually getAvailability().freeSize == origSize - 1 + check eventually getAvailability().freeSize == origSize test "ignores download when duration not long enough": availability.duration = request.ask.duration - 1 @@ -406,17 +437,45 @@ asyncchecksuite "Sales": test "ignores request when slot state is not free": createAvailability() await market.requestStorage(request) - market.slotState[request.slotId(0.u256)] = SlotState.Filled - market.slotState[request.slotId(1.u256)] = SlotState.Filled - market.slotState[request.slotId(2.u256)] = SlotState.Filled - market.slotState[request.slotId(3.u256)] = SlotState.Filled + market.slotState[request.slotId(0.uint64)] = SlotState.Filled + market.slotState[request.slotId(1.uint64)] = SlotState.Filled + market.slotState[request.slotId(2.uint64)] = SlotState.Filled + market.slotState[request.slotId(3.uint64)] = SlotState.Filled check wasIgnored() + test "ignores request when availability is not enabled": + createAvailability(enabled = false) + await market.requestStorage(request) + check wasIgnored() + + test "ignores request when availability until terminates before the 
duration": + let until = getTime().toUnix() + createAvailability(until = until) + await market.requestStorage(request) + + check wasIgnored() + + test "retrieves request when availability until terminates after the duration": + let requestEnd = getTime().toUnix() + cast[int64](request.ask.duration) + let until = requestEnd + 1 + createAvailability(until = until) + + var storingRequest: StorageRequest + sales.onStore = proc( + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false + ): Future[?!void] {.async.} = + storingRequest = request + return success() + + market.requestEnds[request.id] = requestEnd + await market.requestStorage(request) + check eventually storingRequest == request + test "retrieves and stores data locally": var storingRequest: StorageRequest - var storingSlot: UInt256 + var storingSlot: uint64 sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = storingRequest = request storingSlot = slot @@ -424,29 +483,12 @@ asyncchecksuite "Sales": createAvailability() await market.requestStorage(request) check eventually storingRequest == request - check storingSlot < request.ask.slots.u256 - - test "handles errors during state run": - var saleFailed = false - sales.onProve = proc( - slot: Slot, challenge: ProofChallenge - ): Future[?!Groth16Proof] {.async.} = - # raise exception so machine.onError is called - raise newException(ValueError, "some error") - - # onClear is called in SaleErrored.run - sales.onClear = proc(request: StorageRequest, idx: UInt256) = - saleFailed = true - createAvailability() - await market.requestStorage(request) - await allowRequestToStart() - - check eventually saleFailed + check storingSlot < request.ask.slots test "makes storage available again when data retrieval fails": let error = newException(IOError, "data retrieval failed") sales.onStore = proc( - request: 
StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = return failure(error) createAvailability() @@ -455,7 +497,7 @@ asyncchecksuite "Sales": test "generates proof of storage": var provingRequest: StorageRequest - var provingSlot: UInt256 + var provingSlot: uint64 sales.onProve = proc( slot: Slot, challenge: ProofChallenge ): Future[?!Groth16Proof] {.async.} = @@ -467,7 +509,7 @@ asyncchecksuite "Sales": await allowRequestToStart() check eventually provingRequest == request - check provingSlot < request.ask.slots.u256 + check provingSlot < request.ask.slots test "fills a slot": createAvailability() @@ -476,14 +518,14 @@ asyncchecksuite "Sales": check eventually market.filled.len > 0 check market.filled[0].requestId == request.id - check market.filled[0].slotIndex < request.ask.slots.u256 + check market.filled[0].slotIndex < request.ask.slots check market.filled[0].proof == proof check market.filled[0].host == await market.getSigner() test "calls onFilled when slot is filled": var soldRequest = StorageRequest.default - var soldSlotIndex = UInt256.high - sales.onSale = proc(request: StorageRequest, slotIndex: UInt256) = + var soldSlotIndex = uint64.high + sales.onSale = proc(request: StorageRequest, slotIndex: uint64) = soldRequest = request soldSlotIndex = slotIndex createAvailability() @@ -491,7 +533,7 @@ asyncchecksuite "Sales": await allowRequestToStart() check eventually soldRequest == request - check soldSlotIndex < request.ask.slots.u256 + check soldSlotIndex < request.ask.slots test "calls onClear when storage becomes available again": # fail the proof intentionally to trigger `agent.finish(success=false)`, @@ -501,8 +543,8 @@ asyncchecksuite "Sales": ): Future[?!Groth16Proof] {.async.} = raise newException(IOError, "proof failed") var clearedRequest: StorageRequest - var clearedSlotIndex: UInt256 - sales.onClear = proc(request: StorageRequest, 
slotIndex: UInt256) = + var clearedSlotIndex: uint64 + sales.onClear = proc(request: StorageRequest, slotIndex: uint64) = clearedRequest = request clearedSlotIndex = slotIndex createAvailability() @@ -510,19 +552,19 @@ asyncchecksuite "Sales": await allowRequestToStart() check eventually clearedRequest == request - check clearedSlotIndex < request.ask.slots.u256 + check clearedSlotIndex < request.ask.slots test "makes storage available again when other host fills the slot": let otherHost = Address.example sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() createAvailability() await market.requestStorage(request) for slotIndex in 0 ..< request.ask.slots: - market.fillSlot(request.id, slotIndex.u256, proof, otherHost) + market.fillSlot(request.id, slotIndex.uint64, proof, otherHost) check eventually (await reservations.all(Availability)).get == @[availability] test "makes storage available again when request expires": @@ -531,7 +573,7 @@ asyncchecksuite "Sales": let origSize = availability.freeSize sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() @@ -551,12 +593,14 @@ asyncchecksuite "Sales": # ensure only one slot, otherwise once bytes are returned to the # availability, the queue will be unpaused and availability will be consumed # by other slots - request.ask.slots = 1.uint64 + request.ask.slots = 1 market.requestExpiry[request.id] = expiry + market.requestEnds[request.id] = + getTime().toUnix() + cast[int64](request.ask.duration) let origSize = availability.freeSize sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, 
slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() @@ -583,21 +627,19 @@ asyncchecksuite "Sales": market.requestState[request.id] = RequestState.New market.requestEnds[request.id] = request.expiry.toSecondsSince1970 - proc fillSlot(slotIdx: UInt256 = 0.u256) {.async.} = + proc fillSlot(slotIdx: uint64 = 0) {.async.} = let address = await market.getSigner() let slot = MockSlot(requestId: request.id, slotIndex: slotIdx, proof: proof, host: address) market.filled.add slot market.slotState[slotId(request.id, slotIdx)] = SlotState.Filled - let slot0 = - MockSlot(requestId: request.id, slotIndex: 0.u256, proof: proof, host: me) + let slot0 = MockSlot(requestId: request.id, slotIndex: 0, proof: proof, host: me) await fillSlot(slot0.slotIndex) - let slot1 = - MockSlot(requestId: request.id, slotIndex: 1.u256, proof: proof, host: me) + let slot1 = MockSlot(requestId: request.id, slotIndex: 1, proof: proof, host: me) await fillSlot(slot1.slotIndex) - market.activeSlots[me] = @[request.slotId(0.u256), request.slotId(1.u256)] + market.activeSlots[me] = @[request.slotId(0), request.slotId(1)] market.requested = @[request] market.activeRequests[me] = @[request.id] @@ -605,18 +647,36 @@ asyncchecksuite "Sales": check eventually sales.agents.len == 2 check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.uint64 ) check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.uint64 ) test "deletes inactive reservations on load": createAvailability() + let validUntil = getTime().toUnix() + 30.SecondsSince1970 discard await reservations.createReservation( - availability.id, 100.u256, RequestId.example, UInt256.example, UInt256.example + availability.id, 
100.uint64, RequestId.example, 0.uint64, UInt256.example, + validUntil, ) check (await reservations.all(Reservation)).get.len == 1 await sales.load() check (await reservations.all(Reservation)).get.len == 0 check getAvailability().freeSize == availability.freeSize # was restored + + test "update an availability fails when trying change the until date before an existing reservation": + let until = getTime().toUnix() + 300.SecondsSince1970 + createAvailability(until = until) + + market.requestEnds[request.id] = + getTime().toUnix() + cast[int64](request.ask.duration) + + await market.requestStorage(request) + await allowRequestToStart() + + availability.until = getTime().toUnix() + + let result = await reservations.update(availability) + check result.isErr + check result.error of UntilOutOfBoundsError diff --git a/tests/codex/sales/testsalesagent.nim b/tests/codex/sales/testsalesagent.nim index f17711d3..c795904d 100644 --- a/tests/codex/sales/testsalesagent.nim +++ b/tests/codex/sales/testsalesagent.nim @@ -4,7 +4,6 @@ import pkg/codex/sales import pkg/codex/sales/salesagent import pkg/codex/sales/salescontext import pkg/codex/sales/statemachine -import pkg/codex/sales/states/errorhandling import ../../asynctest import ../helpers/mockmarket @@ -15,18 +14,12 @@ import ../examples var onCancelCalled = false var onFailedCalled = false var onSlotFilledCalled = false -var onErrorCalled = false -type - MockState = ref object of SaleState - MockErrorState = ref object of ErrorHandlingState +type MockState = ref object of SaleState method `$`*(state: MockState): string = "MockState" -method `$`*(state: MockErrorState): string = - "MockErrorState" - method onCancelled*(state: MockState, request: StorageRequest): ?State = onCancelCalled = true @@ -34,31 +27,24 @@ method onFailed*(state: MockState, request: StorageRequest): ?State = onFailedCalled = true method onSlotFilled*( - state: MockState, requestId: RequestId, slotIndex: UInt256 + state: MockState, requestId: RequestId, 
slotIndex: uint64 ): ?State = onSlotFilledCalled = true -method onError*(state: MockErrorState, err: ref CatchableError): ?State = - onErrorCalled = true - -method run*(state: MockErrorState, machine: Machine): Future[?State] {.async.} = - raise newException(ValueError, "failure") - asyncchecksuite "Sales agent": let request = StorageRequest.example var agent: SalesAgent var context: SalesContext - var slotIndex: UInt256 + var slotIndex: uint64 var market: MockMarket var clock: MockClock setup: market = MockMarket.new() - market.requestExpiry[request.id] = - getTime().toUnix() + request.expiry.truncate(int64) + market.requestExpiry[request.id] = getTime().toUnix() + request.expiry.int64 clock = MockClock.new() context = SalesContext(market: market, clock: clock) - slotIndex = 0.u256 + slotIndex = 0.uint64 onCancelCalled = false onFailedCalled = false onSlotFilledCalled = false @@ -123,7 +109,9 @@ asyncchecksuite "Sales agent": agent.start(MockState.new()) await agent.subscribe() agent.onFulfilled(request.id) - check eventually agent.data.cancelled.cancelled() + # Note: futures that are cancelled, and do not re-raise the CancelledError + # will have a state of completed, not cancelled. 
+ check eventually agent.data.cancelled.completed() test "current state onFailed called when onFailed called": agent.start(MockState.new()) @@ -134,7 +122,3 @@ asyncchecksuite "Sales agent": agent.start(MockState.new()) agent.onSlotFilled(request.id, slotIndex) check eventually onSlotFilledCalled - - test "ErrorHandlingState.onError can be overridden at the state level": - agent.start(MockErrorState.new()) - check eventually onErrorCalled diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim index 2e0759ee..7abad7eb 100644 --- a/tests/codex/sales/testslotqueue.nim +++ b/tests/codex/sales/testslotqueue.nim @@ -50,12 +50,19 @@ suite "Slot queue start/stop": suite "Slot queue workers": var queue: SlotQueue - proc onProcessSlot(item: SlotQueueItem, doneProcessing: Future[void]) {.async.} = - await sleepAsync(1000.millis) + proc onProcessSlot( + item: SlotQueueItem, doneProcessing: Future[void] + ) {.async: (raises: []).} = # this is not illustrative of the realistic scenario as the # `doneProcessing` future would be passed to another context before being # completed and therefore is not as simple as making the callback async - doneProcessing.complete() + try: + await sleepAsync(1000.millis) + except CatchableError as exc: + checkpoint(exc.msg) + finally: + if not doneProcessing.finished: + doneProcessing.complete() setup: let request = StorageRequest.example @@ -89,9 +96,14 @@ suite "Slot queue workers": check eventually queue.activeWorkers == 3 test "discards workers once processing completed": - proc processSlot(item: SlotQueueItem, done: Future[void]) {.async.} = - await sleepAsync(1.millis) - done.complete() + proc processSlot(item: SlotQueueItem, done: Future[void]) {.async: (raises: []).} = + try: + await sleepAsync(1.millis) + except CatchableError as exc: + checkpoint(exc.msg) + finally: + if not done.finished: + done.complete() queue.onProcessSlot = processSlot @@ -114,11 +126,19 @@ suite "Slot queue": proc 
newSlotQueue(maxSize, maxWorkers: int, processSlotDelay = 1.millis) = queue = SlotQueue.new(maxWorkers, maxSize.uint16) - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = - await sleepAsync(processSlotDelay) - onProcessSlotCalled = true - onProcessSlotCalledWith.add (item.requestId, item.slotIndex) - done.complete() + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = + try: + await sleepAsync(processSlotDelay) + except CatchableError as exc: + checkpoint(exc.msg) + finally: + onProcessSlotCalled = true + onProcessSlotCalledWith.add (item.requestId, item.slotIndex) + if not done.finished: + done.complete() + queue.start() setup: @@ -146,21 +166,23 @@ suite "Slot queue": test "correctly compares SlotQueueItems": var requestA = StorageRequest.example - requestA.ask.duration = 1.u256 + requestA.ask.duration = 1.uint64 requestA.ask.pricePerBytePerSecond = 1.u256 - check requestA.ask.pricePerSlot == 1.u256 * requestA.ask.slotSize + check requestA.ask.pricePerSlot == 1.u256 * requestA.ask.slotSize.u256 requestA.ask.collateralPerByte = 100000.u256 - requestA.expiry = 1001.u256 + requestA.expiry = 1001.uint64 var requestB = StorageRequest.example - requestB.ask.duration = 100.u256 + requestB.ask.duration = 100.uint64 requestB.ask.pricePerBytePerSecond = 1000.u256 - check requestB.ask.pricePerSlot == 100000.u256 * requestB.ask.slotSize + check requestB.ask.pricePerSlot == 100000.u256 * requestB.ask.slotSize.u256 requestB.ask.collateralPerByte = 1.u256 - requestB.expiry = 1000.u256 + requestB.expiry = 1000.uint64 - let itemA = SlotQueueItem.init(requestA, 0) - let itemB = SlotQueueItem.init(requestB, 0) + let itemA = + SlotQueueItem.init(requestA, 0, collateral = requestA.ask.collateralPerSlot) + let itemB = + SlotQueueItem.init(requestB, 0, collateral = requestB.ask.collateralPerSlot) check itemB < itemA # B higher priority than A check itemA > itemB @@ -169,21 +191,21 @@ suite "Slot queue": let 
itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 2.u256, # profitability is higher (good) - collateralPerByte: 1.u256, - expiry: 1.u256, + collateral: 1.u256, + expiry: 1.uint64, seen: true, # seen (bad), more weight than profitability ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, # profitability is lower (bad) - collateralPerByte: 1.u256, - expiry: 1.u256, + collateral: 1.u256, + expiry: 1.uint64, seen: false, # not seen (good) ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # B higher priority than A @@ -194,22 +216,22 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, # reward is lower (bad) - collateralPerByte: 1.u256, # collateral is lower (good) - expiry: 1.u256, + collateral: 1.u256, # collateral is lower (good) + expiry: 1.uint64, seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 2.u256, # reward is higher (good), more weight than collateral - collateralPerByte: 2.u256, # collateral is higher (bad) - expiry: 1.u256, + collateral: 2.u256, # collateral is higher (bad) + expiry: 1.uint64, seen: false, ) @@ -220,21 +242,21 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 2.u256, # collateral is higher (bad) - expiry: 2.u256, # expiry is longer (good) + collateral: 2.u256, # collateral is higher (bad) + expiry: 2.uint64, # expiry is 
longer (good) seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, # collateral is lower (good), more weight than expiry - expiry: 1.u256, # expiry is shorter (bad) + collateral: 1.u256, # collateral is lower (good), more weight than expiry + expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) @@ -245,21 +267,21 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, # slotSize is smaller (good) - duration: 1.u256, + slotSize: 1.uint64, # slotSize is smaller (good) + duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, - expiry: 1.u256, # expiry is shorter (bad) + collateral: 1.u256, + expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 2.u256, # slotSize is larger (bad) - duration: 1.u256, + slotSize: 2.uint64, # slotSize is larger (bad) + duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, - expiry: 2.u256, # expiry is longer (good), more weight than slotSize + collateral: 1.u256, + expiry: 2.uint64, # expiry is longer (good), more weight than slotSize seen: false, ) @@ -270,21 +292,21 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 2.u256, # slotSize is larger (bad) - duration: 1.u256, + slotSize: 2.uint64, # slotSize is larger (bad) + duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, - expiry: 1.u256, # expiry is shorter (bad) + collateral: 1.u256, + expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, # slotSize is smaller (good) - duration: 1.u256, + slotSize: 1.uint64, # slotSize is smaller (good) + duration: 
1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, - expiry: 1.u256, + collateral: 1.u256, + expiry: 1.uint64, seen: false, ) @@ -292,11 +314,16 @@ suite "Slot queue": test "expands available all possible slot indices on init": let request = StorageRequest.example - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check items.len.uint64 == request.ask.slots var checked = 0 for slotIndex in 0'u16 ..< request.ask.slots.uint16: - check items.anyIt(it == SlotQueueItem.init(request, slotIndex)) + check items.anyIt( + it == + SlotQueueItem.init( + request, slotIndex, collateral = request.ask.collateralPerSlot + ) + ) inc checked check checked == items.len @@ -322,34 +349,17 @@ suite "Slot queue": check isOk queue.push(item3) check isOk queue.push(item4) - test "populates item with exisiting request metadata": - newSlotQueue(maxSize = 8, maxWorkers = 1, processSlotDelay = 10.millis) - let request0 = StorageRequest.example - var request1 = StorageRequest.example - request1.ask.collateralPerByte += 1.u256 - let items0 = SlotQueueItem.init(request0) - let items1 = SlotQueueItem.init(request1) - check queue.push(items0).isOk - check queue.push(items1).isOk - let populated = !queue.populateItem(request1.id, 12'u16) - check populated.requestId == request1.id - check populated.slotIndex == 12'u16 - check populated.slotSize == request1.ask.slotSize - check populated.duration == request1.ask.duration - check populated.pricePerBytePerSecond == request1.ask.pricePerBytePerSecond - check populated.collateralPerByte == request1.ask.collateralPerByte - - test "does not find exisiting request metadata": - newSlotQueue(maxSize = 2, maxWorkers = 2) - let item = SlotQueueItem.example - check queue.populateItem(item.requestId, 12'u16).isNone - test "can support uint16.high slots": var request = StorageRequest.example let maxUInt16 = uint16.high let uint64Slots = uint64(maxUInt16) 
request.ask.slots = uint64Slots - let items = SlotQueueItem.init(request.id, request.ask, request.expiry) + let items = SlotQueueItem.init( + request.id, + request.ask, + request.expiry, + collateral = request.ask.collateralPerSlot, + ) check items.len.uint16 == maxUInt16 test "cannot support greater than uint16.high slots": @@ -358,7 +368,12 @@ suite "Slot queue": let uint64Slots = uint64(int32Slots) request.ask.slots = uint64Slots expect SlotsOutOfRangeError: - discard SlotQueueItem.init(request.id, request.ask, request.expiry) + discard SlotQueueItem.init( + request.id, + request.ask, + request.expiry, + collateral = request.ask.collateralPerSlot, + ) test "cannot push duplicate items": newSlotQueue(maxSize = 6, maxWorkers = 1, processSlotDelay = 15.millis) @@ -399,8 +414,10 @@ suite "Slot queue": let request0 = StorageRequest.example var request1 = StorageRequest.example request1.ask.collateralPerByte += 1.u256 - let items0 = SlotQueueItem.init(request0) - let items1 = SlotQueueItem.init(request1) + let items0 = + SlotQueueItem.init(request0, collateral = request0.ask.collateralPerSlot) + let items1 = + SlotQueueItem.init(request1, collateral = request1.ask.collateralPerSlot) check queue.push(items0).isOk check queue.push(items1).isOk let last = items1[items1.high] @@ -413,8 +430,10 @@ suite "Slot queue": let request0 = StorageRequest.example var request1 = StorageRequest.example request1.ask.collateralPerByte += 1.u256 - let items0 = SlotQueueItem.init(request0) - let items1 = SlotQueueItem.init(request1) + let items0 = + SlotQueueItem.init(request0, collateral = request0.ask.collateralPerSlot) + let items1 = + SlotQueueItem.init(request1, collateral = request1.ask.collateralPerSlot) check queue.push(items0).isOk check queue.push(items1).isOk queue.delete(request1.id) @@ -433,42 +452,56 @@ suite "Slot queue": request3.ask.collateralPerByte = request2.ask.collateralPerByte + 1 request4.ask.collateralPerByte = request3.ask.collateralPerByte + 1 
request5.ask.collateralPerByte = request4.ask.collateralPerByte + 1 - let item0 = SlotQueueItem.init(request0, 0) - let item1 = SlotQueueItem.init(request1, 0) - let item2 = SlotQueueItem.init(request2, 0) - let item3 = SlotQueueItem.init(request3, 0) - let item4 = SlotQueueItem.init(request4, 0) - let item5 = SlotQueueItem.init(request5, 0) + let item0 = + SlotQueueItem.init(request0, 0, collateral = request0.ask.collateralPerSlot) + let item1 = + SlotQueueItem.init(request1, 0, collateral = request1.ask.collateralPerSlot) + let item2 = + SlotQueueItem.init(request2, 0, collateral = request2.ask.collateralPerSlot) + let item3 = + SlotQueueItem.init(request3, 0, collateral = request3.ask.collateralPerSlot) + let item4 = + SlotQueueItem.init(request4, 0, collateral = request4.ask.collateralPerSlot) + let item5 = + SlotQueueItem.init(request5, 0, collateral = request5.ask.collateralPerSlot) check queue.contains(item5) == false check queue.push(@[item0, item1, item2, item3, item4, item5]).isOk check queue.contains(item5) test "sorts items by profitability descending (higher pricePerBytePerSecond == higher priority == goes first in the list)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) check item1 < item0 - test "sorts items by collateral ascending (higher required collateralPerByte = lower priority == comes later in the list)": + test "sorts items by collateral ascending (higher required collateral = lower priority == comes later in the list)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) - request.ask.collateralPerByte += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item0 = + SlotQueueItem.init(request, 0, 
collateral = request.ask.collateralPerSlot) + let item1 = SlotQueueItem.init( + request, 1, collateral = request.ask.collateralPerSlot + 1.u256 + ) check item1 > item0 test "sorts items by expiry descending (longer expiry = higher priority)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) - request.expiry += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) + request.expiry += 1 + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) check item1 < item0 test "sorts items by slot size descending (bigger dataset = higher profitability = higher priority)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) - request.ask.slotSize += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) + request.ask.slotSize += 1 + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) check item1 < item0 test "should call callback once an item is added": @@ -489,13 +522,17 @@ suite "Slot queue": # sleeping after push allows the slotqueue loop to iterate, # calling the callback for each pushed/updated item var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item2 = SlotQueueItem.init(request, 2) + let item2 = + SlotQueueItem.init(request, 2, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item3 = SlotQueueItem.init(request, 3) + let item3 = + SlotQueueItem.init(request, 3, collateral = 
request.ask.collateralPerSlot) check queue.push(item0).isOk await sleepAsync(1.millis) @@ -520,13 +557,17 @@ suite "Slot queue": # sleeping after push allows the slotqueue loop to iterate, # calling the callback for each pushed/updated item var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item2 = SlotQueueItem.init(request, 2) + let item2 = + SlotQueueItem.init(request, 2, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item3 = SlotQueueItem.init(request, 3) + let item3 = + SlotQueueItem.init(request, 3, collateral = request.ask.collateralPerSlot) check queue.push(item0).isOk check queue.push(item1).isOk @@ -550,7 +591,7 @@ suite "Slot queue": queue.pause let request = StorageRequest.example - var items = SlotQueueItem.init(request) + var items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check queue.push(items).isOk # check all items processed check eventually queue.len == 0 @@ -558,8 +599,14 @@ suite "Slot queue": test "pushing seen item does not unpause queue": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let item0 = - SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true) + let item0 = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) check queue.paused check queue.push(item0).isOk check queue.paused @@ -567,8 +614,14 @@ suite "Slot queue": test "paused queue waits for unpause before continuing processing": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let item = - 
SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = false) + let item = SlotQueueItem.init( + request.id, + 1'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = false, + ) check queue.paused # push causes unpause check queue.push(item).isOk @@ -579,10 +632,22 @@ suite "Slot queue": test "processing a 'seen' item pauses the queue": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let unseen = - SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false) - let seen = - SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) + let unseen = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = false, + ) + let seen = SlotQueueItem.init( + request.id, + 1'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) # push causes unpause check queue.push(unseen).isSuccess # check all items processed @@ -595,10 +660,22 @@ suite "Slot queue": test "processing a 'seen' item does not decrease the number of workers": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let unseen = - SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false) - let seen = - SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) + let unseen = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = false, + ) + let seen = SlotQueueItem.init( + request.id, + 1'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) # push seen item to ensure that queue is pausing check queue.push(seen).isSuccess # unpause and pause a number of times @@ -615,10 +692,22 @@ suite "Slot queue": test "item 'seen' flags can be cleared": newSlotQueue(maxSize = 4, maxWorkers = 1) let request = StorageRequest.example - let item0 = - 
SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true) - let item1 = - SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) + let item0 = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) + let item1 = SlotQueueItem.init( + request.id, + 1'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) check queue.push(item0).isOk check queue.push(item1).isOk check queue[0].seen diff --git a/tests/codex/slots/helpers.nim b/tests/codex/slots/helpers.nim index 03d87d12..fced1f1c 100644 --- a/tests/codex/slots/helpers.nim +++ b/tests/codex/slots/helpers.nim @@ -15,9 +15,7 @@ import pkg/codex/rng import ../helpers -proc storeManifest*( - store: BlockStore, manifest: Manifest -): Future[?!bt.Block] {.async.} = +proc makeManifestBlock*(manifest: Manifest): ?!bt.Block = without encodedVerifiable =? manifest.encode(), err: trace "Unable to encode manifest" return failure(err) @@ -26,6 +24,15 @@ proc storeManifest*( trace "Unable to create block from manifest" return failure(error) + success blk + +proc storeManifest*( + store: BlockStore, manifest: Manifest +): Future[?!bt.Block] {.async.} = + without blk =? makeManifestBlock(manifest), err: + trace "Unable to create manifest block", err = err.msg + return failure(err) + if err =? (await store.putBlock(blk)).errorOption: trace "Unable to store manifest block", cid = blk.cid, err = err.msg return failure(err) diff --git a/tests/codex/slots/testslotbuilder.nim b/tests/codex/slots/testslotbuilder.nim index ef83bdee..9a2043a8 100644 --- a/tests/codex/slots/testslotbuilder.nim +++ b/tests/codex/slots/testslotbuilder.nim @@ -133,7 +133,7 @@ suite "Slot builder": check: Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == - "Number of blocks must be divisable by number of slots." + "Number of blocks must be divisible by number of slots." 
test "Block size must be divisable by cell size": let mismatchManifest = Manifest.new( @@ -151,7 +151,7 @@ suite "Slot builder": check: Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == - "Block size must be divisable by cell size." + "Block size must be divisible by cell size." test "Should build correct slot builder": builder = diff --git a/tests/codex/stores/repostore/testcoders.nim b/tests/codex/stores/repostore/testcoders.nim index f4d2b5e7..9d341af0 100644 --- a/tests/codex/stores/repostore/testcoders.nim +++ b/tests/codex/stores/repostore/testcoders.nim @@ -1,6 +1,6 @@ -import std/unittest import std/random +import pkg/unittest2 import pkg/stew/objects import pkg/questionable import pkg/questionable/results @@ -11,7 +11,7 @@ import pkg/codex/stores/repostore/coders import ../../helpers -checksuite "Test coders": +suite "Test coders": proc rand(T: type NBytes): T = rand(Natural).NBytes diff --git a/tests/codex/stores/testcachestore.nim b/tests/codex/stores/testcachestore.nim index e7025388..03075e1a 100644 --- a/tests/codex/stores/testcachestore.nim +++ b/tests/codex/stores/testcachestore.nim @@ -11,7 +11,7 @@ import ./commonstoretests import ../../asynctest import ../helpers -checksuite "Cache Store": +suite "Cache Store": var newBlock, newBlock1, newBlock2, newBlock3: Block store: CacheStore diff --git a/tests/codex/stores/testkeyutils.nim b/tests/codex/stores/testkeyutils.nim index 238e2681..86365c5c 100644 --- a/tests/codex/stores/testkeyutils.nim +++ b/tests/codex/stores/testkeyutils.nim @@ -36,7 +36,7 @@ proc createManifestCid(): ?!Cid = let cid = ?Cid.init(version, codec, hash).mapFailure return success cid -checksuite "KeyUtils": +suite "KeyUtils": test "makePrefixKey should create block key": let length = 6 let cid = Cid.example diff --git a/tests/codex/stores/testmaintenance.nim b/tests/codex/stores/testmaintenance.nim index e5ff519e..89e75700 100644 --- a/tests/codex/stores/testmaintenance.nim +++ 
b/tests/codex/stores/testmaintenance.nim @@ -21,7 +21,7 @@ import ../examples import codex/stores/maintenance -checksuite "BlockMaintainer": +suite "BlockMaintainer": var mockRepoStore: MockRepoStore var interval: Duration var mockTimer: MockTimer diff --git a/tests/codex/stores/testqueryiterhelper.nim b/tests/codex/stores/testqueryiterhelper.nim index 5d3d68fd..4e83dad4 100644 --- a/tests/codex/stores/testqueryiterhelper.nim +++ b/tests/codex/stores/testqueryiterhelper.nim @@ -1,6 +1,6 @@ import std/sugar -import pkg/stew/results +import pkg/results import pkg/questionable import pkg/chronos import pkg/datastore/typedds diff --git a/tests/codex/stores/testrepostore.nim b/tests/codex/stores/testrepostore.nim index dda4ed82..5274d046 100644 --- a/tests/codex/stores/testrepostore.nim +++ b/tests/codex/stores/testrepostore.nim @@ -12,9 +12,11 @@ import pkg/datastore import pkg/codex/stores/cachestore import pkg/codex/chunker import pkg/codex/stores +import pkg/codex/stores/repostore/operations import pkg/codex/blocktype as bt import pkg/codex/clock import pkg/codex/utils/asynciter +import pkg/codex/merkletree/codex import ../../asynctest import ../helpers @@ -22,7 +24,7 @@ import ../helpers/mockclock import ../examples import ./commonstoretests -checksuite "Test RepoStore start/stop": +suite "Test RepoStore start/stop": var repoDs: Datastore metaDs: Datastore @@ -354,6 +356,119 @@ asyncchecksuite "RepoStore": check has.isOk check has.get + test "should set the reference count for orphan blocks to 0": + let blk = Block.example(size = 200) + (await repo.putBlock(blk)).tryGet() + check (await repo.blockRefCount(blk.cid)).tryGet() == 0.Natural + + test "should not allow non-orphan blocks to be deleted directly": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = makeManifestAndTree(dataset).tryGet() + treeCid = 
tree.rootCid.tryGet() + proof = tree.getProof(0).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0, blk.cid, proof)).tryGet() + + let err = (await repo.delBlock(blk.cid)).error() + check err.msg == + "Directly deleting a block that is part of a dataset is not allowed." + + test "should allow non-orphan blocks to be deleted by dataset reference": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = makeManifestAndTree(dataset).tryGet() + treeCid = tree.rootCid.tryGet() + proof = tree.getProof(0).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0, blk.cid, proof)).tryGet() + + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + check not (await blk.cid in repo) + + test "should not delete a non-orphan block until it is deleted from all parent datasets": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + blockPool = await makeRandomBlocks(datasetSize = 768, blockSize = 256'nb) + + let + dataset1 = @[blockPool[0], blockPool[1]] + dataset2 = @[blockPool[1], blockPool[2]] + + let sharedBlock = blockPool[1] + + let + (manifest1, tree1) = makeManifestAndTree(dataset1).tryGet() + treeCid1 = tree1.rootCid.tryGet() + (manifest2, tree2) = makeManifestAndTree(dataset2).tryGet() + treeCid2 = tree2.rootCid.tryGet() + + (await repo.putBlock(sharedBlock)).tryGet() + check (await repo.blockRefCount(sharedBlock.cid)).tryGet() == 0.Natural + + let + proof1 = tree1.getProof(1).tryGet() + proof2 = tree2.getProof(0).tryGet() + + (await repo.putCidAndProof(treeCid1, 1, sharedBlock.cid, proof1)).tryGet() + check (await repo.blockRefCount(sharedBlock.cid)).tryGet() == 1.Natural + + (await repo.putCidAndProof(treeCid2, 0, sharedBlock.cid, proof2)).tryGet() + check (await repo.blockRefCount(sharedBlock.cid)).tryGet() == 
2.Natural + + (await repo.delBlock(treeCid1, 1.Natural)).tryGet() + check (await repo.blockRefCount(sharedBlock.cid)).tryGet() == 1.Natural + check (await sharedBlock.cid in repo) + + (await repo.delBlock(treeCid2, 0.Natural)).tryGet() + check not (await sharedBlock.cid in repo) + + test "should clear leaf metadata when block is deleted from dataset": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = makeManifestAndTree(dataset).tryGet() + treeCid = tree.rootCid.tryGet() + proof = tree.getProof(1).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0.Natural, blk.cid, proof)).tryGet() + + discard (await repo.getLeafMetadata(treeCid, 0.Natural)).tryGet() + + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + + let err = (await repo.getLeafMetadata(treeCid, 0.Natural)).error() + check err of BlockNotFoundError + + test "should not fail when reinserting and deleting a previously deleted block (bug #1108)": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = makeManifestAndTree(dataset).tryGet() + treeCid = tree.rootCid.tryGet() + proof = tree.getProof(1).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0, blk.cid, proof)).tryGet() + + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + (await repo.putBlock(blk)).tryGet() + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + commonBlockStoreTests( "RepoStore Sql backend", proc(): BlockStore = diff --git a/tests/codex/testasyncheapqueue.nim b/tests/codex/testasyncheapqueue.nim index eb3767cd..2d2cfb0c 100644 --- a/tests/codex/testasyncheapqueue.nim +++ b/tests/codex/testasyncheapqueue.nim @@ -1,5 +1,5 @@ import pkg/chronos -import 
pkg/stew/results +import pkg/results import pkg/codex/utils/asyncheapqueue import pkg/codex/rng @@ -22,7 +22,7 @@ proc toSortedSeq[T](h: AsyncHeapQueue[T], queueType = QueueType.Min): seq[T] = while tmp.len > 0: result.add(popNoWait(tmp).tryGet()) -checksuite "Synchronous tests": +suite "Synchronous tests": test "Test pushNoWait - Min": var heap = newAsyncHeapQueue[int]() let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] diff --git a/tests/codex/testchunking.nim b/tests/codex/testchunking.nim index 2241a82b..44202c40 100644 --- a/tests/codex/testchunking.nim +++ b/tests/codex/testchunking.nim @@ -27,7 +27,7 @@ asyncchecksuite "Chunking": let contents = [1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0] proc reader( data: ChunkBuffer, len: int - ): Future[int] {.gcsafe, async, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = let read = min(contents.len - offset, len) if read == 0: return 0 @@ -97,8 +97,13 @@ asyncchecksuite "Chunking": discard (await chunker.getBytes()) test "stream should forward LPStreamError": - expect LPStreamError: + try: await raiseStreamException(newException(LPStreamError, "test error")) + except ChunkerError as exc: + check exc.parent of LPStreamError + except CatchableError as exc: + checkpoint("Unexpected error: " & exc.msg) + fail() test "stream should catch LPStreamEOFError": await raiseStreamException(newException(LPStreamEOFError, "test error")) @@ -106,7 +111,3 @@ asyncchecksuite "Chunking": test "stream should forward CancelledError": expect CancelledError: await raiseStreamException(newException(CancelledError, "test error")) - - test "stream should forward LPStreamError": - expect LPStreamError: - await raiseStreamException(newException(LPStreamError, "test error")) diff --git a/tests/codex/testclock.nim b/tests/codex/testclock.nim index 2b0158cf..967de672 100644 --- a/tests/codex/testclock.nim +++ b/tests/codex/testclock.nim @@ -1,9 +1,9 @@ -import std/unittest +import pkg/unittest2 import codex/clock 
import ./helpers -checksuite "Clock": +suite "Clock": proc testConversion(seconds: SecondsSince1970) = let asBytes = seconds.toBytes diff --git a/tests/codex/testerasure.nim b/tests/codex/testerasure.nim index 952497e9..5046bac2 100644 --- a/tests/codex/testerasure.nim +++ b/tests/codex/testerasure.nim @@ -1,5 +1,6 @@ import std/sequtils import std/sugar +import std/times import pkg/chronos import pkg/questionable/results @@ -11,6 +12,8 @@ import pkg/codex/blocktype as bt import pkg/codex/rng import pkg/codex/utils import pkg/codex/indexingstrategy +import pkg/taskpools +import pkg/codex/utils/arrayutils import ../asynctest import ./helpers @@ -27,6 +30,7 @@ suite "Erasure encode/decode": var erasure: Erasure let repoTmp = TempLevelDb.new() let metaTmp = TempLevelDb.new() + var taskpool: Taskpool setup: let @@ -35,12 +39,14 @@ suite "Erasure encode/decode": rng = Rng.instance() chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize) store = RepoStore.new(repoDs, metaDs) - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) + taskpool = Taskpool.new() + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool) manifest = await storeDataGetManifest(store, chunker) teardown: await repoTmp.destroyDb() await metaTmp.destroyDb() + taskpool.shutdown() proc encode(buffers, parity: int): Future[Manifest] {.async.} = let encoded = @@ -212,7 +218,7 @@ suite "Erasure encode/decode": let present = await store.hasBlock(manifest.treeCid, d) check present.tryGet() - test "handles edge case of 0 parity blocks": + test "Handles edge case of 0 parity blocks": const buffers = 20 parity = 0 @@ -221,6 +227,43 @@ suite "Erasure encode/decode": discard (await erasure.decode(encoded)).tryGet() + test "Should concurrently encode/decode multiple datasets": + const iterations = 5 + + let + datasetSize = 1.MiBs + ecK = 10.Natural + ecM = 10.Natural + + var encodeTasks = newSeq[Future[?!Manifest]]() + var decodeTasks = 
newSeq[Future[?!Manifest]]() + var manifests = newSeq[Manifest]() + for i in 0 ..< iterations: + let + # create random data and store it + blockSize = rng.sample(@[1, 2, 4, 8, 16, 32, 64].mapIt(it.KiBs)) + chunker = RandomChunker.new(rng, size = datasetSize, chunkSize = blockSize) + manifest = await storeDataGetManifest(store, chunker) + manifests.add(manifest) + # encode the data concurrently + encodeTasks.add(erasure.encode(manifest, ecK, ecM)) + # wait for all encoding tasks to finish + let encodeResults = await allFinished(encodeTasks) + # decode the data concurrently + for i in 0 ..< encodeResults.len: + decodeTasks.add(erasure.decode(encodeResults[i].read().tryGet())) + # wait for all decoding tasks to finish + let decodeResults = await allFinished(decodeTasks) # TODO: use allFutures + + for j in 0 ..< decodeTasks.len: + let + decoded = decodeResults[j].read().tryGet() + encoded = encodeResults[j].read().tryGet() + check: + decoded.treeCid == manifests[j].treeCid + decoded.treeCid == encoded.originalTreeCid + decoded.blocksCount == encoded.originalBlocksCount + test "Should handle verifiable manifests": const buffers = 20 @@ -259,3 +302,73 @@ suite "Erasure encode/decode": decoded.treeCid == manifest.treeCid decoded.treeCid == encoded.originalTreeCid decoded.blocksCount == encoded.originalBlocksCount + + test "Should complete encode/decode task when cancelled": + let + blocksLen = 10000 + parityLen = 10 + data = seq[seq[byte]].new() + chunker = RandomChunker.new( + rng, size = (blocksLen * BlockSize.int), chunkSize = BlockSize + ) + + data[].setLen(blocksLen) + + for i in 0 ..< blocksLen: + let chunk = await chunker.getBytes() + shallowCopy(data[i], @(chunk)) + + let + parity = createDoubleArray(parityLen, BlockSize.int) + paritySeq = seq[seq[byte]].new() + recovered = createDoubleArray(blocksLen, BlockSize.int) + cancelledTaskParity = createDoubleArray(parityLen, BlockSize.int) + cancelledTaskRecovered = createDoubleArray(blocksLen, BlockSize.int) + + 
paritySeq[].setLen(parityLen) + defer: + freeDoubleArray(parity, parityLen) + freeDoubleArray(cancelledTaskParity, parityLen) + freeDoubleArray(recovered, blocksLen) + freeDoubleArray(cancelledTaskRecovered, blocksLen) + + for i in 0 ..< parityLen: + paritySeq[i] = cast[seq[byte]](parity[i]) + + # call asyncEncode to get the parity + let encFut = + await erasure.asyncEncode(BlockSize.int, blocksLen, parityLen, data, parity) + check encFut.isOk + + let decFut = await erasure.asyncDecode( + BlockSize.int, blocksLen, parityLen, data, paritySeq, recovered + ) + check decFut.isOk + + # call asyncEncode and cancel the task + let encodeFut = erasure.asyncEncode( + BlockSize.int, blocksLen, parityLen, data, cancelledTaskParity + ) + encodeFut.cancel() + + try: + discard await encodeFut + except CatchableError as exc: + check exc of CancelledError + finally: + for i in 0 ..< parityLen: + check equalMem(parity[i], cancelledTaskParity[i], BlockSize.int) + + # call asyncDecode and cancel the task + let decodeFut = erasure.asyncDecode( + BlockSize.int, blocksLen, parityLen, data, paritySeq, cancelledTaskRecovered + ) + decodeFut.cancel() + + try: + discard await decodeFut + except CatchableError as exc: + check exc of CancelledError + finally: + for i in 0 ..< blocksLen: + check equalMem(recovered[i], cancelledTaskRecovered[i], BlockSize.int) diff --git a/tests/codex/testlogutils.nim b/tests/codex/testlogutils.nim index b2694ee9..2077fb81 100644 --- a/tests/codex/testlogutils.nim +++ b/tests/codex/testlogutils.nim @@ -1,6 +1,7 @@ import std/options import std/strutils -import std/unittest + +import pkg/unittest2 import pkg/codex/blocktype import pkg/codex/conf import pkg/codex/contracts/requests diff --git a/tests/codex/testmanifest.nim b/tests/codex/testmanifest.nim index 241bec61..ea9465d5 100644 --- a/tests/codex/testmanifest.nim +++ b/tests/codex/testmanifest.nim @@ -13,7 +13,7 @@ import ../asynctest import ./helpers import ./examples -checksuite "Manifest": +suite 
"Manifest": let manifest = Manifest.new(treeCid = Cid.example, blockSize = 1.MiBs, datasetSize = 100.MiBs) diff --git a/tests/codex/testnat.nim b/tests/codex/testnat.nim index 57f51d31..3981b2e6 100644 --- a/tests/codex/testnat.nim +++ b/tests/codex/testnat.nim @@ -1,7 +1,7 @@ import std/[unittest, options, net], stew/shims/net as stewNet import pkg/chronos import pkg/libp2p/[multiaddress, multihash, multicodec] -import pkg/stew/results +import pkg/results import ../../codex/nat import ../../codex/utils/natutils diff --git a/tests/codex/testpurchasing.nim b/tests/codex/testpurchasing.nim index bbab4197..1834ee03 100644 --- a/tests/codex/testpurchasing.nim +++ b/tests/codex/testpurchasing.nim @@ -28,8 +28,8 @@ asyncchecksuite "Purchasing": request = StorageRequest( ask: StorageAsk( slots: uint8.example.uint64, - slotSize: uint32.example.u256, - duration: uint16.example.u256, + slotSize: uint32.example.uint64, + duration: uint16.example.uint64, pricePerBytePerSecond: uint8.example.u256, ) ) @@ -100,7 +100,6 @@ asyncchecksuite "Purchasing": market.requestExpiry[populatedRequest.id] = expiry let purchase = await purchasing.purchase(populatedRequest) check eventually market.requested.len > 0 - let request = market.requested[0] clock.set(expiry + 1) expect PurchaseTimeout: @@ -117,7 +116,7 @@ asyncchecksuite "Purchasing": await purchase.wait() check market.withdrawn == @[request.id] -checksuite "Purchasing state machine": +suite "Purchasing state machine": var purchasing: Purchasing var market: MockMarket var clock: MockClock @@ -130,8 +129,8 @@ checksuite "Purchasing state machine": request = StorageRequest( ask: StorageAsk( slots: uint8.example.uint64, - slotSize: uint32.example.u256, - duration: uint16.example.u256, + slotSize: uint32.example.uint64, + duration: uint16.example.uint64, pricePerBytePerSecond: uint8.example.u256, ) ) @@ -185,7 +184,7 @@ checksuite "Purchasing state machine": test "moves to PurchaseStarted when request state is Started": let request = 
StorageRequest.example let purchase = Purchase.new(request, market, clock) - market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) + market.requestEnds[request.id] = clock.now() + request.ask.duration.int64 market.requested = @[request] market.requestState[request.id] = RequestState.Started let next = await PurchaseUnknown().run(purchase) @@ -218,7 +217,7 @@ checksuite "Purchasing state machine": test "moves to PurchaseFailed state once RequestFailed emitted": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) - market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) + market.requestEnds[request.id] = clock.now() + request.ask.duration.int64 let future = PurchaseStarted().run(purchase) market.emitRequestFailed(request.id) @@ -229,10 +228,10 @@ checksuite "Purchasing state machine": test "moves to PurchaseFinished state once request finishes": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) - market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) + market.requestEnds[request.id] = clock.now() + request.ask.duration.int64 let future = PurchaseStarted().run(purchase) - clock.advance(request.ask.duration.truncate(int64) + 1) + clock.advance(request.ask.duration.int64 + 1) let next = await future check !next of PurchaseFinished diff --git a/tests/codex/testsystemclock.nim b/tests/codex/testsystemclock.nim index 6f743283..3f179260 100644 --- a/tests/codex/testsystemclock.nim +++ b/tests/codex/testsystemclock.nim @@ -1,10 +1,10 @@ import std/times -import std/unittest -import codex/systemclock +import pkg/unittest2 +import pkg/codex/systemclock import ./helpers -checksuite "SystemClock": +suite "SystemClock": test "Should get now": let clock = SystemClock.new() diff --git a/tests/codex/testvalidation.nim b/tests/codex/testvalidation.nim index 95d913c3..30d6e3f3 100644 --- a/tests/codex/testvalidation.nim +++ 
b/tests/codex/testvalidation.nim @@ -16,8 +16,8 @@ logScope: topics = "testValidation" asyncchecksuite "validation": - let period = 10 - let timeout = 5 + let period = 10.uint64 + let timeout = 5.uint64 let maxSlots = MaxSlots(100) let validationGroups = ValidationGroups(8).some let slot = Slot.example @@ -51,8 +51,8 @@ asyncchecksuite "validation": groupIndex = groupIndexForSlotId(slot.id, !validationGroups) clock = MockClock.new() market = MockMarket.new(clock = Clock(clock).some) - market.config.proofs.period = period.u256 - market.config.proofs.timeout = timeout.u256 + market.config.proofs.period = period + market.config.proofs.timeout = timeout validation = newValidation(clock, market, maxSlots, validationGroups, groupIndex) teardown: @@ -60,10 +60,10 @@ asyncchecksuite "validation": await validation.stop() proc advanceToNextPeriod() = - let periodicity = Periodicity(seconds: period.u256) - let period = periodicity.periodOf(clock.now().u256) + let periodicity = Periodicity(seconds: period) + let period = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(period) - clock.set((periodEnd + 1).truncate(int)) + clock.set(periodEnd.toSecondsSince1970 + 1) test "the list of slots that it's monitoring is empty initially": check validation.slots.len == 0 diff --git a/tests/codex/utils/testasyncstatemachine.nim b/tests/codex/utils/testasyncstatemachine.nim index 40a040c4..ed3ea747 100644 --- a/tests/codex/utils/testasyncstatemachine.nim +++ b/tests/codex/utils/testasyncstatemachine.nim @@ -10,9 +10,8 @@ type State1 = ref object of State State2 = ref object of State State3 = ref object of State - State4 = ref object of State -var runs, cancellations, errors = [0, 0, 0, 0] +var runs, cancellations = [0, 0, 0, 0] method `$`(state: State1): string = "State1" @@ -23,28 +22,20 @@ method `$`(state: State2): string = method `$`(state: State3): string = "State3" -method `$`(state: State4): string = - "State4" - -method run(state: State1, machine: 
Machine): Future[?State] {.async.} = +method run(state: State1, machine: Machine): Future[?State] {.async: (raises: []).} = inc runs[0] return some State(State2.new()) -method run(state: State2, machine: Machine): Future[?State] {.async.} = +method run(state: State2, machine: Machine): Future[?State] {.async: (raises: []).} = inc runs[1] try: await sleepAsync(1.hours) except CancelledError: inc cancellations[1] - raise -method run(state: State3, machine: Machine): Future[?State] {.async.} = +method run(state: State3, machine: Machine): Future[?State] {.async: (raises: []).} = inc runs[2] -method run(state: State4, machine: Machine): Future[?State] {.async.} = - inc runs[3] - raise newException(ValueError, "failed") - method onMoveToNextStateEvent*(state: State): ?State {.base, upraises: [].} = discard @@ -54,19 +45,6 @@ method onMoveToNextStateEvent(state: State2): ?State = method onMoveToNextStateEvent(state: State3): ?State = some State(State1.new()) -method onError(state: State1, error: ref CatchableError): ?State = - inc errors[0] - -method onError(state: State2, error: ref CatchableError): ?State = - inc errors[1] - -method onError(state: State3, error: ref CatchableError): ?State = - inc errors[2] - -method onError(state: State4, error: ref CatchableError): ?State = - inc errors[3] - some State(State2.new()) - asyncchecksuite "async state machines": var machine: Machine @@ -76,7 +54,6 @@ asyncchecksuite "async state machines": setup: runs = [0, 0, 0, 0] cancellations = [0, 0, 0, 0] - errors = [0, 0, 0, 0] machine = Machine.new() test "should call run on start state": @@ -112,16 +89,6 @@ asyncchecksuite "async state machines": check runs == [0, 1, 0, 0] check cancellations == [0, 1, 0, 0] - test "forwards errors to error handler": - machine.start(State4.new()) - check eventually errors == [0, 0, 0, 1] and runs == [0, 1, 0, 1] - - test "error handler ignores CancelledError": - machine.start(State2.new()) - machine.schedule(moveToNextStateEvent) - check 
eventually cancellations == [0, 1, 0, 0] - check errors == [0, 0, 0, 0] - test "queries properties of the current state": proc description(state: State): string = $state diff --git a/tests/codex/utils/testiter.nim b/tests/codex/utils/testiter.nim index 801e1937..ec19c484 100644 --- a/tests/codex/utils/testiter.nim +++ b/tests/codex/utils/testiter.nim @@ -7,7 +7,7 @@ import pkg/codex/utils/iter import ../../asynctest import ../helpers -checksuite "Test Iter": +suite "Test Iter": test "Should be finished": let iter = Iter[int].empty() diff --git a/tests/codex/utils/testkeyutils.nim b/tests/codex/utils/testkeyutils.nim index 2124e682..104258f3 100644 --- a/tests/codex/utils/testkeyutils.nim +++ b/tests/codex/utils/testkeyutils.nim @@ -1,12 +1,14 @@ -import std/unittest import std/os -import codex/utils/keyutils + +import pkg/unittest2 +import pkg/codex/utils/keyutils + import ../helpers when defined(windows): import stew/windows/acl -checksuite "keyutils": +suite "keyutils": let path = getTempDir() / "CodexTest" setup: diff --git a/tests/codex/utils/testoptions.nim b/tests/codex/utils/testoptions.nim index 05f7509e..650715bc 100644 --- a/tests/codex/utils/testoptions.nim +++ b/tests/codex/utils/testoptions.nim @@ -1,8 +1,9 @@ -import std/unittest -import codex/utils/options +import pkg/unittest2 +import pkg/codex/utils/options + import ../helpers -checksuite "optional casts": +suite "optional casts": test "casting value to same type works": check 42 as int == some 42 @@ -31,7 +32,7 @@ checksuite "optional casts": check 42.some as string == string.none check int.none as int == int.none -checksuite "Optionalize": +suite "Optionalize": test "does not except non-object types": static: doAssert not compiles(Optionalize(int)) diff --git a/tests/codex/utils/testtimer.nim b/tests/codex/utils/testtimer.nim index 303c43fb..2f356df9 100644 --- a/tests/codex/utils/testtimer.nim +++ b/tests/codex/utils/testtimer.nim @@ -52,21 +52,21 @@ asyncchecksuite "Timer": test "Start timer1 
should execute callback": startNumbersTimer() - check eventually output == "0" + check eventually(output == "0", pollInterval = 10) test "Start timer1 should execute callback multiple times": startNumbersTimer() - check eventually output == "012" + check eventually(output == "012", pollInterval = 10) test "Starting timer1 multiple times has no impact": startNumbersTimer() startNumbersTimer() startNumbersTimer() - check eventually output == "01234" + check eventually(output == "01234", pollInterval = 10) test "Stop timer1 should stop execution of the callback": startNumbersTimer() - check eventually output == "012" + check eventually(output == "012", pollInterval = 10) await timer1.stop() await sleepAsync(30.milliseconds) let stoppedOutput = output @@ -81,4 +81,4 @@ asyncchecksuite "Timer": test "Starting both timers should execute callbacks sequentially": startNumbersTimer() startLettersTimer() - check eventually output == "0a1b2c3d4e" + check eventually(output == "0a1b2c3d4e", pollInterval = 10) diff --git a/tests/codex/utils/testtrackedfutures.nim b/tests/codex/utils/testtrackedfutures.nim index 35074919..993d5b43 100644 --- a/tests/codex/utils/testtrackedfutures.nim +++ b/tests/codex/utils/testtrackedfutures.nim @@ -17,47 +17,71 @@ asyncchecksuite "tracked futures": check module.trackedFutures.len == 0 test "tracks unfinished futures": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) module.trackedFutures.track(fut) check module.trackedFutures.len == 1 test "does not track completed futures": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) fut.complete() module.trackedFutures.track(fut) - check eventually module.trackedFutures.len == 0 - - test "does not track failed futures": - let fut = newFuture[void]("test") - fut.fail((ref CatchableError)(msg: "some error")) - module.trackedFutures.track(fut) - check eventually 
module.trackedFutures.len == 0 + check module.trackedFutures.len == 0 test "does not track cancelled futures": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) + fut.cancelCallback = proc(data: pointer) = + fut.cancelAndSchedule() # manually schedule the cancel + await fut.cancelAndWait() module.trackedFutures.track(fut) check eventually module.trackedFutures.len == 0 test "removes tracked future when finished": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) module.trackedFutures.track(fut) + check module.trackedFutures.len == 1 fut.complete() check eventually module.trackedFutures.len == 0 test "removes tracked future when cancelled": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) + fut.cancelCallback = proc(data: pointer) = + fut.cancelAndSchedule() # manually schedule the cancel + module.trackedFutures.track(fut) + check module.trackedFutures.len == 1 + await fut.cancelAndWait() + check eventually module.trackedFutures.len == 0 + + test "completed and removes future on cancel": + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) + fut.cancelCallback = proc(data: pointer) = + fut.complete() + + module.trackedFutures.track(fut) + check module.trackedFutures.len == 1 await fut.cancelAndWait() check eventually module.trackedFutures.len == 0 test "cancels and removes all tracked futures": - let fut1 = newFuture[void]("test1") - let fut2 = newFuture[void]("test2") - let fut3 = newFuture[void]("test3") + let fut1 = Future[void].Raising([]).init("test1", {FutureFlag.OwnCancelSchedule}) + fut1.cancelCallback = proc(data: pointer) = + fut1.cancelAndSchedule() # manually schedule the cancel + + let fut2 = Future[void].Raising([]).init("test2", {FutureFlag.OwnCancelSchedule}) + fut2.cancelCallback = proc(data: pointer) = + 
fut2.cancelAndSchedule() # manually schedule the cancel + + let fut3 = Future[void].Raising([]).init("test3", {FutureFlag.OwnCancelSchedule}) + fut3.cancelCallback = proc(data: pointer) = + fut3.cancelAndSchedule() # manually schedule the cancel + module.trackedFutures.track(fut1) + check module.trackedFutures.len == 1 module.trackedFutures.track(fut2) + check module.trackedFutures.len == 2 module.trackedFutures.track(fut3) + check module.trackedFutures.len == 3 await module.trackedFutures.cancelTracked() check eventually fut1.cancelled check eventually fut2.cancelled diff --git a/tests/codex/utils/testutils.nim b/tests/codex/utils/testutils.nim index 92c883be..b0bb20b5 100644 --- a/tests/codex/utils/testutils.nim +++ b/tests/codex/utils/testutils.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/codex/utils diff --git a/tests/contracts/helpers/mockprovider.nim b/tests/contracts/helpers/mockprovider.nim index 09e65398..c5be8ad7 100644 --- a/tests/contracts/helpers/mockprovider.nim +++ b/tests/contracts/helpers/mockprovider.nim @@ -13,7 +13,7 @@ type MockProvider* = ref object of Provider method getBlock*( provider: MockProvider, tag: BlockTag -): Future[?Block] {.async: (raises: [ProviderError]).} = +): Future[?Block] {.async: (raises: [ProviderError, CancelledError]).} = try: if tag == BlockTag.latest: if latestBlock =? 
provider.latest: diff --git a/tests/contracts/testContracts.nim b/tests/contracts/testContracts.nim index 3af63ac1..84708ecd 100644 --- a/tests/contracts/testContracts.nim +++ b/tests/contracts/testContracts.nim @@ -49,28 +49,29 @@ ethersuite "Marketplace contracts": switchAccount(host) discard await token.approve(marketplace.address, request.ask.collateralPerSlot).confirm(1) - discard await marketplace.reserveSlot(request.id, 0.u256).confirm(1) - let receipt = await marketplace.fillSlot(request.id, 0.u256, proof).confirm(1) + discard await marketplace.reserveSlot(request.id, 0.uint64).confirm(1) + let receipt = await marketplace.fillSlot(request.id, 0.uint64, proof).confirm(1) filledAt = await ethProvider.blockTime(BlockTag.init(!receipt.blockNumber)) - slotId = request.slotId(0.u256) + slotId = request.slotId(0.uint64) proc waitUntilProofRequired(slotId: SlotId) {.async.} = - let currentPeriod = periodicity.periodOf(await ethProvider.currentTime()) - await ethProvider.advanceTimeTo(periodicity.periodEnd(currentPeriod)) + let currentPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) + await ethProvider.advanceTimeTo(periodicity.periodEnd(currentPeriod).u256) while not ( (await marketplace.isProofRequired(slotId)) and (await marketplace.getPointer(slotId)) < 250 ) : - await ethProvider.advanceTime(periodicity.seconds) + await ethProvider.advanceTime(periodicity.seconds.u256) proc startContract() {.async.} = for slotIndex in 1 ..< request.ask.slots: discard await token .approve(marketplace.address, request.ask.collateralPerSlot) .confirm(1) - discard await marketplace.reserveSlot(request.id, slotIndex.u256).confirm(1) - discard await marketplace.fillSlot(request.id, slotIndex.u256, proof).confirm(1) + discard await marketplace.reserveSlot(request.id, slotIndex.uint64).confirm(1) + discard await marketplace.fillSlot(request.id, slotIndex.uint64, proof).confirm(1) test "accept marketplace proofs": switchAccount(host) @@ -80,9 +81,10 @@ 
ethersuite "Marketplace contracts": test "can mark missing proofs": switchAccount(host) await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) let endOfPeriod = periodicity.periodEnd(missingPeriod) - await ethProvider.advanceTimeTo(endOfPeriod + 1) + await ethProvider.advanceTimeTo(endOfPeriod.u256 + 1) switchAccount(client) discard await marketplace.markProofAsMissing(slotId, missingPeriod).confirm(1) @@ -123,7 +125,8 @@ ethersuite "Marketplace contracts": let expiry = await marketplace.requestExpiry(request.id) await ethProvider.advanceTimeTo((expiry + 1).u256) switchAccount(client) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) - await ethProvider.advanceTime(periodicity.seconds) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) + await ethProvider.advanceTime(periodicity.seconds.u256) expect Marketplace_SlotNotAcceptingProofs: discard await marketplace.markProofAsMissing(slotId, missingPeriod).confirm(1) diff --git a/tests/contracts/testDeployment.nim b/tests/contracts/testDeployment.nim index a439e42a..86a5fe00 100644 --- a/tests/contracts/testDeployment.nim +++ b/tests/contracts/testDeployment.nim @@ -12,7 +12,7 @@ type MockProvider = ref object of Provider method getChainId*( provider: MockProvider -): Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = return provider.chainId proc configFactory(): CodexConf = diff --git a/tests/contracts/testMarket.nim b/tests/contracts/testMarket.nim index a77c2aaa..068a4d2e 100644 --- a/tests/contracts/testMarket.nim +++ b/tests/contracts/testMarket.nim @@ -3,6 +3,8 @@ import std/importutils import pkg/chronos import pkg/ethers/erc20 import codex/contracts +import pkg/libp2p/cid +import pkg/lrucache import ../ethertest 
import ./examples import ./time @@ -23,7 +25,7 @@ ethersuite "On-Chain Market": var marketplace: Marketplace var token: Erc20Token var request: StorageRequest - var slotIndex: UInt256 + var slotIndex: uint64 var periodicity: Periodicity var host: Signer var otherHost: Signer @@ -56,11 +58,12 @@ ethersuite "On-Chain Market": host = ethProvider.getSigner(accounts[1]) otherHost = ethProvider.getSigner(accounts[3]) - slotIndex = (request.ask.slots div 2).u256 + slotIndex = request.ask.slots div 2 proc advanceToNextPeriod() {.async.} = - let currentPeriod = periodicity.periodOf(await ethProvider.currentTime()) - await ethProvider.advanceTimeTo(periodicity.periodEnd(currentPeriod) + 1) + let currentPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) + await ethProvider.advanceTimeTo((periodicity.periodEnd(currentPeriod) + 1).u256) proc advanceToCancelledRequest(request: StorageRequest) {.async.} = let expiry = (await market.requestExpiresAt(request.id)) + 1 @@ -123,12 +126,13 @@ ethersuite "On-Chain Market": test "supports request subscriptions": var receivedIds: seq[RequestId] var receivedAsks: seq[StorageAsk] - proc onRequest(id: RequestId, ask: StorageAsk, expiry: UInt256) = + proc onRequest(id: RequestId, ask: StorageAsk, expiry: uint64) = receivedIds.add(id) receivedAsks.add(ask) let subscription = await market.subscribeRequests(onRequest) await market.requestStorage(request) + check eventually receivedIds == @[request.id] and receivedAsks == @[request.ask] await subscription.unsubscribe() @@ -170,7 +174,8 @@ ethersuite "On-Chain Market": await market.reserveSlot(request.id, slotIndex) await market.fillSlot(request.id, slotIndex, proof, request.ask.collateralPerSlot) await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) await advanceToNextPeriod() await 
market.markProofAsMissing(slotId, missingPeriod) check (await marketplace.missingProofs(slotId)) == 1 @@ -181,15 +186,16 @@ ethersuite "On-Chain Market": await market.reserveSlot(request.id, slotIndex) await market.fillSlot(request.id, slotIndex, proof, request.ask.collateralPerSlot) await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) await advanceToNextPeriod() check (await market.canProofBeMarkedAsMissing(slotId, missingPeriod)) == true test "supports slot filled subscriptions": await market.requestStorage(request) var receivedIds: seq[RequestId] - var receivedSlotIndices: seq[UInt256] - proc onSlotFilled(id: RequestId, slotIndex: UInt256) = + var receivedSlotIndices: seq[uint64] + proc onSlotFilled(id: RequestId, slotIndex: uint64) = receivedIds.add(id) receivedSlotIndices.add(slotIndex) @@ -204,8 +210,8 @@ ethersuite "On-Chain Market": test "subscribes only to a certain slot": var otherSlot = slotIndex - 1 await market.requestStorage(request) - var receivedSlotIndices: seq[UInt256] - proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + var receivedSlotIndices: seq[uint64] + proc onSlotFilled(requestId: RequestId, slotIndex: uint64) = receivedSlotIndices.add(slotIndex) let subscription = @@ -222,8 +228,8 @@ ethersuite "On-Chain Market": await market.reserveSlot(request.id, slotIndex) await market.fillSlot(request.id, slotIndex, proof, request.ask.collateralPerSlot) var receivedRequestIds: seq[RequestId] = @[] - var receivedIdxs: seq[UInt256] = @[] - proc onSlotFreed(requestId: RequestId, idx: UInt256) = + var receivedIdxs: seq[uint64] = @[] + proc onSlotFreed(requestId: RequestId, idx: uint64) = receivedRequestIds.add(requestId) receivedIdxs.add(idx) @@ -241,8 +247,8 @@ ethersuite "On-Chain Market": await market.requestStorage(request) var receivedRequestIds: seq[RequestId] = @[] - var 
receivedIdxs: seq[UInt256] = @[] - proc onSlotReservationsFull(requestId: RequestId, idx: UInt256) = + var receivedIdxs: seq[uint64] = @[] + proc onSlotReservationsFull(requestId: RequestId, idx: uint64) = receivedRequestIds.add(requestId) receivedIdxs.add(idx) @@ -268,9 +274,9 @@ ethersuite "On-Chain Market": let subscription = await market.subscribeFulfillment(request.id, onFulfillment) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) check eventually receivedIds == @[request.id] await subscription.unsubscribe() @@ -289,14 +295,14 @@ ethersuite "On-Chain Market": let subscription = await market.subscribeFulfillment(request.id, onFulfillment) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) for slotIndex in 0 ..< otherRequest.ask.slots: - await market.reserveSlot(otherRequest.id, slotIndex.u256) + await market.reserveSlot(otherRequest.id, slotIndex.uint64) await market.fillSlot( - otherRequest.id, slotIndex.u256, proof, otherRequest.ask.collateralPerSlot + otherRequest.id, slotIndex.uint64, proof, otherRequest.ask.collateralPerSlot ) check eventually receivedIds == @[request.id] @@ -328,18 +334,19 @@ ethersuite "On-Chain Market": let subscription = await market.subscribeRequestFailed(request.id, onRequestFailed) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, 
request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) for slotIndex in 0 .. request.ask.maxSlotLoss: - let slotId = request.slotId(slotIndex.u256) + let slotId = request.slotId(slotIndex.uint64) while true: let slotState = await marketplace.slotState(slotId) if slotState == SlotState.Repair or slotState == SlotState.Failed: break await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) await advanceToNextPeriod() discard await marketplace.markProofAsMissing(slotId, missingPeriod).confirm(1) check eventually receivedIds == @[request.id] @@ -393,9 +400,9 @@ ethersuite "On-Chain Market": test "can retrieve request state": await market.requestStorage(request) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) check (await market.requestState(request.id)) == some RequestState.Started @@ -458,13 +465,12 @@ ethersuite "On-Chain Market": test "can query past SlotFilled events": await market.requestStorage(request) - await market.reserveSlot(request.id, 0.u256) - await market.reserveSlot(request.id, 1.u256) - await market.reserveSlot(request.id, 2.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 1.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 2.u256, proof, request.ask.collateralPerSlot) - let slotId = request.slotId(slotIndex) + await market.reserveSlot(request.id, 0.uint64) + await market.reserveSlot(request.id, 1.uint64) + await market.reserveSlot(request.id, 2.uint64) + await market.fillSlot(request.id, 
0.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 1.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 2.uint64, proof, request.ask.collateralPerSlot) # `market.fill` executes an `approve` tx before the `fillSlot` tx, so that's # two PoA blocks per `fillSlot` call (6 blocks for 3 calls). We don't need @@ -473,15 +479,15 @@ ethersuite "On-Chain Market": let events = await market.queryPastSlotFilledEvents(blocksAgo = 5) check events == @[ - SlotFilled(requestId: request.id, slotIndex: 0.u256), - SlotFilled(requestId: request.id, slotIndex: 1.u256), - SlotFilled(requestId: request.id, slotIndex: 2.u256), + SlotFilled(requestId: request.id, slotIndex: 0), + SlotFilled(requestId: request.id, slotIndex: 1), + SlotFilled(requestId: request.id, slotIndex: 2), ] test "can query past SlotFilled events since given timestamp": await market.requestStorage(request) - await market.reserveSlot(request.id, 0.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) # The SlotFilled event will be included in the same block as # the fillSlot transaction. 
If we want to ignore the SlotFilled event @@ -492,10 +498,10 @@ ethersuite "On-Chain Market": let (_, fromTime) = await ethProvider.blockNumberAndTimestamp(BlockTag.latest) - await market.reserveSlot(request.id, 1.u256) - await market.reserveSlot(request.id, 2.u256) - await market.fillSlot(request.id, 1.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 2.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 1.uint64) + await market.reserveSlot(request.id, 2.uint64) + await market.fillSlot(request.id, 1.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 2.uint64, proof, request.ask.collateralPerSlot) let events = await market.queryPastSlotFilledEvents( fromTime = fromTime.truncate(SecondsSince1970) @@ -503,19 +509,19 @@ ethersuite "On-Chain Market": check events == @[ - SlotFilled(requestId: request.id, slotIndex: 1.u256), - SlotFilled(requestId: request.id, slotIndex: 2.u256), + SlotFilled(requestId: request.id, slotIndex: 1), + SlotFilled(requestId: request.id, slotIndex: 2), ] test "queryPastSlotFilledEvents returns empty sequence of events when " & "no SlotFilled events have occurred since given timestamp": await market.requestStorage(request) - await market.reserveSlot(request.id, 0.u256) - await market.reserveSlot(request.id, 1.u256) - await market.reserveSlot(request.id, 2.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 1.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 2.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.reserveSlot(request.id, 1.uint64) + await market.reserveSlot(request.id, 2.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 1.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 
2.uint64, proof, request.ask.collateralPerSlot) await ethProvider.advanceTime(10.u256) @@ -540,21 +546,21 @@ ethersuite "On-Chain Market": let address = await host.getAddress() switchAccount(host) - await market.reserveSlot(request.id, 0.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) let filledAt = (await ethProvider.currentTime()) - 1.u256 for slotIndex in 1 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) let requestEnd = await market.getRequestEnd(request.id) await ethProvider.advanceTimeTo(requestEnd.u256 + 1) let startBalance = await token.balanceOf(address) - await market.freeSlot(request.slotId(0.u256)) + await market.freeSlot(request.slotId(0.uint64)) let endBalance = await token.balanceOf(address) let expectedPayout = request.expectedPayout(filledAt, requestEnd.u256) @@ -567,14 +573,14 @@ ethersuite "On-Chain Market": await market.requestStorage(request) switchAccount(host) - await market.reserveSlot(request.id, 0.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) let filledAt = (await ethProvider.currentTime()) - 1.u256 for slotIndex in 1 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) let requestEnd = await 
market.getRequestEnd(request.id) @@ -583,7 +589,7 @@ ethersuite "On-Chain Market": let startBalanceHost = await token.balanceOf(hostAddress) let startBalanceReward = await token.balanceOf(hostRewardRecipient) - await market.freeSlot(request.slotId(0.u256)) + await market.freeSlot(request.slotId(0.uint64)) let endBalanceHost = await token.balanceOf(hostAddress) let endBalanceReward = await token.balanceOf(hostRewardRecipient) @@ -591,3 +597,44 @@ ethersuite "On-Chain Market": let expectedPayout = request.expectedPayout(filledAt, requestEnd.u256) check endBalanceHost == (startBalanceHost + request.ask.collateralPerSlot) check endBalanceReward == (startBalanceReward + expectedPayout) + + test "returns the collateral when the slot is not being repaired": + await market.requestStorage(request) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) + + let slotId = request.slotId(0.uint64) + without collateral =? await market.slotCollateral(request.id, 0.uint64), error: + fail() + + check collateral == request.ask.collateralPerSlot + + test "calculates correctly the collateral when the slot is being repaired": + # Ensure that the config is loaded and repairRewardPercentage is available + discard await market.repairRewardPercentage() + + await market.requestStorage(request) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) + await market.freeSlot(slotId(request.id, 0.uint64)) + + let slotId = request.slotId(0.uint64) + + without collateral =? 
await market.slotCollateral(request.id, 0.uint64), error: + fail() + + # slotCollateral + # repairRewardPercentage = 10 + # expected collateral = slotCollateral - slotCollateral * 0.1 + check collateral == + request.ask.collateralPerSlot - (request.ask.collateralPerSlot * 10).div(100.u256) + + test "the request is added in cache after the fist access": + await market.requestStorage(request) + + check market.requestCache.contains($request.id) == false + discard await market.getRequest(request.id) + + check market.requestCache.contains($request.id) == true + let cacheValue = market.requestCache[$request.id] + check cacheValue == request diff --git a/tests/examples.nim b/tests/examples.nim index c96fefd6..9ef4e292 100644 --- a/tests/examples.nim +++ b/tests/examples.nim @@ -49,30 +49,32 @@ proc example*(_: type StorageRequest): StorageRequest = client: Address.example, ask: StorageAsk( slots: 4, - slotSize: (1 * 1024 * 1024 * 1024).u256, # 1 Gigabyte - duration: (10 * 60 * 60).u256, # 10 hours + slotSize: (1 * 1024 * 1024 * 1024).uint64, # 1 Gigabyte + duration: (10 * 60 * 60).uint64, # 10 hours collateralPerByte: 1.u256, proofProbability: 4.u256, # require a proof roughly once every 4 periods pricePerBytePerSecond: 1.u256, maxSlotLoss: 2, # 2 slots can be freed without data considered to be lost ), content: StorageContent( - cid: "zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob", + cid: Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet, merkleRoot: array[32, byte].example, ), - expiry: (60 * 60).u256, # 1 hour , + expiry: (60 * 60).uint64, # 1 hour , nonce: Nonce.example, ) proc example*(_: type Slot): Slot = let request = StorageRequest.example - let slotIndex = rand(request.ask.slots.int).u256 + let slotIndex = rand(request.ask.slots.int).uint64 Slot(request: request, slotIndex: slotIndex) proc example*(_: type SlotQueueItem): SlotQueueItem = let request = StorageRequest.example let slot = Slot.example - SlotQueueItem.init(request, 
slot.slotIndex.truncate(uint16)) + SlotQueueItem.init( + request, slot.slotIndex.uint16, collateral = request.ask.collateralPerSlot + ) proc example(_: type G1Point): G1Point = G1Point(x: UInt256.example, y: UInt256.example) diff --git a/tests/helpers.nim b/tests/helpers.nim index a6a6ff44..b48b787e 100644 --- a/tests/helpers.nim +++ b/tests/helpers.nim @@ -1,5 +1,37 @@ import helpers/multisetup import helpers/trackers import helpers/templeveldb +import std/times +import std/sequtils, chronos export multisetup, trackers, templeveldb + +### taken from libp2p errorhelpers.nim +proc allFuturesThrowing*(args: varargs[FutureBase]): Future[void] = + # This proc is only meant for use in tests / not suitable for general use. + # - Swallowing errors arbitrarily instead of aggregating them is bad design + # - It raises `CatchableError` instead of the union of the `futs` errors, + # inflating the caller's `raises` list unnecessarily. `macro` could fix it + let futs = @args + ( + proc() {.async: (raises: [CatchableError]).} = + await allFutures(futs) + var firstErr: ref CatchableError + for fut in futs: + if fut.failed: + let err = fut.error() + if err of CancelledError: + raise err + if firstErr == nil: + firstErr = err + if firstErr != nil: + raise firstErr + )() + +proc allFuturesThrowing*[T](futs: varargs[Future[T]]): Future[void] = + allFuturesThrowing(futs.mapIt(FutureBase(it))) + +proc allFuturesThrowing*[T, E]( # https://github.com/nim-lang/Nim/issues/23432 + futs: varargs[InternalRaisesFuture[T, E]] +): Future[void] = + allFuturesThrowing(futs.mapIt(FutureBase(it))) diff --git a/tests/helpers/trackers.nim b/tests/helpers/trackers.nim index ed8c5692..898053c2 100644 --- a/tests/helpers/trackers.nim +++ b/tests/helpers/trackers.nim @@ -1,5 +1,5 @@ import pkg/codex/streams/storestream -import std/unittest +import pkg/unittest2 # From lip2p/tests/helpers const trackerNames = [StoreStreamTrackerName] diff --git a/tests/integration/codexclient.nim 
b/tests/integration/codexclient.nim index d1191fb9..d7ed3df2 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -4,117 +4,230 @@ import std/strutils from pkg/libp2p import Cid, `$`, init import pkg/stint import pkg/questionable/results -import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient] +import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient, httptable] import pkg/codex/logutils import pkg/codex/rest/json import pkg/codex/purchasing import pkg/codex/errors import pkg/codex/sales/reservations -export purchasing +export purchasing, httptable, httpclient type CodexClient* = ref object - http: HttpClient baseurl: string session: HttpSessionRef -type CodexClientError* = object of CatchableError - -const HttpClientTimeoutMs = 60 * 1000 - proc new*(_: type CodexClient, baseurl: string): CodexClient = - CodexClient( - http: newHttpClient(timeout = HttpClientTimeoutMs), - baseurl: baseurl, - session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline}), - ) + CodexClient(session: HttpSessionRef.new(), baseurl: baseurl) -proc info*(client: CodexClient): ?!JsonNode = - let url = client.baseurl & "/debug/info" - JsonNode.parse(client.http.getContent(url)) +proc close*(self: CodexClient): Future[void] {.async: (raises: []).} = + await self.session.closeWait() -proc setLogLevel*(client: CodexClient, level: string) = - let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level - let headers = newHttpHeaders({"Content-Type": "text/plain"}) - let response = client.http.request(url, httpMethod = HttpPost, headers = headers) - assert response.status == "200 OK" +proc request( + self: CodexClient, + httpMethod: httputils.HttpMethod, + url: string, + body: openArray[char] = [], + headers: openArray[HttpHeaderTuple] = [], +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = + HttpClientRequestRef + .new( + self.session, + url, + httpMethod, + version = HttpVersion11, + flags = {}, + maxResponseHeadersSize = HttpMaxHeadersSize, + headers = headers, + body = body.toOpenArrayByte(0, len(body) - 1), + ).get + .send() -proc upload*(client: CodexClient, contents: string): ?!Cid = - let response = client.http.post(client.baseurl & "/data", contents) - assert response.status == "200 OK" - Cid.init(response.body).mapFailure +proc post*( + self: CodexClient, + url: string, + body: string = "", + headers: seq[HttpHeaderTuple] = @[], +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodPost, url, headers = headers, body = body) -proc upload*(client: CodexClient, bytes: seq[byte]): ?!Cid = - client.upload(string.fromBytes(bytes)) +proc get( + self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[] +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodGet, url, headers = headers) -proc download*(client: CodexClient, cid: Cid, local = false): ?!string = - let response = client.http.get( - client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") - ) +proc delete( + self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[] +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodDelete, url, headers = headers) - if response.status != "200 OK": - return failure(response.status) +proc patch*( + self: CodexClient, + url: string, + body: string = "", + headers: seq[HttpHeaderTuple] = @[], +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodPatch, url, headers = headers, body = body) - success response.body +proc body*( + response: HttpClientResponseRef +): Future[string] {.async: (raises: [CancelledError, HttpError]).} = + return bytesToString (await response.getBodyBytes()) -proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string = - let response = client.http.get(client.baseurl & "/data/" & $cid & "/network/manifest") +proc getContent( + client: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[] +): Future[string] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.get(url, headers) + return await response.body - if response.status != "200 OK": - return failure(response.status) +proc info*( + client: CodexClient +): Future[?!JsonNode] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.get(client.baseurl & "/debug/info") + return JsonNode.parse(await response.body) - success response.body +proc setLogLevel*( + client: CodexClient, level: string +): Future[void] {.async: (raises: [CancelledError, HttpError]).} = + let + url = client.baseurl & "/debug/chronicles/loglevel?level=" & level + headers = @[("Content-Type", "text/plain")] + response = await client.post(url, headers = headers, body = "") + assert response.status == 200 -proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = - let response = client.http.post(client.baseurl & "/data/" & $cid & "/network") +proc uploadRaw*( + client: CodexClient, contents: string, headers: seq[HttpHeaderTuple] = @[] +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return client.post(client.baseurl & "/data", body = contents, headers = headers) - if response.status != "200 OK": - return failure(response.status) +proc upload*( + client: CodexClient, contents: string +): Future[?!Cid] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.uploadRaw(contents) + assert response.status == 200 + Cid.init(await response.body).mapFailure - success response.body +proc upload*( + client: CodexClient, bytes: seq[byte] +): Future[?!Cid] {.async: (raw: true).} = + return client.upload(string.fromBytes(bytes)) + +proc downloadRaw*( + client: CodexClient, cid: string, local = false +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return + client.get(client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream")) proc downloadBytes*( client: CodexClient, cid: Cid, local = false -): Future[?!seq[byte]] {.async.} = - let uri = - parseUri(client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream")) +): Future[?!seq[byte]] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.downloadRaw($cid, local = local) - let (status, bytes) = await client.session.fetch(uri) + if response.status != 200: + return failure($response.status) - if status != 200: - return failure("fetch failed with status " & $status) + success await response.getBodyBytes() - success bytes +proc download*( + client: CodexClient, cid: Cid, local = false +): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} = + without response =? 
await client.downloadBytes(cid, local = local), err: + return failure(err) + return success bytesToString(response) -proc list*(client: CodexClient): ?!RestContentList = - let url = client.baseurl & "/data" - let response = client.http.get(url) +proc downloadNoStream*( + client: CodexClient, cid: Cid +): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.post(client.baseurl & "/data/" & $cid & "/network") - if response.status != "200 OK": - return failure(response.status) + if response.status != 200: + return failure($response.status) - RestContentList.fromJson(response.body) + success await response.body -proc space*(client: CodexClient): ?!RestRepoStore = +proc downloadManifestOnly*( + client: CodexClient, cid: Cid +): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} = + let response = + await client.get(client.baseurl & "/data/" & $cid & "/network/manifest") + + if response.status != 200: + return failure($response.status) + + success await response.body + +proc deleteRaw*( + client: CodexClient, cid: string +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return client.delete(client.baseurl & "/data/" & cid) + +proc delete*( + client: CodexClient, cid: Cid +): Future[?!void] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.deleteRaw($cid) + + if response.status != 204: + return failure($response.status) + + success() + +proc listRaw*( + client: CodexClient +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return client.get(client.baseurl & "/data") + +proc list*( + client: CodexClient +): Future[?!RestContentList] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.listRaw() + + if response.status != 200: + return failure($response.status) + + RestContentList.fromJson(await response.body) + +proc space*( + client: CodexClient +): Future[?!RestRepoStore] {.async: (raises: [CancelledError, HttpError]).} = let url = client.baseurl & "/space" - let response = client.http.get(url) + let response = await client.get(url) - if response.status != "200 OK": - return failure(response.status) + if response.status != 200: + return failure($response.status) - RestRepoStore.fromJson(response.body) + RestRepoStore.fromJson(await response.body) proc requestStorageRaw*( client: CodexClient, cid: Cid, - duration: UInt256, + duration: uint64, pricePerBytePerSecond: UInt256, proofProbability: UInt256, collateralPerByte: UInt256, - expiry: uint = 0, + expiry: uint64 = 0, nodes: uint = 3, tolerance: uint = 1, -): Response = +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = ## Call request storage REST endpoint ## let url = client.baseurl & "/storage/request/" & $cid @@ -131,54 +244,59 @@ proc requestStorageRaw*( if expiry != 0: json["expiry"] = %($expiry) - return client.http.post(url, $json) + return client.post(url, $json) proc requestStorage*( client: CodexClient, cid: Cid, - duration: UInt256, + duration: uint64, pricePerBytePerSecond: UInt256, proofProbability: UInt256, - expiry: uint, + expiry: uint64, collateralPerByte: UInt256, nodes: uint = 3, tolerance: uint = 1, -): ?!PurchaseId = +): Future[?!PurchaseId] {.async: (raises: [CancelledError, HttpError]).} = ## Call request storage REST endpoint ## - let response = client.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes, tolerance, - ) - if response.status != "200 OK": - doAssert(false, response.body) - PurchaseId.fromHex(response.body).catch + let + response = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes, tolerance, + ) + body = await response.body -proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase = + if response.status != 200: + doAssert(false, body) + PurchaseId.fromHex(body).catch + +proc getPurchase*( + client: CodexClient, purchaseId: PurchaseId +): Future[?!RestPurchase] {.async: (raises: [CancelledError, HttpError]).} = let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex try: - let body = client.http.getContent(url) + let body = await client.getContent(url) return RestPurchase.fromJson(body) except CatchableError as e: return failure e.msg -proc getSalesAgent*(client: CodexClient, slotId: SlotId): ?!RestSalesAgent = +proc getSalesAgent*( + client: CodexClient, slotId: SlotId +): Future[?!RestSalesAgent] {.async: (raises: [CancelledError, HttpError]).} = let url = client.baseurl & "/sales/slots/" & slotId.toHex try: - 
let body = client.http.getContent(url) + let body = await client.getContent(url) return RestSalesAgent.fromJson(body) except CatchableError as e: return failure e.msg -proc getSlots*(client: CodexClient): ?!seq[Slot] = - let url = client.baseurl & "/sales/slots" - let body = client.http.getContent(url) - seq[Slot].fromJson(body) - -proc postAvailability*( +proc postAvailabilityRaw*( client: CodexClient, - totalSize, duration, minPricePerBytePerSecond, totalCollateral: UInt256, -): ?!Availability = + totalSize, duration: uint64, + minPricePerBytePerSecond, totalCollateral: UInt256, + enabled: ?bool = bool.none, + until: ?SecondsSince1970 = SecondsSince1970.none, +): Future[HttpClientResponseRef] {.async: (raises: [CancelledError, HttpError]).} = ## Post sales availability endpoint ## let url = client.baseurl & "/sales/availability" @@ -188,18 +306,43 @@ proc postAvailability*( "duration": duration, "minPricePerBytePerSecond": minPricePerBytePerSecond, "totalCollateral": totalCollateral, + "enabled": enabled, + "until": until, } - let response = client.http.post(url, $json) - doAssert response.status == "201 Created", - "expected 201 Created, got " & response.status & ", body: " & response.body - Availability.fromJson(response.body) + return await client.post(url, $json) + +proc postAvailability*( + client: CodexClient, + totalSize, duration: uint64, + minPricePerBytePerSecond, totalCollateral: UInt256, + enabled: ?bool = bool.none, + until: ?SecondsSince1970 = SecondsSince1970.none, +): Future[?!Availability] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.postAvailabilityRaw( + totalSize = totalSize, + duration = duration, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + enabled = enabled, + until = until, + ) + + let body = await response.body + + doAssert response.status == 201, + "expected 201 Created, got " & $response.status & ", body: " & body + Availability.fromJson(body) proc 
patchAvailabilityRaw*( client: CodexClient, availabilityId: AvailabilityId, - totalSize, freeSize, duration, minPricePerBytePerSecond, totalCollateral: ?UInt256 = - UInt256.none, -): Response = + totalSize, freeSize, duration: ?uint64 = uint64.none, + minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, + enabled: ?bool = bool.none, + until: ?SecondsSince1970 = SecondsSince1970.none, +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = ## Updates availability ## let url = client.baseurl & "/sales/availability/" & $availabilityId @@ -222,65 +365,63 @@ proc patchAvailabilityRaw*( if totalCollateral =? totalCollateral: json["totalCollateral"] = %totalCollateral - client.http.patch(url, $json) + if enabled =? enabled: + json["enabled"] = %enabled + + if until =? until: + json["until"] = %until + + client.patch(url, $json) proc patchAvailability*( client: CodexClient, availabilityId: AvailabilityId, - totalSize, duration, minPricePerBytePerSecond, totalCollateral: ?UInt256 = - UInt256.none, -): void = - let response = client.patchAvailabilityRaw( + totalSize, duration: ?uint64 = uint64.none, + minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, + enabled: ?bool = bool.none, + until: ?SecondsSince1970 = SecondsSince1970.none, +): Future[void] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.patchAvailabilityRaw( availabilityId, totalSize = totalSize, duration = duration, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, + enabled = enabled, + until = until, ) - doAssert response.status == "200 OK", "expected 200 OK, got " & response.status + doAssert response.status == 204, "expected No Content, got " & $response.status -proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = +proc getAvailabilities*( + client: CodexClient +): Future[?!seq[Availability]] {.async: (raises: [CancelledError, HttpError]).} = ## Call 
sales availability REST endpoint let url = client.baseurl & "/sales/availability" - let body = client.http.getContent(url) + let body = await client.getContent(url) seq[Availability].fromJson(body) proc getAvailabilityReservations*( client: CodexClient, availabilityId: AvailabilityId -): ?!seq[Reservation] = +): Future[?!seq[Reservation]] {.async: (raises: [CancelledError, HttpError]).} = ## Retrieves Availability's Reservations let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations" - let body = client.http.getContent(url) + let body = await client.getContent(url) seq[Reservation].fromJson(body) -proc close*(client: CodexClient) = - client.http.close() +proc purchaseStateIs*( + client: CodexClient, id: PurchaseId, state: string +): Future[bool] {.async: (raises: [CancelledError, HttpError]).} = + (await client.getPurchase(id)).option .? state == some state -proc restart*(client: CodexClient) = - client.http.close() - client.http = newHttpClient(timeout = HttpClientTimeoutMs) +proc saleStateIs*( + client: CodexClient, id: SlotId, state: string +): Future[bool] {.async: (raises: [CancelledError, HttpError]).} = + (await client.getSalesAgent(id)).option .? state == some state -proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool = - client.getPurchase(id).option .? state == some state +proc requestId*( + client: CodexClient, id: PurchaseId +): Future[?RequestId] {.async: (raises: [CancelledError, HttpError]).} = + return (await client.getPurchase(id)).option .? requestId -proc saleStateIs*(client: CodexClient, id: SlotId, state: string): bool = - client.getSalesAgent(id).option .? state == some state - -proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId = - return client.getPurchase(id).option .? 
requestId - -proc uploadRaw*( - client: CodexClient, contents: string, headers = newHttpHeaders() -): Response = - return client.http.request( - client.baseurl & "/data", body = contents, httpMethod = HttpPost, headers = headers - ) - -proc listRaw*(client: CodexClient): Response = - return client.http.request(client.baseurl & "/data", httpMethod = HttpGet) - -proc downloadRaw*(client: CodexClient, cid: string, local = false): Response = - return client.http.request( - client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"), - httpMethod = HttpGet, - ) +proc buildUrl*(client: CodexClient, path: string): string = + return client.baseurl & path diff --git a/tests/integration/codexconfig.nim b/tests/integration/codexconfig.nim index 41d7109c..138ae274 100644 --- a/tests/integration/codexconfig.nim +++ b/tests/integration/codexconfig.nim @@ -200,6 +200,54 @@ proc withLogLevel*( config.addCliOption("--log-level", $level) return startConfig +proc withBlockTtl*( + self: CodexConfig, ttl: int +): CodexConfig {.raises: [CodexConfigError].} = + var config = self + config.addCliOption("--block-ttl", $ttl) + return config + +proc withBlockTtl*( + self: CodexConfigs, idx: int, ttl: int +): CodexConfigs {.raises: [CodexConfigError].} = + self.checkBounds idx + + var startConfig = self + startConfig.configs[idx].addCliOption("--block-ttl", $ttl) + return startConfig + +proc withBlockTtl*( + self: CodexConfigs, ttl: int +): CodexConfigs {.raises: [CodexConfigError].} = + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--block-ttl", $ttl) + return startConfig + +proc withBlockMaintenanceInterval*( + self: CodexConfig, interval: int +): CodexConfig {.raises: [CodexConfigError].} = + var config = self + config.addCliOption("--block-mi", $interval) + return config + +proc withBlockMaintenanceInterval*( + self: CodexConfigs, idx: int, interval: int +): CodexConfigs {.raises: [CodexConfigError].} = + self.checkBounds idx + + var 
startConfig = self + startConfig.configs[idx].addCliOption("--block-mi", $interval) + return startConfig + +proc withBlockMaintenanceInterval*( + self: CodexConfigs, interval: int +): CodexConfigs {.raises: [CodexConfigError].} = + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--block-mi", $interval) + return startConfig + proc withSimulateProofFailures*( self: CodexConfigs, idx: int, failEveryNProofs: int ): CodexConfigs {.raises: [CodexConfigError].} = diff --git a/tests/integration/codexprocess.nim b/tests/integration/codexprocess.nim index 79d4b040..3eca5b04 100644 --- a/tests/integration/codexprocess.nim +++ b/tests/integration/codexprocess.nim @@ -68,7 +68,7 @@ method stop*(node: CodexProcess) {.async.} = trace "stopping codex client" if client =? node.client: - client.close() + await client.close() node.client = none CodexClient method removeDataDir*(node: CodexProcess) = diff --git a/tests/integration/marketplacesuite.nim b/tests/integration/marketplacesuite.nim index 68283ad1..1e09963b 100644 --- a/tests/integration/marketplacesuite.nim +++ b/tests/integration/marketplacesuite.nim @@ -20,14 +20,14 @@ template marketplacesuite*(name: string, body: untyped) = var token {.inject, used.}: Erc20Token proc getCurrentPeriod(): Future[Period] {.async.} = - return periodicity.periodOf(await ethProvider.currentTime()) + return periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) proc advanceToNextPeriod() {.async.} = - let periodicity = Periodicity(seconds: period.u256) - let currentTime = await ethProvider.currentTime() + let periodicity = Periodicity(seconds: period) + let currentTime = (await ethProvider.currentTime()).truncate(uint64) let currentPeriod = periodicity.periodOf(currentTime) let endOfPeriod = periodicity.periodEnd(currentPeriod) - await ethProvider.advanceTimeTo(endOfPeriod + 1) + await ethProvider.advanceTimeTo(endOfPeriod.u256 + 1) template eventuallyP(condition: untyped, finalPeriod: 
Period): bool = proc eventuallyP(): Future[bool] {.async.} = @@ -56,19 +56,19 @@ template marketplacesuite*(name: string, body: untyped) = return nodes.u256 * slotSize(blocks, nodes, tolerance) proc createAvailabilities( - datasetSize: UInt256, + datasetSize: uint64, duration: uint64, collateralPerByte: UInt256, minPricePerBytePerSecond: UInt256, - ) = - let totalCollateral = datasetSize * collateralPerByte + ): Future[void] {.async: (raises: [CancelledError, HttpError, ConfigurationError]).} = + let totalCollateral = datasetSize.u256 * collateralPerByte # post availability to each provider for i in 0 ..< providers().len: let provider = providers()[i].client - discard provider.postAvailability( + discard await provider.postAvailability( totalSize = datasetSize, - duration = duration.u256, + duration = duration.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, ) @@ -76,23 +76,25 @@ template marketplacesuite*(name: string, body: untyped) = proc requestStorage( client: CodexClient, cid: Cid, - proofProbability = 1, + proofProbability = 1.u256, duration: uint64 = 12.periods, pricePerBytePerSecond = 1.u256, collateralPerByte = 1.u256, expiry: uint64 = 4.periods, nodes = providers().len, tolerance = 0, - ): Future[PurchaseId] {.async.} = - let id = client.requestStorage( - cid, - expiry = expiry.uint, - duration = duration.u256, - proofProbability = proofProbability.u256, - collateralPerByte = collateralPerByte, - pricePerBytePerSecond = pricePerBytePerSecond, - nodes = nodes.uint, - tolerance = tolerance.uint, + ): Future[PurchaseId] {.async: (raises: [CancelledError, HttpError]).} = + let id = ( + await client.requestStorage( + cid, + expiry = expiry, + duration = duration, + proofProbability = proofProbability, + collateralPerByte = collateralPerByte, + pricePerBytePerSecond = pricePerBytePerSecond, + nodes = nodes.uint, + tolerance = tolerance.uint, + ) ).get return id @@ -102,7 +104,7 @@ template marketplacesuite*(name: 
string, body: untyped) = let tokenAddress = await marketplace.token() token = Erc20Token.new(tokenAddress, ethProvider.getSigner()) let config = await marketplace.configuration() - period = config.proofs.period.truncate(uint64) - periodicity = Periodicity(seconds: period.u256) + period = config.proofs.period + periodicity = Periodicity(seconds: period) body diff --git a/tests/integration/multinodes.nim b/tests/integration/multinodes.nim index ae9a38ab..4b183674 100644 --- a/tests/integration/multinodes.nim +++ b/tests/integration/multinodes.nim @@ -22,6 +22,7 @@ export hardhatprocess export codexprocess export hardhatconfig export codexconfig +export nodeconfigs type RunningNode* = ref object @@ -36,10 +37,12 @@ type MultiNodeSuiteError = object of CatchableError +const jsonRpcProviderUrl* = "http://127.0.0.1:8545" + proc raiseMultiNodeSuiteError(msg: string) = raise newException(MultiNodeSuiteError, msg) -proc nextFreePort(startPort: int): Future[int] {.async.} = +proc nextFreePort*(startPort: int): Future[int] {.async.} = proc client(server: StreamServer, transp: StreamTransport) {.async.} = await transp.closeWait() @@ -59,6 +62,15 @@ proc nextFreePort(startPort: int): Future[int] {.async.} = trace "port is not free", port inc port +proc sanitize(pathSegment: string): string = + var sanitized = pathSegment + for invalid in invalidFilenameChars.items: + sanitized = sanitized.replace(invalid, '_').replace(' ', '_') + sanitized + +proc getTempDirName*(starttime: string, role: Role, roleIdx: int): string = + getTempDir() / "Codex" / sanitize($starttime) / sanitize($role & "_" & $roleIdx) + template multinodesuite*(name: string, body: untyped) = asyncchecksuite name: # Following the problem described here: @@ -81,7 +93,6 @@ template multinodesuite*(name: string, body: untyped) = # .withEthProvider("ws://localhost:8545") # .some, # ... 
- let jsonRpcProviderUrl = "http://127.0.0.1:8545" var running {.inject, used.}: seq[RunningNode] var bootstrapNodes: seq[string] let starttime = now().format("yyyy-MM-dd'_'HH:mm:ss") @@ -147,8 +158,7 @@ template multinodesuite*(name: string, body: untyped) = raiseMultiNodeSuiteError "Cannot start node at nodeIdx " & $nodeIdx & ", not enough eth accounts." - let datadir = - getTempDir() / "Codex" / sanitize($starttime) / sanitize($role & "_" & $roleIdx) + let datadir = getTempDirName(starttime, role, roleIdx) try: if config.logFile.isSome: @@ -274,8 +284,10 @@ template multinodesuite*(name: string, body: untyped) = fail() quit(1) - proc updateBootstrapNodes(node: CodexProcess) = - without ninfo =? node.client.info(): + proc updateBootstrapNodes( + node: CodexProcess + ): Future[void] {.async: (raises: [CatchableError]).} = + without ninfo =? await node.client.info(): # raise CatchableError instead of Defect (with .get or !) so we # can gracefully shutdown and prevent zombies raiseMultiNodeSuiteError "Failed to get node info" @@ -314,14 +326,14 @@ template multinodesuite*(name: string, body: untyped) = for config in clients.configs: let node = await startClientNode(config) running.add RunningNode(role: Role.Client, node: node) - CodexProcess(node).updateBootstrapNodes() + await CodexProcess(node).updateBootstrapNodes() if var providers =? nodeConfigs.providers: failAndTeardownOnError "failed to start provider nodes": for config in providers.configs.mitems: let node = await startProviderNode(config) running.add RunningNode(role: Role.Provider, node: node) - CodexProcess(node).updateBootstrapNodes() + await CodexProcess(node).updateBootstrapNodes() if var validators =? 
nodeConfigs.validators: failAndTeardownOnError "failed to start validator nodes": diff --git a/tests/integration/testblockexpiration.nim b/tests/integration/testblockexpiration.nim index e3fad75c..6a33f3c6 100644 --- a/tests/integration/testblockexpiration.nim +++ b/tests/integration/testblockexpiration.nim @@ -1,89 +1,50 @@ -import std/os -import std/httpclient -import std/strutils -from std/net import TimeoutError +import ../examples +import ./multinodes -import pkg/chronos -import ../ethertest -import ./codexprocess -import ./nodeprocess - -ethersuite "Node block expiration tests": - var node: CodexProcess - var baseurl: string - - let dataDir = getTempDir() / "Codex1" - let content = "test file content" +multinodesuite "Node block expiration tests": + var content: seq[byte] setup: - baseurl = "http://localhost:8080/api/codex/v1" + content = await RandomChunker.example(blocks = 8) - teardown: - await node.stop() + test "node retains not-expired file", + NodeConfigs( + clients: CodexConfigs + .init(nodes = 1) + .withBlockTtl(0, 10) + .withBlockMaintenanceInterval(0, 1).some, + providers: CodexConfigs.none, + ): + let client = clients()[0] + let clientApi = client.client - dataDir.removeDir() - - proc startTestNode(blockTtlSeconds: int) {.async.} = - node = await CodexProcess.startNode( - @[ - "--api-port=8080", - "--data-dir=" & dataDir, - "--nat=none", - "--listen-addrs=/ip4/127.0.0.1/tcp/0", - "--disc-port=8090", - "--block-ttl=" & $blockTtlSeconds, - "--block-mi=1", - "--block-mn=10", - ], - false, - "cli-test-node", - ) - await node.waitUntilStarted() - - proc uploadTestFile(): string = - let client = newHttpClient() - let uploadUrl = baseurl & "/data" - let uploadResponse = client.post(uploadUrl, content) - check uploadResponse.status == "200 OK" - client.close() - uploadResponse.body - - proc downloadTestFile(contentId: string, local = false): Response = - let client = newHttpClient(timeout = 3000) - let downloadUrl = - baseurl & "/data/" & contentId & (if 
local: "" else: "/network/stream") - - let content = client.get(downloadUrl) - client.close() - content - - proc hasFile(contentId: string): bool = - let client = newHttpClient(timeout = 3000) - let dataLocalUrl = baseurl & "/data/" & contentId - let content = client.get(dataLocalUrl) - client.close() - content.code == Http200 - - test "node retains not-expired file": - await startTestNode(blockTtlSeconds = 10) - - let contentId = uploadTestFile() + let contentId = (await clientApi.upload(content)).get await sleepAsync(2.seconds) - let response = downloadTestFile(contentId, local = true) + let download = await clientApi.download(contentId, local = true) + check: - hasFile(contentId) - response.status == "200 OK" - response.body == content + download.isOk + download.get == string.fromBytes(content) - test "node deletes expired file": - await startTestNode(blockTtlSeconds = 1) + test "node deletes expired file", + NodeConfigs( + clients: CodexConfigs + .init(nodes = 1) + .withBlockTtl(0, 1) + .withBlockMaintenanceInterval(0, 1).some, + providers: CodexConfigs.none, + ): + let client = clients()[0] + let clientApi = client.client - let contentId = uploadTestFile() + let contentId = (await clientApi.upload(content)).get await sleepAsync(3.seconds) + let download = await clientApi.download(contentId, local = true) + check: - not hasFile(contentId) - downloadTestFile(contentId, local = true).code == Http404 + download.isFailure + download.error.msg == "404" diff --git a/tests/integration/testecbug.nim b/tests/integration/testecbug.nim index e7604de7..6b86fd29 100644 --- a/tests/integration/testecbug.nim +++ b/tests/integration/testecbug.nim @@ -13,21 +13,18 @@ marketplacesuite "Bug #821 - node crashes during erasure coding": .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log .withLogTopics("node", "erasure", "marketplace").some, - providers: CodexConfigs.init(nodes = 0) - # .debug() # uncomment to enable console log output - # .withLogFile() 
# uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") - .some, + providers: CodexConfigs.init(nodes = 0).some, ): - let pricePerBytePerSecond = 1.u256 - let duration = 20.periods - let collateralPerByte = 1.u256 - let expiry = 10.periods - let data = await RandomChunker.example(blocks = 8) - let client = clients()[0] - let clientApi = client.client + let + pricePerBytePerSecond = 1.u256 + duration = 20.periods + collateralPerByte = 1.u256 + expiry = 10.periods + data = await RandomChunker.example(blocks = 8) + client = clients()[0] + clientApi = client.client - let cid = clientApi.upload(data).get + let cid = (await clientApi.upload(data)).get var requestId = none RequestId proc onStorageRequested(eventResult: ?!StorageRequested) = @@ -49,9 +46,11 @@ marketplacesuite "Bug #821 - node crashes during erasure coding": check eventually(requestId.isSome, timeout = expiry.int * 1000) - let request = await marketplace.getRequest(requestId.get) - let cidFromRequest = Cid.init(request.content.cid).get() - let downloaded = await clientApi.downloadBytes(cidFromRequest, local = true) + let + request = await marketplace.getRequest(requestId.get) + cidFromRequest = request.content.cid + downloaded = await clientApi.downloadBytes(cidFromRequest, local = true) + check downloaded.isOk check downloaded.get.toHex == data.toHex diff --git a/tests/integration/testmarketplace.nim b/tests/integration/testmarketplace.nim index 7813485b..40f394e0 100644 --- a/tests/integration/testmarketplace.nim +++ b/tests/integration/testmarketplace.nim @@ -1,3 +1,5 @@ +import std/times +import std/httpclient import ../examples import ../contracts/time import ../contracts/deployment @@ -34,74 +36,82 @@ marketplacesuite "Marketplace": await ethProvider.advanceTime(1.u256) test "nodes negotiate contracts on the marketplace", marketplaceConfig: - let size = 0xFFFFFF.u256 + let size = 0xFFFFFF.uint64 
let data = await RandomChunker.example(blocks = blocks) # host makes storage available - let availability = host.postAvailability( - totalSize = size, - duration = 20 * 60.u256, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = size * minPricePerBytePerSecond, + let availability = ( + await host.postAvailability( + totalSize = size, + duration = 20 * 60.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, + ) ).get # client requests storage - let cid = client.upload(data).get - let id = client.requestStorage( + let cid = (await client.upload(data)).get + let id = await client.requestStorage( cid, - duration = 20 * 60.u256, + duration = 20 * 60.uint64, pricePerBytePerSecond = minPricePerBytePerSecond, proofProbability = 3.u256, - expiry = 10 * 60, + expiry = 10 * 60.uint64, collateralPerByte = collateralPerByte, nodes = ecNodes, tolerance = ecTolerance, - ).get + ) - check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) - let purchase = client.getPurchase(id).get + check eventually( + await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000 + ) + let purchase = (await client.getPurchase(id)).get check purchase.error == none string - let availabilities = host.getAvailabilities().get + let availabilities = (await host.getAvailabilities()).get check availabilities.len == 1 let newSize = availabilities[0].freeSize check newSize > 0 and newSize < size - let reservations = host.getAvailabilityReservations(availability.id).get + let reservations = (await host.getAvailabilityReservations(availability.id)).get check reservations.len == 3 check reservations[0].requestId == purchase.requestId test "node slots gets paid out and rest of tokens are returned to client", marketplaceConfig: - let size = 0xFFFFFF.u256 + let size = 0xFFFFFF.uint64 let data = await RandomChunker.example(blocks = blocks) let marketplace = 
Marketplace.new(Marketplace.address, ethProvider.getSigner()) let tokenAddress = await marketplace.token() let token = Erc20Token.new(tokenAddress, ethProvider.getSigner()) - let duration = 20 * 60.u256 + let duration = 20 * 60.uint64 # host makes storage available let startBalanceHost = await token.balanceOf(hostAccount) - discard host.postAvailability( - totalSize = size, - duration = 20 * 60.u256, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = size * minPricePerBytePerSecond, + discard ( + await host.postAvailability( + totalSize = size, + duration = 20 * 60.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, + ) ).get # client requests storage - let cid = client.upload(data).get - let id = client.requestStorage( + let cid = (await client.upload(data)).get + let id = await client.requestStorage( cid, duration = duration, pricePerBytePerSecond = minPricePerBytePerSecond, proofProbability = 3.u256, - expiry = 10 * 60, + expiry = 10 * 60.uint64, collateralPerByte = collateralPerByte, nodes = ecNodes, tolerance = ecTolerance, - ).get + ) - check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) - let purchase = client.getPurchase(id).get + check eventually( + await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000 + ) + let purchase = (await client.getPurchase(id)).get check purchase.error == none string let clientBalanceBeforeFinished = await token.balanceOf(clientAccount) @@ -109,13 +119,13 @@ marketplacesuite "Marketplace": # Proving mechanism uses blockchain clock to do proving/collect/cleanup round # hence we must use `advanceTime` over `sleepAsync` as Hardhat does mine new blocks # only with new transaction - await ethProvider.advanceTime(duration) + await ethProvider.advanceTime(duration.u256) # Checking that the hosting node received reward for at least the time between let slotSize = slotSize(blocks, ecNodes, ecTolerance) 
let pricePerSlotPerSecond = minPricePerBytePerSecond * slotSize check eventually (await token.balanceOf(hostAccount)) - startBalanceHost >= - (duration - 5 * 60) * pricePerSlotPerSecond * ecNodes.u256 + (duration - 5 * 60).u256 * pricePerSlotPerSecond * ecNodes.u256 # Checking that client node receives some funds back that were not used for the host nodes check eventually( @@ -157,19 +167,19 @@ marketplacesuite "Marketplace payouts": # provider makes storage available let datasetSize = datasetSize(blocks, ecNodes, ecTolerance) - let totalAvailabilitySize = datasetSize div 2 - discard providerApi.postAvailability( + let totalAvailabilitySize = (datasetSize div 2).truncate(uint64) + discard await providerApi.postAvailability( # make availability size small enough that we can't fill all the slots, # thus causing a cancellation totalSize = totalAvailabilitySize, - duration = duration.u256, + duration = duration.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = collateralPerByte * totalAvailabilitySize, + totalCollateral = collateralPerByte * totalAvailabilitySize.u256, ) - let cid = clientApi.upload(data).get + let cid = (await clientApi.upload(data)).get - var slotIdxFilled = none UInt256 + var slotIdxFilled = none uint64 proc onSlotFilled(eventResult: ?!SlotFilled) = assert not eventResult.isErr slotIdxFilled = some (!eventResult).slotIndex @@ -189,11 +199,11 @@ marketplacesuite "Marketplace payouts": # wait until one slot is filled check eventually(slotIdxFilled.isSome, timeout = expiry.int * 1000) - let slotId = slotId(!clientApi.requestId(id), !slotIdxFilled) + let slotId = slotId(!(await clientApi.requestId(id)), !slotIdxFilled) # wait until sale is cancelled await ethProvider.advanceTime(expiry.u256) - check eventually providerApi.saleStateIs(slotId, "SaleCancelled") + check eventually await providerApi.saleStateIs(slotId, "SaleCancelled") await advanceToNextPeriod() diff --git a/tests/integration/testproofs.nim 
b/tests/integration/testproofs.nim index a547890b..c49b7b6f 100644 --- a/tests/integration/testproofs.nim +++ b/tests/integration/testproofs.nim @@ -42,11 +42,14 @@ marketplacesuite "Hosts submit regular proofs": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + await createAvailabilities( + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, @@ -56,13 +59,13 @@ marketplacesuite "Hosts submit regular proofs": tolerance = ecTolerance, ) - let purchase = client0.getPurchase(purchaseId).get + let purchase = (await client0.getPurchase(purchaseId)).get check purchase.error == none string let slotSize = slotSize(blocks, ecNodes, ecTolerance) check eventually( - client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 + await client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 ) var proofWasSubmitted = false @@ -116,24 +119,29 @@ marketplacesuite "Simulate invalid proofs": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + await createAvailabilities( + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get - let purchaseId = await client0.requestStorage( - cid, - expiry = expiry, - duration = duration, - nodes = ecNodes, - tolerance = ecTolerance, - proofProbability = 1, + let purchaseId = ( + await client0.requestStorage( + cid, + expiry = expiry, + duration = 
duration, + nodes = ecNodes, + tolerance = ecTolerance, + proofProbability = 1.u256, + ) ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get check eventually( - client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 + await client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 ) var slotWasFreed = false @@ -176,11 +184,14 @@ marketplacesuite "Simulate invalid proofs": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + await createAvailabilities( + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, @@ -188,9 +199,9 @@ marketplacesuite "Simulate invalid proofs": duration = duration, nodes = ecNodes, tolerance = ecTolerance, - proofProbability = 1, + proofProbability = 1.u256, ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get var slotWasFilled = false proc onSlotFilled(eventResult: ?!SlotFilled) = @@ -264,7 +275,9 @@ marketplacesuite "Simulate invalid proofs": # totalSize=slotSize, # should match 1 slot only # duration=totalPeriods.periods.u256, # minPricePerBytePerSecond=minPricePerBytePerSecond, - # totalCollateral=slotSize * minPricePerBytePerSecond + # totalCollateral=slotSize * minPricePerBytePerSecond, + # enabled = true.some, + # until = 0.SecondsSince1970.some, # ) # let cid = client0.upload(data).get diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim index 4e08e7a8..ba8dd190 100644 --- a/tests/integration/testpurchasing.nim +++ b/tests/integration/testpurchasing.nim @@ -8,22 +8,26 @@ import 
../examples twonodessuite "Purchasing": test "node handles storage request", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let id1 = client1.requestStorage( - cid, - duration = 100.u256, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - expiry = 10, - collateralPerByte = 1.u256, + let cid = (await client1.upload(data)).get + let id1 = ( + await client1.requestStorage( + cid, + duration = 100.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 10.uint64, + collateralPerByte = 1.u256, + ) ).get - let id2 = client1.requestStorage( - cid, - duration = 400.u256, - pricePerBytePerSecond = 2.u256, - proofProbability = 6.u256, - expiry = 10, - collateralPerByte = 2.u256, + let id2 = ( + await client1.requestStorage( + cid, + duration = 400.uint64, + pricePerBytePerSecond = 2.u256, + proofProbability = 6.u256, + expiry = 10.uint64, + collateralPerByte = 2.u256, + ) ).get check id1 != id2 @@ -34,23 +38,27 @@ twonodessuite "Purchasing": rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2 ) let data = await chunker.getBytes() - let cid = client1.upload(byteutils.toHex(data)).get - let id = client1.requestStorage( - cid, - duration = 100.u256, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - expiry = 30, - collateralPerByte = 1.u256, - nodes = 3, - tolerance = 1, + let cid = (await client1.upload(byteutils.toHex(data))).get + let id = ( + await client1.requestStorage( + cid, + duration = 100.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 30.uint64, + collateralPerByte = 1.u256, + nodes = 3, + tolerance = 1, + ) ).get - let request = client1.getPurchase(id).get.request.get - check request.ask.duration == 100.u256 + let request = (await client1.getPurchase(id)).get.request.get + + check request.content.cid.data.buffer.len > 0 + check request.ask.duration == 100.uint64 check request.ask.pricePerBytePerSecond == 
1.u256 check request.ask.proofProbability == 3.u256 - check request.expiry == 30 + check request.expiry == 30.uint64 check request.ask.collateralPerByte == 1.u256 check request.ask.slots == 3'u64 check request.ask.maxSlotLoss == 1'u64 @@ -73,54 +81,60 @@ twonodessuite "Purchasing": test "node remembers purchase status after restart", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let id = client1.requestStorage( - cid, - duration = 10 * 60.u256, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - expiry = 5 * 60, - collateralPerByte = 1.u256, - nodes = 3.uint, - tolerance = 1.uint, + let cid = (await client1.upload(data)).get + let id = ( + await client1.requestStorage( + cid, + duration = 10 * 60.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 5 * 60.uint64, + collateralPerByte = 1.u256, + nodes = 3.uint, + tolerance = 1.uint, + ) ).get - check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) + check eventually( + await client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000 + ) await node1.restart() - client1.restart() - check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) - let request = client1.getPurchase(id).get.request.get - check request.ask.duration == (10 * 60).u256 + check eventually( + await client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000 + ) + let request = (await client1.getPurchase(id)).get.request.get + check request.ask.duration == (10 * 60).uint64 check request.ask.pricePerBytePerSecond == 1.u256 check request.ask.proofProbability == 3.u256 - check request.expiry == (5 * 60).u256 + check request.expiry == (5 * 60).uint64 check request.ask.collateralPerByte == 1.u256 check request.ask.slots == 3'u64 check request.ask.maxSlotLoss == 1'u64 test "node requires expiry and its value to be in future", twoNodesConfig: let data = await RandomChunker.example(blocks = 
2) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get - let responseMissing = client1.requestStorageRaw( + let responseMissing = await client1.requestStorageRaw( cid, - duration = 1.u256, + duration = 1.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, ) - check responseMissing.status == "400 Bad Request" - check responseMissing.body == "Expiry required" + check responseMissing.status == 422 + check (await responseMissing.body) == + "Expiry must be greater than zero and less than the request's duration" - let responseBefore = client1.requestStorageRaw( + let responseBefore = await client1.requestStorageRaw( cid, - duration = 10.u256, + duration = 10.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, - expiry = 10, + expiry = 10.uint64, ) - check responseBefore.status == "400 Bad Request" - check "Expiry needs value bigger then zero and smaller then the request's duration" in - responseBefore.body + check responseBefore.status == 422 + check "Expiry must be greater than zero and less than the request's duration" in + (await responseBefore.body) diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 8cbe9817..57e38b39 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -1,149 +1,77 @@ -import std/httpclient +import std/importutils +import std/net import std/sequtils import std/strformat -from pkg/libp2p import `==` +from pkg/libp2p import `==`, `$`, Cid import pkg/codex/units +import pkg/codex/manifest import ./twonodes import ../examples +import ../codex/examples +import ../codex/slots/helpers import json twonodessuite "REST API": test "nodes can print their peer information", twoNodesConfig: - check !client1.info() != !client2.info() + check !(await client1.info()) != !(await client2.info()) test "nodes can set chronicles log level", twoNodesConfig: - 
client1.setLogLevel("DEBUG;TRACE:codex") + await client1.setLogLevel("DEBUG;TRACE:codex") test "node accepts file uploads", twoNodesConfig: - let cid1 = client1.upload("some file contents").get - let cid2 = client1.upload("some other contents").get + let cid1 = (await client1.upload("some file contents")).get + let cid2 = (await client1.upload("some other contents")).get check cid1 != cid2 test "node shows used and available space", twoNodesConfig: - discard client1.upload("some file contents").get - let totalSize = 12.u256 + discard (await client1.upload("some file contents")).get + let totalSize = 12.uint64 let minPricePerBytePerSecond = 1.u256 - let totalCollateral = totalSize * minPricePerBytePerSecond - discard client1.postAvailability( - totalSize = totalSize, - duration = 2.u256, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = totalCollateral, + let totalCollateral = totalSize.u256 * minPricePerBytePerSecond + discard ( + await client1.postAvailability( + totalSize = totalSize, + duration = 2.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + enabled = true.some, + ) ).get - let space = client1.space().tryGet() + let space = (await client1.space()).tryGet() check: space.totalBlocks == 2 - space.quotaMaxBytes == 8589934592.NBytes - space.quotaUsedBytes == 65598.NBytes + space.quotaMaxBytes == 21474836480.NBytes + space.quotaUsedBytes == 65592.NBytes space.quotaReservedBytes == 12.NBytes test "node lists local files", twoNodesConfig: let content1 = "some file contents" let content2 = "some other contents" - let cid1 = client1.upload(content1).get - let cid2 = client1.upload(content2).get - let list = client1.list().get + let cid1 = (await client1.upload(content1)).get + let cid2 = (await client1.upload(content2)).get + let list = (await client1.list()).get check: [cid1, cid2].allIt(it in list.content.mapIt(it.cid)) - test "request storage fails for datasets that are too small", 
twoNodesConfig: - let cid = client1.upload("some file contents").get - let response = client1.requestStorageRaw( - cid, - duration = 10.u256, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - collateralPerByte = 1.u256, - expiry = 9, - ) - - check: - response.status == "400 Bad Request" - response.body == - "Dataset too small for erasure parameters, need at least " & - $(2 * DefaultBlockSize.int) & " bytes" - test "request storage succeeds for sufficiently sized datasets", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let response = client1.requestStorageRaw( - cid, - duration = 10.u256, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - collateralPerByte = 1.u256, - expiry = 9, + let cid = (await client1.upload(data)).get + let response = ( + await client1.requestStorageRaw( + cid, + duration = 10.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + expiry = 9.uint64, + ) ) check: - response.status == "200 OK" - - test "request storage fails if tolerance is zero", twoNodesConfig: - let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let duration = 100.u256 - let pricePerBytePerSecond = 1.u256 - let proofProbability = 3.u256 - let expiry = 30.uint - let collateralPerByte = 1.u256 - let nodes = 3 - let tolerance = 0 - - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes.uint, tolerance.uint, - ) - - check responseBefore.status == "400 Bad Request" - check responseBefore.body == "Tolerance needs to be bigger then zero" - - test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig: - let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let duration = 100.u256 - let pricePerBytePerSecond = 1.u256 - let proofProbability = 3.u256 - let expiry = 
30.uint - let collateralPerByte = 1.u256 - let ecParams = @[(1, 1), (2, 1), (3, 2), (3, 3)] - - for ecParam in ecParams: - let (nodes, tolerance) = ecParam - - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, - ) - - check responseBefore.status == "400 Bad Request" - check responseBefore.body == - "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" - - test "request storage fails if tolerance > nodes (underflow protection)", - twoNodesConfig: - let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let duration = 100.u256 - let pricePerBytePerSecond = 1.u256 - let proofProbability = 3.u256 - let expiry = 30.uint - let collateralPerByte = 1.u256 - let ecParams = @[(0, 1), (1, 2), (2, 3)] - - for ecParam in ecParams: - let (nodes, tolerance) = ecParam - - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, - ) - - check responseBefore.status == "400 Bad Request" - check responseBefore.body == - "Invalid parameters: `tolerance` cannot be greater than `nodes`" + response.status == 200 for ecParams in @[ (minBlocks: 2, nodes: 3, tolerance: 1), (minBlocks: 3, nodes: 5, tolerance: 2) @@ -152,70 +80,55 @@ twonodessuite "REST API": test "request storage succeeds if nodes and tolerance within range " & fmt"({minBlocks=}, {nodes=}, {tolerance=})", twoNodesConfig: let data = await RandomChunker.example(blocks = minBlocks) - let cid = client1.upload(data).get - let duration = 100.u256 + let cid = (await client1.upload(data)).get + let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 - let expiry = 30.uint + let expiry = 30.uint64 let collateralPerByte = 1.u256 - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, 
proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, + var responseBefore = ( + await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) ) - check responseBefore.status == "200 OK" + check responseBefore.status == 200 test "node accepts file uploads with content type", twoNodesConfig: - let headers = newHttpHeaders({"Content-Type": "text/plain"}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Type", "text/plain")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "200 OK" - check response.body != "" + check response.status == 200 + check (await response.body) != "" test "node accepts file uploads with content disposition", twoNodesConfig: - let headers = - newHttpHeaders({"Content-Disposition": "attachment; filename=\"example.txt\""}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Disposition", "attachment; filename=\"example.txt\"")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "200 OK" - check response.body != "" + check response.status == 200 + check (await response.body) != "" test "node accepts file uploads with content disposition without filename", twoNodesConfig: - let headers = newHttpHeaders({"Content-Disposition": "attachment"}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Disposition", "attachment")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "200 OK" - check response.body != "" - - test "upload fails if content disposition contains bad filename", twoNodesConfig: - let headers = - newHttpHeaders({"Content-Disposition": "attachment; filename=\"exam*ple.txt\""}) - let response = client1.uploadRaw("some file contents", headers) - - check 
response.status == "422 Unprocessable Entity" - check response.body == "The filename is not valid." - - test "upload fails if content type is invalid", twoNodesConfig: - let headers = newHttpHeaders({"Content-Type": "hello/world"}) - let response = client1.uploadRaw("some file contents", headers) - - check response.status == "422 Unprocessable Entity" - check response.body == "The MIME type 'hello/world' is not valid." + check response.status == 200 + check (await response.body) != "" test "node retrieve the metadata", twoNodesConfig: - let headers = newHttpHeaders( - { - "Content-Type": "text/plain", - "Content-Disposition": "attachment; filename=\"example.txt\"", - } - ) - let uploadResponse = client1.uploadRaw("some file contents", headers) - let cid = uploadResponse.body - let listResponse = client1.listRaw() + let headers = + @[ + ("Content-Type", "text/plain"), + ("Content-Disposition", "attachment; filename=\"example.txt\""), + ] + let uploadResponse = await client1.uploadRaw("some file contents", headers) + let cid = await uploadResponse.body + let listResponse = await client1.listRaw() - let jsonData = parseJson(listResponse.body) + let jsonData = parseJson(await listResponse.body) check jsonData.hasKey("content") == true @@ -229,37 +142,81 @@ twonodessuite "REST API": check manifest["filename"].getStr() == "example.txt" check manifest.hasKey("mimetype") == true check manifest["mimetype"].getStr() == "text/plain" - check manifest.hasKey("uploadedAt") == true - check manifest["uploadedAt"].getInt() > 0 test "node set the headers when for download", twoNodesConfig: - let headers = newHttpHeaders( - { - "Content-Disposition": "attachment; filename=\"example.txt\"", - "Content-Type": "text/plain", - } - ) + let headers = + @[ + ("Content-Disposition", "attachment; filename=\"example.txt\""), + ("Content-Type", "text/plain"), + ] - let uploadResponse = client1.uploadRaw("some file contents", headers) - let cid = uploadResponse.body + let uploadResponse = await 
client1.uploadRaw("some file contents", headers) + let cid = await uploadResponse.body - check uploadResponse.status == "200 OK" + check uploadResponse.status == 200 - let response = client1.downloadRaw(cid) + let response = await client1.downloadRaw(cid) - check response.status == "200 OK" - check response.headers.hasKey("Content-Type") == true - check response.headers["Content-Type"] == "text/plain" - check response.headers.hasKey("Content-Disposition") == true - check response.headers["Content-Disposition"] == + check response.status == 200 + check "Content-Type" in response.headers + check response.headers.getString("Content-Type") == "text/plain" + check "Content-Disposition" in response.headers + check response.headers.getString("Content-Disposition") == "attachment; filename=\"example.txt\"" let local = true - let localResponse = client1.downloadRaw(cid, local) + let localResponse = await client1.downloadRaw(cid, local) - check localResponse.status == "200 OK" - check localResponse.headers.hasKey("Content-Type") == true - check localResponse.headers["Content-Type"] == "text/plain" - check localResponse.headers.hasKey("Content-Disposition") == true - check localResponse.headers["Content-Disposition"] == + check localResponse.status == 200 + check "Content-Type" in localResponse.headers + check localResponse.headers.getString("Content-Type") == "text/plain" + check "Content-Disposition" in localResponse.headers + check localResponse.headers.getString("Content-Disposition") == "attachment; filename=\"example.txt\"" + + test "should delete a dataset when requested", twoNodesConfig: + let cid = (await client1.upload("some file contents")).get + + var response = await client1.downloadRaw($cid, local = true) + check (await response.body) == "some file contents" + + (await client1.delete(cid)).get + + response = await client1.downloadRaw($cid, local = true) + check response.status == 404 + + test "should return 200 when attempting delete of non-existing block", 
twoNodesConfig: + let response = await client1.deleteRaw($(Cid.example())) + check response.status == 204 + + test "should return 200 when attempting delete of non-existing dataset", + twoNodesConfig: + let cid = Manifest.example().makeManifestBlock().get.cid + let response = await client1.deleteRaw($cid) + check response.status == 204 + + test "should not crash if the download stream is closed before download completes", + twoNodesConfig: + # FIXME this is not a good test. For some reason, to get this to fail, I have to + # store content that is several times the default stream buffer size, otherwise + # the test will succeed even when the bug is present. Since this is probably some + # setting that is internal to chronos, it might change in future versions, + # invalidating this test. Works on Chronos 4.0.3. + + let + contents = repeat("b", DefaultStreamBufferSize * 10) + cid = (await client1.upload(contents)).get + response = await client1.downloadRaw($cid) + + let reader = response.getBodyReader() + + # Read 4 bytes from the stream just to make sure we actually + # receive some data. + check (bytesToString await reader.read(4)) == "bbbb" + + # Abruptly closes the stream (we have to dig all the way to the transport + # or Chronos will close things "nicely"). 
+ response.connection.reader.tsource.close() + + let response2 = await client1.downloadRaw($cid) + check (await response2.body) == contents diff --git a/tests/integration/testrestapivalidation.nim b/tests/integration/testrestapivalidation.nim new file mode 100644 index 00000000..adeffa77 --- /dev/null +++ b/tests/integration/testrestapivalidation.nim @@ -0,0 +1,384 @@ +import std/httpclient +import std/times +import pkg/ethers +import pkg/codex/manifest +import pkg/codex/conf +import pkg/codex/contracts +from pkg/codex/stores/repostore/types import DefaultQuotaBytes +import ../asynctest +import ../checktest +import ../examples +import ../codex/examples +import ./codexconfig +import ./codexprocess + +from ./multinodes import Role, getTempDirName, jsonRpcProviderUrl, nextFreePort + +# This suite allows to run fast the basic rest api validation. +# It starts only one node for all the checks in order to speed up +# the execution. +asyncchecksuite "Rest API validation": + var node: CodexProcess + var config = CodexConfigs.init(nodes = 1).configs[0] + let starttime = now().format("yyyy-MM-dd'_'HH:mm:ss") + let nodexIdx = 0 + let datadir = getTempDirName(starttime, Role.Client, nodexIdx) + + config.addCliOption("--api-port", $(waitFor nextFreePort(8081))) + config.addCliOption("--data-dir", datadir) + config.addCliOption("--nat", "none") + config.addCliOption("--listen-addrs", "/ip4/127.0.0.1/tcp/0") + config.addCliOption("--disc-port", $(waitFor nextFreePort(8081))) + config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl) + config.addCliOption(StartUpCmd.persistence, "--eth-account", $EthAddress.example) + + node = + waitFor CodexProcess.startNode(config.cliArgs, config.debugEnabled, $Role.Client) + + waitFor node.waitUntilStarted() + + let client = node.client() + + test "should return 422 when attempting delete of non-existing dataset": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let 
duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 0 + + var responseBefore = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Tolerance needs to be bigger then zero" + + test "request storage fails for datasets that are too small": + let cid = (await client.upload("some file contents")).get + let response = ( + await client.requestStorageRaw( + cid, + duration = 10.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + expiry = 9.uint64, + ) + ) + + check: + response.status == 422 + (await response.body) == + "Dataset too small for erasure parameters, need at least " & + $(2 * DefaultBlockSize.int) & " bytes" + + test "request storage fails if nodes and tolerance aren't correct": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let ecParams = @[(1, 1), (2, 1), (3, 2), (3, 3)] + + for ecParam in ecParams: + let (nodes, tolerance) = ecParam + + var responseBefore = ( + await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == + "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" + + test "request storage fails if tolerance > nodes (underflow protection)": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 
1.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 0 + + var responseBefore = ( + await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Tolerance needs to be bigger then zero" + + test "upload fails if content disposition contains bad filename": + let headers = @[("Content-Disposition", "attachment; filename=\"exam*ple.txt\"")] + let response = await client.uploadRaw("some file contents", headers) + + check response.status == 422 + check (await response.body) == "The filename is not valid." + + test "upload fails if content type is invalid": + let headers = @[("Content-Type", "hello/world")] + let response = await client.uploadRaw("some file contents", headers) + + check response.status == 422 + check (await response.body) == "The MIME type 'hello/world' is not valid." 
+ + test "updating non-existing availability": + let nonExistingResponse = await client.patchAvailabilityRaw( + AvailabilityId.example, + duration = 100.uint64.some, + minPricePerBytePerSecond = 2.u256.some, + totalCollateral = 200.u256.some, + ) + check nonExistingResponse.status == 404 + + test "updating availability - freeSize is not allowed to be changed": + let availability = ( + await client.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + ).get + let freeSizeResponse = + await client.patchAvailabilityRaw(availability.id, freeSize = 110000.uint64.some) + check freeSizeResponse.status == 422 + check "not allowed" in (await freeSizeResponse.body) + + test "creating availability above the node quota returns 422": + let response = await client.postAvailabilityRaw( + totalSize = 24000000000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + + check response.status == 422 + check (await response.body) == "Not enough storage quota" + + test "updating availability above the node quota returns 422": + let availability = ( + await client.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + ).get + let response = await client.patchAvailabilityRaw( + availability.id, totalSize = 24000000000.uint64.some + ) + + check response.status == 422 + check (await response.body) == "Not enough storage quota" + + test "creating availability when total size is zero returns 422": + let response = await client.postAvailabilityRaw( + totalSize = 0.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + + check response.status == 422 + check (await response.body) == "Total size must be larger then zero" + + test "updating availability when total size is zero returns 422": + let availability = ( + 
await client.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + ).get + let response = + await client.patchAvailabilityRaw(availability.id, totalSize = 0.uint64.some) + + check response.status == 422 + check (await response.body) == "Total size must be larger then zero" + + test "creating availability when total size is negative returns 422": + let json = + %*{ + "totalSize": "-1", + "duration": "200", + "minPricePerBytePerSecond": "3", + "totalCollateral": "300", + } + let response = await client.post(client.buildUrl("/sales/availability"), $json) + + check response.status == 400 + check (await response.body) == "Parsed integer outside of valid range" + + test "updating availability when total size is negative returns 422": + let availability = ( + await client.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + ).get + + let json = %*{"totalSize": "-1"} + let response = await client.patch( + client.buildUrl("/sales/availability/") & $availability.id, $json + ) + + check response.status == 400 + check (await response.body) == "Parsed integer outside of valid range" + + test "request storage fails if tolerance is zero": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 0 + + var responseBefore = ( + await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Tolerance needs to be bigger then zero" + + test "request storage fails if duration exceeds limit": + let data = await 
RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = (31 * 24 * 60 * 60).uint64 + # 31 days TODO: this should not be hardcoded, but waits for https://github.com/codex-storage/nim-codex/issues/1056 + let proofProbability = 3.u256 + let expiry = 30.uint + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 2 + let pricePerBytePerSecond = 1.u256 + + var responseBefore = ( + await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) + ) + + check responseBefore.status == 422 + check "Duration exceeds limit of" in (await responseBefore.body) + + test "request storage fails if expiry is zero": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 0.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 1 + + var responseBefore = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == + "Expiry must be greater than zero and less than the request's duration" + + test "request storage fails if proof probability is zero": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 0.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 1 + + var responseBefore = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Proof probability must be greater 
than zero" + + test "request storage fails if price per byte per second is zero": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 0.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 1 + + var responseBefore = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == + "Price per byte per second must be greater than zero" + + test "request storage fails if collareral per byte is zero": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 0.u256 + let nodes = 3 + let tolerance = 1 + + var responseBefore = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Collateral per byte must be greater than zero" + + test "creating availability fails when until is negative": + let totalSize = 12.uint64 + let minPricePerBytePerSecond = 1.u256 + let totalCollateral = totalSize.u256 * minPricePerBytePerSecond + let response = await client.postAvailabilityRaw( + totalSize = totalSize, + duration = 2.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + until = -1.SecondsSince1970.some, + ) + + check: + response.status == 422 + (await response.body) == "Cannot set until to a negative value" + + waitFor node.stop() + node.removeDataDir() diff --git a/tests/integration/testsales.nim b/tests/integration/testsales.nim index 
a77e5649..ef999990 100644 --- a/tests/integration/testsales.nim +++ b/tests/integration/testsales.nim @@ -1,5 +1,7 @@ import std/httpclient +import std/times import pkg/codex/contracts +from pkg/codex/stores/repostore/types import DefaultQuotaBytes import ./twonodes import ../codex/examples import ../contracts/time @@ -17,11 +19,13 @@ proc findItem[T](items: seq[T], item: T): ?!T = multinodesuite "Sales": let salesConfig = NodeConfigs( clients: CodexConfigs.init(nodes = 1).some, - providers: CodexConfigs.init(nodes = 1).some, + providers: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") + .some, ) - let minPricePerBytePerSecond = 1.u256 - var host: CodexClient var client: CodexClient @@ -30,124 +34,203 @@ multinodesuite "Sales": client = clients()[0].client test "node handles new storage availability", salesConfig: - let availability1 = host.postAvailability( - totalSize = 1.u256, - duration = 2.u256, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 4.u256, + let availability1 = ( + await host.postAvailability( + totalSize = 1.uint64, + duration = 2.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 4.u256, + ) ).get - let availability2 = host.postAvailability( - totalSize = 4.u256, - duration = 5.u256, - minPricePerBytePerSecond = 6.u256, - totalCollateral = 7.u256, + let availability2 = ( + await host.postAvailability( + totalSize = 4.uint64, + duration = 5.uint64, + minPricePerBytePerSecond = 6.u256, + totalCollateral = 7.u256, + ) ).get check availability1 != availability2 test "node lists storage that is for sale", salesConfig: - let availability = host.postAvailability( - totalSize = 1.u256, - duration = 2.u256, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 4.u256, + let availability = ( + await 
host.postAvailability( + totalSize = 1.uint64, + duration = 2.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 4.u256, + ) ).get - check availability in host.getAvailabilities().get - - test "updating non-existing availability", salesConfig: - let nonExistingResponse = host.patchAvailabilityRaw( - AvailabilityId.example, - duration = 100.u256.some, - minPricePerBytePerSecond = 2.u256.some, - totalCollateral = 200.u256.some, - ) - check nonExistingResponse.status == "404 Not Found" + check availability in (await host.getAvailabilities()).get test "updating availability", salesConfig: - let availability = host.postAvailability( - totalSize = 140000.u256, - duration = 200.u256, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 300.u256, + let availability = ( + await host.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) ).get - host.patchAvailability( + var until = getTime().toUnix() + + await host.patchAvailability( availability.id, - duration = 100.u256.some, + duration = 100.uint64.some, minPricePerBytePerSecond = 2.u256.some, totalCollateral = 200.u256.some, + enabled = false.some, + until = until.some, ) - let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get - check updatedAvailability.duration == 100 + let updatedAvailability = + ((await host.getAvailabilities()).get).findItem(availability).get + check updatedAvailability.duration == 100.uint64 check updatedAvailability.minPricePerBytePerSecond == 2 check updatedAvailability.totalCollateral == 200 - check updatedAvailability.totalSize == 140000 - check updatedAvailability.freeSize == 140000 - - test "updating availability - freeSize is not allowed to be changed", salesConfig: - let availability = host.postAvailability( - totalSize = 140000.u256, - duration = 200.u256, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 300.u256, - ).get - let freeSizeResponse = 
- host.patchAvailabilityRaw(availability.id, freeSize = 110000.u256.some) - check freeSizeResponse.status == "400 Bad Request" - check "not allowed" in freeSizeResponse.body + check updatedAvailability.totalSize == 140000.uint64 + check updatedAvailability.freeSize == 140000.uint64 + check updatedAvailability.enabled == false + check updatedAvailability.until == until test "updating availability - updating totalSize", salesConfig: - let availability = host.postAvailability( - totalSize = 140000.u256, - duration = 200.u256, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 300.u256, + let availability = ( + await host.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) ).get - host.patchAvailability(availability.id, totalSize = 100000.u256.some) - let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get + await host.patchAvailability(availability.id, totalSize = 100000.uint64.some) + + let updatedAvailability = + ((await host.getAvailabilities()).get).findItem(availability).get check updatedAvailability.totalSize == 100000 check updatedAvailability.freeSize == 100000 test "updating availability - updating totalSize does not allow bellow utilized", salesConfig: - let originalSize = 0xFFFFFF.u256 + let originalSize = 0xFFFFFF.uint64 let data = await RandomChunker.example(blocks = 8) let minPricePerBytePerSecond = 3.u256 let collateralPerByte = 1.u256 - let totalCollateral = originalSize * collateralPerByte - let availability = host.postAvailability( - totalSize = originalSize, - duration = 20 * 60.u256, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = totalCollateral, + let totalCollateral = originalSize.u256 * collateralPerByte + let availability = ( + await host.postAvailability( + totalSize = originalSize, + duration = 20 * 60.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = 
totalCollateral, + ) ).get # Lets create storage request that will utilize some of the availability's space - let cid = client.upload(data).get - let id = client.requestStorage( - cid, - duration = 20 * 60.u256, - pricePerBytePerSecond = minPricePerBytePerSecond, - proofProbability = 3.u256, - expiry = 10 * 60, - collateralPerByte = collateralPerByte, - nodes = 3, - tolerance = 1, + let cid = (await client.upload(data)).get + let id = ( + await client.requestStorage( + cid, + duration = 20 * 60.uint64, + pricePerBytePerSecond = minPricePerBytePerSecond, + proofProbability = 3.u256, + expiry = (10 * 60).uint64, + collateralPerByte = collateralPerByte, + nodes = 3, + tolerance = 1, + ) ).get - check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) - let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get + check eventually( + await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000 + ) + let updatedAvailability = + ((await host.getAvailabilities()).get).findItem(availability).get check updatedAvailability.totalSize != updatedAvailability.freeSize let utilizedSize = updatedAvailability.totalSize - updatedAvailability.freeSize - let totalSizeResponse = host.patchAvailabilityRaw( - availability.id, totalSize = (utilizedSize - 1.u256).some + let totalSizeResponse = ( + await host.patchAvailabilityRaw( + availability.id, totalSize = (utilizedSize - 1).some + ) ) - check totalSizeResponse.status == "400 Bad Request" - check "totalSize must be larger then current totalSize" in totalSizeResponse.body + check totalSizeResponse.status == 422 + check "totalSize must be larger then current totalSize" in + (await totalSizeResponse.body) - host.patchAvailability(availability.id, totalSize = (originalSize + 20000).some) + await host.patchAvailability( + availability.id, totalSize = (originalSize + 20000).some + ) let newUpdatedAvailability = - (host.getAvailabilities().get).findItem(availability).get + ((await 
host.getAvailabilities()).get).findItem(availability).get check newUpdatedAvailability.totalSize == originalSize + 20000 check newUpdatedAvailability.freeSize - updatedAvailability.freeSize == 20000 + + test "updating availability fails with until negative", salesConfig: + let availability = ( + await host.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + ).get + + let response = + await host.patchAvailabilityRaw(availability.id, until = -1.SecondsSince1970.some) + + check: + (await response.body) == "Cannot set until to a negative value" + + test "returns an error when trying to update the until date before an existing a request is finished", + salesConfig: + let size = 0xFFFFFF.uint64 + let data = await RandomChunker.example(blocks = 8) + let duration = 20 * 60.uint64 + let minPricePerBytePerSecond = 3.u256 + let collateralPerByte = 1.u256 + let ecNodes = 3.uint + let ecTolerance = 1.uint + + # host makes storage available + let availability = ( + await host.postAvailability( + totalSize = size, + duration = duration, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, + ) + ).get + + # client requests storage + let cid = (await client.upload(data)).get + let id = ( + await client.requestStorage( + cid, + duration = duration, + pricePerBytePerSecond = minPricePerBytePerSecond, + proofProbability = 3.u256, + expiry = 10 * 60.uint64, + collateralPerByte = collateralPerByte, + nodes = ecNodes, + tolerance = ecTolerance, + ) + ).get + + check eventually( + await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000 + ) + let purchase = (await client.getPurchase(id)).get + check purchase.error == none string + + let unixNow = getTime().toUnix() + let until = unixNow + 1.SecondsSince1970 + + let response = await host.patchAvailabilityRaw( + availabilityId = availability.id, until = until.some + ) + + 
check: + response.status == 422 + (await response.body) == + "Until parameter must be greater or equal to the longest currently hosted slot" diff --git a/tests/integration/testupdownload.nim b/tests/integration/testupdownload.nim index 05d3a496..24e6039c 100644 --- a/tests/integration/testupdownload.nim +++ b/tests/integration/testupdownload.nim @@ -9,11 +9,11 @@ twonodessuite "Uploads and downloads": let content1 = "some file contents" let content2 = "some other contents" - let cid1 = client1.upload(content1).get - let cid2 = client2.upload(content2).get + let cid1 = (await client1.upload(content1)).get + let cid2 = (await client2.upload(content2)).get - let resp1 = client1.download(cid1, local = true).get - let resp2 = client2.download(cid2, local = true).get + let resp1 = (await client1.download(cid1, local = true)).get + let resp2 = (await client2.download(cid2, local = true)).get check: content1 == resp1 @@ -23,11 +23,11 @@ twonodessuite "Uploads and downloads": let content1 = "some file contents" let content2 = "some other contents" - let cid1 = client1.upload(content1).get - let cid2 = client2.upload(content2).get + let cid1 = (await client1.upload(content1)).get + let cid2 = (await client2.upload(content2)).get - let resp2 = client1.download(cid2, local = false).get - let resp1 = client2.download(cid1, local = false).get + let resp2 = (await client1.download(cid2, local = false)).get + let resp1 = (await client2.download(cid1, local = false)).get check: content1 == resp1 @@ -35,11 +35,12 @@ twonodessuite "Uploads and downloads": test "node fails retrieving non-existing local file", twoNodesConfig: let content1 = "some file contents" - let cid1 = client1.upload(content1).get # upload to first node - let resp2 = client2.download(cid1, local = true) # try retrieving from second node + let cid1 = (await client1.upload(content1)).get # upload to first node + let resp2 = + await client2.download(cid1, local = true) # try retrieving from second node check: - 
resp2.error.msg == "404 Not Found" + resp2.error.msg == "404" proc checkRestContent(cid: Cid, content: ?!string) = let c = content.tryGet() @@ -67,26 +68,28 @@ twonodessuite "Uploads and downloads": test "node allows downloading only manifest", twoNodesConfig: let content1 = "some file contents" - let cid1 = client1.upload(content1).get + let cid1 = (await client1.upload(content1)).get - let resp2 = client1.downloadManifestOnly(cid1) + let resp2 = await client1.downloadManifestOnly(cid1) checkRestContent(cid1, resp2) test "node allows downloading content without stream", twoNodesConfig: - let content1 = "some file contents" - let cid1 = client1.upload(content1).get + let + content1 = "some file contents" + cid1 = (await client1.upload(content1)).get + resp1 = await client2.downloadNoStream(cid1) - let resp1 = client2.downloadNoStream(cid1) checkRestContent(cid1, resp1) - let resp2 = client2.download(cid1, local = true).get + + let resp2 = (await client2.download(cid1, local = true)).get check: content1 == resp2 test "reliable transfer test", twoNodesConfig: proc transferTest(a: CodexClient, b: CodexClient) {.async.} = let data = await RandomChunker.example(blocks = 8) - let cid = a.upload(data).get - let response = b.download(cid).get + let cid = (await a.upload(data)).get + let response = (await b.download(cid)).get check: @response.mapIt(it.byte) == data diff --git a/tests/integration/testvalidator.nim b/tests/integration/testvalidator.nim index 8b7fbc5b..0d1a50e8 100644 --- a/tests/integration/testvalidator.nim +++ b/tests/integration/testvalidator.nim @@ -37,7 +37,7 @@ marketplacesuite "Validation": const blocks = 8 const ecNodes = 3 const ecTolerance = 1 - const proofProbability = 1 + const proofProbability = 1.u256 const collateralPerByte = 1.u256 const minPricePerBytePerSecond = 1.u256 @@ -99,11 +99,14 @@ marketplacesuite "Validation": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, 
tolerance = ecTolerance) - createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + await createAvailabilities( + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, expiry = expiry, @@ -112,12 +115,12 @@ marketplacesuite "Validation": tolerance = ecTolerance, proofProbability = proofProbability, ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId if not eventuallyS( - client0.purchaseStateIs(purchaseId, "started"), + await client0.purchaseStateIs(purchaseId, "started"), timeout = (expiry + 60).int, step = 5, ): @@ -166,11 +169,14 @@ marketplacesuite "Validation": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + await createAvailabilities( + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, expiry = expiry, @@ -179,12 +185,12 @@ marketplacesuite "Validation": tolerance = ecTolerance, proofProbability = proofProbability, ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId if not eventuallyS( - client0.purchaseStateIs(purchaseId, "started"), + await client0.purchaseStateIs(purchaseId, "started"), timeout = (expiry + 60).int, step = 5, ): diff --git a/tests/integration/twonodes.nim b/tests/integration/twonodes.nim index 
5666690e..eeceb20d 100644 --- a/tests/integration/twonodes.nim +++ b/tests/integration/twonodes.nim @@ -1,4 +1,3 @@ -import std/os import std/macros import pkg/questionable import ./multinodes diff --git a/tests/testIntegration.nim b/tests/testIntegration.nim index 9a2dc472..152d22dd 100644 --- a/tests/testIntegration.nim +++ b/tests/testIntegration.nim @@ -1,5 +1,6 @@ import ./integration/testcli import ./integration/testrestapi +import ./integration/testrestapivalidation import ./integration/testupdownload import ./integration/testsales import ./integration/testpurchasing diff --git a/tests/testTaiko.nim b/tests/testTaiko.nim index 8036e8a3..b1555bfb 100644 --- a/tests/testTaiko.nim +++ b/tests/testTaiko.nim @@ -24,7 +24,7 @@ suite "Taiko L2 Integration Tests": ) node1.waitUntilStarted() - let bootstrap = (!node1.client.info())["spr"].getStr() + let bootstrap = (!(await node1.client.info()))["spr"].getStr() node2 = startNode( [ diff --git a/vendor/asynctest b/vendor/asynctest index 5154c0d7..73c08f77 160000 --- a/vendor/asynctest +++ b/vendor/asynctest @@ -1 +1 @@ -Subproject commit 5154c0d79dd8bb086ab418cc659e923330ac24f2 +Subproject commit 73c08f77afc5cc2a5628d00f915b97bf72f70c9b diff --git a/vendor/codex-contracts-eth b/vendor/codex-contracts-eth index e74d3397..c00152e6 160000 --- a/vendor/codex-contracts-eth +++ b/vendor/codex-contracts-eth @@ -1 +1 @@ -Subproject commit e74d3397a133eaf1eb95d9ce59f56747a7c8c30b +Subproject commit c00152e6213a3ad4e6760a670213bfae22b0aabf diff --git a/vendor/constantine b/vendor/constantine index bc3845aa..8d6a6a38 160000 --- a/vendor/constantine +++ b/vendor/constantine @@ -1 +1 @@ -Subproject commit bc3845aa492b52f7fef047503b1592e830d1a774 +Subproject commit 8d6a6a38b90fb8ee3ec2230839773e69aab36d80 diff --git a/vendor/nim-codex-dht b/vendor/nim-codex-dht index 4bd3a39e..f6eef1ac 160000 --- a/vendor/nim-codex-dht +++ b/vendor/nim-codex-dht @@ -1 +1 @@ -Subproject commit 4bd3a39e0030f8ee269ef217344b6b59ec2be6dc +Subproject 
commit f6eef1ac95c70053b2518f1e3909c909ed8701a6 diff --git a/vendor/nim-datastore b/vendor/nim-datastore index d67860ad..5778e373 160000 --- a/vendor/nim-datastore +++ b/vendor/nim-datastore @@ -1 +1 @@ -Subproject commit d67860add63fd23cdacde1d3da8f4739c2660c2d +Subproject commit 5778e373fa97286f389e0aef61f1e8f30a934dab diff --git a/vendor/nim-ethers b/vendor/nim-ethers index 1cfccb96..5d07b5db 160000 --- a/vendor/nim-ethers +++ b/vendor/nim-ethers @@ -1 +1 @@ -Subproject commit 1cfccb9695fa47860bf7ef3d75da9019096a3933 +Subproject commit 5d07b5dbcf584b020c732e84cc8b7229ab3e1083 diff --git a/vendor/nim-leopard b/vendor/nim-leopard index 3e09d811..7506b90f 160000 --- a/vendor/nim-leopard +++ b/vendor/nim-leopard @@ -1 +1 @@ -Subproject commit 3e09d8113f874f3584c3fe93818541b2ff9fb9c3 +Subproject commit 7506b90f9c650c02b96bf525d4fd1bd4942a495f diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index 036e110a..c08d8073 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit 036e110a6080fba1a1662c58cfd8c21f9a548021 +Subproject commit c08d80734989b028b3d1705f2188d783a343aac0 diff --git a/vendor/nim-ngtcp2 b/vendor/nim-ngtcp2 new file mode 160000 index 00000000..6834f475 --- /dev/null +++ b/vendor/nim-ngtcp2 @@ -0,0 +1 @@ +Subproject commit 6834f4756b6af58356ac9c4fef3d71db3c3ae5fe diff --git a/vendor/nim-quic b/vendor/nim-quic new file mode 160000 index 00000000..ddcb31ff --- /dev/null +++ b/vendor/nim-quic @@ -0,0 +1 @@ +Subproject commit ddcb31ffb74b5460ab37fd13547eca90594248bc diff --git a/vendor/nim-serde b/vendor/nim-serde index 69a7a011..5ced7c88 160000 --- a/vendor/nim-serde +++ b/vendor/nim-serde @@ -1 +1 @@ -Subproject commit 69a7a0111addaa4aad885dd4bd7b5ee4684a06de +Subproject commit 5ced7c88b97d99c582285ce796957fb71fd42434