From 8880ad9cd43f29609a5a4e7742fb01b8eb7d22f1 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Tue, 11 Feb 2025 11:47:25 +0100 Subject: [PATCH 01/40] fix linting in "codex/blockexchange/engine/engine.nim" (#1107) --- codex/blockexchange/engine/engine.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codex/blockexchange/engine/engine.nim b/codex/blockexchange/engine/engine.nim index 87335b0a..d30f88d9 100644 --- a/codex/blockexchange/engine/engine.nim +++ b/codex/blockexchange/engine/engine.nim @@ -398,7 +398,7 @@ proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.asy have = await e.address in b.localStore price = @(b.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) - case e.wantType: + case e.wantType of WantType.WantHave: if have: presence.add( From 11888e78d79d62e00b5bb7be5015727d0be41af1 Mon Sep 17 00:00:00 2001 From: Marcin Czenko Date: Tue, 11 Feb 2025 16:16:45 +0100 Subject: [PATCH 02/40] deploy openapi spec only when tagged (#1106) --- .github/workflows/docs.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 50b14d05..4232ff0f 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -2,17 +2,17 @@ name: OpenAPI on: push: - branches: - - 'master' + tags: + - "v*.*.*" paths: - - 'openapi.yaml' - - '.github/workflows/docs.yml' + - "openapi.yaml" + - ".github/workflows/docs.yml" pull_request: branches: - - '**' + - "**" paths: - - 'openapi.yaml' - - '.github/workflows/docs.yml' + - "openapi.yaml" + - ".github/workflows/docs.yml" # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages permissions: @@ -40,7 +40,7 @@ jobs: deploy: name: Deploy runs-on: ubuntu-latest - if: github.ref == 'refs/heads/master' + if: startsWith(github.ref, 'refs/tags/') steps: - name: Checkout uses: actions/checkout@v4 From bbe1f09cd76a41eb1b202e81dd59fbbb67650c3a Mon Sep 17 00:00:00 2001 From: Giuliano 
Mega Date: Tue, 11 Feb 2025 16:00:05 -0300 Subject: [PATCH 03/40] Purging of local datasets (#1103) * feat(codex-node): add dataset deletion API to Codex node * feat(api): add deletion of local datasets to API * fix: logging, remove garbage, drop some CORS headers from DELETE request * fix: change empty response return code to 204 instead of 200 * fix: add time-based idling to avoid locking up the node during deletes, fix API status code * fix: uncomment commented tests committed by accident * fix: return correct code when missing CID is a Manifest CID; add back CORS headers * fix: remove lingering echo --- build.nims | 34 ++++++++++++------ codex/node.nim | 59 +++++++++++++++++++++++++++++++ codex/rest/api.nim | 30 ++++++++++++++++ tests/codex/examples.nim | 10 ++++++ tests/codex/helpers.nim | 27 +++++++------- tests/codex/node/testnode.nim | 26 ++++++++++++++ tests/codex/slots/helpers.nim | 13 +++++-- tests/integration/codexclient.nim | 13 +++++++ tests/integration/testrestapi.nim | 26 +++++++++++++- tests/integration/twonodes.nim | 1 - 10 files changed, 211 insertions(+), 28 deletions(-) diff --git a/build.nims b/build.nims index aa090e71..baf21e03 100644 --- a/build.nims +++ b/build.nims @@ -4,7 +4,6 @@ import std/os except commandLineParams ### Helper functions proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = - if not dirExists "build": mkDir "build" @@ -14,13 +13,15 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = for param in commandLineParams(): extra_params &= " " & param else: - for i in 2..= runtimeQuota: + await idleAsync() + lastIdle = getTime() + + if err =? (await store.delBlock(manifest.treeCid, i)).errorOption: + # The contract for delBlock is fuzzy, but we assume that if the block is + # simply missing we won't get an error. This is a best effort operation and + # can simply be retried. + error "Failed to delete block within dataset", index = i, err = err.msg + return failure(err) + + if err =? 
(await store.delBlock(cid)).errorOption: + error "Error deleting manifest block", err = err.msg + + success() + +proc delete*( + self: CodexNodeRef, cid: Cid +): Future[?!void] {.async: (raises: [CatchableError]).} = + ## Deletes a whole dataset, if Cid is a Manifest Cid, or a single block, if Cid a block Cid, + ## from the underlying block store. This is a strictly local operation. + ## + ## Missing blocks in dataset deletes are ignored. + ## + + without isManifest =? cid.isManifest, err: + trace "Bad content type for CID:", cid = cid, err = err.msg + return failure(err) + + if not isManifest: + return await self.deleteSingleBlock(cid) + + await self.deleteEntireDataset(cid) + proc store*( self: CodexNodeRef, stream: LPStream, diff --git a/codex/rest/api.nim b/codex/rest/api.nim index a64d26cf..f64a6f20 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -238,6 +238,15 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute let json = await formatManifestBlocks(node) return RestApiResponse.response($json, contentType = "application/json") + router.api(MethodOptions, "/api/codex/v1/data/{cid}") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("GET,DELETE", corsOrigin) + + resp.status = Http204 + await resp.sendBody("") + router.api(MethodGet, "/api/codex/v1/data/{cid}") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: @@ -254,6 +263,27 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute await node.retrieveCid(cid.get(), local = true, resp = resp) + router.api(MethodDelete, "/api/codex/v1/data/{cid}") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + ## Deletes either a single block or an entire dataset + ## from the local node. Does nothing and returns 200 + ## if the dataset is not locally available. 
+ ## + var headers = buildCorsHeaders("DELETE", allowedOrigin) + + if cid.isErr: + return RestApiResponse.error(Http400, $cid.error(), headers = headers) + + if err =? (await node.delete(cid.get())).errorOption: + return RestApiResponse.error(Http500, err.msg, headers = headers) + + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("DELETE", corsOrigin) + + resp.status = Http204 + await resp.sendBody("") + router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do( cid: Cid, resp: HttpResponseRef ) -> RestApiResponse: diff --git a/tests/codex/examples.nim b/tests/codex/examples.nim index 69a85db8..6f15182f 100644 --- a/tests/codex/examples.nim +++ b/tests/codex/examples.nim @@ -8,6 +8,7 @@ import pkg/codex/stores import pkg/codex/blocktype as bt import pkg/codex/sales import pkg/codex/merkletree +import pkg/codex/manifest import ../examples export examples @@ -51,6 +52,15 @@ proc example*(_: type BlockExcPeerCtx): BlockExcPeerCtx = proc example*(_: type Cid): Cid = bt.Block.example.cid +proc example*(_: type Manifest): Manifest = + Manifest.new( + treeCid = Cid.example, + blockSize = 256.NBytes, + datasetSize = 4096.NBytes, + filename = "example.txt".some, + mimetype = "text/plain".some, + ) + proc example*(_: type MultiHash, mcodec = Sha256HashCodec): MultiHash = let bytes = newSeqWith(256, rand(uint8)) MultiHash.digest($mcodec, bytes).tryGet() diff --git a/tests/codex/helpers.nim b/tests/codex/helpers.nim index 6d7415d3..898dd16e 100644 --- a/tests/codex/helpers.nim +++ b/tests/codex/helpers.nim @@ -85,30 +85,31 @@ proc makeWantList*( ) proc storeDataGetManifest*( - store: BlockStore, chunker: Chunker + store: BlockStore, blocks: seq[Block] ): Future[Manifest] {.async.} = - var cids = newSeq[Cid]() - - while (let chunk = await chunker.getBytes(); chunk.len > 0): - let blk = Block.new(chunk).tryGet() - cids.add(blk.cid) + for blk in blocks: (await store.putBlock(blk)).tryGet() let - tree = CodexTree.init(cids).tryGet() + (manifest, tree) = 
makeManifestAndTree(blocks).tryGet() treeCid = tree.rootCid.tryGet() - manifest = Manifest.new( - treeCid = treeCid, - blockSize = NBytes(chunker.chunkSize), - datasetSize = NBytes(chunker.offset), - ) for i in 0 ..< tree.leavesCount: let proof = tree.getProof(i).tryGet() - (await store.putCidAndProof(treeCid, i, cids[i], proof)).tryGet() + (await store.putCidAndProof(treeCid, i, blocks[i].cid, proof)).tryGet() return manifest +proc storeDataGetManifest*( + store: BlockStore, chunker: Chunker +): Future[Manifest] {.async.} = + var blocks = newSeq[Block]() + + while (let chunk = await chunker.getBytes(); chunk.len > 0): + blocks.add(Block.new(chunk).tryGet()) + + return await storeDataGetManifest(store, blocks) + proc makeRandomBlocks*( datasetSize: int, blockSize: NBytes ): Future[seq[Block]] {.async.} = diff --git a/tests/codex/node/testnode.nim b/tests/codex/node/testnode.nim index e4a9d1f4..b9450f40 100644 --- a/tests/codex/node/testnode.nim +++ b/tests/codex/node/testnode.nim @@ -37,6 +37,7 @@ import ../examples import ../helpers import ../helpers/mockmarket import ../helpers/mockclock +import ../slots/helpers import ./helpers @@ -166,3 +167,28 @@ asyncchecksuite "Test Node - Basic": (await verifiableBlock.cid in localStore) == true request.content.cid == $verifiableBlock.cid request.content.merkleRoot == builder.verifyRoot.get.toBytes + + test "Should delete a single block": + let randomBlock = bt.Block.new("Random block".toBytes).tryGet() + (await localStore.putBlock(randomBlock)).tryGet() + check (await randomBlock.cid in localStore) == true + + (await node.delete(randomBlock.cid)).tryGet() + check (await randomBlock.cid in localStore) == false + + test "Should delete an entire dataset": + let + blocks = await makeRandomBlocks(datasetSize = 2048, blockSize = 256'nb) + manifest = await storeDataGetManifest(localStore, blocks) + manifestBlock = (await store.storeManifest(manifest)).tryGet() + manifestCid = manifestBlock.cid + + check await manifestCid in 
localStore + for blk in blocks: + check await blk.cid in localStore + + (await node.delete(manifestCid)).tryGet() + + check not await manifestCid in localStore + for blk in blocks: + check not (await blk.cid in localStore) diff --git a/tests/codex/slots/helpers.nim b/tests/codex/slots/helpers.nim index 03d87d12..fced1f1c 100644 --- a/tests/codex/slots/helpers.nim +++ b/tests/codex/slots/helpers.nim @@ -15,9 +15,7 @@ import pkg/codex/rng import ../helpers -proc storeManifest*( - store: BlockStore, manifest: Manifest -): Future[?!bt.Block] {.async.} = +proc makeManifestBlock*(manifest: Manifest): ?!bt.Block = without encodedVerifiable =? manifest.encode(), err: trace "Unable to encode manifest" return failure(err) @@ -26,6 +24,15 @@ proc storeManifest*( trace "Unable to create block from manifest" return failure(error) + success blk + +proc storeManifest*( + store: BlockStore, manifest: Manifest +): Future[?!bt.Block] {.async.} = + without blk =? makeManifestBlock(manifest), err: + trace "Unable to create manifest block", err = err.msg + return failure(err) + if err =? 
(await store.putBlock(blk)).errorOption: trace "Unable to store manifest block", cid = blk.cid, err = err.msg return failure(err) diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index d1191fb9..992b50d0 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -86,6 +86,16 @@ proc downloadBytes*( success bytes +proc delete*(client: CodexClient, cid: Cid): ?!void = + let + url = client.baseurl & "/data/" & $cid + response = client.http.delete(url) + + if response.status != "204 No Content": + return failure(response.status) + + success() + proc list*(client: CodexClient): ?!RestContentList = let url = client.baseurl & "/data" let response = client.http.get(url) @@ -284,3 +294,6 @@ proc downloadRaw*(client: CodexClient, cid: string, local = false): Response = client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"), httpMethod = HttpGet, ) + +proc deleteRaw*(client: CodexClient, cid: string): Response = + return client.http.request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete) diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 8cbe9817..557efad2 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -1,10 +1,13 @@ import std/httpclient import std/sequtils import std/strformat -from pkg/libp2p import `==` +from pkg/libp2p import `==`, `$`, Cid import pkg/codex/units +import pkg/codex/manifest import ./twonodes import ../examples +import ../codex/examples +import ../codex/slots/helpers import json twonodessuite "REST API": @@ -263,3 +266,24 @@ twonodessuite "REST API": check localResponse.headers.hasKey("Content-Disposition") == true check localResponse.headers["Content-Disposition"] == "attachment; filename=\"example.txt\"" + + test "should delete a dataset when requested", twoNodesConfig: + let cid = client1.upload("some file contents").get + + var response = client1.downloadRaw($cid, local = true) + 
check response.body == "some file contents" + + client1.delete(cid).get + + response = client1.downloadRaw($cid, local = true) + check response.status == "404 Not Found" + + test "should return 200 when attempting delete of non-existing block", twoNodesConfig: + let response = client1.deleteRaw($(Cid.example())) + check response.status == "204 No Content" + + test "should return 200 when attempting delete of non-existing dataset", + twoNodesConfig: + let cid = Manifest.example().makeManifestBlock().get.cid + let response = client1.deleteRaw($cid) + check response.status == "204 No Content" diff --git a/tests/integration/twonodes.nim b/tests/integration/twonodes.nim index 5666690e..eeceb20d 100644 --- a/tests/integration/twonodes.nim +++ b/tests/integration/twonodes.nim @@ -1,4 +1,3 @@ -import std/os import std/macros import pkg/questionable import ./multinodes From 20f6fef7ab25e7634b9541e2e9d1fa63fa083f1b Mon Sep 17 00:00:00 2001 From: Slava <20563034+veaceslavdoina@users.noreply.github.com> Date: Tue, 11 Feb 2025 23:49:37 +0200 Subject: [PATCH 04/40] fix: use ubuntu-24.04 runners for docker workflows (#1102) Co-authored-by: Giuliano Mega --- .github/workflows/docker-reusable.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-reusable.yml b/.github/workflows/docker-reusable.yml index f0e46d95..7d937f78 100644 --- a/.github/workflows/docker-reusable.yml +++ b/.github/workflows/docker-reusable.yml @@ -94,11 +94,11 @@ jobs: - target: os: linux arch: amd64 - builder: ubuntu-22.04 + builder: ubuntu-24.04 - target: os: linux arch: arm64 - builder: ubuntu-22.04-arm + builder: ubuntu-24.04-arm name: Build ${{ matrix.target.os }}/${{ matrix.target.arch }} runs-on: ${{ matrix.builder }} From 45e97513a7a14ac56689a7c508d5af145737054d Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Wed, 12 Feb 2025 04:48:58 -0600 Subject: [PATCH 05/40] remove uploadedAt from manifest (#1091) * remove uploadedAt from manifest * fix test --- 
codex/manifest/coders.nim | 11 ----------- codex/manifest/manifest.nim | 15 +-------------- codex/node.nim | 1 - codex/slots/builder/builder.nim | 2 +- openapi.yaml | 6 ------ tests/integration/testrestapi.nim | 4 +--- 6 files changed, 3 insertions(+), 36 deletions(-) diff --git a/codex/manifest/coders.nim b/codex/manifest/coders.nim index 0c461e45..30e0c7ca 100644 --- a/codex/manifest/coders.nim +++ b/codex/manifest/coders.nim @@ -63,7 +63,6 @@ proc encode*(manifest: Manifest): ?!seq[byte] = # optional ErasureInfo erasure = 7; # erasure coding info # optional filename: ?string = 8; # original filename # optional mimetype: ?string = 9; # original mimetype - # optional uploadedAt: ?int64 = 10; # original uploadedAt # } # ``` # @@ -102,9 +101,6 @@ proc encode*(manifest: Manifest): ?!seq[byte] = if manifest.mimetype.isSome: header.write(9, manifest.mimetype.get()) - if manifest.uploadedAt.isSome: - header.write(10, manifest.uploadedAt.get().uint64) - pbNode.write(1, header) # set the treeCid as the data field pbNode.finish() @@ -135,7 +131,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = verifiableStrategy: uint32 filename: string mimetype: string - uploadedAt: uint64 # Decode `Header` message if pbNode.getField(1, pbHeader).isErr: @@ -169,9 +164,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = if pbHeader.getField(9, mimetype).isErr: return failure("Unable to decode `mimetype` from manifest!") - if pbHeader.getField(10, uploadedAt).isErr: - return failure("Unable to decode `uploadedAt` from manifest!") - let protected = pbErasureInfo.buffer.len > 0 var verifiable = false if protected: @@ -211,7 +203,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = var filenameOption = if filename.len == 0: string.none else: filename.some var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some - var uploadedAtOption = if uploadedAt == 0: int64.none else: uploadedAt.int64.some let self = if 
protected: @@ -229,7 +220,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = strategy = StrategyType(protectedStrategy), filename = filenameOption, mimetype = mimetypeOption, - uploadedAt = uploadedAtOption, ) else: Manifest.new( @@ -241,7 +231,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = codec = codec.MultiCodec, filename = filenameOption, mimetype = mimetypeOption, - uploadedAt = uploadedAtOption, ) ?self.verify() diff --git a/codex/manifest/manifest.nim b/codex/manifest/manifest.nim index 6e0d1b80..0bc51dfc 100644 --- a/codex/manifest/manifest.nim +++ b/codex/manifest/manifest.nim @@ -38,7 +38,6 @@ type Manifest* = ref object of RootObj version: CidVersion # Cid version filename {.serialize.}: ?string # The filename of the content uploaded (optional) mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional) - uploadedAt {.serialize.}: ?int64 # The UTC creation timestamp in seconds case protected {.serialize.}: bool # Protected datasets have erasure coded info of true: ecK: int # Number of blocks to encode @@ -131,8 +130,6 @@ func filename*(self: Manifest): ?string = func mimetype*(self: Manifest): ?string = self.mimetype -func uploadedAt*(self: Manifest): ?int64 = - self.uploadedAt ############################################################ # Operations on block list ############################################################ @@ -172,7 +169,7 @@ func `==`*(a, b: Manifest): bool = (a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and (a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and (a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and - (a.mimetype == b.mimetype) and (a.uploadedAt == b.uploadedAt) and ( + (a.mimetype == b.mimetype) and ( if a.protected: (a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and (a.originalDatasetSize == b.originalDatasetSize) and @@ -202,9 
+199,6 @@ func `$`*(self: Manifest): string = if self.mimetype.isSome: result &= ", mimetype: " & $self.mimetype - if self.uploadedAt.isSome: - result &= ", uploadedAt: " & $self.uploadedAt - result &= ( if self.protected: ", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " & @@ -236,7 +230,6 @@ func new*( protected = false, filename: ?string = string.none, mimetype: ?string = string.none, - uploadedAt: ?int64 = int64.none, ): Manifest = T( treeCid: treeCid, @@ -248,7 +241,6 @@ func new*( protected: protected, filename: filename, mimetype: mimetype, - uploadedAt: uploadedAt, ) func new*( @@ -278,7 +270,6 @@ func new*( protectedStrategy: strategy, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt, ) func new*(T: type Manifest, manifest: Manifest): Manifest = @@ -296,7 +287,6 @@ func new*(T: type Manifest, manifest: Manifest): Manifest = protected: false, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt, ) func new*( @@ -314,7 +304,6 @@ func new*( strategy = SteppedStrategy, filename: ?string = string.none, mimetype: ?string = string.none, - uploadedAt: ?int64 = int64.none, ): Manifest = Manifest( treeCid: treeCid, @@ -331,7 +320,6 @@ func new*( protectedStrategy: strategy, filename: filename, mimetype: mimetype, - uploadedAt: uploadedAt, ) func new*( @@ -374,7 +362,6 @@ func new*( verifiableStrategy: strategy, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt, ) func new*(T: type Manifest, data: openArray[byte]): ?!Manifest = diff --git a/codex/node.nim b/codex/node.nim index f43a4b55..b90d6a9e 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -391,7 +391,6 @@ proc store*( codec = dataCodec, filename = filename, mimetype = mimetype, - uploadedAt = now().utc.toTime.toUnix.some, ) without manifestBlk =? 
await self.storeManifest(manifest), err: diff --git a/codex/slots/builder/builder.nim b/codex/slots/builder/builder.nim index 30332f1c..74597ff1 100644 --- a/codex/slots/builder/builder.nim +++ b/codex/slots/builder/builder.nim @@ -189,7 +189,7 @@ proc getCellHashes*[T, H]( blkIdx = blkIdx pos = i - trace "Getting block CID for tree at index" + trace "Getting block CID for tree at index", index = blkIdx without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and digest =? tree.root, err: error "Failed to get block CID for tree at index", err = err.msg diff --git a/openapi.yaml b/openapi.yaml index 9d401e8f..70da398b 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -371,12 +371,6 @@ components: nullable: true description: "The original mimetype of the uploaded content (optional)" example: image/png - uploadedAt: - type: integer - format: int64 - nullable: true - description: "The UTC upload timestamp in seconds" - example: 1729244192 Space: type: object diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 557efad2..13b06500 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -38,7 +38,7 @@ twonodessuite "REST API": check: space.totalBlocks == 2 space.quotaMaxBytes == 8589934592.NBytes - space.quotaUsedBytes == 65598.NBytes + space.quotaUsedBytes == 65592.NBytes space.quotaReservedBytes == 12.NBytes test "node lists local files", twoNodesConfig: @@ -232,8 +232,6 @@ twonodessuite "REST API": check manifest["filename"].getStr() == "example.txt" check manifest.hasKey("mimetype") == true check manifest["mimetype"].getStr() == "text/plain" - check manifest.hasKey("uploadedAt") == true - check manifest["uploadedAt"].getInt() > 0 test "node set the headers when for download", twoNodesConfig: let headers = newHttpHeaders( From c65148822ee4f0eb17fefb584196bb3754adfc08 Mon Sep 17 00:00:00 2001 From: munna0908 <88337208+munna0908@users.noreply.github.com> Date: Wed, 12 Feb 2025 23:26:26 +0530 Subject: 
[PATCH 06/40] feat: multithreading support for erasure coding (#1087) * implement async encode * implement async decode * cleanup code * add num-threads flag * fix tests * code cleanup * improve return types and exception handling for async proc * add validation check for numThreads flag * modify encode method * add new tests for aync encoding * modify decode method * cleanup test cases * add new cli flag for threadCount * test cleanup * add new tests * fix decodeAsync exception handling * code cleanup * chore: cosmetic changes --- codex/codex.nim | 16 +- codex/conf.nim | 31 ++++ codex/erasure/backend.nim | 8 +- codex/erasure/backends/leopard.nim | 14 +- codex/erasure/erasure.nim | 255 ++++++++++++++++++++++++++--- codex/node.nim | 17 +- codex/utils/arrayutils.nim | 25 +++ tests/codex/node/helpers.nim | 2 + tests/codex/node/testcontracts.nim | 2 +- tests/codex/node/testnode.nim | 6 +- tests/codex/testerasure.nim | 117 ++++++++++++- vendor/nim-leopard | 2 +- 12 files changed, 454 insertions(+), 41 deletions(-) create mode 100644 codex/utils/arrayutils.nim diff --git a/codex/codex.nim b/codex/codex.nim index 13985254..6dcfbaaa 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -11,8 +11,10 @@ import std/sequtils import std/strutils import std/os import std/tables +import std/cpuinfo import pkg/chronos +import pkg/taskpools import pkg/presto import pkg/libp2p import pkg/confutils @@ -194,7 +196,18 @@ proc new*( .withTcpTransport({ServerFlags.ReuseAddr}) .build() - var cache: CacheStore = nil + var + cache: CacheStore = nil + taskpool: Taskpool + + try: + if config.numThreads == ThreadCount(0): + taskpool = Taskpool.new(numThreads = min(countProcessors(), 16)) + else: + taskpool = Taskpool.new(numThreads = int(config.numThreads)) + info "Threadpool started", numThreads = taskpool.numThreads + except CatchableError as exc: + raiseAssert("Failure in taskpool initialization:" & exc.msg) if config.cacheSize > 0'nb: cache = CacheStore.new(cacheSize = config.cacheSize) @@ 
-286,6 +299,7 @@ proc new*( engine = engine, discovery = discovery, prover = prover, + taskPool = taskpool, ) restServer = RestServerRef diff --git a/codex/conf.nim b/codex/conf.nim index 6d47f8f4..ccf29a1f 100644 --- a/codex/conf.nim +++ b/codex/conf.nim @@ -53,6 +53,10 @@ export DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockMaintenanceInterval, DefaultNumberOfBlocksToMaintainPerInterval +type ThreadCount* = distinct Natural + +proc `==`*(a, b: ThreadCount): bool {.borrow.} + proc defaultDataDir*(): string = let dataDir = when defined(windows): @@ -71,6 +75,7 @@ const DefaultDataDir* = defaultDataDir() DefaultCircuitDir* = defaultDataDir() / "circuits" + DefaultThreadCount* = ThreadCount(0) type StartUpCmd* {.pure.} = enum @@ -184,6 +189,13 @@ type name: "max-peers" .}: int + numThreads* {. + desc: + "Number of worker threads (\"0\" = use as many threads as there are CPU cores available)", + defaultValue: DefaultThreadCount, + name: "num-threads" + .}: ThreadCount + agentString* {. defaultValue: "Codex", desc: "Node agent string which is used as identifier in network", @@ -482,6 +494,13 @@ proc parseCmdArg*( quit QuitFailure ma +proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} = + let count = parseInt(input) + if count != 0 and count < 2: + warn "Invalid number of threads", input = input + quit QuitFailure + ThreadCount(count) + proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = var res: SignedPeerRecord try: @@ -579,6 +598,15 @@ proc readValue*( quit QuitFailure val = NBytes(value) +proc readValue*( + r: var TomlReader, val: var ThreadCount +) {.upraises: [SerializationError, IOError].} = + var str = r.readValue(string) + try: + val = parseCmdArg(ThreadCount, str) + except CatchableError as err: + raise newException(SerializationError, err.msg) + proc readValue*( r: var TomlReader, val: var Duration ) {.upraises: [SerializationError, IOError].} = @@ -609,6 +637,9 @@ proc completeCmdArg*(T: type NBytes, val: string): 
seq[string] = proc completeCmdArg*(T: type Duration, val: string): seq[string] = discard +proc completeCmdArg*(T: type ThreadCount, val: string): seq[string] = + discard + # silly chronicles, colors is a compile-time property proc stripAnsi*(v: string): string = var diff --git a/codex/erasure/backend.nim b/codex/erasure/backend.nim index a6dd8b8c..32009829 100644 --- a/codex/erasure/backend.nim +++ b/codex/erasure/backend.nim @@ -29,14 +29,18 @@ method release*(self: ErasureBackend) {.base, gcsafe.} = raiseAssert("not implemented!") method encode*( - self: EncoderBackend, buffers, parity: var openArray[seq[byte]] + self: EncoderBackend, + buffers, parity: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen: int, ): Result[void, cstring] {.base, gcsafe.} = ## encode buffers using a backend ## raiseAssert("not implemented!") method decode*( - self: DecoderBackend, buffers, parity, recovered: var openArray[seq[byte]] + self: DecoderBackend, + buffers, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen, recoveredLen: int, ): Result[void, cstring] {.base, gcsafe.} = ## decode buffers using a backend ## diff --git a/codex/erasure/backends/leopard.nim b/codex/erasure/backends/leopard.nim index c9f9db40..ae599f12 100644 --- a/codex/erasure/backends/leopard.nim +++ b/codex/erasure/backends/leopard.nim @@ -22,11 +22,13 @@ type decoder*: Option[LeoDecoder] method encode*( - self: LeoEncoderBackend, data, parity: var openArray[seq[byte]] + self: LeoEncoderBackend, + data, parity: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen: int, ): Result[void, cstring] = ## Encode data using Leopard backend - if parity.len == 0: + if parityLen == 0: return ok() var encoder = @@ -36,10 +38,12 @@ method encode*( else: self.encoder.get() - encoder.encode(data, parity) + encoder.encode(data, parity, dataLen, parityLen) method decode*( - self: LeoDecoderBackend, data, parity, recovered: var openArray[seq[byte]] + self: 
LeoDecoderBackend, + data, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]], + dataLen, parityLen, recoveredLen: int, ): Result[void, cstring] = ## Decode data using given Leopard backend @@ -50,7 +54,7 @@ method decode*( else: self.decoder.get() - decoder.decode(data, parity, recovered) + decoder.decode(data, parity, recovered, dataLen, parityLen, recoveredLen) method release*(self: LeoEncoderBackend) = if self.encoder.isSome: diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim index aacd187a..107f85bc 100644 --- a/codex/erasure/erasure.nim +++ b/codex/erasure/erasure.nim @@ -12,12 +12,14 @@ import pkg/upraises push: {.upraises: [].} -import std/sequtils -import std/sugar +import std/[sugar, atomics, sequtils] import pkg/chronos +import pkg/chronos/threadsync +import pkg/chronicles import pkg/libp2p/[multicodec, cid, multihash] import pkg/libp2p/protobuf/minprotobuf +import pkg/taskpools import ../logutils import ../manifest @@ -28,6 +30,7 @@ import ../utils import ../utils/asynciter import ../indexingstrategy import ../errors +import ../utils/arrayutils import pkg/stew/byteutils @@ -68,6 +71,7 @@ type proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.} Erasure* = ref object + taskPool: Taskpool encoderProvider*: EncoderProvider decoderProvider*: DecoderProvider store*: BlockStore @@ -87,6 +91,24 @@ type # provided. 
minSize*: NBytes + EncodeTask = object + success: Atomic[bool] + erasure: ptr Erasure + blocks: ptr UncheckedArray[ptr UncheckedArray[byte]] + parity: ptr UncheckedArray[ptr UncheckedArray[byte]] + blockSize, blocksLen, parityLen: int + signal: ThreadSignalPtr + + DecodeTask = object + success: Atomic[bool] + erasure: ptr Erasure + blocks: ptr UncheckedArray[ptr UncheckedArray[byte]] + parity: ptr UncheckedArray[ptr UncheckedArray[byte]] + recovered: ptr UncheckedArray[ptr UncheckedArray[byte]] + blockSize, blocksLen: int + parityLen, recoveredLen: int + signal: ThreadSignalPtr + func indexToPos(steps, idx, step: int): int {.inline.} = ## Convert an index to a position in the encoded ## dataset @@ -269,6 +291,81 @@ proc init*( strategy: strategy, ) +proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} = + # Task suitable for running in taskpools - look, no GC! + let encoder = + task[].erasure.encoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen) + defer: + encoder.release() + discard task[].signal.fireSync() + + if ( + let res = + encoder.encode(task[].blocks, task[].parity, task[].blocksLen, task[].parityLen) + res.isErr + ): + warn "Error from leopard encoder backend!", error = $res.error + + task[].success.store(false) + else: + task[].success.store(true) + +proc encodeAsync*( + self: Erasure, + blockSize, blocksLen, parityLen: int, + data: ref seq[seq[byte]], + parity: ptr UncheckedArray[ptr UncheckedArray[byte]], +): Future[?!void] {.async: (raises: [CancelledError]).} = + without threadPtr =? 
ThreadSignalPtr.new(): + return failure("Unable to create thread signal") + + defer: + threadPtr.close().expect("closing once works") + + var blockData = createDoubleArray(blocksLen, blockSize) + + for i in 0 ..< data[].len: + copyMem(blockData[i], addr data[i][0], blockSize) + + defer: + freeDoubleArray(blockData, blocksLen) + + ## Create an ecode task with block data + var task = EncodeTask( + erasure: addr self, + blockSize: blockSize, + blocksLen: blocksLen, + parityLen: parityLen, + blocks: blockData, + parity: parity, + signal: threadPtr, + ) + + let t = addr task + + doAssert self.taskPool.numThreads > 1, + "Must have at least one separate thread or signal will never be fired" + self.taskPool.spawn leopardEncodeTask(self.taskPool, t) + let threadFut = threadPtr.wait() + + try: + await threadFut.join() + except CatchableError as exc: + try: + await threadFut + except AsyncError as asyncExc: + return failure(asyncExc.msg) + finally: + if exc of CancelledError: + raise (ref CancelledError) exc + else: + return failure(exc.msg) + + if not t.success.load(): + return failure("Leopard encoding failed") + + success() + proc encodeData( self: Erasure, manifest: Manifest, params: EncodingParams ): Future[?!Manifest] {.async.} = @@ -276,7 +373,6 @@ proc encodeData( ## ## `manifest` - the manifest to encode ## - logScope: steps = params.steps rounded_blocks = params.rounded @@ -286,7 +382,6 @@ proc encodeData( var cids = seq[Cid].new() - encoder = self.encoderProvider(manifest.blockSize.int, params.ecK, params.ecM) emptyBlock = newSeq[byte](manifest.blockSize.int) cids[].setLen(params.blocksCount) @@ -296,8 +391,7 @@ proc encodeData( # TODO: Don't allocate a new seq every time, allocate once and zero out var data = seq[seq[byte]].new() # number of blocks to encode - parityData = - newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int)) + parity = createDoubleArray(params.ecM, manifest.blockSize.int) data[].setLen(params.ecK) # TODO: this is a tight 
blocking loop so we sleep here to allow @@ -311,15 +405,25 @@ proc encodeData( trace "Unable to prepare data", error = err.msg return failure(err) - trace "Erasure coding data", data = data[].len, parity = parityData.len + trace "Erasure coding data", data = data[].len - if (let res = encoder.encode(data[], parityData); res.isErr): - trace "Unable to encode manifest!", error = $res.error - return failure($res.error) + try: + if err =? ( + await self.encodeAsync( + manifest.blockSize.int, params.ecK, params.ecM, data, parity + ) + ).errorOption: + return failure(err) + except CancelledError as exc: + raise exc + finally: + freeDoubleArray(parity, params.ecM) var idx = params.rounded + step for j in 0 ..< params.ecM: - without blk =? bt.Block.new(parityData[j]), error: + var innerPtr: ptr UncheckedArray[byte] = parity[][j] + without blk =? bt.Block.new(innerPtr.toOpenArray(0, manifest.blockSize.int - 1)), + error: trace "Unable to create parity block", err = error.msg return failure(error) @@ -356,8 +460,6 @@ proc encodeData( except CatchableError as exc: trace "Erasure coding encoding error", exc = exc.msg return failure(exc) - finally: - encoder.release() proc encode*( self: Erasure, @@ -381,6 +483,101 @@ proc encode*( return success encodedManifest +proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} = + # Task suitable for running in taskpools - look, no GC! 
+ let decoder = + task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen) + defer: + decoder.release() + + if ( + let res = decoder.decode( + task[].blocks, + task[].parity, + task[].recovered, + task[].blocksLen, + task[].parityLen, + task[].recoveredLen, + ) + res.isErr + ): + warn "Error from leopard decoder backend!", error = $res.error + task[].success.store(false) + else: + task[].success.store(true) + + discard task[].signal.fireSync() + +proc decodeAsync*( + self: Erasure, + blockSize, blocksLen, parityLen: int, + blocks, parity: ref seq[seq[byte]], + recovered: ptr UncheckedArray[ptr UncheckedArray[byte]], +): Future[?!void] {.async: (raises: [CancelledError]).} = + without threadPtr =? ThreadSignalPtr.new(): + return failure("Unable to create thread signal") + + defer: + threadPtr.close().expect("closing once works") + + var + blocksData = createDoubleArray(blocksLen, blockSize) + parityData = createDoubleArray(parityLen, blockSize) + + for i in 0 ..< blocks[].len: + if blocks[i].len > 0: + copyMem(blocksData[i], addr blocks[i][0], blockSize) + else: + blocksData[i] = nil + + for i in 0 ..< parity[].len: + if parity[i].len > 0: + copyMem(parityData[i], addr parity[i][0], blockSize) + else: + parityData[i] = nil + + defer: + freeDoubleArray(blocksData, blocksLen) + freeDoubleArray(parityData, parityLen) + + ## Create an decode task with block data + var task = DecodeTask( + erasure: addr self, + blockSize: blockSize, + blocksLen: blocksLen, + parityLen: parityLen, + recoveredLen: blocksLen, + blocks: blocksData, + parity: parityData, + recovered: recovered, + signal: threadPtr, + ) + + # Hold the task pointer until the signal is received + let t = addr task + doAssert self.taskPool.numThreads > 1, + "Must have at least one separate thread or signal will never be fired" + self.taskPool.spawn leopardDecodeTask(self.taskPool, t) + let threadFut = threadPtr.wait() + + try: + await threadFut.join() + except CatchableError as exc: + 
try: + await threadFut + except AsyncError as asyncExc: + return failure(asyncExc.msg) + finally: + if exc of CancelledError: + raise (ref CancelledError) exc + else: + return failure(exc.msg) + + if not t.success.load(): + return failure("Leopard encoding failed") + + success() + proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = ## Decode a protected manifest into it's original ## manifest @@ -388,7 +585,6 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = ## `encoded` - the encoded (protected) manifest to ## be recovered ## - logScope: steps = encoded.steps rounded_blocks = encoded.rounded @@ -411,8 +607,7 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = var data = seq[seq[byte]].new() parityData = seq[seq[byte]].new() - recovered = - newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int)) + recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int) data[].setLen(encoded.ecK) # set len to K parityData[].setLen(encoded.ecM) # set len to M @@ -430,15 +625,26 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = continue trace "Erasure decoding data" - - if (let err = decoder.decode(data[], parityData[], recovered); err.isErr): - trace "Unable to decode data!", err = $err.error - return failure($err.error) + try: + if err =? ( + await self.decodeAsync( + encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered + ) + ).errorOption: + return failure(err) + except CancelledError as exc: + raise exc + finally: + freeDoubleArray(recovered, encoded.ecK) for i in 0 ..< encoded.ecK: let idx = i * encoded.steps + step if data[i].len <= 0 and not cids[idx].isEmpty: - without blk =? bt.Block.new(recovered[i]), error: + var innerPtr: ptr UncheckedArray[byte] = recovered[][i] + + without blk =? 
bt.Block.new( + innerPtr.toOpenArray(0, encoded.blockSize.int - 1) + ), error: trace "Unable to create block!", exc = error.msg return failure(error) @@ -490,10 +696,13 @@ proc new*( store: BlockStore, encoderProvider: EncoderProvider, decoderProvider: DecoderProvider, + taskPool: Taskpool, ): Erasure = ## Create a new Erasure instance for encoding and decoding manifests ## - Erasure( - store: store, encoderProvider: encoderProvider, decoderProvider: decoderProvider + store: store, + encoderProvider: encoderProvider, + decoderProvider: decoderProvider, + taskPool: taskPool, ) diff --git a/codex/node.nim b/codex/node.nim index b90d6a9e..2602bfe6 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -15,6 +15,7 @@ import std/strformat import std/sugar import times +import pkg/taskpools import pkg/questionable import pkg/questionable/results import pkg/chronos @@ -70,6 +71,7 @@ type contracts*: Contracts clock*: Clock storage*: Contracts + taskpool: Taskpool CodexNodeRef* = ref CodexNode @@ -235,8 +237,9 @@ proc streamEntireDataset( # Retrieve, decode and save to the local store all EС groups proc erasureJob(): Future[?!void] {.async.} = # Spawn an erasure decoding job - let erasure = - Erasure.new(self.networkStore, leoEncoderProvider, leoDecoderProvider) + let erasure = Erasure.new( + self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool + ) without _ =? (await erasure.decode(manifest)), error: error "Unable to erasure decode manifest", manifestCid, exc = error.msg return failure(error) @@ -461,8 +464,9 @@ proc setupRequest( return failure error # Erasure code the dataset according to provided parameters - let erasure = - Erasure.new(self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider) + let erasure = Erasure.new( + self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskpool + ) without encoded =? 
(await erasure.encode(manifest, ecK, ecM)), error: trace "Unable to erasure code dataset" @@ -782,12 +786,16 @@ proc stop*(self: CodexNodeRef) {.async.} = if not self.networkStore.isNil: await self.networkStore.close + if not self.taskpool.isNil: + self.taskpool.shutdown() + proc new*( T: type CodexNodeRef, switch: Switch, networkStore: NetworkStore, engine: BlockExcEngine, discovery: Discovery, + taskpool: Taskpool, prover = Prover.none, contracts = Contracts.default, ): CodexNodeRef = @@ -800,5 +808,6 @@ proc new*( engine: engine, prover: prover, discovery: discovery, + taskPool: taskpool, contracts: contracts, ) diff --git a/codex/utils/arrayutils.nim b/codex/utils/arrayutils.nim new file mode 100644 index 00000000..c398921f --- /dev/null +++ b/codex/utils/arrayutils.nim @@ -0,0 +1,25 @@ +import std/sequtils + +proc createDoubleArray*( + outerLen, innerLen: int +): ptr UncheckedArray[ptr UncheckedArray[byte]] = + # Allocate outer array + result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](allocShared0( + sizeof(ptr UncheckedArray[byte]) * outerLen + )) + + # Allocate each inner array + for i in 0 ..< outerLen: + result[i] = cast[ptr UncheckedArray[byte]](allocShared0(sizeof(byte) * innerLen)) + +proc freeDoubleArray*( + arr: ptr UncheckedArray[ptr UncheckedArray[byte]], outerLen: int +) = + # Free each inner array + for i in 0 ..< outerLen: + if not arr[i].isNil: + deallocShared(arr[i]) + + # Free outer array + if not arr.isNil: + deallocShared(arr) diff --git a/tests/codex/node/helpers.nim b/tests/codex/node/helpers.nim index 0d72b06b..2d1a87dc 100644 --- a/tests/codex/node/helpers.nim +++ b/tests/codex/node/helpers.nim @@ -6,6 +6,7 @@ import pkg/chronos import pkg/codex/codextypes import pkg/codex/chunker import pkg/codex/stores +import pkg/taskpools import ../../asynctest @@ -118,6 +119,7 @@ template setupAndTearDown*() {.dirty.} = engine = engine, prover = Prover.none, discovery = blockDiscovery, + taskpool = Taskpool.new(), ) teardown: diff --git 
a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim index cce6d5bd..52adb5f6 100644 --- a/tests/codex/node/testcontracts.nim +++ b/tests/codex/node/testcontracts.nim @@ -75,7 +75,7 @@ asyncchecksuite "Test Node - Host contracts": let manifestBlock = bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, Taskpool.new) manifestCid = manifestBlock.cid manifestCidStr = $(manifestCid) diff --git a/tests/codex/node/testnode.nim b/tests/codex/node/testnode.nim index b9450f40..3f9a141a 100644 --- a/tests/codex/node/testnode.nim +++ b/tests/codex/node/testnode.nim @@ -12,6 +12,7 @@ import pkg/questionable/results import pkg/stint import pkg/poseidon2 import pkg/poseidon2/io +import pkg/taskpools import pkg/nitro import pkg/codexdht/discv5/protocol as discv5 @@ -67,7 +68,7 @@ asyncchecksuite "Test Node - Basic": # https://github.com/codex-storage/nim-codex/issues/699 let cstore = CountingStore.new(engine, localStore) - node = CodexNodeRef.new(switch, cstore, engine, blockDiscovery) + node = CodexNodeRef.new(switch, cstore, engine, blockDiscovery, Taskpool.new()) missingCid = Cid.init("zDvZRwzmCvtiyubW9AecnxgLnXK8GrBvpQJBDzToxmzDN6Nrc2CZ").get() @@ -138,7 +139,8 @@ asyncchecksuite "Test Node - Basic": test "Setup purchase request": let - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) + erasure = + Erasure.new(store, leoEncoderProvider, leoDecoderProvider, Taskpool.new()) manifest = await storeDataGetManifest(localStore, chunker) manifestBlock = bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() diff --git a/tests/codex/testerasure.nim b/tests/codex/testerasure.nim index 952497e9..d469b379 100644 --- a/tests/codex/testerasure.nim +++ b/tests/codex/testerasure.nim @@ -1,5 +1,6 @@ import std/sequtils import std/sugar +import std/times import pkg/chronos 
import pkg/questionable/results @@ -11,6 +12,8 @@ import pkg/codex/blocktype as bt import pkg/codex/rng import pkg/codex/utils import pkg/codex/indexingstrategy +import pkg/taskpools +import pkg/codex/utils/arrayutils import ../asynctest import ./helpers @@ -27,6 +30,7 @@ suite "Erasure encode/decode": var erasure: Erasure let repoTmp = TempLevelDb.new() let metaTmp = TempLevelDb.new() + var taskpool: Taskpool setup: let @@ -35,12 +39,14 @@ suite "Erasure encode/decode": rng = Rng.instance() chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize) store = RepoStore.new(repoDs, metaDs) - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) + taskpool = Taskpool.new() + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool) manifest = await storeDataGetManifest(store, chunker) teardown: await repoTmp.destroyDb() await metaTmp.destroyDb() + taskpool.shutdown() proc encode(buffers, parity: int): Future[Manifest] {.async.} = let encoded = @@ -212,7 +218,7 @@ suite "Erasure encode/decode": let present = await store.hasBlock(manifest.treeCid, d) check present.tryGet() - test "handles edge case of 0 parity blocks": + test "Handles edge case of 0 parity blocks": const buffers = 20 parity = 0 @@ -221,6 +227,43 @@ suite "Erasure encode/decode": discard (await erasure.decode(encoded)).tryGet() + test "Should concurrently encode/decode multiple datasets": + const iterations = 2 + + let + datasetSize = 1.MiBs + ecK = 10.Natural + ecM = 10.Natural + + var encodeTasks = newSeq[Future[?!Manifest]]() + var decodeTasks = newSeq[Future[?!Manifest]]() + var manifests = newSeq[Manifest]() + for i in 0 ..< iterations: + let + # create random data and store it + blockSize = rng.sample(@[1, 2, 4, 8, 16, 32, 64].mapIt(it.KiBs)) + chunker = RandomChunker.new(rng, size = datasetSize, chunkSize = blockSize) + manifest = await storeDataGetManifest(store, chunker) + manifests.add(manifest) + # encode the data concurrently + 
encodeTasks.add(erasure.encode(manifest, ecK, ecM)) + # wait for all encoding tasks to finish + let encodeResults = await allFinished(encodeTasks) + # decode the data concurrently + for i in 0 ..< encodeResults.len: + decodeTasks.add(erasure.decode(encodeResults[i].read().tryGet())) + # wait for all decoding tasks to finish + let decodeResults = await allFinished(decodeTasks) # TODO: use allFutures + + for j in 0 ..< decodeTasks.len: + let + decoded = decodeResults[j].read().tryGet() + encoded = encodeResults[j].read().tryGet() + check: + decoded.treeCid == manifests[j].treeCid + decoded.treeCid == encoded.originalTreeCid + decoded.blocksCount == encoded.originalBlocksCount + test "Should handle verifiable manifests": const buffers = 20 @@ -259,3 +302,73 @@ suite "Erasure encode/decode": decoded.treeCid == manifest.treeCid decoded.treeCid == encoded.originalTreeCid decoded.blocksCount == encoded.originalBlocksCount + + test "Should complete encode/decode task when cancelled": + let + blocksLen = 10000 + parityLen = 10 + data = seq[seq[byte]].new() + chunker = RandomChunker.new( + rng, size = (blocksLen * BlockSize.int), chunkSize = BlockSize + ) + + data[].setLen(blocksLen) + + for i in 0 ..< blocksLen: + let chunk = await chunker.getBytes() + shallowCopy(data[i], @(chunk)) + + let + parity = createDoubleArray(parityLen, BlockSize.int) + paritySeq = seq[seq[byte]].new() + recovered = createDoubleArray(blocksLen, BlockSize.int) + cancelledTaskParity = createDoubleArray(parityLen, BlockSize.int) + cancelledTaskRecovered = createDoubleArray(blocksLen, BlockSize.int) + + paritySeq[].setLen(parityLen) + defer: + freeDoubleArray(parity, parityLen) + freeDoubleArray(cancelledTaskParity, parityLen) + freeDoubleArray(recovered, blocksLen) + freeDoubleArray(cancelledTaskRecovered, blocksLen) + + for i in 0 ..< parityLen: + paritySeq[i] = cast[seq[byte]](parity[i]) + + # call encodeAsync to get the parity + let encFut = + await erasure.encodeAsync(BlockSize.int, blocksLen, 
parityLen, data, parity) + check encFut.isOk + + let decFut = await erasure.decodeAsync( + BlockSize.int, blocksLen, parityLen, data, paritySeq, recovered + ) + check decFut.isOk + + # call encodeAsync and cancel the task + let encodeFut = erasure.encodeAsync( + BlockSize.int, blocksLen, parityLen, data, cancelledTaskParity + ) + encodeFut.cancel() + + try: + discard await encodeFut + except CatchableError as exc: + check exc of CancelledError + finally: + for i in 0 ..< parityLen: + check equalMem(parity[i], cancelledTaskParity[i], BlockSize.int) + + # call decodeAsync and cancel the task + let decodeFut = erasure.decodeAsync( + BlockSize.int, blocksLen, parityLen, data, paritySeq, cancelledTaskRecovered + ) + decodeFut.cancel() + + try: + discard await decodeFut + except CatchableError as exc: + check exc of CancelledError + finally: + for i in 0 ..< blocksLen: + check equalMem(recovered[i], cancelledTaskRecovered[i], BlockSize.int) diff --git a/vendor/nim-leopard b/vendor/nim-leopard index 3e09d811..7506b90f 160000 --- a/vendor/nim-leopard +++ b/vendor/nim-leopard @@ -1 +1 @@ -Subproject commit 3e09d8113f874f3584c3fe93818541b2ff9fb9c3 +Subproject commit 7506b90f9c650c02b96bf525d4fd1bd4942a495f From 25c84f4e0ee459ecf4eb62804a6a78ee67fa8c14 Mon Sep 17 00:00:00 2001 From: Giuliano Mega Date: Fri, 14 Feb 2025 10:34:17 -0300 Subject: [PATCH 07/40] Fix/repostore deletes for non-orphan blocks (#1109) * fix: fix deletion of non-orphan blocks * feat: improve error feedback for illegal direct block deletes * chore: minor rewording of test header --- codex/stores/repostore/operations.nim | 14 +++- codex/stores/repostore/store.nim | 55 +++++++++--- tests/codex/examples.nim | 4 +- tests/codex/stores/testrepostore.nim | 115 ++++++++++++++++++++++++++ 4 files changed, 173 insertions(+), 15 deletions(-) diff --git a/codex/stores/repostore/operations.nim b/codex/stores/repostore/operations.nim index dcacbd62..125741e1 100644 --- a/codex/stores/repostore/operations.nim +++ 
b/codex/stores/repostore/operations.nim @@ -57,6 +57,17 @@ proc putLeafMetadata*( (md.some, res), ) +proc delLeafMetadata*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!void] {.async.} = + without key =? createBlockCidAndProofMetadataKey(treeCid, index), err: + return failure(err) + + if err =? (await self.metaDs.delete(key)).errorOption: + return failure(err) + + success() + proc getLeafMetadata*( self: RepoStore, treeCid: Cid, index: Natural ): Future[?!LeafMetadata] {.async.} = @@ -205,9 +216,6 @@ proc storeBlock*( proc tryDeleteBlock*( self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.low ): Future[?!DeleteResult] {.async.} = - if cid.isEmpty: - return success(DeleteResult(kind: InUse)) - without metaKey =? createBlockExpirationMetadataKey(cid), err: return failure(err) diff --git a/codex/stores/repostore/store.nim b/codex/stores/repostore/store.nim index 2b14d6b7..d7305107 100644 --- a/codex/stores/repostore/store.nim +++ b/codex/stores/repostore/store.nim @@ -186,13 +186,13 @@ method putBlock*( return success() -method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = - ## Delete a block from the blockstore when block refCount is 0 or block is expired - ## - +proc delBlockInternal(self: RepoStore, cid: Cid): Future[?!DeleteResultKind] {.async.} = logScope: cid = cid + if cid.isEmpty: + return success(Deleted) + trace "Attempting to delete a block" without res =? await self.tryDeleteBlock(cid, self.clock.now()), err: @@ -205,12 +205,28 @@ method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = if err =? 
(await self.updateQuotaUsage(minusUsed = res.released)).errorOption: return failure(err) - elif res.kind == InUse: - trace "Block in use, refCount > 0 and not expired" - else: - trace "Block not found in store" - return success() + success(res.kind) + +method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = + ## Delete a block from the blockstore when block refCount is 0 or block is expired + ## + + logScope: + cid = cid + + without outcome =? await self.delBlockInternal(cid), err: + return failure(err) + + case outcome + of InUse: + failure("Directly deleting a block that is part of a dataset is not allowed.") + of NotFound: + trace "Block not found, ignoring" + success() + of Deleted: + trace "Block already deleted" + success() method delBlock*( self: RepoStore, treeCid: Cid, index: Natural @@ -221,12 +237,19 @@ method delBlock*( else: return failure(err) + if err =? (await self.delLeafMetadata(treeCid, index)).errorOption: + error "Failed to delete leaf metadata, block will remain on disk.", err = err.msg + return failure(err) + if err =? (await self.updateBlockMetadata(leafMd.blkCid, minusRefCount = 1)).errorOption: if not (err of BlockNotFoundError): return failure(err) - await self.delBlock(leafMd.blkCid) # safe delete, only if refCount == 0 + without _ =? await self.delBlockInternal(leafMd.blkCid), err: + return failure(err) + + success() method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} = ## Check if the block exists in the blockstore @@ -295,6 +318,18 @@ proc createBlockExpirationQuery(maxNumber: int, offset: int): ?!Query = let queryKey = ?createBlockExpirationMetadataQueryKey() success Query.init(queryKey, offset = offset, limit = maxNumber) +proc blockRefCount*(self: RepoStore, cid: Cid): Future[?!Natural] {.async.} = + ## Returns the reference count for a block. If the count is zero; + ## this means the block is eligible for garbage collection. + ## + without key =? 
createBlockExpirationMetadataKey(cid), err: + return failure(err) + + without md =? await get[BlockMetadata](self.metaDs, key), err: + return failure(err) + + return success(md.refCount) + method getBlockExpirations*( self: RepoStore, maxNumber: int, offset: int ): Future[?!AsyncIter[BlockExpiration]] {.async, base.} = diff --git a/tests/codex/examples.nim b/tests/codex/examples.nim index 6f15182f..22a411c2 100644 --- a/tests/codex/examples.nim +++ b/tests/codex/examples.nim @@ -37,8 +37,8 @@ proc example*(_: type SignedState): SignedState = proc example*(_: type Pricing): Pricing = Pricing(address: EthAddress.example, price: uint32.rand.u256) -proc example*(_: type bt.Block): bt.Block = - let length = rand(4096) +proc example*(_: type bt.Block, size: int = 4096): bt.Block = + let length = rand(size) let bytes = newSeqWith(length, rand(uint8)) bt.Block.new(bytes).tryGet() diff --git a/tests/codex/stores/testrepostore.nim b/tests/codex/stores/testrepostore.nim index dda4ed82..0279b56f 100644 --- a/tests/codex/stores/testrepostore.nim +++ b/tests/codex/stores/testrepostore.nim @@ -12,9 +12,11 @@ import pkg/datastore import pkg/codex/stores/cachestore import pkg/codex/chunker import pkg/codex/stores +import pkg/codex/stores/repostore/operations import pkg/codex/blocktype as bt import pkg/codex/clock import pkg/codex/utils/asynciter +import pkg/codex/merkletree/codex import ../../asynctest import ../helpers @@ -354,6 +356,119 @@ asyncchecksuite "RepoStore": check has.isOk check has.get + test "should set the reference count for orphan blocks to 0": + let blk = Block.example(size = 200) + (await repo.putBlock(blk)).tryGet() + check (await repo.blockRefCount(blk.cid)).tryGet() == 0.Natural + + test "should not allow non-orphan blocks to be deleted directly": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = 
makeManifestAndTree(dataset).tryGet() + treeCid = tree.rootCid.tryGet() + proof = tree.getProof(0).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0, blk.cid, proof)).tryGet() + + let err = (await repo.delBlock(blk.cid)).error() + check err.msg == + "Directly deleting a block that is part of a dataset is not allowed." + + test "should allow non-orphan blocks to be deleted by dataset reference": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = makeManifestAndTree(dataset).tryGet() + treeCid = tree.rootCid.tryGet() + proof = tree.getProof(0).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0, blk.cid, proof)).tryGet() + + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + check not (await blk.cid in repo) + + test "should not delete a non-orphan block until it is deleted from all parent datasets": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + blockPool = await makeRandomBlocks(datasetSize = 768, blockSize = 256'nb) + + let + dataset1 = @[blockPool[0], blockPool[1]] + dataset2 = @[blockPool[1], blockPool[2]] + + let sharedBlock = blockPool[1] + + let + (manifest1, tree1) = makeManifestAndTree(dataset1).tryGet() + treeCid1 = tree1.rootCid.tryGet() + (manifest2, tree2) = makeManifestAndTree(dataset2).tryGet() + treeCid2 = tree2.rootCid.tryGet() + + (await repo.putBlock(sharedBlock)).tryGet() + check (await repo.blockRefCount(sharedBlock.cid)).tryGet() == 0.Natural + + let + proof1 = tree1.getProof(1).tryGet() + proof2 = tree2.getProof(0).tryGet() + + (await repo.putCidAndProof(treeCid1, 1, sharedBlock.cid, proof1)).tryGet() + check (await repo.blockRefCount(sharedBlock.cid)).tryGet() == 1.Natural + + (await repo.putCidAndProof(treeCid2, 0, sharedBlock.cid, proof2)).tryGet() + check (await 
repo.blockRefCount(sharedBlock.cid)).tryGet() == 2.Natural + + (await repo.delBlock(treeCid1, 1.Natural)).tryGet() + check (await repo.blockRefCount(sharedBlock.cid)).tryGet() == 1.Natural + check (await sharedBlock.cid in repo) + + (await repo.delBlock(treeCid2, 0.Natural)).tryGet() + check not (await sharedBlock.cid in repo) + + test "should clear leaf metadata when block is deleted from dataset": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = makeManifestAndTree(dataset).tryGet() + treeCid = tree.rootCid.tryGet() + proof = tree.getProof(1).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0.Natural, blk.cid, proof)).tryGet() + + discard (await repo.getLeafMetadata(treeCid, 0.Natural)).tryGet() + + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + + let err = (await repo.getLeafMetadata(treeCid, 0.Natural)).error() + check err of BlockNotFoundError + + test "should not fail when reinserting and deleting a previously deleted block (bug #1108)": + let + repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = + 1000'nb) + dataset = await makeRandomBlocks(datasetSize = 512, blockSize = 256'nb) + blk = dataset[0] + (manifest, tree) = makeManifestAndTree(dataset).tryGet() + treeCid = tree.rootCid.tryGet() + proof = tree.getProof(1).tryGet() + + (await repo.putBlock(blk)).tryGet() + (await repo.putCidAndProof(treeCid, 0, blk.cid, proof)).tryGet() + + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + (await repo.putBlock(blk)).tryGet() + (await repo.delBlock(treeCid, 0.Natural)).tryGet() + commonBlockStoreTests( "RepoStore Sql backend", proc(): BlockStore = From dc08ff8840ef6c3bbb29a1a9fae5048edead90bf Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 17 Feb 2025 11:34:42 +0100 Subject: [PATCH 08/40] chore(marketplace): add a cache for storage requests 
(#1090) * Add cache to for requests * Change request cache description message and use const as default value * Set request cache size configuration hidden --- codex/codex.nim | 4 +++- codex/conf.nim | 13 ++++++++++++- codex/contracts/config.nim | 2 ++ codex/contracts/market.nim | 25 ++++++++++++++++++++++--- tests/contracts/testMarket.nim | 11 +++++++++++ 5 files changed, 50 insertions(+), 5 deletions(-) diff --git a/codex/codex.nim b/codex/codex.nim index 6dcfbaaa..dc577373 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -109,7 +109,9 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} = quit QuitFailure let marketplace = Marketplace.new(marketplaceAddress, signer) - let market = OnChainMarket.new(marketplace, config.rewardRecipient) + let market = OnChainMarket.new( + marketplace, config.rewardRecipient, config.marketplaceRequestCacheSize + ) let clock = OnChainClock.new(provider) var client: ?ClientInteractions diff --git a/codex/conf.nim b/codex/conf.nim index ccf29a1f..2a859efb 100644 --- a/codex/conf.nim +++ b/codex/conf.nim @@ -44,6 +44,7 @@ import ./utils import ./nat import ./utils/natutils +from ./contracts/config import DefaultRequestCacheSize from ./validationconfig import MaxSlots, ValidationGroups export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig @@ -51,7 +52,7 @@ export ValidationGroups, MaxSlots export DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockMaintenanceInterval, - DefaultNumberOfBlocksToMaintainPerInterval + DefaultNumberOfBlocksToMaintainPerInterval, DefaultRequestCacheSize type ThreadCount* = distinct Natural @@ -359,6 +360,16 @@ type name: "reward-recipient" .}: Option[EthAddress] + marketplaceRequestCacheSize* {. + desc: + "Maximum number of StorageRequests kept in memory." 
& + "Reduces fetching of StorageRequest data from the contract.", + defaultValue: DefaultRequestCacheSize, + defaultValueDesc: $DefaultRequestCacheSize, + name: "request-cache-size", + hidden + .}: uint16 + case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd of PersistenceCmd.prover: circuitDir* {. diff --git a/codex/contracts/config.nim b/codex/contracts/config.nim index 87cd1f2a..5493c643 100644 --- a/codex/contracts/config.nim +++ b/codex/contracts/config.nim @@ -4,6 +4,8 @@ import pkg/questionable/results export contractabi +const DefaultRequestCacheSize* = 128.uint16 + type MarketplaceConfig* = object collateral*: CollateralConfig diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 35557050..9157b269 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -2,6 +2,7 @@ import std/strutils import pkg/ethers import pkg/upraises import pkg/questionable +import pkg/lrucache import ../utils/exceptions import ../logutils import ../market @@ -20,6 +21,7 @@ type signer: Signer rewardRecipient: ?Address configuration: ?MarketplaceConfig + requestCache: LruCache[string, StorageRequest] MarketSubscription = market.Subscription EventSubscription = ethers.Subscription @@ -27,12 +29,22 @@ type eventSubscription: EventSubscription func new*( - _: type OnChainMarket, contract: Marketplace, rewardRecipient = Address.none + _: type OnChainMarket, + contract: Marketplace, + rewardRecipient = Address.none, + requestCacheSize: uint16 = DefaultRequestCacheSize, ): OnChainMarket = without signer =? 
contract.signer: raiseAssert("Marketplace contract should have a signer") - OnChainMarket(contract: contract, signer: signer, rewardRecipient: rewardRecipient) + var requestCache = newLruCache[string, StorageRequest](int(requestCacheSize)) + + OnChainMarket( + contract: contract, + signer: signer, + rewardRecipient: rewardRecipient, + requestCache: requestCache, + ) proc raiseMarketError(message: string) {.raises: [MarketError].} = raise newException(MarketError, message) @@ -112,9 +124,16 @@ method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} method getRequest*( market: OnChainMarket, id: RequestId ): Future[?StorageRequest] {.async.} = + let key = $id + + if market.requestCache.contains(key): + return some market.requestCache[key] + convertEthersError: try: - return some await market.contract.getRequest(id) + let request = await market.contract.getRequest(id) + market.requestCache[key] = request + return some request except Marketplace_UnknownRequest: return none StorageRequest diff --git a/tests/contracts/testMarket.nim b/tests/contracts/testMarket.nim index a77c2aaa..2ba450a1 100644 --- a/tests/contracts/testMarket.nim +++ b/tests/contracts/testMarket.nim @@ -3,6 +3,7 @@ import std/importutils import pkg/chronos import pkg/ethers/erc20 import codex/contracts +import pkg/lrucache import ../ethertest import ./examples import ./time @@ -591,3 +592,13 @@ ethersuite "On-Chain Market": let expectedPayout = request.expectedPayout(filledAt, requestEnd.u256) check endBalanceHost == (startBalanceHost + request.ask.collateralPerSlot) check endBalanceReward == (startBalanceReward + expectedPayout) + + test "the request is added in cache after the fist access": + await market.requestStorage(request) + + check market.requestCache.contains($request.id) == false + discard await market.getRequest(request.id) + + check market.requestCache.contains($request.id) == true + let cacheValue = market.requestCache[$request.id] + check cacheValue == request 
From 5af3477793191bb5eea3ac5861d72b654af6506e Mon Sep 17 00:00:00 2001 From: Eric <5089238+emizzle@users.noreply.github.com> Date: Tue, 18 Feb 2025 09:00:52 +1100 Subject: [PATCH 09/40] chore(deps): bump ethers to propagate cancellations (#1116) * chore(deps): bump ethers to propagate cancellations Ethers was swallowing canellations and turning them into EthersErrors, which was causing the sales statemachine to error when it should have been simply cancelling the current state's run. Hopefully fixes the intermittently failing marketplace integration test. * Add missing errors in async raises pragma * bump to version of ethers that supports cancellations --------- Co-authored-by: Arnaud --- codex/contracts/provider.nim | 10 +++++----- tests/contracts/helpers/mockprovider.nim | 2 +- tests/contracts/testDeployment.nim | 2 +- vendor/nim-ethers | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/codex/contracts/provider.nim b/codex/contracts/provider.nim index b7fc5602..b1576bb0 100644 --- a/codex/contracts/provider.nim +++ b/codex/contracts/provider.nim @@ -14,7 +14,7 @@ proc raiseProviderError(message: string) {.raises: [ProviderError].} = proc blockNumberAndTimestamp*( provider: Provider, blockTag: BlockTag -): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} = +): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError, CancelledError]).} = without latestBlock =? 
await provider.getBlock(blockTag): raiseProviderError("Could not get latest block") @@ -25,7 +25,7 @@ proc blockNumberAndTimestamp*( proc binarySearchFindClosestBlock( provider: Provider, epochTime: int, low: UInt256, high: UInt256 -): Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low)) let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high)) if abs(lowTimestamp.truncate(int) - epochTime) < @@ -39,7 +39,7 @@ proc binarySearchBlockNumberForEpoch( epochTime: UInt256, latestBlockNumber: UInt256, earliestBlockNumber: UInt256, -): Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = var low = earliestBlockNumber var high = latestBlockNumber @@ -65,7 +65,7 @@ proc binarySearchBlockNumberForEpoch( proc blockNumberForEpoch*( provider: Provider, epochTime: SecondsSince1970 -): Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = let epochTimeUInt256 = epochTime.u256 let (latestBlockNumber, latestBlockTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.latest) @@ -118,6 +118,6 @@ proc blockNumberForEpoch*( proc pastBlockTag*( provider: Provider, blocksAgo: int -): Future[BlockTag] {.async: (raises: [ProviderError]).} = +): Future[BlockTag] {.async: (raises: [ProviderError, CancelledError]).} = let head = await provider.getBlockNumber() return BlockTag.init(head - blocksAgo.abs.u256) diff --git a/tests/contracts/helpers/mockprovider.nim b/tests/contracts/helpers/mockprovider.nim index 09e65398..c5be8ad7 100644 --- a/tests/contracts/helpers/mockprovider.nim +++ b/tests/contracts/helpers/mockprovider.nim @@ -13,7 +13,7 @@ type MockProvider* = ref object of Provider method getBlock*( provider: MockProvider, tag: BlockTag -): 
Future[?Block] {.async: (raises: [ProviderError]).} = +): Future[?Block] {.async: (raises: [ProviderError, CancelledError]).} = try: if tag == BlockTag.latest: if latestBlock =? provider.latest: diff --git a/tests/contracts/testDeployment.nim b/tests/contracts/testDeployment.nim index a439e42a..86a5fe00 100644 --- a/tests/contracts/testDeployment.nim +++ b/tests/contracts/testDeployment.nim @@ -12,7 +12,7 @@ type MockProvider = ref object of Provider method getChainId*( provider: MockProvider -): Future[UInt256] {.async: (raises: [ProviderError]).} = +): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} = return provider.chainId proc configFactory(): CodexConf = diff --git a/vendor/nim-ethers b/vendor/nim-ethers index 1cfccb96..d2b11a86 160000 --- a/vendor/nim-ethers +++ b/vendor/nim-ethers @@ -1 +1 @@ -Subproject commit 1cfccb9695fa47860bf7ef3d75da9019096a3933 +Subproject commit d2b11a865796a55296027f8ffba68398035ad435 From 6e73338425a41f40adfa554763a17efe1f5dbcf5 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 17 Feb 2025 23:04:04 +0100 Subject: [PATCH 10/40] Remove deprecated function (#1111) Co-authored-by: Dmitriy Ryajov --- codex/manifest/manifest.nim | 3 --- codex/streams/storestream.nim | 2 +- tests/codex/blockexchange/discovery/testdiscovery.nim | 4 ++-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/codex/manifest/manifest.nim b/codex/manifest/manifest.nim index 0bc51dfc..cbb0bace 100644 --- a/codex/manifest/manifest.nim +++ b/codex/manifest/manifest.nim @@ -162,9 +162,6 @@ func verify*(self: Manifest): ?!void = return success() -func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} = - self.treeCid.success - func `==`*(a, b: Manifest): bool = (a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and (a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and diff --git a/codex/streams/storestream.nim b/codex/streams/storestream.nim index 85b0e354..a68e2ea7 100644 --- 
a/codex/streams/storestream.nim +++ b/codex/streams/storestream.nim @@ -110,7 +110,7 @@ method readOnce*( raise newLPStreamReadError(error) trace "Reading bytes from store stream", - manifestCid = self.manifest.cid.get(), + manifestCid = self.manifest.treeCid, numBlocks = self.manifest.blocksCount, blockNum, blkCid = blk.cid, diff --git a/tests/codex/blockexchange/discovery/testdiscovery.nim b/tests/codex/blockexchange/discovery/testdiscovery.nim index 88331c3f..97a455e1 100644 --- a/tests/codex/blockexchange/discovery/testdiscovery.nim +++ b/tests/codex/blockexchange/discovery/testdiscovery.nim @@ -96,9 +96,9 @@ asyncchecksuite "Block Advertising and Discovery": await engine.stop() - test "Should advertise both manifests and trees": + test "Should advertise trees": let - cids = @[manifest.cid.tryGet, manifest.treeCid] + cids = @[manifest.treeCid] advertised = initTable.collect: for cid in cids: {cid: newFuture[void]()} From 0107eb06fe61ec0a86b8f60ae58d0833ccf0c2d2 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Tue, 18 Feb 2025 06:47:47 +0100 Subject: [PATCH 11/40] chore(marketplace): cid should be bytes (#1073) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Change cid format from string to bytes for the marketplace * refactor: marketplace custom errors handling * chore: update contracts repo * chore: update contracts submodule * Update contracts submodule * Initialize the Cid using init function * Restorage serialize pragma * Use Cid object instead of buffer * Simplify cid usage * Simplify cid usage * bump codex-contracts-eth after PR merge, formatting * fix rebase * collateralPerByte => collateralPerSlot --------- Co-authored-by: Adam Uhlíř Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com> --- codex/contracts/requests.nim | 19 +++++++++++++++--- codex/node.nim | 26 ++++++++----------------- codex/sales/salescontext.nim | 3 ++- tests/codex/node/testcontracts.nim | 5 ++--- tests/codex/node/testnode.nim | 2 +- 
tests/codex/sales/states/testfilled.nim | 2 +- tests/codex/sales/testsales.nim | 12 ++++++++---- tests/contracts/testMarket.nim | 1 + tests/examples.nim | 2 +- tests/integration/testecbug.nim | 2 +- tests/integration/testpurchasing.nim | 2 ++ vendor/codex-contracts-eth | 2 +- 12 files changed, 44 insertions(+), 34 deletions(-) diff --git a/codex/contracts/requests.nim b/codex/contracts/requests.nim index 48947602..98d8c342 100644 --- a/codex/contracts/requests.nim +++ b/codex/contracts/requests.nim @@ -6,8 +6,10 @@ import pkg/nimcrypto import pkg/ethers/fields import pkg/questionable/results import pkg/stew/byteutils +import pkg/libp2p/[cid, multicodec] import ../logutils import ../utils/json +from ../errors import mapFailure export contractabi @@ -29,7 +31,7 @@ type maxSlotLoss* {.serialize.}: uint64 StorageContent* = object - cid* {.serialize.}: string + cid* {.serialize.}: Cid merkleRoot*: array[32, byte] Slot* = object @@ -120,6 +122,9 @@ func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk = func fromTuple(_: type StorageContent, tupl: tuple): StorageContent = StorageContent(cid: tupl[0], merkleRoot: tupl[1]) +func solidityType*(_: type Cid): string = + solidityType(seq[byte]) + func solidityType*(_: type StorageContent): string = solidityType(StorageContent.fieldTypes) @@ -129,6 +134,10 @@ func solidityType*(_: type StorageAsk): string = func solidityType*(_: type StorageRequest): string = solidityType(StorageRequest.fieldTypes) +# Note: it seems to be ok to ignore the vbuffer offset for now +func encode*(encoder: var AbiEncoder, cid: Cid) = + encoder.write(cid.data.buffer) + func encode*(encoder: var AbiEncoder, content: StorageContent) = encoder.write(content.fieldValues) @@ -141,8 +150,12 @@ func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) = func encode*(encoder: var AbiEncoder, request: StorageRequest) = encoder.write(request.fieldValues) -func encode*(encoder: var AbiEncoder, request: Slot) = - 
encoder.write(request.fieldValues) +func encode*(encoder: var AbiEncoder, slot: Slot) = + encoder.write(slot.fieldValues) + +func decode*(decoder: var AbiDecoder, T: type Cid): ?!T = + let data = ?decoder.read(seq[byte]) + Cid.init(data).mapFailure func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T = let tupl = ?decoder.read(StorageContent.fieldTypes) diff --git a/codex/node.nim b/codex/node.nim index 2602bfe6..062ec2ce 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -501,10 +501,7 @@ proc setupRequest( collateralPerByte: collateralPerByte, maxSlotLoss: tolerance, ), - content: StorageContent( - cid: $manifestBlk.cid, # TODO: why string? - merkleRoot: verifyRoot, - ), + content: StorageContent(cid: manifestBlk.cid, merkleRoot: verifyRoot), expiry: expiry, ) @@ -561,16 +558,14 @@ proc onStore( ## store data in local storage ## + let cid = request.content.cid + logScope: - cid = request.content.cid + cid = $cid slotIdx = slotIdx trace "Received a request to store a slot" - without cid =? Cid.init(request.content.cid).mapFailure, err: - trace "Unable to parse Cid", cid - return failure(err) - without manifest =? (await self.fetchManifest(cid)), err: trace "Unable to fetch manifest for cid", cid, err = err.msg return failure(err) @@ -640,7 +635,7 @@ proc onProve( ## let - cidStr = slot.request.content.cid + cidStr = $slot.request.content.cid slotIdx = slot.slotIndex.truncate(Natural) logScope: @@ -689,14 +684,9 @@ proc onProve( failure "Prover not enabled" proc onExpiryUpdate( - self: CodexNodeRef, rootCid: string, expiry: SecondsSince1970 + self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = - without cid =? 
Cid.init(rootCid): - trace "Unable to parse Cid", cid - let error = newException(CodexError, "Unable to parse Cid") - return failure(error) - - return await self.updateExpiry(cid, expiry) + return await self.updateExpiry(rootCid, expiry) proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: UInt256) = # TODO: remove data from local storage @@ -719,7 +709,7 @@ proc start*(self: CodexNodeRef) {.async.} = self.onStore(request, slot, onBatch) hostContracts.sales.onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] = self.onExpiryUpdate(rootCid, expiry) diff --git a/codex/sales/salescontext.nim b/codex/sales/salescontext.nim index bb0b5dc9..95f06c04 100644 --- a/codex/sales/salescontext.nim +++ b/codex/sales/salescontext.nim @@ -1,6 +1,7 @@ import pkg/questionable import pkg/questionable/results import pkg/upraises +import pkg/libp2p/cid import ../market import ../clock @@ -30,7 +31,7 @@ type OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {. gcsafe, upraises: [] .} - OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {. + OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {. 
gcsafe, upraises: [] .} OnClear* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} diff --git a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim index 52adb5f6..0930d925 100644 --- a/tests/codex/node/testcontracts.nim +++ b/tests/codex/node/testcontracts.nim @@ -78,7 +78,6 @@ asyncchecksuite "Test Node - Host contracts": erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, Taskpool.new) manifestCid = manifestBlock.cid - manifestCidStr = $(manifestCid) (await localStore.putBlock(manifestBlock)).tryGet() @@ -99,7 +98,7 @@ asyncchecksuite "Test Node - Host contracts": expectedExpiry: SecondsSince1970 = clock.now + DefaultBlockTtl.seconds + 11123 expiryUpdateCallback = !sales.onExpiryUpdate - (await expiryUpdateCallback(manifestCidStr, expectedExpiry)).tryGet() + (await expiryUpdateCallback(manifestCid, expectedExpiry)).tryGet() for index in 0 ..< manifest.blocksCount: let @@ -116,7 +115,7 @@ asyncchecksuite "Test Node - Host contracts": test "onStore callback": let onStore = !sales.onStore var request = StorageRequest.example - request.content.cid = $verifiableBlock.cid + request.content.cid = verifiableBlock.cid request.expiry = (getTime() + DefaultBlockTtl.toTimesDuration + 1.hours).toUnix.u256 var fetchedBytes: uint = 0 diff --git a/tests/codex/node/testnode.nim b/tests/codex/node/testnode.nim index 3f9a141a..37960232 100644 --- a/tests/codex/node/testnode.nim +++ b/tests/codex/node/testnode.nim @@ -167,7 +167,7 @@ asyncchecksuite "Test Node - Basic": check: (await verifiableBlock.cid in localStore) == true - request.content.cid == $verifiableBlock.cid + request.content.cid == verifiableBlock.cid request.content.merkleRoot == builder.verifyRoot.get.toBytes test "Should delete a single block": diff --git a/tests/codex/sales/states/testfilled.nim b/tests/codex/sales/states/testfilled.nim index f8f77da6..74413776 100644 --- a/tests/codex/sales/states/testfilled.nim +++ 
b/tests/codex/sales/states/testfilled.nim @@ -36,7 +36,7 @@ checksuite "sales state 'filled'": market.requestEnds[request.id] = 321 onExpiryUpdatePassedExpiry = -1 let onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = onExpiryUpdatePassedExpiry = expiry return success() diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index 0d441f34..05f31057 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -46,7 +46,9 @@ asyncchecksuite "Sales - start": pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, ), - content: StorageContent(cid: "some cid"), + content: StorageContent( + cid: Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet + ), expiry: (getTime() + initDuration(hours = 1)).toUnix.u256, ) @@ -64,7 +66,7 @@ asyncchecksuite "Sales - start": return success() sales.onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = return success() @@ -158,7 +160,9 @@ asyncchecksuite "Sales": pricePerBytePerSecond: minPricePerBytePerSecond, collateralPerByte: 1.u256, ), - content: StorageContent(cid: "some cid"), + content: StorageContent( + cid: Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet + ), expiry: (getTime() + initDuration(hours = 1)).toUnix.u256, ) @@ -181,7 +185,7 @@ asyncchecksuite "Sales": return success() sales.onExpiryUpdate = proc( - rootCid: string, expiry: SecondsSince1970 + rootCid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = return success() diff --git a/tests/contracts/testMarket.nim b/tests/contracts/testMarket.nim index 2ba450a1..6506a2d6 100644 --- a/tests/contracts/testMarket.nim +++ b/tests/contracts/testMarket.nim @@ -3,6 +3,7 @@ import std/importutils import pkg/chronos import pkg/ethers/erc20 import codex/contracts +import pkg/libp2p/cid import pkg/lrucache import 
../ethertest import ./examples diff --git a/tests/examples.nim b/tests/examples.nim index c96fefd6..26013cdc 100644 --- a/tests/examples.nim +++ b/tests/examples.nim @@ -57,7 +57,7 @@ proc example*(_: type StorageRequest): StorageRequest = maxSlotLoss: 2, # 2 slots can be freed without data considered to be lost ), content: StorageContent( - cid: "zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob", + cid: Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet, merkleRoot: array[32, byte].example, ), expiry: (60 * 60).u256, # 1 hour , diff --git a/tests/integration/testecbug.nim b/tests/integration/testecbug.nim index e7604de7..29a3bc6f 100644 --- a/tests/integration/testecbug.nim +++ b/tests/integration/testecbug.nim @@ -50,7 +50,7 @@ marketplacesuite "Bug #821 - node crashes during erasure coding": check eventually(requestId.isSome, timeout = expiry.int * 1000) let request = await marketplace.getRequest(requestId.get) - let cidFromRequest = Cid.init(request.content.cid).get() + let cidFromRequest = request.content.cid let downloaded = await clientApi.downloadBytes(cidFromRequest, local = true) check downloaded.isOk check downloaded.get.toHex == data.toHex diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim index 4e08e7a8..ebae78f6 100644 --- a/tests/integration/testpurchasing.nim +++ b/tests/integration/testpurchasing.nim @@ -47,6 +47,8 @@ twonodessuite "Purchasing": ).get let request = client1.getPurchase(id).get.request.get + + check request.content.cid.data.buffer.len > 0 check request.ask.duration == 100.u256 check request.ask.pricePerBytePerSecond == 1.u256 check request.ask.proofProbability == 3.u256 diff --git a/vendor/codex-contracts-eth b/vendor/codex-contracts-eth index e74d3397..0f2012b1 160000 --- a/vendor/codex-contracts-eth +++ b/vendor/codex-contracts-eth @@ -1 +1 @@ -Subproject commit e74d3397a133eaf1eb95d9ce59f56747a7c8c30b +Subproject commit 0f2012b1442c404605c8ba9dcae2f4e53058cd2c From 
2298a0bf8109e8e8a5d80468684ae02c909469c2 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Tue, 18 Feb 2025 11:17:05 +0100 Subject: [PATCH 12/40] Use results instead of stew/results (#1112) --- codex/erasure/backends/leopard.nim | 2 +- codex/errors.nim | 2 +- codex/rest/coders.nim | 2 +- codex/utils/asyncheapqueue.nim | 2 +- codex/utils/natutils.nim | 3 +-- tests/codex/stores/testqueryiterhelper.nim | 2 +- tests/codex/testasyncheapqueue.nim | 2 +- tests/codex/testnat.nim | 2 +- 8 files changed, 8 insertions(+), 9 deletions(-) diff --git a/codex/erasure/backends/leopard.nim b/codex/erasure/backends/leopard.nim index ae599f12..a0016570 100644 --- a/codex/erasure/backends/leopard.nim +++ b/codex/erasure/backends/leopard.nim @@ -10,7 +10,7 @@ import std/options import pkg/leopard -import pkg/stew/results +import pkg/results import ../backend diff --git a/codex/errors.nim b/codex/errors.nim index f7c2fa6b..75cefde4 100644 --- a/codex/errors.nim +++ b/codex/errors.nim @@ -9,7 +9,7 @@ import std/options -import pkg/stew/results +import pkg/results import pkg/chronos import pkg/questionable/results diff --git a/codex/rest/coders.nim b/codex/rest/coders.nim index 1c997ccf..319ce3d6 100644 --- a/codex/rest/coders.nim +++ b/codex/rest/coders.nim @@ -14,7 +14,7 @@ import pkg/chronos import pkg/libp2p import pkg/stew/base10 import pkg/stew/byteutils -import pkg/stew/results +import pkg/results import pkg/stint import ../sales diff --git a/codex/utils/asyncheapqueue.nim b/codex/utils/asyncheapqueue.nim index 1b0dd8bc..bc37c462 100644 --- a/codex/utils/asyncheapqueue.nim +++ b/codex/utils/asyncheapqueue.nim @@ -9,7 +9,7 @@ import std/sequtils import pkg/chronos -import pkg/stew/results +import pkg/results # Based on chronos AsyncHeapQueue and std/heapqueue diff --git a/codex/utils/natutils.nim b/codex/utils/natutils.nim index 8a641e95..43909588 100644 --- a/codex/utils/natutils.nim +++ b/codex/utils/natutils.nim @@ -1,7 +1,6 @@ {.push raises: [].} -import - std/[tables, hashes], 
stew/results, stew/shims/net as stewNet, chronos, chronicles +import std/[tables, hashes], pkg/results, stew/shims/net as stewNet, chronos, chronicles import pkg/libp2p diff --git a/tests/codex/stores/testqueryiterhelper.nim b/tests/codex/stores/testqueryiterhelper.nim index 5d3d68fd..4e83dad4 100644 --- a/tests/codex/stores/testqueryiterhelper.nim +++ b/tests/codex/stores/testqueryiterhelper.nim @@ -1,6 +1,6 @@ import std/sugar -import pkg/stew/results +import pkg/results import pkg/questionable import pkg/chronos import pkg/datastore/typedds diff --git a/tests/codex/testasyncheapqueue.nim b/tests/codex/testasyncheapqueue.nim index eb3767cd..a9c6769b 100644 --- a/tests/codex/testasyncheapqueue.nim +++ b/tests/codex/testasyncheapqueue.nim @@ -1,5 +1,5 @@ import pkg/chronos -import pkg/stew/results +import pkg/results import pkg/codex/utils/asyncheapqueue import pkg/codex/rng diff --git a/tests/codex/testnat.nim b/tests/codex/testnat.nim index 57f51d31..3981b2e6 100644 --- a/tests/codex/testnat.nim +++ b/tests/codex/testnat.nim @@ -1,7 +1,7 @@ import std/[unittest, options, net], stew/shims/net as stewNet import pkg/chronos import pkg/libp2p/[multiaddress, multihash, multicodec] -import pkg/stew/results +import pkg/results import ../../codex/nat import ../../codex/utils/natutils From 1052dad30c6a195f310bd385c553c17e70a476e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adam=20Uhl=C3=AD=C5=99?= Date: Tue, 18 Feb 2025 20:41:54 +0100 Subject: [PATCH 13/40] feat: request duration limit (#1057) * feat: request duration limit * Fix tests and duration type * Add custom error * Remove merge issue * Update codex contracts eth * Update market config and fix test * Fix SlotReservationsConfig syntax * Update dependencies * test: remove doubled test * chore: update contracts repo --------- Co-authored-by: Arnaud --- codex.nim | 39 ++++++++------- codex/contracts/config.nim | 34 +++++++++++-- codex/contracts/market.nim | 7 ++- codex/contracts/marketplace.nim | 1 + codex/market.nim | 3 
++ codex/purchasing.nim | 2 +- codex/rest/api.nim | 8 +++ codex/validation.nim | 7 +-- config.nims | 79 +++++++++++++++++++----------- tests/codex/helpers/mockmarket.nim | 7 ++- tests/integration/testrestapi.nim | 20 ++++++++ vendor/codex-contracts-eth | 2 +- 12 files changed, 152 insertions(+), 57 deletions(-) diff --git a/codex.nim b/codex.nim index e2c6033e..7749bdee 100644 --- a/codex.nim +++ b/codex.nim @@ -38,33 +38,35 @@ when isMainModule: when defined(posix): import system/ansi_c - type - CodexStatus {.pure.} = enum - Stopped, - Stopping, - Running + type CodexStatus {.pure.} = enum + Stopped + Stopping + Running let config = CodexConf.load( version = codexFullVersion, envVarsPrefix = "codex", - secondarySources = proc (config: CodexConf, sources: auto) {.gcsafe, raises: [ConfigurationError].} = - if configFile =? config.configFile: - sources.addConfigFile(Toml, configFile) + secondarySources = proc( + config: CodexConf, sources: auto + ) {.gcsafe, raises: [ConfigurationError].} = + if configFile =? config.configFile: + sources.addConfigFile(Toml, configFile) + , ) config.setupLogging() config.setupMetrics() - if not(checkAndCreateDataDir((config.dataDir).string)): + if not (checkAndCreateDataDir((config.dataDir).string)): # We are unable to access/create data folder or data folder's # permissions are insecure. quit QuitFailure - if config.prover() and not(checkAndCreateDataDir((config.circuitDir).string)): + if config.prover() and not (checkAndCreateDataDir((config.circuitDir).string)): quit QuitFailure trace "Data dir initialized", dir = $config.dataDir - if not(checkAndCreateDataDir((config.dataDir / "repo"))): + if not (checkAndCreateDataDir((config.dataDir / "repo"))): # We are unable to access/create data folder or data folder's # permissions are insecure. 
quit QuitFailure @@ -83,11 +85,12 @@ when isMainModule: config.dataDir / config.netPrivKeyFile privateKey = setupKey(keyPath).expect("Should setup private key!") - server = try: - CodexServer.new(config, privateKey) - except Exception as exc: - error "Failed to start Codex", msg = exc.msg - quit QuitFailure + server = + try: + CodexServer.new(config, privateKey) + except Exception as exc: + error "Failed to start Codex", msg = exc.msg + quit QuitFailure ## Ctrl+C handling proc doShutdown() = @@ -101,7 +104,9 @@ when isMainModule: # workaround for https://github.com/nim-lang/Nim/issues/4057 try: setupForeignThreadGc() - except Exception as exc: raiseAssert exc.msg # shouldn't happen + except Exception as exc: + raiseAssert exc.msg + # shouldn't happen notice "Shutting down after having received SIGINT" doShutdown() diff --git a/codex/contracts/config.nim b/codex/contracts/config.nim index 5493c643..986b1944 100644 --- a/codex/contracts/config.nim +++ b/codex/contracts/config.nim @@ -10,13 +10,16 @@ type MarketplaceConfig* = object collateral*: CollateralConfig proofs*: ProofConfig + reservations*: SlotReservationsConfig + requestDurationLimit*: UInt256 CollateralConfig* = object repairRewardPercentage*: uint8 # percentage of remaining collateral slot has after it has been freed maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value - slashCriterion*: uint16 # amount of proofs missed that lead to slashing slashPercentage*: uint8 # percentage of the collateral that is slashed + validatorRewardPercentage*: uint8 + # percentage of the slashed amount going to the validators ProofConfig* = object period*: UInt256 # proofs requirements are calculated per period (in seconds) @@ -28,6 +31,9 @@ type # blocks. Should be a prime number to ensure there are no cycles. 
downtimeProduct*: uint8 + SlotReservationsConfig* = object + maxReservations*: uint8 + func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig = ProofConfig( period: tupl[0], @@ -37,16 +43,27 @@ func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig = downtimeProduct: tupl[4], ) +func fromTuple(_: type SlotReservationsConfig, tupl: tuple): SlotReservationsConfig = + SlotReservationsConfig(maxReservations: tupl[0]) + func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig = CollateralConfig( repairRewardPercentage: tupl[0], maxNumberOfSlashes: tupl[1], - slashCriterion: tupl[2], - slashPercentage: tupl[3], + slashPercentage: tupl[2], + validatorRewardPercentage: tupl[3], ) func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig = - MarketplaceConfig(collateral: tupl[0], proofs: tupl[1]) + MarketplaceConfig( + collateral: tupl[0], + proofs: tupl[1], + reservations: tupl[2], + requestDurationLimit: tupl[3], + ) + +func solidityType*(_: type SlotReservationsConfig): string = + solidityType(SlotReservationsConfig.fieldTypes) func solidityType*(_: type ProofConfig): string = solidityType(ProofConfig.fieldTypes) @@ -55,7 +72,10 @@ func solidityType*(_: type CollateralConfig): string = solidityType(CollateralConfig.fieldTypes) func solidityType*(_: type MarketplaceConfig): string = - solidityType(CollateralConfig.fieldTypes) + solidityType(MarketplaceConfig.fieldTypes) + +func encode*(encoder: var AbiEncoder, slot: SlotReservationsConfig) = + encoder.write(slot.fieldValues) func encode*(encoder: var AbiEncoder, slot: ProofConfig) = encoder.write(slot.fieldValues) @@ -70,6 +90,10 @@ func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T = let tupl = ?decoder.read(ProofConfig.fieldTypes) success ProofConfig.fromTuple(tupl) +func decode*(decoder: var AbiDecoder, T: type SlotReservationsConfig): ?!T = + let tupl = ?decoder.read(SlotReservationsConfig.fieldTypes) + success SlotReservationsConfig.fromTuple(tupl) + func 
decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T = let tupl = ?decoder.read(CollateralConfig.fieldTypes) success CollateralConfig.fromTuple(tupl) diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 9157b269..208dbe07 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -91,9 +91,14 @@ method proofTimeout*(market: OnChainMarket): Future[UInt256] {.async.} = method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} = convertEthersError: - let config = await market.contract.configuration() + let config = await market.config() return config.collateral.repairRewardPercentage +method requestDurationLimit*(market: OnChainMarket): Future[UInt256] {.async.} = + convertEthersError: + let config = await market.config() + return config.requestDurationLimit + method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} = convertEthersError: let config = await market.config() diff --git a/codex/contracts/marketplace.nim b/codex/contracts/marketplace.nim index 87fd1e47..091f45db 100644 --- a/codex/contracts/marketplace.nim +++ b/codex/contracts/marketplace.nim @@ -42,6 +42,7 @@ type Marketplace_InsufficientCollateral* = object of SolidityError Marketplace_InsufficientReward* = object of SolidityError Marketplace_InvalidCid* = object of SolidityError + Marketplace_DurationExceedsLimit* = object of SolidityError Proofs_InsufficientBlockHeight* = object of SolidityError Proofs_InvalidProof* = object of SolidityError Proofs_ProofAlreadySubmitted* = object of SolidityError diff --git a/codex/market.nim b/codex/market.nim index bc325cd9..66f31804 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -78,6 +78,9 @@ method proofTimeout*(market: Market): Future[UInt256] {.base, async.} = method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} = raiseAssert("not implemented") +method requestDurationLimit*(market: Market): Future[UInt256] {.base, async.} = + raiseAssert("not 
implemented") + method proofDowntime*(market: Market): Future[uint8] {.base, async.} = raiseAssert("not implemented") diff --git a/codex/purchasing.nim b/codex/purchasing.nim index 4ab84405..25a35137 100644 --- a/codex/purchasing.nim +++ b/codex/purchasing.nim @@ -14,7 +14,7 @@ export purchase type Purchasing* = ref object - market: Market + market*: Market clock: Clock purchases: Table[PurchaseId, Purchase] proofProbability*: UInt256 diff --git a/codex/rest/api.nim b/codex/rest/api.nim index f64a6f20..8ba1abae 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -637,6 +637,14 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = without params =? StorageRequestParams.fromJson(body), error: return RestApiResponse.error(Http400, error.msg, headers = headers) + let requestDurationLimit = await contracts.purchasing.market.requestDurationLimit + if params.duration > requestDurationLimit: + return RestApiResponse.error( + Http400, + "Duration exceeds limit of " & $requestDurationLimit & " seconds", + headers = headers, + ) + let nodes = params.nodes |? 3 let tolerance = params.tolerance |? 1 diff --git a/codex/validation.nim b/codex/validation.nim index 6e3135e4..6659bc5b 100644 --- a/codex/validation.nim +++ b/codex/validation.nim @@ -22,8 +22,6 @@ type Validation* = ref object proofTimeout: UInt256 config: ValidationConfig -const MaxStorageRequestDuration = 30.days - logScope: topics = "codex validator" @@ -122,7 +120,10 @@ proc epochForDurationBackFromNow( proc restoreHistoricalState(validation: Validation) {.async.} = trace "Restoring historical state..." 
- let startTimeEpoch = validation.epochForDurationBackFromNow(MaxStorageRequestDuration) + let requestDurationLimit = await validation.market.requestDurationLimit + let startTimeEpoch = validation.epochForDurationBackFromNow( + seconds(requestDurationLimit.truncate(int64)) + ) let slotFilledEvents = await validation.market.queryPastSlotFilledEvents(fromTime = startTimeEpoch) for event in slotFilledEvents: diff --git a/config.nims b/config.nims index 6a4767ad..05a31fff 100644 --- a/config.nims +++ b/config.nims @@ -1,21 +1,24 @@ - include "build.nims" import std/os const currentDir = currentSourcePath()[0 .. ^(len("config.nims") + 1)] when getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and - # BEWARE - # In Nim 1.6, config files are evaluated with a working directory - # matching where the Nim command was invocated. This means that we - # must do all file existence checks with full absolute paths: - system.fileExists(currentDir & "nimbus-build-system.paths"): + # BEWARE + # In Nim 1.6, config files are evaluated with a working directory + # matching where the Nim command was invocated. 
This means that we + # must do all file existence checks with full absolute paths: + system.fileExists(currentDir & "nimbus-build-system.paths"): include "nimbus-build-system.paths" when defined(release): - switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/release/$projectName")) + switch( + "nimcache", joinPath(currentSourcePath.parentDir, "nimcache/release/$projectName") + ) else: - switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/debug/$projectName")) + switch( + "nimcache", joinPath(currentSourcePath.parentDir, "nimcache/debug/$projectName") + ) when defined(limitStackUsage): # This limits stack usage of each individual function to 1MB - the option is @@ -34,7 +37,8 @@ when defined(windows): # increase stack size switch("passL", "-Wl,--stack,8388608") # https://github.com/nim-lang/Nim/issues/4057 - --tlsEmulation:off + --tlsEmulation: + off if defined(i386): # set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM switch("passL", "-Wl,--large-address-aware") @@ -63,30 +67,47 @@ else: # ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes) switch("passC", "-mno-avx512vl") ---tlsEmulation:off ---threads:on ---opt:speed ---excessiveStackTrace:on +--tlsEmulation: + off +--threads: + on +--opt: + speed +--excessiveStackTrace: + on # enable metric collection ---define:metrics +--define: + metrics # for heap-usage-by-instance-type metrics and object base-type strings ---define:nimTypeNames ---styleCheck:usages ---styleCheck:error ---maxLoopIterationsVM:1000000000 ---fieldChecks:on ---warningAsError:"ProveField:on" +--define: + nimTypeNames +--styleCheck: + usages +--styleCheck: + error +--maxLoopIterationsVM: + 1000000000 +--fieldChecks: + on +--warningAsError: + "ProveField:on" when (NimMajor, NimMinor) >= (1, 4): - --warning:"ObservableStores:off" - --warning:"LockLevel:off" - --hint:"XCannotRaiseY:off" + --warning: + "ObservableStores:off" + --warning: + 
"LockLevel:off" + --hint: + "XCannotRaiseY:off" when (NimMajor, NimMinor) >= (1, 6): - --warning:"DotLikeOps:off" + --warning: + "DotLikeOps:off" when (NimMajor, NimMinor, NimPatch) >= (1, 6, 11): - --warning:"BareExcept:off" + --warning: + "BareExcept:off" when (NimMajor, NimMinor) >= (2, 0): - --mm:refc + --mm: + refc switch("define", "withoutPCRE") @@ -94,10 +115,12 @@ switch("define", "withoutPCRE") # "--debugger:native" build. It can be increased with `ulimit -n 1024`. if not defined(macosx): # add debugging symbols and original files and line numbers - --debugger:native + --debugger: + native if not (defined(windows) and defined(i386)) and not defined(disable_libbacktrace): # light-weight stack traces using libbacktrace and libunwind - --define:nimStackTraceOverride + --define: + nimStackTraceOverride switch("import", "libbacktrace") # `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'" diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index bb0eaaa2..3638d11e 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -122,12 +122,14 @@ proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket = collateral: CollateralConfig( repairRewardPercentage: 10, maxNumberOfSlashes: 5, - slashCriterion: 3, slashPercentage: 10, + validatorRewardPercentage: 20, ), proofs: ProofConfig( period: 10.u256, timeout: 5.u256, downtime: 64.uint8, downtimeProduct: 67.uint8 ), + reservations: SlotReservationsConfig(maxReservations: 3), + requestDurationLimit: (60 * 60 * 24 * 30).u256, ) MockMarket( signer: Address.example, config: config, canReserveSlot: true, clock: clock @@ -142,6 +144,9 @@ method periodicity*(mock: MockMarket): Future[Periodicity] {.async.} = method proofTimeout*(market: MockMarket): Future[UInt256] {.async.} = return market.config.proofs.timeout +method requestDurationLimit*(market: MockMarket): Future[UInt256] 
{.async.} = + return market.config.requestDurationLimit + method proofDowntime*(market: MockMarket): Future[uint8] {.async.} = return market.config.proofs.downtime diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 13b06500..3918791e 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -103,6 +103,26 @@ twonodessuite "REST API": check responseBefore.status == "400 Bad Request" check responseBefore.body == "Tolerance needs to be bigger then zero" + test "request storage fails if duration exceeds limit", twoNodesConfig: + let data = await RandomChunker.example(blocks = 2) + let cid = client1.upload(data).get + let duration = (31 * 24 * 60 * 60).u256 + # 31 days TODO: this should not be hardcoded, but waits for https://github.com/codex-storage/nim-codex/issues/1056 + let proofProbability = 3.u256 + let expiry = 30.uint + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 2 + let pricePerBytePerSecond = 1.u256 + + var responseBefore = client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == "400 Bad Request" + check "Duration exceeds limit of" in responseBefore.body + test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get diff --git a/vendor/codex-contracts-eth b/vendor/codex-contracts-eth index 0f2012b1..ff82c26b 160000 --- a/vendor/codex-contracts-eth +++ b/vendor/codex-contracts-eth @@ -1 +1 @@ -Subproject commit 0f2012b1442c404605c8ba9dcae2f4e53058cd2c +Subproject commit ff82c26b3669b52a09280c634141dace7f04659a From 87590f43ce769bc964232e9450314d6047277f67 Mon Sep 17 00:00:00 2001 From: Eric <5089238+emizzle@users.noreply.github.com> Date: Wed, 19 Feb 2025 11:18:45 +1100 Subject: [PATCH 14/40] fix(statemachine): do not raise from state.run 
(#1115) * fix(statemachine): do not raise from state.run * fix rebase * fix exception handling in SaleProvingSimulated.prove - re-raise CancelledError - don't return State on CatchableError - expect the Proofs_InvalidProof custom error instead of checking a string * asyncSpawn salesagent.onCancelled This was swallowing a KeyError in one of the tests (fixed in the previous commit) * remove error handling states in asyncstatemachine * revert unneeded changes * formatting * PR feedback, logging updates --- codex/purchasing/states/cancelled.nim | 24 +++-- codex/purchasing/states/error.nim | 4 +- codex/purchasing/states/errorhandling.nim | 8 -- codex/purchasing/states/failed.nim | 16 +++- codex/purchasing/states/finished.nim | 19 +++- codex/purchasing/states/pending.nim | 22 +++-- codex/purchasing/states/started.nim | 36 +++++--- codex/purchasing/states/submitted.nim | 14 ++- codex/purchasing/states/unknown.nim | 48 ++++++---- codex/sales/salesagent.nim | 52 ++++++----- codex/sales/states/cancelled.nim | 45 ++++++---- codex/sales/states/downloading.nim | 24 +++-- codex/sales/states/errored.nim | 20 +++-- codex/sales/states/errorhandling.nim | 8 -- codex/sales/states/failed.nim | 27 ++++-- codex/sales/states/filled.nim | 64 ++++++++------ codex/sales/states/filling.nim | 60 +++++++------ codex/sales/states/finished.nim | 19 ++-- codex/sales/states/ignored.nim | 23 +++-- codex/sales/states/initialproving.nim | 34 ++++--- codex/sales/states/payout.nim | 27 ++++-- codex/sales/states/preparing.nim | 98 +++++++++++---------- codex/sales/states/proving.nim | 59 ++++++++----- codex/sales/states/provingsimulated.nim | 41 +++++---- codex/sales/states/slotreserving.nim | 52 ++++++----- codex/sales/states/unknown.nim | 59 +++++++------ codex/utils/asyncstatemachine.nim | 21 ++--- tests/codex/sales/testsales.nim | 22 +---- tests/codex/sales/testsalesagent.nim | 23 +---- tests/codex/utils/testasyncstatemachine.nim | 41 +-------- 30 files changed, 564 insertions(+), 446 deletions(-) 
delete mode 100644 codex/purchasing/states/errorhandling.nim delete mode 100644 codex/sales/states/errorhandling.nim diff --git a/codex/purchasing/states/cancelled.nim b/codex/purchasing/states/cancelled.nim index 760dc81a..5aeeceac 100644 --- a/codex/purchasing/states/cancelled.nim +++ b/codex/purchasing/states/cancelled.nim @@ -1,25 +1,35 @@ import pkg/metrics import ../../logutils +import ../../utils/exceptions import ../statemachine -import ./errorhandling +import ./error declareCounter(codex_purchases_cancelled, "codex purchases cancelled") logScope: topics = "marketplace purchases cancelled" -type PurchaseCancelled* = ref object of ErrorHandlingState +type PurchaseCancelled* = ref object of PurchaseState method `$`*(state: PurchaseCancelled): string = "cancelled" -method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseCancelled, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_cancelled.inc() let purchase = Purchase(machine) - warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId - await purchase.market.withdrawFunds(purchase.requestId) + try: + warn "Request cancelled, withdrawing remaining funds", + requestId = purchase.requestId + await purchase.market.withdrawFunds(purchase.requestId) - let error = newException(Timeout, "Purchase cancelled due to timeout") - purchase.future.fail(error) + let error = newException(Timeout, "Purchase cancelled due to timeout") + purchase.future.fail(error) + except CancelledError as e: + trace "PurchaseCancelled.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseCancelled.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/purchasing/states/error.nim b/codex/purchasing/states/error.nim index d7017b38..afa9f54f 100644 --- a/codex/purchasing/states/error.nim +++ b/codex/purchasing/states/error.nim @@ -14,7 +14,9 @@ type 
PurchaseErrored* = ref object of PurchaseState method `$`*(state: PurchaseErrored): string = "errored" -method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseErrored, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_error.inc() let purchase = Purchase(machine) diff --git a/codex/purchasing/states/errorhandling.nim b/codex/purchasing/states/errorhandling.nim deleted file mode 100644 index 8ef91ba6..00000000 --- a/codex/purchasing/states/errorhandling.nim +++ /dev/null @@ -1,8 +0,0 @@ -import pkg/questionable -import ../statemachine -import ./error - -type ErrorHandlingState* = ref object of PurchaseState - -method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State = - some State(PurchaseErrored(error: error)) diff --git a/codex/purchasing/states/failed.nim b/codex/purchasing/states/failed.nim index 5a126a73..1f6be74f 100644 --- a/codex/purchasing/states/failed.nim +++ b/codex/purchasing/states/failed.nim @@ -1,6 +1,7 @@ import pkg/metrics import ../statemachine import ../../logutils +import ../../utils/exceptions import ./error declareCounter(codex_purchases_failed, "codex purchases failed") @@ -10,11 +11,20 @@ type PurchaseFailed* = ref object of PurchaseState method `$`*(state: PurchaseFailed): string = "failed" -method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseFailed, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_failed.inc() let purchase = Purchase(machine) - warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId - await purchase.market.withdrawFunds(purchase.requestId) + + try: + warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId + await purchase.market.withdrawFunds(purchase.requestId) + except CancelledError as e: + trace "PurchaseFailed.run was cancelled", error = e.msgDetail + except CatchableError 
as e: + error "Error during PurchaseFailed.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) let error = newException(PurchaseError, "Purchase failed") return some State(PurchaseErrored(error: error)) diff --git a/codex/purchasing/states/finished.nim b/codex/purchasing/states/finished.nim index 6cf5ffcc..bb7a726d 100644 --- a/codex/purchasing/states/finished.nim +++ b/codex/purchasing/states/finished.nim @@ -1,7 +1,9 @@ import pkg/metrics import ../statemachine +import ../../utils/exceptions import ../../logutils +import ./error declareCounter(codex_purchases_finished, "codex purchases finished") @@ -13,10 +15,19 @@ type PurchaseFinished* = ref object of PurchaseState method `$`*(state: PurchaseFinished): string = "finished" -method run*(state: PurchaseFinished, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseFinished, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_finished.inc() let purchase = Purchase(machine) - info "Purchase finished, withdrawing remaining funds", requestId = purchase.requestId - await purchase.market.withdrawFunds(purchase.requestId) + try: + info "Purchase finished, withdrawing remaining funds", + requestId = purchase.requestId + await purchase.market.withdrawFunds(purchase.requestId) - purchase.future.complete() + purchase.future.complete() + except CancelledError as e: + trace "PurchaseFinished.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseFinished.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/purchasing/states/pending.nim b/codex/purchasing/states/pending.nim index 4852f266..1472a63e 100644 --- a/codex/purchasing/states/pending.nim +++ b/codex/purchasing/states/pending.nim @@ -1,18 +1,28 @@ import pkg/metrics +import ../../logutils +import ../../utils/exceptions import ../statemachine -import ./errorhandling import ./submitted +import ./error 
declareCounter(codex_purchases_pending, "codex purchases pending") -type PurchasePending* = ref object of ErrorHandlingState +type PurchasePending* = ref object of PurchaseState method `$`*(state: PurchasePending): string = "pending" -method run*(state: PurchasePending, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchasePending, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_pending.inc() let purchase = Purchase(machine) - let request = !purchase.request - await purchase.market.requestStorage(request) - return some State(PurchaseSubmitted()) + try: + let request = !purchase.request + await purchase.market.requestStorage(request) + return some State(PurchaseSubmitted()) + except CancelledError as e: + trace "PurchasePending.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchasePending.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/purchasing/states/started.nim b/codex/purchasing/states/started.nim index 083e64c8..e93d7013 100644 --- a/codex/purchasing/states/started.nim +++ b/codex/purchasing/states/started.nim @@ -1,22 +1,25 @@ import pkg/metrics import ../../logutils +import ../../utils/exceptions import ../statemachine -import ./errorhandling import ./finished import ./failed +import ./error declareCounter(codex_purchases_started, "codex purchases started") logScope: topics = "marketplace purchases started" -type PurchaseStarted* = ref object of ErrorHandlingState +type PurchaseStarted* = ref object of PurchaseState method `$`*(state: PurchaseStarted): string = "started" -method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} = +method run*( + state: PurchaseStarted, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_started.inc() let purchase = Purchase(machine) @@ -28,15 +31,24 @@ method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} proc 
callback(_: RequestId) = failed.complete() - let subscription = await market.subscribeRequestFailed(purchase.requestId, callback) + var ended: Future[void] + try: + let subscription = await market.subscribeRequestFailed(purchase.requestId, callback) - # Ensure that we're past the request end by waiting an additional second - let ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1) - let fut = await one(ended, failed) - await subscription.unsubscribe() - if fut.id == failed.id: + # Ensure that we're past the request end by waiting an additional second + ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1) + let fut = await one(ended, failed) + await subscription.unsubscribe() + if fut.id == failed.id: + ended.cancelSoon() + return some State(PurchaseFailed()) + else: + failed.cancelSoon() + return some State(PurchaseFinished()) + except CancelledError as e: ended.cancelSoon() - return some State(PurchaseFailed()) - else: failed.cancelSoon() - return some State(PurchaseFinished()) + trace "PurchaseStarted.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseStarted.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/purchasing/states/submitted.nim b/codex/purchasing/states/submitted.nim index 1cf65b1f..dd3669e4 100644 --- a/codex/purchasing/states/submitted.nim +++ b/codex/purchasing/states/submitted.nim @@ -1,22 +1,25 @@ import pkg/metrics import ../../logutils +import ../../utils/exceptions import ../statemachine -import ./errorhandling import ./started import ./cancelled +import ./error logScope: topics = "marketplace purchases submitted" declareCounter(codex_purchases_submitted, "codex purchases submitted") -type PurchaseSubmitted* = ref object of ErrorHandlingState +type PurchaseSubmitted* = ref object of PurchaseState method `$`*(state: PurchaseSubmitted): string = "submitted" -method run*(state: PurchaseSubmitted, machine: 
Machine): Future[?State] {.async.} = +method run*( + state: PurchaseSubmitted, machine: Machine +): Future[?State] {.async: (raises: []).} = codex_purchases_submitted.inc() let purchase = Purchase(machine) let request = !purchase.request @@ -44,5 +47,10 @@ method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async. await wait().withTimeout() except Timeout: return some State(PurchaseCancelled()) + except CancelledError as e: + trace "PurchaseSubmitted.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseSubmitted.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) return some State(PurchaseStarted()) diff --git a/codex/purchasing/states/unknown.nim b/codex/purchasing/states/unknown.nim index 54e09942..8c2bff48 100644 --- a/codex/purchasing/states/unknown.nim +++ b/codex/purchasing/states/unknown.nim @@ -1,34 +1,44 @@ import pkg/metrics +import ../../utils/exceptions +import ../../logutils import ../statemachine -import ./errorhandling import ./submitted import ./started import ./cancelled import ./finished import ./failed +import ./error declareCounter(codex_purchases_unknown, "codex purchases unknown") -type PurchaseUnknown* = ref object of ErrorHandlingState +type PurchaseUnknown* = ref object of PurchaseState method `$`*(state: PurchaseUnknown): string = "unknown" -method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} = - codex_purchases_unknown.inc() - let purchase = Purchase(machine) - if (request =? await purchase.market.getRequest(purchase.requestId)) and - (requestState =? await purchase.market.requestState(purchase.requestId)): - purchase.request = some request +method run*( + state: PurchaseUnknown, machine: Machine +): Future[?State] {.async: (raises: []).} = + try: + codex_purchases_unknown.inc() + let purchase = Purchase(machine) + if (request =? await purchase.market.getRequest(purchase.requestId)) and + (requestState =? 
await purchase.market.requestState(purchase.requestId)): + purchase.request = some request - case requestState - of RequestState.New: - return some State(PurchaseSubmitted()) - of RequestState.Started: - return some State(PurchaseStarted()) - of RequestState.Cancelled: - return some State(PurchaseCancelled()) - of RequestState.Finished: - return some State(PurchaseFinished()) - of RequestState.Failed: - return some State(PurchaseFailed()) + case requestState + of RequestState.New: + return some State(PurchaseSubmitted()) + of RequestState.Started: + return some State(PurchaseStarted()) + of RequestState.Cancelled: + return some State(PurchaseCancelled()) + of RequestState.Finished: + return some State(PurchaseFinished()) + of RequestState.Failed: + return some State(PurchaseFailed()) + except CancelledError as e: + trace "PurchaseUnknown.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during PurchaseUnknown.run", error = e.msgDetail + return some State(PurchaseErrored(error: e)) diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index 8a8e5dc0..f04182aa 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -6,6 +6,7 @@ import pkg/upraises import ../contracts/requests import ../errors import ../logutils +import ../utils/exceptions import ./statemachine import ./salescontext import ./salesdata @@ -68,41 +69,48 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} = let data = agent.data let clock = agent.context.clock - proc onCancelled() {.async.} = + proc onCancelled() {.async: (raises: []).} = without request =? 
data.request: return - let market = agent.context.market - let expiry = await market.requestExpiresAt(data.requestId) + try: + let market = agent.context.market + let expiry = await market.requestExpiresAt(data.requestId) - while true: - let deadline = max(clock.now, expiry) + 1 - trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline - await clock.waitUntil(deadline) + while true: + let deadline = max(clock.now, expiry) + 1 + trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline + await clock.waitUntil(deadline) - without state =? await agent.retrieveRequestState(): - error "Uknown request", requestId = data.requestId - return + without state =? await agent.retrieveRequestState(): + error "Unknown request", requestId = data.requestId + return - case state - of New: - discard - of RequestState.Cancelled: - agent.schedule(cancelledEvent(request)) - break - of RequestState.Started, RequestState.Finished, RequestState.Failed: - break + case state + of New: + discard + of RequestState.Cancelled: + agent.schedule(cancelledEvent(request)) + break + of RequestState.Started, RequestState.Finished, RequestState.Failed: + break - debug "The request is not yet canceled, even though it should be. Waiting for some more time.", - currentState = state, now = clock.now + debug "The request is not yet canceled, even though it should be. 
Waiting for some more time.", + currentState = state, now = clock.now + except CancelledError: + trace "Waiting for expiry to lapse was cancelled", requestId = data.requestId + except CatchableError as e: + error "Error while waiting for expiry to lapse", error = e.msgDetail data.cancelled = onCancelled() + asyncSpawn data.cancelled method onFulfilled*( agent: SalesAgent, requestId: RequestId ) {.base, gcsafe, upraises: [].} = - if agent.data.requestId == requestId and not agent.data.cancelled.isNil: - agent.data.cancelled.cancelSoon() + let cancelled = agent.data.cancelled + if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished: + cancelled.cancelSoon() method onFailed*( agent: SalesAgent, requestId: RequestId diff --git a/codex/sales/states/cancelled.nim b/codex/sales/states/cancelled.nim index 3bb92a2c..3bdf8c2f 100644 --- a/codex/sales/states/cancelled.nim +++ b/codex/sales/states/cancelled.nim @@ -1,17 +1,20 @@ import ../../logutils +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling +import ./errored logScope: topics = "marketplace sales cancelled" -type SaleCancelled* = ref object of ErrorHandlingState +type SaleCancelled* = ref object of SaleState method `$`*(state: SaleCancelled): string = "SaleCancelled" -method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleCancelled, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let market = agent.context.market @@ -19,21 +22,27 @@ method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} = without request =? 
data.request: raiseAssert "no sale request" - let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Collecting collateral and partial payout", - requestId = data.requestId, slotIndex = data.slotIndex - let currentCollateral = await market.currentCollateral(slot.id) - await market.freeSlot(slot.id) + try: + let slot = Slot(request: request, slotIndex: data.slotIndex) + debug "Collecting collateral and partial payout", + requestId = data.requestId, slotIndex = data.slotIndex + let currentCollateral = await market.currentCollateral(slot.id) + await market.freeSlot(slot.id) - if onClear =? agent.context.onClear and request =? data.request: - onClear(request, data.slotIndex) + if onClear =? agent.context.onClear and request =? data.request: + onClear(request, data.slotIndex) - if onCleanUp =? agent.onCleanUp: - await onCleanUp( - returnBytes = true, - reprocessSlot = false, - returnedCollateral = some currentCollateral, - ) + if onCleanUp =? agent.onCleanUp: + await onCleanUp( + returnBytes = true, + reprocessSlot = false, + returnedCollateral = some currentCollateral, + ) - warn "Sale cancelled due to timeout", - requestId = data.requestId, slotIndex = data.slotIndex + warn "Sale cancelled due to timeout", + requestId = data.requestId, slotIndex = data.slotIndex + except CancelledError as e: + trace "SaleCancelled.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleCancelled.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/downloading.nim b/codex/sales/states/downloading.nim index f6ced6be..cb991dc8 100644 --- a/codex/sales/states/downloading.nim +++ b/codex/sales/states/downloading.nim @@ -4,16 +4,16 @@ import pkg/questionable/results import ../../blocktype as bt import ../../logutils import ../../market +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./cancelled import ./failed import ./filled import 
./initialproving import ./errored -type SaleDownloading* = ref object of ErrorHandlingState +type SaleDownloading* = ref object of SaleState logScope: topics = "marketplace sales downloading" @@ -32,7 +32,9 @@ method onSlotFilled*( ): ?State = return some State(SaleFilled()) -method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleDownloading, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context @@ -64,9 +66,15 @@ method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.} trace "Releasing batch of bytes written to disk", bytes return await reservations.release(reservation.id, reservation.availabilityId, bytes) - trace "Starting download" - if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption: - return some State(SaleErrored(error: err, reprocessSlot: false)) + try: + trace "Starting download" + if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption: + return some State(SaleErrored(error: err, reprocessSlot: false)) - trace "Download complete" - return some State(SaleInitialProving()) + trace "Download complete" + return some State(SaleInitialProving()) + except CancelledError as e: + trace "SaleDownloading.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleDownloading.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/errored.nim b/codex/sales/states/errored.nim index b85b7930..77bf08d3 100644 --- a/codex/sales/states/errored.nim +++ b/codex/sales/states/errored.nim @@ -17,10 +17,9 @@ type SaleErrored* = ref object of SaleState method `$`*(state: SaleErrored): string = "SaleErrored" -method onError*(state: SaleState, err: ref CatchableError): ?State {.upraises: [].} = - error "error during SaleErrored run", error = err.msg - -method run*(state: SaleErrored, machine: 
Machine): Future[?State] {.async.} = +method run*( + state: SaleErrored, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context @@ -30,8 +29,13 @@ method run*(state: SaleErrored, machine: Machine): Future[?State] {.async.} = requestId = data.requestId, slotIndex = data.slotIndex - if onClear =? context.onClear and request =? data.request: - onClear(request, data.slotIndex) + try: + if onClear =? context.onClear and request =? data.request: + onClear(request, data.slotIndex) - if onCleanUp =? agent.onCleanUp: - await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot) + if onCleanUp =? agent.onCleanUp: + await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot) + except CancelledError as e: + trace "SaleErrored.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleErrored.run", error = e.msgDetail diff --git a/codex/sales/states/errorhandling.nim b/codex/sales/states/errorhandling.nim deleted file mode 100644 index 2ee399ef..00000000 --- a/codex/sales/states/errorhandling.nim +++ /dev/null @@ -1,8 +0,0 @@ -import pkg/questionable -import ../statemachine -import ./errored - -type ErrorHandlingState* = ref object of SaleState - -method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State = - some State(SaleErrored(error: error)) diff --git a/codex/sales/states/failed.nim b/codex/sales/states/failed.nim index 6103765c..b0d6a7cd 100644 --- a/codex/sales/states/failed.nim +++ b/codex/sales/states/failed.nim @@ -1,30 +1,39 @@ import ../../logutils +import ../../utils/exceptions +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./errored logScope: topics = "marketplace sales failed" type - SaleFailed* = ref object of ErrorHandlingState + SaleFailed* = ref object of SaleState SaleFailedError* = object of SaleError method `$`*(state: 
SaleFailed): string = "SaleFailed" -method run*(state: SaleFailed, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFailed, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let market = SalesAgent(machine).context.market without request =? data.request: raiseAssert "no sale request" - let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Removing slot from mySlots", - requestId = data.requestId, slotIndex = data.slotIndex - await market.freeSlot(slot.id) + try: + let slot = Slot(request: request, slotIndex: data.slotIndex) + debug "Removing slot from mySlots", + requestId = data.requestId, slotIndex = data.slotIndex + await market.freeSlot(slot.id) - let error = newException(SaleFailedError, "Sale failed") - return some State(SaleErrored(error: error)) + let error = newException(SaleFailedError, "Sale failed") + return some State(SaleErrored(error: error)) + except CancelledError as e: + trace "SaleFailed.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFailed.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/filled.nim b/codex/sales/states/filled.nim index 9e7d9906..b0fc65c9 100644 --- a/codex/sales/states/filled.nim +++ b/codex/sales/states/filled.nim @@ -3,9 +3,9 @@ import pkg/questionable/results import ../../conf import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./errored import ./cancelled import ./failed @@ -18,7 +18,7 @@ logScope: topics = "marketplace sales filled" type - SaleFilled* = ref object of ErrorHandlingState + SaleFilled* = ref object of SaleState HostMismatchError* = object of CatchableError method onCancelled*(state: SaleFilled, request: StorageRequest): ?State = @@ -30,40 +30,48 @@ method onFailed*(state: SaleFilled, request: StorageRequest): ?State = method `$`*(state: SaleFilled): 
string = "SaleFilled" -method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFilled, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context - let market = context.market - let host = await market.getHost(data.requestId, data.slotIndex) - let me = await market.getSigner() - if host == me.some: - info "Slot succesfully filled", - requestId = data.requestId, slotIndex = data.slotIndex + try: + let host = await market.getHost(data.requestId, data.slotIndex) + let me = await market.getSigner() - without request =? data.request: - raiseAssert "no sale request" + if host == me.some: + info "Slot succesfully filled", + requestId = data.requestId, slotIndex = data.slotIndex - if onFilled =? agent.onFilled: - onFilled(request, data.slotIndex) + without request =? data.request: + raiseAssert "no sale request" - without onExpiryUpdate =? context.onExpiryUpdate: - raiseAssert "onExpiryUpdate callback not set" + if onFilled =? agent.onFilled: + onFilled(request, data.slotIndex) - let requestEnd = await market.getRequestEnd(data.requestId) - if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption: - return some State(SaleErrored(error: err)) + without onExpiryUpdate =? context.onExpiryUpdate: + raiseAssert "onExpiryUpdate callback not set" - when codex_enable_proof_failures: - if context.simulateProofFailures > 0: - info "Proving with failure rate", rate = context.simulateProofFailures - return some State( - SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures) - ) + let requestEnd = await market.getRequestEnd(data.requestId) + if err =? 
(await onExpiryUpdate(request.content.cid, requestEnd)).errorOption: + return some State(SaleErrored(error: err)) - return some State(SaleProving()) - else: - let error = newException(HostMismatchError, "Slot filled by other host") - return some State(SaleErrored(error: error)) + when codex_enable_proof_failures: + if context.simulateProofFailures > 0: + info "Proving with failure rate", rate = context.simulateProofFailures + return some State( + SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures) + ) + + return some State(SaleProving()) + else: + let error = newException(HostMismatchError, "Slot filled by other host") + return some State(SaleErrored(error: error)) + except CancelledError as e: + trace "SaleFilled.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFilled.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim index 1934fc12..0c20a64e 100644 --- a/codex/sales/states/filling.nim +++ b/codex/sales/states/filling.nim @@ -1,9 +1,9 @@ import pkg/stint import ../../logutils import ../../market +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./filled import ./cancelled import ./failed @@ -13,7 +13,7 @@ import ./errored logScope: topics = "marketplace sales filling" -type SaleFilling* = ref object of ErrorHandlingState +type SaleFilling* = ref object of SaleState proof*: Groth16Proof method `$`*(state: SaleFilling): string = @@ -25,7 +25,9 @@ method onCancelled*(state: SaleFilling, request: StorageRequest): ?State = method onFailed*(state: SaleFilling, request: StorageRequest): ?State = return some State(SaleFailed()) -method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFilling, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let market = 
SalesAgent(machine).context.market without (request =? data.request): @@ -35,28 +37,34 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = requestId = data.requestId slotIndex = data.slotIndex - let slotState = await market.slotState(slotId(data.requestId, data.slotIndex)) - let requestedCollateral = request.ask.collateralPerSlot - var collateral: UInt256 - - if slotState == SlotState.Repair: - # When repairing the node gets "discount" on the collateral that it needs to - let repairRewardPercentage = (await market.repairRewardPercentage).u256 - collateral = - requestedCollateral - - ((requestedCollateral * repairRewardPercentage)).div(100.u256) - else: - collateral = requestedCollateral - - debug "Filling slot" try: - await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) - except MarketError as e: - if e.msg.contains "Slot is not free": - debug "Slot is already filled, ignoring slot" - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) - else: - return some State(SaleErrored(error: e)) - # other CatchableErrors are handled "automatically" by the ErrorHandlingState + let slotState = await market.slotState(slotId(data.requestId, data.slotIndex)) + let requestedCollateral = request.ask.collateralPerSlot + var collateral: UInt256 - return some State(SaleFilled()) + if slotState == SlotState.Repair: + # When repairing the node gets "discount" on the collateral that it needs to + let repairRewardPercentage = (await market.repairRewardPercentage).u256 + collateral = + requestedCollateral - + ((requestedCollateral * repairRewardPercentage)).div(100.u256) + else: + collateral = requestedCollateral + + debug "Filling slot" + try: + await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) + except MarketError as e: + if e.msg.contains "Slot is not free": + debug "Slot is already filled, ignoring slot" + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + else: 
+ return some State(SaleErrored(error: e)) + # other CatchableErrors are handled "automatically" by the SaleState + + return some State(SaleFilled()) + except CancelledError as e: + trace "SaleFilling.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFilling.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/finished.nim b/codex/sales/states/finished.nim index 151300d0..2aba69eb 100644 --- a/codex/sales/states/finished.nim +++ b/codex/sales/states/finished.nim @@ -1,16 +1,17 @@ import pkg/chronos import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./cancelled import ./failed +import ./errored logScope: topics = "marketplace sales finished" -type SaleFinished* = ref object of ErrorHandlingState +type SaleFinished* = ref object of SaleState returnedCollateral*: ?UInt256 method `$`*(state: SaleFinished): string = @@ -22,7 +23,9 @@ method onCancelled*(state: SaleFinished, request: StorageRequest): ?State = method onFailed*(state: SaleFinished, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleFinished, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data @@ -32,5 +35,11 @@ method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} = info "Slot finished and paid out", requestId = data.requestId, slotIndex = data.slotIndex - if onCleanUp =? agent.onCleanUp: - await onCleanUp(returnedCollateral = state.returnedCollateral) + try: + if onCleanUp =? 
agent.onCleanUp: + await onCleanUp(returnedCollateral = state.returnedCollateral) + except CancelledError as e: + trace "SaleFilled.run onCleanUp was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleFilled.run in onCleanUp callback", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/ignored.nim b/codex/sales/states/ignored.nim index b915bff5..b07a201c 100644 --- a/codex/sales/states/ignored.nim +++ b/codex/sales/states/ignored.nim @@ -1,9 +1,10 @@ import pkg/chronos import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling +import ./errored logScope: topics = "marketplace sales ignored" @@ -11,17 +12,25 @@ logScope: # Ignored slots could mean there was no availability or that the slot could # not be reserved. -type SaleIgnored* = ref object of ErrorHandlingState +type SaleIgnored* = ref object of SaleState reprocessSlot*: bool # readd slot to queue with `seen` flag returnBytes*: bool # return unreleased bytes from Reservation to Availability method `$`*(state: SaleIgnored): string = "SaleIgnored" -method run*(state: SaleIgnored, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleIgnored, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) - if onCleanUp =? agent.onCleanUp: - await onCleanUp( - reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes - ) + try: + if onCleanUp =? 
agent.onCleanUp: + await onCleanUp( + reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes + ) + except CancelledError as e: + trace "SaleIgnored.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleIgnored.run in onCleanUp", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/initialproving.nim b/codex/sales/states/initialproving.nim index bc9ce6b6..3b35ba90 100644 --- a/codex/sales/states/initialproving.nim +++ b/codex/sales/states/initialproving.nim @@ -1,9 +1,9 @@ import pkg/questionable/results import ../../clock import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./filling import ./cancelled import ./errored @@ -12,7 +12,7 @@ import ./failed logScope: topics = "marketplace sales initial-proving" -type SaleInitialProving* = ref object of ErrorHandlingState +type SaleInitialProving* = ref object of SaleState method `$`*(state: SaleInitialProving): string = "SaleInitialProving" @@ -36,7 +36,9 @@ proc waitForStableChallenge(market: Market, clock: Clock, slotId: SlotId) {.asyn while (await market.getPointer(slotId)) > (256 - downtime): await clock.waitUntilNextPeriod(periodicity) -method run*(state: SaleInitialProving, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleInitialProving, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let context = SalesAgent(machine).context let market = context.market @@ -48,16 +50,22 @@ method run*(state: SaleInitialProving, machine: Machine): Future[?State] {.async without onProve =? 
context.onProve: raiseAssert "onProve callback not set" - debug "Waiting for a proof challenge that is valid for the entire period" - let slot = Slot(request: request, slotIndex: data.slotIndex) - await waitForStableChallenge(market, clock, slot.id) + try: + debug "Waiting for a proof challenge that is valid for the entire period" + let slot = Slot(request: request, slotIndex: data.slotIndex) + await waitForStableChallenge(market, clock, slot.id) - debug "Generating initial proof", requestId = data.requestId - let challenge = await context.market.getChallenge(slot.id) - without proof =? (await onProve(slot, challenge)), err: - error "Failed to generate initial proof", error = err.msg - return some State(SaleErrored(error: err)) + debug "Generating initial proof", requestId = data.requestId + let challenge = await context.market.getChallenge(slot.id) + without proof =? (await onProve(slot, challenge)), err: + error "Failed to generate initial proof", error = err.msg + return some State(SaleErrored(error: err)) - debug "Finished proof calculation", requestId = data.requestId + debug "Finished proof calculation", requestId = data.requestId - return some State(SaleFilling(proof: proof)) + return some State(SaleFilling(proof: proof)) + except CancelledError as e: + trace "SaleInitialProving.run onCleanUp was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleInitialProving.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/payout.nim b/codex/sales/states/payout.nim index 9ce36613..e808307d 100644 --- a/codex/sales/states/payout.nim +++ b/codex/sales/states/payout.nim @@ -1,16 +1,17 @@ import ../../logutils import ../../market +import ../../utils/exceptions import ../statemachine import ../salesagent -import ./errorhandling import ./cancelled import ./failed import ./finished +import ./errored logScope: topics = "marketplace sales payout" -type SalePayout* = ref object of 
ErrorHandlingState +type SalePayout* = ref object of SaleState method `$`*(state: SalePayout): string = "SalePayout" @@ -21,17 +22,25 @@ method onCancelled*(state: SalePayout, request: StorageRequest): ?State = method onFailed*(state: SalePayout, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SalePayout, machine: Machine): Future[?State] {.async.} = +method run*( + state: SalePayout, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let market = SalesAgent(machine).context.market without request =? data.request: raiseAssert "no sale request" - let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Collecting finished slot's reward", - requestId = data.requestId, slotIndex = data.slotIndex - let currentCollateral = await market.currentCollateral(slot.id) - await market.freeSlot(slot.id) + try: + let slot = Slot(request: request, slotIndex: data.slotIndex) + debug "Collecting finished slot's reward", + requestId = data.requestId, slotIndex = data.slotIndex + let currentCollateral = await market.currentCollateral(slot.id) + await market.freeSlot(slot.id) - return some State(SaleFinished(returnedCollateral: some currentCollateral)) + return some State(SaleFinished(returnedCollateral: some currentCollateral)) + except CancelledError as e: + trace "SalePayout.run onCleanUp was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SalePayout.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/preparing.nim b/codex/sales/states/preparing.nim index bdde1249..7509558c 100644 --- a/codex/sales/states/preparing.nim +++ b/codex/sales/states/preparing.nim @@ -4,9 +4,9 @@ import pkg/metrics import ../../logutils import ../../market +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./cancelled import ./failed import ./filled @@ -18,7 +18,7 @@ 
declareCounter( codex_reservations_availability_mismatch, "codex reservations availability_mismatch" ) -type SalePreparing* = ref object of ErrorHandlingState +type SalePreparing* = ref object of SaleState logScope: topics = "marketplace sales preparing" @@ -37,62 +37,70 @@ method onSlotFilled*( ): ?State = return some State(SaleFilled()) -method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} = +method run*( + state: SalePreparing, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context let market = context.market let reservations = context.reservations - await agent.retrieveRequest() - await agent.subscribe() + try: + await agent.retrieveRequest() + await agent.subscribe() - without request =? data.request: - raiseAssert "no sale request" + without request =? data.request: + raiseAssert "no sale request" - let slotId = slotId(data.requestId, data.slotIndex) - let state = await market.slotState(slotId) - if state != SlotState.Free and state != SlotState.Repair: - return some State(SaleIgnored(reprocessSlot: false, returnBytes: false)) + let slotId = slotId(data.requestId, data.slotIndex) + let state = await market.slotState(slotId) + if state != SlotState.Free and state != SlotState.Repair: + return some State(SaleIgnored(reprocessSlot: false, returnBytes: false)) - # TODO: Once implemented, check to ensure the host is allowed to fill the slot, - # due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal) + # TODO: Once implemented, check to ensure the host is allowed to fill the slot, + # due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal) - logScope: - slotIndex = data.slotIndex - slotSize = request.ask.slotSize - duration = request.ask.duration - pricePerBytePerSecond = request.ask.pricePerBytePerSecond 
- collateralPerByte = request.ask.collateralPerByte + logScope: + slotIndex = data.slotIndex + slotSize = request.ask.slotSize + duration = request.ask.duration + pricePerBytePerSecond = request.ask.pricePerBytePerSecond + collateralPerByte = request.ask.collateralPerByte - without availability =? - await reservations.findAvailability( - request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond, - request.ask.collateralPerByte, - ): - debug "No availability found for request, ignoring" + without availability =? + await reservations.findAvailability( + request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond, + request.ask.collateralPerByte, + ): + debug "No availability found for request, ignoring" - return some State(SaleIgnored(reprocessSlot: true)) - - info "Availability found for request, creating reservation" - - without reservation =? - await reservations.createReservation( - availability.id, request.ask.slotSize, request.id, data.slotIndex, - request.ask.collateralPerByte, - ), error: - trace "Creation of reservation failed" - # Race condition: - # reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it. - # Should createReservation fail because there's no space, we proceed to SaleIgnored. - if error of BytesOutOfBoundsError: - # Lets monitor how often this happen and if it is often we can make it more inteligent to handle it - codex_reservations_availability_mismatch.inc() return some State(SaleIgnored(reprocessSlot: true)) - return some State(SaleErrored(error: error)) + info "Availability found for request, creating reservation" - trace "Reservation created succesfully" + without reservation =? 
+ await reservations.createReservation( + availability.id, request.ask.slotSize, request.id, data.slotIndex, + request.ask.collateralPerByte, + ), error: + trace "Creation of reservation failed" + # Race condition: + # reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it. + # Should createReservation fail because there's no space, we proceed to SaleIgnored. + if error of BytesOutOfBoundsError: + # Lets monitor how often this happen and if it is often we can make it more inteligent to handle it + codex_reservations_availability_mismatch.inc() + return some State(SaleIgnored(reprocessSlot: true)) - data.reservation = some reservation - return some State(SaleSlotReserving()) + return some State(SaleErrored(error: error)) + + trace "Reservation created successfully" + + data.reservation = some reservation + return some State(SaleSlotReserving()) + except CancelledError as e: + trace "SalePreparing.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SalePreparing.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/sales/states/proving.nim b/codex/sales/states/proving.nim index 0ee2ed60..759cad0c 100644 --- a/codex/sales/states/proving.nim +++ b/codex/sales/states/proving.nim @@ -6,7 +6,6 @@ import ../../utils/exceptions import ../statemachine import ../salesagent import ../salescontext -import ./errorhandling import ./cancelled import ./failed import ./errored @@ -18,7 +17,7 @@ logScope: type SlotFreedError* = object of CatchableError SlotNotFilledError* = object of CatchableError - SaleProving* = ref object of ErrorHandlingState + SaleProving* = ref object of SaleState loop: Future[void] method prove*( @@ -113,7 +112,9 @@ method onFailed*(state: SaleProving, request: StorageRequest): ?State = # state change return some State(SaleFailed()) -method run*(state: SaleProving, machine: Machine): 
Future[?State] {.async.} = +method run*( + state: SaleProving, machine: Machine +): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let context = SalesAgent(machine).context @@ -129,27 +130,37 @@ method run*(state: SaleProving, machine: Machine): Future[?State] {.async.} = without clock =? context.clock: raiseAssert("clock not set") - debug "Start proving", requestId = data.requestId, slotIndex = data.slotIndex try: - let loop = state.proveLoop(market, clock, request, data.slotIndex, onProve) - state.loop = loop - await loop - except CancelledError: - discard + debug "Start proving", requestId = data.requestId, slotIndex = data.slotIndex + try: + let loop = state.proveLoop(market, clock, request, data.slotIndex, onProve) + state.loop = loop + await loop + except CancelledError as e: + trace "proving loop cancelled" + discard + except CatchableError as e: + error "Proving failed", + msg = e.msg, typ = $(type e), stack = e.getStackTrace(), error = e.msgDetail + return some State(SaleErrored(error: e)) + finally: + # Cleanup of the proving loop + debug "Stopping proving.", requestId = data.requestId, slotIndex = data.slotIndex + + if not state.loop.isNil: + if not state.loop.finished: + try: + await state.loop.cancelAndWait() + except CancelledError: + discard + except CatchableError as e: + error "Error during cancellation of proving loop", msg = e.msg + + state.loop = nil + + return some State(SalePayout()) + except CancelledError as e: + trace "SaleProving.run onCleanUp was cancelled", error = e.msgDetail except CatchableError as e: - error "Proving failed", msg = e.msg + error "Error during SaleProving.run", error = e.msgDetail return some State(SaleErrored(error: e)) - finally: - # Cleanup of the proving loop - debug "Stopping proving.", requestId = data.requestId, slotIndex = data.slotIndex - - if not state.loop.isNil: - if not state.loop.finished: - try: - await state.loop.cancelAndWait() - except CatchableError as e: - error "Error 
during cancellation of proving loop", msg = e.msg - - state.loop = nil - - return some State(SalePayout()) diff --git a/codex/sales/states/provingsimulated.nim b/codex/sales/states/provingsimulated.nim index e60169bc..a797e113 100644 --- a/codex/sales/states/provingsimulated.nim +++ b/codex/sales/states/provingsimulated.nim @@ -4,12 +4,14 @@ when codex_enable_proof_failures: import pkg/stint import pkg/ethers + import ../../contracts/marketplace import ../../contracts/requests import ../../logutils import ../../market import ../../utils/exceptions import ../salescontext import ./proving + import ./errored logScope: topics = "marketplace sales simulated-proving" @@ -29,22 +31,27 @@ when codex_enable_proof_failures: market: Market, currentPeriod: Period, ) {.async.} = - trace "Processing proving in simulated mode" - state.proofCount += 1 - if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0: - state.proofCount = 0 + try: + trace "Processing proving in simulated mode" + state.proofCount += 1 + if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0: + state.proofCount = 0 - try: - warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id - await market.submitProof(slot.id, Groth16Proof.default) - except MarketError as e: - if not e.msg.contains("Invalid proof"): + try: + warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id + await market.submitProof(slot.id, Groth16Proof.default) + except Proofs_InvalidProof as e: + discard # expected + except CancelledError as error: + raise error + except CatchableError as e: onSubmitProofError(e, currentPeriod, slot.id) - except CancelledError as error: - raise error - except CatchableError as e: - onSubmitProofError(e, currentPeriod, slot.id) - else: - await procCall SaleProving(state).prove( - slot, challenge, onProve, market, currentPeriod - ) + else: + await procCall SaleProving(state).prove( + slot, challenge, onProve, market, 
currentPeriod + ) + except CancelledError as e: + trace "Submitting INVALID proof cancelled", error = e.msgDetail + raise e + except CatchableError as e: + error "Submitting INVALID proof failed", error = e.msgDetail diff --git a/codex/sales/states/slotreserving.nim b/codex/sales/states/slotreserving.nim index 38b7fa76..a67c51a0 100644 --- a/codex/sales/states/slotreserving.nim +++ b/codex/sales/states/slotreserving.nim @@ -3,16 +3,16 @@ import pkg/metrics import ../../logutils import ../../market +import ../../utils/exceptions import ../salesagent import ../statemachine -import ./errorhandling import ./cancelled import ./failed import ./ignored import ./downloading import ./errored -type SaleSlotReserving* = ref object of ErrorHandlingState +type SaleSlotReserving* = ref object of SaleState logScope: topics = "marketplace sales reserving" @@ -26,7 +26,9 @@ method onCancelled*(state: SaleSlotReserving, request: StorageRequest): ?State = method onFailed*(state: SaleSlotReserving, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleSlotReserving, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let context = agent.context @@ -36,23 +38,29 @@ method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async. 
requestId = data.requestId slotIndex = data.slotIndex - let canReserve = await market.canReserveSlot(data.requestId, data.slotIndex) - if canReserve: - try: - trace "Reserving slot" - await market.reserveSlot(data.requestId, data.slotIndex) - except MarketError as e: - if e.msg.contains "SlotReservations_ReservationNotAllowed": - debug "Slot cannot be reserved, ignoring", error = e.msg - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) - else: - return some State(SaleErrored(error: e)) - # other CatchableErrors are handled "automatically" by the ErrorHandlingState + try: + let canReserve = await market.canReserveSlot(data.requestId, data.slotIndex) + if canReserve: + try: + trace "Reserving slot" + await market.reserveSlot(data.requestId, data.slotIndex) + except MarketError as e: + if e.msg.contains "SlotReservations_ReservationNotAllowed": + debug "Slot cannot be reserved, ignoring", error = e.msg + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + else: + return some State(SaleErrored(error: e)) + # other CatchableErrors are handled "automatically" by the SaleState - trace "Slot successfully reserved" - return some State(SaleDownloading()) - else: - # do not re-add this slot to the queue, and return bytes from Reservation to - # the Availability - debug "Slot cannot be reserved, ignoring" - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + trace "Slot successfully reserved" + return some State(SaleDownloading()) + else: + # do not re-add this slot to the queue, and return bytes from Reservation to + # the Availability + debug "Slot cannot be reserved, ignoring" + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + except CancelledError as e: + trace "SaleSlotReserving.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleSlotReserving.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git 
a/codex/sales/states/unknown.nim b/codex/sales/states/unknown.nim index 3034129a..d182d744 100644 --- a/codex/sales/states/unknown.nim +++ b/codex/sales/states/unknown.nim @@ -1,4 +1,5 @@ import ../../logutils +import ../../utils/exceptions import ../statemachine import ../salesagent import ./filled @@ -26,34 +27,42 @@ method onCancelled*(state: SaleUnknown, request: StorageRequest): ?State = method onFailed*(state: SaleUnknown, request: StorageRequest): ?State = return some State(SaleFailed()) -method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} = +method run*( + state: SaleUnknown, machine: Machine +): Future[?State] {.async: (raises: []).} = let agent = SalesAgent(machine) let data = agent.data let market = agent.context.market - await agent.retrieveRequest() - await agent.subscribe() + try: + await agent.retrieveRequest() + await agent.subscribe() - let slotId = slotId(data.requestId, data.slotIndex) - let slotState = await market.slotState(slotId) + let slotId = slotId(data.requestId, data.slotIndex) + let slotState = await market.slotState(slotId) - case slotState - of SlotState.Free: - let error = - newException(UnexpectedSlotError, "Slot state on chain should not be 'free'") - return some State(SaleErrored(error: error)) - of SlotState.Filled: - return some State(SaleFilled()) - of SlotState.Finished: - return some State(SalePayout()) - of SlotState.Paid: - return some State(SaleFinished()) - of SlotState.Failed: - return some State(SaleFailed()) - of SlotState.Cancelled: - return some State(SaleCancelled()) - of SlotState.Repair: - let error = newException( - SlotFreedError, "Slot was forcible freed and host was removed from its hosting" - ) - return some State(SaleErrored(error: error)) + case slotState + of SlotState.Free: + let error = + newException(UnexpectedSlotError, "Slot state on chain should not be 'free'") + return some State(SaleErrored(error: error)) + of SlotState.Filled: + return some State(SaleFilled()) + of 
SlotState.Finished: + return some State(SalePayout()) + of SlotState.Paid: + return some State(SaleFinished()) + of SlotState.Failed: + return some State(SaleFailed()) + of SlotState.Cancelled: + return some State(SaleCancelled()) + of SlotState.Repair: + let error = newException( + SlotFreedError, "Slot was forcible freed and host was removed from its hosting" + ) + return some State(SaleErrored(error: error)) + except CancelledError as e: + trace "SaleUnknown.run was cancelled", error = e.msgDetail + except CatchableError as e: + error "Error during SaleUnknown.run", error = e.msgDetail + return some State(SaleErrored(error: e)) diff --git a/codex/utils/asyncstatemachine.nim b/codex/utils/asyncstatemachine.nim index 572ae246..2d87ebc1 100644 --- a/codex/utils/asyncstatemachine.nim +++ b/codex/utils/asyncstatemachine.nim @@ -2,6 +2,7 @@ import pkg/questionable import pkg/chronos import ../logutils import ./trackedfutures +import ./exceptions {.push raises: [].} @@ -46,24 +47,14 @@ proc schedule*(machine: Machine, event: Event) = except AsyncQueueFullError: raiseAssert "unlimited queue is full?!" -method run*(state: State, machine: Machine): Future[?State] {.base, async.} = +method run*( + state: State, machine: Machine +): Future[?State] {.base, async: (raises: []).} = discard -method onError*(state: State, error: ref CatchableError): ?State {.base.} = - raise (ref Defect)(msg: "error in state machine: " & error.msg, parent: error) - -proc onError(machine: Machine, error: ref CatchableError): Event = - return proc(state: State): ?State = - state.onError(error) - proc run(machine: Machine, state: State) {.async: (raises: []).} = - try: - if next =? await state.run(machine): - machine.schedule(Event.transition(state, next)) - except CancelledError: - discard # do not propagate - except CatchableError as e: - machine.schedule(machine.onError(e)) + if next =? 
await state.run(machine): + machine.schedule(Event.transition(state, next)) proc scheduler(machine: Machine) {.async: (raises: []).} = var running: Future[void].Raising([]) diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index 05f31057..09a2ce49 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -36,6 +36,7 @@ asyncchecksuite "Sales - start": var repo: RepoStore var queue: SlotQueue var itemsProcessed: seq[SlotQueueItem] + var expiry: SecondsSince1970 setup: request = StorageRequest( @@ -76,7 +77,8 @@ asyncchecksuite "Sales - start": ): Future[?!Groth16Proof] {.async.} = return success(proof) itemsProcessed = @[] - request.expiry = (clock.now() + 42).u256 + expiry = (clock.now() + 42) + request.expiry = expiry.u256 teardown: await sales.stop() @@ -97,6 +99,7 @@ asyncchecksuite "Sales - start": request.ask.slots = 2 market.requested = @[request] market.requestState[request.id] = RequestState.New + market.requestExpiry[request.id] = expiry let slot0 = MockSlot(requestId: request.id, slotIndex: 0.u256, proof: proof, host: me) @@ -430,23 +433,6 @@ asyncchecksuite "Sales": check eventually storingRequest == request check storingSlot < request.ask.slots.u256 - test "handles errors during state run": - var saleFailed = false - sales.onProve = proc( - slot: Slot, challenge: ProofChallenge - ): Future[?!Groth16Proof] {.async.} = - # raise exception so machine.onError is called - raise newException(ValueError, "some error") - - # onClear is called in SaleErrored.run - sales.onClear = proc(request: StorageRequest, idx: UInt256) = - saleFailed = true - createAvailability() - await market.requestStorage(request) - await allowRequestToStart() - - check eventually saleFailed - test "makes storage available again when data retrieval fails": let error = newException(IOError, "data retrieval failed") sales.onStore = proc( diff --git a/tests/codex/sales/testsalesagent.nim b/tests/codex/sales/testsalesagent.nim index 
f17711d3..f1cb53a6 100644 --- a/tests/codex/sales/testsalesagent.nim +++ b/tests/codex/sales/testsalesagent.nim @@ -4,7 +4,6 @@ import pkg/codex/sales import pkg/codex/sales/salesagent import pkg/codex/sales/salescontext import pkg/codex/sales/statemachine -import pkg/codex/sales/states/errorhandling import ../../asynctest import ../helpers/mockmarket @@ -15,18 +14,12 @@ import ../examples var onCancelCalled = false var onFailedCalled = false var onSlotFilledCalled = false -var onErrorCalled = false -type - MockState = ref object of SaleState - MockErrorState = ref object of ErrorHandlingState +type MockState = ref object of SaleState method `$`*(state: MockState): string = "MockState" -method `$`*(state: MockErrorState): string = - "MockErrorState" - method onCancelled*(state: MockState, request: StorageRequest): ?State = onCancelCalled = true @@ -38,12 +31,6 @@ method onSlotFilled*( ): ?State = onSlotFilledCalled = true -method onError*(state: MockErrorState, err: ref CatchableError): ?State = - onErrorCalled = true - -method run*(state: MockErrorState, machine: Machine): Future[?State] {.async.} = - raise newException(ValueError, "failure") - asyncchecksuite "Sales agent": let request = StorageRequest.example var agent: SalesAgent @@ -123,7 +110,9 @@ asyncchecksuite "Sales agent": agent.start(MockState.new()) await agent.subscribe() agent.onFulfilled(request.id) - check eventually agent.data.cancelled.cancelled() + # Note: futures that are cancelled, and do not re-raise the CancelledError + # will have a state of completed, not cancelled. 
+ check eventually agent.data.cancelled.completed() test "current state onFailed called when onFailed called": agent.start(MockState.new()) @@ -134,7 +123,3 @@ asyncchecksuite "Sales agent": agent.start(MockState.new()) agent.onSlotFilled(request.id, slotIndex) check eventually onSlotFilledCalled - - test "ErrorHandlingState.onError can be overridden at the state level": - agent.start(MockErrorState.new()) - check eventually onErrorCalled diff --git a/tests/codex/utils/testasyncstatemachine.nim b/tests/codex/utils/testasyncstatemachine.nim index 40a040c4..ed3ea747 100644 --- a/tests/codex/utils/testasyncstatemachine.nim +++ b/tests/codex/utils/testasyncstatemachine.nim @@ -10,9 +10,8 @@ type State1 = ref object of State State2 = ref object of State State3 = ref object of State - State4 = ref object of State -var runs, cancellations, errors = [0, 0, 0, 0] +var runs, cancellations = [0, 0, 0, 0] method `$`(state: State1): string = "State1" @@ -23,28 +22,20 @@ method `$`(state: State2): string = method `$`(state: State3): string = "State3" -method `$`(state: State4): string = - "State4" - -method run(state: State1, machine: Machine): Future[?State] {.async.} = +method run(state: State1, machine: Machine): Future[?State] {.async: (raises: []).} = inc runs[0] return some State(State2.new()) -method run(state: State2, machine: Machine): Future[?State] {.async.} = +method run(state: State2, machine: Machine): Future[?State] {.async: (raises: []).} = inc runs[1] try: await sleepAsync(1.hours) except CancelledError: inc cancellations[1] - raise -method run(state: State3, machine: Machine): Future[?State] {.async.} = +method run(state: State3, machine: Machine): Future[?State] {.async: (raises: []).} = inc runs[2] -method run(state: State4, machine: Machine): Future[?State] {.async.} = - inc runs[3] - raise newException(ValueError, "failed") - method onMoveToNextStateEvent*(state: State): ?State {.base, upraises: [].} = discard @@ -54,19 +45,6 @@ method 
onMoveToNextStateEvent(state: State2): ?State = method onMoveToNextStateEvent(state: State3): ?State = some State(State1.new()) -method onError(state: State1, error: ref CatchableError): ?State = - inc errors[0] - -method onError(state: State2, error: ref CatchableError): ?State = - inc errors[1] - -method onError(state: State3, error: ref CatchableError): ?State = - inc errors[2] - -method onError(state: State4, error: ref CatchableError): ?State = - inc errors[3] - some State(State2.new()) - asyncchecksuite "async state machines": var machine: Machine @@ -76,7 +54,6 @@ asyncchecksuite "async state machines": setup: runs = [0, 0, 0, 0] cancellations = [0, 0, 0, 0] - errors = [0, 0, 0, 0] machine = Machine.new() test "should call run on start state": @@ -112,16 +89,6 @@ asyncchecksuite "async state machines": check runs == [0, 1, 0, 0] check cancellations == [0, 1, 0, 0] - test "forwards errors to error handler": - machine.start(State4.new()) - check eventually errors == [0, 0, 0, 1] and runs == [0, 1, 0, 1] - - test "error handler ignores CancelledError": - machine.start(State2.new()) - machine.schedule(moveToNextStateEvent) - check eventually cancellations == [0, 1, 0, 0] - check errors == [0, 0, 0, 0] - test "queries properties of the current state": proc description(state: State): string = $state From 04327a3986823470b805d653ead8ff05584d1b59 Mon Sep 17 00:00:00 2001 From: Eric <5089238+emizzle@users.noreply.github.com> Date: Thu, 20 Feb 2025 16:52:51 +1100 Subject: [PATCH 15/40] chore(integration): simplify block expiration integration test (#1100) * chore(integration): simplify block expiration integration test * clean up * fix after rebase --- tests/integration/codexconfig.nim | 48 ++++++++++ tests/integration/multinodes.nim | 1 + tests/integration/testblockexpiration.nim | 109 +++++++--------------- 3 files changed, 84 insertions(+), 74 deletions(-) diff --git a/tests/integration/codexconfig.nim b/tests/integration/codexconfig.nim index 41d7109c..138ae274 
100644 --- a/tests/integration/codexconfig.nim +++ b/tests/integration/codexconfig.nim @@ -200,6 +200,54 @@ proc withLogLevel*( config.addCliOption("--log-level", $level) return startConfig +proc withBlockTtl*( + self: CodexConfig, ttl: int +): CodexConfig {.raises: [CodexConfigError].} = + var config = self + config.addCliOption("--block-ttl", $ttl) + return config + +proc withBlockTtl*( + self: CodexConfigs, idx: int, ttl: int +): CodexConfigs {.raises: [CodexConfigError].} = + self.checkBounds idx + + var startConfig = self + startConfig.configs[idx].addCliOption("--block-ttl", $ttl) + return startConfig + +proc withBlockTtl*( + self: CodexConfigs, ttl: int +): CodexConfigs {.raises: [CodexConfigError].} = + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--block-ttl", $ttl) + return startConfig + +proc withBlockMaintenanceInterval*( + self: CodexConfig, interval: int +): CodexConfig {.raises: [CodexConfigError].} = + var config = self + config.addCliOption("--block-mi", $interval) + return config + +proc withBlockMaintenanceInterval*( + self: CodexConfigs, idx: int, interval: int +): CodexConfigs {.raises: [CodexConfigError].} = + self.checkBounds idx + + var startConfig = self + startConfig.configs[idx].addCliOption("--block-mi", $interval) + return startConfig + +proc withBlockMaintenanceInterval*( + self: CodexConfigs, interval: int +): CodexConfigs {.raises: [CodexConfigError].} = + var startConfig = self + for config in startConfig.configs.mitems: + config.addCliOption("--block-mi", $interval) + return startConfig + proc withSimulateProofFailures*( self: CodexConfigs, idx: int, failEveryNProofs: int ): CodexConfigs {.raises: [CodexConfigError].} = diff --git a/tests/integration/multinodes.nim b/tests/integration/multinodes.nim index ae9a38ab..bade6899 100644 --- a/tests/integration/multinodes.nim +++ b/tests/integration/multinodes.nim @@ -22,6 +22,7 @@ export hardhatprocess export codexprocess export hardhatconfig 
export codexconfig +export nodeconfigs type RunningNode* = ref object diff --git a/tests/integration/testblockexpiration.nim b/tests/integration/testblockexpiration.nim index e3fad75c..7e742c2a 100644 --- a/tests/integration/testblockexpiration.nim +++ b/tests/integration/testblockexpiration.nim @@ -1,89 +1,50 @@ -import std/os -import std/httpclient -import std/strutils -from std/net import TimeoutError +import ../examples +import ./multinodes -import pkg/chronos -import ../ethertest -import ./codexprocess -import ./nodeprocess - -ethersuite "Node block expiration tests": - var node: CodexProcess - var baseurl: string - - let dataDir = getTempDir() / "Codex1" - let content = "test file content" +multinodesuite "Node block expiration tests": + var content: seq[byte] setup: - baseurl = "http://localhost:8080/api/codex/v1" + content = await RandomChunker.example(blocks = 8) - teardown: - await node.stop() + test "node retains not-expired file", + NodeConfigs( + clients: CodexConfigs + .init(nodes = 1) + .withBlockTtl(0, 10) + .withBlockMaintenanceInterval(0, 1).some, + providers: CodexConfigs.none, + ): + let client = clients()[0] + let clientApi = client.client - dataDir.removeDir() - - proc startTestNode(blockTtlSeconds: int) {.async.} = - node = await CodexProcess.startNode( - @[ - "--api-port=8080", - "--data-dir=" & dataDir, - "--nat=none", - "--listen-addrs=/ip4/127.0.0.1/tcp/0", - "--disc-port=8090", - "--block-ttl=" & $blockTtlSeconds, - "--block-mi=1", - "--block-mn=10", - ], - false, - "cli-test-node", - ) - await node.waitUntilStarted() - - proc uploadTestFile(): string = - let client = newHttpClient() - let uploadUrl = baseurl & "/data" - let uploadResponse = client.post(uploadUrl, content) - check uploadResponse.status == "200 OK" - client.close() - uploadResponse.body - - proc downloadTestFile(contentId: string, local = false): Response = - let client = newHttpClient(timeout = 3000) - let downloadUrl = - baseurl & "/data/" & contentId & (if local: "" 
else: "/network/stream") - - let content = client.get(downloadUrl) - client.close() - content - - proc hasFile(contentId: string): bool = - let client = newHttpClient(timeout = 3000) - let dataLocalUrl = baseurl & "/data/" & contentId - let content = client.get(dataLocalUrl) - client.close() - content.code == Http200 - - test "node retains not-expired file": - await startTestNode(blockTtlSeconds = 10) - - let contentId = uploadTestFile() + let contentId = clientApi.upload(content).get await sleepAsync(2.seconds) - let response = downloadTestFile(contentId, local = true) + let download = clientApi.download(contentId, local = true) + check: - hasFile(contentId) - response.status == "200 OK" - response.body == content + download.isOk + download.get == string.fromBytes(content) - test "node deletes expired file": - await startTestNode(blockTtlSeconds = 1) + test "node deletes expired file", + NodeConfigs( + clients: CodexConfigs + .init(nodes = 1) + .withBlockTtl(0, 1) + .withBlockMaintenanceInterval(0, 1).some, + providers: CodexConfigs.none, + ): + let client = clients()[0] + let clientApi = client.client - let contentId = uploadTestFile() + let contentId = clientApi.upload(content).get await sleepAsync(3.seconds) + let download = clientApi.download(contentId, local = true) + check: - not hasFile(contentId) - downloadTestFile(contentId, local = true).code == Http404 + download.isFailure + download.error.msg == "404 Not Found" From 44981d24d080a1b2737761c4df1e8fa1e60bdb82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adam=20Uhl=C3=AD=C5=99?= Date: Thu, 20 Feb 2025 08:11:06 +0100 Subject: [PATCH 16/40] perf: contract storage optimizations (#1094) * perf: contract storage optimizations * Apply optimization changes * Apply optimizing parameters sizing * Update codex-contracts-eth * bump latest changes in contracts branch * Change requestDurationLimit to uint64 * fix tests * fix tests --------- Co-authored-by: Arnaud Co-authored-by: Eric 
<5089238+emizzle@users.noreply.github.com> --- codex/clock.nim | 3 + codex/contracts/config.nim | 12 +- codex/contracts/market.nim | 16 +-- codex/contracts/marketplace.nim | 18 +-- codex/contracts/requests.nim | 37 ++--- codex/market.nim | 31 +++-- codex/node.nim | 45 ++++--- codex/periods.nim | 6 +- codex/rest/api.nim | 2 +- codex/rest/json.nim | 12 +- codex/sales.nim | 53 +++++--- codex/sales/reservations.nim | 59 ++++---- codex/sales/salesagent.nim | 6 +- codex/sales/salescontext.nim | 6 +- codex/sales/salesdata.nim | 2 +- codex/sales/slotqueue.nim | 15 ++- codex/sales/statemachine.nim | 4 +- codex/sales/states/downloading.nim | 2 +- codex/sales/states/initialproving.nim | 6 +- codex/sales/states/preparing.nim | 2 +- codex/sales/states/proving.nim | 6 +- codex/sales/states/provingsimulated.nim | 2 +- codex/validation.nim | 26 ++-- tests/codex/examples.nim | 10 +- tests/codex/helpers/mockmarket.nim | 37 ++--- tests/codex/helpers/mockreservations.nim | 4 +- tests/codex/helpers/mocksalesagent.nim | 2 +- tests/codex/helpers/mockslotqueueitem.nim | 6 +- tests/codex/node/testcontracts.nim | 5 +- tests/codex/node/testnode.nim | 4 +- tests/codex/sales/helpers/periods.nim | 4 +- tests/codex/sales/states/testcancelled.nim | 2 +- tests/codex/sales/states/testdownloading.nim | 2 +- tests/codex/sales/states/testerrored.nim | 2 +- tests/codex/sales/states/testfilled.nim | 2 +- tests/codex/sales/states/testfilling.nim | 2 +- tests/codex/sales/states/testfinished.nim | 2 +- tests/codex/sales/states/testignored.nim | 2 +- .../codex/sales/states/testinitialproving.nim | 2 +- tests/codex/sales/states/testpayout.nim | 2 +- tests/codex/sales/states/testpreparing.nim | 8 +- tests/codex/sales/states/testproving.nim | 4 +- .../sales/states/testsimulatedproving.nim | 4 +- .../codex/sales/states/testslotreserving.nim | 2 +- tests/codex/sales/states/testunknown.nim | 2 +- tests/codex/sales/testreservations.nim | 41 +++--- tests/codex/sales/testsales.nim | 118 ++++++++-------- 
tests/codex/sales/testsalesagent.nim | 9 +- tests/codex/sales/testslotqueue.nim | 76 +++++------ tests/codex/testpurchasing.nim | 17 ++- tests/codex/testvalidation.nim | 14 +- tests/contracts/testContracts.nim | 27 ++-- tests/contracts/testMarket.nim | 126 +++++++++--------- tests/examples.nim | 10 +- tests/integration/codexclient.nim | 19 +-- tests/integration/marketplacesuite.nim | 26 ++-- tests/integration/testmarketplace.nim | 40 +++--- tests/integration/testproofs.nim | 19 ++- tests/integration/testpurchasing.nim | 30 ++--- tests/integration/testrestapi.nim | 32 ++--- tests/integration/testsales.nim | 53 ++++---- tests/integration/testvalidator.nim | 12 +- vendor/codex-contracts-eth | 2 +- 63 files changed, 585 insertions(+), 567 deletions(-) diff --git a/codex/clock.nim b/codex/clock.nim index 98db22f7..c02e04aa 100644 --- a/codex/clock.nim +++ b/codex/clock.nim @@ -40,5 +40,8 @@ proc toSecondsSince1970*(bytes: seq[byte]): SecondsSince1970 = let asUint = uint64.fromBytes(bytes) cast[int64](asUint) +proc toSecondsSince1970*(num: uint64): SecondsSince1970 = + cast[int64](num) + proc toSecondsSince1970*(bigint: UInt256): SecondsSince1970 = bigint.truncate(int64) diff --git a/codex/contracts/config.nim b/codex/contracts/config.nim index 986b1944..3c31c8b5 100644 --- a/codex/contracts/config.nim +++ b/codex/contracts/config.nim @@ -11,7 +11,7 @@ type collateral*: CollateralConfig proofs*: ProofConfig reservations*: SlotReservationsConfig - requestDurationLimit*: UInt256 + requestDurationLimit*: uint64 CollateralConfig* = object repairRewardPercentage*: uint8 @@ -22,14 +22,14 @@ type # percentage of the slashed amount going to the validators ProofConfig* = object - period*: UInt256 # proofs requirements are calculated per period (in seconds) - timeout*: UInt256 # mark proofs as missing before the timeout (in seconds) + period*: uint64 # proofs requirements are calculated per period (in seconds) + timeout*: uint64 # mark proofs as missing before the timeout (in 
seconds) downtime*: uint8 # ignore this much recent blocks for proof requirements + downtimeProduct*: uint8 zkeyHash*: string # hash of the zkey file which is linked to the verifier # Ensures the pointer does not remain in downtime for many consecutive # periods. For each period increase, move the pointer `pointerProduct` # blocks. Should be a prime number to ensure there are no cycles. - downtimeProduct*: uint8 SlotReservationsConfig* = object maxReservations*: uint8 @@ -39,8 +39,8 @@ func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig = period: tupl[0], timeout: tupl[1], downtime: tupl[2], - zkeyHash: tupl[3], - downtimeProduct: tupl[4], + downtimeProduct: tupl[3], + zkeyHash: tupl[4], ) func fromTuple(_: type SlotReservationsConfig, tupl: tuple): SlotReservationsConfig = diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 208dbe07..3c016a59 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -84,7 +84,7 @@ method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} = let period = config.proofs.period return Periodicity(seconds: period) -method proofTimeout*(market: OnChainMarket): Future[UInt256] {.async.} = +method proofTimeout*(market: OnChainMarket): Future[uint64] {.async.} = convertEthersError: let config = await market.config() return config.proofs.timeout @@ -94,7 +94,7 @@ method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} = let config = await market.config() return config.collateral.repairRewardPercentage -method requestDurationLimit*(market: OnChainMarket): Future[UInt256] {.async.} = +method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} = convertEthersError: let config = await market.config() return config.requestDurationLimit @@ -170,7 +170,7 @@ method requestExpiresAt*( return await market.contract.requestExpiry(id) method getHost( - market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 + market: OnChainMarket, 
requestId: RequestId, slotIndex: uint64 ): Future[?Address] {.async.} = convertEthersError: let slotId = slotId(requestId, slotIndex) @@ -196,7 +196,7 @@ method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.as method fillSlot( market: OnChainMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, ) {.async.} = @@ -280,7 +280,7 @@ method canProofBeMarkedAsMissing*( return false method reserveSlot*( - market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 + market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ) {.async.} = convertEthersError: discard await market.contract @@ -293,7 +293,7 @@ method reserveSlot*( .confirm(1) method canReserveSlot*( - market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 + market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ): Future[bool] {.async.} = convertEthersError: return await market.contract.canReserveSlot(requestId, slotIndex) @@ -329,10 +329,10 @@ method subscribeSlotFilled*( method subscribeSlotFilled*( market: OnChainMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, callback: OnSlotFilled, ): Future[MarketSubscription] {.async.} = - proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) = + proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: uint64) = if eventRequestId == requestId and eventSlotIndex == slotIndex: callback(requestId, slotIndex) diff --git a/codex/contracts/marketplace.nim b/codex/contracts/marketplace.nim index 091f45db..761caada 100644 --- a/codex/contracts/marketplace.nim +++ b/codex/contracts/marketplace.nim @@ -60,10 +60,6 @@ proc currentCollateral*( marketplace: Marketplace, id: SlotId ): UInt256 {.contract, view.} -proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.} -proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.} -proc minCollateralThreshold*(marketplace: Marketplace): UInt256 
{.contract, view.} - proc requestStorage*( marketplace: Marketplace, request: StorageRequest ): Confirmable {. @@ -76,10 +72,7 @@ proc requestStorage*( .} proc fillSlot*( - marketplace: Marketplace, - requestId: RequestId, - slotIndex: UInt256, - proof: Groth16Proof, + marketplace: Marketplace, requestId: RequestId, slotIndex: uint64, proof: Groth16Proof ): Confirmable {. contract, errors: [ @@ -155,9 +148,6 @@ proc requestExpiry*( marketplace: Marketplace, requestId: RequestId ): SecondsSince1970 {.contract, view.} -proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.} - -proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.} proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.} proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.} proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.} @@ -176,7 +166,7 @@ proc submitProof*( .} proc markProofAsMissing*( - marketplace: Marketplace, id: SlotId, period: UInt256 + marketplace: Marketplace, id: SlotId, period: uint64 ): Confirmable {. 
contract, errors: [ @@ -187,9 +177,9 @@ proc markProofAsMissing*( .} proc reserveSlot*( - marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256 + marketplace: Marketplace, requestId: RequestId, slotIndex: uint64 ): Confirmable {.contract.} proc canReserveSlot*( - marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256 + marketplace: Marketplace, requestId: RequestId, slotIndex: uint64 ): bool {.contract, view.} diff --git a/codex/contracts/requests.nim b/codex/contracts/requests.nim index 98d8c342..2b3811c3 100644 --- a/codex/contracts/requests.nim +++ b/codex/contracts/requests.nim @@ -9,6 +9,7 @@ import pkg/stew/byteutils import pkg/libp2p/[cid, multicodec] import ../logutils import ../utils/json +import ../clock from ../errors import mapFailure export contractabi @@ -18,16 +19,16 @@ type client* {.serialize.}: Address ask* {.serialize.}: StorageAsk content* {.serialize.}: StorageContent - expiry* {.serialize.}: UInt256 + expiry* {.serialize.}: uint64 nonce*: Nonce StorageAsk* = object - slots* {.serialize.}: uint64 - slotSize* {.serialize.}: UInt256 - duration* {.serialize.}: UInt256 proofProbability* {.serialize.}: UInt256 pricePerBytePerSecond* {.serialize.}: UInt256 collateralPerByte* {.serialize.}: UInt256 + slots* {.serialize.}: uint64 + slotSize* {.serialize.}: uint64 + duration* {.serialize.}: uint64 maxSlotLoss* {.serialize.}: uint64 StorageContent* = object @@ -36,7 +37,7 @@ type Slot* = object request* {.serialize.}: StorageRequest - slotIndex* {.serialize.}: UInt256 + slotIndex* {.serialize.}: uint64 SlotId* = distinct array[32, byte] RequestId* = distinct array[32, byte] @@ -110,12 +111,12 @@ func fromTuple(_: type Slot, tupl: tuple): Slot = func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk = StorageAsk( - slots: tupl[0], - slotSize: tupl[1], - duration: tupl[2], - proofProbability: tupl[3], - pricePerBytePerSecond: tupl[4], - collateralPerByte: tupl[5], + proofProbability: tupl[0], + pricePerBytePerSecond: tupl[1], 
+ collateralPerByte: tupl[2], + slots: tupl[3], + slotSize: tupl[4], + duration: tupl[5], maxSlotLoss: tupl[6], ) @@ -177,21 +178,21 @@ func id*(request: StorageRequest): RequestId = let encoding = AbiEncoder.encode((request,)) RequestId(keccak256.digest(encoding).data) -func slotId*(requestId: RequestId, slotIndex: UInt256): SlotId = +func slotId*(requestId: RequestId, slotIndex: uint64): SlotId = let encoding = AbiEncoder.encode((requestId, slotIndex)) SlotId(keccak256.digest(encoding).data) -func slotId*(request: StorageRequest, slotIndex: UInt256): SlotId = +func slotId*(request: StorageRequest, slotIndex: uint64): SlotId = slotId(request.id, slotIndex) func id*(slot: Slot): SlotId = slotId(slot.request, slot.slotIndex) func pricePerSlotPerSecond*(ask: StorageAsk): UInt256 = - ask.pricePerBytePerSecond * ask.slotSize + ask.pricePerBytePerSecond * ask.slotSize.u256 func pricePerSlot*(ask: StorageAsk): UInt256 = - ask.duration * ask.pricePerSlotPerSecond + ask.duration.u256 * ask.pricePerSlotPerSecond func totalPrice*(ask: StorageAsk): UInt256 = ask.slots.u256 * ask.pricePerSlot @@ -200,7 +201,7 @@ func totalPrice*(request: StorageRequest): UInt256 = request.ask.totalPrice func collateralPerSlot*(ask: StorageAsk): UInt256 = - ask.collateralPerByte * ask.slotSize + ask.collateralPerByte * ask.slotSize.u256 -func size*(ask: StorageAsk): UInt256 = - ask.slots.u256 * ask.slotSize +func size*(ask: StorageAsk): uint64 = + ask.slots * ask.slotSize diff --git a/codex/market.nim b/codex/market.nim index 66f31804..5417c8e1 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -20,13 +20,12 @@ type MarketError* = object of CodexError Subscription* = ref object of RootObj OnRequest* = - proc(id: RequestId, ask: StorageAsk, expiry: UInt256) {.gcsafe, upraises: [].} + proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].} OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].} - OnSlotFilled* = - proc(requestId: RequestId, slotIndex: 
UInt256) {.gcsafe, upraises: [].} - OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} + OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} OnSlotReservationsFull* = - proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} + proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].} OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].} OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].} @@ -37,19 +36,19 @@ type StorageRequested* = object of MarketplaceEvent requestId*: RequestId ask*: StorageAsk - expiry*: UInt256 + expiry*: uint64 SlotFilled* = object of MarketplaceEvent requestId* {.indexed.}: RequestId - slotIndex*: UInt256 + slotIndex*: uint64 SlotFreed* = object of MarketplaceEvent requestId* {.indexed.}: RequestId - slotIndex*: UInt256 + slotIndex*: uint64 SlotReservationsFull* = object of MarketplaceEvent requestId* {.indexed.}: RequestId - slotIndex*: UInt256 + slotIndex*: uint64 RequestFulfilled* = object of MarketplaceEvent requestId* {.indexed.}: RequestId @@ -72,13 +71,13 @@ method getSigner*(market: Market): Future[Address] {.base, async.} = method periodicity*(market: Market): Future[Periodicity] {.base, async.} = raiseAssert("not implemented") -method proofTimeout*(market: Market): Future[UInt256] {.base, async.} = +method proofTimeout*(market: Market): Future[uint64] {.base, async.} = raiseAssert("not implemented") method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} = raiseAssert("not implemented") -method requestDurationLimit*(market: Market): Future[UInt256] {.base, async.} = +method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} = raiseAssert("not implemented") method proofDowntime*(market: Market): Future[uint8] 
{.base, async.} = @@ -125,7 +124,7 @@ method requestExpiresAt*( raiseAssert("not implemented") method getHost*( - market: Market, requestId: RequestId, slotIndex: UInt256 + market: Market, requestId: RequestId, slotIndex: uint64 ): Future[?Address] {.base, async.} = raiseAssert("not implemented") @@ -140,7 +139,7 @@ method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, asy method fillSlot*( market: Market, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, ) {.base, async.} = @@ -180,12 +179,12 @@ method canProofBeMarkedAsMissing*( raiseAssert("not implemented") method reserveSlot*( - market: Market, requestId: RequestId, slotIndex: UInt256 + market: Market, requestId: RequestId, slotIndex: uint64 ) {.base, async.} = raiseAssert("not implemented") method canReserveSlot*( - market: Market, requestId: RequestId, slotIndex: UInt256 + market: Market, requestId: RequestId, slotIndex: uint64 ): Future[bool] {.base, async.} = raiseAssert("not implemented") @@ -205,7 +204,7 @@ method subscribeSlotFilled*( raiseAssert("not implemented") method subscribeSlotFilled*( - market: Market, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled + market: Market, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled ): Future[Subscription] {.base, async.} = raiseAssert("not implemented") diff --git a/codex/node.nim b/codex/node.nim index 062ec2ce..e1647f3e 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -430,13 +430,13 @@ proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} = proc setupRequest( self: CodexNodeRef, cid: Cid, - duration: UInt256, + duration: uint64, proofProbability: UInt256, nodes: uint, tolerance: uint, pricePerBytePerSecond: UInt256, collateralPerByte: UInt256, - expiry: UInt256, + expiry: uint64, ): Future[?!StorageRequest] {.async.} = ## Setup slots for a given dataset ## @@ -494,7 +494,7 @@ proc setupRequest( request = StorageRequest( 
ask: StorageAsk( slots: verifiable.numSlots.uint64, - slotSize: builder.slotBytes.uint.u256, + slotSize: builder.slotBytes.uint64, duration: duration, proofProbability: proofProbability, pricePerBytePerSecond: pricePerBytePerSecond, @@ -511,13 +511,13 @@ proc setupRequest( proc requestStorage*( self: CodexNodeRef, cid: Cid, - duration: UInt256, + duration: uint64, proofProbability: UInt256, nodes: uint, tolerance: uint, pricePerBytePerSecond: UInt256, collateralPerByte: UInt256, - expiry: UInt256, + expiry: uint64, ): Future[?!PurchaseId] {.async.} = ## Initiate a request for storage sequence, this might ## be a multistep procedure. @@ -531,7 +531,7 @@ proc requestStorage*( pricePerBytePerSecond = pricePerBytePerSecond proofProbability = proofProbability collateralPerByte = collateralPerByte - expiry = expiry.truncate(int64) + expiry = expiry now = self.clock.now trace "Received a request for storage!" @@ -553,7 +553,7 @@ proc requestStorage*( success purchase.id proc onStore( - self: CodexNodeRef, request: StorageRequest, slotIdx: UInt256, blocksCb: BlocksCb + self: CodexNodeRef, request: StorageRequest, slotIdx: uint64, blocksCb: BlocksCb ): Future[?!void] {.async.} = ## store data in local storage ## @@ -575,11 +575,9 @@ proc onStore( trace "Unable to create slots builder", err = err.msg return failure(err) - let - slotIdx = slotIdx.truncate(int) - expiry = request.expiry.toSecondsSince1970 + let expiry = request.expiry - if slotIdx > manifest.slotRoots.high: + if slotIdx > manifest.slotRoots.high.uint64: trace "Slot index not in manifest", slotIdx return failure(newException(CodexError, "Slot index not in manifest")) @@ -587,7 +585,7 @@ proc onStore( trace "Updating expiry for blocks", blocks = blocks.len let ensureExpiryFutures = - blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry)) + blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970)) if updateExpiryErr =? 
(await allFutureResult(ensureExpiryFutures)).errorOption: return failure(updateExpiryErr) @@ -603,7 +601,11 @@ proc onStore( trace "Unable to create indexing strategy from protected manifest", err = err.msg return failure(err) - without blksIter =? indexer.getIndicies(slotIdx).catch, err: + if slotIdx > int.high.uint64: + error "Cannot cast slot index to int", slotIndex = slotIdx + return + + without blksIter =? indexer.getIndicies(slotIdx.int).catch, err: trace "Unable to get indicies from strategy", err = err.msg return failure(err) @@ -613,13 +615,13 @@ proc onStore( trace "Unable to fetch blocks", err = err.msg return failure(err) - without slotRoot =? (await builder.buildSlot(slotIdx.Natural)), err: + without slotRoot =? (await builder.buildSlot(slotIdx.int)), err: trace "Unable to build slot", err = err.msg return failure(err) trace "Slot successfully retrieved and reconstructed" - if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx.int]: + if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]: trace "Slot root mismatch", manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid() return failure(newException(CodexError, "Slot root mismatch")) @@ -636,7 +638,7 @@ proc onProve( let cidStr = $slot.request.content.cid - slotIdx = slot.slotIndex.truncate(Natural) + slotIdx = slot.slotIndex logScope: cid = cidStr @@ -657,7 +659,8 @@ proc onProve( return failure(err) when defined(verify_circuit): - without (inputs, proof) =? await prover.prove(slotIdx, manifest, challenge), err: + without (inputs, proof) =? await prover.prove(slotIdx.int, manifest, challenge), + err: error "Unable to generate proof", err = err.msg return failure(err) @@ -671,7 +674,7 @@ proc onProve( trace "Proof verified successfully" else: - without (_, proof) =? await prover.prove(slotIdx, manifest, challenge), err: + without (_, proof) =? 
await prover.prove(slotIdx.int, manifest, challenge), err: error "Unable to generate proof", err = err.msg return failure(err) @@ -688,7 +691,7 @@ proc onExpiryUpdate( ): Future[?!void] {.async.} = return await self.updateExpiry(rootCid, expiry) -proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: UInt256) = +proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: uint64) = # TODO: remove data from local storage discard @@ -704,7 +707,7 @@ proc start*(self: CodexNodeRef) {.async.} = if hostContracts =? self.contracts.host: hostContracts.sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] = self.onStore(request, slot, onBatch) @@ -713,7 +716,7 @@ proc start*(self: CodexNodeRef) {.async.} = ): Future[?!void] = self.onExpiryUpdate(rootCid, expiry) - hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: UInt256) = + hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: uint64) = # TODO: remove data from local storage self.onClear(request, slotIndex) diff --git a/codex/periods.nim b/codex/periods.nim index 429931ee..cbb860e2 100644 --- a/codex/periods.nim +++ b/codex/periods.nim @@ -2,10 +2,10 @@ import pkg/stint type Periodicity* = object - seconds*: UInt256 + seconds*: uint64 - Period* = UInt256 - Timestamp* = UInt256 + Period* = uint64 + Timestamp* = uint64 func periodOf*(periodicity: Periodicity, timestamp: Timestamp): Period = timestamp div periodicity.seconds diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 8ba1abae..e5c8d195 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -463,7 +463,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = Http400, "Total size must be larger then zero", headers = headers ) - if not reservations.hasAvailable(restAv.totalSize.truncate(uint)): + if not reservations.hasAvailable(restAv.totalSize): return 
RestApiResponse.error(Http422, "Not enough storage quota", headers = headers) diff --git a/codex/rest/json.nim b/codex/rest/json.nim index 9bc7664e..c221ba73 100644 --- a/codex/rest/json.nim +++ b/codex/rest/json.nim @@ -13,11 +13,11 @@ export json type StorageRequestParams* = object - duration* {.serialize.}: UInt256 + duration* {.serialize.}: uint64 proofProbability* {.serialize.}: UInt256 pricePerBytePerSecond* {.serialize.}: UInt256 collateralPerByte* {.serialize.}: UInt256 - expiry* {.serialize.}: ?UInt256 + expiry* {.serialize.}: ?uint64 nodes* {.serialize.}: ?uint tolerance* {.serialize.}: ?uint @@ -28,16 +28,16 @@ type error* {.serialize.}: ?string RestAvailability* = object - totalSize* {.serialize.}: UInt256 - duration* {.serialize.}: UInt256 + totalSize* {.serialize.}: uint64 + duration* {.serialize.}: uint64 minPricePerBytePerSecond* {.serialize.}: UInt256 totalCollateral* {.serialize.}: UInt256 - freeSize* {.serialize.}: ?UInt256 + freeSize* {.serialize.}: ?uint64 RestSalesAgent* = object state* {.serialize.}: string requestId* {.serialize.}: RequestId - slotIndex* {.serialize.}: UInt256 + slotIndex* {.serialize.}: uint64 request* {.serialize.}: ?StorageRequest reservation* {.serialize.}: ?Reservation diff --git a/codex/sales.nim b/codex/sales.nim index 4bf2d13c..91d882b8 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -150,16 +150,16 @@ proc cleanUp( ).errorOption: error "failure deleting reservation", error = deleteErr.msg + if data.slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16", slotIndex = data.slotIndex + return + # Re-add items back into the queue to prevent small availabilities from # draining the queue. Seen items will be ordered last. if reprocessSlot and request =? 
data.request: let queue = sales.context.slotQueue var seenItem = SlotQueueItem.init( - data.requestId, - data.slotIndex.truncate(uint16), - data.ask, - request.expiry, - seen = true, + data.requestId, data.slotIndex.uint16, data.ask, request.expiry, seen = true ) trace "pushing ignored item to queue, marked as seen" if err =? queue.push(seenItem).errorOption: @@ -172,7 +172,7 @@ proc cleanUp( processing.complete() proc filled( - sales: Sales, request: StorageRequest, slotIndex: UInt256, processing: Future[void] + sales: Sales, request: StorageRequest, slotIndex: uint64, processing: Future[void] ) = if onSale =? sales.context.onSale: onSale(request, slotIndex) @@ -184,16 +184,15 @@ proc filled( proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) = debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex - let agent = newSalesAgent( - sales.context, item.requestId, item.slotIndex.u256, none StorageRequest - ) + let agent = + newSalesAgent(sales.context, item.requestId, item.slotIndex, none StorageRequest) agent.onCleanUp = proc( returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done) - agent.onFilled = some proc(request: StorageRequest, slotIndex: UInt256) = + agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) = sales.filled(request, slotIndex, done) agent.start(SalePreparing()) @@ -283,7 +282,7 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} = queue.unpause() proc onStorageRequested( - sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: UInt256 + sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64 ) = logScope: topics = "marketplace sales onStorageRequested" @@ -312,7 +311,7 @@ proc onStorageRequested( else: warn "Error adding request to SlotQueue", error = err.msg -proc onSlotFreed(sales: Sales, requestId: RequestId, 
slotIndex: UInt256) = +proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) = logScope: topics = "marketplace sales onSlotFreed" requestId @@ -325,8 +324,12 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) = let market = context.market let queue = context.slotQueue - # first attempt to populate request using existing slot metadata in queue - without var found =? queue.populateItem(requestId, slotIndex.truncate(uint16)): + if slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16, value = ", slotIndex + return + + # first attempt to populate request using existing metadata in queue + without var found =? queue.populateItem(requestId, slotIndex.uint16): trace "no existing request metadata, getting request info from contract" # if there's no existing slot for that request, retrieve the request # from the contract. @@ -335,7 +338,7 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) = error "unknown request in contract" return - found = SlotQueueItem.init(request, slotIndex.truncate(uint16)) + found = SlotQueueItem.init(request, slotIndex.uint16) except CancelledError: discard # do not propagate as addSlotToQueue was asyncSpawned except CatchableError as e: @@ -353,7 +356,7 @@ proc subscribeRequested(sales: Sales) {.async.} = let context = sales.context let market = context.market - proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: UInt256) = + proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: uint64) = sales.onStorageRequested(requestId, ask, expiry) try: @@ -426,9 +429,13 @@ proc subscribeSlotFilled(sales: Sales) {.async.} = let market = context.market let queue = context.slotQueue - proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + proc onSlotFilled(requestId: RequestId, slotIndex: uint64) = + if slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16, value = ", slotIndex + return + trace "slot filled, 
removing from slot queue", requestId, slotIndex - queue.delete(requestId, slotIndex.truncate(uint16)) + queue.delete(requestId, slotIndex.uint16) for agent in sales.agents: agent.onSlotFilled(requestId, slotIndex) @@ -445,7 +452,7 @@ proc subscribeSlotFreed(sales: Sales) {.async.} = let context = sales.context let market = context.market - proc onSlotFreed(requestId: RequestId, slotIndex: UInt256) = + proc onSlotFreed(requestId: RequestId, slotIndex: uint64) = sales.onSlotFreed(requestId, slotIndex) try: @@ -461,9 +468,13 @@ proc subscribeSlotReservationsFull(sales: Sales) {.async.} = let market = context.market let queue = context.slotQueue - proc onSlotReservationsFull(requestId: RequestId, slotIndex: UInt256) = + proc onSlotReservationsFull(requestId: RequestId, slotIndex: uint64) = + if slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16, value = ", slotIndex + return + trace "reservations for slot full, removing from slot queue", requestId, slotIndex - queue.delete(requestId, slotIndex.truncate(uint16)) + queue.delete(requestId, slotIndex.uint16) try: let sub = await market.subscribeSlotReservationsFull(onSlotReservationsFull) diff --git a/codex/sales/reservations.nim b/codex/sales/reservations.nim index 4f48e057..a64cb602 100644 --- a/codex/sales/reservations.nim +++ b/codex/sales/reservations.nim @@ -64,9 +64,9 @@ type SomeStorableId = AvailabilityId | ReservationId Availability* = ref object id* {.serialize.}: AvailabilityId - totalSize* {.serialize.}: UInt256 - freeSize* {.serialize.}: UInt256 - duration* {.serialize.}: UInt256 + totalSize* {.serialize.}: uint64 + freeSize* {.serialize.}: uint64 + duration* {.serialize.}: uint64 minPricePerBytePerSecond* {.serialize.}: UInt256 totalCollateral {.serialize.}: UInt256 totalRemainingCollateral* {.serialize.}: UInt256 @@ -74,9 +74,9 @@ type Reservation* = ref object id* {.serialize.}: ReservationId availabilityId* {.serialize.}: AvailabilityId - size* {.serialize.}: UInt256 + size* 
{.serialize.}: uint64 requestId* {.serialize.}: RequestId - slotIndex* {.serialize.}: UInt256 + slotIndex* {.serialize.}: uint64 Reservations* = ref object of RootObj availabilityLock: AsyncLock @@ -123,9 +123,9 @@ proc new*(T: type Reservations, repo: RepoStore): Reservations = proc init*( _: type Availability, - totalSize: UInt256, - freeSize: UInt256, - duration: UInt256, + totalSize: uint64, + freeSize: uint64, + duration: uint64, minPricePerBytePerSecond: UInt256, totalCollateral: UInt256, ): Availability = @@ -151,9 +151,9 @@ proc `totalCollateral=`*(self: Availability, value: UInt256) {.inline.} = proc init*( _: type Reservation, availabilityId: AvailabilityId, - size: UInt256, + size: uint64, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, ): Reservation = var id: array[32, byte] doAssert randomBytes(id) == 32 @@ -206,7 +206,7 @@ func key*(availability: Availability): ?!Key = return availability.id.key func maxCollateralPerByte*(availability: Availability): UInt256 = - return availability.totalRemainingCollateral div availability.freeSize + return availability.totalRemainingCollateral div availability.freeSize.stuint(256) func key*(reservation: Reservation): ?!Key = return key(reservation.id, reservation.availabilityId) @@ -289,16 +289,12 @@ proc updateAvailability( trace "totalSize changed, updating repo reservation" if oldAvailability.totalSize < obj.totalSize: # storage added if reserveErr =? ( - await self.repo.reserve( - (obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes - ) + await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).NBytes) ).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) elif oldAvailability.totalSize > obj.totalSize: # storage removed if reserveErr =? 
( - await self.repo.release( - (oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes - ) + await self.repo.release((oldAvailability.totalSize - obj.totalSize).NBytes) ).errorOption: return failure(reserveErr.toErr(ReleaseFailedError)) @@ -361,7 +357,7 @@ proc deleteReservation*( else: return failure(error) - if reservation.size > 0.u256: + if reservation.size > 0.uint64: trace "returning remaining reservation bytes to availability", size = reservation.size @@ -389,8 +385,8 @@ proc deleteReservation*( proc createAvailability*( self: Reservations, - size: UInt256, - duration: UInt256, + size: uint64, + duration: uint64, minPricePerBytePerSecond: UInt256, totalCollateral: UInt256, ): Future[?!Availability] {.async.} = @@ -399,7 +395,7 @@ proc createAvailability*( let availability = Availability.init(size, size, duration, minPricePerBytePerSecond, totalCollateral) - let bytes = availability.freeSize.truncate(uint) + let bytes = availability.freeSize if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) @@ -418,9 +414,9 @@ proc createAvailability*( method createReservation*( self: Reservations, availabilityId: AvailabilityId, - slotSize: UInt256, + slotSize: uint64, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, collateralPerByte: UInt256, ): Future[?!Reservation] {.async, base.} = withLock(self.availabilityLock): @@ -450,7 +446,7 @@ method createReservation*( availability.freeSize -= slotSize # adjust the remaining totalRemainingCollateral - availability.totalRemainingCollateral -= slotSize * collateralPerByte + availability.totalRemainingCollateral -= slotSize.stuint(256) * collateralPerByte # update availability with reduced size trace "Updating availability with reduced size" @@ -475,7 +471,7 @@ proc returnBytesToAvailability*( self: Reservations, availabilityId: AvailabilityId, reservationId: ReservationId, - bytes: UInt256, + bytes: uint64, ): Future[?!void] 
{.async.} = logScope: reservationId @@ -502,8 +498,7 @@ proc returnBytesToAvailability*( # First lets see if we can re-reserve the bytes, if the Repo's quota # is depleted then we will fail-fast as there is nothing to be done atm. - if reserveErr =? - (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + if reserveErr =? (await self.repo.reserve(bytesToBeReturned.NBytes)).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) without availabilityKey =? availabilityId.key, error: @@ -517,8 +512,7 @@ proc returnBytesToAvailability*( # Update availability with returned size if updateErr =? (await self.updateAvailability(availability)).errorOption: trace "Rolling back returning bytes" - if rollbackErr =? - (await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + if rollbackErr =? (await self.repo.release(bytesToBeReturned.NBytes)).errorOption: rollbackErr.parent = updateErr return failure(rollbackErr) @@ -546,7 +540,7 @@ proc release*( without var reservation =? (await self.get(key, Reservation)), error: return failure(error) - if reservation.size < bytes.u256: + if reservation.size < bytes: let error = newException( BytesOutOfBoundsError, "trying to release an amount of bytes that is greater than the total size of the Reservation", @@ -556,7 +550,7 @@ proc release*( if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption: return failure(releaseErr.toErr(ReleaseFailedError)) - reservation.size -= bytes.u256 + reservation.size -= bytes # persist partially used Reservation with updated size if err =? (await self.update(reservation)).errorOption: @@ -643,7 +637,8 @@ proc all*( proc findAvailability*( self: Reservations, - size, duration, pricePerBytePerSecond, collateralPerByte: UInt256, + size, duration: uint64, + pricePerBytePerSecond, collateralPerByte: UInt256, ): Future[?Availability] {.async.} = without storables =? 
(await self.storables(Availability)), e: error "failed to get all storables", error = e.msg diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index f04182aa..e6328a83 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -29,7 +29,7 @@ type OnCleanUp* = proc( returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none ): Future[void] {.gcsafe, upraises: [].} - OnFilled* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].} SalesAgentError = object of CodexError AllSlotsFilledError* = object of SalesAgentError @@ -40,7 +40,7 @@ func `==`*(a, b: SalesAgent): bool = proc newSalesAgent*( context: SalesContext, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, request: ?StorageRequest, ): SalesAgent = var agent = SalesAgent.new() @@ -121,7 +121,7 @@ method onFailed*( agent.schedule(failedEvent(request)) method onSlotFilled*( - agent: SalesAgent, requestId: RequestId, slotIndex: UInt256 + agent: SalesAgent, requestId: RequestId, slotIndex: uint64 ) {.base, gcsafe, upraises: [].} = if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex: agent.schedule(slotFilledEvent(requestId, slotIndex)) diff --git a/codex/sales/salescontext.nim b/codex/sales/salescontext.nim index 95f06c04..6e6a3568 100644 --- a/codex/sales/salescontext.nim +++ b/codex/sales/salescontext.nim @@ -26,7 +26,7 @@ type BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].} OnStore* = proc( - request: StorageRequest, slot: UInt256, blocksCb: BlocksCb + request: StorageRequest, slot: uint64, blocksCb: BlocksCb ): Future[?!void] {.gcsafe, upraises: [].} OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {. gcsafe, upraises: [] @@ -34,5 +34,5 @@ type OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {. 
gcsafe, upraises: [] .} - OnClear* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} - OnSale* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].} + OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].} diff --git a/codex/sales/salesdata.nim b/codex/sales/salesdata.nim index 995c7a4b..de8eccb5 100644 --- a/codex/sales/salesdata.nim +++ b/codex/sales/salesdata.nim @@ -7,6 +7,6 @@ type SalesData* = ref object requestId*: RequestId ask*: StorageAsk request*: ?StorageRequest - slotIndex*: UInt256 + slotIndex*: uint64 cancelled*: Future[void] reservation*: ?Reservation diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim index 332ec9e0..a032d46b 100644 --- a/codex/sales/slotqueue.nim +++ b/codex/sales/slotqueue.nim @@ -5,6 +5,7 @@ import pkg/questionable import pkg/questionable/results import pkg/upraises import ../errors +import ../clock import ../logutils import ../rng import ../utils @@ -30,11 +31,11 @@ type SlotQueueItem* = object requestId: RequestId slotIndex: uint16 - slotSize: UInt256 - duration: UInt256 + slotSize: uint64 + duration: uint64 pricePerBytePerSecond: UInt256 collateralPerByte: UInt256 - expiry: UInt256 + expiry: uint64 seen: bool # don't need to -1 to prevent overflow when adding 1 (to always allow push) @@ -135,7 +136,7 @@ proc init*( requestId: RequestId, slotIndex: uint16, ask: StorageAsk, - expiry: UInt256, + expiry: uint64, seen = false, ): SlotQueueItem = SlotQueueItem( @@ -155,7 +156,7 @@ proc init*( SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry) proc init*( - _: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: UInt256 + _: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: uint64 ): seq[SlotQueueItem] = if not ask.slots.inRange: raise newException(SlotsOutOfRangeError, "Too many slots") @@ -182,10 
+183,10 @@ proc requestId*(self: SlotQueueItem): RequestId = proc slotIndex*(self: SlotQueueItem): uint16 = self.slotIndex -proc slotSize*(self: SlotQueueItem): UInt256 = +proc slotSize*(self: SlotQueueItem): uint64 = self.slotSize -proc duration*(self: SlotQueueItem): UInt256 = +proc duration*(self: SlotQueueItem): uint64 = self.duration proc pricePerBytePerSecond*(self: SlotQueueItem): UInt256 = diff --git a/codex/sales/statemachine.nim b/codex/sales/statemachine.nim index 6d3c7101..ec770ece 100644 --- a/codex/sales/statemachine.nim +++ b/codex/sales/statemachine.nim @@ -25,7 +25,7 @@ method onFailed*( discard method onSlotFilled*( - state: SaleState, requestId: RequestId, slotIndex: UInt256 + state: SaleState, requestId: RequestId, slotIndex: uint64 ): ?State {.base, upraises: [].} = discard @@ -37,6 +37,6 @@ proc failedEvent*(request: StorageRequest): Event = return proc(state: State): ?State = SaleState(state).onFailed(request) -proc slotFilledEvent*(requestId: RequestId, slotIndex: UInt256): Event = +proc slotFilledEvent*(requestId: RequestId, slotIndex: uint64): Event = return proc(state: State): ?State = SaleState(state).onSlotFilled(requestId, slotIndex) diff --git a/codex/sales/states/downloading.nim b/codex/sales/states/downloading.nim index cb991dc8..39137545 100644 --- a/codex/sales/states/downloading.nim +++ b/codex/sales/states/downloading.nim @@ -28,7 +28,7 @@ method onFailed*(state: SaleDownloading, request: StorageRequest): ?State = return some State(SaleFailed()) method onSlotFilled*( - state: SaleDownloading, requestId: RequestId, slotIndex: UInt256 + state: SaleDownloading, requestId: RequestId, slotIndex: uint64 ): ?State = return some State(SaleFilled()) diff --git a/codex/sales/states/initialproving.nim b/codex/sales/states/initialproving.nim index 3b35ba90..57e8cc2c 100644 --- a/codex/sales/states/initialproving.nim +++ b/codex/sales/states/initialproving.nim @@ -25,9 +25,9 @@ method onFailed*(state: SaleInitialProving, request: 
StorageRequest): ?State = proc waitUntilNextPeriod(clock: Clock, periodicity: Periodicity) {.async.} = trace "Waiting until next period" - let period = periodicity.periodOf(clock.now().u256) - let periodEnd = periodicity.periodEnd(period).truncate(int64) - await clock.waitUntil(periodEnd + 1) + let period = periodicity.periodOf(clock.now().Timestamp) + let periodEnd = periodicity.periodEnd(period) + await clock.waitUntil((periodEnd + 1).toSecondsSince1970) proc waitForStableChallenge(market: Market, clock: Clock, slotId: SlotId) {.async.} = let periodicity = await market.periodicity() diff --git a/codex/sales/states/preparing.nim b/codex/sales/states/preparing.nim index 7509558c..443aee0b 100644 --- a/codex/sales/states/preparing.nim +++ b/codex/sales/states/preparing.nim @@ -33,7 +33,7 @@ method onFailed*(state: SalePreparing, request: StorageRequest): ?State = return some State(SaleFailed()) method onSlotFilled*( - state: SalePreparing, requestId: RequestId, slotIndex: UInt256 + state: SalePreparing, requestId: RequestId, slotIndex: uint64 ): ?State = return some State(SaleFilled()) diff --git a/codex/sales/states/proving.nim b/codex/sales/states/proving.nim index 759cad0c..690e9136 100644 --- a/codex/sales/states/proving.nim +++ b/codex/sales/states/proving.nim @@ -46,7 +46,7 @@ proc proveLoop( market: Market, clock: Clock, request: StorageRequest, - slotIndex: UInt256, + slotIndex: uint64, onProve: OnProve, ) {.async.} = let slot = Slot(request: request, slotIndex: slotIndex) @@ -60,12 +60,12 @@ proc proveLoop( proc getCurrentPeriod(): Future[Period] {.async.} = let periodicity = await market.periodicity() - return periodicity.periodOf(clock.now().u256) + return periodicity.periodOf(clock.now().Timestamp) proc waitUntilPeriod(period: Period) {.async.} = let periodicity = await market.periodicity() # Ensure that we're past the period boundary by waiting an additional second - await clock.waitUntil(periodicity.periodStart(period).truncate(int64) + 1) + await 
clock.waitUntil((periodicity.periodStart(period) + 1).toSecondsSince1970) while true: let currentPeriod = await getCurrentPeriod() diff --git a/codex/sales/states/provingsimulated.nim b/codex/sales/states/provingsimulated.nim index a797e113..b8a3e9ce 100644 --- a/codex/sales/states/provingsimulated.nim +++ b/codex/sales/states/provingsimulated.nim @@ -20,7 +20,7 @@ when codex_enable_proof_failures: failEveryNProofs*: int proofCount: int - proc onSubmitProofError(error: ref CatchableError, period: UInt256, slotId: SlotId) = + proc onSubmitProofError(error: ref CatchableError, period: Period, slotId: SlotId) = error "Submitting invalid proof failed", period, slotId, msg = error.msgDetail method prove*( diff --git a/codex/validation.nim b/codex/validation.nim index 6659bc5b..18a444a6 100644 --- a/codex/validation.nim +++ b/codex/validation.nim @@ -2,6 +2,7 @@ import std/sets import std/sequtils import pkg/chronos import pkg/questionable/results +import pkg/stew/endians2 import ./validationconfig import ./market @@ -19,7 +20,7 @@ type Validation* = ref object subscriptions: seq[Subscription] running: Future[void] periodicity: Periodicity - proofTimeout: UInt256 + proofTimeout: uint64 config: ValidationConfig logScope: @@ -33,18 +34,19 @@ proc new*( proc slots*(validation: Validation): seq[SlotId] = validation.slots.toSeq -proc getCurrentPeriod(validation: Validation): UInt256 = - return validation.periodicity.periodOf(validation.clock.now().u256) +proc getCurrentPeriod(validation: Validation): Period = + return validation.periodicity.periodOf(validation.clock.now().Timestamp) proc waitUntilNextPeriod(validation: Validation) {.async.} = let period = validation.getCurrentPeriod() let periodEnd = validation.periodicity.periodEnd(period) trace "Waiting until next period", currentPeriod = period - await validation.clock.waitUntil(periodEnd.truncate(int64) + 1) + await validation.clock.waitUntil((periodEnd + 1).toSecondsSince1970) func groupIndexForSlotId*(slotId: SlotId, 
validationGroups: ValidationGroups): uint16 = - let slotIdUInt256 = UInt256.fromBytesBE(slotId.toArray) - (slotIdUInt256 mod validationGroups.u256).truncate(uint16) + let a = slotId.toArray + let slotIdInt64 = uint64.fromBytesBE(a) + (slotIdInt64 mod uint64(validationGroups)).uint16 func maxSlotsConstraintRespected(validation: Validation): bool = validation.config.maxSlots == 0 or validation.slots.len < validation.config.maxSlots @@ -55,7 +57,7 @@ func shouldValidateSlot(validation: Validation, slotId: SlotId): bool = groupIndexForSlotId(slotId, validationGroups) == validation.config.groupIndex proc subscribeSlotFilled(validation: Validation) {.async.} = - proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + proc onSlotFilled(requestId: RequestId, slotIndex: uint64) = if not validation.maxSlotsConstraintRespected: return let slotId = slotId(requestId, slotIndex) @@ -113,17 +115,13 @@ proc run(validation: Validation) {.async: (raises: []).} = except CatchableError as e: error "Validation failed", msg = e.msg -proc epochForDurationBackFromNow( - validation: Validation, duration: Duration -): SecondsSince1970 = - return validation.clock.now - duration.secs +proc findEpoch(validation: Validation, secondsAgo: uint64): SecondsSince1970 = + return validation.clock.now - secondsAgo.int64 proc restoreHistoricalState(validation: Validation) {.async.} = trace "Restoring historical state..." 
let requestDurationLimit = await validation.market.requestDurationLimit - let startTimeEpoch = validation.epochForDurationBackFromNow( - seconds(requestDurationLimit.truncate(int64)) - ) + let startTimeEpoch = validation.findEpoch(secondsAgo = requestDurationLimit) let slotFilledEvents = await validation.market.queryPastSlotFilledEvents(fromTime = startTimeEpoch) for event in slotFilledEvents: diff --git a/tests/codex/examples.nim b/tests/codex/examples.nim index 22a411c2..ed1dd52a 100644 --- a/tests/codex/examples.nim +++ b/tests/codex/examples.nim @@ -68,19 +68,19 @@ proc example*(_: type MultiHash, mcodec = Sha256HashCodec): MultiHash = proc example*( _: type Availability, collateralPerByte = uint8.example.u256 ): Availability = - let totalSize = uint16.example.u256 + let totalSize = uint16.example.uint64 Availability.init( totalSize = totalSize, - freeSize = uint16.example.u256, - duration = uint16.example.u256, + freeSize = uint16.example.uint64, + duration = uint16.example.uint64, minPricePerBytePerSecond = uint8.example.u256, - totalCollateral = totalSize * collateralPerByte, + totalCollateral = totalSize.u256 * collateralPerByte, ) proc example*(_: type Reservation): Reservation = Reservation.init( availabilityId = AvailabilityId(array[32, byte].example), - size = uint16.example.u256, + size = uint16.example.uint64, slotId = SlotId.example, ) diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index 3638d11e..48b20f28 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -57,7 +57,7 @@ type MockSlot* = object requestId*: RequestId host*: Address - slotIndex*: UInt256 + slotIndex*: uint64 proof*: Groth16Proof timestamp: ?SecondsSince1970 collateral*: UInt256 @@ -84,7 +84,7 @@ type SlotFilledSubscription* = ref object of Subscription market: MockMarket requestId: ?RequestId - slotIndex: ?UInt256 + slotIndex: ?uint64 callback: OnSlotFilled SlotFreedSubscription* = ref object of Subscription 
@@ -126,10 +126,13 @@ proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket = validatorRewardPercentage: 20, ), proofs: ProofConfig( - period: 10.u256, timeout: 5.u256, downtime: 64.uint8, downtimeProduct: 67.uint8 + period: 10.Period, + timeout: 5.uint64, + downtime: 64.uint8, + downtimeProduct: 67.uint8, ), reservations: SlotReservationsConfig(maxReservations: 3), - requestDurationLimit: (60 * 60 * 24 * 30).u256, + requestDurationLimit: (60 * 60 * 24 * 30).uint64, ) MockMarket( signer: Address.example, config: config, canReserveSlot: true, clock: clock @@ -141,10 +144,10 @@ method getSigner*(market: MockMarket): Future[Address] {.async.} = method periodicity*(mock: MockMarket): Future[Periodicity] {.async.} = return Periodicity(seconds: mock.config.proofs.period) -method proofTimeout*(market: MockMarket): Future[UInt256] {.async.} = +method proofTimeout*(market: MockMarket): Future[uint64] {.async.} = return market.config.proofs.timeout -method requestDurationLimit*(market: MockMarket): Future[UInt256] {.async.} = +method requestDurationLimit*(market: MockMarket): Future[uint64] {.async.} = return market.config.requestDurationLimit method proofDowntime*(market: MockMarket): Future[uint8] {.async.} = @@ -176,9 +179,9 @@ method getRequest*( return some request return none StorageRequest -method getActiveSlot*(market: MockMarket, slotId: SlotId): Future[?Slot] {.async.} = +method getActiveSlot*(market: MockMarket, id: SlotId): Future[?Slot] {.async.} = for slot in market.filled: - if slotId(slot.requestId, slot.slotIndex) == slotId and + if slotId(slot.requestId, slot.slotIndex) == id and request =? 
await market.getRequest(slot.requestId): return some Slot(request: request, slotIndex: slot.slotIndex) return none Slot @@ -204,7 +207,7 @@ method requestExpiresAt*( return market.requestExpiry[id] method getHost*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 + market: MockMarket, requestId: RequestId, slotIndex: uint64 ): Future[?Address] {.async.} = for slot in market.filled: if slot.requestId == requestId and slot.slotIndex == slotIndex: @@ -219,7 +222,7 @@ method currentCollateral*( return slot.collateral return 0.u256 -proc emitSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: UInt256) = +proc emitSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: uint64) = var subscriptions = market.subscriptions.onSlotFilled for subscription in subscriptions: let requestMatches = @@ -229,13 +232,13 @@ proc emitSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: UInt25 if requestMatches and slotMatches: subscription.callback(requestId, slotIndex) -proc emitSlotFreed*(market: MockMarket, requestId: RequestId, slotIndex: UInt256) = +proc emitSlotFreed*(market: MockMarket, requestId: RequestId, slotIndex: uint64) = var subscriptions = market.subscriptions.onSlotFreed for subscription in subscriptions: subscription.callback(requestId, slotIndex) proc emitSlotReservationsFull*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 + market: MockMarket, requestId: RequestId, slotIndex: uint64 ) = var subscriptions = market.subscriptions.onSlotReservationsFull for subscription in subscriptions: @@ -262,7 +265,7 @@ proc emitRequestFailed*(market: MockMarket, requestId: RequestId) = proc fillSlot*( market: MockMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, host: Address, collateral = 0.u256, @@ -282,7 +285,7 @@ proc fillSlot*( method fillSlot*( market: MockMarket, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, ) 
{.async.} = @@ -346,13 +349,13 @@ method canProofBeMarkedAsMissing*( return market.canBeMarkedAsMissing.contains(id) method reserveSlot*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 + market: MockMarket, requestId: RequestId, slotIndex: uint64 ) {.async.} = if error =? market.reserveSlotThrowError: raise error method canReserveSlot*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256 + market: MockMarket, requestId: RequestId, slotIndex: uint64 ): Future[bool] {.async.} = return market.canReserveSlot @@ -395,7 +398,7 @@ method subscribeSlotFilled*( return subscription method subscribeSlotFilled*( - market: MockMarket, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled + market: MockMarket, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled ): Future[Subscription] {.async.} = let subscription = SlotFilledSubscription( market: market, diff --git a/tests/codex/helpers/mockreservations.nim b/tests/codex/helpers/mockreservations.nim index 060790a8..1bc76a09 100644 --- a/tests/codex/helpers/mockreservations.nim +++ b/tests/codex/helpers/mockreservations.nim @@ -24,9 +24,9 @@ proc setCreateReservationThrowError*( method createReservation*( self: MockReservations, availabilityId: AvailabilityId, - slotSize: UInt256, + slotSize: uint64, requestId: RequestId, - slotIndex: UInt256, + slotIndex: uint64, collateralPerByte: UInt256, ): Future[?!Reservation] {.async.} = if self.createReservationThrowBytesOutOfBoundsError: diff --git a/tests/codex/helpers/mocksalesagent.nim b/tests/codex/helpers/mocksalesagent.nim index 8374ae1d..d5de265a 100644 --- a/tests/codex/helpers/mocksalesagent.nim +++ b/tests/codex/helpers/mocksalesagent.nim @@ -12,6 +12,6 @@ method onFailed*(agent: SalesAgent, requestId: RequestId) = failedCalled = true method onSlotFilled*( - agent: SalesAgent, requestId: RequestId, slotIndex: UInt256 + agent: SalesAgent, requestId: RequestId, slotIndex: uint64 ) {.base.} = slotFilledCalled = true diff --git 
a/tests/codex/helpers/mockslotqueueitem.nim b/tests/codex/helpers/mockslotqueueitem.nim index bc0c1047..7a1505ec 100644 --- a/tests/codex/helpers/mockslotqueueitem.nim +++ b/tests/codex/helpers/mockslotqueueitem.nim @@ -4,11 +4,11 @@ import pkg/codex/sales/slotqueue type MockSlotQueueItem* = object requestId*: RequestId slotIndex*: uint16 - slotSize*: UInt256 - duration*: UInt256 + slotSize*: uint64 + duration*: uint64 pricePerBytePerSecond*: UInt256 collateralPerByte*: UInt256 - expiry*: UInt256 + expiry*: uint64 seen*: bool proc toSlotQueueItem*(item: MockSlotQueueItem): SlotQueueItem = diff --git a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim index 0930d925..11f4f273 100644 --- a/tests/codex/node/testcontracts.nim +++ b/tests/codex/node/testcontracts.nim @@ -116,7 +116,8 @@ asyncchecksuite "Test Node - Host contracts": let onStore = !sales.onStore var request = StorageRequest.example request.content.cid = verifiableBlock.cid - request.expiry = (getTime() + DefaultBlockTtl.toTimesDuration + 1.hours).toUnix.u256 + request.expiry = + (getTime() + DefaultBlockTtl.toTimesDuration + 1.hours).toUnix.uint64 var fetchedBytes: uint = 0 let onBlocks = proc(blocks: seq[bt.Block]): Future[?!void] {.async.} = @@ -124,7 +125,7 @@ asyncchecksuite "Test Node - Host contracts": fetchedBytes += blk.data.len.uint return success() - (await onStore(request, 1.u256, onBlocks)).tryGet() + (await onStore(request, 1.uint64, onBlocks)).tryGet() check fetchedBytes == 12 * DefaultBlockSize.uint let indexer = verifiable.protectedStrategy.init( diff --git a/tests/codex/node/testnode.nim b/tests/codex/node/testnode.nim index 37960232..0700203d 100644 --- a/tests/codex/node/testnode.nim +++ b/tests/codex/node/testnode.nim @@ -157,10 +157,10 @@ asyncchecksuite "Test Node - Basic": cid = manifestBlock.cid, nodes = 5, tolerance = 2, - duration = 100.u256, + duration = 100.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, - expiry = 200.u256, + expiry = 
200.uint64, collateralPerByte = 1.u256, ) ).tryGet diff --git a/tests/codex/sales/helpers/periods.nim b/tests/codex/sales/helpers/periods.nim index ba1793c2..99716cec 100644 --- a/tests/codex/sales/helpers/periods.nim +++ b/tests/codex/sales/helpers/periods.nim @@ -3,6 +3,6 @@ import ../../helpers/mockclock proc advanceToNextPeriod*(clock: MockClock, market: Market) {.async.} = let periodicity = await market.periodicity() - let period = periodicity.periodOf(clock.now().u256) + let period = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(period) - clock.set((periodEnd + 1).truncate(int)) + clock.set(periodEnd.toSecondsSince1970 + 1) diff --git a/tests/codex/sales/states/testcancelled.nim b/tests/codex/sales/states/testcancelled.nim index d2568b98..48f3e8a0 100644 --- a/tests/codex/sales/states/testcancelled.nim +++ b/tests/codex/sales/states/testcancelled.nim @@ -14,7 +14,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'cancelled'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let clock = MockClock.new() let currentCollateral = UInt256.example diff --git a/tests/codex/sales/states/testdownloading.nim b/tests/codex/sales/states/testdownloading.nim index e13ac53e..3df45749 100644 --- a/tests/codex/sales/states/testdownloading.nim +++ b/tests/codex/sales/states/testdownloading.nim @@ -10,7 +10,7 @@ import ../../helpers checksuite "sales state 'downloading'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var state: SaleDownloading setup: diff --git a/tests/codex/sales/states/testerrored.nim b/tests/codex/sales/states/testerrored.nim index 9c8ee17a..07e325e3 100644 --- a/tests/codex/sales/states/testerrored.nim +++ b/tests/codex/sales/states/testerrored.nim @@ -14,7 +14,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'errored'": let request = 
StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let clock = MockClock.new() diff --git a/tests/codex/sales/states/testfilled.nim b/tests/codex/sales/states/testfilled.nim index 74413776..04ff26db 100644 --- a/tests/codex/sales/states/testfilled.nim +++ b/tests/codex/sales/states/testfilled.nim @@ -16,7 +16,7 @@ import ../../helpers checksuite "sales state 'filled'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var market: MockMarket var slot: MockSlot diff --git a/tests/codex/sales/states/testfilling.nim b/tests/codex/sales/states/testfilling.nim index f0ce7059..ce1d32f2 100644 --- a/tests/codex/sales/states/testfilling.nim +++ b/tests/codex/sales/states/testfilling.nim @@ -9,7 +9,7 @@ import ../../helpers checksuite "sales state 'filling'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var state: SaleFilling setup: diff --git a/tests/codex/sales/states/testfinished.nim b/tests/codex/sales/states/testfinished.nim index 4b353014..0c33a7b3 100644 --- a/tests/codex/sales/states/testfinished.nim +++ b/tests/codex/sales/states/testfinished.nim @@ -15,7 +15,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'finished'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let clock = MockClock.new() let currentCollateral = UInt256.example diff --git a/tests/codex/sales/states/testignored.nim b/tests/codex/sales/states/testignored.nim index 1c808e8b..2e1c6e91 100644 --- a/tests/codex/sales/states/testignored.nim +++ b/tests/codex/sales/states/testignored.nim @@ -14,7 +14,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'ignored'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 
2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let clock = MockClock.new() diff --git a/tests/codex/sales/states/testinitialproving.nim b/tests/codex/sales/states/testinitialproving.nim index 97331a07..cae0a069 100644 --- a/tests/codex/sales/states/testinitialproving.nim +++ b/tests/codex/sales/states/testinitialproving.nim @@ -20,7 +20,7 @@ import ../helpers/periods asyncchecksuite "sales state 'initialproving'": let proof = Groth16Proof.example let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let clock = MockClock.new() diff --git a/tests/codex/sales/states/testpayout.nim b/tests/codex/sales/states/testpayout.nim index b1748b45..403c663f 100644 --- a/tests/codex/sales/states/testpayout.nim +++ b/tests/codex/sales/states/testpayout.nim @@ -15,7 +15,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'payout'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let clock = MockClock.new() let currentCollateral = UInt256.example diff --git a/tests/codex/sales/states/testpreparing.nim b/tests/codex/sales/states/testpreparing.nim index e78ee25e..99d9c7fe 100644 --- a/tests/codex/sales/states/testpreparing.nim +++ b/tests/codex/sales/states/testpreparing.nim @@ -22,7 +22,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'preparing'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let market = MockMarket.new() let clock = MockClock.new() var agent: SalesAgent @@ -34,9 +34,9 @@ asyncchecksuite "sales state 'preparing'": setup: availability = Availability.init( - totalSize = request.ask.slotSize + 100.u256, - freeSize = request.ask.slotSize + 100.u256, - duration = request.ask.duration + 60.u256, + totalSize = request.ask.slotSize + 100.uint64, + 
freeSize = request.ask.slotSize + 100.uint64, + duration = request.ask.duration + 60.uint64, minPricePerBytePerSecond = request.ask.pricePerBytePerSecond, totalCollateral = request.ask.collateralPerSlot * request.ask.slots.u256, ) diff --git a/tests/codex/sales/states/testproving.nim b/tests/codex/sales/states/testproving.nim index afdeb4d2..6b7e7bd4 100644 --- a/tests/codex/sales/states/testproving.nim +++ b/tests/codex/sales/states/testproving.nim @@ -40,9 +40,9 @@ asyncchecksuite "sales state 'proving'": proc advanceToNextPeriod(market: Market) {.async.} = let periodicity = await market.periodicity() - let current = periodicity.periodOf(clock.now().u256) + let current = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(current) - clock.set(periodEnd.truncate(int64) + 1) + clock.set(periodEnd.toSecondsSince1970 + 1) test "switches to cancelled state when request expires": let next = state.onCancelled(request) diff --git a/tests/codex/sales/states/testsimulatedproving.nim b/tests/codex/sales/states/testsimulatedproving.nim index 1fc5331c..c8f4ae1d 100644 --- a/tests/codex/sales/states/testsimulatedproving.nim +++ b/tests/codex/sales/states/testsimulatedproving.nim @@ -56,9 +56,9 @@ asyncchecksuite "sales state 'simulated-proving'": proc advanceToNextPeriod(market: Market) {.async.} = let periodicity = await market.periodicity() - let current = periodicity.periodOf(clock.now().u256) + let current = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(current) - clock.set(periodEnd.truncate(int64) + 1) + clock.set(periodEnd.toSecondsSince1970 + 1) proc waitForProvingRounds(market: Market, rounds: int) {.async.} = var rnds = rounds - 1 # proof round runs prior to advancing diff --git a/tests/codex/sales/states/testslotreserving.nim b/tests/codex/sales/states/testslotreserving.nim index 1fd573fa..d9ecdfc8 100644 --- a/tests/codex/sales/states/testslotreserving.nim +++ 
b/tests/codex/sales/states/testslotreserving.nim @@ -19,7 +19,7 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'SlotReserving'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 var market: MockMarket var clock: MockClock var agent: SalesAgent diff --git a/tests/codex/sales/states/testunknown.nim b/tests/codex/sales/states/testunknown.nim index 97730f49..5e9f81f9 100644 --- a/tests/codex/sales/states/testunknown.nim +++ b/tests/codex/sales/states/testunknown.nim @@ -16,7 +16,7 @@ import ../../helpers checksuite "sales state 'unknown'": let request = StorageRequest.example - let slotIndex = (request.ask.slots div 2).u256 + let slotIndex = request.ask.slots div 2 let slotId = slotId(request.id, slotIndex) var market: MockMarket diff --git a/tests/codex/sales/testreservations.nim b/tests/codex/sales/testreservations.nim index a1c7d1a5..79fc3626 100644 --- a/tests/codex/sales/testreservations.nim +++ b/tests/codex/sales/testreservations.nim @@ -41,17 +41,17 @@ asyncchecksuite "Reservations module": proc createAvailability(): Availability = let example = Availability.example(collateralPerByte) - let totalSize = rand(100000 .. 200000).u256 - let totalCollateral = totalSize * collateralPerByte + let totalSize = rand(100000 .. 
200000).uint64 + let totalCollateral = totalSize.u256 * collateralPerByte let availability = waitFor reservations.createAvailability( totalSize, example.duration, example.minPricePerBytePerSecond, totalCollateral ) return availability.get proc createReservation(availability: Availability): Reservation = - let size = rand(1 ..< availability.freeSize.truncate(int)) + let size = rand(1 ..< availability.freeSize.int) let reservation = waitFor reservations.createReservation( - availability.id, size.u256, RequestId.example, UInt256.example, 1.u256 + availability.id, size.uint64, RequestId.example, uint64.example, 1.u256 ) return reservation.get @@ -64,8 +64,8 @@ asyncchecksuite "Reservations module": check (await reservations.all(Availability)).get.len == 0 test "generates unique ids for storage availability": - let availability1 = Availability.init(1.u256, 2.u256, 3.u256, 4.u256, 5.u256) - let availability2 = Availability.init(1.u256, 2.u256, 3.u256, 4.u256, 5.u256) + let availability1 = Availability.init(1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256) + let availability2 = Availability.init(1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256) check availability1.id != availability2.id test "can reserve available storage": @@ -75,7 +75,7 @@ asyncchecksuite "Reservations module": test "creating availability reserves bytes in repo": let orig = repo.available.uint let availability = createAvailability() - check repo.available.uint == (orig.u256 - availability.freeSize).truncate(uint) + check repo.available.uint == orig - availability.freeSize test "can get all availabilities": let availability1 = createAvailability() @@ -129,7 +129,7 @@ asyncchecksuite "Reservations module": test "cannot create reservation with non-existant availability": let availability = Availability.example let created = await reservations.createReservation( - availability.id, UInt256.example, RequestId.example, UInt256.example, 1.u256 + availability.id, uint64.example, RequestId.example, uint64.example, 1.u256 
) check created.isErr check created.error of NotExistsError @@ -140,7 +140,7 @@ asyncchecksuite "Reservations module": availability.id, availability.totalSize + 1, RequestId.example, - UInt256.example, + uint64.example, UInt256.example, ) check created.isErr @@ -153,12 +153,12 @@ asyncchecksuite "Reservations module": availability.id, availability.totalSize - 1, RequestId.example, - UInt256.example, + uint64.example, UInt256.example, ) let two = reservations.createReservation( - availability.id, availability.totalSize, RequestId.example, UInt256.example, + availability.id, availability.totalSize, RequestId.example, uint64.example, UInt256.example, ) @@ -228,7 +228,7 @@ asyncchecksuite "Reservations module": let reservation = createReservation(availability) let orig = availability.freeSize - reservation.size let origQuota = repo.quotaReservedBytes - let returnedBytes = reservation.size + 200.u256 + let returnedBytes = reservation.size + 200.uint64 check isOk await reservations.returnBytesToAvailability( reservation.availabilityId, reservation.id, returnedBytes @@ -238,7 +238,7 @@ asyncchecksuite "Reservations module": let updated = !(await reservations.get(key, Availability)) check updated.freeSize > orig - check (updated.freeSize - orig) == 200.u256 + check (updated.freeSize - orig) == 200.uint64 check (repo.quotaReservedBytes - origQuota) == 200.NBytes test "update releases quota when lowering size": @@ -271,14 +271,14 @@ asyncchecksuite "Reservations module": let availability = createAvailability() let reservation = createReservation(availability) let updated = await reservations.release( - reservation.id, reservation.availabilityId, (reservation.size + 1).truncate(uint) + reservation.id, reservation.availabilityId, reservation.size + 1 ) check updated.isErr check updated.error of BytesOutOfBoundsError test "cannot release bytes from non-existant reservation": let availability = createAvailability() - let reservation = createReservation(availability) + discard 
createReservation(availability) let updated = await reservations.release(ReservationId.example, availability.id, 1) check updated.isErr check updated.error of NotExistsError @@ -297,7 +297,7 @@ asyncchecksuite "Reservations module": var added: Availability reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = added = a - availability.freeSize += 1.u256 + availability.freeSize += 1 discard await reservations.update(availability) check added == availability @@ -307,7 +307,7 @@ asyncchecksuite "Reservations module": var called = false reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = called = true - availability.freeSize -= 1.u256 + availability.freeSize -= 1 discard await reservations.update(availability) check not called @@ -356,14 +356,11 @@ asyncchecksuite "Reservations module": check reservations.hasAvailable(DefaultQuotaBytes.uint - 1) test "reports quota not available to be reserved": - check not reservations.hasAvailable(DefaultQuotaBytes.uint + 1) + check not reservations.hasAvailable(DefaultQuotaBytes.uint64 + 1) test "fails to create availability with size that is larger than available quota": let created = await reservations.createAvailability( - (DefaultQuotaBytes.uint + 1).u256, - UInt256.example, - UInt256.example, - UInt256.example, + DefaultQuotaBytes.uint64 + 1, uint64.example, UInt256.example, UInt256.example ) check created.isErr check created.error of ReserveFailedError diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index 09a2ce49..f078cbee 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -42,15 +42,15 @@ asyncchecksuite "Sales - start": request = StorageRequest( ask: StorageAsk( slots: 4, - slotSize: 100.u256, - duration: 60.u256, + slotSize: 100.uint64, + duration: 60.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, ), content: StorageContent( cid: Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet ), - 
expiry: (getTime() + initDuration(hours = 1)).toUnix.u256, + expiry: (getTime() + initDuration(hours = 1)).toUnix.uint64, ) market = MockMarket.new() @@ -62,7 +62,7 @@ asyncchecksuite "Sales - start": sales = Sales.new(market, clock, repo) reservations = sales.context.reservations sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = return success() @@ -78,7 +78,7 @@ asyncchecksuite "Sales - start": return success(proof) itemsProcessed = @[] expiry = (clock.now() + 42) - request.expiry = expiry.u256 + request.expiry = expiry.uint64 teardown: await sales.stop() @@ -86,7 +86,7 @@ asyncchecksuite "Sales - start": await repoTmp.destroyDb() await metaTmp.destroyDb() - proc fillSlot(slotIdx: UInt256 = 0.u256) {.async.} = + proc fillSlot(slotIdx: uint64 = 0.uint64) {.async.} = let address = await market.getSigner() let slot = MockSlot(requestId: request.id, slotIndex: slotIdx, proof: proof, host: address) @@ -101,15 +101,13 @@ asyncchecksuite "Sales - start": market.requestState[request.id] = RequestState.New market.requestExpiry[request.id] = expiry - let slot0 = - MockSlot(requestId: request.id, slotIndex: 0.u256, proof: proof, host: me) + let slot0 = MockSlot(requestId: request.id, slotIndex: 0, proof: proof, host: me) await fillSlot(slot0.slotIndex) - let slot1 = - MockSlot(requestId: request.id, slotIndex: 1.u256, proof: proof, host: me) + let slot1 = MockSlot(requestId: request.id, slotIndex: 1, proof: proof, host: me) await fillSlot(slot1.slotIndex) - market.activeSlots[me] = @[request.slotId(0.u256), request.slotId(1.u256)] + market.activeSlots[me] = @[request.slotId(0), request.slotId(1)] market.requested = @[request] market.activeRequests[me] = @[request.id] @@ -117,10 +115,10 @@ asyncchecksuite "Sales - start": check eventually sales.agents.len == 2 check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex 
== 0.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.uint64 ) check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.uint64 ) asyncchecksuite "Sales": @@ -129,7 +127,7 @@ asyncchecksuite "Sales": repoTmp = TempLevelDb.new() metaTmp = TempLevelDb.new() - var totalAvailabilitySize: UInt256 + var totalAvailabilitySize: uint64 var minPricePerBytePerSecond: UInt256 var requestedCollateralPerByte: UInt256 var totalCollateral: UInt256 @@ -144,29 +142,29 @@ asyncchecksuite "Sales": var itemsProcessed: seq[SlotQueueItem] setup: - totalAvailabilitySize = 100.u256 + totalAvailabilitySize = 100.uint64 minPricePerBytePerSecond = 1.u256 requestedCollateralPerByte = 1.u256 - totalCollateral = requestedCollateralPerByte * totalAvailabilitySize + totalCollateral = requestedCollateralPerByte * totalAvailabilitySize.stuint(256) availability = Availability.init( totalSize = totalAvailabilitySize, freeSize = totalAvailabilitySize, - duration = 60.u256, + duration = 60.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, ) request = StorageRequest( ask: StorageAsk( slots: 4, - slotSize: 100.u256, - duration: 60.u256, + slotSize: 100.uint64, + duration: 60.uint64, pricePerBytePerSecond: minPricePerBytePerSecond, collateralPerByte: 1.u256, ), content: StorageContent( cid: Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet ), - expiry: (getTime() + initDuration(hours = 1)).toUnix.u256, + expiry: (getTime() + initDuration(hours = 1)).toUnix.uint64, ) market = MockMarket.new() @@ -183,7 +181,7 @@ asyncchecksuite "Sales": sales = Sales.new(market, clock, repo) reservations = sales.context.reservations sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = 
return success() @@ -288,13 +286,13 @@ asyncchecksuite "Sales": test "removes slot index from slot queue once SlotFilled emitted": let request1 = await addRequestToSaturatedQueue() - market.emitSlotFilled(request1.id, 1.u256) + market.emitSlotFilled(request1.id, 1.uint64) let expected = SlotQueueItem.init(request1, 1'u16) check always (not itemsProcessed.contains(expected)) test "removes slot index from slot queue once SlotReservationsFull emitted": let request1 = await addRequestToSaturatedQueue() - market.emitSlotReservationsFull(request1.id, 1.u256) + market.emitSlotReservationsFull(request1.id, 1.uint64) let expected = SlotQueueItem.init(request1, 1'u16) check always (not itemsProcessed.contains(expected)) @@ -305,7 +303,7 @@ asyncchecksuite "Sales": createAvailability() market.requested.add request # "contract" must be able to return request - market.emitSlotFreed(request.id, 2.u256) + market.emitSlotFreed(request.id, 2.uint64) let expected = SlotQueueItem.init(request, 2.uint16) check eventually itemsProcessed.contains(expected) @@ -350,10 +348,10 @@ asyncchecksuite "Sales": test "availability size is reduced by request slot size when fully downloaded": sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = let blk = bt.Block.new(@[1.byte]).get - await onBatch(blk.repeat(request.ask.slotSize.truncate(int))) + await onBatch(blk.repeat(request.ask.slotSize.int)) createAvailability() await market.requestStorage(request) @@ -361,16 +359,16 @@ asyncchecksuite "Sales": availability.freeSize - request.ask.slotSize test "non-downloaded bytes are returned to availability once finished": - var slotIndex = 0.u256 + var slotIndex = 0.uint64 sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = slotIndex = slot let blk = 
bt.Block.new(@[1.byte]).get await onBatch(@[blk]) let sold = newFuture[void]() - sales.onSale = proc(request: StorageRequest, slotIndex: UInt256) = + sales.onSale = proc(request: StorageRequest, slotIndex: uint64) = sold.complete() createAvailability() @@ -381,7 +379,7 @@ asyncchecksuite "Sales": # complete request market.slotState[request.slotId(slotIndex)] = SlotState.Finished - clock.advance(request.ask.duration.truncate(int64)) + clock.advance(request.ask.duration.int64) check eventually getAvailability().freeSize == origSize - 1 @@ -413,17 +411,17 @@ asyncchecksuite "Sales": test "ignores request when slot state is not free": createAvailability() await market.requestStorage(request) - market.slotState[request.slotId(0.u256)] = SlotState.Filled - market.slotState[request.slotId(1.u256)] = SlotState.Filled - market.slotState[request.slotId(2.u256)] = SlotState.Filled - market.slotState[request.slotId(3.u256)] = SlotState.Filled + market.slotState[request.slotId(0.uint64)] = SlotState.Filled + market.slotState[request.slotId(1.uint64)] = SlotState.Filled + market.slotState[request.slotId(2.uint64)] = SlotState.Filled + market.slotState[request.slotId(3.uint64)] = SlotState.Filled check wasIgnored() test "retrieves and stores data locally": var storingRequest: StorageRequest - var storingSlot: UInt256 + var storingSlot: uint64 sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = storingRequest = request storingSlot = slot @@ -431,12 +429,12 @@ asyncchecksuite "Sales": createAvailability() await market.requestStorage(request) check eventually storingRequest == request - check storingSlot < request.ask.slots.u256 + check storingSlot < request.ask.slots test "makes storage available again when data retrieval fails": let error = newException(IOError, "data retrieval failed") sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: 
BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = return failure(error) createAvailability() @@ -445,7 +443,7 @@ asyncchecksuite "Sales": test "generates proof of storage": var provingRequest: StorageRequest - var provingSlot: UInt256 + var provingSlot: uint64 sales.onProve = proc( slot: Slot, challenge: ProofChallenge ): Future[?!Groth16Proof] {.async.} = @@ -457,7 +455,7 @@ asyncchecksuite "Sales": await allowRequestToStart() check eventually provingRequest == request - check provingSlot < request.ask.slots.u256 + check provingSlot < request.ask.slots test "fills a slot": createAvailability() @@ -466,14 +464,14 @@ asyncchecksuite "Sales": check eventually market.filled.len > 0 check market.filled[0].requestId == request.id - check market.filled[0].slotIndex < request.ask.slots.u256 + check market.filled[0].slotIndex < request.ask.slots check market.filled[0].proof == proof check market.filled[0].host == await market.getSigner() test "calls onFilled when slot is filled": var soldRequest = StorageRequest.default - var soldSlotIndex = UInt256.high - sales.onSale = proc(request: StorageRequest, slotIndex: UInt256) = + var soldSlotIndex = uint64.high + sales.onSale = proc(request: StorageRequest, slotIndex: uint64) = soldRequest = request soldSlotIndex = slotIndex createAvailability() @@ -481,7 +479,7 @@ asyncchecksuite "Sales": await allowRequestToStart() check eventually soldRequest == request - check soldSlotIndex < request.ask.slots.u256 + check soldSlotIndex < request.ask.slots test "calls onClear when storage becomes available again": # fail the proof intentionally to trigger `agent.finish(success=false)`, @@ -491,8 +489,8 @@ asyncchecksuite "Sales": ): Future[?!Groth16Proof] {.async.} = raise newException(IOError, "proof failed") var clearedRequest: StorageRequest - var clearedSlotIndex: UInt256 - sales.onClear = proc(request: StorageRequest, slotIndex: UInt256) = + var clearedSlotIndex: uint64 + sales.onClear = 
proc(request: StorageRequest, slotIndex: uint64) = clearedRequest = request clearedSlotIndex = slotIndex createAvailability() @@ -500,19 +498,19 @@ asyncchecksuite "Sales": await allowRequestToStart() check eventually clearedRequest == request - check clearedSlotIndex < request.ask.slots.u256 + check clearedSlotIndex < request.ask.slots test "makes storage available again when other host fills the slot": let otherHost = Address.example sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() createAvailability() await market.requestStorage(request) for slotIndex in 0 ..< request.ask.slots: - market.fillSlot(request.id, slotIndex.u256, proof, otherHost) + market.fillSlot(request.id, slotIndex.uint64, proof, otherHost) check eventually (await reservations.all(Availability)).get == @[availability] test "makes storage available again when request expires": @@ -521,7 +519,7 @@ asyncchecksuite "Sales": let origSize = availability.freeSize sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() @@ -541,12 +539,12 @@ asyncchecksuite "Sales": # ensure only one slot, otherwise once bytes are returned to the # availability, the queue will be unpaused and availability will be consumed # by other slots - request.ask.slots = 1.uint64 + request.ask.slots = 1 market.requestExpiry[request.id] = expiry let origSize = availability.freeSize sales.onStore = proc( - request: StorageRequest, slot: UInt256, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() @@ -573,21 +571,19 @@ asyncchecksuite "Sales": market.requestState[request.id] = 
RequestState.New market.requestEnds[request.id] = request.expiry.toSecondsSince1970 - proc fillSlot(slotIdx: UInt256 = 0.u256) {.async.} = + proc fillSlot(slotIdx: uint64 = 0) {.async.} = let address = await market.getSigner() let slot = MockSlot(requestId: request.id, slotIndex: slotIdx, proof: proof, host: address) market.filled.add slot market.slotState[slotId(request.id, slotIdx)] = SlotState.Filled - let slot0 = - MockSlot(requestId: request.id, slotIndex: 0.u256, proof: proof, host: me) + let slot0 = MockSlot(requestId: request.id, slotIndex: 0, proof: proof, host: me) await fillSlot(slot0.slotIndex) - let slot1 = - MockSlot(requestId: request.id, slotIndex: 1.u256, proof: proof, host: me) + let slot1 = MockSlot(requestId: request.id, slotIndex: 1, proof: proof, host: me) await fillSlot(slot1.slotIndex) - market.activeSlots[me] = @[request.slotId(0.u256), request.slotId(1.u256)] + market.activeSlots[me] = @[request.slotId(0), request.slotId(1)] market.requested = @[request] market.activeRequests[me] = @[request.id] @@ -595,16 +591,16 @@ asyncchecksuite "Sales": check eventually sales.agents.len == 2 check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.uint64 ) check sales.agents.any( - agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256 + agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.uint64 ) test "deletes inactive reservations on load": createAvailability() discard await reservations.createReservation( - availability.id, 100.u256, RequestId.example, UInt256.example, UInt256.example + availability.id, 100.uint64, RequestId.example, 0.uint64, UInt256.example ) check (await reservations.all(Reservation)).get.len == 1 await sales.load() diff --git a/tests/codex/sales/testsalesagent.nim b/tests/codex/sales/testsalesagent.nim index f1cb53a6..c795904d 100644 --- 
a/tests/codex/sales/testsalesagent.nim +++ b/tests/codex/sales/testsalesagent.nim @@ -27,7 +27,7 @@ method onFailed*(state: MockState, request: StorageRequest): ?State = onFailedCalled = true method onSlotFilled*( - state: MockState, requestId: RequestId, slotIndex: UInt256 + state: MockState, requestId: RequestId, slotIndex: uint64 ): ?State = onSlotFilledCalled = true @@ -35,17 +35,16 @@ asyncchecksuite "Sales agent": let request = StorageRequest.example var agent: SalesAgent var context: SalesContext - var slotIndex: UInt256 + var slotIndex: uint64 var market: MockMarket var clock: MockClock setup: market = MockMarket.new() - market.requestExpiry[request.id] = - getTime().toUnix() + request.expiry.truncate(int64) + market.requestExpiry[request.id] = getTime().toUnix() + request.expiry.int64 clock = MockClock.new() context = SalesContext(market: market, clock: clock) - slotIndex = 0.u256 + slotIndex = 0.uint64 onCancelCalled = false onFailedCalled = false onSlotFilledCalled = false diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim index 2e0759ee..46c35b1c 100644 --- a/tests/codex/sales/testslotqueue.nim +++ b/tests/codex/sales/testslotqueue.nim @@ -146,18 +146,18 @@ suite "Slot queue": test "correctly compares SlotQueueItems": var requestA = StorageRequest.example - requestA.ask.duration = 1.u256 + requestA.ask.duration = 1.uint64 requestA.ask.pricePerBytePerSecond = 1.u256 - check requestA.ask.pricePerSlot == 1.u256 * requestA.ask.slotSize + check requestA.ask.pricePerSlot == 1.u256 * requestA.ask.slotSize.u256 requestA.ask.collateralPerByte = 100000.u256 - requestA.expiry = 1001.u256 + requestA.expiry = 1001.uint64 var requestB = StorageRequest.example - requestB.ask.duration = 100.u256 + requestB.ask.duration = 100.uint64 requestB.ask.pricePerBytePerSecond = 1000.u256 - check requestB.ask.pricePerSlot == 100000.u256 * requestB.ask.slotSize + check requestB.ask.pricePerSlot == 100000.u256 * requestB.ask.slotSize.u256 
requestB.ask.collateralPerByte = 1.u256 - requestB.expiry = 1000.u256 + requestB.expiry = 1000.uint64 let itemA = SlotQueueItem.init(requestA, 0) let itemB = SlotQueueItem.init(requestB, 0) @@ -169,21 +169,21 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 2.u256, # profitability is higher (good) collateralPerByte: 1.u256, - expiry: 1.u256, + expiry: 1.uint64, seen: true, # seen (bad), more weight than profitability ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, # profitability is lower (bad) collateralPerByte: 1.u256, - expiry: 1.u256, + expiry: 1.uint64, seen: false, # not seen (good) ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # B higher priority than A @@ -194,22 +194,22 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, # reward is lower (bad) collateralPerByte: 1.u256, # collateral is lower (good) - expiry: 1.u256, + expiry: 1.uint64, seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 2.u256, # reward is higher (good), more weight than collateral collateralPerByte: 2.u256, # collateral is higher (bad) - expiry: 1.u256, + expiry: 1.uint64, seen: false, ) @@ -220,21 +220,21 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 2.u256, # collateral is higher (bad) - expiry: 2.u256, # expiry is longer (good) + 
expiry: 2.uint64, # expiry is longer (good) seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, - duration: 1.u256, + slotSize: 1.uint64, + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, # collateral is lower (good), more weight than expiry - expiry: 1.u256, # expiry is shorter (bad) + expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) @@ -245,21 +245,21 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, # slotSize is smaller (good) - duration: 1.u256, + slotSize: 1.uint64, # slotSize is smaller (good) + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, - expiry: 1.u256, # expiry is shorter (bad) + expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 2.u256, # slotSize is larger (bad) - duration: 1.u256, + slotSize: 2.uint64, # slotSize is larger (bad) + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, - expiry: 2.u256, # expiry is longer (good), more weight than slotSize + expiry: 2.uint64, # expiry is longer (good), more weight than slotSize seen: false, ) @@ -270,21 +270,21 @@ suite "Slot queue": let itemA = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 2.u256, # slotSize is larger (bad) - duration: 1.u256, + slotSize: 2.uint64, # slotSize is larger (bad) + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, - expiry: 1.u256, # expiry is shorter (bad) + expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, - slotSize: 1.u256, # slotSize is smaller (good) - duration: 1.u256, + slotSize: 1.uint64, # slotSize is smaller (good) + duration: 1.uint64, pricePerBytePerSecond: 1.u256, collateralPerByte: 1.u256, - expiry: 1.u256, + expiry: 1.uint64, seen: false, ) 
@@ -460,14 +460,14 @@ suite "Slot queue": test "sorts items by expiry descending (longer expiry = higher priority)": var request = StorageRequest.example let item0 = SlotQueueItem.init(request, 0) - request.expiry += 1.u256 + request.expiry += 1 let item1 = SlotQueueItem.init(request, 1) check item1 < item0 test "sorts items by slot size descending (bigger dataset = higher profitability = higher priority)": var request = StorageRequest.example let item0 = SlotQueueItem.init(request, 0) - request.ask.slotSize += 1.u256 + request.ask.slotSize += 1 let item1 = SlotQueueItem.init(request, 1) check item1 < item0 diff --git a/tests/codex/testpurchasing.nim b/tests/codex/testpurchasing.nim index bbab4197..5a4e85e9 100644 --- a/tests/codex/testpurchasing.nim +++ b/tests/codex/testpurchasing.nim @@ -28,8 +28,8 @@ asyncchecksuite "Purchasing": request = StorageRequest( ask: StorageAsk( slots: uint8.example.uint64, - slotSize: uint32.example.u256, - duration: uint16.example.u256, + slotSize: uint32.example.uint64, + duration: uint16.example.uint64, pricePerBytePerSecond: uint8.example.u256, ) ) @@ -100,7 +100,6 @@ asyncchecksuite "Purchasing": market.requestExpiry[populatedRequest.id] = expiry let purchase = await purchasing.purchase(populatedRequest) check eventually market.requested.len > 0 - let request = market.requested[0] clock.set(expiry + 1) expect PurchaseTimeout: @@ -130,8 +129,8 @@ checksuite "Purchasing state machine": request = StorageRequest( ask: StorageAsk( slots: uint8.example.uint64, - slotSize: uint32.example.u256, - duration: uint16.example.u256, + slotSize: uint32.example.uint64, + duration: uint16.example.uint64, pricePerBytePerSecond: uint8.example.u256, ) ) @@ -185,7 +184,7 @@ checksuite "Purchasing state machine": test "moves to PurchaseStarted when request state is Started": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) - market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) + 
market.requestEnds[request.id] = clock.now() + request.ask.duration.int64 market.requested = @[request] market.requestState[request.id] = RequestState.Started let next = await PurchaseUnknown().run(purchase) @@ -218,7 +217,7 @@ checksuite "Purchasing state machine": test "moves to PurchaseFailed state once RequestFailed emitted": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) - market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) + market.requestEnds[request.id] = clock.now() + request.ask.duration.int64 let future = PurchaseStarted().run(purchase) market.emitRequestFailed(request.id) @@ -229,10 +228,10 @@ checksuite "Purchasing state machine": test "moves to PurchaseFinished state once request finishes": let request = StorageRequest.example let purchase = Purchase.new(request, market, clock) - market.requestEnds[request.id] = clock.now() + request.ask.duration.truncate(int64) + market.requestEnds[request.id] = clock.now() + request.ask.duration.int64 let future = PurchaseStarted().run(purchase) - clock.advance(request.ask.duration.truncate(int64) + 1) + clock.advance(request.ask.duration.int64 + 1) let next = await future check !next of PurchaseFinished diff --git a/tests/codex/testvalidation.nim b/tests/codex/testvalidation.nim index 95d913c3..30d6e3f3 100644 --- a/tests/codex/testvalidation.nim +++ b/tests/codex/testvalidation.nim @@ -16,8 +16,8 @@ logScope: topics = "testValidation" asyncchecksuite "validation": - let period = 10 - let timeout = 5 + let period = 10.uint64 + let timeout = 5.uint64 let maxSlots = MaxSlots(100) let validationGroups = ValidationGroups(8).some let slot = Slot.example @@ -51,8 +51,8 @@ asyncchecksuite "validation": groupIndex = groupIndexForSlotId(slot.id, !validationGroups) clock = MockClock.new() market = MockMarket.new(clock = Clock(clock).some) - market.config.proofs.period = period.u256 - market.config.proofs.timeout = timeout.u256 + 
market.config.proofs.period = period + market.config.proofs.timeout = timeout validation = newValidation(clock, market, maxSlots, validationGroups, groupIndex) teardown: @@ -60,10 +60,10 @@ asyncchecksuite "validation": await validation.stop() proc advanceToNextPeriod() = - let periodicity = Periodicity(seconds: period.u256) - let period = periodicity.periodOf(clock.now().u256) + let periodicity = Periodicity(seconds: period) + let period = periodicity.periodOf(clock.now().Timestamp) let periodEnd = periodicity.periodEnd(period) - clock.set((periodEnd + 1).truncate(int)) + clock.set(periodEnd.toSecondsSince1970 + 1) test "the list of slots that it's monitoring is empty initially": check validation.slots.len == 0 diff --git a/tests/contracts/testContracts.nim b/tests/contracts/testContracts.nim index 3af63ac1..84708ecd 100644 --- a/tests/contracts/testContracts.nim +++ b/tests/contracts/testContracts.nim @@ -49,28 +49,29 @@ ethersuite "Marketplace contracts": switchAccount(host) discard await token.approve(marketplace.address, request.ask.collateralPerSlot).confirm(1) - discard await marketplace.reserveSlot(request.id, 0.u256).confirm(1) - let receipt = await marketplace.fillSlot(request.id, 0.u256, proof).confirm(1) + discard await marketplace.reserveSlot(request.id, 0.uint64).confirm(1) + let receipt = await marketplace.fillSlot(request.id, 0.uint64, proof).confirm(1) filledAt = await ethProvider.blockTime(BlockTag.init(!receipt.blockNumber)) - slotId = request.slotId(0.u256) + slotId = request.slotId(0.uint64) proc waitUntilProofRequired(slotId: SlotId) {.async.} = - let currentPeriod = periodicity.periodOf(await ethProvider.currentTime()) - await ethProvider.advanceTimeTo(periodicity.periodEnd(currentPeriod)) + let currentPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) + await ethProvider.advanceTimeTo(periodicity.periodEnd(currentPeriod).u256) while not ( (await marketplace.isProofRequired(slotId)) and (await 
marketplace.getPointer(slotId)) < 250 ) : - await ethProvider.advanceTime(periodicity.seconds) + await ethProvider.advanceTime(periodicity.seconds.u256) proc startContract() {.async.} = for slotIndex in 1 ..< request.ask.slots: discard await token .approve(marketplace.address, request.ask.collateralPerSlot) .confirm(1) - discard await marketplace.reserveSlot(request.id, slotIndex.u256).confirm(1) - discard await marketplace.fillSlot(request.id, slotIndex.u256, proof).confirm(1) + discard await marketplace.reserveSlot(request.id, slotIndex.uint64).confirm(1) + discard await marketplace.fillSlot(request.id, slotIndex.uint64, proof).confirm(1) test "accept marketplace proofs": switchAccount(host) @@ -80,9 +81,10 @@ ethersuite "Marketplace contracts": test "can mark missing proofs": switchAccount(host) await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) let endOfPeriod = periodicity.periodEnd(missingPeriod) - await ethProvider.advanceTimeTo(endOfPeriod + 1) + await ethProvider.advanceTimeTo(endOfPeriod.u256 + 1) switchAccount(client) discard await marketplace.markProofAsMissing(slotId, missingPeriod).confirm(1) @@ -123,7 +125,8 @@ ethersuite "Marketplace contracts": let expiry = await marketplace.requestExpiry(request.id) await ethProvider.advanceTimeTo((expiry + 1).u256) switchAccount(client) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) - await ethProvider.advanceTime(periodicity.seconds) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) + await ethProvider.advanceTime(periodicity.seconds.u256) expect Marketplace_SlotNotAcceptingProofs: discard await marketplace.markProofAsMissing(slotId, missingPeriod).confirm(1) diff --git a/tests/contracts/testMarket.nim b/tests/contracts/testMarket.nim index 6506a2d6..74d6a65e 100644 --- 
a/tests/contracts/testMarket.nim +++ b/tests/contracts/testMarket.nim @@ -25,7 +25,7 @@ ethersuite "On-Chain Market": var marketplace: Marketplace var token: Erc20Token var request: StorageRequest - var slotIndex: UInt256 + var slotIndex: uint64 var periodicity: Periodicity var host: Signer var otherHost: Signer @@ -58,11 +58,12 @@ ethersuite "On-Chain Market": host = ethProvider.getSigner(accounts[1]) otherHost = ethProvider.getSigner(accounts[3]) - slotIndex = (request.ask.slots div 2).u256 + slotIndex = request.ask.slots div 2 proc advanceToNextPeriod() {.async.} = - let currentPeriod = periodicity.periodOf(await ethProvider.currentTime()) - await ethProvider.advanceTimeTo(periodicity.periodEnd(currentPeriod) + 1) + let currentPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) + await ethProvider.advanceTimeTo((periodicity.periodEnd(currentPeriod) + 1).u256) proc advanceToCancelledRequest(request: StorageRequest) {.async.} = let expiry = (await market.requestExpiresAt(request.id)) + 1 @@ -125,12 +126,13 @@ ethersuite "On-Chain Market": test "supports request subscriptions": var receivedIds: seq[RequestId] var receivedAsks: seq[StorageAsk] - proc onRequest(id: RequestId, ask: StorageAsk, expiry: UInt256) = + proc onRequest(id: RequestId, ask: StorageAsk, expiry: uint64) = receivedIds.add(id) receivedAsks.add(ask) let subscription = await market.subscribeRequests(onRequest) await market.requestStorage(request) + check eventually receivedIds == @[request.id] and receivedAsks == @[request.ask] await subscription.unsubscribe() @@ -172,7 +174,8 @@ ethersuite "On-Chain Market": await market.reserveSlot(request.id, slotIndex) await market.fillSlot(request.id, slotIndex, proof, request.ask.collateralPerSlot) await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) await 
advanceToNextPeriod() await market.markProofAsMissing(slotId, missingPeriod) check (await marketplace.missingProofs(slotId)) == 1 @@ -183,15 +186,16 @@ ethersuite "On-Chain Market": await market.reserveSlot(request.id, slotIndex) await market.fillSlot(request.id, slotIndex, proof, request.ask.collateralPerSlot) await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) await advanceToNextPeriod() check (await market.canProofBeMarkedAsMissing(slotId, missingPeriod)) == true test "supports slot filled subscriptions": await market.requestStorage(request) var receivedIds: seq[RequestId] - var receivedSlotIndices: seq[UInt256] - proc onSlotFilled(id: RequestId, slotIndex: UInt256) = + var receivedSlotIndices: seq[uint64] + proc onSlotFilled(id: RequestId, slotIndex: uint64) = receivedIds.add(id) receivedSlotIndices.add(slotIndex) @@ -206,8 +210,8 @@ ethersuite "On-Chain Market": test "subscribes only to a certain slot": var otherSlot = slotIndex - 1 await market.requestStorage(request) - var receivedSlotIndices: seq[UInt256] - proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + var receivedSlotIndices: seq[uint64] + proc onSlotFilled(requestId: RequestId, slotIndex: uint64) = receivedSlotIndices.add(slotIndex) let subscription = @@ -224,8 +228,8 @@ ethersuite "On-Chain Market": await market.reserveSlot(request.id, slotIndex) await market.fillSlot(request.id, slotIndex, proof, request.ask.collateralPerSlot) var receivedRequestIds: seq[RequestId] = @[] - var receivedIdxs: seq[UInt256] = @[] - proc onSlotFreed(requestId: RequestId, idx: UInt256) = + var receivedIdxs: seq[uint64] = @[] + proc onSlotFreed(requestId: RequestId, idx: uint64) = receivedRequestIds.add(requestId) receivedIdxs.add(idx) @@ -243,8 +247,8 @@ ethersuite "On-Chain Market": await market.requestStorage(request) var receivedRequestIds: 
seq[RequestId] = @[] - var receivedIdxs: seq[UInt256] = @[] - proc onSlotReservationsFull(requestId: RequestId, idx: UInt256) = + var receivedIdxs: seq[uint64] = @[] + proc onSlotReservationsFull(requestId: RequestId, idx: uint64) = receivedRequestIds.add(requestId) receivedIdxs.add(idx) @@ -270,9 +274,9 @@ ethersuite "On-Chain Market": let subscription = await market.subscribeFulfillment(request.id, onFulfillment) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) check eventually receivedIds == @[request.id] await subscription.unsubscribe() @@ -291,14 +295,14 @@ ethersuite "On-Chain Market": let subscription = await market.subscribeFulfillment(request.id, onFulfillment) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) for slotIndex in 0 ..< otherRequest.ask.slots: - await market.reserveSlot(otherRequest.id, slotIndex.u256) + await market.reserveSlot(otherRequest.id, slotIndex.uint64) await market.fillSlot( - otherRequest.id, slotIndex.u256, proof, otherRequest.ask.collateralPerSlot + otherRequest.id, slotIndex.uint64, proof, otherRequest.ask.collateralPerSlot ) check eventually receivedIds == @[request.id] @@ -330,18 +334,19 @@ ethersuite "On-Chain Market": let subscription = await market.subscribeRequestFailed(request.id, onRequestFailed) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, 
proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) for slotIndex in 0 .. request.ask.maxSlotLoss: - let slotId = request.slotId(slotIndex.u256) + let slotId = request.slotId(slotIndex.uint64) while true: let slotState = await marketplace.slotState(slotId) if slotState == SlotState.Repair or slotState == SlotState.Failed: break await waitUntilProofRequired(slotId) - let missingPeriod = periodicity.periodOf(await ethProvider.currentTime()) + let missingPeriod = + periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) await advanceToNextPeriod() discard await marketplace.markProofAsMissing(slotId, missingPeriod).confirm(1) check eventually receivedIds == @[request.id] @@ -395,9 +400,9 @@ ethersuite "On-Chain Market": test "can retrieve request state": await market.requestStorage(request) for slotIndex in 0 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) check (await market.requestState(request.id)) == some RequestState.Started @@ -460,13 +465,12 @@ ethersuite "On-Chain Market": test "can query past SlotFilled events": await market.requestStorage(request) - await market.reserveSlot(request.id, 0.u256) - await market.reserveSlot(request.id, 1.u256) - await market.reserveSlot(request.id, 2.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 1.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 2.u256, proof, request.ask.collateralPerSlot) - let slotId = request.slotId(slotIndex) + await market.reserveSlot(request.id, 0.uint64) + await market.reserveSlot(request.id, 1.uint64) + await market.reserveSlot(request.id, 2.uint64) + await 
market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 1.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 2.uint64, proof, request.ask.collateralPerSlot) # `market.fill` executes an `approve` tx before the `fillSlot` tx, so that's # two PoA blocks per `fillSlot` call (6 blocks for 3 calls). We don't need @@ -475,15 +479,15 @@ ethersuite "On-Chain Market": let events = await market.queryPastSlotFilledEvents(blocksAgo = 5) check events == @[ - SlotFilled(requestId: request.id, slotIndex: 0.u256), - SlotFilled(requestId: request.id, slotIndex: 1.u256), - SlotFilled(requestId: request.id, slotIndex: 2.u256), + SlotFilled(requestId: request.id, slotIndex: 0), + SlotFilled(requestId: request.id, slotIndex: 1), + SlotFilled(requestId: request.id, slotIndex: 2), ] test "can query past SlotFilled events since given timestamp": await market.requestStorage(request) - await market.reserveSlot(request.id, 0.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) # The SlotFilled event will be included in the same block as # the fillSlot transaction. 
If we want to ignore the SlotFilled event @@ -494,10 +498,10 @@ ethersuite "On-Chain Market": let (_, fromTime) = await ethProvider.blockNumberAndTimestamp(BlockTag.latest) - await market.reserveSlot(request.id, 1.u256) - await market.reserveSlot(request.id, 2.u256) - await market.fillSlot(request.id, 1.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 2.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 1.uint64) + await market.reserveSlot(request.id, 2.uint64) + await market.fillSlot(request.id, 1.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 2.uint64, proof, request.ask.collateralPerSlot) let events = await market.queryPastSlotFilledEvents( fromTime = fromTime.truncate(SecondsSince1970) @@ -505,19 +509,19 @@ ethersuite "On-Chain Market": check events == @[ - SlotFilled(requestId: request.id, slotIndex: 1.u256), - SlotFilled(requestId: request.id, slotIndex: 2.u256), + SlotFilled(requestId: request.id, slotIndex: 1), + SlotFilled(requestId: request.id, slotIndex: 2), ] test "queryPastSlotFilledEvents returns empty sequence of events when " & "no SlotFilled events have occurred since given timestamp": await market.requestStorage(request) - await market.reserveSlot(request.id, 0.u256) - await market.reserveSlot(request.id, 1.u256) - await market.reserveSlot(request.id, 2.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 1.u256, proof, request.ask.collateralPerSlot) - await market.fillSlot(request.id, 2.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.reserveSlot(request.id, 1.uint64) + await market.reserveSlot(request.id, 2.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 1.uint64, proof, request.ask.collateralPerSlot) + await market.fillSlot(request.id, 
2.uint64, proof, request.ask.collateralPerSlot) await ethProvider.advanceTime(10.u256) @@ -542,21 +546,21 @@ ethersuite "On-Chain Market": let address = await host.getAddress() switchAccount(host) - await market.reserveSlot(request.id, 0.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) let filledAt = (await ethProvider.currentTime()) - 1.u256 for slotIndex in 1 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) let requestEnd = await market.getRequestEnd(request.id) await ethProvider.advanceTimeTo(requestEnd.u256 + 1) let startBalance = await token.balanceOf(address) - await market.freeSlot(request.slotId(0.u256)) + await market.freeSlot(request.slotId(0.uint64)) let endBalance = await token.balanceOf(address) let expectedPayout = request.expectedPayout(filledAt, requestEnd.u256) @@ -569,14 +573,14 @@ ethersuite "On-Chain Market": await market.requestStorage(request) switchAccount(host) - await market.reserveSlot(request.id, 0.u256) - await market.fillSlot(request.id, 0.u256, proof, request.ask.collateralPerSlot) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) let filledAt = (await ethProvider.currentTime()) - 1.u256 for slotIndex in 1 ..< request.ask.slots: - await market.reserveSlot(request.id, slotIndex.u256) + await market.reserveSlot(request.id, slotIndex.uint64) await market.fillSlot( - request.id, slotIndex.u256, proof, request.ask.collateralPerSlot + request.id, slotIndex.uint64, proof, request.ask.collateralPerSlot ) let requestEnd = await 
market.getRequestEnd(request.id) @@ -585,7 +589,7 @@ ethersuite "On-Chain Market": let startBalanceHost = await token.balanceOf(hostAddress) let startBalanceReward = await token.balanceOf(hostRewardRecipient) - await market.freeSlot(request.slotId(0.u256)) + await market.freeSlot(request.slotId(0.uint64)) let endBalanceHost = await token.balanceOf(hostAddress) let endBalanceReward = await token.balanceOf(hostRewardRecipient) diff --git a/tests/examples.nim b/tests/examples.nim index 26013cdc..9b88b4a5 100644 --- a/tests/examples.nim +++ b/tests/examples.nim @@ -49,8 +49,8 @@ proc example*(_: type StorageRequest): StorageRequest = client: Address.example, ask: StorageAsk( slots: 4, - slotSize: (1 * 1024 * 1024 * 1024).u256, # 1 Gigabyte - duration: (10 * 60 * 60).u256, # 10 hours + slotSize: (1 * 1024 * 1024 * 1024).uint64, # 1 Gigabyte + duration: (10 * 60 * 60).uint64, # 10 hours collateralPerByte: 1.u256, proofProbability: 4.u256, # require a proof roughly once every 4 periods pricePerBytePerSecond: 1.u256, @@ -60,19 +60,19 @@ proc example*(_: type StorageRequest): StorageRequest = cid: Cid.init("zb2rhheVmk3bLks5MgzTqyznLu1zqGH5jrfTA1eAZXrjx7Vob").tryGet, merkleRoot: array[32, byte].example, ), - expiry: (60 * 60).u256, # 1 hour , + expiry: (60 * 60).uint64, # 1 hour , nonce: Nonce.example, ) proc example*(_: type Slot): Slot = let request = StorageRequest.example - let slotIndex = rand(request.ask.slots.int).u256 + let slotIndex = rand(request.ask.slots.int).uint64 Slot(request: request, slotIndex: slotIndex) proc example*(_: type SlotQueueItem): SlotQueueItem = let request = StorageRequest.example let slot = Slot.example - SlotQueueItem.init(request, slot.slotIndex.truncate(uint16)) + SlotQueueItem.init(request, slot.slotIndex.uint16) proc example(_: type G1Point): G1Point = G1Point(x: UInt256.example, y: UInt256.example) diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index 992b50d0..287f465f 100644 --- 
a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -117,11 +117,11 @@ proc space*(client: CodexClient): ?!RestRepoStore = proc requestStorageRaw*( client: CodexClient, cid: Cid, - duration: UInt256, + duration: uint64, pricePerBytePerSecond: UInt256, proofProbability: UInt256, collateralPerByte: UInt256, - expiry: uint = 0, + expiry: uint64 = 0, nodes: uint = 3, tolerance: uint = 1, ): Response = @@ -146,10 +146,10 @@ proc requestStorageRaw*( proc requestStorage*( client: CodexClient, cid: Cid, - duration: UInt256, + duration: uint64, pricePerBytePerSecond: UInt256, proofProbability: UInt256, - expiry: uint, + expiry: uint64, collateralPerByte: UInt256, nodes: uint = 3, tolerance: uint = 1, @@ -187,7 +187,8 @@ proc getSlots*(client: CodexClient): ?!seq[Slot] = proc postAvailability*( client: CodexClient, - totalSize, duration, minPricePerBytePerSecond, totalCollateral: UInt256, + totalSize, duration: uint64, + minPricePerBytePerSecond, totalCollateral: UInt256, ): ?!Availability = ## Post sales availability endpoint ## @@ -207,8 +208,8 @@ proc postAvailability*( proc patchAvailabilityRaw*( client: CodexClient, availabilityId: AvailabilityId, - totalSize, freeSize, duration, minPricePerBytePerSecond, totalCollateral: ?UInt256 = - UInt256.none, + totalSize, freeSize, duration: ?uint64 = uint64.none, + minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, ): Response = ## Updates availability ## @@ -237,8 +238,8 @@ proc patchAvailabilityRaw*( proc patchAvailability*( client: CodexClient, availabilityId: AvailabilityId, - totalSize, duration, minPricePerBytePerSecond, totalCollateral: ?UInt256 = - UInt256.none, + totalSize, duration: ?uint64 = uint64.none, + minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, ): void = let response = client.patchAvailabilityRaw( availabilityId, diff --git a/tests/integration/marketplacesuite.nim b/tests/integration/marketplacesuite.nim index 68283ad1..d7502bf4 100644 --- 
a/tests/integration/marketplacesuite.nim +++ b/tests/integration/marketplacesuite.nim @@ -20,14 +20,14 @@ template marketplacesuite*(name: string, body: untyped) = var token {.inject, used.}: Erc20Token proc getCurrentPeriod(): Future[Period] {.async.} = - return periodicity.periodOf(await ethProvider.currentTime()) + return periodicity.periodOf((await ethProvider.currentTime()).truncate(uint64)) proc advanceToNextPeriod() {.async.} = - let periodicity = Periodicity(seconds: period.u256) - let currentTime = await ethProvider.currentTime() + let periodicity = Periodicity(seconds: period) + let currentTime = (await ethProvider.currentTime()).truncate(uint64) let currentPeriod = periodicity.periodOf(currentTime) let endOfPeriod = periodicity.periodEnd(currentPeriod) - await ethProvider.advanceTimeTo(endOfPeriod + 1) + await ethProvider.advanceTimeTo(endOfPeriod.u256 + 1) template eventuallyP(condition: untyped, finalPeriod: Period): bool = proc eventuallyP(): Future[bool] {.async.} = @@ -56,19 +56,19 @@ template marketplacesuite*(name: string, body: untyped) = return nodes.u256 * slotSize(blocks, nodes, tolerance) proc createAvailabilities( - datasetSize: UInt256, + datasetSize: uint64, duration: uint64, collateralPerByte: UInt256, minPricePerBytePerSecond: UInt256, ) = - let totalCollateral = datasetSize * collateralPerByte + let totalCollateral = datasetSize.u256 * collateralPerByte # post availability to each provider for i in 0 ..< providers().len: let provider = providers()[i].client discard provider.postAvailability( totalSize = datasetSize, - duration = duration.u256, + duration = duration.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, ) @@ -76,7 +76,7 @@ template marketplacesuite*(name: string, body: untyped) = proc requestStorage( client: CodexClient, cid: Cid, - proofProbability = 1, + proofProbability = 1.u256, duration: uint64 = 12.periods, pricePerBytePerSecond = 1.u256, collateralPerByte = 1.u256, @@ -86,9 
+86,9 @@ template marketplacesuite*(name: string, body: untyped) = ): Future[PurchaseId] {.async.} = let id = client.requestStorage( cid, - expiry = expiry.uint, - duration = duration.u256, - proofProbability = proofProbability.u256, + expiry = expiry, + duration = duration, + proofProbability = proofProbability, collateralPerByte = collateralPerByte, pricePerBytePerSecond = pricePerBytePerSecond, nodes = nodes.uint, @@ -102,7 +102,7 @@ template marketplacesuite*(name: string, body: untyped) = let tokenAddress = await marketplace.token() token = Erc20Token.new(tokenAddress, ethProvider.getSigner()) let config = await marketplace.configuration() - period = config.proofs.period.truncate(uint64) - periodicity = Periodicity(seconds: period.u256) + period = config.proofs.period + periodicity = Periodicity(seconds: period) body diff --git a/tests/integration/testmarketplace.nim b/tests/integration/testmarketplace.nim index 7813485b..727f3fad 100644 --- a/tests/integration/testmarketplace.nim +++ b/tests/integration/testmarketplace.nim @@ -34,28 +34,28 @@ marketplacesuite "Marketplace": await ethProvider.advanceTime(1.u256) test "nodes negotiate contracts on the marketplace", marketplaceConfig: - let size = 0xFFFFFF.u256 + let size = 0xFFFFFF.uint64 let data = await RandomChunker.example(blocks = blocks) # host makes storage available let availability = host.postAvailability( totalSize = size, - duration = 20 * 60.u256, + duration = 20 * 60.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = size * minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, ).get # client requests storage let cid = client.upload(data).get - let id = client.requestStorage( + let id = await client.requestStorage( cid, - duration = 20 * 60.u256, + duration = 20 * 60.uint64, pricePerBytePerSecond = minPricePerBytePerSecond, proofProbability = 3.u256, - expiry = 10 * 60, + expiry = 10 * 60.uint64, collateralPerByte = collateralPerByte, nodes 
= ecNodes, tolerance = ecTolerance, - ).get + ) check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) let purchase = client.getPurchase(id).get @@ -71,34 +71,34 @@ marketplacesuite "Marketplace": test "node slots gets paid out and rest of tokens are returned to client", marketplaceConfig: - let size = 0xFFFFFF.u256 + let size = 0xFFFFFF.uint64 let data = await RandomChunker.example(blocks = blocks) let marketplace = Marketplace.new(Marketplace.address, ethProvider.getSigner()) let tokenAddress = await marketplace.token() let token = Erc20Token.new(tokenAddress, ethProvider.getSigner()) - let duration = 20 * 60.u256 + let duration = 20 * 60.uint64 # host makes storage available let startBalanceHost = await token.balanceOf(hostAccount) discard host.postAvailability( totalSize = size, - duration = 20 * 60.u256, + duration = 20 * 60.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = size * minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, ).get # client requests storage let cid = client.upload(data).get - let id = client.requestStorage( + let id = await client.requestStorage( cid, duration = duration, pricePerBytePerSecond = minPricePerBytePerSecond, proofProbability = 3.u256, - expiry = 10 * 60, + expiry = 10 * 60.uint64, collateralPerByte = collateralPerByte, nodes = ecNodes, tolerance = ecTolerance, - ).get + ) check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) let purchase = client.getPurchase(id).get @@ -109,13 +109,13 @@ marketplacesuite "Marketplace": # Proving mechanism uses blockchain clock to do proving/collect/cleanup round # hence we must use `advanceTime` over `sleepAsync` as Hardhat does mine new blocks # only with new transaction - await ethProvider.advanceTime(duration) + await ethProvider.advanceTime(duration.u256) # Checking that the hosting node received reward for at least the time between let slotSize = slotSize(blocks, 
ecNodes, ecTolerance) let pricePerSlotPerSecond = minPricePerBytePerSecond * slotSize check eventually (await token.balanceOf(hostAccount)) - startBalanceHost >= - (duration - 5 * 60) * pricePerSlotPerSecond * ecNodes.u256 + (duration - 5 * 60).u256 * pricePerSlotPerSecond * ecNodes.u256 # Checking that client node receives some funds back that were not used for the host nodes check eventually( @@ -157,19 +157,19 @@ marketplacesuite "Marketplace payouts": # provider makes storage available let datasetSize = datasetSize(blocks, ecNodes, ecTolerance) - let totalAvailabilitySize = datasetSize div 2 + let totalAvailabilitySize = (datasetSize div 2).truncate(uint64) discard providerApi.postAvailability( # make availability size small enough that we can't fill all the slots, # thus causing a cancellation totalSize = totalAvailabilitySize, - duration = duration.u256, + duration = duration.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = collateralPerByte * totalAvailabilitySize, + totalCollateral = collateralPerByte * totalAvailabilitySize.u256, ) let cid = clientApi.upload(data).get - var slotIdxFilled = none UInt256 + var slotIdxFilled = none uint64 proc onSlotFilled(eventResult: ?!SlotFilled) = assert not eventResult.isErr slotIdxFilled = some (!eventResult).slotIndex diff --git a/tests/integration/testproofs.nim b/tests/integration/testproofs.nim index a547890b..ab29ca4e 100644 --- a/tests/integration/testproofs.nim +++ b/tests/integration/testproofs.nim @@ -43,7 +43,10 @@ marketplacesuite "Hosts submit regular proofs": let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) let cid = client0.upload(data).get @@ -117,7 +120,10 @@ marketplacesuite "Simulate invalid proofs": let datasetSize = datasetSize(blocks = blocks, 
nodes = ecNodes, tolerance = ecTolerance) createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) let cid = client0.upload(data).get @@ -128,7 +134,7 @@ marketplacesuite "Simulate invalid proofs": duration = duration, nodes = ecNodes, tolerance = ecTolerance, - proofProbability = 1, + proofProbability = 1.u256, ) let requestId = client0.requestId(purchaseId).get @@ -177,7 +183,10 @@ marketplacesuite "Simulate invalid proofs": let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) let cid = client0.upload(data).get @@ -188,7 +197,7 @@ marketplacesuite "Simulate invalid proofs": duration = duration, nodes = ecNodes, tolerance = ecTolerance, - proofProbability = 1, + proofProbability = 1.u256, ) let requestId = client0.requestId(purchaseId).get diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim index ebae78f6..259efcff 100644 --- a/tests/integration/testpurchasing.nim +++ b/tests/integration/testpurchasing.nim @@ -11,18 +11,18 @@ twonodessuite "Purchasing": let cid = client1.upload(data).get let id1 = client1.requestStorage( cid, - duration = 100.u256, + duration = 100.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, - expiry = 10, + expiry = 10.uint64, collateralPerByte = 1.u256, ).get let id2 = client1.requestStorage( cid, - duration = 400.u256, + duration = 400.uint64, pricePerBytePerSecond = 2.u256, proofProbability = 6.u256, - expiry = 10, + expiry = 10.uint64, collateralPerByte = 2.u256, ).get check id1 != id2 @@ -37,10 +37,10 @@ twonodessuite "Purchasing": let cid = client1.upload(byteutils.toHex(data)).get let id = client1.requestStorage( cid, - duration = 
100.u256, + duration = 100.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, - expiry = 30, + expiry = 30.uint64, collateralPerByte = 1.u256, nodes = 3, tolerance = 1, @@ -49,10 +49,10 @@ twonodessuite "Purchasing": let request = client1.getPurchase(id).get.request.get check request.content.cid.data.buffer.len > 0 - check request.ask.duration == 100.u256 + check request.ask.duration == 100.uint64 check request.ask.pricePerBytePerSecond == 1.u256 check request.ask.proofProbability == 3.u256 - check request.expiry == 30 + check request.expiry == 30.uint64 check request.ask.collateralPerByte == 1.u256 check request.ask.slots == 3'u64 check request.ask.maxSlotLoss == 1'u64 @@ -78,10 +78,10 @@ twonodessuite "Purchasing": let cid = client1.upload(data).get let id = client1.requestStorage( cid, - duration = 10 * 60.u256, + duration = 10 * 60.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, - expiry = 5 * 60, + expiry = 5 * 60.uint64, collateralPerByte = 1.u256, nodes = 3.uint, tolerance = 1.uint, @@ -93,10 +93,10 @@ twonodessuite "Purchasing": check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) let request = client1.getPurchase(id).get.request.get - check request.ask.duration == (10 * 60).u256 + check request.ask.duration == (10 * 60).uint64 check request.ask.pricePerBytePerSecond == 1.u256 check request.ask.proofProbability == 3.u256 - check request.expiry == (5 * 60).u256 + check request.expiry == (5 * 60).uint64 check request.ask.collateralPerByte == 1.u256 check request.ask.slots == 3'u64 check request.ask.maxSlotLoss == 1'u64 @@ -107,7 +107,7 @@ twonodessuite "Purchasing": let responseMissing = client1.requestStorageRaw( cid, - duration = 1.u256, + duration = 1.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, @@ -117,11 +117,11 @@ twonodessuite "Purchasing": let responseBefore = client1.requestStorageRaw( cid, - duration = 10.u256, + duration = 10.uint64, 
pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, - expiry = 10, + expiry = 10.uint64, ) check responseBefore.status == "400 Bad Request" check "Expiry needs value bigger then zero and smaller then the request's duration" in diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 3918791e..a748c98e 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -25,12 +25,12 @@ twonodessuite "REST API": test "node shows used and available space", twoNodesConfig: discard client1.upload("some file contents").get - let totalSize = 12.u256 + let totalSize = 12.uint64 let minPricePerBytePerSecond = 1.u256 - let totalCollateral = totalSize * minPricePerBytePerSecond + let totalCollateral = totalSize.u256 * minPricePerBytePerSecond discard client1.postAvailability( totalSize = totalSize, - duration = 2.u256, + duration = 2.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, ).get @@ -56,11 +56,11 @@ twonodessuite "REST API": let cid = client1.upload("some file contents").get let response = client1.requestStorageRaw( cid, - duration = 10.u256, + duration = 10.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, - expiry = 9, + expiry = 9.uint64, ) check: @@ -74,11 +74,11 @@ twonodessuite "REST API": let cid = client1.upload(data).get let response = client1.requestStorageRaw( cid, - duration = 10.u256, + duration = 10.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, - expiry = 9, + expiry = 9.uint64, ) check: @@ -87,10 +87,10 @@ twonodessuite "REST API": test "request storage fails if tolerance is zero", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let duration = 100.u256 + let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 - let expiry = 30.uint + let 
expiry = 30.uint64 let collateralPerByte = 1.u256 let nodes = 3 let tolerance = 0 @@ -106,7 +106,7 @@ twonodessuite "REST API": test "request storage fails if duration exceeds limit", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let duration = (31 * 24 * 60 * 60).u256 + let duration = (31 * 24 * 60 * 60).uint64 # 31 days TODO: this should not be hardcoded, but waits for https://github.com/codex-storage/nim-codex/issues/1056 let proofProbability = 3.u256 let expiry = 30.uint @@ -126,10 +126,10 @@ twonodessuite "REST API": test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let duration = 100.u256 + let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 - let expiry = 30.uint + let expiry = 30.uint64 let collateralPerByte = 1.u256 let ecParams = @[(1, 1), (2, 1), (3, 2), (3, 3)] @@ -149,10 +149,10 @@ twonodessuite "REST API": twoNodesConfig: let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let duration = 100.u256 + let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 - let expiry = 30.uint + let expiry = 30.uint64 let collateralPerByte = 1.u256 let ecParams = @[(0, 1), (1, 2), (2, 3)] @@ -176,10 +176,10 @@ twonodessuite "REST API": fmt"({minBlocks=}, {nodes=}, {tolerance=})", twoNodesConfig: let data = await RandomChunker.example(blocks = minBlocks) let cid = client1.upload(data).get - let duration = 100.u256 + let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 - let expiry = 30.uint + let expiry = 30.uint64 let collateralPerByte = 1.u256 var responseBefore = client1.requestStorageRaw( diff --git a/tests/integration/testsales.nim b/tests/integration/testsales.nim index a77e5649..6c5c30d5 100644 --- a/tests/integration/testsales.nim +++ 
b/tests/integration/testsales.nim @@ -31,14 +31,14 @@ multinodesuite "Sales": test "node handles new storage availability", salesConfig: let availability1 = host.postAvailability( - totalSize = 1.u256, - duration = 2.u256, + totalSize = 1.uint64, + duration = 2.uint64, minPricePerBytePerSecond = 3.u256, totalCollateral = 4.u256, ).get let availability2 = host.postAvailability( - totalSize = 4.u256, - duration = 5.u256, + totalSize = 4.uint64, + duration = 5.uint64, minPricePerBytePerSecond = 6.u256, totalCollateral = 7.u256, ).get @@ -46,8 +46,8 @@ multinodesuite "Sales": test "node lists storage that is for sale", salesConfig: let availability = host.postAvailability( - totalSize = 1.u256, - duration = 2.u256, + totalSize = 1.uint64, + duration = 2.uint64, minPricePerBytePerSecond = 3.u256, totalCollateral = 4.u256, ).get @@ -56,7 +56,7 @@ multinodesuite "Sales": test "updating non-existing availability", salesConfig: let nonExistingResponse = host.patchAvailabilityRaw( AvailabilityId.example, - duration = 100.u256.some, + duration = 100.uint64.some, minPricePerBytePerSecond = 2.u256.some, totalCollateral = 200.u256.some, ) @@ -64,60 +64,60 @@ multinodesuite "Sales": test "updating availability", salesConfig: let availability = host.postAvailability( - totalSize = 140000.u256, - duration = 200.u256, + totalSize = 140000.uint64, + duration = 200.uint64, minPricePerBytePerSecond = 3.u256, totalCollateral = 300.u256, ).get host.patchAvailability( availability.id, - duration = 100.u256.some, + duration = 100.uint64.some, minPricePerBytePerSecond = 2.u256.some, totalCollateral = 200.u256.some, ) let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get - check updatedAvailability.duration == 100 + check updatedAvailability.duration == 100.uint64 check updatedAvailability.minPricePerBytePerSecond == 2 check updatedAvailability.totalCollateral == 200 - check updatedAvailability.totalSize == 140000 - check updatedAvailability.freeSize == 140000 + 
check updatedAvailability.totalSize == 140000.uint64 + check updatedAvailability.freeSize == 140000.uint64 test "updating availability - freeSize is not allowed to be changed", salesConfig: let availability = host.postAvailability( - totalSize = 140000.u256, - duration = 200.u256, + totalSize = 140000.uint64, + duration = 200.uint64, minPricePerBytePerSecond = 3.u256, totalCollateral = 300.u256, ).get let freeSizeResponse = - host.patchAvailabilityRaw(availability.id, freeSize = 110000.u256.some) + host.patchAvailabilityRaw(availability.id, freeSize = 110000.uint64.some) check freeSizeResponse.status == "400 Bad Request" check "not allowed" in freeSizeResponse.body test "updating availability - updating totalSize", salesConfig: let availability = host.postAvailability( - totalSize = 140000.u256, - duration = 200.u256, + totalSize = 140000.uint64, + duration = 200.uint64, minPricePerBytePerSecond = 3.u256, totalCollateral = 300.u256, ).get - host.patchAvailability(availability.id, totalSize = 100000.u256.some) + host.patchAvailability(availability.id, totalSize = 100000.uint64.some) let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get check updatedAvailability.totalSize == 100000 check updatedAvailability.freeSize == 100000 test "updating availability - updating totalSize does not allow bellow utilized", salesConfig: - let originalSize = 0xFFFFFF.u256 + let originalSize = 0xFFFFFF.uint64 let data = await RandomChunker.example(blocks = 8) let minPricePerBytePerSecond = 3.u256 let collateralPerByte = 1.u256 - let totalCollateral = originalSize * collateralPerByte + let totalCollateral = originalSize.u256 * collateralPerByte let availability = host.postAvailability( totalSize = originalSize, - duration = 20 * 60.u256, + duration = 20 * 60.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, ).get @@ -126,10 +126,10 @@ multinodesuite "Sales": let cid = client.upload(data).get let id = 
client.requestStorage( cid, - duration = 20 * 60.u256, + duration = 20 * 60.uint64, pricePerBytePerSecond = minPricePerBytePerSecond, proofProbability = 3.u256, - expiry = 10 * 60, + expiry = (10 * 60).uint64, collateralPerByte = collateralPerByte, nodes = 3, tolerance = 1, @@ -140,9 +140,8 @@ multinodesuite "Sales": check updatedAvailability.totalSize != updatedAvailability.freeSize let utilizedSize = updatedAvailability.totalSize - updatedAvailability.freeSize - let totalSizeResponse = host.patchAvailabilityRaw( - availability.id, totalSize = (utilizedSize - 1.u256).some - ) + let totalSizeResponse = + host.patchAvailabilityRaw(availability.id, totalSize = (utilizedSize - 1).some) check totalSizeResponse.status == "400 Bad Request" check "totalSize must be larger then current totalSize" in totalSizeResponse.body diff --git a/tests/integration/testvalidator.nim b/tests/integration/testvalidator.nim index 8b7fbc5b..7f4bc851 100644 --- a/tests/integration/testvalidator.nim +++ b/tests/integration/testvalidator.nim @@ -37,7 +37,7 @@ marketplacesuite "Validation": const blocks = 8 const ecNodes = 3 const ecTolerance = 1 - const proofProbability = 1 + const proofProbability = 1.u256 const collateralPerByte = 1.u256 const minPricePerBytePerSecond = 1.u256 @@ -100,7 +100,10 @@ marketplacesuite "Validation": let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) let cid = client0.upload(data).get @@ -167,7 +170,10 @@ marketplacesuite "Validation": let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) createAvailabilities( - datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + datasetSize.truncate(uint64), + duration, + collateralPerByte, + minPricePerBytePerSecond, ) let cid = client0.upload(data).get diff 
--git a/vendor/codex-contracts-eth b/vendor/codex-contracts-eth index ff82c26b..32a6c13d 160000 --- a/vendor/codex-contracts-eth +++ b/vendor/codex-contracts-eth @@ -1 +1 @@ -Subproject commit ff82c26b3669b52a09280c634141dace7f04659a +Subproject commit 32a6c13def1c1505765e9e0dc465117fba98c161 From f6aee4ff6e6f3d29f2988854e4fa613a7943a71f Mon Sep 17 00:00:00 2001 From: Eric <5089238+emizzle@users.noreply.github.com> Date: Fri, 21 Feb 2025 22:02:36 +1100 Subject: [PATCH 17/40] bump contracts to master (#1122) --- vendor/codex-contracts-eth | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/codex-contracts-eth b/vendor/codex-contracts-eth index 32a6c13d..c00152e6 160000 --- a/vendor/codex-contracts-eth +++ b/vendor/codex-contracts-eth @@ -1 +1 @@ -Subproject commit 32a6c13def1c1505765e9e0dc465117fba98c161 +Subproject commit c00152e6213a3ad4e6760a670213bfae22b0aabf From a609baea261b65673a1c7cf899710bbf6bd32924 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 24 Feb 2025 15:01:23 -0600 Subject: [PATCH 18/40] Add basic retry functionality (#1119) * adding basic retry functionality * avoid duplicate requests and batch them * fix cancelling blocks * properly resolve blocks * minor cleanup - use `self` * avoid useless asyncSpawn * track retries * limit max inflight and set libp2p maxIncomingStreams * cleanup * add basic yield in readLoop * use tuple instead of object * cleanup imports and logs * increase defaults * wip * fix prefetch batching * cleanup * decrease timeouts to speedup tests * remove outdated test * add retry tests * should track retries * remove useless test * use correct block address (index was off by 1) * remove duplicate noop proc * add BlockHandle type * Use BlockHandle type * add fetchLocal to control batching from local store * add format target * revert deps * adjust quotaMaxBytes * cleanup imports and logs * revert deps * cleanup blocks on cancelled * terminate erasure and prefetch jobs on stream end * split storing and 
retrieving data into separate tests * track `b.discoveryLoop` future * misc * remove useless check --- .gitignore | 2 + Makefile | 5 + codex/blockexchange/engine/discovery.nim | 1 - codex/blockexchange/engine/engine.nim | 378 ++++++++++-------- codex/blockexchange/engine/pendingblocks.nim | 146 ++++--- codex/blockexchange/network/network.nim | 63 ++- codex/blockexchange/network/networkpeer.nim | 29 +- codex/blockexchange/peers/peerctxstore.nim | 10 +- codex/codex.nim | 2 +- codex/conf.nim | 12 +- codex/logutils.nim | 2 +- codex/node.nim | 89 +++-- codex/rest/api.nim | 6 +- codex/rng.nim | 9 + codex/slots/builder/builder.nim | 2 +- codex/stores/maintenance.nim | 4 +- codex/stores/networkstore.nim | 8 + codex/stores/repostore/types.nim | 4 +- codex/utils/asyncspawn.nim | 10 - codex/utils/natutils.nim | 3 +- .../discovery/testdiscoveryengine.nim | 4 +- .../blockexchange/engine/testblockexc.nim | 46 +-- .../codex/blockexchange/engine/testengine.nim | 178 ++++++++- .../codex/blockexchange/testpendingblocks.nim | 35 +- tests/codex/node/helpers.nim | 2 +- tests/codex/node/testnode.nim | 49 ++- tests/integration/testrestapi.nim | 2 +- vendor/nim-serde | 2 +- 28 files changed, 697 insertions(+), 406 deletions(-) delete mode 100644 codex/utils/asyncspawn.nim diff --git a/.gitignore b/.gitignore index 0e1f27db..f6292dda 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,5 @@ docker/prometheus-data .DS_Store nim.cfg tests/integration/logs + +data/ diff --git a/Makefile b/Makefile index 3dfe8e7e..29d6c11d 100644 --- a/Makefile +++ b/Makefile @@ -229,6 +229,11 @@ nph/%: build-nph echo -e $(FORMAT_MSG) "nph/$*" && \ $(NPH) $* +format: + $(NPH) *.nim + $(NPH) codex/ + $(NPH) tests/ + clean-nph: rm -f $(NPH) diff --git a/codex/blockexchange/engine/discovery.nim b/codex/blockexchange/engine/discovery.nim index ba773ac5..c664f212 100644 --- a/codex/blockexchange/engine/discovery.nim +++ b/codex/blockexchange/engine/discovery.nim @@ -144,7 +144,6 @@ proc start*(b: DiscoveryEngine) 
{.async.} = b.discoveryLoop = b.discoveryQueueLoop() b.trackedFutures.track(b.discoveryLoop) - asyncSpawn b.discoveryLoop proc stop*(b: DiscoveryEngine) {.async.} = ## Stop the discovery engine diff --git a/codex/blockexchange/engine/engine.nim b/codex/blockexchange/engine/engine.nim index d30f88d9..dafdd520 100644 --- a/codex/blockexchange/engine/engine.nim +++ b/codex/blockexchange/engine/engine.nim @@ -19,6 +19,7 @@ import pkg/metrics import pkg/stint import pkg/questionable +import ../../rng import ../../stores/blockstore import ../../blocktype import ../../utils @@ -67,12 +68,6 @@ const DefaultMaxPeersPerRequest* = 10 DefaultTaskQueueSize = 100 DefaultConcurrentTasks = 10 - # DefaultMaxRetries = 3 - # DefaultConcurrentDiscRequests = 10 - # DefaultConcurrentAdvertRequests = 10 - # DefaultDiscoveryTimeout = 1.minutes - # DefaultMaxQueriedBlocksCache = 1000 - # DefaultMinPeersPerBlock = 3 type TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.} @@ -88,10 +83,8 @@ type trackedFutures: TrackedFutures # Tracks futures of blockexc tasks blockexcRunning: bool # Indicates if the blockexc task is running pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved - peersPerRequest: int # Max number of peers to request from wallet*: WalletRef # Nitro wallet for micropayments pricing*: ?Pricing # Optional bandwidth pricing - blockFetchTimeout*: Duration # Timeout for fetching blocks over the network discovery*: DiscoveryEngine advertiser*: Advertiser @@ -100,124 +93,147 @@ type price*: UInt256 # attach task scheduler to engine -proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe.} = - b.taskQueue.pushOrUpdateNoWait(task).isOk() +proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe.} = + self.taskQueue.pushOrUpdateNoWait(task).isOk() -proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).} +proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} -proc start*(b: 
BlockExcEngine) {.async.} = +proc start*(self: BlockExcEngine) {.async.} = ## Start the blockexc task ## - await b.discovery.start() - await b.advertiser.start() + await self.discovery.start() + await self.advertiser.start() - trace "Blockexc starting with concurrent tasks", tasks = b.concurrentTasks - if b.blockexcRunning: + trace "Blockexc starting with concurrent tasks", tasks = self.concurrentTasks + if self.blockexcRunning: warn "Starting blockexc twice" return - b.blockexcRunning = true - for i in 0 ..< b.concurrentTasks: - let fut = b.blockexcTaskRunner() - b.trackedFutures.track(fut) - asyncSpawn fut + self.blockexcRunning = true + for i in 0 ..< self.concurrentTasks: + let fut = self.blockexcTaskRunner() + self.trackedFutures.track(fut) -proc stop*(b: BlockExcEngine) {.async.} = +proc stop*(self: BlockExcEngine) {.async.} = ## Stop the blockexc blockexc ## - await b.discovery.stop() - await b.advertiser.stop() + await self.trackedFutures.cancelTracked() + await self.network.stop() + await self.discovery.stop() + await self.advertiser.stop() trace "NetworkStore stop" - if not b.blockexcRunning: + if not self.blockexcRunning: warn "Stopping blockexc without starting it" return - b.blockexcRunning = false - await b.trackedFutures.cancelTracked() + self.blockexcRunning = false trace "NetworkStore stopped" proc sendWantHave( - b: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx] + self: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx] ): Future[void] {.async.} = for p in peers: let toAsk = addresses.filterIt(it notin p.peerHave) trace "Sending wantHave request", toAsk, peer = p.id - await b.network.request.sendWantList(p.id, toAsk, wantType = WantType.WantHave) + await self.network.request.sendWantList(p.id, toAsk, wantType = WantType.WantHave) codex_block_exchange_want_have_lists_sent.inc() proc sendWantBlock( - b: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx + self: BlockExcEngine, 
addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx ): Future[void] {.async.} = trace "Sending wantBlock request to", addresses, peer = blockPeer.id - await b.network.request.sendWantList( + await self.network.request.sendWantList( blockPeer.id, addresses, wantType = WantType.WantBlock ) # we want this remote to send us a block codex_block_exchange_want_block_lists_sent.inc() -proc monitorBlockHandle( - b: BlockExcEngine, handle: Future[Block], address: BlockAddress, peerId: PeerId -) {.async.} = +proc randomPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx = + Rng.instance.sample(peers) + +proc downloadInternal( + self: BlockExcEngine, address: BlockAddress +) {.async: (raises: []).} = + logScope: + address = address + + let handle = self.pendingBlocks.getWantHandle(address) + trace "Downloading block" try: - discard await handle + while address in self.pendingBlocks: + logScope: + retries = self.pendingBlocks.retries(address) + interval = self.pendingBlocks.retryInterval + + if self.pendingBlocks.retriesExhausted(address): + trace "Error retries exhausted" + handle.fail(newException(RetriesExhaustedError, "Error retries exhausted")) + break + + trace "Running retry handle" + let peers = self.peers.getPeersForBlock(address) + logScope: + peersWith = peers.with.len + peersWithout = peers.without.len + + trace "Peers for block" + if peers.with.len > 0: + self.pendingBlocks.setInFlight(address, true) + await self.sendWantBlock(@[address], peers.with.randomPeer) + else: + self.pendingBlocks.setInFlight(address, false) + if peers.without.len > 0: + await self.sendWantHave(@[address], peers.without) + self.discovery.queueFindBlocksReq(@[address.cidOrTreeCid]) + + await (handle or sleepAsync(self.pendingBlocks.retryInterval)) + self.pendingBlocks.decRetries(address) + + if handle.finished: + trace "Handle for block finished", failed = handle.failed + break except CancelledError as exc: - trace "Block handle cancelled", address, peerId + trace "Block download 
cancelled" + if not handle.finished: + await handle.cancelAndWait() except CatchableError as exc: - warn "Error block handle, disconnecting peer", address, exc = exc.msg, peerId - - # TODO: really, this is just a quick and dirty way of - # preventing hitting the same "bad" peer every time, however, - # we might as well discover this on or next iteration, so - # it doesn't mean that we're never talking to this peer again. - # TODO: we need a lot more work around peer selection and - # prioritization - - # drop unresponsive peer - await b.network.switch.disconnect(peerId) - b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid]) - -proc pickPseudoRandom( - address: BlockAddress, peers: seq[BlockExcPeerCtx] -): BlockExcPeerCtx = - return peers[hash(address) mod peers.len] + warn "Error downloadloading block", exc = exc.msg + if not handle.finished: + handle.fail(exc) + finally: + self.pendingBlocks.setInFlight(address, false) proc requestBlock*( - b: BlockExcEngine, address: BlockAddress -): Future[?!Block] {.async.} = - let blockFuture = b.pendingBlocks.getWantHandle(address, b.blockFetchTimeout) + self: BlockExcEngine, address: BlockAddress +): Future[?!Block] {.async: (raises: [CancelledError]).} = + if address notin self.pendingBlocks: + self.trackedFutures.track(self.downloadInternal(address)) - if not b.pendingBlocks.isInFlight(address): - let peers = b.peers.getPeersForBlock(address) - - if peers.with.len == 0: - b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid]) - else: - let selected = pickPseudoRandom(address, peers.with) - asyncSpawn b.monitorBlockHandle(blockFuture, address, selected.id) - b.pendingBlocks.setInFlight(address) - await b.sendWantBlock(@[address], selected) - - await b.sendWantHave(@[address], peers.without) - - # Don't let timeouts bubble up. We can't be too broad here or we break - # cancellations. 
try: - success await blockFuture - except AsyncTimeoutError as err: + let handle = self.pendingBlocks.getWantHandle(address) + success await handle + except CancelledError as err: + warn "Block request cancelled", address + raise err + except CatchableError as err: + error "Block request failed", address, err = err.msg failure err -proc requestBlock*(b: BlockExcEngine, cid: Cid): Future[?!Block] = - b.requestBlock(BlockAddress.init(cid)) +proc requestBlock*( + self: BlockExcEngine, cid: Cid +): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} = + self.requestBlock(BlockAddress.init(cid)) proc blockPresenceHandler*( - b: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence] + self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence] ) {.async.} = + trace "Received block presence from peer", peer, blocks = blocks.mapIt($it) let - peerCtx = b.peers.get(peer) - wantList = toSeq(b.pendingBlocks.wantList) + peerCtx = self.peers.get(peer) + ourWantList = toSeq(self.pendingBlocks.wantList) if peerCtx.isNil: return @@ -228,82 +244,99 @@ proc blockPresenceHandler*( let peerHave = peerCtx.peerHave - dontWantCids = peerHave.filterIt(it notin wantList) + dontWantCids = peerHave.filterIt(it notin ourWantList) if dontWantCids.len > 0: peerCtx.cleanPresence(dontWantCids) - let wantCids = wantList.filterIt(it in peerHave) + let ourWantCids = ourWantList.filter do(address: BlockAddress) -> bool: + if address in peerHave and not self.pendingBlocks.retriesExhausted(address) and + not self.pendingBlocks.isInFlight(address): + self.pendingBlocks.setInFlight(address, true) + self.pendingBlocks.decRetries(address) + true + else: + false - if wantCids.len > 0: - trace "Peer has blocks in our wantList", peer, wants = wantCids - await b.sendWantBlock(wantCids, peerCtx) + if ourWantCids.len > 0: + trace "Peer has blocks in our wantList", peer, wants = ourWantCids + await self.sendWantBlock(ourWantCids, peerCtx) - # if none of the connected peers report our wants in 
their have list, - # fire up discovery - b.discovery.queueFindBlocksReq( - toSeq(b.pendingBlocks.wantListCids).filter do(cid: Cid) -> bool: - not b.peers.anyIt(cid in it.peerHaveCids) - ) - -proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = +proc scheduleTasks(self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = let cids = blocksDelivery.mapIt(it.blk.cid) # schedule any new peers to provide blocks to - for p in b.peers: + for p in self.peers: for c in cids: # for each cid # schedule a peer if it wants at least one cid # and we have it in our local store if c in p.peerWantsCids: - if await (c in b.localStore): - if b.scheduleTask(p): + if await (c in self.localStore): + if self.scheduleTask(p): trace "Task scheduled for peer", peer = p.id else: warn "Unable to schedule task for peer", peer = p.id break # do next peer -proc cancelBlocks(b: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} = +proc cancelBlocks(self: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} = ## Tells neighboring peers that we're no longer interested in a block. 
- trace "Sending block request cancellations to peers", - addrs, peers = b.peers.mapIt($it.id) + ## - let failed = ( - await allFinished( - b.peers.mapIt( - b.network.request.sendWantCancellations(peer = it.id, addresses = addrs) + if self.peers.len == 0: + return + + trace "Sending block request cancellations to peers", + addrs, peers = self.peers.peerIds + + proc mapPeers(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} = + let blocks = addrs.filter do(a: BlockAddress) -> bool: + a in peerCtx.blocks + + if blocks.len > 0: + trace "Sending block request cancellations to peer", peer = peerCtx.id, blocks + await self.network.request.sendWantCancellations( + peer = peerCtx.id, addresses = blocks ) - ) - ).filterIt(it.failed) + peerCtx.cleanPresence(addrs) + peerCtx + + let failed = (await allFinished(map(toSeq(self.peers.peers.values), mapPeers))).filterIt( + it.failed + ) if failed.len > 0: warn "Failed to send block request cancellations to peers", peers = failed.len + else: + trace "Block request cancellations sent to peers", peers = self.peers.len -proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = - b.pendingBlocks.resolve(blocksDelivery) - await b.scheduleTasks(blocksDelivery) - await b.cancelBlocks(blocksDelivery.mapIt(it.address)) +proc resolveBlocks*( + self: BlockExcEngine, blocksDelivery: seq[BlockDelivery] +) {.async.} = + self.pendingBlocks.resolve(blocksDelivery) + await self.scheduleTasks(blocksDelivery) + await self.cancelBlocks(blocksDelivery.mapIt(it.address)) -proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} = - await b.resolveBlocks( +proc resolveBlocks*(self: BlockExcEngine, blocks: seq[Block]) {.async.} = + await self.resolveBlocks( blocks.mapIt( BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)) ) ) proc payForBlocks( - engine: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery] + self: BlockExcEngine, peer: BlockExcPeerCtx, 
blocksDelivery: seq[BlockDelivery] ) {.async.} = let - sendPayment = engine.network.request.sendPayment + sendPayment = self.network.request.sendPayment price = peer.price(blocksDelivery.mapIt(it.address)) - if payment =? engine.wallet.pay(peer, price): + if payment =? self.wallet.pay(peer, price): trace "Sending payment for blocks", price, len = blocksDelivery.len await sendPayment(peer.id, payment) -proc validateBlockDelivery(b: BlockExcEngine, bd: BlockDelivery): ?!void = - if bd.address notin b.pendingBlocks: +proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void = + if bd.address notin self.pendingBlocks: return failure("Received block is not currently a pending block") if bd.address.leaf: @@ -333,7 +366,7 @@ proc validateBlockDelivery(b: BlockExcEngine, bd: BlockDelivery): ?!void = return success() proc blocksDeliveryHandler*( - b: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery] + self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery] ) {.async.} = trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address)) @@ -343,11 +376,11 @@ proc blocksDeliveryHandler*( peer = peer address = bd.address - if err =? b.validateBlockDelivery(bd).errorOption: + if err =? self.validateBlockDelivery(bd).errorOption: warn "Block validation failed", msg = err.msg continue - if err =? (await b.localStore.putBlock(bd.blk)).errorOption: + if err =? (await self.localStore.putBlock(bd.blk)).errorOption: error "Unable to store block", err = err.msg continue @@ -356,7 +389,7 @@ proc blocksDeliveryHandler*( error "Proof expected for a leaf block delivery" continue if err =? 
( - await b.localStore.putCidAndProof( + await self.localStore.putCidAndProof( bd.address.treeCid, bd.address.index, bd.blk.cid, proof ) ).errorOption: @@ -365,18 +398,22 @@ proc blocksDeliveryHandler*( validatedBlocksDelivery.add(bd) - await b.resolveBlocks(validatedBlocksDelivery) + await self.resolveBlocks(validatedBlocksDelivery) codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64) - let peerCtx = b.peers.get(peer) + let peerCtx = self.peers.get(peer) if peerCtx != nil: - await b.payForBlocks(peerCtx, blocksDelivery) + await self.payForBlocks(peerCtx, blocksDelivery) ## shouldn't we remove them from the want-list instead of this: peerCtx.cleanPresence(blocksDelivery.mapIt(it.address)) -proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.async.} = - let peerCtx = b.peers.get(peer) +proc wantListHandler*( + self: BlockExcEngine, peer: PeerId, wantList: WantList +) {.async.} = + trace "Received want list from peer", peer, wantList = wantList.entries.len + + let peerCtx = self.peers.get(peer) if peerCtx.isNil: return @@ -395,9 +432,14 @@ proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.asy if idx < 0: # Adding new entry to peer wants let - have = await e.address in b.localStore - price = @(b.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) + have = await e.address in self.localStore + price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) + if e.cancel: + trace "Received cancelation for untracked block, skipping", address = e.address + continue + + trace "Processing want list entry", wantList = $e case e.wantType of WantType.WantHave: if have: @@ -413,7 +455,6 @@ proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.asy address: e.address, `type`: BlockPresenceType.DontHave, price: price ) ) - peerCtx.peerWants.add(e) codex_block_exchange_want_have_lists_received.inc() of WantType.WantBlock: @@ -425,73 +466,76 @@ proc wantListHandler*(b: 
BlockExcEngine, peer: PeerId, wantList: WantList) {.asy if e.cancel: trace "Canceling want for block", address = e.address peerCtx.peerWants.del(idx) + trace "Canceled block request", address = e.address, len = peerCtx.peerWants.len else: + if e.wantType == WantType.WantBlock: + schedulePeer = true # peer might want to ask for the same cid with # different want params trace "Updating want for block", address = e.address peerCtx.peerWants[idx] = e # update entry + trace "Updated block request", address = e.address, len = peerCtx.peerWants.len if presence.len > 0: trace "Sending presence to remote", items = presence.mapIt($it).join(",") - await b.network.request.sendPresence(peer, presence) + await self.network.request.sendPresence(peer, presence) - if schedulePeer: - if not b.scheduleTask(peerCtx): - warn "Unable to schedule task for peer", peer + if schedulePeer and not self.scheduleTask(peerCtx): + warn "Unable to schedule task for peer", peer -proc accountHandler*(engine: BlockExcEngine, peer: PeerId, account: Account) {.async.} = - let context = engine.peers.get(peer) +proc accountHandler*(self: BlockExcEngine, peer: PeerId, account: Account) {.async.} = + let context = self.peers.get(peer) if context.isNil: return context.account = account.some proc paymentHandler*( - engine: BlockExcEngine, peer: PeerId, payment: SignedState + self: BlockExcEngine, peer: PeerId, payment: SignedState ) {.async.} = trace "Handling payments", peer - without context =? engine.peers.get(peer).option and account =? context.account: + without context =? self.peers.get(peer).option and account =? context.account: trace "No context or account for peer", peer return if channel =? 
context.paymentChannel: let sender = account.address - discard engine.wallet.acceptPayment(channel, Asset, sender, payment) + discard self.wallet.acceptPayment(channel, Asset, sender, payment) else: - context.paymentChannel = engine.wallet.acceptChannel(payment).option + context.paymentChannel = self.wallet.acceptChannel(payment).option -proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} = +proc setupPeer*(self: BlockExcEngine, peer: PeerId) {.async.} = ## Perform initial setup, such as want ## list exchange ## trace "Setting up peer", peer - if peer notin b.peers: + if peer notin self.peers: trace "Setting up new peer", peer - b.peers.add(BlockExcPeerCtx(id: peer)) - trace "Added peer", peers = b.peers.len + self.peers.add(BlockExcPeerCtx(id: peer)) + trace "Added peer", peers = self.peers.len # broadcast our want list, the other peer will do the same - if b.pendingBlocks.wantListLen > 0: + if self.pendingBlocks.wantListLen > 0: trace "Sending our want list to a peer", peer - let cids = toSeq(b.pendingBlocks.wantList) - await b.network.request.sendWantList(peer, cids, full = true) + let cids = toSeq(self.pendingBlocks.wantList) + await self.network.request.sendWantList(peer, cids, full = true) - if address =? b.pricing .? address: - await b.network.request.sendAccount(peer, Account(address: address)) + if address =? self.pricing .? 
address: + await self.network.request.sendAccount(peer, Account(address: address)) -proc dropPeer*(b: BlockExcEngine, peer: PeerId) = +proc dropPeer*(self: BlockExcEngine, peer: PeerId) = ## Cleanup disconnected peer ## trace "Dropping peer", peer # drop the peer from the peers table - b.peers.remove(peer) + self.peers.remove(peer) -proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = +proc taskHandler*(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = # Send to the peer blocks he wants to get, # if they present in our local store @@ -514,14 +558,14 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} = if e.address.leaf: - (await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map( + (await self.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map( (blkAndProof: (Block, CodexProof)) => BlockDelivery( address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some ) ) else: - (await b.localStore.getBlock(e.address)).map( + (await self.localStore.getBlock(e.address)).map( (blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none) ) @@ -540,22 +584,22 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = if blocksDelivery.len > 0: trace "Sending blocks to peer", peer = task.id, blocks = (blocksDelivery.mapIt(it.address)) - await b.network.request.sendBlocksDelivery(task.id, blocksDelivery) + await self.network.request.sendBlocksDelivery(task.id, blocksDelivery) codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64) task.peerWants.keepItIf(it.address notin successAddresses) -proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).} = +proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} = ## process tasks ## trace "Starting blockexc task runner" - while b.blockexcRunning: + while self.blockexcRunning: 
try: - let peerCtx = await b.taskQueue.pop() + let peerCtx = await self.taskQueue.pop() - await b.taskHandler(peerCtx) + await self.taskHandler(peerCtx) except CancelledError: break # do not propagate as blockexcTaskRunner was asyncSpawned except CatchableError as e: @@ -573,55 +617,51 @@ proc new*( peerStore: PeerCtxStore, pendingBlocks: PendingBlocksManager, concurrentTasks = DefaultConcurrentTasks, - peersPerRequest = DefaultMaxPeersPerRequest, - blockFetchTimeout = DefaultBlockTimeout, ): BlockExcEngine = ## Create new block exchange engine instance ## - let engine = BlockExcEngine( + let self = BlockExcEngine( localStore: localStore, peers: peerStore, pendingBlocks: pendingBlocks, - peersPerRequest: peersPerRequest, network: network, wallet: wallet, concurrentTasks: concurrentTasks, - trackedFutures: TrackedFutures.new(), + trackedFutures: TrackedFutures(), taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize), discovery: discovery, advertiser: advertiser, - blockFetchTimeout: blockFetchTimeout, ) proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = if event.kind == PeerEventKind.Joined: - await engine.setupPeer(peerId) + await self.setupPeer(peerId) else: - engine.dropPeer(peerId) + self.dropPeer(peerId) if not isNil(network.switch): network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} = - engine.wantListHandler(peer, wantList) + self.wantListHandler(peer, wantList) proc blockPresenceHandler( peer: PeerId, presence: seq[BlockPresence] ): Future[void] {.gcsafe.} = - engine.blockPresenceHandler(peer, presence) + self.blockPresenceHandler(peer, presence) proc blocksDeliveryHandler( peer: PeerId, blocksDelivery: seq[BlockDelivery] ): Future[void] {.gcsafe.} = - engine.blocksDeliveryHandler(peer, blocksDelivery) + self.blocksDeliveryHandler(peer, 
blocksDelivery) proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} = - engine.accountHandler(peer, account) + self.accountHandler(peer, account) proc paymentHandler(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} = - engine.paymentHandler(peer, payment) + self.paymentHandler(peer, payment) network.handlers = BlockExcHandlers( onWantList: blockWantListHandler, @@ -631,4 +671,4 @@ proc new*( onPayment: paymentHandler, ) - return engine + return self diff --git a/codex/blockexchange/engine/pendingblocks.nim b/codex/blockexchange/engine/pendingblocks.nim index 3b69e2d2..f169f744 100644 --- a/codex/blockexchange/engine/pendingblocks.nim +++ b/codex/blockexchange/engine/pendingblocks.nim @@ -7,13 +7,11 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. +{.push raises: [].} + import std/tables import std/monotimes - -import pkg/upraises - -push: - {.upraises: [].} +import std/strutils import pkg/chronos import pkg/libp2p @@ -34,66 +32,76 @@ declareGauge( codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us" ) -const DefaultBlockTimeout* = 10.minutes +const + DefaultBlockRetries* = 3000 + DefaultRetryInterval* = 500.millis type + RetriesExhaustedError* = object of CatchableError + BlockHandle* = Future[Block].Raising([CancelledError, RetriesExhaustedError]) + BlockReq* = object - handle*: Future[Block] + handle*: BlockHandle inFlight*: bool + blockRetries*: int startTime*: int64 PendingBlocksManager* = ref object of RootObj + blockRetries*: int = DefaultBlockRetries + retryInterval*: Duration = DefaultRetryInterval blocks*: Table[BlockAddress, BlockReq] # pending Block requests proc updatePendingBlockGauge(p: PendingBlocksManager) = codex_block_exchange_pending_block_requests.set(p.blocks.len.int64) proc getWantHandle*( - p: PendingBlocksManager, - address: BlockAddress, - timeout = DefaultBlockTimeout, - inFlight = false, -): Future[Block] {.async.} = + 
self: PendingBlocksManager, address: BlockAddress, inFlight = false +): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} = ## Add an event for a block ## - try: - if address notin p.blocks: - p.blocks[address] = BlockReq( - handle: newFuture[Block]("pendingBlocks.getWantHandle"), - inFlight: inFlight, - startTime: getMonoTime().ticks, - ) + self.blocks.withValue(address, blk): + return blk[].handle + do: + let blk = BlockReq( + handle: newFuture[Block]("pendingBlocks.getWantHandle"), + inFlight: inFlight, + blockRetries: self.blockRetries, + startTime: getMonoTime().ticks, + ) + self.blocks[address] = blk + let handle = blk.handle - p.updatePendingBlockGauge() - return await p.blocks[address].handle.wait(timeout) - except CancelledError as exc: - trace "Blocks cancelled", exc = exc.msg, address - raise exc - except CatchableError as exc: - error "Pending WANT failed or expired", exc = exc.msg - # no need to cancel, it is already cancelled by wait() - raise exc - finally: - p.blocks.del(address) - p.updatePendingBlockGauge() + proc cleanUpBlock(data: pointer) {.raises: [].} = + self.blocks.del(address) + self.updatePendingBlockGauge() + + handle.addCallback(cleanUpBlock) + handle.cancelCallback = proc(data: pointer) {.raises: [].} = + if not handle.finished: + handle.removeCallback(cleanUpBlock) + cleanUpBlock(nil) + + self.updatePendingBlockGauge() + return handle proc getWantHandle*( - p: PendingBlocksManager, cid: Cid, timeout = DefaultBlockTimeout, inFlight = false -): Future[Block] = - p.getWantHandle(BlockAddress.init(cid), timeout, inFlight) + self: PendingBlocksManager, cid: Cid, inFlight = false +): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} = + self.getWantHandle(BlockAddress.init(cid), inFlight) proc resolve*( - p: PendingBlocksManager, blocksDelivery: seq[BlockDelivery] + self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery] ) {.gcsafe, raises: [].} = ## Resolve 
pending blocks ## for bd in blocksDelivery: - p.blocks.withValue(bd.address, blockReq): - if not blockReq.handle.finished: + self.blocks.withValue(bd.address, blockReq): + if not blockReq[].handle.finished: + trace "Resolving pending block", address = bd.address let - startTime = blockReq.startTime + startTime = blockReq[].startTime stopTime = getMonoTime().ticks retrievalDurationUs = (stopTime - startTime) div 1000 @@ -106,52 +114,70 @@ proc resolve*( else: trace "Block handle already finished", address = bd.address -proc setInFlight*(p: PendingBlocksManager, address: BlockAddress, inFlight = true) = +func retries*(self: PendingBlocksManager, address: BlockAddress): int = + self.blocks.withValue(address, pending): + result = pending[].blockRetries + do: + result = 0 + +func decRetries*(self: PendingBlocksManager, address: BlockAddress) = + self.blocks.withValue(address, pending): + pending[].blockRetries -= 1 + +func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool = + self.blocks.withValue(address, pending): + result = pending[].blockRetries <= 0 + +func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) = ## Set inflight status for a block ## - p.blocks.withValue(address, pending): + self.blocks.withValue(address, pending): pending[].inFlight = inFlight -proc isInFlight*(p: PendingBlocksManager, address: BlockAddress): bool = +func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool = ## Check if a block is in flight ## - p.blocks.withValue(address, pending): + self.blocks.withValue(address, pending): result = pending[].inFlight -proc contains*(p: PendingBlocksManager, cid: Cid): bool = - BlockAddress.init(cid) in p.blocks +func contains*(self: PendingBlocksManager, cid: Cid): bool = + BlockAddress.init(cid) in self.blocks -proc contains*(p: PendingBlocksManager, address: BlockAddress): bool = - address in p.blocks +func contains*(self: PendingBlocksManager, address: BlockAddress): bool = + 
address in self.blocks -iterator wantList*(p: PendingBlocksManager): BlockAddress = - for a in p.blocks.keys: +iterator wantList*(self: PendingBlocksManager): BlockAddress = + for a in self.blocks.keys: yield a -iterator wantListBlockCids*(p: PendingBlocksManager): Cid = - for a in p.blocks.keys: +iterator wantListBlockCids*(self: PendingBlocksManager): Cid = + for a in self.blocks.keys: if not a.leaf: yield a.cid -iterator wantListCids*(p: PendingBlocksManager): Cid = +iterator wantListCids*(self: PendingBlocksManager): Cid = var yieldedCids = initHashSet[Cid]() - for a in p.blocks.keys: + for a in self.blocks.keys: let cid = a.cidOrTreeCid if cid notin yieldedCids: yieldedCids.incl(cid) yield cid -iterator wantHandles*(p: PendingBlocksManager): Future[Block] = - for v in p.blocks.values: +iterator wantHandles*(self: PendingBlocksManager): Future[Block] = + for v in self.blocks.values: yield v.handle -proc wantListLen*(p: PendingBlocksManager): int = - p.blocks.len +proc wantListLen*(self: PendingBlocksManager): int = + self.blocks.len -func len*(p: PendingBlocksManager): int = - p.blocks.len +func len*(self: PendingBlocksManager): int = + self.blocks.len -func new*(T: type PendingBlocksManager): PendingBlocksManager = - PendingBlocksManager() +func new*( + T: type PendingBlocksManager, + retries = DefaultBlockRetries, + interval = DefaultRetryInterval, +): PendingBlocksManager = + PendingBlocksManager(blockRetries: retries, retryInterval: interval) diff --git a/codex/blockexchange/network/network.nim b/codex/blockexchange/network/network.nim index ecb72890..daf358de 100644 --- a/codex/blockexchange/network/network.nim +++ b/codex/blockexchange/network/network.nim @@ -21,17 +21,18 @@ import ../../blocktype as bt import ../../logutils import ../protobuf/blockexc as pb import ../protobuf/payments +import ../../utils/trackedfutures import ./networkpeer -export network, payments +export networkpeer, payments logScope: topics = "codex blockexcnetwork" const Codec* = 
"/codex/blockexc/1.0.0" - MaxInflight* = 100 + DefaultMaxInflight* = 100 type WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} @@ -82,6 +83,8 @@ type request*: BlockExcRequest getConn: ConnProvider inflightSema: AsyncSemaphore + maxInflight: int = DefaultMaxInflight + trackedFutures*: TrackedFutures = TrackedFutures() proc peerId*(b: BlockExcNetwork): PeerId = ## Return peer id @@ -220,23 +223,25 @@ proc handlePayment( if not network.handlers.onPayment.isNil: await network.handlers.onPayment(peer.id, payment) -proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.raises: [].} = +proc rpcHandler( + b: BlockExcNetwork, peer: NetworkPeer, msg: Message +) {.async: (raises: [CatchableError]).} = ## handle rpc messages ## if msg.wantList.entries.len > 0: - asyncSpawn b.handleWantList(peer, msg.wantList) + b.trackedFutures.track(b.handleWantList(peer, msg.wantList)) if msg.payload.len > 0: - asyncSpawn b.handleBlocksDelivery(peer, msg.payload) + b.trackedFutures.track(b.handleBlocksDelivery(peer, msg.payload)) if msg.blockPresences.len > 0: - asyncSpawn b.handleBlockPresence(peer, msg.blockPresences) + b.trackedFutures.track(b.handleBlockPresence(peer, msg.blockPresences)) if account =? Account.init(msg.account): - asyncSpawn b.handleAccount(peer, account) + b.trackedFutures.track(b.handleAccount(peer, account)) if payment =? 
SignedState.init(msg.payment): - asyncSpawn b.handlePayment(peer, payment) + b.trackedFutures.track(b.handlePayment(peer, payment)) proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = ## Creates or retrieves a BlockExcNetwork Peer @@ -247,6 +252,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} = try: + trace "Getting new connection stream", peer return await b.switch.dial(peer, Codec) except CancelledError as error: raise error @@ -256,8 +262,10 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = if not isNil(b.getConn): getConn = b.getConn - let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async.} = - b.rpcHandler(p, msg) + let rpcHandler = proc( + p: NetworkPeer, msg: Message + ) {.async: (raises: [CatchableError]).} = + await b.rpcHandler(p, msg) # create new pubsub peer let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler) @@ -282,48 +290,61 @@ proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} = trace "Skipping dialing self", peer = peer.peerId return + if peer.peerId in b.peers: + trace "Already connected to peer", peer = peer.peerId + return + await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address)) proc dropPeer*(b: BlockExcNetwork, peer: PeerId) = ## Cleanup disconnected peer ## + trace "Dropping peer", peer b.peers.del(peer) -method init*(b: BlockExcNetwork) = +method init*(self: BlockExcNetwork) = ## Perform protocol initialization ## proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = if event.kind == PeerEventKind.Joined: - b.setupPeer(peerId) + self.setupPeer(peerId) else: - b.dropPeer(peerId) + self.dropPeer(peerId) - b.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) - b.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) + self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) + 
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) - proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} = + proc handler(conn: Connection, proto: string) {.async.} = let peerId = conn.peerId - let blockexcPeer = b.getOrCreatePeer(peerId) + let blockexcPeer = self.getOrCreatePeer(peerId) await blockexcPeer.readLoop(conn) # attach read loop - b.handler = handle - b.codec = Codec + self.handler = handler + self.codec = Codec + +proc stop*(self: BlockExcNetwork) {.async: (raises: []).} = + await self.trackedFutures.cancelTracked() proc new*( T: type BlockExcNetwork, switch: Switch, connProvider: ConnProvider = nil, - maxInflight = MaxInflight, + maxInflight = DefaultMaxInflight, ): BlockExcNetwork = ## Create a new BlockExcNetwork instance ## let self = BlockExcNetwork( - switch: switch, getConn: connProvider, inflightSema: newAsyncSemaphore(maxInflight) + switch: switch, + getConn: connProvider, + inflightSema: newAsyncSemaphore(maxInflight), + maxInflight: maxInflight, ) + self.maxIncomingStreams = self.maxInflight + proc sendWantList( id: PeerId, cids: seq[BlockAddress], diff --git a/codex/blockexchange/network/networkpeer.nim b/codex/blockexchange/network/networkpeer.nim index 90c538ea..4a100340 100644 --- a/codex/blockexchange/network/networkpeer.nim +++ b/codex/blockexchange/network/networkpeer.nim @@ -22,39 +22,56 @@ import ../../logutils logScope: topics = "codex blockexcnetworkpeer" +const DefaultYieldInterval = 50.millis + type ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.} - RPCHandler* = proc(peer: NetworkPeer, msg: Message): Future[void] {.gcsafe.} + RPCHandler* = proc( + peer: NetworkPeer, msg: Message + ): Future[void].Raising(CatchableError) {.gcsafe.} NetworkPeer* = ref object of RootObj id*: PeerId handler*: RPCHandler sendConn: Connection getConn: ConnProvider + yieldInterval*: Duration = DefaultYieldInterval proc connected*(b: NetworkPeer): bool = not (isNil(b.sendConn)) and not 
(b.sendConn.closed or b.sendConn.atEof) proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} = if isNil(conn): + trace "No connection to read from", peer = b.id return + trace "Attaching read loop", peer = b.id, connId = conn.oid try: + var nextYield = Moment.now() + b.yieldInterval while not conn.atEof or not conn.closed: + if Moment.now() > nextYield: + nextYield = Moment.now() + b.yieldInterval + trace "Yielding in read loop", + peer = b.id, nextYield = nextYield, interval = b.yieldInterval + await sleepAsync(10.millis) + let data = await conn.readLp(MaxMessageSize.int) msg = Message.protobufDecode(data).mapFailure().tryGet() + trace "Received message", peer = b.id, connId = conn.oid await b.handler(b, msg) except CancelledError: trace "Read loop cancelled" except CatchableError as err: warn "Exception in blockexc read loop", msg = err.msg finally: + trace "Detaching read loop", peer = b.id, connId = conn.oid await conn.close() proc connect*(b: NetworkPeer): Future[Connection] {.async.} = if b.connected: + trace "Already connected", peer = b.id, connId = b.sendConn.oid return b.sendConn b.sendConn = await b.getConn() @@ -68,17 +85,9 @@ proc send*(b: NetworkPeer, msg: Message) {.async.} = warn "Unable to get send connection for peer message not sent", peer = b.id return + trace "Sending message", peer = b.id, connId = conn.oid await conn.writeLp(protobufEncode(msg)) -proc broadcast*(b: NetworkPeer, msg: Message) = - proc sendAwaiter() {.async.} = - try: - await b.send(msg) - except CatchableError as exc: - warn "Exception broadcasting message to peer", peer = b.id, exc = exc.msg - - asyncSpawn sendAwaiter() - func new*( T: type NetworkPeer, peer: PeerId, diff --git a/codex/blockexchange/peers/peerctxstore.nim b/codex/blockexchange/peers/peerctxstore.nim index 7cf167b4..739d92b5 100644 --- a/codex/blockexchange/peers/peerctxstore.nim +++ b/codex/blockexchange/peers/peerctxstore.nim @@ -10,6 +10,7 @@ import std/sequtils import std/tables import std/algorithm 
+import std/sequtils import pkg/upraises @@ -33,9 +34,7 @@ type PeerCtxStore* = ref object of RootObj peers*: OrderedTable[PeerId, BlockExcPeerCtx] - PeersForBlock* = object of RootObj - with*: seq[BlockExcPeerCtx] - without*: seq[BlockExcPeerCtx] + PeersForBlock* = tuple[with: seq[BlockExcPeerCtx], without: seq[BlockExcPeerCtx]] iterator items*(self: PeerCtxStore): BlockExcPeerCtx = for p in self.peers.values: @@ -47,6 +46,9 @@ proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool = a.anyIt(it.id == b) +func peerIds*(self: PeerCtxStore): seq[PeerId] = + toSeq(self.peers.keys) + func contains*(self: PeerCtxStore, peerId: PeerId): bool = peerId in self.peers @@ -75,7 +77,7 @@ func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid)) proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock = - var res = PeersForBlock() + var res: PeersForBlock = (@[], @[]) for peer in self: if peer.peerHave.anyIt(it == address): res.with.add(peer) diff --git a/codex/codex.nim b/codex/codex.nim index dc577373..b8905205 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -311,7 +311,7 @@ proc new*( bufferSize = (1024 * 64), maxRequestBodySize = int.high, ) - .expect("Should start rest server!") + .expect("Should create rest server!") switch.mount(network) diff --git a/codex/conf.nim b/codex/conf.nim index 2a859efb..986a53d6 100644 --- a/codex/conf.nim +++ b/codex/conf.nim @@ -51,8 +51,8 @@ export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig export ValidationGroups, MaxSlots export - DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockMaintenanceInterval, - DefaultNumberOfBlocksToMaintainPerInterval, DefaultRequestCacheSize + DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval, + DefaultRequestCacheSize type ThreadCount* = distinct Natural @@ -251,15 +251,15 @@ type desc: "Time interval in seconds - 
determines frequency of block " & "maintenance cycle: how often blocks are checked " & "for expiration and cleanup", - defaultValue: DefaultBlockMaintenanceInterval, - defaultValueDesc: $DefaultBlockMaintenanceInterval, + defaultValue: DefaultBlockInterval, + defaultValueDesc: $DefaultBlockInterval, name: "block-mi" .}: Duration blockMaintenanceNumberOfBlocks* {. desc: "Number of blocks to check every maintenance cycle", - defaultValue: DefaultNumberOfBlocksToMaintainPerInterval, - defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval, + defaultValue: DefaultNumBlocksPerInterval, + defaultValueDesc: $DefaultNumBlocksPerInterval, name: "block-mn" .}: int diff --git a/codex/logutils.nim b/codex/logutils.nim index b37f6952..e9604aba 100644 --- a/codex/logutils.nim +++ b/codex/logutils.nim @@ -152,7 +152,7 @@ proc formatTextLineSeq*(val: seq[string]): string = template formatIt*(format: LogFormat, T: typedesc, body: untyped) = # Provides formatters for logging with Chronicles for the given type and # `LogFormat`. - # NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overriddden + # NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overridden # since the base `setProperty` is generic using `auto` and conflicts with # providing a generic `seq` and `Option` override. 
when format == LogFormat.json: diff --git a/codex/node.nim b/codex/node.nim index e1647f3e..b0f66c90 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -45,13 +45,14 @@ import ./utils import ./errors import ./logutils import ./utils/asynciter +import ./utils/trackedfutures export logutils logScope: topics = "codex node" -const FetchBatch = 200 +const DefaultFetchBatch = 10 type Contracts* = @@ -72,6 +73,7 @@ type clock*: Clock storage*: Contracts taskpool: Taskpool + trackedFutures: TrackedFutures CodexNodeRef* = ref CodexNode @@ -163,8 +165,9 @@ proc fetchBatched*( self: CodexNodeRef, cid: Cid, iter: Iter[int], - batchSize = FetchBatch, + batchSize = DefaultFetchBatch, onBatch: BatchProc = nil, + fetchLocal = true, ): Future[?!void] {.async, gcsafe.} = ## Fetch blocks in batches of `batchSize` ## @@ -179,7 +182,9 @@ proc fetchBatched*( let blocks = collect: for i in 0 ..< batchSize: if not iter.finished: - self.networkStore.getBlock(BlockAddress.init(cid, iter.next())) + let address = BlockAddress.init(cid, iter.next()) + if not (await address in self.networkStore) or fetchLocal: + self.networkStore.getBlock(address) if blocksErr =? (await allFutureResult(blocks)).errorOption: return failure(blocksErr) @@ -188,21 +193,25 @@ proc fetchBatched*( batchErr =? 
(await onBatch(blocks.mapIt(it.read.get))).errorOption: return failure(batchErr) + await sleepAsync(1.millis) + success() proc fetchBatched*( self: CodexNodeRef, manifest: Manifest, - batchSize = FetchBatch, + batchSize = DefaultFetchBatch, onBatch: BatchProc = nil, + fetchLocal = true, ): Future[?!void] = ## Fetch manifest in batches of `batchSize` ## - trace "Fetching blocks in batches of", size = batchSize + trace "Fetching blocks in batches of", + size = batchSize, blocksCount = manifest.blocksCount let iter = Iter[int].new(0 ..< manifest.blocksCount) - self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch) + self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal) proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} = ## Streams the contents of a single block. @@ -223,35 +232,64 @@ proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async finally: await stream.pushEof() - asyncSpawn streamOneBlock() + self.trackedFutures.track(streamOneBlock()) LPStream(stream).success proc streamEntireDataset( - self: CodexNodeRef, manifest: Manifest, manifestCid: Cid + self: CodexNodeRef, + manifest: Manifest, + manifestCid: Cid, + prefetchBatch = DefaultFetchBatch, ): Future[?!LPStream] {.async.} = ## Streams the contents of the entire dataset described by the manifest. + ## Background jobs (erasure decoding and prefetching) will be cancelled when + ## the stream is closed. ## trace "Retrieving blocks from manifest", manifestCid + let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false)) + var jobs: seq[Future[void]] + if manifest.protected: # Retrieve, decode and save to the local store all EС groups - proc erasureJob(): Future[?!void] {.async.} = - # Spawn an erasure decoding job - let erasure = Erasure.new( - self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool - ) - without _ =? 
(await erasure.decode(manifest)), error: - error "Unable to erasure decode manifest", manifestCid, exc = error.msg - return failure(error) + proc erasureJob(): Future[void] {.async.} = + try: + # Spawn an erasure decoding job + let erasure = Erasure.new( + self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool + ) + without _ =? (await erasure.decode(manifest)), error: + error "Unable to erasure decode manifest", manifestCid, exc = error.msg + except CancelledError: + trace "Erasure job cancelled", manifestCid + except CatchableError as exc: + trace "Error erasure decoding manifest", manifestCid, exc = exc.msg - return success() + jobs.add(erasureJob()) - if err =? (await erasureJob()).errorOption: - return failure(err) + proc prefetch(): Future[void] {.async.} = + try: + if err =? + (await self.fetchBatched(manifest, prefetchBatch, fetchLocal = false)).errorOption: + error "Unable to fetch blocks", err = err.msg + except CancelledError: + trace "Prefetch job cancelled" + except CatchableError as exc: + error "Error fetching blocks", exc = exc.msg + + jobs.add(prefetch()) + + # Monitor stream completion and cancel background jobs when done + proc monitorStream() {.async.} = + try: + await stream.join() + finally: + await allFutures(jobs.mapIt(it.cancelAndWait)) + + self.trackedFutures.track(monitorStream()) - # Retrieve all blocks of the dataset sequentially from the local store or network trace "Creating store stream for manifest", manifestCid - LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success + stream.success proc retrieve*( self: CodexNodeRef, cid: Cid, local: bool = true @@ -758,6 +796,11 @@ proc start*(self: CodexNodeRef) {.async.} = proc stop*(self: CodexNodeRef) {.async.} = trace "Stopping node" + if not self.taskpool.isNil: + self.taskpool.shutdown() + + await self.trackedFutures.cancelTracked() + if not self.engine.isNil: await self.engine.stop() @@ -779,9 +822,6 @@ proc stop*(self: CodexNodeRef) {.async.} = if 
not self.networkStore.isNil: await self.networkStore.close - if not self.taskpool.isNil: - self.taskpool.shutdown() - proc new*( T: type CodexNodeRef, switch: Switch, @@ -803,4 +843,5 @@ proc new*( discovery: discovery, taskPool: taskpool, contracts: contracts, + trackedFutures: TrackedFutures(), ) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index e5c8d195..89dbe220 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -13,8 +13,8 @@ push: {.upraises: [].} import std/sequtils -import mimetypes -import os +import std/mimetypes +import std/os import pkg/questionable import pkg/questionable/results @@ -120,7 +120,7 @@ proc retrieveCid( await resp.finish() codex_api_downloads.inc() except CatchableError as exc: - warn "Excepting streaming blocks", exc = exc.msg + warn "Error streaming blocks", exc = exc.msg resp.status = Http500 return await resp.sendBody("") finally: diff --git a/codex/rng.nim b/codex/rng.nim index 9d82156e..866d65f8 100644 --- a/codex/rng.nim +++ b/codex/rng.nim @@ -55,6 +55,15 @@ proc sample*[T]( break +proc sample*[T]( + rng: Rng, sample: openArray[T], limit: int +): seq[T] {.raises: [Defect, RngSampleError].} = + if limit > sample.len: + raise newException(RngSampleError, "Limit cannot be larger than sample!") + + for _ in 0 ..< min(sample.len, limit): + result.add(rng.sample(sample, result)) + proc shuffle*[T](rng: Rng, a: var openArray[T]) = for i in countdown(a.high, 1): let j = rng.rand(i) diff --git a/codex/slots/builder/builder.nim b/codex/slots/builder/builder.nim index 74597ff1..30332f1c 100644 --- a/codex/slots/builder/builder.nim +++ b/codex/slots/builder/builder.nim @@ -189,7 +189,7 @@ proc getCellHashes*[T, H]( blkIdx = blkIdx pos = i - trace "Getting block CID for tree at index", index = blkIdx + trace "Getting block CID for tree at index" without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and digest =? 
tree.root, err: error "Failed to get block CID for tree at index", err = err.msg diff --git a/codex/stores/maintenance.nim b/codex/stores/maintenance.nim index e7ce1bdf..cced5da9 100644 --- a/codex/stores/maintenance.nim +++ b/codex/stores/maintenance.nim @@ -22,8 +22,8 @@ import ../logutils import ../systemclock const - DefaultBlockMaintenanceInterval* = 10.minutes - DefaultNumberOfBlocksToMaintainPerInterval* = 1000 + DefaultBlockInterval* = 10.minutes + DefaultNumBlocksPerInterval* = 1000 type BlockMaintainer* = ref object of RootObj repoStore: RepoStore diff --git a/codex/stores/networkstore.nim b/codex/stores/networkstore.nim index faee36e1..f94bca33 100644 --- a/codex/stores/networkstore.nim +++ b/codex/stores/networkstore.nim @@ -137,6 +137,14 @@ method hasBlock*(self: NetworkStore, cid: Cid): Future[?!bool] {.async.} = trace "Checking network store for block existence", cid return await self.localStore.hasBlock(cid) +method hasBlock*( + self: NetworkStore, tree: Cid, index: Natural +): Future[?!bool] {.async.} = + ## Check if the block exists in the blockstore + ## + trace "Checking network store for block existence", tree, index + return await self.localStore.hasBlock(tree, index) + method close*(self: NetworkStore): Future[void] {.async.} = ## Close the underlying local blockstore ## diff --git a/codex/stores/repostore/types.nim b/codex/stores/repostore/types.nim index 3d455d12..42f528e9 100644 --- a/codex/stores/repostore/types.nim +++ b/codex/stores/repostore/types.nim @@ -21,8 +21,8 @@ import ../../systemclock import ../../units const - DefaultBlockTtl* = 24.hours - DefaultQuotaBytes* = 8.GiBs + DefaultBlockTtl* = 30.days + DefaultQuotaBytes* = 20.GiBs type QuotaNotEnoughError* = object of CodexError diff --git a/codex/utils/asyncspawn.nim b/codex/utils/asyncspawn.nim deleted file mode 100644 index 95a9f014..00000000 --- a/codex/utils/asyncspawn.nim +++ /dev/null @@ -1,10 +0,0 @@ -import pkg/chronos - -proc asyncSpawn*(future: Future[void], ignore: 
type CatchableError) = - proc ignoringError() {.async.} = - try: - await future - except ignore: - discard - - asyncSpawn ignoringError() diff --git a/codex/utils/natutils.nim b/codex/utils/natutils.nim index 43909588..996d8dd0 100644 --- a/codex/utils/natutils.nim +++ b/codex/utils/natutils.nim @@ -1,6 +1,7 @@ {.push raises: [].} -import std/[tables, hashes], pkg/results, stew/shims/net as stewNet, chronos, chronicles +import + std/[tables, hashes], pkg/results, pkg/stew/shims/net as stewNet, chronos, chronicles import pkg/libp2p diff --git a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim index 904703a0..93704726 100644 --- a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim +++ b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim @@ -76,7 +76,7 @@ asyncchecksuite "Test Discovery Engine": ) await discoveryEngine.start() - await allFuturesThrowing(allFinished(wants)).wait(1.seconds) + await allFuturesThrowing(allFinished(wants)).wait(100.millis) await discoveryEngine.stop() test "Should queue discovery request": @@ -101,7 +101,7 @@ asyncchecksuite "Test Discovery Engine": await discoveryEngine.start() discoveryEngine.queueFindBlocksReq(@[blocks[0].cid]) - await want.wait(1.seconds) + await want.wait(100.millis) await discoveryEngine.stop() test "Should not request more than minPeersPerBlock": diff --git a/tests/codex/blockexchange/engine/testblockexc.nim b/tests/codex/blockexchange/engine/testblockexc.nim index aa15f795..0c250231 100644 --- a/tests/codex/blockexchange/engine/testblockexc.nim +++ b/tests/codex/blockexchange/engine/testblockexc.nim @@ -1,5 +1,6 @@ import std/sequtils import std/algorithm +import std/importutils import pkg/chronos import pkg/stew/byteutils @@ -20,7 +21,7 @@ asyncchecksuite "NetworkStore engine - 2 nodes": peerCtx1, peerCtx2: BlockExcPeerCtx pricing1, pricing2: Pricing blocks1, blocks2: seq[bt.Block] - pendingBlocks1, pendingBlocks2: 
seq[Future[bt.Block]] + pendingBlocks1, pendingBlocks2: seq[BlockHandle] setup: blocks1 = await makeRandomBlocks(datasetSize = 2048, blockSize = 256'nb) @@ -56,7 +57,7 @@ asyncchecksuite "NetworkStore engine - 2 nodes": nodeCmps2.switch.peerInfo.peerId, nodeCmps2.switch.peerInfo.addrs ) - await sleepAsync(1.seconds) # give some time to exchange lists + await sleepAsync(100.millis) # give some time to exchange lists peerCtx2 = nodeCmps1.peerStore.get(nodeCmps2.switch.peerInfo.peerId) peerCtx1 = nodeCmps2.peerStore.get(nodeCmps1.switch.peerInfo.peerId) @@ -75,7 +76,6 @@ asyncchecksuite "NetworkStore engine - 2 nodes": test "Should exchange blocks on connect": await allFuturesThrowing(allFinished(pendingBlocks1)).wait(10.seconds) - await allFuturesThrowing(allFinished(pendingBlocks2)).wait(10.seconds) check: @@ -178,7 +178,7 @@ asyncchecksuite "NetworkStore - multiple nodes": (await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet() await connectNodes(nodes) - await sleepAsync(1.seconds) + await sleepAsync(100.millis) await allFuturesThrowing(allFinished(pendingBlocks)) @@ -203,45 +203,9 @@ asyncchecksuite "NetworkStore - multiple nodes": (await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet() await connectNodes(nodes) - await sleepAsync(1.seconds) + await sleepAsync(100.millis) await allFuturesThrowing(allFinished(pendingBlocks1), allFinished(pendingBlocks2)) check pendingBlocks1.mapIt(it.read) == blocks[0 .. 3] check pendingBlocks2.mapIt(it.read) == blocks[12 .. 
15] - - test "Should actively cancel want-haves if block received from elsewhere": - let - # Peer wanting to download blocks - downloader = nodes[4] - # Bystander peer - gets block request but can't satisfy them - bystander = nodes[3] - # Holder of actual blocks - blockHolder = nodes[1] - - let aBlock = blocks[0] - (await blockHolder.engine.localStore.putBlock(aBlock)).tryGet() - - await connectNodes(@[downloader, bystander]) - # Downloader asks for block... - let blockRequest = downloader.engine.requestBlock(aBlock.cid) - - # ... and bystander learns that downloader wants it, but can't provide it. - check eventually( - bystander.engine.peers - .get(downloader.switch.peerInfo.peerId).peerWants - .filterIt(it.address == aBlock.address).len == 1 - ) - - # As soon as we connect the downloader to the blockHolder, the block should - # propagate to the downloader... - await connectNodes(@[downloader, blockHolder]) - check (await blockRequest).tryGet().cid == aBlock.cid - check (await downloader.engine.localStore.hasBlock(aBlock.cid)).tryGet() - - # ... 
and the bystander should have cancelled the want-have - check eventually( - bystander.engine.peers - .get(downloader.switch.peerInfo.peerId).peerWants - .filterIt(it.address == aBlock.address).len == 0 - ) diff --git a/tests/codex/blockexchange/engine/testengine.nim b/tests/codex/blockexchange/engine/testengine.nim index f7cc8294..cc5511e8 100644 --- a/tests/codex/blockexchange/engine/testengine.nim +++ b/tests/codex/blockexchange/engine/testengine.nim @@ -20,6 +20,11 @@ import ../../../asynctest import ../../helpers import ../../examples +const NopSendWantCancellationsProc = proc( + id: PeerId, addresses: seq[BlockAddress] +) {.gcsafe, async.} = + discard + asyncchecksuite "NetworkStore engine basic": var rng: Rng @@ -129,11 +134,6 @@ asyncchecksuite "NetworkStore engine handlers": localStore: BlockStore blocks: seq[Block] - const NopSendWantCancellationsProc = proc( - id: PeerId, addresses: seq[BlockAddress] - ) {.gcsafe, async.} = - discard - setup: rng = Rng.instance() chunker = RandomChunker.new(rng, size = 1024'nb, chunkSize = 256'nb) @@ -292,7 +292,8 @@ asyncchecksuite "NetworkStore engine handlers": await done.wait(100.millis) test "Should handle block presence": - var handles: Table[Cid, Future[Block]] + var handles: + Table[Cid, Future[Block].Raising([CancelledError, RetriesExhaustedError])] proc sendWantList( id: PeerId, @@ -333,6 +334,10 @@ asyncchecksuite "NetworkStore engine handlers": blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address)) cancellations = newTable(blocks.mapIt((it.address, newFuture[void]())).toSeq) + peerCtx.blocks = blocks.mapIt( + (it.address, Presence(address: it.address, have: true, price: UInt256.example)) + ).toTable + proc sendWantCancellations( id: PeerId, addresses: seq[BlockAddress] ) {.gcsafe, async.} = @@ -344,9 +349,168 @@ asyncchecksuite "NetworkStore engine handlers": ) await engine.blocksDeliveryHandler(peerId, blocksDelivery) - discard await allFinished(pending) + discard await 
allFinished(pending).wait(100.millis) await allFuturesThrowing(cancellations.values().toSeq) +asyncchecksuite "Block Download": + var + rng: Rng + seckey: PrivateKey + peerId: PeerId + chunker: Chunker + wallet: WalletRef + blockDiscovery: Discovery + peerStore: PeerCtxStore + pendingBlocks: PendingBlocksManager + network: BlockExcNetwork + engine: BlockExcEngine + discovery: DiscoveryEngine + advertiser: Advertiser + peerCtx: BlockExcPeerCtx + localStore: BlockStore + blocks: seq[Block] + + setup: + rng = Rng.instance() + chunker = RandomChunker.new(rng, size = 1024'nb, chunkSize = 256'nb) + + while true: + let chunk = await chunker.getBytes() + if chunk.len <= 0: + break + + blocks.add(Block.new(chunk).tryGet()) + + seckey = PrivateKey.random(rng[]).tryGet() + peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet() + wallet = WalletRef.example + blockDiscovery = Discovery.new() + peerStore = PeerCtxStore.new() + pendingBlocks = PendingBlocksManager.new() + + localStore = CacheStore.new() + network = BlockExcNetwork() + + discovery = + DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) + + advertiser = Advertiser.new(localStore, blockDiscovery) + + engine = BlockExcEngine.new( + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) + + peerCtx = BlockExcPeerCtx(id: peerId) + engine.peers.add(peerCtx) + + test "Should exhaust retries": + var + retries = 2 + address = BlockAddress.init(blocks[0].cid) + + proc sendWantList( + id: PeerId, + addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ) {.gcsafe, async.} = + check wantType == WantHave + check not engine.pendingBlocks.isInFlight(address) + check engine.pendingBlocks.retries(address) == retries + retries -= 1 + + engine.pendingBlocks.blockRetries = 2 + engine.pendingBlocks.retryInterval = 10.millis + engine.network = + 
BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList)) + + let pending = engine.requestBlock(address) + + expect RetriesExhaustedError: + discard (await pending).tryGet() + + test "Should retry block request": + let + address = BlockAddress.init(blocks[0].cid) + steps = newAsyncEvent() + + proc sendWantList( + id: PeerId, + addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ) {.gcsafe, async.} = + case wantType + of WantHave: + check engine.pendingBlocks.isInFlight(address) == false + check engine.pendingBlocks.retriesExhausted(address) == false + steps.fire() + of WantBlock: + check engine.pendingBlocks.isInFlight(address) == true + check engine.pendingBlocks.retriesExhausted(address) == false + steps.fire() + + engine.pendingBlocks.blockRetries = 10 + engine.pendingBlocks.retryInterval = 10.millis + engine.network = BlockExcNetwork( + request: BlockExcRequest( + sendWantList: sendWantList, sendWantCancellations: NopSendWantCancellationsProc + ) + ) + + let pending = engine.requestBlock(address) + await steps.wait() + + # add blocks presence + peerCtx.blocks = blocks.mapIt( + (it.address, Presence(address: it.address, have: true, price: UInt256.example)) + ).toTable + + steps.clear() + await steps.wait() + + await engine.blocksDeliveryHandler( + peerId, @[BlockDelivery(blk: blocks[0], address: address)] + ) + check (await pending).tryGet() == blocks[0] + + test "Should cancel block request": + var + address = BlockAddress.init(blocks[0].cid) + done = newFuture[void]() + + proc sendWantList( + id: PeerId, + addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ) {.gcsafe, async.} = + done.complete() + + engine.pendingBlocks.blockRetries = 10 + engine.pendingBlocks.retryInterval = 1.seconds + engine.network = 
BlockExcNetwork( + request: BlockExcRequest( + sendWantList: sendWantList, sendWantCancellations: NopSendWantCancellationsProc + ) + ) + + let pending = engine.requestBlock(address) + await done.wait(100.millis) + + pending.cancel() + expect CancelledError: + discard (await pending).tryGet() + asyncchecksuite "Task Handler": var rng: Rng diff --git a/tests/codex/blockexchange/testpendingblocks.nim b/tests/codex/blockexchange/testpendingblocks.nim index 45b065c0..29410db7 100644 --- a/tests/codex/blockexchange/testpendingblocks.nim +++ b/tests/codex/blockexchange/testpendingblocks.nim @@ -28,7 +28,10 @@ checksuite "Pending Blocks": check blk.cid in pendingBlocks pendingBlocks.resolve(@[blk].mapIt(BlockDelivery(blk: it, address: it.address))) - check (await handle) == blk + await sleepAsync(0.millis) + # trigger the event loop, otherwise the block finishes before poll runs + let resolved = await handle + check resolved == blk check blk.cid notin pendingBlocks test "Should cancel want handle": @@ -41,20 +44,6 @@ checksuite "Pending Blocks": await handle.cancelAndWait() check blk.cid notin pendingBlocks - test "Should expire want handle": - let - pendingBlocks = PendingBlocksManager.new() - blk = bt.Block.new("Hello".toBytes).tryGet - handle = pendingBlocks.getWantHandle(blk.cid, 1.millis) - - check blk.cid in pendingBlocks - - await sleepAsync(10.millis) - expect AsyncTimeoutError: - discard await handle - - check blk.cid notin pendingBlocks - test "Should get wants list": let pendingBlocks = PendingBlocksManager.new() @@ -79,3 +68,19 @@ checksuite "Pending Blocks": check: (await allFinished(wantHandles)).mapIt($it.read.cid).sorted(cmp[string]) == (await allFinished(handles)).mapIt($it.read.cid).sorted(cmp[string]) + + test "Should handle retry counters": + let + pendingBlocks = PendingBlocksManager.new(3) + blk = bt.Block.new("Hello".toBytes).tryGet + address = BlockAddress.init(blk.cid) + handle = pendingBlocks.getWantHandle(blk.cid) + + check 
pendingBlocks.retries(address) == 3 + pendingBlocks.decRetries(address) + check pendingBlocks.retries(address) == 2 + pendingBlocks.decRetries(address) + check pendingBlocks.retries(address) == 1 + pendingBlocks.decRetries(address) + check pendingBlocks.retries(address) == 0 + check pendingBlocks.retriesExhausted(address) diff --git a/tests/codex/node/helpers.nim b/tests/codex/node/helpers.nim index 2d1a87dc..a28a1f37 100644 --- a/tests/codex/node/helpers.nim +++ b/tests/codex/node/helpers.nim @@ -123,7 +123,7 @@ template setupAndTearDown*() {.dirty.} = ) teardown: - close(file) + file.close() await node.stop() await metaTmp.destroyDb() await repoTmp.destroyDb() diff --git a/tests/codex/node/testnode.nim b/tests/codex/node/testnode.nim index 0700203d..511badef 100644 --- a/tests/codex/node/testnode.nim +++ b/tests/codex/node/testnode.nim @@ -64,21 +64,6 @@ asyncchecksuite "Test Node - Basic": check: fetched == manifest - test "Should not lookup non-existing blocks twice": - # https://github.com/codex-storage/nim-codex/issues/699 - let - cstore = CountingStore.new(engine, localStore) - node = CodexNodeRef.new(switch, cstore, engine, blockDiscovery, Taskpool.new()) - missingCid = - Cid.init("zDvZRwzmCvtiyubW9AecnxgLnXK8GrBvpQJBDzToxmzDN6Nrc2CZ").get() - - engine.blockFetchTimeout = timer.milliseconds(100) - - discard await node.retrieve(missingCid, local = false) - - let lookupCount = cstore.lookups.getOrDefault(missingCid) - check lookupCount == 1 - test "Block Batching": let manifest = await storeDataGetManifest(localStore, chunker) @@ -93,17 +78,15 @@ asyncchecksuite "Test Node - Basic": ) ).tryGet() - test "Store and retrieve Data Stream": + test "Should store Data Stream": let stream = BufferStream.new() storeFut = node.store(stream) - oddChunkSize = math.trunc(DefaultBlockSize.float / 3.14).NBytes # Let's check that node.store can correctly rechunk these odd chunks - oddChunker = FileChunker.new(file = file, chunkSize = oddChunkSize, pad = false) - # TODO: 
doesn't work with pad=tue + oddChunker = FileChunker.new(file = file, chunkSize = 1024.NBytes, pad = false) + # don't pad, so `node.store` gets the correct size var original: seq[byte] - try: while (let chunk = await oddChunker.getBytes(); chunk.len > 0): original &= chunk @@ -116,13 +99,35 @@ asyncchecksuite "Test Node - Basic": manifestCid = (await storeFut).tryGet() manifestBlock = (await localStore.getBlock(manifestCid)).tryGet() localManifest = Manifest.decode(manifestBlock).tryGet() - data = await (await node.retrieve(manifestCid)).drain() + var data: seq[byte] + for i in 0 ..< localManifest.blocksCount: + let blk = (await localStore.getBlock(localManifest.treeCid, i)).tryGet() + data &= blk.data + + data.setLen(localManifest.datasetSize.int) # truncate data to original size check: - data.len == localManifest.datasetSize.int data.len == original.len sha256.digest(data) == sha256.digest(original) + test "Should retrieve a Data Stream": + let + manifest = await storeDataGetManifest(localStore, chunker) + manifestBlk = + bt.Block.new(data = manifest.encode().tryGet, codec = ManifestCodec).tryGet() + + (await localStore.putBlock(manifestBlk)).tryGet() + let data = await ((await node.retrieve(manifestBlk.cid)).tryGet()).drain() + + var storedData: seq[byte] + for i in 0 ..< manifest.blocksCount: + let blk = (await localStore.getBlock(manifest.treeCid, i)).tryGet() + storedData &= blk.data + + storedData.setLen(manifest.datasetSize.int) # truncate data to original size + check: + storedData == data + test "Retrieve One Block": let testString = "Block 1" diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index a748c98e..2311dc22 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -37,7 +37,7 @@ twonodessuite "REST API": let space = client1.space().tryGet() check: space.totalBlocks == 2 - space.quotaMaxBytes == 8589934592.NBytes + space.quotaMaxBytes == 21474836480.NBytes space.quotaUsedBytes == 
65592.NBytes space.quotaReservedBytes == 12.NBytes diff --git a/vendor/nim-serde b/vendor/nim-serde index 69a7a011..c82e85c6 160000 --- a/vendor/nim-serde +++ b/vendor/nim-serde @@ -1 +1 @@ -Subproject commit 69a7a0111addaa4aad885dd4bd7b5ee4684a06de +Subproject commit c82e85c62436218592fbe876df5ac389ef8b964b From 16dce0fc437dfc655bbf18991e205fa8777fe92a Mon Sep 17 00:00:00 2001 From: Slava <20563034+veaceslavdoina@users.noreply.github.com> Date: Tue, 25 Feb 2025 11:19:29 +0200 Subject: [PATCH 19/40] chore: update testnet marketplace address (#1127) https://github.com/codex-storage/nim-codex/issues/1126 --- codex/contracts/deployment.nim | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/codex/contracts/deployment.nim b/codex/contracts/deployment.nim index c4e59b80..cc125d18 100644 --- a/codex/contracts/deployment.nim +++ b/codex/contracts/deployment.nim @@ -18,9 +18,9 @@ const knownAddresses = { # Taiko Alpha-3 Testnet "167005": {"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable, - # Codex Testnet - Nov 25 2024 18:41:29 PM (+00:00 UTC) + # Codex Testnet - Feb 25 2025 07:24:19 AM (+00:00 UTC) "789987": - {"Marketplace": Address.init("0xAB03b6a58C5262f530D54146DA2a552B1C0F7648")}.toTable, + {"Marketplace": Address.init("0xfFaF679D5Cbfdd5Dbc9Be61C616ed115DFb597ed")}.toTable, }.toTable proc getKnownAddress(T: type, chainId: UInt256): ?Address = From fab5e16afda7d9ffd22369cc04603ad9492c8b6b Mon Sep 17 00:00:00 2001 From: Ben Bierens <39762930+benbierens@users.noreply.github.com> Date: Thu, 27 Feb 2025 12:29:27 +0100 Subject: [PATCH 20/40] Missing nullability causes json-serialize failure in some generated clients. 
(#1129) --- openapi.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/openapi.yaml b/openapi.yaml index 70da398b..53a908a3 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -325,6 +325,7 @@ components: - unknown error: type: string + nullable: true description: If Request failed, then here is presented the error message request: $ref: "#/components/schemas/StorageRequest" From 7065718e0912004492c75cbed1036c7c8ec8939e Mon Sep 17 00:00:00 2001 From: Arnaud Date: Thu, 27 Feb 2025 17:58:23 +0100 Subject: [PATCH 21/40] feat(marketplace): indicate that slot is being repaired when trying to download (#1083) * Indicate that slot is being repaired when trying to download * Fix tests * Apply nph * Calculate the repair collateral when adding the item into the queue * Add slotCollateral calculation with getRequest cache and remove populationItem function * Update with pricePerByte * Simplify StorageAsk parameter * Minor fixes * Move cache request to another PR * Rename SlotQueueItem collateral and required in init * Use override func to optimise calls when the slot state is known * Remove unused code * Cosmetic change * Use raiseMarketError helper * Add exceptions to async pragma * Cosmetic change * Use raiseMarketError helper * Let slotCollateral determines the slot sate * Use configSync to avoid async pragma in onStorageRequested * Add loadConfig function * Add CatchableError to async pragma * Add missing pragma raises errors * Move loadConfig * Avoid swallow CancelledError * Avoid swallowing CancelledError * Avoid swallowing CancelledError * Update error messages * Except MarketError instead of CatchableError * Fix merge issue * Log fatal when configuration cannot be loaded * Propagate MarketError in slotCollateral * Remove useless configSync * Use result with explicit error * Fix syntax --------- Signed-off-by: Arnaud --- codex/codex.nim | 4 + codex/contracts/market.nim | 126 +++++++++--- codex/market.nim | 41 +++- codex/node.nim | 17 +- codex/sales.nim | 92 ++++++--- 
codex/sales/salescontext.nim | 2 +- codex/sales/slotqueue.nim | 55 ++--- codex/sales/states/downloading.nim | 5 +- codex/sales/states/filling.nim | 17 +- tests/codex/helpers/mockmarket.nim | 65 +++++- tests/codex/helpers/mockslotqueueitem.nim | 4 +- tests/codex/node/testcontracts.nim | 2 +- tests/codex/sales/testsales.nim | 41 ++-- tests/codex/sales/testslotqueue.nim | 235 ++++++++++++++-------- tests/contracts/testMarket.nim | 31 +++ tests/examples.nim | 4 +- 16 files changed, 521 insertions(+), 220 deletions(-) diff --git a/codex/codex.nim b/codex/codex.nim index b8905205..8a03510c 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -134,6 +134,10 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} = if config.simulateProofFailures > 0: warn "Proof failure simulation is not enabled for this build! Configuration ignored" + if error =? (await market.loadConfig()).errorOption: + fatal "Cannot load market configuration", error = error.msg + quit QuitFailure + let purchasing = Purchasing.new(market, clock) let sales = Sales.new(market, clock, repo, proofFailures) client = some ClientInteractions.new(clock, purchasing) diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 3c016a59..9079ac8a 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -55,11 +55,17 @@ template convertEthersError(body) = except EthersError as error: raiseMarketError(error.msgDetail) -proc config(market: OnChainMarket): Future[MarketplaceConfig] {.async.} = +proc config( + market: OnChainMarket +): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} = without resolvedConfig =? market.configuration: - let fetchedConfig = await market.contract.configuration() - market.configuration = some fetchedConfig - return fetchedConfig + if err =? (await market.loadConfig()).errorOption: + raiseMarketError(err.msg) + + without config =? 
market.configuration: + raiseMarketError("Failed to access to config from the Marketplace contract") + + return config return resolvedConfig @@ -70,7 +76,26 @@ proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} = let token = Erc20Token.new(tokenAddress, market.signer) discard await token.increaseAllowance(market.contract.address(), amount).confirm(1) -method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} = +method loadConfig*( + market: OnChainMarket +): Future[?!void] {.async: (raises: [CancelledError]).} = + try: + without config =? market.configuration: + let fetchedConfig = await market.contract.configuration() + + market.configuration = some fetchedConfig + + return success() + except AsyncLockError, EthersError: + let err = getCurrentException() + return failure newException( + MarketError, + "Failed to fetch the config from the Marketplace contract: " & err.msg, + ) + +method getZkeyHash*( + market: OnChainMarket +): Future[?string] {.async: (raises: [CancelledError, MarketError]).} = let config = await market.config() return some config.proofs.zkeyHash @@ -78,18 +103,24 @@ method getSigner*(market: OnChainMarket): Future[Address] {.async.} = convertEthersError: return await market.signer.getAddress() -method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} = +method periodicity*( + market: OnChainMarket +): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError: let config = await market.config() let period = config.proofs.period return Periodicity(seconds: period) -method proofTimeout*(market: OnChainMarket): Future[uint64] {.async.} = +method proofTimeout*( + market: OnChainMarket +): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError: let config = await market.config() return config.proofs.timeout -method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} = +method repairRewardPercentage*( + market: 
OnChainMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError: let config = await market.config() return config.collateral.repairRewardPercentage @@ -99,7 +130,9 @@ method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} = let config = await market.config() return config.requestDurationLimit -method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} = +method proofDowntime*( + market: OnChainMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError: let config = await market.config() return config.proofs.downtime @@ -128,19 +161,22 @@ method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} method getRequest*( market: OnChainMarket, id: RequestId -): Future[?StorageRequest] {.async.} = - let key = $id +): Future[?StorageRequest] {.async: (raises: [CancelledError]).} = + try: + let key = $id - if market.requestCache.contains(key): - return some market.requestCache[key] + if key in market.requestCache: + return some market.requestCache[key] - convertEthersError: - try: - let request = await market.contract.getRequest(id) - market.requestCache[key] = request - return some request - except Marketplace_UnknownRequest: - return none StorageRequest + let request = await market.contract.getRequest(id) + market.requestCache[key] = request + return some request + except Marketplace_UnknownRequest, KeyError: + warn "Cannot retrieve the request", error = getCurrentExceptionMsg() + return none StorageRequest + except EthersError, AsyncLockError: + error "Cannot retrieve the request", error = getCurrentExceptionMsg() + return none StorageRequest method requestState*( market: OnChainMarket, requestId: RequestId @@ -152,10 +188,17 @@ method requestState*( except Marketplace_UnknownRequest: return none RequestState -method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} = +method slotState*( + market: OnChainMarket, 
slotId: SlotId +): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError: - let overrides = CallOverrides(blockTag: some BlockTag.pending) - return await market.contract.slotState(slotId, overrides) + try: + let overrides = CallOverrides(blockTag: some BlockTag.pending) + return await market.contract.slotState(slotId, overrides) + except AsyncLockError as err: + raiseMarketError( + "Failed to fetch the slot state from the Marketplace contract: " & err.msg + ) method getRequestEnd*( market: OnChainMarket, id: RequestId @@ -507,3 +550,40 @@ method queryPastStorageRequestedEvents*( let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) return await market.queryPastStorageRequestedEvents(fromBlock) + +method slotCollateral*( + market: OnChainMarket, requestId: RequestId, slotIndex: uint64 +): Future[?!UInt256] {.async: (raises: [CancelledError]).} = + let slotid = slotId(requestId, slotIndex) + + try: + let slotState = await market.slotState(slotid) + + without request =? await market.getRequest(requestId): + return failure newException( + MarketError, "Failure calculating the slotCollateral, cannot get the request" + ) + + return market.slotCollateral(request.ask.collateralPerSlot, slotState) + except MarketError as error: + error "Error when trying to calculate the slotCollateral", error = error.msg + return failure error + +method slotCollateral*( + market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState +): ?!UInt256 {.raises: [].} = + if slotState == SlotState.Repair: + without repairRewardPercentage =? + market.configuration .? collateral .? 
repairRewardPercentage: + return failure newException( + MarketError, + "Failure calculating the slotCollateral, cannot get the reward percentage", + ) + + return success ( + collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div( + 100.u256 + ) + ) + + return success(collateralPerSlot) diff --git a/codex/market.nim b/codex/market.nim index 5417c8e1..c5177aeb 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -62,25 +62,40 @@ type ProofSubmitted* = object of MarketplaceEvent id*: SlotId -method getZkeyHash*(market: Market): Future[?string] {.base, async.} = +method loadConfig*( + market: Market +): Future[?!void] {.base, async: (raises: [CancelledError]).} = + raiseAssert("not implemented") + +method getZkeyHash*( + market: Market +): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method getSigner*(market: Market): Future[Address] {.base, async.} = raiseAssert("not implemented") -method periodicity*(market: Market): Future[Periodicity] {.base, async.} = +method periodicity*( + market: Market +): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method proofTimeout*(market: Market): Future[uint64] {.base, async.} = +method proofTimeout*( + market: Market +): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} = +method repairRewardPercentage*( + market: Market +): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} = raiseAssert("not implemented") -method proofDowntime*(market: Market): Future[uint8] {.base, async.} = +method proofDowntime*( + market: Market +): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") 
method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} = @@ -102,7 +117,7 @@ method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} = method getRequest*( market: Market, id: RequestId -): Future[?StorageRequest] {.base, async.} = +): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} = raiseAssert("not implemented") method requestState*( @@ -110,7 +125,9 @@ method requestState*( ): Future[?RequestState] {.base, async.} = raiseAssert("not implemented") -method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} = +method slotState*( + market: Market, slotId: SlotId +): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method getRequestEnd*( @@ -270,3 +287,13 @@ method queryPastStorageRequestedEvents*( market: Market, blocksAgo: int ): Future[seq[StorageRequested]] {.base, async.} = raiseAssert("not implemented") + +method slotCollateral*( + market: Market, requestId: RequestId, slotIndex: uint64 +): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} = + raiseAssert("not implemented") + +method slotCollateral*( + market: Market, collateralPerSlot: UInt256, slotState: SlotState +): ?!UInt256 {.base, gcsafe, raises: [].} = + raiseAssert("not implemented") diff --git a/codex/node.nim b/codex/node.nim index b0f66c90..b248e6df 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -591,7 +591,11 @@ proc requestStorage*( success purchase.id proc onStore( - self: CodexNodeRef, request: StorageRequest, slotIdx: uint64, blocksCb: BlocksCb + self: CodexNodeRef, + request: StorageRequest, + slotIdx: uint64, + blocksCb: BlocksCb, + isRepairing: bool = false, ): Future[?!void] {.async.} = ## store data in local storage ## @@ -604,6 +608,10 @@ proc onStore( trace "Received a request to store a slot" + # TODO: Use the isRepairing to manage the slot download. 
+ # If isRepairing is true, the slot has to be repaired before + # being downloaded. + without manifest =? (await self.fetchManifest(cid)), err: trace "Unable to fetch manifest for cid", cid, err = err.msg return failure(err) @@ -745,9 +753,12 @@ proc start*(self: CodexNodeRef) {.async.} = if hostContracts =? self.contracts.host: hostContracts.sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, + slot: uint64, + onBatch: BatchProc, + isRepairing: bool = false, ): Future[?!void] = - self.onStore(request, slot, onBatch) + self.onStore(request, slot, onBatch, isRepairing) hostContracts.sales.onExpiryUpdate = proc( rootCid: Cid, expiry: SecondsSince1970 diff --git a/codex/sales.nim b/codex/sales.nim index 91d882b8..af594a9a 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -157,13 +157,28 @@ proc cleanUp( # Re-add items back into the queue to prevent small availabilities from # draining the queue. Seen items will be ordered last. if reprocessSlot and request =? data.request: - let queue = sales.context.slotQueue - var seenItem = SlotQueueItem.init( - data.requestId, data.slotIndex.uint16, data.ask, request.expiry, seen = true - ) - trace "pushing ignored item to queue, marked as seen" - if err =? queue.push(seenItem).errorOption: - error "failed to readd slot to queue", errorType = $(type err), error = err.msg + try: + without collateral =? + await sales.context.market.slotCollateral(data.requestId, data.slotIndex), err: + error "Failed to re-add item back to the slot queue: unable to calculate collateral", + error = err.msg + return + + let queue = sales.context.slotQueue + var seenItem = SlotQueueItem.init( + data.requestId, + data.slotIndex.uint16, + data.ask, + request.expiry, + seen = true, + collateral = collateral, + ) + trace "pushing ignored item to queue, marked as seen" + if err =? 
queue.push(seenItem).errorOption: + error "failed to readd slot to queue", errorType = $(type err), error = err.msg + except MarketError as e: + error "Failed to re-add item back to the slot queue.", error = e.msg + return await sales.remove(agent) @@ -283,7 +298,7 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} = proc onStorageRequested( sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64 -) = +) {.raises: [].} = logScope: topics = "marketplace sales onStorageRequested" requestId @@ -294,7 +309,14 @@ proc onStorageRequested( trace "storage requested, adding slots to queue" - without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err: + let market = sales.context.market + + without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free), + err: + error "Request failure, unable to calculate collateral", error = err.msg + return + + without items =? SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err: if err of SlotsOutOfRangeError: warn "Too many slots, cannot add to queue" else: @@ -319,35 +341,45 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) = trace "slot freed, adding to queue" - proc addSlotToQueue() {.async: (raises: []).} = + proc addSlotToQueue() {.async: (raises: [CancelledError]).} = let context = sales.context let market = context.market let queue = context.slotQueue + without request =? (await market.getRequest(requestId)), err: + error "unknown request in contract", error = err.msgDetail + return + + # Take the repairing state into consideration to calculate the collateral. + # This is particularly needed because it will affect the priority in the queue + # and we want to give the user the ability to tweak the parameters. + # Adding the repairing state directly in the queue priority calculation + # would not allow this flexibility. + without collateral =? 
+ market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err: + error "Failed to add freed slot to queue: unable to calculate collateral", + error = err.msg + return + if slotIndex > uint16.high.uint64: error "Cannot cast slot index to uint16, value = ", slotIndex return - # first attempt to populate request using existing metadata in queue - without var found =? queue.populateItem(requestId, slotIndex.uint16): - trace "no existing request metadata, getting request info from contract" - # if there's no existing slot for that request, retrieve the request - # from the contract. - try: - without request =? await market.getRequest(requestId): - error "unknown request in contract" - return + without slotQueueItem =? + SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch, err: + warn "Too many slots, cannot add to queue", error = err.msgDetail + return - found = SlotQueueItem.init(request, slotIndex.uint16) - except CancelledError: - discard # do not propagate as addSlotToQueue was asyncSpawned - except CatchableError as e: - error "failed to get request from contract and add slots to queue", - error = e.msgDetail - - if err =? queue.push(found).errorOption: - error "failed to push slot items to queue", error = err.msgDetail + if err =? queue.push(slotQueueItem).errorOption: + if err of SlotQueueItemExistsError: + error "Failed to push item to queue becaue it already exists", + error = err.msgDetail + elif err of QueueNotRunningError: + warn "Failed to push item to queue becaue queue is not running", + error = err.msgDetail + # We could get rid of this by adding the storage ask in the SlotFreed event, + # so we would not need to call getRequest to get the collateralPerSlot. 
let fut = addSlotToQueue() sales.trackedFutures.track(fut) asyncSpawn fut @@ -356,7 +388,9 @@ proc subscribeRequested(sales: Sales) {.async.} = let context = sales.context let market = context.market - proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: uint64) = + proc onStorageRequested( + requestId: RequestId, ask: StorageAsk, expiry: uint64 + ) {.raises: [].} = sales.onStorageRequested(requestId, ask, expiry) try: diff --git a/codex/sales/salescontext.nim b/codex/sales/salescontext.nim index 6e6a3568..af940a4b 100644 --- a/codex/sales/salescontext.nim +++ b/codex/sales/salescontext.nim @@ -26,7 +26,7 @@ type BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].} OnStore* = proc( - request: StorageRequest, slot: uint64, blocksCb: BlocksCb + request: StorageRequest, slot: uint64, blocksCb: BlocksCb, isRepairing: bool ): Future[?!void] {.gcsafe, upraises: [].} OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {. gcsafe, upraises: [] diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim index a032d46b..fa57a983 100644 --- a/codex/sales/slotqueue.nim +++ b/codex/sales/slotqueue.nim @@ -34,7 +34,7 @@ type slotSize: uint64 duration: uint64 pricePerBytePerSecond: UInt256 - collateralPerByte: UInt256 + collateral: UInt256 # Collateral computed expiry: uint64 seen: bool @@ -76,9 +76,6 @@ proc profitability(item: SlotQueueItem): UInt256 = slotSize: item.slotSize, ).pricePerSlot -proc collateralPerSlot(item: SlotQueueItem): UInt256 = - StorageAsk(collateralPerByte: item.collateralPerByte, slotSize: item.slotSize).collateralPerSlot - proc `<`*(a, b: SlotQueueItem): bool = # for A to have a higher priority than B (in a min queue), A must be less than # B. 
@@ -95,8 +92,8 @@ proc `<`*(a, b: SlotQueueItem): bool = scoreA.addIf(a.profitability > b.profitability, 3) scoreB.addIf(a.profitability < b.profitability, 3) - scoreA.addIf(a.collateralPerSlot < b.collateralPerSlot, 2) - scoreB.addIf(a.collateralPerSlot > b.collateralPerSlot, 2) + scoreA.addIf(a.collateral < b.collateral, 2) + scoreB.addIf(a.collateral > b.collateral, 2) scoreA.addIf(a.expiry > b.expiry, 1) scoreB.addIf(a.expiry < b.expiry, 1) @@ -137,6 +134,7 @@ proc init*( slotIndex: uint16, ask: StorageAsk, expiry: uint64, + collateral: UInt256, seen = false, ): SlotQueueItem = SlotQueueItem( @@ -145,25 +143,32 @@ proc init*( slotSize: ask.slotSize, duration: ask.duration, pricePerBytePerSecond: ask.pricePerBytePerSecond, - collateralPerByte: ask.collateralPerByte, + collateral: collateral, expiry: expiry, seen: seen, ) proc init*( - _: type SlotQueueItem, request: StorageRequest, slotIndex: uint16 + _: type SlotQueueItem, + request: StorageRequest, + slotIndex: uint16, + collateral: UInt256, ): SlotQueueItem = - SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry) + SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry, collateral) proc init*( - _: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: uint64 -): seq[SlotQueueItem] = + _: type SlotQueueItem, + requestId: RequestId, + ask: StorageAsk, + expiry: uint64, + collateral: UInt256, +): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} = if not ask.slots.inRange: raise newException(SlotsOutOfRangeError, "Too many slots") var i = 0'u16 proc initSlotQueueItem(): SlotQueueItem = - let item = SlotQueueItem.init(requestId, i, ask, expiry) + let item = SlotQueueItem.init(requestId, i, ask, expiry, collateral) inc i return item @@ -171,8 +176,10 @@ proc init*( Rng.instance.shuffle(items) return items -proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] = - return SlotQueueItem.init(request.id, request.ask, request.expiry) +proc 
init*( + _: type SlotQueueItem, request: StorageRequest, collateral: UInt256 +): seq[SlotQueueItem] = + return SlotQueueItem.init(request.id, request.ask, request.expiry, collateral) proc inRange*(val: SomeUnsignedInt): bool = val.uint16 in SlotQueueSize.low .. SlotQueueSize.high @@ -234,25 +241,7 @@ proc unpause*(self: SlotQueue) = # set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait() self.unpaused.fire() -proc populateItem*( - self: SlotQueue, requestId: RequestId, slotIndex: uint16 -): ?SlotQueueItem = - trace "populate item, items in queue", len = self.queue.len - for item in self.queue.items: - trace "populate item search", itemRequestId = item.requestId, requestId - if item.requestId == requestId: - return some SlotQueueItem( - requestId: requestId, - slotIndex: slotIndex, - slotSize: item.slotSize, - duration: item.duration, - pricePerBytePerSecond: item.pricePerBytePerSecond, - collateralPerByte: item.collateralPerByte, - expiry: item.expiry, - ) - return none SlotQueueItem - -proc push*(self: SlotQueue, item: SlotQueueItem): ?!void = +proc push*(self: SlotQueue, item: SlotQueueItem): ?!void {.raises: [].} = logScope: requestId = item.requestId slotIndex = item.slotIndex diff --git a/codex/sales/states/downloading.nim b/codex/sales/states/downloading.nim index 39137545..7cf304d3 100644 --- a/codex/sales/states/downloading.nim +++ b/codex/sales/states/downloading.nim @@ -67,8 +67,11 @@ method run*( return await reservations.release(reservation.id, reservation.availabilityId, bytes) try: + let slotId = slotId(request.id, data.slotIndex) + let isRepairing = (await context.market.slotState(slotId)) == SlotState.Repair + trace "Starting download" - if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption: + if err =? 
(await onStore(request, data.slotIndex, onBlocks, isRepairing)).errorOption: return some State(SaleErrored(error: err, reprocessSlot: false)) trace "Download complete" diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim index 0c20a64e..03e2ef2b 100644 --- a/codex/sales/states/filling.nim +++ b/codex/sales/states/filling.nim @@ -38,18 +38,11 @@ method run*( slotIndex = data.slotIndex try: - let slotState = await market.slotState(slotId(data.requestId, data.slotIndex)) - let requestedCollateral = request.ask.collateralPerSlot - var collateral: UInt256 - - if slotState == SlotState.Repair: - # When repairing the node gets "discount" on the collateral that it needs to - let repairRewardPercentage = (await market.repairRewardPercentage).u256 - collateral = - requestedCollateral - - ((requestedCollateral * repairRewardPercentage)).div(100.u256) - else: - collateral = requestedCollateral + without collateral =? await market.slotCollateral(data.requestId, data.slotIndex), + err: + error "Failure attempting to fill slot: unable to calculate collateral", + error = err.msg + return debug "Filling slot" try: diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index 48b20f28..16806cb2 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -138,22 +138,35 @@ proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket = signer: Address.example, config: config, canReserveSlot: true, clock: clock ) +method loadConfig*( + market: MockMarket +): Future[?!void] {.async: (raises: [CancelledError]).} = + discard + method getSigner*(market: MockMarket): Future[Address] {.async.} = return market.signer -method periodicity*(mock: MockMarket): Future[Periodicity] {.async.} = +method periodicity*( + mock: MockMarket +): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} = return Periodicity(seconds: mock.config.proofs.period) -method proofTimeout*(market: MockMarket): 
Future[uint64] {.async.} = +method proofTimeout*( + market: MockMarket +): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} = return market.config.proofs.timeout method requestDurationLimit*(market: MockMarket): Future[uint64] {.async.} = return market.config.requestDurationLimit -method proofDowntime*(market: MockMarket): Future[uint8] {.async.} = +method proofDowntime*( + market: MockMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = return market.config.proofs.downtime -method repairRewardPercentage*(market: MockMarket): Future[uint8] {.async.} = +method repairRewardPercentage*( + market: MockMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = return market.config.collateral.repairRewardPercentage method getPointer*(market: MockMarket, slotId: SlotId): Future[uint8] {.async.} = @@ -173,7 +186,7 @@ method mySlots*(market: MockMarket): Future[seq[SlotId]] {.async.} = method getRequest*( market: MockMarket, id: RequestId -): Future[?StorageRequest] {.async.} = +): Future[?StorageRequest] {.async: (raises: [CancelledError]).} = for request in market.requested: if request.id == id: return some request @@ -191,10 +204,16 @@ method requestState*( ): Future[?RequestState] {.async.} = return market.requestState .? 
[requestId] -method slotState*(market: MockMarket, slotId: SlotId): Future[SlotState] {.async.} = - if not market.slotState.hasKey(slotId): +method slotState*( + market: MockMarket, slotId: SlotId +): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} = + if slotId notin market.slotState: return SlotState.Free - return market.slotState[slotId] + + try: + return market.slotState[slotId] + except KeyError as e: + raiseAssert "SlotId not found in known slots (MockMarket.slotState)" method getRequestEnd*( market: MockMarket, id: RequestId @@ -534,3 +553,33 @@ method unsubscribe*(subscription: ProofSubmittedSubscription) {.async.} = method unsubscribe*(subscription: SlotReservationsFullSubscription) {.async.} = subscription.market.subscriptions.onSlotReservationsFull.keepItIf(it != subscription) + +method slotCollateral*( + market: MockMarket, requestId: RequestId, slotIndex: uint64 +): Future[?!UInt256] {.async: (raises: [CancelledError]).} = + let slotid = slotId(requestId, slotIndex) + + try: + let state = await slotState(market, slotid) + + without request =? 
await market.getRequest(requestId): + return failure newException( + MarketError, "Failure calculating the slotCollateral, cannot get the request" + ) + + return market.slotCollateral(request.ask.collateralPerSlot, state) + except MarketError as error: + error "Error when trying to calculate the slotCollateral", error = error.msg + return failure error + +method slotCollateral*( + market: MockMarket, collateralPerSlot: UInt256, slotState: SlotState +): ?!UInt256 {.raises: [].} = + if slotState == SlotState.Repair: + let repairRewardPercentage = market.config.collateral.repairRewardPercentage.u256 + + return success ( + collateralPerSlot - (collateralPerSlot * repairRewardPercentage).div(100.u256) + ) + + return success collateralPerSlot diff --git a/tests/codex/helpers/mockslotqueueitem.nim b/tests/codex/helpers/mockslotqueueitem.nim index 7a1505ec..8657850f 100644 --- a/tests/codex/helpers/mockslotqueueitem.nim +++ b/tests/codex/helpers/mockslotqueueitem.nim @@ -7,7 +7,7 @@ type MockSlotQueueItem* = object slotSize*: uint64 duration*: uint64 pricePerBytePerSecond*: UInt256 - collateralPerByte*: UInt256 + collateral*: UInt256 expiry*: uint64 seen*: bool @@ -19,8 +19,8 @@ proc toSlotQueueItem*(item: MockSlotQueueItem): SlotQueueItem = slotSize: item.slotSize, duration: item.duration, pricePerBytePerSecond: item.pricePerBytePerSecond, - collateralPerByte: item.collateralPerByte, ), expiry = item.expiry, seen = item.seen, + collateral = item.collateral, ) diff --git a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim index 11f4f273..73dd8daf 100644 --- a/tests/codex/node/testcontracts.nim +++ b/tests/codex/node/testcontracts.nim @@ -125,7 +125,7 @@ asyncchecksuite "Test Node - Host contracts": fetchedBytes += blk.data.len.uint return success() - (await onStore(request, 1.uint64, onBlocks)).tryGet() + (await onStore(request, 1.uint64, onBlocks, isRepairing = false)).tryGet() check fetchedBytes == 12 * DefaultBlockSize.uint let indexer = 
verifiable.protectedStrategy.init( diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index f078cbee..e92f9607 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -62,7 +62,7 @@ asyncchecksuite "Sales - start": sales = Sales.new(market, clock, repo) reservations = sales.context.reservations sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = return success() @@ -181,7 +181,7 @@ asyncchecksuite "Sales": sales = Sales.new(market, clock, repo) reservations = sales.context.reservations sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = return success() @@ -229,7 +229,7 @@ asyncchecksuite "Sales": availability = a.get # update id proc notProcessed(itemsProcessed: seq[SlotQueueItem], request: StorageRequest): bool = - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) for i in 0 ..< items.len: if itemsProcessed.contains(items[i]): return false @@ -266,7 +266,7 @@ asyncchecksuite "Sales": done.complete() createAvailability() await market.requestStorage(request) - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check eventually items.allIt(itemsProcessed.contains(it)) test "removes slots from slot queue once RequestCancelled emitted": @@ -287,13 +287,15 @@ asyncchecksuite "Sales": test "removes slot index from slot queue once SlotFilled emitted": let request1 = await addRequestToSaturatedQueue() market.emitSlotFilled(request1.id, 1.uint64) - let expected = SlotQueueItem.init(request1, 1'u16) + let expected = + SlotQueueItem.init(request1, 1'u16, collateral = 
request1.ask.collateralPerSlot) check always (not itemsProcessed.contains(expected)) test "removes slot index from slot queue once SlotReservationsFull emitted": let request1 = await addRequestToSaturatedQueue() market.emitSlotReservationsFull(request1.id, 1.uint64) - let expected = SlotQueueItem.init(request1, 1'u16) + let expected = + SlotQueueItem.init(request1, 1'u16, collateral = request1.ask.collateralPerSlot) check always (not itemsProcessed.contains(expected)) test "adds slot index to slot queue once SlotFreed emitted": @@ -303,14 +305,21 @@ asyncchecksuite "Sales": createAvailability() market.requested.add request # "contract" must be able to return request + market.emitSlotFreed(request.id, 2.uint64) - let expected = SlotQueueItem.init(request, 2.uint16) + without collateralPerSlot =? await market.slotCollateral(request.id, 2.uint64), + error: + fail() + + let expected = + SlotQueueItem.init(request, 2.uint16, collateral = request.ask.collateralPerSlot) + check eventually itemsProcessed.contains(expected) test "items in queue are readded (and marked seen) once ignored": await market.requestStorage(request) - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check eventually queue.len > 0 # queue starts paused, allow items to be added to the queue check eventually queue.paused @@ -331,7 +340,7 @@ asyncchecksuite "Sales": test "queue is paused once availability is insufficient to service slots in queue": createAvailability() # enough to fill a single slot await market.requestStorage(request) - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check eventually queue.len > 0 # queue starts paused, allow items to be added to the queue check eventually queue.paused @@ -348,7 +357,7 @@ asyncchecksuite "Sales": test "availability size is reduced by request slot size when fully downloaded": sales.onStore = proc( - 
request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = let blk = bt.Block.new(@[1.byte]).get await onBatch(blk.repeat(request.ask.slotSize.int)) @@ -361,7 +370,7 @@ asyncchecksuite "Sales": test "non-downloaded bytes are returned to availability once finished": var slotIndex = 0.uint64 sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = slotIndex = slot let blk = bt.Block.new(@[1.byte]).get @@ -421,7 +430,7 @@ asyncchecksuite "Sales": var storingRequest: StorageRequest var storingSlot: uint64 sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = storingRequest = request storingSlot = slot @@ -434,7 +443,7 @@ asyncchecksuite "Sales": test "makes storage available again when data retrieval fails": let error = newException(IOError, "data retrieval failed") sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = return failure(error) createAvailability() @@ -503,7 +512,7 @@ asyncchecksuite "Sales": test "makes storage available again when other host fills the slot": let otherHost = Address.example sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() @@ -519,7 +528,7 @@ asyncchecksuite "Sales": let origSize = availability.freeSize sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: 
BatchProc, isRepairing = false ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() @@ -544,7 +553,7 @@ asyncchecksuite "Sales": let origSize = availability.freeSize sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim index 46c35b1c..03c658be 100644 --- a/tests/codex/sales/testslotqueue.nim +++ b/tests/codex/sales/testslotqueue.nim @@ -159,8 +159,10 @@ suite "Slot queue": requestB.ask.collateralPerByte = 1.u256 requestB.expiry = 1000.uint64 - let itemA = SlotQueueItem.init(requestA, 0) - let itemB = SlotQueueItem.init(requestB, 0) + let itemA = + SlotQueueItem.init(requestA, 0, collateral = requestA.ask.collateralPerSlot) + let itemB = + SlotQueueItem.init(requestB, 0, collateral = requestB.ask.collateralPerSlot) check itemB < itemA # B higher priority than A check itemA > itemB @@ -172,7 +174,7 @@ suite "Slot queue": slotSize: 1.uint64, duration: 1.uint64, pricePerBytePerSecond: 2.u256, # profitability is higher (good) - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 1.uint64, seen: true, # seen (bad), more weight than profitability ) @@ -182,7 +184,7 @@ suite "Slot queue": slotSize: 1.uint64, duration: 1.uint64, pricePerBytePerSecond: 1.u256, # profitability is lower (bad) - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 1.uint64, seen: false, # not seen (good) ) @@ -197,7 +199,7 @@ suite "Slot queue": slotSize: 1.uint64, duration: 1.uint64, pricePerBytePerSecond: 1.u256, # reward is lower (bad) - collateralPerByte: 1.u256, # collateral is lower (good) + collateral: 1.u256, # collateral is lower (good) expiry: 1.uint64, seen: false, ) @@ -208,7 +210,7 @@ suite "Slot queue": duration: 1.uint64, pricePerBytePerSecond: 2.u256, # reward is 
higher (good), more weight than collateral - collateralPerByte: 2.u256, # collateral is higher (bad) + collateral: 2.u256, # collateral is higher (bad) expiry: 1.uint64, seen: false, ) @@ -223,7 +225,7 @@ suite "Slot queue": slotSize: 1.uint64, duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 2.u256, # collateral is higher (bad) + collateral: 2.u256, # collateral is higher (bad) expiry: 2.uint64, # expiry is longer (good) seen: false, ) @@ -233,7 +235,7 @@ suite "Slot queue": slotSize: 1.uint64, duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, # collateral is lower (good), more weight than expiry + collateral: 1.u256, # collateral is lower (good), more weight than expiry expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) @@ -248,7 +250,7 @@ suite "Slot queue": slotSize: 1.uint64, # slotSize is smaller (good) duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) @@ -258,7 +260,7 @@ suite "Slot queue": slotSize: 2.uint64, # slotSize is larger (bad) duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 2.uint64, # expiry is longer (good), more weight than slotSize seen: false, ) @@ -273,7 +275,7 @@ suite "Slot queue": slotSize: 2.uint64, # slotSize is larger (bad) duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) @@ -283,7 +285,7 @@ suite "Slot queue": slotSize: 1.uint64, # slotSize is smaller (good) duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 1.uint64, seen: false, ) @@ -292,11 +294,16 @@ suite "Slot queue": test "expands available all possible slot indices on init": let request = StorageRequest.example - let items = SlotQueueItem.init(request) + let items = 
SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check items.len.uint64 == request.ask.slots var checked = 0 for slotIndex in 0'u16 ..< request.ask.slots.uint16: - check items.anyIt(it == SlotQueueItem.init(request, slotIndex)) + check items.anyIt( + it == + SlotQueueItem.init( + request, slotIndex, collateral = request.ask.collateralPerSlot + ) + ) inc checked check checked == items.len @@ -322,34 +329,17 @@ suite "Slot queue": check isOk queue.push(item3) check isOk queue.push(item4) - test "populates item with exisiting request metadata": - newSlotQueue(maxSize = 8, maxWorkers = 1, processSlotDelay = 10.millis) - let request0 = StorageRequest.example - var request1 = StorageRequest.example - request1.ask.collateralPerByte += 1.u256 - let items0 = SlotQueueItem.init(request0) - let items1 = SlotQueueItem.init(request1) - check queue.push(items0).isOk - check queue.push(items1).isOk - let populated = !queue.populateItem(request1.id, 12'u16) - check populated.requestId == request1.id - check populated.slotIndex == 12'u16 - check populated.slotSize == request1.ask.slotSize - check populated.duration == request1.ask.duration - check populated.pricePerBytePerSecond == request1.ask.pricePerBytePerSecond - check populated.collateralPerByte == request1.ask.collateralPerByte - - test "does not find exisiting request metadata": - newSlotQueue(maxSize = 2, maxWorkers = 2) - let item = SlotQueueItem.example - check queue.populateItem(item.requestId, 12'u16).isNone - test "can support uint16.high slots": var request = StorageRequest.example let maxUInt16 = uint16.high let uint64Slots = uint64(maxUInt16) request.ask.slots = uint64Slots - let items = SlotQueueItem.init(request.id, request.ask, request.expiry) + let items = SlotQueueItem.init( + request.id, + request.ask, + request.expiry, + collateral = request.ask.collateralPerSlot, + ) check items.len.uint16 == maxUInt16 test "cannot support greater than uint16.high slots": @@ -358,7 +348,12 @@ suite 
"Slot queue": let uint64Slots = uint64(int32Slots) request.ask.slots = uint64Slots expect SlotsOutOfRangeError: - discard SlotQueueItem.init(request.id, request.ask, request.expiry) + discard SlotQueueItem.init( + request.id, + request.ask, + request.expiry, + collateral = request.ask.collateralPerSlot, + ) test "cannot push duplicate items": newSlotQueue(maxSize = 6, maxWorkers = 1, processSlotDelay = 15.millis) @@ -399,8 +394,10 @@ suite "Slot queue": let request0 = StorageRequest.example var request1 = StorageRequest.example request1.ask.collateralPerByte += 1.u256 - let items0 = SlotQueueItem.init(request0) - let items1 = SlotQueueItem.init(request1) + let items0 = + SlotQueueItem.init(request0, collateral = request0.ask.collateralPerSlot) + let items1 = + SlotQueueItem.init(request1, collateral = request1.ask.collateralPerSlot) check queue.push(items0).isOk check queue.push(items1).isOk let last = items1[items1.high] @@ -413,8 +410,10 @@ suite "Slot queue": let request0 = StorageRequest.example var request1 = StorageRequest.example request1.ask.collateralPerByte += 1.u256 - let items0 = SlotQueueItem.init(request0) - let items1 = SlotQueueItem.init(request1) + let items0 = + SlotQueueItem.init(request0, collateral = request0.ask.collateralPerSlot) + let items1 = + SlotQueueItem.init(request1, collateral = request1.ask.collateralPerSlot) check queue.push(items0).isOk check queue.push(items1).isOk queue.delete(request1.id) @@ -433,42 +432,56 @@ suite "Slot queue": request3.ask.collateralPerByte = request2.ask.collateralPerByte + 1 request4.ask.collateralPerByte = request3.ask.collateralPerByte + 1 request5.ask.collateralPerByte = request4.ask.collateralPerByte + 1 - let item0 = SlotQueueItem.init(request0, 0) - let item1 = SlotQueueItem.init(request1, 0) - let item2 = SlotQueueItem.init(request2, 0) - let item3 = SlotQueueItem.init(request3, 0) - let item4 = SlotQueueItem.init(request4, 0) - let item5 = SlotQueueItem.init(request5, 0) + let item0 = + 
SlotQueueItem.init(request0, 0, collateral = request0.ask.collateralPerSlot) + let item1 = + SlotQueueItem.init(request1, 0, collateral = request1.ask.collateralPerSlot) + let item2 = + SlotQueueItem.init(request2, 0, collateral = request2.ask.collateralPerSlot) + let item3 = + SlotQueueItem.init(request3, 0, collateral = request3.ask.collateralPerSlot) + let item4 = + SlotQueueItem.init(request4, 0, collateral = request4.ask.collateralPerSlot) + let item5 = + SlotQueueItem.init(request5, 0, collateral = request5.ask.collateralPerSlot) check queue.contains(item5) == false check queue.push(@[item0, item1, item2, item3, item4, item5]).isOk check queue.contains(item5) test "sorts items by profitability descending (higher pricePerBytePerSecond == higher priority == goes first in the list)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) check item1 < item0 - test "sorts items by collateral ascending (higher required collateralPerByte = lower priority == comes later in the list)": + test "sorts items by collateral ascending (higher required collateral = lower priority == comes later in the list)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) - request.ask.collateralPerByte += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) + let item1 = SlotQueueItem.init( + request, 1, collateral = request.ask.collateralPerSlot + 1.u256 + ) check item1 > item0 test "sorts items by expiry descending (longer expiry = higher priority)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + 
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.expiry += 1 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) check item1 < item0 test "sorts items by slot size descending (bigger dataset = higher profitability = higher priority)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.ask.slotSize += 1 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) check item1 < item0 test "should call callback once an item is added": @@ -489,13 +502,17 @@ suite "Slot queue": # sleeping after push allows the slotqueue loop to iterate, # calling the callback for each pushed/updated item var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item2 = SlotQueueItem.init(request, 2) + let item2 = + SlotQueueItem.init(request, 2, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item3 = SlotQueueItem.init(request, 3) + let item3 = + SlotQueueItem.init(request, 3, collateral = request.ask.collateralPerSlot) check queue.push(item0).isOk await sleepAsync(1.millis) @@ -520,13 +537,17 @@ suite "Slot queue": # sleeping after push allows the slotqueue loop to iterate, # calling the callback for each pushed/updated item var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = 
request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item2 = SlotQueueItem.init(request, 2) + let item2 = + SlotQueueItem.init(request, 2, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item3 = SlotQueueItem.init(request, 3) + let item3 = + SlotQueueItem.init(request, 3, collateral = request.ask.collateralPerSlot) check queue.push(item0).isOk check queue.push(item1).isOk @@ -550,7 +571,7 @@ suite "Slot queue": queue.pause let request = StorageRequest.example - var items = SlotQueueItem.init(request) + var items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check queue.push(items).isOk # check all items processed check eventually queue.len == 0 @@ -558,8 +579,14 @@ suite "Slot queue": test "pushing seen item does not unpause queue": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let item0 = - SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true) + let item0 = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) check queue.paused check queue.push(item0).isOk check queue.paused @@ -567,8 +594,14 @@ suite "Slot queue": test "paused queue waits for unpause before continuing processing": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let item = - SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = false) + let item = SlotQueueItem.init( + request.id, + 1'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = false, + ) check queue.paused # push causes unpause check queue.push(item).isOk @@ -579,10 +612,22 @@ suite "Slot queue": test "processing a 'seen' item pauses the queue": 
newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let unseen = - SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false) - let seen = - SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) + let unseen = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = false, + ) + let seen = SlotQueueItem.init( + request.id, + 1'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) # push causes unpause check queue.push(unseen).isSuccess # check all items processed @@ -595,10 +640,22 @@ suite "Slot queue": test "processing a 'seen' item does not decrease the number of workers": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let unseen = - SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false) - let seen = - SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) + let unseen = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = false, + ) + let seen = SlotQueueItem.init( + request.id, + 1'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) # push seen item to ensure that queue is pausing check queue.push(seen).isSuccess # unpause and pause a number of times @@ -615,10 +672,22 @@ suite "Slot queue": test "item 'seen' flags can be cleared": newSlotQueue(maxSize = 4, maxWorkers = 1) let request = StorageRequest.example - let item0 = - SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true) - let item1 = - SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) + let item0 = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) + let item1 = SlotQueueItem.init( + request.id, + 1'u16, + 
request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) check queue.push(item0).isOk check queue.push(item1).isOk check queue[0].seen diff --git a/tests/contracts/testMarket.nim b/tests/contracts/testMarket.nim index 74d6a65e..068a4d2e 100644 --- a/tests/contracts/testMarket.nim +++ b/tests/contracts/testMarket.nim @@ -598,6 +598,37 @@ ethersuite "On-Chain Market": check endBalanceHost == (startBalanceHost + request.ask.collateralPerSlot) check endBalanceReward == (startBalanceReward + expectedPayout) + test "returns the collateral when the slot is not being repaired": + await market.requestStorage(request) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) + + let slotId = request.slotId(0.uint64) + without collateral =? await market.slotCollateral(request.id, 0.uint64), error: + fail() + + check collateral == request.ask.collateralPerSlot + + test "calculates correctly the collateral when the slot is being repaired": + # Ensure that the config is loaded and repairRewardPercentage is available + discard await market.repairRewardPercentage() + + await market.requestStorage(request) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) + await market.freeSlot(slotId(request.id, 0.uint64)) + + let slotId = request.slotId(0.uint64) + + without collateral =? 
await market.slotCollateral(request.id, 0.uint64), error: + fail() + + # slotCollateral + # repairRewardPercentage = 10 + # expected collateral = slotCollateral - slotCollateral * 0.1 + check collateral == + request.ask.collateralPerSlot - (request.ask.collateralPerSlot * 10).div(100.u256) + test "the request is added in cache after the fist access": await market.requestStorage(request) diff --git a/tests/examples.nim b/tests/examples.nim index 9b88b4a5..9ef4e292 100644 --- a/tests/examples.nim +++ b/tests/examples.nim @@ -72,7 +72,9 @@ proc example*(_: type Slot): Slot = proc example*(_: type SlotQueueItem): SlotQueueItem = let request = StorageRequest.example let slot = Slot.example - SlotQueueItem.init(request, slot.slotIndex.uint16) + SlotQueueItem.init( + request, slot.slotIndex.uint16, collateral = request.ask.collateralPerSlot + ) proc example(_: type G1Point): G1Point = G1Point(x: UInt256.example, y: UInt256.example) From eb09e610d5e1c649f32877d4d924332677a5fdd4 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 5 Mar 2025 09:35:46 +0100 Subject: [PATCH 22/40] fix(ci): handle coverage as a string to enable gcc 14 on linux (#1140) * Handle coverage as a string not a boolean * Update ubuntu version to latest --- .github/actions/nimbus-build-system/action.yml | 4 ++-- .github/workflows/nim-matrix.yml | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/actions/nimbus-build-system/action.yml b/.github/actions/nimbus-build-system/action.yml index 219966db..5d1917e3 100644 --- a/.github/actions/nimbus-build-system/action.yml +++ b/.github/actions/nimbus-build-system/action.yml @@ -89,7 +89,7 @@ runs: - name: Install gcc 14 on Linux # We don't want to install gcc 14 for coverage (Ubuntu 20.04) - if : ${{ inputs.os == 'linux' && !inputs.coverage }} + if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }} shell: ${{ inputs.shell }} {0} run: | # Add GCC-14 to alternatives @@ -202,7 +202,7 @@ runs: - name: Restore Nim toolchain 
binaries from cache id: nim-cache uses: actions/cache@v4 - if : ${{ !inputs.coverage }} + if : ${{ inputs.coverage != 'true' }} with: path: NimBinaries key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }} diff --git a/.github/workflows/nim-matrix.yml b/.github/workflows/nim-matrix.yml index 4d86d3bb..71129574 100644 --- a/.github/workflows/nim-matrix.yml +++ b/.github/workflows/nim-matrix.yml @@ -20,10 +20,10 @@ jobs: uses: fabiocaccamo/create-matrix-action@v5 with: matrix: | - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} build: needs: matrix From 2a3a29720f8c3a6d8cb64c6e463dc3af0cf45c8b Mon Sep 17 00:00:00 2001 From: Giuliano Mega Date: Mon, 10 Mar 2025 10:27:16 -0300 Subject: [PATCH 23/40] Fixes Codex crashes on interrupted 
downloads (#1151) * fix: fixes Codex crashes on interrupted downloads * fix: add better feedback to 404, minor rewording in test comment --- codex/rest/api.nim | 24 +++++++++++++++++++----- tests/integration/testrestapi.nim | 30 ++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 5 deletions(-) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 89dbe220..6b8f2ac1 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -65,9 +65,15 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} = return %RestContentList.init(content) +proc isPending(resp: HttpResponseRef): bool = + ## Checks that an HttpResponseRef object is still pending; i.e., + ## that no body has yet been sent. This helps us guard against calling + ## sendBody(resp: HttpResponseRef, ...) twice, which is illegal. + return resp.getResponseState() == HttpResponseState.Empty + proc retrieveCid( node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef -): Future[RestApiResponse] {.async.} = +): Future[void] {.async: (raises: [CancelledError, HttpWriteError]).} = ## Download a file from the node in a streaming ## manner ## @@ -79,16 +85,21 @@ proc retrieveCid( without stream =? (await node.retrieve(cid, local)), error: if error of BlockNotFoundError: resp.status = Http404 - return await resp.sendBody("") + await resp.sendBody( + "The requested CID could not be retrieved (" & error.msg & ")." + ) + return else: resp.status = Http500 - return await resp.sendBody(error.msg) + await resp.sendBody(error.msg) + return # It is ok to fetch again the manifest because it will hit the cache without manifest =? 
(await node.fetchManifest(cid)), err: error "Failed to fetch manifest", err = err.msg resp.status = Http404 - return await resp.sendBody(err.msg) + await resp.sendBody(err.msg) + return if manifest.mimetype.isSome: resp.setHeader("Content-Type", manifest.mimetype.get()) @@ -119,10 +130,13 @@ proc retrieveCid( await resp.sendChunk(addr buff[0], buff.len) await resp.finish() codex_api_downloads.inc() + except CancelledError as exc: + raise exc except CatchableError as exc: warn "Error streaming blocks", exc = exc.msg resp.status = Http500 - return await resp.sendBody("") + if resp.isPending(): + await resp.sendBody(exc.msg) finally: info "Sent bytes", cid = cid, bytes if not stream.isNil: diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 2311dc22..20bf8bc8 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -1,4 +1,6 @@ import std/httpclient +import std/importutils +import std/net import std/sequtils import std/strformat from pkg/libp2p import `==`, `$`, Cid @@ -305,3 +307,31 @@ twonodessuite "REST API": let cid = Manifest.example().makeManifestBlock().get.cid let response = client1.deleteRaw($cid) check response.status == "204 No Content" + + test "should not crash if the download stream is closed before download completes", + twoNodesConfig: + privateAccess(client1.type) + privateAccess(client1.http.type) + + let cid = client1.upload(repeat("some file contents", 1000)).get + + try: + # Sadly, there's no high level API for preventing the client from + # consuming the whole response, and we need to close the socket + # before that happens if we want to trigger the bug, so we need to + # resort to this. + client1.http.getBody = false + let response = client1.downloadRaw($cid) + + # Read 4 bytes from the stream just to make sure we actually + # receive some data. + let data = client1.http.socket.recv(4) + check data.len == 4 + + # Prematurely closes the connection. 
+ client1.http.close() + finally: + client1.http.getBody = true + + let response = client1.downloadRaw($cid) + check response.body == repeat("some file contents", 1000) From 703921df322e2c32b4f12786fc48e30989b025ca Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 10 Mar 2025 16:59:24 +0100 Subject: [PATCH 24/40] chore(restapi): add headers to support on progress when downloading (#1150) * Add headers to support on progress on download * Replace http session by http client in downloadBytes * Use int instead of int64 for datasetSize * Rename variable to avoid shallowing client --- codex/rest/api.nim | 3 +++ tests/integration/codexclient.nim | 12 ++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 6b8f2ac1..7cb0b43f 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -114,6 +114,8 @@ proc retrieveCid( else: resp.setHeader("Content-Disposition", "attachment") + resp.setHeader("Content-Length", $manifest.datasetSize.int) + await resp.prepareChunked() while not stream.atEof: @@ -342,6 +344,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute resp.setCorsHeaders("GET", corsOrigin) resp.setHeader("Access-Control-Headers", "X-Requested-With") + resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition") await node.retrieveCid(cid.get(), local = false, resp = resp) router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do( diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index 287f465f..f4c3f977 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -76,15 +76,15 @@ proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = proc downloadBytes*( client: CodexClient, cid: Cid, local = false ): Future[?!seq[byte]] {.async.} = - let uri = - parseUri(client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream")) + let uri = client.baseurl & "/data/" & $cid & (if 
local: "" else: "/network/stream") - let (status, bytes) = await client.session.fetch(uri) + let httpClient = newHttpClient() + let response = httpClient.get(uri) - if status != 200: - return failure("fetch failed with status " & $status) + if response.status != "200 OK": + return failure("fetch failed with status " & $response.status) - success bytes + success response.body.toBytes proc delete*(client: CodexClient, cid: Cid): ?!void = let From 17d3bb55cf63e3fe36724f28184035035c6a0aa9 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 12 Mar 2025 10:12:06 +0100 Subject: [PATCH 25/40] chore(marketplace): notify sales when duration, minPricePerBytePerSecond or totalCollateral is updated (#1148) * Call onAvailabilityAdded when freeSize, duration or minPricePerBytePerSecond is increased * Rename onAvailabilityAdded to onAvailabilitySaved * Rename OnAvailabilitySaved to OnAvailabilityUpserted * Go back to OnAvailabilitySaved --- codex/sales.nim | 8 +-- codex/sales/reservations.nim | 34 ++++++------ tests/codex/sales/testreservations.nim | 72 +++++++++++++++++++++++--- 3 files changed, 88 insertions(+), 26 deletions(-) diff --git a/codex/sales.nim b/codex/sales.nim index af594a9a..e2a884df 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -285,7 +285,7 @@ proc load*(sales: Sales) {.async.} = agent.start(SaleUnknown()) sales.agents.add agent -proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} = +proc OnAvailabilitySaved(sales: Sales, availability: Availability) {.async.} = ## When availabilities are modified or added, the queue should be unpaused if ## it was paused and any slots in the queue should have their `seen` flag ## cleared. 
@@ -528,10 +528,10 @@ proc startSlotQueue(sales: Sales) = slotQueue.start() - proc onAvailabilityAdded(availability: Availability) {.async.} = - await sales.onAvailabilityAdded(availability) + proc OnAvailabilitySaved(availability: Availability) {.async.} = + await sales.OnAvailabilitySaved(availability) - reservations.onAvailabilityAdded = onAvailabilityAdded + reservations.OnAvailabilitySaved = OnAvailabilitySaved proc subscribe(sales: Sales) {.async.} = await sales.subscribeRequested() diff --git a/codex/sales/reservations.nim b/codex/sales/reservations.nim index a64cb602..25ee2b99 100644 --- a/codex/sales/reservations.nim +++ b/codex/sales/reservations.nim @@ -82,11 +82,11 @@ type availabilityLock: AsyncLock # Lock for protecting assertions of availability's sizes when searching for matching availability repo: RepoStore - onAvailabilityAdded: ?OnAvailabilityAdded + OnAvailabilitySaved: ?OnAvailabilitySaved GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.} IterDispose* = proc(): Future[?!void] {.gcsafe, closure.} - OnAvailabilityAdded* = + OnAvailabilitySaved* = proc(availability: Availability): Future[void] {.upraises: [], gcsafe.} StorableIter* = ref object finished*: bool @@ -189,10 +189,10 @@ logutils.formatIt(LogFormat.textLines, SomeStorableId): logutils.formatIt(LogFormat.json, SomeStorableId): it.to0xHexLog -proc `onAvailabilityAdded=`*( - self: Reservations, onAvailabilityAdded: OnAvailabilityAdded +proc `OnAvailabilitySaved=`*( + self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved ) = - self.onAvailabilityAdded = some onAvailabilityAdded + self.OnAvailabilitySaved = some OnAvailabilitySaved func key*(id: AvailabilityId): ?!Key = ## sales / reservations / @@ -268,18 +268,18 @@ proc updateAvailability( trace "Creating new Availability" let res = await self.updateImpl(obj) # inform subscribers that Availability has been added - if onAvailabilityAdded =? 
self.onAvailabilityAdded: - # when chronos v4 is implemented, and OnAvailabilityAdded is annotated + if OnAvailabilitySaved =? self.OnAvailabilitySaved: + # when chronos v4 is implemented, and OnAvailabilitySaved is annotated # with async:(raises:[]), we can remove this try/catch as we know, with # certainty, that nothing will be raised try: - await onAvailabilityAdded(obj) + await OnAvailabilitySaved(obj) except CancelledError as e: raise e except CatchableError as e: # we don't have any insight into types of exceptions that - # `onAvailabilityAdded` can raise because it is caller-defined - warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg + # `OnAvailabilitySaved` can raise because it is caller-defined + warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg return res else: return failure(err) @@ -300,21 +300,23 @@ proc updateAvailability( let res = await self.updateImpl(obj) - if oldAvailability.freeSize < obj.freeSize: # availability added + if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or + oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or + oldAvailability.totalCollateral < obj.totalCollateral: # availability updated # inform subscribers that Availability has been modified (with increased # size) - if onAvailabilityAdded =? self.onAvailabilityAdded: - # when chronos v4 is implemented, and OnAvailabilityAdded is annotated + if OnAvailabilitySaved =? 
self.OnAvailabilitySaved: + # when chronos v4 is implemented, and OnAvailabilitySaved is annotated # with async:(raises:[]), we can remove this try/catch as we know, with # certainty, that nothing will be raised try: - await onAvailabilityAdded(obj) + await OnAvailabilitySaved(obj) except CancelledError as e: raise e except CatchableError as e: # we don't have any insight into types of exceptions that - # `onAvailabilityAdded` can raise because it is caller-defined - warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg + # `OnAvailabilitySaved` can raise because it is caller-defined + warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg return res diff --git a/tests/codex/sales/testreservations.nim b/tests/codex/sales/testreservations.nim index 79fc3626..49df059d 100644 --- a/tests/codex/sales/testreservations.nim +++ b/tests/codex/sales/testreservations.nim @@ -283,35 +283,95 @@ asyncchecksuite "Reservations module": check updated.isErr check updated.error of NotExistsError - test "onAvailabilityAdded called when availability is created": + test "OnAvailabilitySaved called when availability is created": var added: Availability - reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = added = a let availability = createAvailability() check added == availability - test "onAvailabilityAdded called when availability size is increased": + test "OnAvailabilitySaved called when availability size is increased": var availability = createAvailability() var added: Availability - reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = added = a availability.freeSize += 1 discard await reservations.update(availability) check added == availability - test "onAvailabilityAdded is not called when availability size is decreased": + test "OnAvailabilitySaved is not called when 
availability size is decreased": var availability = createAvailability() var called = false - reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = called = true availability.freeSize -= 1 discard await reservations.update(availability) check not called + test "OnAvailabilitySaved called when availability duration is increased": + var availability = createAvailability() + var added: Availability + reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + added = a + availability.duration += 1 + discard await reservations.update(availability) + + check added == availability + + test "OnAvailabilitySaved is not called when availability duration is decreased": + var availability = createAvailability() + var called = false + reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + called = true + availability.duration -= 1 + discard await reservations.update(availability) + + check not called + + test "OnAvailabilitySaved called when availability minPricePerBytePerSecond is increased": + var availability = createAvailability() + var added: Availability + reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + added = a + availability.minPricePerBytePerSecond += 1.u256 + discard await reservations.update(availability) + + check added == availability + + test "OnAvailabilitySaved is not called when availability minPricePerBytePerSecond is decreased": + var availability = createAvailability() + var called = false + reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + called = true + availability.minPricePerBytePerSecond -= 1.u256 + discard await reservations.update(availability) + + check not called + + test "OnAvailabilitySaved called when availability totalCollateral is increased": + var availability = createAvailability() + var added: Availability + reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + added 
= a + availability.totalCollateral = availability.totalCollateral + 1.u256 + discard await reservations.update(availability) + + check added == availability + + test "OnAvailabilitySaved is not called when availability totalCollateral is decreased": + var availability = createAvailability() + var called = false + reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + called = true + availability.totalCollateral = availability.totalCollateral - 1.u256 + discard await reservations.update(availability) + + check not called + test "availabilities can be found": let availability = createAvailability() From 2538ff8da397f9afe94b866ae725a7d20f41d925 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 12 Mar 2025 14:41:00 +0100 Subject: [PATCH 26/40] chore: create new httpClient per request (#1136) * Create new httpClient per request * Fix tests after rebase and close the clients at the end --- tests/integration/codexclient.nim | 86 ++++++++++++++-------------- tests/integration/testpurchasing.nim | 1 - tests/integration/testrestapi.nim | 13 +++-- 3 files changed, 51 insertions(+), 49 deletions(-) diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index f4c3f977..4a106253 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -14,33 +14,37 @@ import pkg/codex/sales/reservations export purchasing type CodexClient* = ref object - http: HttpClient baseurl: string - session: HttpSessionRef + httpClients: seq[HttpClient] type CodexClientError* = object of CatchableError const HttpClientTimeoutMs = 60 * 1000 proc new*(_: type CodexClient, baseurl: string): CodexClient = - CodexClient( - http: newHttpClient(timeout = HttpClientTimeoutMs), - baseurl: baseurl, - session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline}), - ) + CodexClient(baseurl: baseurl, httpClients: newSeq[HttpClient]()) + +proc http*(client: CodexClient): HttpClient = + let httpClient = newHttpClient(timeout = HttpClientTimeoutMs) 
+ client.httpClients.insert(httpClient) + return httpClient + +proc close*(client: CodexClient): void = + for httpClient in client.httpClients: + httpClient.close() proc info*(client: CodexClient): ?!JsonNode = let url = client.baseurl & "/debug/info" - JsonNode.parse(client.http.getContent(url)) + JsonNode.parse(client.http().getContent(url)) proc setLogLevel*(client: CodexClient, level: string) = let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level let headers = newHttpHeaders({"Content-Type": "text/plain"}) - let response = client.http.request(url, httpMethod = HttpPost, headers = headers) + let response = client.http().request(url, httpMethod = HttpPost, headers = headers) assert response.status == "200 OK" proc upload*(client: CodexClient, contents: string): ?!Cid = - let response = client.http.post(client.baseurl & "/data", contents) + let response = client.http().post(client.baseurl & "/data", contents) assert response.status == "200 OK" Cid.init(response.body).mapFailure @@ -48,9 +52,9 @@ proc upload*(client: CodexClient, bytes: seq[byte]): ?!Cid = client.upload(string.fromBytes(bytes)) proc download*(client: CodexClient, cid: Cid, local = false): ?!string = - let response = client.http.get( - client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") - ) + let response = client.http().get( + client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") + ) if response.status != "200 OK": return failure(response.status) @@ -58,7 +62,8 @@ proc download*(client: CodexClient, cid: Cid, local = false): ?!string = success response.body proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string = - let response = client.http.get(client.baseurl & "/data/" & $cid & "/network/manifest") + let response = + client.http().get(client.baseurl & "/data/" & $cid & "/network/manifest") if response.status != "200 OK": return failure(response.status) @@ -66,7 +71,7 @@ proc downloadManifestOnly*(client: CodexClient, cid: 
Cid): ?!string = success response.body proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = - let response = client.http.post(client.baseurl & "/data/" & $cid & "/network") + let response = client.http().post(client.baseurl & "/data/" & $cid & "/network") if response.status != "200 OK": return failure(response.status) @@ -78,8 +83,7 @@ proc downloadBytes*( ): Future[?!seq[byte]] {.async.} = let uri = client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") - let httpClient = newHttpClient() - let response = httpClient.get(uri) + let response = client.http().get(uri) if response.status != "200 OK": return failure("fetch failed with status " & $response.status) @@ -89,7 +93,7 @@ proc downloadBytes*( proc delete*(client: CodexClient, cid: Cid): ?!void = let url = client.baseurl & "/data/" & $cid - response = client.http.delete(url) + response = client.http().delete(url) if response.status != "204 No Content": return failure(response.status) @@ -98,7 +102,7 @@ proc delete*(client: CodexClient, cid: Cid): ?!void = proc list*(client: CodexClient): ?!RestContentList = let url = client.baseurl & "/data" - let response = client.http.get(url) + let response = client.http().get(url) if response.status != "200 OK": return failure(response.status) @@ -107,7 +111,7 @@ proc list*(client: CodexClient): ?!RestContentList = proc space*(client: CodexClient): ?!RestRepoStore = let url = client.baseurl & "/space" - let response = client.http.get(url) + let response = client.http().get(url) if response.status != "200 OK": return failure(response.status) @@ -141,7 +145,7 @@ proc requestStorageRaw*( if expiry != 0: json["expiry"] = %($expiry) - return client.http.post(url, $json) + return client.http().post(url, $json) proc requestStorage*( client: CodexClient, @@ -167,7 +171,7 @@ proc requestStorage*( proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase = let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex try: - let 
body = client.http.getContent(url) + let body = client.http().getContent(url) return RestPurchase.fromJson(body) except CatchableError as e: return failure e.msg @@ -175,14 +179,14 @@ proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase = proc getSalesAgent*(client: CodexClient, slotId: SlotId): ?!RestSalesAgent = let url = client.baseurl & "/sales/slots/" & slotId.toHex try: - let body = client.http.getContent(url) + let body = client.http().getContent(url) return RestSalesAgent.fromJson(body) except CatchableError as e: return failure e.msg proc getSlots*(client: CodexClient): ?!seq[Slot] = let url = client.baseurl & "/sales/slots" - let body = client.http.getContent(url) + let body = client.http().getContent(url) seq[Slot].fromJson(body) proc postAvailability*( @@ -200,7 +204,7 @@ proc postAvailability*( "minPricePerBytePerSecond": minPricePerBytePerSecond, "totalCollateral": totalCollateral, } - let response = client.http.post(url, $json) + let response = client.http().post(url, $json) doAssert response.status == "201 Created", "expected 201 Created, got " & response.status & ", body: " & response.body Availability.fromJson(response.body) @@ -233,7 +237,7 @@ proc patchAvailabilityRaw*( if totalCollateral =? 
totalCollateral: json["totalCollateral"] = %totalCollateral - client.http.patch(url, $json) + client.http().patch(url, $json) proc patchAvailability*( client: CodexClient, @@ -253,7 +257,7 @@ proc patchAvailability*( proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = ## Call sales availability REST endpoint let url = client.baseurl & "/sales/availability" - let body = client.http.getContent(url) + let body = client.http().getContent(url) seq[Availability].fromJson(body) proc getAvailabilityReservations*( @@ -261,16 +265,9 @@ proc getAvailabilityReservations*( ): ?!seq[Reservation] = ## Retrieves Availability's Reservations let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations" - let body = client.http.getContent(url) + let body = client.http().getContent(url) seq[Reservation].fromJson(body) -proc close*(client: CodexClient) = - client.http.close() - -proc restart*(client: CodexClient) = - client.http.close() - client.http = newHttpClient(timeout = HttpClientTimeoutMs) - proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool = client.getPurchase(id).option .? 
state == some state @@ -283,18 +280,23 @@ proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId = proc uploadRaw*( client: CodexClient, contents: string, headers = newHttpHeaders() ): Response = - return client.http.request( - client.baseurl & "/data", body = contents, httpMethod = HttpPost, headers = headers - ) + return client.http().request( + client.baseurl & "/data", + body = contents, + httpMethod = HttpPost, + headers = headers, + ) proc listRaw*(client: CodexClient): Response = - return client.http.request(client.baseurl & "/data", httpMethod = HttpGet) + return client.http().request(client.baseurl & "/data", httpMethod = HttpGet) -proc downloadRaw*(client: CodexClient, cid: string, local = false): Response = - return client.http.request( +proc downloadRaw*( + client: CodexClient, cid: string, local = false, httpClient = client.http() +): Response = + return httpClient.request( client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"), httpMethod = HttpGet, ) proc deleteRaw*(client: CodexClient, cid: string): Response = - return client.http.request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete) + return client.http().request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete) diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim index 259efcff..4eb5c775 100644 --- a/tests/integration/testpurchasing.nim +++ b/tests/integration/testpurchasing.nim @@ -89,7 +89,6 @@ twonodessuite "Purchasing": check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) await node1.restart() - client1.restart() check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) let request = client1.getPurchase(id).get.request.get diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 20bf8bc8..7164372b 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -314,24 +314,25 @@ twonodessuite "REST 
API": privateAccess(client1.http.type) let cid = client1.upload(repeat("some file contents", 1000)).get + let httpClient = client1.http() try: # Sadly, there's no high level API for preventing the client from # consuming the whole response, and we need to close the socket # before that happens if we want to trigger the bug, so we need to # resort to this. - client1.http.getBody = false - let response = client1.downloadRaw($cid) + httpClient.getBody = false + let response = client1.downloadRaw($cid, httpClient = httpClient) # Read 4 bytes from the stream just to make sure we actually # receive some data. - let data = client1.http.socket.recv(4) + let data = httpClient.socket.recv(4) check data.len == 4 # Prematurely closes the connection. - client1.http.close() + httpClient.close() finally: - client1.http.getBody = true + httpClient.getBody = true - let response = client1.downloadRaw($cid) + let response = client1.downloadRaw($cid, httpClient = httpClient) check response.body == repeat("some file contents", 1000) From 1cac3e2a117eb9ce89c6ba4f6c324e44371da5fa Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Thu, 13 Mar 2025 08:33:15 -0600 Subject: [PATCH 27/40] Fix/rework async exceptions (#1130) * cleanup imports and logs * add BlockHandle type * revert deps * refactor: async error handling and future tracking improvements - Update async procedures to use explicit raises annotation - Modify TrackedFutures to handle futures with no raised exceptions - Replace `asyncSpawn` with explicit future tracking - Update test suites to use `unittest2` - Standardize error handling across network and async components - Remove deprecated error handling patterns This commit introduces a more robust approach to async error handling and future management, improving type safety and reducing potential runtime errors. 
* bump nim-serde * remove asyncSpawn * rework background downloads and prefetch * improve logging * refactor: enhance async procedures with error handling and raise annotations * misc cleanup * misc * refactor: implement allFinishedFailed to aggregate future results with success and failure tracking * refactor: update error handling in reader procedures to raise ChunkerError and CancelledError * refactor: improve error handling in wantListHandler and accountHandler procedures * refactor: simplify LPStreamReadError creation by consolidating parameters * refactor: enhance error handling in AsyncStreamWrapper to catch unexpected errors * refactor: enhance error handling in advertiser and discovery loops to improve resilience * misc * refactor: improve code structure and readability * remove cancellation from addSlotToQueue * refactor: add assertion for unexpected errors in local store checks * refactor: prevent tracking of finished futures and improve test assertions * refactor: improve error handling in local store checks * remove usage of msgDetail * feat: add initial implementation of discovery engine and related components * refactor: improve task scheduling logic by removing unnecessary break statement * break after scheduling a task * make taskHandler cancelable * refactor: update async handlers to raise CancelledError * refactor(advertiser): streamline error handling and improve task flow in advertise loops * fix: correct spelling of "divisible" in error messages and comments * refactor(discovery): simplify discovery task loop and improve error handling * refactor(engine): filter peers before processing in cancelBlocks procedure --- codex/blockexchange/engine/advertiser.nim | 88 ++--- codex/blockexchange/engine/discovery.nim | 63 ++-- codex/blockexchange/engine/engine.nim | 320 ++++++++++-------- codex/blockexchange/engine/payments.nim | 5 +- codex/blockexchange/network/network.nim | 98 +++--- codex/blockexchange/network/networkpeer.nim | 68 ++--
codex/blockexchange/peers/peerctxstore.nim | 7 +- codex/blockexchange/protobuf/payments.nim | 6 +- codex/blockexchange/protobuf/presence.nim | 6 +- codex/chunker.nim | 11 +- codex/codex.nim | 18 +- codex/contracts/clock.nim | 14 +- codex/discovery.nim | 138 +++++--- codex/erasure/erasure.nim | 4 +- codex/errors.nim | 45 +-- codex/node.nim | 91 ++--- codex/rest/api.nim | 11 +- codex/sales.nim | 65 ++-- codex/sales/salesagent.nim | 1 - codex/sales/slotqueue.nim | 22 +- codex/slots/builder/builder.nim | 10 +- codex/slots/proofs/prover.nim | 2 + codex/streams/asyncstreamwrapper.nim | 8 +- codex/streams/storestream.nim | 6 +- codex/utils/asyncstatemachine.nim | 2 - codex/utils/timer.nim | 1 - codex/utils/trackedfutures.nim | 26 +- codex/validation.nim | 1 - tests/asynctest.nim | 4 +- .../blockexchange/discovery/testdiscovery.nim | 68 ++-- .../discovery/testdiscoveryengine.nim | 10 +- .../blockexchange/engine/testadvertiser.nim | 2 +- .../codex/blockexchange/engine/testengine.nim | 61 ++-- .../blockexchange/engine/testpayments.nim | 4 +- .../blockexchange/protobuf/testpayments.nim | 4 +- .../blockexchange/protobuf/testpresence.nim | 2 +- tests/codex/blockexchange/testnetwork.nim | 28 +- .../codex/blockexchange/testpeerctxstore.nim | 6 +- .../codex/blockexchange/testpendingblocks.nim | 2 +- tests/codex/helpers/mockchunker.nim | 2 +- tests/codex/helpers/mockdiscovery.nim | 41 ++- tests/codex/helpers/randomchunker.nim | 2 +- tests/codex/merkletree/generictreetests.nim | 2 +- tests/codex/merkletree/testcodexcoders.nim | 4 +- tests/codex/merkletree/testcodextree.nim | 2 +- tests/codex/merkletree/testmerkledigest.nim | 2 +- tests/codex/merkletree/testposeidon2tree.nim | 2 +- tests/codex/sales/states/testdownloading.nim | 4 +- tests/codex/sales/states/testfilled.nim | 2 +- tests/codex/sales/states/testfilling.nim | 4 +- tests/codex/sales/states/testunknown.nim | 2 +- tests/codex/sales/testsales.nim | 29 +- tests/codex/sales/testslotqueue.nim | 42 ++- 
tests/codex/slots/testslotbuilder.nim | 4 +- tests/codex/stores/repostore/testcoders.nim | 4 +- tests/codex/stores/testcachestore.nim | 2 +- tests/codex/stores/testkeyutils.nim | 2 +- tests/codex/stores/testmaintenance.nim | 2 +- tests/codex/stores/testrepostore.nim | 2 +- tests/codex/testasyncheapqueue.nim | 2 +- tests/codex/testchunking.nim | 13 +- tests/codex/testclock.nim | 4 +- tests/codex/testlogutils.nim | 3 +- tests/codex/testmanifest.nim | 2 +- tests/codex/testpurchasing.nim | 2 +- tests/codex/testsystemclock.nim | 6 +- tests/codex/utils/testiter.nim | 2 +- tests/codex/utils/testkeyutils.nim | 8 +- tests/codex/utils/testoptions.nim | 9 +- tests/codex/utils/testtrackedfutures.nim | 54 ++- tests/codex/utils/testutils.nim | 2 +- tests/helpers.nim | 32 ++ tests/helpers/trackers.nim | 2 +- vendor/nim-serde | 2 +- 74 files changed, 937 insertions(+), 690 deletions(-) diff --git a/codex/blockexchange/engine/advertiser.nim b/codex/blockexchange/engine/advertiser.nim index f5f28bc1..d094c454 100644 --- a/codex/blockexchange/engine/advertiser.nim +++ b/codex/blockexchange/engine/advertiser.nim @@ -41,80 +41,86 @@ type Advertiser* = ref object of RootObj advertiserRunning*: bool # Indicates if discovery is running concurrentAdvReqs: int # Concurrent advertise requests - advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle + advertiseLocalStoreLoop*: Future[void].Raising([]) # Advertise loop task handle advertiseQueue*: AsyncQueue[Cid] # Advertise queue trackedFutures*: TrackedFutures # Advertise tasks futures advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests -proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} = +proc addCidToQueue(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} = if cid notin b.advertiseQueue: await b.advertiseQueue.put(cid) + trace "Advertising", cid -proc advertiseBlock(b: Advertiser, cid: Cid) {.async.} = +proc advertiseBlock(b: 
Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} = without isM =? cid.isManifest, err: warn "Unable to determine if cid is manifest" return - if isM: - without blk =? await b.localStore.getBlock(cid), err: - error "Error retrieving manifest block", cid, err = err.msg - return + try: + if isM: + without blk =? await b.localStore.getBlock(cid), err: + error "Error retrieving manifest block", cid, err = err.msg + return - without manifest =? Manifest.decode(blk), err: - error "Unable to decode as manifest", err = err.msg - return + without manifest =? Manifest.decode(blk), err: + error "Unable to decode as manifest", err = err.msg + return - # announce manifest cid and tree cid - await b.addCidToQueue(cid) - await b.addCidToQueue(manifest.treeCid) + # announce manifest cid and tree cid + await b.addCidToQueue(cid) + await b.addCidToQueue(manifest.treeCid) + except CancelledError as exc: + trace "Cancelled advertise block", cid + raise exc + except CatchableError as e: + error "failed to advertise block", cid, error = e.msgDetail proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} = - while b.advertiserRunning: - try: - if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest): - trace "Advertiser begins iterating blocks..." - for c in cids: - if cid =? await c: - await b.advertiseBlock(cid) - trace "Advertiser iterating blocks finished." + try: + while b.advertiserRunning: + try: + if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest): + trace "Advertiser begins iterating blocks..." + for c in cids: + if cid =? await c: + await b.advertiseBlock(cid) + trace "Advertiser iterating blocks finished." 
+ except CatchableError as e: + error "Error in advertise local store loop", error = e.msgDetail + raiseAssert("Unexpected exception in advertiseLocalStoreLoop") await sleepAsync(b.advertiseLocalStoreLoopSleep) - except CancelledError: - break # do not propagate as advertiseLocalStoreLoop was asyncSpawned - except CatchableError as e: - error "failed to advertise blocks in local store", error = e.msgDetail + except CancelledError: + warn "Cancelled advertise local store loop" info "Exiting advertise task loop" proc processQueueLoop(b: Advertiser) {.async: (raises: []).} = - while b.advertiserRunning: - try: + try: + while b.advertiserRunning: let cid = await b.advertiseQueue.get() if cid in b.inFlightAdvReqs: continue - try: - let request = b.discovery.provide(cid) + let request = b.discovery.provide(cid) + b.inFlightAdvReqs[cid] = request + codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64) - b.inFlightAdvReqs[cid] = request - codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64) - await request - finally: + defer: b.inFlightAdvReqs.del(cid) codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64) - except CancelledError: - trace "Advertise task cancelled" - return - except CatchableError as exc: - warn "Exception in advertise task runner", exc = exc.msg + + await request + except CancelledError: + warn "Cancelled advertise task runner" info "Exiting advertise task runner" -proc start*(b: Advertiser) {.async.} = +proc start*(b: Advertiser) {.async: (raises: []).} = ## Start the advertiser ## @@ -134,13 +140,11 @@ proc start*(b: Advertiser) {.async.} = for i in 0 ..< b.concurrentAdvReqs: let fut = b.processQueueLoop() b.trackedFutures.track(fut) - asyncSpawn fut b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b) b.trackedFutures.track(b.advertiseLocalStoreLoop) - asyncSpawn b.advertiseLocalStoreLoop -proc stop*(b: Advertiser) {.async.} = +proc stop*(b: Advertiser) {.async: (raises: []).} = ## Stop the advertiser ## diff --git 
a/codex/blockexchange/engine/discovery.nim b/codex/blockexchange/engine/discovery.nim index c664f212..b32b8555 100644 --- a/codex/blockexchange/engine/discovery.nim +++ b/codex/blockexchange/engine/discovery.nim @@ -48,7 +48,7 @@ type DiscoveryEngine* = ref object of RootObj pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved discEngineRunning*: bool # Indicates if discovery is running concurrentDiscReqs: int # Concurrent discovery requests - discoveryLoop*: Future[void] # Discovery loop task handle + discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle discoveryQueue*: AsyncQueue[Cid] # Discovery queue trackedFutures*: TrackedFutures # Tracked Discovery tasks futures minPeersPerBlock*: int # Max number of peers with block @@ -57,30 +57,21 @@ type DiscoveryEngine* = ref object of RootObj # Inflight discovery requests proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} = - while b.discEngineRunning: - for cid in toSeq(b.pendingBlocks.wantListBlockCids): - try: + try: + while b.discEngineRunning: + for cid in toSeq(b.pendingBlocks.wantListBlockCids): await b.discoveryQueue.put(cid) - except CancelledError: - trace "Discovery loop cancelled" - return - except CatchableError as exc: - warn "Exception in discovery loop", exc = exc.msg - try: - logScope: - sleep = b.discoveryLoopSleep - wanted = b.pendingBlocks.len await sleepAsync(b.discoveryLoopSleep) - except CancelledError: - discard # do not propagate as discoveryQueueLoop was asyncSpawned + except CancelledError: + trace "Discovery loop cancelled" proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} = ## Run discovery tasks ## - while b.discEngineRunning: - try: + try: + while b.discEngineRunning: let cid = await b.discoveryQueue.get() if cid in b.inFlightDiscReqs: @@ -90,35 +81,28 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} = let haves = b.peers.peersHave(cid) if haves.len < b.minPeersPerBlock: - try: - let request = 
b.discovery.find(cid).wait(DefaultDiscoveryTimeout) + let request = b.discovery.find(cid) + b.inFlightDiscReqs[cid] = request + codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64) - b.inFlightDiscReqs[cid] = request + defer: + b.inFlightDiscReqs.del(cid) codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64) - let peers = await request + if (await request.withTimeout(DefaultDiscoveryTimeout)) and + peers =? (await request).catch: let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data))) for i, f in dialed: if f.failed: await b.discovery.removeProvider(peers[i].data.peerId) - finally: - b.inFlightDiscReqs.del(cid) - codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64) - except CancelledError: - trace "Discovery task cancelled" - return - except CatchableError as exc: - warn "Exception in discovery task runner", exc = exc.msg - except Exception as e: - # Raised by b.discovery.removeProvider somehow... - # This should not be catchable, and we should never get here. Therefore, - # raise a Defect. 
- raiseAssert "Exception when removing provider" + except CancelledError: + trace "Discovery task cancelled" + return info "Exiting discovery task runner" -proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} = +proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) = for cid in cids: if cid notin b.discoveryQueue: try: @@ -126,11 +110,11 @@ proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} = except CatchableError as exc: warn "Exception queueing discovery request", exc = exc.msg -proc start*(b: DiscoveryEngine) {.async.} = +proc start*(b: DiscoveryEngine) {.async: (raises: []).} = ## Start the discengine task ## - trace "Discovery engine start" + trace "Discovery engine starting" if b.discEngineRunning: warn "Starting discovery engine twice" @@ -140,12 +124,13 @@ proc start*(b: DiscoveryEngine) {.async.} = for i in 0 ..< b.concurrentDiscReqs: let fut = b.discoveryTaskLoop() b.trackedFutures.track(fut) - asyncSpawn fut b.discoveryLoop = b.discoveryQueueLoop() b.trackedFutures.track(b.discoveryLoop) -proc stop*(b: DiscoveryEngine) {.async.} = + trace "Discovery engine started" + +proc stop*(b: DiscoveryEngine) {.async: (raises: []).} = ## Stop the discovery engine ## diff --git a/codex/blockexchange/engine/engine.nim b/codex/blockexchange/engine/engine.nim index dafdd520..befb8ae9 100644 --- a/codex/blockexchange/engine/engine.nim +++ b/codex/blockexchange/engine/engine.nim @@ -93,12 +93,15 @@ type price*: UInt256 # attach task scheduler to engine -proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe.} = - self.taskQueue.pushOrUpdateNoWait(task).isOk() +proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, raises: [].} = + if self.taskQueue.pushOrUpdateNoWait(task).isOk(): + trace "Task scheduled for peer", peer = task.id + else: + warn "Unable to schedule task for peer", peer = task.id proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} -proc start*(self: 
BlockExcEngine) {.async.} = +proc start*(self: BlockExcEngine) {.async: (raises: []).} = ## Start the blockexc task ## @@ -115,7 +118,7 @@ proc start*(self: BlockExcEngine) {.async.} = let fut = self.blockexcTaskRunner() self.trackedFutures.track(fut) -proc stop*(self: BlockExcEngine) {.async.} = +proc stop*(self: BlockExcEngine) {.async: (raises: []).} = ## Stop the blockexc blockexc ## @@ -135,7 +138,7 @@ proc stop*(self: BlockExcEngine) {.async.} = proc sendWantHave( self: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx] -): Future[void] {.async.} = +): Future[void] {.async: (raises: [CancelledError]).} = for p in peers: let toAsk = addresses.filterIt(it notin p.peerHave) trace "Sending wantHave request", toAsk, peer = p.id @@ -144,7 +147,7 @@ proc sendWantHave( proc sendWantBlock( self: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx -): Future[void] {.async.} = +): Future[void] {.async: (raises: [CancelledError]).} = trace "Sending wantBlock request to", addresses, peer = blockPeer.id await self.network.request.sendWantList( blockPeer.id, addresses, wantType = WantType.WantBlock @@ -229,7 +232,7 @@ proc requestBlock*( proc blockPresenceHandler*( self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence] -) {.async.} = +) {.async: (raises: []).} = trace "Received block presence from peer", peer, blocks = blocks.mapIt($it) let peerCtx = self.peers.get(peer) @@ -249,20 +252,23 @@ proc blockPresenceHandler*( if dontWantCids.len > 0: peerCtx.cleanPresence(dontWantCids) - let ourWantCids = ourWantList.filter do(address: BlockAddress) -> bool: - if address in peerHave and not self.pendingBlocks.retriesExhausted(address) and - not self.pendingBlocks.isInFlight(address): - self.pendingBlocks.setInFlight(address, true) - self.pendingBlocks.decRetries(address) - true - else: - false + let ourWantCids = ourWantList.filterIt( + it in peerHave and not self.pendingBlocks.retriesExhausted(it) and + not 
self.pendingBlocks.isInFlight(it) + ) + + for address in ourWantCids: + self.pendingBlocks.setInFlight(address, true) + self.pendingBlocks.decRetries(address) if ourWantCids.len > 0: trace "Peer has blocks in our wantList", peer, wants = ourWantCids - await self.sendWantBlock(ourWantCids, peerCtx) + if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption: + warn "Failed to send wantBlock to peer", peer, err = err.msg -proc scheduleTasks(self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = +proc scheduleTasks( + self: BlockExcEngine, blocksDelivery: seq[BlockDelivery] +) {.async: (raises: [CancelledError]).} = let cids = blocksDelivery.mapIt(it.blk.cid) # schedule any new peers to provide blocks to @@ -271,15 +277,21 @@ proc scheduleTasks(self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.a # schedule a peer if it wants at least one cid # and we have it in our local store if c in p.peerWantsCids: - if await (c in self.localStore): - if self.scheduleTask(p): - trace "Task scheduled for peer", peer = p.id - else: - warn "Unable to schedule task for peer", peer = p.id + try: + if await (c in self.localStore): + # TODO: the try/except should go away once blockstore tracks exceptions + self.scheduleTask(p) + break + except CancelledError as exc: + warn "Checking local store canceled", cid = c, err = exc.msg + return + except CatchableError as exc: + error "Error checking local store for cid", cid = c, err = exc.msg + raiseAssert "Unexpected error checking local store for cid" - break # do next peer - -proc cancelBlocks(self: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} = +proc cancelBlocks( + self: BlockExcEngine, addrs: seq[BlockAddress] +) {.async: (raises: [CancelledError]).} = ## Tells neighboring peers that we're no longer interested in a block. 
## @@ -289,35 +301,43 @@ proc cancelBlocks(self: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} = trace "Sending block request cancellations to peers", addrs, peers = self.peers.peerIds - proc mapPeers(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} = - let blocks = addrs.filter do(a: BlockAddress) -> bool: - a in peerCtx.blocks + proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} = + await self.network.request.sendWantCancellations( + peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx) + ) - if blocks.len > 0: - trace "Sending block request cancellations to peer", peer = peerCtx.id, blocks - await self.network.request.sendWantCancellations( - peer = peerCtx.id, addresses = blocks + return peerCtx + + try: + let (succeededFuts, failedFuts) = await allFinishedFailed( + toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map( + processPeer ) + ) + + (await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx): peerCtx.cleanPresence(addrs) - peerCtx - let failed = (await allFinished(map(toSeq(self.peers.peers.values), mapPeers))).filterIt( - it.failed - ) - - if failed.len > 0: - warn "Failed to send block request cancellations to peers", peers = failed.len - else: - trace "Block request cancellations sent to peers", peers = self.peers.len + if failedFuts.len > 0: + warn "Failed to send block request cancellations to peers", peers = failedFuts.len + else: + trace "Block request cancellations sent to peers", peers = self.peers.len + except CancelledError as exc: + warn "Error sending block request cancellations", error = exc.msg + raise exc + except CatchableError as exc: + warn "Error sending block request cancellations", error = exc.msg proc resolveBlocks*( self: BlockExcEngine, blocksDelivery: seq[BlockDelivery] -) {.async.} = +) {.async: (raises: [CancelledError]).} = self.pendingBlocks.resolve(blocksDelivery) await self.scheduleTasks(blocksDelivery) await 
self.cancelBlocks(blocksDelivery.mapIt(it.address)) -proc resolveBlocks*(self: BlockExcEngine, blocks: seq[Block]) {.async.} = +proc resolveBlocks*( + self: BlockExcEngine, blocks: seq[Block] +) {.async: (raises: [CancelledError]).} = await self.resolveBlocks( blocks.mapIt( BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)) @@ -326,7 +346,7 @@ proc resolveBlocks*(self: BlockExcEngine, blocks: seq[Block]) {.async.} = proc payForBlocks( self: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery] -) {.async.} = +) {.async: (raises: [CancelledError]).} = let sendPayment = self.network.request.sendPayment price = peer.price(blocksDelivery.mapIt(it.address)) @@ -367,7 +387,7 @@ proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void = proc blocksDeliveryHandler*( self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery] -) {.async.} = +) {.async: (raises: []).} = trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address)) var validatedBlocksDelivery: seq[BlockDelivery] @@ -376,41 +396,47 @@ proc blocksDeliveryHandler*( peer = peer address = bd.address - if err =? self.validateBlockDelivery(bd).errorOption: - warn "Block validation failed", msg = err.msg - continue - - if err =? (await self.localStore.putBlock(bd.blk)).errorOption: - error "Unable to store block", err = err.msg - continue - - if bd.address.leaf: - without proof =? bd.proof: - error "Proof expected for a leaf block delivery" + try: + if err =? self.validateBlockDelivery(bd).errorOption: + warn "Block validation failed", msg = err.msg continue - if err =? ( - await self.localStore.putCidAndProof( - bd.address.treeCid, bd.address.index, bd.blk.cid, proof - ) - ).errorOption: - error "Unable to store proof and cid for a block" + + if err =? (await self.localStore.putBlock(bd.blk)).errorOption: + error "Unable to store block", err = err.msg continue + if bd.address.leaf: + without proof =? 
bd.proof: + warn "Proof expected for a leaf block delivery" + continue + if err =? ( + await self.localStore.putCidAndProof( + bd.address.treeCid, bd.address.index, bd.blk.cid, proof + ) + ).errorOption: + warn "Unable to store proof and cid for a block" + continue + except CatchableError as exc: + warn "Error handling block delivery", error = exc.msg + continue + validatedBlocksDelivery.add(bd) - await self.resolveBlocks(validatedBlocksDelivery) codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64) let peerCtx = self.peers.get(peer) - if peerCtx != nil: - await self.payForBlocks(peerCtx, blocksDelivery) - ## shouldn't we remove them from the want-list instead of this: - peerCtx.cleanPresence(blocksDelivery.mapIt(it.address)) + if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption: + warn "Error paying for blocks", err = err.msg + return + + if err =? catch(await self.resolveBlocks(validatedBlocksDelivery)).errorOption: + warn "Error resolving blocks", err = err.msg + return proc wantListHandler*( self: BlockExcEngine, peer: PeerId, wantList: WantList -) {.async.} = +) {.async: (raises: []).} = trace "Received want list from peer", peer, wantList = wantList.entries.len let peerCtx = self.peers.get(peer) @@ -422,68 +448,81 @@ proc wantListHandler*( presence: seq[BlockPresence] schedulePeer = false - for e in wantList.entries: - let idx = peerCtx.peerWants.findIt(it.address == e.address) + try: + for e in wantList.entries: + let idx = peerCtx.peerWants.findIt(it.address == e.address) - logScope: - peer = peerCtx.id - address = e.address - wantType = $e.wantType + logScope: + peer = peerCtx.id + address = e.address + wantType = $e.wantType - if idx < 0: # Adding new entry to peer wants - let - have = await e.address in self.localStore - price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) + if idx < 0: # Adding new entry to peer wants + let + have = + try: + await e.address in self.localStore + except 
CatchableError as exc: + # TODO: should not be necessary once we have proper exception tracking on the BlockStore interface + false + price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) - if e.cancel: - trace "Received cancelation for untracked block, skipping", address = e.address - continue + if e.cancel: + trace "Received cancelation for untracked block, skipping", + address = e.address + continue - trace "Processing want list entry", wantList = $e - case e.wantType - of WantType.WantHave: - if have: - presence.add( - BlockPresence( - address: e.address, `type`: BlockPresenceType.Have, price: price - ) - ) - else: - if e.sendDontHave: + trace "Processing want list entry", wantList = $e + case e.wantType + of WantType.WantHave: + if have: presence.add( BlockPresence( - address: e.address, `type`: BlockPresenceType.DontHave, price: price + address: e.address, `type`: BlockPresenceType.Have, price: price ) ) + else: + if e.sendDontHave: + presence.add( + BlockPresence( + address: e.address, `type`: BlockPresenceType.DontHave, price: price + ) + ) - codex_block_exchange_want_have_lists_received.inc() - of WantType.WantBlock: - peerCtx.peerWants.add(e) - schedulePeer = true - codex_block_exchange_want_block_lists_received.inc() - else: # Updating existing entry in peer wants - # peer doesn't want this block anymore - if e.cancel: - trace "Canceling want for block", address = e.address - peerCtx.peerWants.del(idx) - trace "Canceled block request", address = e.address, len = peerCtx.peerWants.len - else: - if e.wantType == WantType.WantBlock: + codex_block_exchange_want_have_lists_received.inc() + of WantType.WantBlock: + peerCtx.peerWants.add(e) schedulePeer = true - # peer might want to ask for the same cid with - # different want params - trace "Updating want for block", address = e.address - peerCtx.peerWants[idx] = e # update entry - trace "Updated block request", address = e.address, len = peerCtx.peerWants.len + 
codex_block_exchange_want_block_lists_received.inc() + else: # Updating existing entry in peer wants + # peer doesn't want this block anymore + if e.cancel: + trace "Canceling want for block", address = e.address + peerCtx.peerWants.del(idx) + trace "Canceled block request", + address = e.address, len = peerCtx.peerWants.len + else: + if e.wantType == WantType.WantBlock: + schedulePeer = true + # peer might want to ask for the same cid with + # different want params + trace "Updating want for block", address = e.address + peerCtx.peerWants[idx] = e # update entry + trace "Updated block request", + address = e.address, len = peerCtx.peerWants.len - if presence.len > 0: - trace "Sending presence to remote", items = presence.mapIt($it).join(",") - await self.network.request.sendPresence(peer, presence) + if presence.len > 0: + trace "Sending presence to remote", items = presence.mapIt($it).join(",") + await self.network.request.sendPresence(peer, presence) - if schedulePeer and not self.scheduleTask(peerCtx): - warn "Unable to schedule task for peer", peer + if schedulePeer: + self.scheduleTask(peerCtx) + except CancelledError as exc: #TODO: replace with CancelledError + warn "Error processing want list", error = exc.msg -proc accountHandler*(self: BlockExcEngine, peer: PeerId, account: Account) {.async.} = +proc accountHandler*( + self: BlockExcEngine, peer: PeerId, account: Account +) {.async: (raises: []).} = let context = self.peers.get(peer) if context.isNil: return @@ -492,7 +531,7 @@ proc accountHandler*(self: BlockExcEngine, peer: PeerId, account: Account) {.asy proc paymentHandler*( self: BlockExcEngine, peer: PeerId, payment: SignedState -) {.async.} = +) {.async: (raises: []).} = trace "Handling payments", peer without context =? self.peers.get(peer).option and account =? 
context.account: @@ -505,7 +544,9 @@ proc paymentHandler*( else: context.paymentChannel = self.wallet.acceptChannel(payment).option -proc setupPeer*(self: BlockExcEngine, peer: PeerId) {.async.} = +proc setupPeer*( + self: BlockExcEngine, peer: PeerId +) {.async: (raises: [CancelledError]).} = ## Perform initial setup, such as want ## list exchange ## @@ -524,9 +565,10 @@ proc setupPeer*(self: BlockExcEngine, peer: PeerId) {.async.} = await self.network.request.sendWantList(peer, cids, full = true) if address =? self.pricing .? address: + trace "Sending account to peer", peer await self.network.request.sendAccount(peer, Account(address: address)) -proc dropPeer*(self: BlockExcEngine, peer: PeerId) = +proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} = ## Cleanup disconnected peer ## @@ -535,7 +577,9 @@ proc dropPeer*(self: BlockExcEngine, peer: PeerId) = # drop the peer from the peers table self.peers.remove(peer) -proc taskHandler*(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = +proc taskHandler*( + self: BlockExcEngine, task: BlockExcPeerCtx +) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} = # Send to the peer blocks he wants to get, # if they present in our local store @@ -572,8 +616,11 @@ proc taskHandler*(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} let blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup)) - blocksDelivery = - blocksDeliveryFut.filterIt(it.completed and it.read.isOk).mapIt(it.read.get) + blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt: + if bd =? it.value: + bd + else: + raiseAssert "Unexpected error in local lookup" # All the wants that failed local lookup must be set to not-in-flight again. 
let @@ -595,15 +642,12 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} = ## trace "Starting blockexc task runner" - while self.blockexcRunning: - try: + try: + while self.blockexcRunning: let peerCtx = await self.taskQueue.pop() - await self.taskHandler(peerCtx) - except CancelledError: - break # do not propagate as blockexcTaskRunner was asyncSpawned - except CatchableError as e: - error "error running block exchange task", error = e.msgDetail + except CatchableError as exc: + error "error running block exchange task", error = exc.msg info "Exiting blockexc task runner" @@ -644,23 +688,29 @@ proc new*( network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) - proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} = + proc blockWantListHandler( + peer: PeerId, wantList: WantList + ): Future[void] {.async: (raises: []).} = self.wantListHandler(peer, wantList) proc blockPresenceHandler( peer: PeerId, presence: seq[BlockPresence] - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raises: []).} = self.blockPresenceHandler(peer, presence) proc blocksDeliveryHandler( peer: PeerId, blocksDelivery: seq[BlockDelivery] - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raises: []).} = self.blocksDeliveryHandler(peer, blocksDelivery) - proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} = + proc accountHandler( + peer: PeerId, account: Account + ): Future[void] {.async: (raises: []).} = self.accountHandler(peer, account) - proc paymentHandler(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} = + proc paymentHandler( + peer: PeerId, payment: SignedState + ): Future[void] {.async: (raises: []).} = self.paymentHandler(peer, payment) network.handlers = BlockExcHandlers( diff --git a/codex/blockexchange/engine/payments.nim b/codex/blockexchange/engine/payments.nim index 
88953976..260a3005 100644 --- a/codex/blockexchange/engine/payments.nim +++ b/codex/blockexchange/engine/payments.nim @@ -7,6 +7,8 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. +{.push raises: [].} + import std/math import pkg/nitro import pkg/questionable/results @@ -15,9 +17,6 @@ import ../peers export nitro export results -push: - {.upraises: [].} - const ChainId* = 0.u256 # invalid chain id for now const Asset* = EthAddress.zero # invalid ERC20 asset address for now const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals diff --git a/codex/blockexchange/network/network.nim b/codex/blockexchange/network/network.nim index daf358de..26c07445 100644 --- a/codex/blockexchange/network/network.nim +++ b/codex/blockexchange/network/network.nim @@ -35,13 +35,15 @@ const DefaultMaxInflight* = 100 type - WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} + WantListHandler* = + proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).} BlocksDeliveryHandler* = - proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.} + proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).} BlockPresenceHandler* = - proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.} - AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} - PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} + proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).} + AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).} + PaymentHandler* = + proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).} BlockExcHandlers* = object onWantList*: WantListHandler @@ -58,15 +60,20 @@ type wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ): Future[void] {.gcsafe.} - 
WantCancellationSender* = - proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} - BlocksDeliverySender* = - proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} - PresenceSender* = - proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} - AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} - PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} + ) {.async: (raises: [CancelledError]).} + WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {. + async: (raises: [CancelledError]) + .} + BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {. + async: (raises: [CancelledError]) + .} + PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {. + async: (raises: [CancelledError]) + .} + AccountSender* = + proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).} + PaymentSender* = + proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).} BlockExcRequest* = object sendWantList*: WantListSender @@ -98,7 +105,9 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool = return b.peerId == peer -proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = +proc send*( + b: BlockExcNetwork, id: PeerId, msg: pb.Message +) {.async: (raises: [CancelledError]).} = ## Send message to peer ## @@ -106,8 +115,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = trace "Unable to send, peer not found", peerId = id return - let peer = b.peers[id] try: + let peer = b.peers[id] + await b.inflightSema.acquire() await peer.send(msg) except CancelledError as error: @@ -117,7 +127,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = finally: b.inflightSema.release() -proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} = +proc handleWantList( + b: BlockExcNetwork, peer: NetworkPeer, 
list: WantList +) {.async: (raises: []).} = ## Handle incoming want list ## @@ -133,7 +145,7 @@ proc sendWantList*( wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, -): Future[void] = +) {.async: (raw: true, raises: [CancelledError]).} = ## Send a want message to peer ## @@ -154,14 +166,14 @@ proc sendWantList*( proc sendWantCancellations*( b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress] -): Future[void] {.async.} = +): Future[void] {.async: (raises: [CancelledError]).} = ## Informs a remote peer that we're no longer interested in a set of blocks ## await b.sendWantList(id = id, addresses = addresses, cancel = true) proc handleBlocksDelivery( b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery] -) {.async.} = +) {.async: (raises: []).} = ## Handle incoming blocks ## @@ -170,7 +182,7 @@ proc handleBlocksDelivery( proc sendBlocksDelivery*( b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery] -): Future[void] = +) {.async: (raw: true, raises: [CancelledError]).} = ## Send blocks to remote ## @@ -178,7 +190,7 @@ proc sendBlocksDelivery*( proc handleBlockPresence( b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence] -) {.async.} = +) {.async: (raises: []).} = ## Handle block presence ## @@ -187,7 +199,7 @@ proc handleBlockPresence( proc sendBlockPresence*( b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence] -): Future[void] = +) {.async: (raw: true, raises: [CancelledError]).} = ## Send presence to remote ## @@ -195,20 +207,24 @@ proc sendBlockPresence*( proc handleAccount( network: BlockExcNetwork, peer: NetworkPeer, account: Account -) {.async.} = +) {.async: (raises: []).} = ## Handle account info ## if not network.handlers.onAccount.isNil: await network.handlers.onAccount(peer.id, account) -proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] = +proc sendAccount*( + b: BlockExcNetwork, id: PeerId, account: Account +) {.async: 
(raw: true, raises: [CancelledError]).} = ## Send account info to remote ## b.send(id, Message(account: AccountMessage.init(account))) -proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] = +proc sendPayment*( + b: BlockExcNetwork, id: PeerId, payment: SignedState +) {.async: (raw: true, raises: [CancelledError]).} = ## Send payment to remote ## @@ -216,7 +232,7 @@ proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[ proc handlePayment( network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState -) {.async.} = +) {.async: (raises: []).} = ## Handle payment ## @@ -225,7 +241,7 @@ proc handlePayment( proc rpcHandler( b: BlockExcNetwork, peer: NetworkPeer, msg: Message -) {.async: (raises: [CatchableError]).} = +) {.async: (raises: []).} = ## handle rpc messages ## if msg.wantList.entries.len > 0: @@ -250,7 +266,9 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = if peer in b.peers: return b.peers.getOrDefault(peer, nil) - var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} = + var getConn: ConnProvider = proc(): Future[Connection] {. 
+ async: (raises: [CancelledError]) + .} = try: trace "Getting new connection stream", peer return await b.switch.dial(peer, Codec) @@ -262,9 +280,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = if not isNil(b.getConn): getConn = b.getConn - let rpcHandler = proc( - p: NetworkPeer, msg: Message - ) {.async: (raises: [CatchableError]).} = + let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} = await b.rpcHandler(p, msg) # create new pubsub peer @@ -353,26 +369,32 @@ proc new*( wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave) proc sendWantCancellations( id: PeerId, addresses: seq[BlockAddress] - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendWantCancellations(id, addresses) proc sendBlocksDelivery( id: PeerId, blocksDelivery: seq[BlockDelivery] - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendBlocksDelivery(id, blocksDelivery) - proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} = + proc sendPresence( + id: PeerId, presence: seq[BlockPresence] + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendBlockPresence(id, presence) - proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} = + proc sendAccount( + id: PeerId, account: Account + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendAccount(id, account) - proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} = + proc sendPayment( + id: PeerId, payment: SignedState + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendPayment(id, payment) self.request = BlockExcRequest( diff --git 
a/codex/blockexchange/network/networkpeer.nim b/codex/blockexchange/network/networkpeer.nim index 4a100340..66c39294 100644 --- a/codex/blockexchange/network/networkpeer.nim +++ b/codex/blockexchange/network/networkpeer.nim @@ -7,9 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import pkg/upraises -push: - {.upraises: [].} +{.push raises: [].} import pkg/chronos import pkg/libp2p @@ -18,6 +16,7 @@ import ../protobuf/blockexc import ../protobuf/message import ../../errors import ../../logutils +import ../../utils/trackedfutures logScope: topics = "codex blockexcnetworkpeer" @@ -25,11 +24,10 @@ logScope: const DefaultYieldInterval = 50.millis type - ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.} + ConnProvider* = + proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).} - RPCHandler* = proc( - peer: NetworkPeer, msg: Message - ): Future[void].Raising(CatchableError) {.gcsafe.} + RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).} NetworkPeer* = ref object of RootObj id*: PeerId @@ -37,55 +35,60 @@ type sendConn: Connection getConn: ConnProvider yieldInterval*: Duration = DefaultYieldInterval + trackedFutures: TrackedFutures -proc connected*(b: NetworkPeer): bool = - not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof) +proc connected*(self: NetworkPeer): bool = + not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof) -proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} = +proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} = if isNil(conn): - trace "No connection to read from", peer = b.id + trace "No connection to read from", peer = self.id return - trace "Attaching read loop", peer = b.id, connId = conn.oid + trace "Attaching read loop", peer = self.id, connId = conn.oid try: - var nextYield = Moment.now() + b.yieldInterval + var nextYield = Moment.now() + 
self.yieldInterval while not conn.atEof or not conn.closed: if Moment.now() > nextYield: - nextYield = Moment.now() + b.yieldInterval + nextYield = Moment.now() + self.yieldInterval trace "Yielding in read loop", - peer = b.id, nextYield = nextYield, interval = b.yieldInterval + peer = self.id, nextYield = nextYield, interval = self.yieldInterval await sleepAsync(10.millis) let data = await conn.readLp(MaxMessageSize.int) msg = Message.protobufDecode(data).mapFailure().tryGet() - trace "Received message", peer = b.id, connId = conn.oid - await b.handler(b, msg) + trace "Received message", peer = self.id, connId = conn.oid + await self.handler(self, msg) except CancelledError: trace "Read loop cancelled" except CatchableError as err: warn "Exception in blockexc read loop", msg = err.msg finally: - trace "Detaching read loop", peer = b.id, connId = conn.oid + trace "Detaching read loop", peer = self.id, connId = conn.oid await conn.close() -proc connect*(b: NetworkPeer): Future[Connection] {.async.} = - if b.connected: - trace "Already connected", peer = b.id, connId = b.sendConn.oid - return b.sendConn +proc connect*( + self: NetworkPeer +): Future[Connection] {.async: (raises: [CancelledError]).} = + if self.connected: + trace "Already connected", peer = self.id, connId = self.sendConn.oid + return self.sendConn - b.sendConn = await b.getConn() - asyncSpawn b.readLoop(b.sendConn) - return b.sendConn + self.sendConn = await self.getConn() + self.trackedFutures.track(self.readLoop(self.sendConn)) + return self.sendConn -proc send*(b: NetworkPeer, msg: Message) {.async.} = - let conn = await b.connect() +proc send*( + self: NetworkPeer, msg: Message +) {.async: (raises: [CancelledError, LPStreamError]).} = + let conn = await self.connect() if isNil(conn): - warn "Unable to get send connection for peer message not sent", peer = b.id + warn "Unable to get send connection for peer message not sent", peer = self.id return - trace "Sending message", peer = b.id, connId = 
conn.oid + trace "Sending message", peer = self.id, connId = conn.oid await conn.writeLp(protobufEncode(msg)) func new*( @@ -96,4 +99,9 @@ func new*( ): NetworkPeer = doAssert(not isNil(connProvider), "should supply connection provider") - NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler) + NetworkPeer( + id: peer, + getConn: connProvider, + handler: rpcHandler, + trackedFutures: TrackedFutures(), + ) diff --git a/codex/blockexchange/peers/peerctxstore.nim b/codex/blockexchange/peers/peerctxstore.nim index 739d92b5..ce2506a8 100644 --- a/codex/blockexchange/peers/peerctxstore.nim +++ b/codex/blockexchange/peers/peerctxstore.nim @@ -7,16 +7,13 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. +{.push raises: [].} + import std/sequtils import std/tables import std/algorithm import std/sequtils -import pkg/upraises - -push: - {.upraises: [].} - import pkg/chronos import pkg/libp2p diff --git a/codex/blockexchange/protobuf/payments.nim b/codex/blockexchange/protobuf/payments.nim index 5d010a81..885562c4 100644 --- a/codex/blockexchange/protobuf/payments.nim +++ b/codex/blockexchange/protobuf/payments.nim @@ -1,8 +1,9 @@ +{.push raises: [].} + import pkg/stew/byteutils import pkg/stint import pkg/nitro import pkg/questionable -import pkg/upraises import ./blockexc export AccountMessage @@ -11,9 +12,6 @@ export StateChannelUpdate export stint export nitro -push: - {.upraises: [].} - type Account* = object address*: EthAddress diff --git a/codex/blockexchange/protobuf/presence.nim b/codex/blockexchange/protobuf/presence.nim index d941746d..3b24a570 100644 --- a/codex/blockexchange/protobuf/presence.nim +++ b/codex/blockexchange/protobuf/presence.nim @@ -1,8 +1,9 @@ +{.push raises: [].} + import libp2p import pkg/stint import pkg/questionable import pkg/questionable/results -import pkg/upraises import ./blockexc import ../../blocktype @@ -11,9 +12,6 @@ export questionable export stint export BlockPresenceType 
-upraises.push: - {.upraises: [].} - type PresenceMessage* = blockexc.BlockPresence Presence* = object diff --git a/codex/chunker.nim b/codex/chunker.nim index f735aa4b..908dd0c0 100644 --- a/codex/chunker.nim +++ b/codex/chunker.nim @@ -28,8 +28,11 @@ const DefaultChunkSize* = DefaultBlockSize type # default reader type + ChunkerError* = object of CatchableError ChunkBuffer* = ptr UncheckedArray[byte] - Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.gcsafe, raises: [Defect].} + Reader* = proc(data: ChunkBuffer, len: int): Future[int] {. + gcsafe, async: (raises: [ChunkerError, CancelledError]) + .} # Reader that splits input data into fixed-size chunks Chunker* = ref object @@ -74,7 +77,7 @@ proc new*( proc reader( data: ChunkBuffer, len: int - ): Future[int] {.gcsafe, async, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = var res = 0 try: while res < len: @@ -85,7 +88,7 @@ proc new*( raise error except LPStreamError as error: error "LPStream error", err = error.msg - raise error + raise newException(ChunkerError, "LPStream error", error) except CatchableError as exc: error "CatchableError exception", exc = exc.msg raise newException(Defect, exc.msg) @@ -102,7 +105,7 @@ proc new*( proc reader( data: ChunkBuffer, len: int - ): Future[int] {.gcsafe, async, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = var total = 0 try: while total < len: diff --git a/codex/codex.nim b/codex/codex.nim index 8a03510c..391a94fc 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -177,14 +177,20 @@ proc start*(s: CodexServer) {.async.} = proc stop*(s: CodexServer) {.async.} = notice "Stopping codex node" - await allFuturesThrowing( - s.restServer.stop(), - s.codexNode.switch.stop(), - s.codexNode.stop(), - s.repoStore.stop(), - s.maintenance.stop(), + let res = await noCancel allFinishedFailed( + @[ + s.restServer.stop(), + s.codexNode.switch.stop(), + 
s.codexNode.stop(), + s.repoStore.stop(), + s.maintenance.stop(), + ] ) + if res.failure.len > 0: + error "Failed to stop codex node", failures = res.failure.len + raiseAssert "Failed to stop codex node" + proc new*( T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey ): CodexServer = diff --git a/codex/contracts/clock.nim b/codex/contracts/clock.nim index b5bf7ebb..b7863539 100644 --- a/codex/contracts/clock.nim +++ b/codex/contracts/clock.nim @@ -5,6 +5,7 @@ import pkg/chronos import pkg/stint import ../clock import ../conf +import ../utils/trackedfutures export clock @@ -18,9 +19,12 @@ type OnChainClock* = ref object of Clock blockNumber: UInt256 started: bool newBlock: AsyncEvent + trackedFutures: TrackedFutures proc new*(_: type OnChainClock, provider: Provider): OnChainClock = - OnChainClock(provider: provider, newBlock: newAsyncEvent()) + OnChainClock( + provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures() + ) proc update(clock: OnChainClock, blck: Block) = if number =? blck.number and number > clock.blockNumber: @@ -32,15 +36,12 @@ proc update(clock: OnChainClock, blck: Block) = blockTime = blck.timestamp, blockNumber = number, offset = clock.offset clock.newBlock.fire() -proc update(clock: OnChainClock) {.async.} = +proc update(clock: OnChainClock) {.async: (raises: []).} = try: if latest =? 
(await clock.provider.getBlock(BlockTag.latest)): clock.update(latest) - except CancelledError as error: - raise error except CatchableError as error: debug "error updating clock: ", error = error.msg - discard method start*(clock: OnChainClock) {.async.} = if clock.started: @@ -52,7 +53,7 @@ method start*(clock: OnChainClock) {.async.} = return # ignore block parameter; hardhat may call this with pending blocks - asyncSpawn clock.update() + clock.trackedFutures.track(clock.update()) await clock.update() @@ -64,6 +65,7 @@ method stop*(clock: OnChainClock) {.async.} = return await clock.subscription.unsubscribe() + await clock.trackedFutures.cancelTracked() clock.started = false method now*(clock: OnChainClock): SecondsSince1970 = diff --git a/codex/discovery.nim b/codex/discovery.nim index 9aa8c7d8..eed1f89b 100644 --- a/codex/discovery.nim +++ b/codex/discovery.nim @@ -7,6 +7,8 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. +{.push raises: [].} + import std/algorithm import std/sequtils @@ -54,70 +56,122 @@ proc toNodeId*(host: ca.Address): NodeId = readUintBE[256](keccak256.digest(host.toArray).data) -proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} = +proc findPeer*( + d: Discovery, peerId: PeerId +): Future[?PeerRecord] {.async: (raises: [CancelledError]).} = trace "protocol.resolve..." 
## Find peer using the given Discovery object ## - let node = await d.protocol.resolve(toNodeId(peerId)) - return - if node.isSome(): - node.get().record.data.some - else: - PeerRecord.none + try: + let node = await d.protocol.resolve(toNodeId(peerId)) -method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} = + return + if node.isSome(): + node.get().record.data.some + else: + PeerRecord.none + except CancelledError as exc: + warn "Error finding peer", peerId = peerId, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error finding peer", peerId = peerId, exc = exc.msg + + return PeerRecord.none + +method find*( + d: Discovery, cid: Cid +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} = ## Find block providers ## - without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error: - warn "Error finding providers for block", cid, error = error.msg - return providers.filterIt(not (it.data.peerId == d.peerId)) + try: + without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, + error: + warn "Error finding providers for block", cid, error = error.msg -method provide*(d: Discovery, cid: Cid) {.async, base.} = + return providers.filterIt(not (it.data.peerId == d.peerId)) + except CancelledError as exc: + warn "Error finding providers for block", cid, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error finding providers for block", cid, exc = exc.msg + +method provide*(d: Discovery, cid: Cid) {.async: (raises: [CancelledError]), base.} = ## Provide a block Cid ## - let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get) + try: + let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get) - if nodes.len <= 0: - warn "Couldn't provide to any nodes!" + if nodes.len <= 0: + warn "Couldn't provide to any nodes!" 
+ except CancelledError as exc: + warn "Error providing block", cid, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error providing block", cid, exc = exc.msg method find*( d: Discovery, host: ca.Address -): Future[seq[SignedPeerRecord]] {.async, base.} = +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} = ## Find host providers ## - trace "Finding providers for host", host = $host - without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure, - error: - trace "Error finding providers for host", host = $host, exc = error.msg - return + try: + trace "Finding providers for host", host = $host + without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure, + error: + trace "Error finding providers for host", host = $host, exc = error.msg + return - if providers.len <= 0: - trace "No providers found", host = $host - return + if providers.len <= 0: + trace "No providers found", host = $host + return - providers.sort do(a, b: SignedPeerRecord) -> int: - system.cmp[uint64](a.data.seqNo, b.data.seqNo) + providers.sort do(a, b: SignedPeerRecord) -> int: + system.cmp[uint64](a.data.seqNo, b.data.seqNo) - return providers + return providers + except CancelledError as exc: + warn "Error finding providers for host", host = $host, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error finding providers for host", host = $host, exc = exc.msg -method provide*(d: Discovery, host: ca.Address) {.async, base.} = +method provide*( + d: Discovery, host: ca.Address +) {.async: (raises: [CancelledError]), base.} = ## Provide hosts ## - trace "Providing host", host = $host - let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get) - if nodes.len > 0: - trace "Provided to nodes", nodes = nodes.len + try: + trace "Providing host", host = $host + let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get) + if nodes.len > 0: + trace 
"Provided to nodes", nodes = nodes.len + except CancelledError as exc: + warn "Error providing host", host = $host, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error providing host", host = $host, exc = exc.msg -method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} = +method removeProvider*( + d: Discovery, peerId: PeerId +): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} = ## Remove provider from providers table ## trace "Removing provider", peerId - d.protocol.removeProvidersLocal(peerId) + try: + await d.protocol.removeProvidersLocal(peerId) + except CancelledError as exc: + warn "Error removing provider", peerId = peerId, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error removing provider", peerId = peerId, exc = exc.msg + except Exception as exc: # Something in discv5 is raising Exception + warn "Error removing provider", peerId = peerId, exc = exc.msg + raiseAssert("Unexpected Exception in removeProvider") proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) = ## Update providers record @@ -145,12 +199,18 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) = if not d.protocol.isNil: d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR") -proc start*(d: Discovery) {.async.} = - d.protocol.open() - await d.protocol.start() +proc start*(d: Discovery) {.async: (raises: []).} = + try: + d.protocol.open() + await d.protocol.start() + except CatchableError as exc: + error "Error starting discovery", exc = exc.msg -proc stop*(d: Discovery) {.async.} = - await d.protocol.closeWait() +proc stop*(d: Discovery) {.async: (raises: []).} = + try: + await noCancel d.protocol.closeWait() + except CatchableError as exc: + error "Error stopping discovery", exc = exc.msg proc new*( T: type Discovery, diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim index 107f85bc..78ce3971 100644 --- a/codex/erasure/erasure.nim +++ 
b/codex/erasure/erasure.nim @@ -330,7 +330,7 @@ proc encodeAsync*( defer: freeDoubleArray(blockData, blocksLen) - ## Create an ecode task with block data + ## Create an ecode task with block data var task = EncodeTask( erasure: addr self, blockSize: blockSize, @@ -540,7 +540,7 @@ proc decodeAsync*( freeDoubleArray(blocksData, blocksLen) freeDoubleArray(parityData, parityLen) - ## Create an decode task with block data + ## Create an decode task with block data var task = DecodeTask( erasure: addr self, blockSize: blockSize, diff --git a/codex/errors.nim b/codex/errors.nim index 75cefde4..fadf7299 100644 --- a/codex/errors.nim +++ b/codex/errors.nim @@ -19,6 +19,8 @@ type CodexError* = object of CatchableError # base codex error CodexResult*[T] = Result[T, ref CodexError] + FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]] + template mapFailure*[T, V, E]( exp: Result[T, V], exc: typedesc[E] ): Result[T, ref CatchableError] = @@ -40,35 +42,18 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} = else: T.failure("Option is None") -# allFuturesThrowing was moved to the tests in libp2p -proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] = - var futs: seq[Future[T]] - for fut in args: - futs &= fut - proc call() {.async.} = - var first: ref CatchableError = nil - futs = await allFinished(futs) - for fut in futs: - if fut.failed: - let err = fut.readError() - if err of Defect: - raise err - else: - if err of CancelledError: - raise err - if isNil(first): - first = err - if not isNil(first): - raise first +proc allFinishedFailed*[T](futs: seq[Future[T]]): Future[FinishedFailed[T]] {.async.} = + ## Check if all futures have finished or failed + ## + ## TODO: wip, not sure if we want this - at the minimum, + ## we should probably avoid the async transform - return call() + var res: FinishedFailed[T] = (@[], @[]) + await allFutures(futs) + for f in futs: + if f.failed: + res.failure.add f + else: + 
res.success.add f -proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} = - try: - await allFuturesThrowing(fut) - except CancelledError as exc: - raise exc - except CatchableError as exc: - return failure(exc.msg) - - return success() + return res diff --git a/codex/node.nim b/codex/node.nim index b248e6df..203e034a 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -153,7 +153,11 @@ proc updateExpiry*( let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt( self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry) ) - await allFuturesThrowing(ensuringFutures) + + let res = await allFinishedFailed(ensuringFutures) + if res.failure.len > 0: + trace "Some blocks failed to update expiry", len = res.failure.len + return failure("Some blocks failed to update expiry (" & $res.failure.len & " )") except CancelledError as exc: raise exc except CatchableError as exc: @@ -186,8 +190,10 @@ proc fetchBatched*( if not (await address in self.networkStore) or fetchLocal: self.networkStore.getBlock(address) - if blocksErr =? (await allFutureResult(blocks)).errorOption: - return failure(blocksErr) + let res = await allFinishedFailed(blocks) + if res.failure.len > 0: + trace "Some blocks failed to fetch", len = res.failure.len + return failure("Some blocks failed to fetch (" & $res.failure.len & " )") if not onBatch.isNil and batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption: @@ -213,6 +219,30 @@ proc fetchBatched*( let iter = Iter[int].new(0 ..< manifest.blocksCount) self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal) +proc fetchDatasetAsync*( + self: CodexNodeRef, manifest: Manifest, fetchLocal = true +): Future[void] {.async: (raises: []).} = + ## Asynchronously fetch a dataset in the background. + ## This task will be tracked and cleaned up on node shutdown. + ## + try: + if err =? 
( + await self.fetchBatched( + manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal + ) + ).errorOption: + error "Unable to fetch blocks", err = err.msg + except CancelledError as exc: + trace "Cancelled fetching blocks", exc = exc.msg + except CatchableError as exc: + error "Error fetching blocks", exc = exc.msg + +proc fetchDatasetAsyncTask*(self: CodexNodeRef, manifest: Manifest) = + ## Start fetching a dataset in the background. + ## The task will be tracked and cleaned up on node shutdown. + ## + self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false)) + proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} = ## Streams the contents of a single block. ## @@ -223,36 +253,27 @@ proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err: return failure(err) - proc streamOneBlock(): Future[void] {.async.} = + proc streamOneBlock(): Future[void] {.async: (raises: []).} = try: + defer: + await stream.pushEof() await stream.pushData(blk.data) except CatchableError as exc: trace "Unable to send block", cid, exc = exc.msg - discard - finally: - await stream.pushEof() self.trackedFutures.track(streamOneBlock()) LPStream(stream).success proc streamEntireDataset( - self: CodexNodeRef, - manifest: Manifest, - manifestCid: Cid, - prefetchBatch = DefaultFetchBatch, + self: CodexNodeRef, manifest: Manifest, manifestCid: Cid ): Future[?!LPStream] {.async.} = ## Streams the contents of the entire dataset described by the manifest. - ## Background jobs (erasure decoding and prefetching) will be cancelled when - ## the stream is closed. 
## trace "Retrieving blocks from manifest", manifestCid - let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false)) - var jobs: seq[Future[void]] - if manifest.protected: # Retrieve, decode and save to the local store all EС groups - proc erasureJob(): Future[void] {.async.} = + proc erasureJob(): Future[void] {.async: (raises: []).} = try: # Spawn an erasure decoding job let erasure = Erasure.new( @@ -260,36 +281,17 @@ proc streamEntireDataset( ) without _ =? (await erasure.decode(manifest)), error: error "Unable to erasure decode manifest", manifestCid, exc = error.msg - except CancelledError: - trace "Erasure job cancelled", manifestCid except CatchableError as exc: trace "Error erasure decoding manifest", manifestCid, exc = exc.msg - jobs.add(erasureJob()) + self.trackedFutures.track(erasureJob()) - proc prefetch(): Future[void] {.async.} = - try: - if err =? - (await self.fetchBatched(manifest, prefetchBatch, fetchLocal = false)).errorOption: - error "Unable to fetch blocks", err = err.msg - except CancelledError: - trace "Prefetch job cancelled" - except CatchableError as exc: - error "Error fetching blocks", exc = exc.msg - - jobs.add(prefetch()) - - # Monitor stream completion and cancel background jobs when done - proc monitorStream() {.async.} = - try: - await stream.join() - finally: - await allFutures(jobs.mapIt(it.cancelAndWait)) - - self.trackedFutures.track(monitorStream()) + self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false)) + # prefetch task should not fetch from local store + # Retrieve all blocks of the dataset sequentially from the local store or network trace "Creating store stream for manifest", manifestCid - stream.success + LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success proc retrieve*( self: CodexNodeRef, cid: Cid, local: bool = true @@ -632,8 +634,11 @@ proc onStore( let ensureExpiryFutures = blocks.mapIt(self.networkStore.ensureExpiry(it.cid, 
expiry.toSecondsSince1970)) - if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption: - return failure(updateExpiryErr) + + let res = await allFinishedFailed(ensureExpiryFutures) + if res.failure.len > 0: + trace "Some blocks failed to update expiry", len = res.failure.len + return failure("Some blocks failed to update expiry (" & $res.failure.len & " )") if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption: trace "Unable to process blocks", err = err.msg diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 7cb0b43f..054e1c2b 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -315,15 +315,8 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute error "Failed to fetch manifest", err = err.msg return RestApiResponse.error(Http404, err.msg, headers = headers) - proc fetchDatasetAsync(): Future[void] {.async.} = - try: - if err =? (await node.fetchBatched(manifest)).errorOption: - error "Unable to fetch dataset", cid = cid.get(), err = err.msg - except CatchableError as exc: - error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg - discard - - asyncSpawn fetchDatasetAsync() + # Start fetching the dataset in the background + node.fetchDatasetAsyncTask(manifest) let json = %formatManifest(cid.get(), manifest) return RestApiResponse.response($json, contentType = "application/json") diff --git a/codex/sales.nim b/codex/sales.nim index e2a884df..998a2967 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -341,48 +341,51 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) = trace "slot freed, adding to queue" - proc addSlotToQueue() {.async: (raises: [CancelledError]).} = + proc addSlotToQueue() {.async: (raises: []).} = let context = sales.context let market = context.market let queue = context.slotQueue - without request =? 
(await market.getRequest(requestId)), err: - error "unknown request in contract", error = err.msgDetail - return + try: + without request =? (await market.getRequest(requestId)), err: + error "unknown request in contract", error = err.msgDetail + return - # Take the repairing state into consideration to calculate the collateral. - # This is particularly needed because it will affect the priority in the queue - # and we want to give the user the ability to tweak the parameters. - # Adding the repairing state directly in the queue priority calculation - # would not allow this flexibility. - without collateral =? - market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err: - error "Failed to add freed slot to queue: unable to calculate collateral", - error = err.msg - return + # Take the repairing state into consideration to calculate the collateral. + # This is particularly needed because it will affect the priority in the queue + # and we want to give the user the ability to tweak the parameters. + # Adding the repairing state directly in the queue priority calculation + # would not allow this flexibility. + without collateral =? + market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err: + error "Failed to add freed slot to queue: unable to calculate collateral", + error = err.msg + return - if slotIndex > uint16.high.uint64: - error "Cannot cast slot index to uint16, value = ", slotIndex - return + if slotIndex > uint16.high.uint64: + error "Cannot cast slot index to uint16, value = ", slotIndex + return - without slotQueueItem =? - SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch, err: - warn "Too many slots, cannot add to queue", error = err.msgDetail - return + without slotQueueItem =? + SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch, + err: + warn "Too many slots, cannot add to queue", error = err.msgDetail + return - if err =? 
queue.push(slotQueueItem).errorOption: - if err of SlotQueueItemExistsError: - error "Failed to push item to queue becaue it already exists", - error = err.msgDetail - elif err of QueueNotRunningError: - warn "Failed to push item to queue becaue queue is not running", - error = err.msgDetail + if err =? queue.push(slotQueueItem).errorOption: + if err of SlotQueueItemExistsError: + error "Failed to push item to queue becaue it already exists", + error = err.msgDetail + elif err of QueueNotRunningError: + warn "Failed to push item to queue becaue queue is not running", + error = err.msgDetail + except CatchableError as e: + warn "Failed to add slot to queue", error = e.msg # We could get rid of this by adding the storage ask in the SlotFreed event, # so we would not need to call getRequest to get the collateralPerSlot. let fut = addSlotToQueue() sales.trackedFutures.track(fut) - asyncSpawn fut proc subscribeRequested(sales: Sales) {.async.} = let context = sales.context @@ -522,7 +525,9 @@ proc startSlotQueue(sales: Sales) = let slotQueue = sales.context.slotQueue let reservations = sales.context.reservations - slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = + slotQueue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex sales.processSlot(item, done) diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index e6328a83..f0abf3ee 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -103,7 +103,6 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} = error "Error while waiting for expiry to lapse", error = e.msgDetail data.cancelled = onCancelled() - asyncSpawn data.cancelled method onFulfilled*( agent: SalesAgent, requestId: RequestId diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim index fa57a983..60700d44 100644 --- a/codex/sales/slotqueue.nim 
+++ b/codex/sales/slotqueue.nim @@ -3,7 +3,6 @@ import std/tables import pkg/chronos import pkg/questionable import pkg/questionable/results -import pkg/upraises import ../errors import ../clock import ../logutils @@ -17,8 +16,9 @@ logScope: topics = "marketplace slotqueue" type - OnProcessSlot* = - proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].} + OnProcessSlot* = proc(item: SlotQueueItem, done: Future[void]): Future[void] {. + gcsafe, async: (raises: []) + .} # Non-ref obj copies value when assigned, preventing accidental modification # of values which could cause an incorrect order (eg @@ -26,7 +26,7 @@ type # but the heap invariant would no longer be honoured. When non-ref, the # compiler can ensure that statement will fail). SlotQueueWorker = object - doneProcessing*: Future[void] + doneProcessing*: Future[void].Raising([]) SlotQueueItem* = object requestId: RequestId @@ -126,7 +126,17 @@ proc new*( # `newAsyncQueue` procedure proc init(_: type SlotQueueWorker): SlotQueueWorker = - SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing")) + let workerFut = Future[void].Raising([]).init( + "slotqueue.worker.processing", {FutureFlag.OwnCancelSchedule} + ) + + workerFut.cancelCallback = proc(data: pointer) {.raises: [].} = + # this is equivalent to try: ... except CatchableError: ... 
+ if not workerFut.finished: + workerFut.complete() + trace "Cancelling `SlotQueue` worker processing future" + + SlotQueueWorker(doneProcessing: workerFut) proc init*( _: type SlotQueueItem, @@ -419,7 +429,6 @@ proc run(self: SlotQueue) {.async: (raises: []).} = let fut = self.dispatch(worker, item) self.trackedFutures.track(fut) - asyncSpawn fut await sleepAsync(1.millis) # poll except CancelledError: @@ -447,7 +456,6 @@ proc start*(self: SlotQueue) = let fut = self.run() self.trackedFutures.track(fut) - asyncSpawn fut proc stop*(self: SlotQueue) {.async.} = if not self.running: diff --git a/codex/slots/builder/builder.nim b/codex/slots/builder/builder.nim index 30332f1c..1ea57a0f 100644 --- a/codex/slots/builder/builder.nim +++ b/codex/slots/builder/builder.nim @@ -315,13 +315,15 @@ proc new*[T, H]( cellSize = cellSize if (manifest.blocksCount mod manifest.numSlots) != 0: - trace "Number of blocks must be divisable by number of slots." - return failure("Number of blocks must be divisable by number of slots.") + const msg = "Number of blocks must be divisible by number of slots." + trace msg + return failure(msg) let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize if (manifest.blockSize mod cellSize) != 0.NBytes: - trace "Block size must be divisable by cell size." - return failure("Block size must be divisable by cell size.") + const msg = "Block size must be divisible by cell size." + trace msg + return failure(msg) let numSlotBlocks = manifest.numSlotBlocks diff --git a/codex/slots/proofs/prover.nim b/codex/slots/proofs/prover.nim index 36fc0a05..b1aa77c0 100644 --- a/codex/slots/proofs/prover.nim +++ b/codex/slots/proofs/prover.nim @@ -38,7 +38,9 @@ type AnyProof* = CircomProof AnySampler* = Poseidon2Sampler + # add any other generic type here, eg. Poseidon2Sampler | ReinforceConcreteSampler AnyBuilder* = Poseidon2Builder + # add any other generic type here, eg. 
Poseidon2Builder | ReinforceConcreteBuilder AnyProofInputs* = ProofInputs[Poseidon2Hash] Prover* = ref object of RootObj diff --git a/codex/streams/asyncstreamwrapper.nim b/codex/streams/asyncstreamwrapper.nim index 6d5e703a..6708816d 100644 --- a/codex/streams/asyncstreamwrapper.nim +++ b/codex/streams/asyncstreamwrapper.nim @@ -57,6 +57,8 @@ template withExceptions(body: untyped) = raise newLPStreamEOFError() except AsyncStreamError as exc: raise newException(LPStreamError, exc.msg) + except CatchableError as exc: + raise newException(Defect, "Unexpected error in AsyncStreamWrapper", exc) method readOnce*( self: AsyncStreamWrapper, pbytes: pointer, nbytes: int @@ -74,11 +76,13 @@ method readOnce*( proc completeWrite( self: AsyncStreamWrapper, fut: Future[void], msgLen: int -): Future[void] {.async.} = +): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} = withExceptions: await fut -method write*(self: AsyncStreamWrapper, msg: seq[byte]): Future[void] = +method write*( + self: AsyncStreamWrapper, msg: seq[byte] +): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} = # Avoid a copy of msg being kept in the closure created by `{.async.}` as this # drives up memory usage diff --git a/codex/streams/storestream.nim b/codex/streams/storestream.nim index a68e2ea7..64a356de 100644 --- a/codex/streams/storestream.nim +++ b/codex/streams/storestream.nim @@ -67,13 +67,9 @@ method atEof*(self: StoreStream): bool = self.offset >= self.size type LPStreamReadError* = object of LPStreamError - par*: ref CatchableError proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError = - var w = newException(LPStreamReadError, "Read stream failed") - w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg - w.par = p - result = w + newException(LPStreamReadError, "Read stream failed", p) method readOnce*( self: StoreStream, pbytes: pointer, nbytes: int diff --git a/codex/utils/asyncstatemachine.nim 
b/codex/utils/asyncstatemachine.nim index 2d87ebc1..eb84378c 100644 --- a/codex/utils/asyncstatemachine.nim +++ b/codex/utils/asyncstatemachine.nim @@ -74,7 +74,6 @@ proc scheduler(machine: Machine) {.async: (raises: []).} = debug "enter state", state = fromState & " => " & $machine.state running = machine.run(machine.state) machine.trackedFutures.track(running) - asyncSpawn running except CancelledError: break # do not propagate bc it is asyncSpawned @@ -88,7 +87,6 @@ proc start*(machine: Machine, initialState: State) = machine.started = true let fut = machine.scheduler() machine.trackedFutures.track(fut) - asyncSpawn fut machine.schedule(Event.transition(machine.state, initialState)) proc stop*(machine: Machine) {.async.} = diff --git a/codex/utils/timer.nim b/codex/utils/timer.nim index 0a5a940a..5a9537cf 100644 --- a/codex/utils/timer.nim +++ b/codex/utils/timer.nim @@ -50,7 +50,6 @@ method start*( timer.callback = callback timer.interval = interval timer.loopFuture = timerLoop(timer) - asyncSpawn timer.loopFuture method stop*(timer: Timer) {.async, base.} = if timer.loopFuture != nil and not timer.loopFuture.finished: diff --git a/codex/utils/trackedfutures.nim b/codex/utils/trackedfutures.nim index eb3cc219..34007e08 100644 --- a/codex/utils/trackedfutures.nim +++ b/codex/utils/trackedfutures.nim @@ -5,9 +5,11 @@ import ../logutils {.push raises: [].} -type TrackedFutures* = ref object - futures: Table[uint, FutureBase] - cancelling: bool +type + TrackedFuture = Future[void].Raising([]) + TrackedFutures* = ref object + futures: Table[uint, TrackedFuture] + cancelling: bool logScope: topics = "trackable futures" @@ -15,15 +17,18 @@ logScope: proc len*(self: TrackedFutures): int = self.futures.len -proc removeFuture(self: TrackedFutures, future: FutureBase) = +proc removeFuture(self: TrackedFutures, future: TrackedFuture) = if not self.cancelling and not future.isNil: self.futures.del(future.id) -proc track*[T](self: TrackedFutures, fut: Future[T]) = +proc 
track*(self: TrackedFutures, fut: TrackedFuture) = if self.cancelling: return - self.futures[fut.id] = FutureBase(fut) + if fut.finished: + return + + self.futures[fut.id] = fut proc cb(udata: pointer) = self.removeFuture(fut) @@ -33,13 +38,8 @@ proc track*[T](self: TrackedFutures, fut: Future[T]) = proc cancelTracked*(self: TrackedFutures) {.async: (raises: []).} = self.cancelling = true - trace "cancelling tracked futures" - - var cancellations: seq[FutureBase] - for future in self.futures.values: - if not future.isNil and not future.finished: - cancellations.add future.cancelAndWait() - + trace "cancelling tracked futures", len = self.futures.len + let cancellations = self.futures.values.toSeq.mapIt(it.cancelAndWait()) await noCancel allFutures cancellations self.futures.clear() diff --git a/codex/validation.nim b/codex/validation.nim index 18a444a6..e6d74840 100644 --- a/codex/validation.nim +++ b/codex/validation.nim @@ -142,7 +142,6 @@ proc start*(validation: Validation) {.async.} = await validation.subscribeSlotFilled() await validation.restoreHistoricalState() validation.running = validation.run() - asyncSpawn validation.running proc stop*(validation: Validation) {.async.} = if not validation.running.isNil and not validation.running.finished: diff --git a/tests/asynctest.nim b/tests/asynctest.nim index 7c6a4afd..4db8277f 100644 --- a/tests/asynctest.nim +++ b/tests/asynctest.nim @@ -1,3 +1,3 @@ -import pkg/asynctest/chronos/unittest +import pkg/asynctest/chronos/unittest2 -export unittest +export unittest2 diff --git a/tests/codex/blockexchange/discovery/testdiscovery.nim b/tests/codex/blockexchange/discovery/testdiscovery.nim index 97a455e1..c54a1fff 100644 --- a/tests/codex/blockexchange/discovery/testdiscovery.nim +++ b/tests/codex/blockexchange/discovery/testdiscovery.nim @@ -84,12 +84,12 @@ asyncchecksuite "Block Advertising and Discovery": blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async, gcsafe.} 
= + ): Future[void] {.async: (raises: [CancelledError]).} = return blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = await engine.resolveBlocks(blocks.filterIt(it.cid == cid)) await allFuturesThrowing(allFinished(pendingBlocks)) @@ -97,17 +97,17 @@ asyncchecksuite "Block Advertising and Discovery": await engine.stop() test "Should advertise trees": - let - cids = @[manifest.treeCid] - advertised = initTable.collect: - for cid in cids: - {cid: newFuture[void]()} + let cids = @[manifest.treeCid] + var advertised = initTable.collect: + for cid in cids: + {cid: newFuture[void]()} blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ) {.async.} = - if cid in advertised and not advertised[cid].finished(): - advertised[cid].complete() + ) {.async: (raises: [CancelledError]).} = + advertised.withValue(cid, fut): + if not fut[].finished: + fut[].complete() await engine.start() await allFuturesThrowing(allFinished(toSeq(advertised.values))) @@ -118,7 +118,7 @@ asyncchecksuite "Block Advertising and Discovery": blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ) {.async.} = + ) {.async: (raises: [CancelledError]).} = check: cid notin blockCids @@ -138,7 +138,7 @@ asyncchecksuite "Block Advertising and Discovery": blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check false await engine.start() @@ -221,17 +221,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[1].peerInfo.signedPeerRecord 
MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[2].peerInfo.signedPeerRecord MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[3].peerInfo.signedPeerRecord discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) @@ -266,23 +266,21 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async.} = - if cid in advertised: - result.add(advertised[cid]) + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = + advertised.withValue(cid, val): + result.add(val[]) let futs = collect(newSeq): for m in mBlocks[0 .. 
2]: blockexc[0].engine.requestBlock(m.cid) - await allFuturesThrowing( - switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) - ) - .wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds) await allFutures(futs).wait(10.seconds) - await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) - .wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds) test "E2E - Should advertise and discover blocks with peers already connected": # Distribute the blocks amongst 1..3 @@ -292,17 +290,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[1].peerInfo.signedPeerRecord MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[2].peerInfo.signedPeerRecord MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[3].peerInfo.signedPeerRecord discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) @@ -337,18 +335,16 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async.} = - if cid in advertised: - return @[advertised[cid]] + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = + advertised.withValue(cid, val): + return 
@[val[]] let futs = mBlocks[0 .. 2].mapIt(blockexc[0].engine.requestBlock(it.cid)) - await allFuturesThrowing( - switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) - ) - .wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds) await allFutures(futs).wait(10.seconds) - await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) - .wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds) diff --git a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim index 93704726..9efab1a6 100644 --- a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim +++ b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim @@ -68,7 +68,7 @@ asyncchecksuite "Test Discovery Engine": blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = pendingBlocks.resolve( blocks.filterIt(it.cid == cid).mapIt( BlockDelivery(blk: it, address: it.address) @@ -94,7 +94,7 @@ asyncchecksuite "Test Discovery Engine": blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check cid == blocks[0].cid if not want.finished: want.complete() @@ -122,7 +122,7 @@ asyncchecksuite "Test Discovery Engine": var pendingCids = newSeq[Cid]() blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check cid in pendingCids pendingCids.keepItIf(it != cid) 
check peerStore.len < minPeers @@ -159,12 +159,12 @@ asyncchecksuite "Test Discovery Engine": discoveryLoopSleep = 100.millis, concurrentDiscReqs = 2, ) - reqs = newFuture[void]() + reqs = Future[void].Raising([CancelledError]).init() count = 0 blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.gcsafe, async.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check cid == blocks[0].cid if count > 0: check false diff --git a/tests/codex/blockexchange/engine/testadvertiser.nim b/tests/codex/blockexchange/engine/testadvertiser.nim index 157564d6..83a70f65 100644 --- a/tests/codex/blockexchange/engine/testadvertiser.nim +++ b/tests/codex/blockexchange/engine/testadvertiser.nim @@ -34,7 +34,7 @@ asyncchecksuite "Advertiser": advertised = newSeq[Cid]() blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ) {.async, gcsafe.} = + ) {.async: (raises: [CancelledError]), gcsafe.} = advertised.add(cid) advertiser = Advertiser.new(localStore, blockDiscovery) diff --git a/tests/codex/blockexchange/engine/testengine.nim b/tests/codex/blockexchange/engine/testengine.nim index cc5511e8..0541c119 100644 --- a/tests/codex/blockexchange/engine/testengine.nim +++ b/tests/codex/blockexchange/engine/testengine.nim @@ -22,7 +22,7 @@ import ../../examples const NopSendWantCancellationsProc = proc( id: PeerId, addresses: seq[BlockAddress] -) {.gcsafe, async.} = +) {.async: (raises: [CancelledError]).} = discard asyncchecksuite "NetworkStore engine basic": @@ -66,20 +66,17 @@ asyncchecksuite "NetworkStore engine basic": wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt($it.cid).sorted done.complete() let network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList)) - localStore = 
CacheStore.new(blocks.mapIt(it)) discovery = DiscoveryEngine.new( localStore, peerStore, network, blockDiscovery, pendingBlocks ) - advertiser = Advertiser.new(localStore, blockDiscovery) - engine = BlockExcEngine.new( localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks ) @@ -93,7 +90,9 @@ asyncchecksuite "NetworkStore engine basic": test "Should send account to new peers": let pricing = Pricing.example - proc sendAccount(peer: PeerId, account: Account) {.gcsafe, async.} = + proc sendAccount( + peer: PeerId, account: Account + ) {.async: (raises: [CancelledError]).} = check account.address == pricing.address done.complete() @@ -186,7 +185,9 @@ asyncchecksuite "NetworkStore engine handlers": done = newFuture[void]() wantList = makeWantList(blocks.mapIt(it.cid)) - proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence( + peerId: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: [CancelledError]).} = check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) done.complete() @@ -203,7 +204,9 @@ asyncchecksuite "NetworkStore engine handlers": done = newFuture[void]() wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true) - proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence( + peerId: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: [CancelledError]).} = check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) for p in presence: check: @@ -222,7 +225,9 @@ asyncchecksuite "NetworkStore engine handlers": done = newFuture[void]() wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true) - proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence( + peerId: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: [CancelledError]).} = for p in presence: if p.address.cidOrTreeCid != blocks[0].cid and p.address.cidOrTreeCid != 
blocks[1].cid: @@ -266,19 +271,21 @@ asyncchecksuite "NetworkStore engine handlers": peerContext.account = account.some peerContext.blocks = blocks.mapIt( - (it.address, Presence(address: it.address, price: rand(uint16).u256)) + (it.address, Presence(address: it.address, price: rand(uint16).u256, have: true)) ).toTable engine.network = BlockExcNetwork( request: BlockExcRequest( - sendPayment: proc(receiver: PeerId, payment: SignedState) {.gcsafe, async.} = + sendPayment: proc( + receiver: PeerId, payment: SignedState + ) {.async: (raises: [CancelledError]).} = let - amount = blocks.mapIt(peerContext.blocks[it.address].price).foldl(a + b) - + amount = + blocks.mapIt(peerContext.blocks[it.address].catch.get.price).foldl(a + b) balances = !payment.state.outcome.balances(Asset) check receiver == peerId - check balances[account.address.toDestination] == amount + check balances[account.address.toDestination].catch.get == amount done.complete(), # Install NOP for want list cancellations so they don't cause a crash @@ -286,10 +293,12 @@ asyncchecksuite "NetworkStore engine handlers": ) ) + let requestedBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.address)) await engine.blocksDeliveryHandler( peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address)) ) await done.wait(100.millis) + await allFuturesThrowing(requestedBlocks).wait(100.millis) test "Should handle block presence": var handles: @@ -303,7 +312,7 @@ asyncchecksuite "NetworkStore engine handlers": wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = engine.pendingBlocks.resolve( blocks.filterIt(it.address in addresses).mapIt( BlockDelivery(blk: it, address: it.address) @@ -340,9 +349,9 @@ asyncchecksuite "NetworkStore engine handlers": proc sendWantCancellations( id: PeerId, addresses: seq[BlockAddress] - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = for address in 
addresses: - cancellations[address].complete() + cancellations[address].catch.expect("address should exist").complete() engine.network = BlockExcNetwork( request: BlockExcRequest(sendWantCancellations: sendWantCancellations) @@ -416,7 +425,7 @@ asyncchecksuite "Block Download": wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = check wantType == WantHave check not engine.pendingBlocks.isInFlight(address) check engine.pendingBlocks.retries(address) == retries @@ -433,7 +442,7 @@ asyncchecksuite "Block Download": discard (await pending).tryGet() test "Should retry block request": - let + var address = BlockAddress.init(blocks[0].cid) steps = newAsyncEvent() @@ -445,7 +454,7 @@ asyncchecksuite "Block Download": wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = case wantType of WantHave: check engine.pendingBlocks.isInFlight(address) == false @@ -467,7 +476,7 @@ asyncchecksuite "Block Download": let pending = engine.requestBlock(address) await steps.wait() - # add blocks presence + # add blocks precense peerCtx.blocks = blocks.mapIt( (it.address, Presence(address: it.address, have: true, price: UInt256.example)) ).toTable @@ -493,7 +502,7 @@ asyncchecksuite "Block Download": wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = done.complete() engine.pendingBlocks.blockRetries = 10 @@ -573,7 +582,7 @@ asyncchecksuite "Task Handler": test "Should send want-blocks in priority order": proc sendBlocksDelivery( id: PeerId, blocksDelivery: seq[BlockDelivery] - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = check blocksDelivery.len == 2 check: blocksDelivery[1].address == blocks[0].address @@ -610,7 +619,7 @@ asyncchecksuite "Task Handler": test 
"Should set in-flight for outgoing blocks": proc sendBlocksDelivery( id: PeerId, blocksDelivery: seq[BlockDelivery] - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = check peersCtx[0].peerWants[0].inFlight for blk in blocks: @@ -649,7 +658,9 @@ asyncchecksuite "Task Handler": let missing = @[Block.new("missing".toBytes).tryGet()] let price = (!engine.pricing).price - proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence( + id: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: [CancelledError]).} = check presence.mapIt(!Presence.init(it)) == @[ Presence(address: present[0].address, have: true, price: price), diff --git a/tests/codex/blockexchange/engine/testpayments.nim b/tests/codex/blockexchange/engine/testpayments.nim index 24d5dab6..e93cc837 100644 --- a/tests/codex/blockexchange/engine/testpayments.nim +++ b/tests/codex/blockexchange/engine/testpayments.nim @@ -1,10 +1,10 @@ -import std/unittest +import pkg/unittest2 import pkg/codex/stores import ../../examples import ../../helpers -checksuite "engine payments": +suite "Engine payments": let address = EthAddress.example let amount = 42.u256 diff --git a/tests/codex/blockexchange/protobuf/testpayments.nim b/tests/codex/blockexchange/protobuf/testpayments.nim index d0773d70..3ada0105 100644 --- a/tests/codex/blockexchange/protobuf/testpayments.nim +++ b/tests/codex/blockexchange/protobuf/testpayments.nim @@ -6,7 +6,7 @@ import ../../../asynctest import ../../examples import ../../helpers -checksuite "account protobuf messages": +suite "account protobuf messages": let account = Account(address: EthAddress.example) let message = AccountMessage.init(account) @@ -21,7 +21,7 @@ checksuite "account protobuf messages": incorrect.address.del(0) check Account.init(incorrect).isNone -checksuite "channel update messages": +suite "channel update messages": let state = SignedState.example let update = StateChannelUpdate.init(state) diff --git 
a/tests/codex/blockexchange/protobuf/testpresence.nim b/tests/codex/blockexchange/protobuf/testpresence.nim index 7e3b94e6..dc048c59 100644 --- a/tests/codex/blockexchange/protobuf/testpresence.nim +++ b/tests/codex/blockexchange/protobuf/testpresence.nim @@ -6,7 +6,7 @@ import ../../../asynctest import ../../examples import ../../helpers -checksuite "block presence protobuf messages": +suite "block presence protobuf messages": let cid = Cid.example address = BlockAddress(leaf: false, cid: cid) diff --git a/tests/codex/blockexchange/testnetwork.nim b/tests/codex/blockexchange/testnetwork.nim index 0fae4ffe..b9a51c9d 100644 --- a/tests/codex/blockexchange/testnetwork.nim +++ b/tests/codex/blockexchange/testnetwork.nim @@ -26,7 +26,7 @@ asyncchecksuite "Network - Handlers": blocks: seq[bt.Block] done: Future[void] - proc getConn(): Future[Connection] {.async.} = + proc getConn(): Future[Connection] {.async: (raises: [CancelledError]).} = return Connection(buffer) setup: @@ -45,7 +45,7 @@ asyncchecksuite "Network - Handlers": discard await networkPeer.connect() test "Want List handler": - proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} = + proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} = # check that we got the correct amount of entries check wantList.entries.len == 4 @@ -72,7 +72,7 @@ asyncchecksuite "Network - Handlers": test "Blocks Handler": proc blocksDeliveryHandler( peer: PeerId, blocksDelivery: seq[BlockDelivery] - ) {.gcsafe, async.} = + ) {.async: (raises: []).} = check blocks == blocksDelivery.mapIt(it.blk) done.complete() @@ -85,7 +85,9 @@ asyncchecksuite "Network - Handlers": await done.wait(500.millis) test "Presence Handler": - proc presenceHandler(peer: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc presenceHandler( + peer: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: []).} = for b in blocks: check: b.address in presence @@ -105,7 +107,7 @@ asyncchecksuite "Network 
- Handlers": test "Handles account messages": let account = Account(address: EthAddress.example) - proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} = + proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} = check received == account done.complete() @@ -119,7 +121,7 @@ asyncchecksuite "Network - Handlers": test "Handles payment messages": let payment = SignedState.example - proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} = + proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} = check received == payment done.complete() @@ -165,7 +167,7 @@ asyncchecksuite "Network - Senders": await allFuturesThrowing(switch1.stop(), switch2.stop()) test "Send want list": - proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} = + proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} = # check that we got the correct amount of entries check wantList.entries.len == 4 @@ -195,7 +197,7 @@ asyncchecksuite "Network - Senders": test "send blocks": proc blocksDeliveryHandler( peer: PeerId, blocksDelivery: seq[BlockDelivery] - ) {.gcsafe, async.} = + ) {.async: (raises: []).} = check blocks == blocksDelivery.mapIt(it.blk) done.complete() @@ -207,7 +209,9 @@ asyncchecksuite "Network - Senders": await done.wait(500.millis) test "send presence": - proc presenceHandler(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async.} = + proc presenceHandler( + peer: PeerId, precense: seq[BlockPresence] + ) {.async: (raises: []).} = for b in blocks: check: b.address in precense @@ -226,7 +230,7 @@ asyncchecksuite "Network - Senders": test "send account": let account = Account(address: EthAddress.example) - proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} = + proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} = check received == account done.complete() @@ -238,7 +242,7 @@ asyncchecksuite "Network - Senders": test 
"send payment": let payment = SignedState.example - proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} = + proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} = check received == payment done.complete() @@ -276,7 +280,7 @@ asyncchecksuite "Network - Test Limits": let account = Account(address: EthAddress.example) network2.handlers.onAccount = proc( peer: PeerId, received: Account - ) {.gcsafe, async.} = + ) {.async: (raises: []).} = check false let fut = network1.send( diff --git a/tests/codex/blockexchange/testpeerctxstore.nim b/tests/codex/blockexchange/testpeerctxstore.nim index 6ea601d1..e2983d10 100644 --- a/tests/codex/blockexchange/testpeerctxstore.nim +++ b/tests/codex/blockexchange/testpeerctxstore.nim @@ -1,7 +1,7 @@ import std/sugar import std/sequtils -import std/unittest +import pkg/unittest2 import pkg/libp2p import pkg/codex/blockexchange/peers @@ -11,7 +11,7 @@ import pkg/codex/blockexchange/protobuf/presence import ../helpers import ../examples -checksuite "Peer Context Store": +suite "Peer Context Store": var store: PeerCtxStore peerCtx: BlockExcPeerCtx @@ -31,7 +31,7 @@ checksuite "Peer Context Store": test "Should get peer": check store.get(peerCtx.id) == peerCtx -checksuite "Peer Context Store Peer Selection": +suite "Peer Context Store Peer Selection": var store: PeerCtxStore peerCtxs: seq[BlockExcPeerCtx] diff --git a/tests/codex/blockexchange/testpendingblocks.nim b/tests/codex/blockexchange/testpendingblocks.nim index 29410db7..af1e6728 100644 --- a/tests/codex/blockexchange/testpendingblocks.nim +++ b/tests/codex/blockexchange/testpendingblocks.nim @@ -10,7 +10,7 @@ import pkg/codex/blockexchange import ../helpers import ../../asynctest -checksuite "Pending Blocks": +suite "Pending Blocks": test "Should add want handle": let pendingBlocks = PendingBlocksManager.new() diff --git a/tests/codex/helpers/mockchunker.nim b/tests/codex/helpers/mockchunker.nim index 0d38cf3b..eb51f7ca 100644 --- 
a/tests/codex/helpers/mockchunker.nim +++ b/tests/codex/helpers/mockchunker.nim @@ -21,7 +21,7 @@ proc new*( var consumed = 0 proc reader( data: ChunkBuffer, len: int - ): Future[int] {.async, gcsafe, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = if consumed >= dataset.len: return 0 diff --git a/tests/codex/helpers/mockdiscovery.nim b/tests/codex/helpers/mockdiscovery.nim index 42ad76a9..4110c577 100644 --- a/tests/codex/helpers/mockdiscovery.nim +++ b/tests/codex/helpers/mockdiscovery.nim @@ -14,29 +14,42 @@ import pkg/codex/discovery import pkg/contractabi/address as ca type MockDiscovery* = ref object of Discovery - findBlockProvidersHandler*: - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.gcsafe.} - publishBlockProvideHandler*: proc(d: MockDiscovery, cid: Cid): Future[void] {.gcsafe.} - findHostProvidersHandler*: - proc(d: MockDiscovery, host: ca.Address): Future[seq[SignedPeerRecord]] {.gcsafe.} - publishHostProvideHandler*: - proc(d: MockDiscovery, host: ca.Address): Future[void] {.gcsafe.} + findBlockProvidersHandler*: proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} + + publishBlockProvideHandler*: + proc(d: MockDiscovery, cid: Cid): Future[void] {.async: (raises: [CancelledError]).} + + findHostProvidersHandler*: proc( + d: MockDiscovery, host: ca.Address + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} + + publishHostProvideHandler*: proc(d: MockDiscovery, host: ca.Address): Future[void] {. 
+ async: (raises: [CancelledError]) + .} proc new*(T: type MockDiscovery): MockDiscovery = MockDiscovery() -proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} = +proc findPeer*( + d: Discovery, peerId: PeerId +): Future[?PeerRecord] {.async: (raises: [CancelledError]).} = ## mock find a peer - always return none - ## + ## return none(PeerRecord) -method find*(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = +method find*( + d: MockDiscovery, cid: Cid +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = if isNil(d.findBlockProvidersHandler): return return await d.findBlockProvidersHandler(d, cid) -method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} = +method provide*( + d: MockDiscovery, cid: Cid +): Future[void] {.async: (raises: [CancelledError]).} = if isNil(d.publishBlockProvideHandler): return @@ -44,13 +57,15 @@ method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} = method find*( d: MockDiscovery, host: ca.Address -): Future[seq[SignedPeerRecord]] {.async.} = +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = if isNil(d.findHostProvidersHandler): return return await d.findHostProvidersHandler(d, host) -method provide*(d: MockDiscovery, host: ca.Address): Future[void] {.async.} = +method provide*( + d: MockDiscovery, host: ca.Address +): Future[void] {.async: (raises: [CancelledError]).} = if isNil(d.publishHostProvideHandler): return diff --git a/tests/codex/helpers/randomchunker.nim b/tests/codex/helpers/randomchunker.nim index b482f67f..cf857595 100644 --- a/tests/codex/helpers/randomchunker.nim +++ b/tests/codex/helpers/randomchunker.nim @@ -26,7 +26,7 @@ proc new*( var consumed = 0 proc reader( data: ChunkBuffer, len: int - ): Future[int] {.async, gcsafe, raises: [Defect].} = + ): Future[int] {.async: (raises: [ChunkerError, CancelledError]), gcsafe.} = var alpha = toSeq(byte('A') .. 
byte('z')) if consumed >= size: diff --git a/tests/codex/merkletree/generictreetests.nim b/tests/codex/merkletree/generictreetests.nim index 0e1f7c9f..6244bc1c 100644 --- a/tests/codex/merkletree/generictreetests.nim +++ b/tests/codex/merkletree/generictreetests.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/codex/merkletree diff --git a/tests/codex/merkletree/testcodexcoders.nim b/tests/codex/merkletree/testcodexcoders.nim index d9544083..6da56844 100644 --- a/tests/codex/merkletree/testcodexcoders.nim +++ b/tests/codex/merkletree/testcodexcoders.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/questionable/results import pkg/stew/byteutils @@ -18,7 +18,7 @@ const data = [ "00000000000000000000000000000009".toBytes, "00000000000000000000000000000010".toBytes, ] -checksuite "merkletree - coders": +suite "merkletree - coders": test "encoding and decoding a tree yields the same tree": let tree = CodexTree.init(Sha256HashCodec, data).tryGet() diff --git a/tests/codex/merkletree/testcodextree.nim b/tests/codex/merkletree/testcodextree.nim index c4713d40..29390c16 100644 --- a/tests/codex/merkletree/testcodextree.nim +++ b/tests/codex/merkletree/testcodextree.nim @@ -1,6 +1,6 @@ -import std/unittest import std/sequtils +import pkg/unittest2 import pkg/questionable/results import pkg/stew/byteutils import pkg/libp2p diff --git a/tests/codex/merkletree/testmerkledigest.nim b/tests/codex/merkletree/testmerkledigest.nim index ccb138da..4cc2d197 100644 --- a/tests/codex/merkletree/testmerkledigest.nim +++ b/tests/codex/merkletree/testmerkledigest.nim @@ -1,7 +1,7 @@ -import std/unittest import std/sequtils import std/random +import pkg/unittest2 import pkg/poseidon2 import pkg/poseidon2/sponge diff --git a/tests/codex/merkletree/testposeidon2tree.nim b/tests/codex/merkletree/testposeidon2tree.nim index f60fdb39..e12751b7 100644 --- a/tests/codex/merkletree/testposeidon2tree.nim +++ 
b/tests/codex/merkletree/testposeidon2tree.nim @@ -1,6 +1,6 @@ -import std/unittest import std/sequtils +import pkg/unittest2 import pkg/poseidon2 import pkg/poseidon2/io import pkg/questionable/results diff --git a/tests/codex/sales/states/testdownloading.nim b/tests/codex/sales/states/testdownloading.nim index 3df45749..71376fc8 100644 --- a/tests/codex/sales/states/testdownloading.nim +++ b/tests/codex/sales/states/testdownloading.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/questionable import pkg/codex/contracts/requests import pkg/codex/sales/states/cancelled @@ -8,7 +8,7 @@ import pkg/codex/sales/states/filled import ../../examples import ../../helpers -checksuite "sales state 'downloading'": +suite "sales state 'downloading'": let request = StorageRequest.example let slotIndex = request.ask.slots div 2 var state: SaleDownloading diff --git a/tests/codex/sales/states/testfilled.nim b/tests/codex/sales/states/testfilled.nim index 04ff26db..f077b780 100644 --- a/tests/codex/sales/states/testfilled.nim +++ b/tests/codex/sales/states/testfilled.nim @@ -14,7 +14,7 @@ import ../../helpers/mockmarket import ../../examples import ../../helpers -checksuite "sales state 'filled'": +suite "sales state 'filled'": let request = StorageRequest.example let slotIndex = request.ask.slots div 2 diff --git a/tests/codex/sales/states/testfilling.nim b/tests/codex/sales/states/testfilling.nim index ce1d32f2..1a26753d 100644 --- a/tests/codex/sales/states/testfilling.nim +++ b/tests/codex/sales/states/testfilling.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/questionable import pkg/codex/contracts/requests import pkg/codex/sales/states/filling @@ -7,7 +7,7 @@ import pkg/codex/sales/states/failed import ../../examples import ../../helpers -checksuite "sales state 'filling'": +suite "sales state 'filling'": let request = StorageRequest.example let slotIndex = request.ask.slots div 2 var state: SaleFilling diff --git 
a/tests/codex/sales/states/testunknown.nim b/tests/codex/sales/states/testunknown.nim index 5e9f81f9..98b23224 100644 --- a/tests/codex/sales/states/testunknown.nim +++ b/tests/codex/sales/states/testunknown.nim @@ -14,7 +14,7 @@ import ../../helpers/mockmarket import ../../examples import ../../helpers -checksuite "sales state 'unknown'": +suite "sales state 'unknown'": let request = StorageRequest.example let slotIndex = request.ask.slots div 2 let slotId = slotId(request.id, slotIndex) diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index e92f9607..74ea8a2b 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -236,10 +236,17 @@ asyncchecksuite "Sales": return true proc addRequestToSaturatedQueue(): Future[StorageRequest] {.async.} = - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = - await sleepAsync(10.millis) - itemsProcessed.add item - done.complete() + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = + try: + await sleepAsync(10.millis) + itemsProcessed.add item + except CancelledError as exc: + checkpoint(exc.msg) + finally: + if not done.finished: + done.complete() var request1 = StorageRequest.example request1.ask.collateralPerByte = request.ask.collateralPerByte + 1 @@ -261,9 +268,12 @@ asyncchecksuite "Sales": waitFor run() test "processes all request's slots once StorageRequested emitted": - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = itemsProcessed.add item - done.complete() + if not done.finished: + done.complete() createAvailability() await market.requestStorage(request) let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) @@ -299,9 +309,12 @@ asyncchecksuite "Sales": check always (not itemsProcessed.contains(expected)) test "adds slot 
index to slot queue once SlotFreed emitted": - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = itemsProcessed.add item - done.complete() + if not done.finished: + done.complete() createAvailability() market.requested.add request # "contract" must be able to return request diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim index 03c658be..7abad7eb 100644 --- a/tests/codex/sales/testslotqueue.nim +++ b/tests/codex/sales/testslotqueue.nim @@ -50,12 +50,19 @@ suite "Slot queue start/stop": suite "Slot queue workers": var queue: SlotQueue - proc onProcessSlot(item: SlotQueueItem, doneProcessing: Future[void]) {.async.} = - await sleepAsync(1000.millis) + proc onProcessSlot( + item: SlotQueueItem, doneProcessing: Future[void] + ) {.async: (raises: []).} = # this is not illustrative of the realistic scenario as the # `doneProcessing` future would be passed to another context before being # completed and therefore is not as simple as making the callback async - doneProcessing.complete() + try: + await sleepAsync(1000.millis) + except CatchableError as exc: + checkpoint(exc.msg) + finally: + if not doneProcessing.finished: + doneProcessing.complete() setup: let request = StorageRequest.example @@ -89,9 +96,14 @@ suite "Slot queue workers": check eventually queue.activeWorkers == 3 test "discards workers once processing completed": - proc processSlot(item: SlotQueueItem, done: Future[void]) {.async.} = - await sleepAsync(1.millis) - done.complete() + proc processSlot(item: SlotQueueItem, done: Future[void]) {.async: (raises: []).} = + try: + await sleepAsync(1.millis) + except CatchableError as exc: + checkpoint(exc.msg) + finally: + if not done.finished: + done.complete() queue.onProcessSlot = processSlot @@ -114,11 +126,19 @@ suite "Slot queue": proc newSlotQueue(maxSize, maxWorkers: int, processSlotDelay 
= 1.millis) = queue = SlotQueue.new(maxWorkers, maxSize.uint16) - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = - await sleepAsync(processSlotDelay) - onProcessSlotCalled = true - onProcessSlotCalledWith.add (item.requestId, item.slotIndex) - done.complete() + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = + try: + await sleepAsync(processSlotDelay) + except CatchableError as exc: + checkpoint(exc.msg) + finally: + onProcessSlotCalled = true + onProcessSlotCalledWith.add (item.requestId, item.slotIndex) + if not done.finished: + done.complete() + queue.start() setup: diff --git a/tests/codex/slots/testslotbuilder.nim b/tests/codex/slots/testslotbuilder.nim index ef83bdee..9a2043a8 100644 --- a/tests/codex/slots/testslotbuilder.nim +++ b/tests/codex/slots/testslotbuilder.nim @@ -133,7 +133,7 @@ suite "Slot builder": check: Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == - "Number of blocks must be divisable by number of slots." + "Number of blocks must be divisible by number of slots." test "Block size must be divisable by cell size": let mismatchManifest = Manifest.new( @@ -151,7 +151,7 @@ suite "Slot builder": check: Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == - "Block size must be divisable by cell size." + "Block size must be divisible by cell size." 
test "Should build correct slot builder": builder = diff --git a/tests/codex/stores/repostore/testcoders.nim b/tests/codex/stores/repostore/testcoders.nim index f4d2b5e7..9d341af0 100644 --- a/tests/codex/stores/repostore/testcoders.nim +++ b/tests/codex/stores/repostore/testcoders.nim @@ -1,6 +1,6 @@ -import std/unittest import std/random +import pkg/unittest2 import pkg/stew/objects import pkg/questionable import pkg/questionable/results @@ -11,7 +11,7 @@ import pkg/codex/stores/repostore/coders import ../../helpers -checksuite "Test coders": +suite "Test coders": proc rand(T: type NBytes): T = rand(Natural).NBytes diff --git a/tests/codex/stores/testcachestore.nim b/tests/codex/stores/testcachestore.nim index e7025388..03075e1a 100644 --- a/tests/codex/stores/testcachestore.nim +++ b/tests/codex/stores/testcachestore.nim @@ -11,7 +11,7 @@ import ./commonstoretests import ../../asynctest import ../helpers -checksuite "Cache Store": +suite "Cache Store": var newBlock, newBlock1, newBlock2, newBlock3: Block store: CacheStore diff --git a/tests/codex/stores/testkeyutils.nim b/tests/codex/stores/testkeyutils.nim index 238e2681..86365c5c 100644 --- a/tests/codex/stores/testkeyutils.nim +++ b/tests/codex/stores/testkeyutils.nim @@ -36,7 +36,7 @@ proc createManifestCid(): ?!Cid = let cid = ?Cid.init(version, codec, hash).mapFailure return success cid -checksuite "KeyUtils": +suite "KeyUtils": test "makePrefixKey should create block key": let length = 6 let cid = Cid.example diff --git a/tests/codex/stores/testmaintenance.nim b/tests/codex/stores/testmaintenance.nim index e5ff519e..89e75700 100644 --- a/tests/codex/stores/testmaintenance.nim +++ b/tests/codex/stores/testmaintenance.nim @@ -21,7 +21,7 @@ import ../examples import codex/stores/maintenance -checksuite "BlockMaintainer": +suite "BlockMaintainer": var mockRepoStore: MockRepoStore var interval: Duration var mockTimer: MockTimer diff --git a/tests/codex/stores/testrepostore.nim 
b/tests/codex/stores/testrepostore.nim index 0279b56f..5274d046 100644 --- a/tests/codex/stores/testrepostore.nim +++ b/tests/codex/stores/testrepostore.nim @@ -24,7 +24,7 @@ import ../helpers/mockclock import ../examples import ./commonstoretests -checksuite "Test RepoStore start/stop": +suite "Test RepoStore start/stop": var repoDs: Datastore metaDs: Datastore diff --git a/tests/codex/testasyncheapqueue.nim b/tests/codex/testasyncheapqueue.nim index a9c6769b..2d2cfb0c 100644 --- a/tests/codex/testasyncheapqueue.nim +++ b/tests/codex/testasyncheapqueue.nim @@ -22,7 +22,7 @@ proc toSortedSeq[T](h: AsyncHeapQueue[T], queueType = QueueType.Min): seq[T] = while tmp.len > 0: result.add(popNoWait(tmp).tryGet()) -checksuite "Synchronous tests": +suite "Synchronous tests": test "Test pushNoWait - Min": var heap = newAsyncHeapQueue[int]() let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] diff --git a/tests/codex/testchunking.nim b/tests/codex/testchunking.nim index 2241a82b..44202c40 100644 --- a/tests/codex/testchunking.nim +++ b/tests/codex/testchunking.nim @@ -27,7 +27,7 @@ asyncchecksuite "Chunking": let contents = [1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0] proc reader( data: ChunkBuffer, len: int - ): Future[int] {.gcsafe, async, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = let read = min(contents.len - offset, len) if read == 0: return 0 @@ -97,8 +97,13 @@ asyncchecksuite "Chunking": discard (await chunker.getBytes()) test "stream should forward LPStreamError": - expect LPStreamError: + try: await raiseStreamException(newException(LPStreamError, "test error")) + except ChunkerError as exc: + check exc.parent of LPStreamError + except CatchableError as exc: + checkpoint("Unexpected error: " & exc.msg) + fail() test "stream should catch LPStreamEOFError": await raiseStreamException(newException(LPStreamEOFError, "test error")) @@ -106,7 +111,3 @@ asyncchecksuite "Chunking": test "stream should forward CancelledError": expect 
CancelledError: await raiseStreamException(newException(CancelledError, "test error")) - - test "stream should forward LPStreamError": - expect LPStreamError: - await raiseStreamException(newException(LPStreamError, "test error")) diff --git a/tests/codex/testclock.nim b/tests/codex/testclock.nim index 2b0158cf..967de672 100644 --- a/tests/codex/testclock.nim +++ b/tests/codex/testclock.nim @@ -1,9 +1,9 @@ -import std/unittest +import pkg/unittest2 import codex/clock import ./helpers -checksuite "Clock": +suite "Clock": proc testConversion(seconds: SecondsSince1970) = let asBytes = seconds.toBytes diff --git a/tests/codex/testlogutils.nim b/tests/codex/testlogutils.nim index b2694ee9..2077fb81 100644 --- a/tests/codex/testlogutils.nim +++ b/tests/codex/testlogutils.nim @@ -1,6 +1,7 @@ import std/options import std/strutils -import std/unittest + +import pkg/unittest2 import pkg/codex/blocktype import pkg/codex/conf import pkg/codex/contracts/requests diff --git a/tests/codex/testmanifest.nim b/tests/codex/testmanifest.nim index 241bec61..ea9465d5 100644 --- a/tests/codex/testmanifest.nim +++ b/tests/codex/testmanifest.nim @@ -13,7 +13,7 @@ import ../asynctest import ./helpers import ./examples -checksuite "Manifest": +suite "Manifest": let manifest = Manifest.new(treeCid = Cid.example, blockSize = 1.MiBs, datasetSize = 100.MiBs) diff --git a/tests/codex/testpurchasing.nim b/tests/codex/testpurchasing.nim index 5a4e85e9..1834ee03 100644 --- a/tests/codex/testpurchasing.nim +++ b/tests/codex/testpurchasing.nim @@ -116,7 +116,7 @@ asyncchecksuite "Purchasing": await purchase.wait() check market.withdrawn == @[request.id] -checksuite "Purchasing state machine": +suite "Purchasing state machine": var purchasing: Purchasing var market: MockMarket var clock: MockClock diff --git a/tests/codex/testsystemclock.nim b/tests/codex/testsystemclock.nim index 6f743283..3f179260 100644 --- a/tests/codex/testsystemclock.nim +++ b/tests/codex/testsystemclock.nim @@ -1,10 +1,10 @@ 
import std/times -import std/unittest -import codex/systemclock +import pkg/unittest2 +import pkg/codex/systemclock import ./helpers -checksuite "SystemClock": +suite "SystemClock": test "Should get now": let clock = SystemClock.new() diff --git a/tests/codex/utils/testiter.nim b/tests/codex/utils/testiter.nim index 801e1937..ec19c484 100644 --- a/tests/codex/utils/testiter.nim +++ b/tests/codex/utils/testiter.nim @@ -7,7 +7,7 @@ import pkg/codex/utils/iter import ../../asynctest import ../helpers -checksuite "Test Iter": +suite "Test Iter": test "Should be finished": let iter = Iter[int].empty() diff --git a/tests/codex/utils/testkeyutils.nim b/tests/codex/utils/testkeyutils.nim index 2124e682..104258f3 100644 --- a/tests/codex/utils/testkeyutils.nim +++ b/tests/codex/utils/testkeyutils.nim @@ -1,12 +1,14 @@ -import std/unittest import std/os -import codex/utils/keyutils + +import pkg/unittest2 +import pkg/codex/utils/keyutils + import ../helpers when defined(windows): import stew/windows/acl -checksuite "keyutils": +suite "keyutils": let path = getTempDir() / "CodexTest" setup: diff --git a/tests/codex/utils/testoptions.nim b/tests/codex/utils/testoptions.nim index 05f7509e..650715bc 100644 --- a/tests/codex/utils/testoptions.nim +++ b/tests/codex/utils/testoptions.nim @@ -1,8 +1,9 @@ -import std/unittest -import codex/utils/options +import pkg/unittest2 +import pkg/codex/utils/options + import ../helpers -checksuite "optional casts": +suite "optional casts": test "casting value to same type works": check 42 as int == some 42 @@ -31,7 +32,7 @@ checksuite "optional casts": check 42.some as string == string.none check int.none as int == int.none -checksuite "Optionalize": +suite "Optionalize": test "does not except non-object types": static: doAssert not compiles(Optionalize(int)) diff --git a/tests/codex/utils/testtrackedfutures.nim b/tests/codex/utils/testtrackedfutures.nim index 35074919..993d5b43 100644 --- a/tests/codex/utils/testtrackedfutures.nim +++ 
b/tests/codex/utils/testtrackedfutures.nim @@ -17,47 +17,71 @@ asyncchecksuite "tracked futures": check module.trackedFutures.len == 0 test "tracks unfinished futures": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) module.trackedFutures.track(fut) check module.trackedFutures.len == 1 test "does not track completed futures": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) fut.complete() module.trackedFutures.track(fut) - check eventually module.trackedFutures.len == 0 - - test "does not track failed futures": - let fut = newFuture[void]("test") - fut.fail((ref CatchableError)(msg: "some error")) - module.trackedFutures.track(fut) - check eventually module.trackedFutures.len == 0 + check module.trackedFutures.len == 0 test "does not track cancelled futures": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) + fut.cancelCallback = proc(data: pointer) = + fut.cancelAndSchedule() # manually schedule the cancel + await fut.cancelAndWait() module.trackedFutures.track(fut) check eventually module.trackedFutures.len == 0 test "removes tracked future when finished": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) module.trackedFutures.track(fut) + check module.trackedFutures.len == 1 fut.complete() check eventually module.trackedFutures.len == 0 test "removes tracked future when cancelled": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) + fut.cancelCallback = proc(data: pointer) = + fut.cancelAndSchedule() # manually schedule the cancel + module.trackedFutures.track(fut) + check module.trackedFutures.len == 1 + await fut.cancelAndWait() + check eventually module.trackedFutures.len == 0 + + test "completed and removes future on 
cancel": + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) + fut.cancelCallback = proc(data: pointer) = + fut.complete() + + module.trackedFutures.track(fut) + check module.trackedFutures.len == 1 await fut.cancelAndWait() check eventually module.trackedFutures.len == 0 test "cancels and removes all tracked futures": - let fut1 = newFuture[void]("test1") - let fut2 = newFuture[void]("test2") - let fut3 = newFuture[void]("test3") + let fut1 = Future[void].Raising([]).init("test1", {FutureFlag.OwnCancelSchedule}) + fut1.cancelCallback = proc(data: pointer) = + fut1.cancelAndSchedule() # manually schedule the cancel + + let fut2 = Future[void].Raising([]).init("test2", {FutureFlag.OwnCancelSchedule}) + fut2.cancelCallback = proc(data: pointer) = + fut2.cancelAndSchedule() # manually schedule the cancel + + let fut3 = Future[void].Raising([]).init("test3", {FutureFlag.OwnCancelSchedule}) + fut3.cancelCallback = proc(data: pointer) = + fut3.cancelAndSchedule() # manually schedule the cancel + module.trackedFutures.track(fut1) + check module.trackedFutures.len == 1 module.trackedFutures.track(fut2) + check module.trackedFutures.len == 2 module.trackedFutures.track(fut3) + check module.trackedFutures.len == 3 await module.trackedFutures.cancelTracked() check eventually fut1.cancelled check eventually fut2.cancelled diff --git a/tests/codex/utils/testutils.nim b/tests/codex/utils/testutils.nim index 92c883be..b0bb20b5 100644 --- a/tests/codex/utils/testutils.nim +++ b/tests/codex/utils/testutils.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/codex/utils diff --git a/tests/helpers.nim b/tests/helpers.nim index a6a6ff44..82b544f1 100644 --- a/tests/helpers.nim +++ b/tests/helpers.nim @@ -2,4 +2,36 @@ import helpers/multisetup import helpers/trackers import helpers/templeveldb +import std/sequtils, chronos + export multisetup, trackers, templeveldb + +### taken from libp2p errorhelpers.nim +proc 
allFuturesThrowing*(args: varargs[FutureBase]): Future[void] = + # This proc is only meant for use in tests / not suitable for general use. + # - Swallowing errors arbitrarily instead of aggregating them is bad design + # - It raises `CatchableError` instead of the union of the `futs` errors, + # inflating the caller's `raises` list unnecessarily. `macro` could fix it + let futs = @args + ( + proc() {.async: (raises: [CatchableError]).} = + await allFutures(futs) + var firstErr: ref CatchableError + for fut in futs: + if fut.failed: + let err = fut.error() + if err of CancelledError: + raise err + if firstErr == nil: + firstErr = err + if firstErr != nil: + raise firstErr + )() + +proc allFuturesThrowing*[T](futs: varargs[Future[T]]): Future[void] = + allFuturesThrowing(futs.mapIt(FutureBase(it))) + +proc allFuturesThrowing*[T, E]( # https://github.com/nim-lang/Nim/issues/23432 + futs: varargs[InternalRaisesFuture[T, E]] +): Future[void] = + allFuturesThrowing(futs.mapIt(FutureBase(it))) diff --git a/tests/helpers/trackers.nim b/tests/helpers/trackers.nim index ed8c5692..898053c2 100644 --- a/tests/helpers/trackers.nim +++ b/tests/helpers/trackers.nim @@ -1,5 +1,5 @@ import pkg/codex/streams/storestream -import std/unittest +import pkg/unittest2 # From lip2p/tests/helpers const trackerNames = [StoreStreamTrackerName] diff --git a/vendor/nim-serde b/vendor/nim-serde index c82e85c6..5ced7c88 160000 --- a/vendor/nim-serde +++ b/vendor/nim-serde @@ -1 +1 @@ -Subproject commit c82e85c62436218592fbe876df5ac389ef8b964b +Subproject commit 5ced7c88b97d99c582285ce796957fb71fd42434 From a0ddcef08da8f71fccf80d976a73f5771c545eb4 Mon Sep 17 00:00:00 2001 From: Ben Bierens <39762930+benbierens@users.noreply.github.com> Date: Thu, 13 Mar 2025 23:45:44 +0100 Subject: [PATCH 28/40] changes trace to info for updates of the annouce/dht record logs (#1156) --- codex/discovery.nim | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/codex/discovery.nim 
b/codex/discovery.nim index eed1f89b..4a211c20 100644 --- a/codex/discovery.nim +++ b/codex/discovery.nim @@ -179,7 +179,7 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) = d.announceAddrs = @addrs - trace "Updating announce record", addrs = d.announceAddrs + info "Updating announce record", addrs = d.announceAddrs d.providerRecord = SignedPeerRecord .init(d.key, PeerRecord.init(d.peerId, d.announceAddrs)) .expect("Should construct signed record").some @@ -191,7 +191,7 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) = ## Update providers record ## - trace "Updating Dht record", addrs = addrs + info "Updating Dht record", addrs = addrs d.dhtRecord = SignedPeerRecord .init(d.key, PeerRecord.init(d.peerId, @addrs)) .expect("Should construct signed record").some From a5db757de39df6e08807e8917d8f924e4a68d76a Mon Sep 17 00:00:00 2001 From: Eric <5089238+emizzle@users.noreply.github.com> Date: Fri, 14 Mar 2025 09:46:05 +1100 Subject: [PATCH 29/40] fix: ethers no longer leaks AsyncLockError (#1146) * fix: ethers no longer leaks AsyncLockError * Add message to convertEthersEthers - adds a message to convertEthersError allowing contextual error messages - replaces try/except EthersError with convertEthersError (PR feedback) * bump ethers after PR merged upstream --- codex/contracts/market.nim | 117 +++++++++++++++++++------------------ vendor/nim-ethers | 2 +- 2 files changed, 60 insertions(+), 59 deletions(-) diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 9079ac8a..58495b45 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -1,3 +1,4 @@ +import std/strformat import std/strutils import pkg/ethers import pkg/upraises @@ -49,11 +50,17 @@ func new*( proc raiseMarketError(message: string) {.raises: [MarketError].} = raise newException(MarketError, message) -template convertEthersError(body) = +func prefixWith(suffix, prefix: string, separator = ": "): string = + if prefix.len 
> 0: + return &"{prefix}{separator}{suffix}" + else: + return suffix + +template convertEthersError(msg: string = "", body) = try: body except EthersError as error: - raiseMarketError(error.msgDetail) + raiseMarketError(error.msgDetail.prefixWith(msg)) proc config( market: OnChainMarket @@ -71,7 +78,7 @@ proc config( proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} = debug "Approving tokens", amount - convertEthersError: + convertEthersError("Failed to approve funds"): let tokenAddress = await market.contract.token() let token = Erc20Token.new(tokenAddress, market.signer) discard await token.increaseAllowance(market.contract.address(), amount).confirm(1) @@ -86,8 +93,7 @@ method loadConfig*( market.configuration = some fetchedConfig return success() - except AsyncLockError, EthersError: - let err = getCurrentException() + except EthersError as err: return failure newException( MarketError, "Failed to fetch the config from the Marketplace contract: " & err.msg, @@ -100,13 +106,13 @@ method getZkeyHash*( return some config.proofs.zkeyHash method getSigner*(market: OnChainMarket): Future[Address] {.async.} = - convertEthersError: + convertEthersError("Failed to get signer address"): return await market.signer.getAddress() method periodicity*( market: OnChainMarket ): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} = - convertEthersError: + convertEthersError("Failed to get Marketplace config"): let config = await market.config() let period = config.proofs.period return Periodicity(seconds: period) @@ -114,47 +120,47 @@ method periodicity*( method proofTimeout*( market: OnChainMarket ): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} = - convertEthersError: + convertEthersError("Failed to get Marketplace config"): let config = await market.config() return config.proofs.timeout method repairRewardPercentage*( market: OnChainMarket ): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = - 
convertEthersError: + convertEthersError("Failed to get Marketplace config"): let config = await market.config() return config.collateral.repairRewardPercentage method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} = - convertEthersError: + convertEthersError("Failed to get Marketplace config"): let config = await market.config() return config.requestDurationLimit method proofDowntime*( market: OnChainMarket ): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = - convertEthersError: + convertEthersError("Failed to get Marketplace config"): let config = await market.config() return config.proofs.downtime method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} = - convertEthersError: + convertEthersError("Failed to get slot pointer"): let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.getPointer(slotId, overrides) method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} = - convertEthersError: + convertEthersError("Failed to get my requests"): return await market.contract.myRequests method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} = - convertEthersError: + convertEthersError("Failed to get my slots"): let slots = await market.contract.mySlots() debug "Fetched my slots", numSlots = len(slots) return slots method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} = - convertEthersError: + convertEthersError("Failed to request storage"): debug "Requesting storage" await market.approveFunds(request.totalPrice()) discard await market.contract.requestStorage(request).confirm(1) @@ -174,14 +180,14 @@ method getRequest*( except Marketplace_UnknownRequest, KeyError: warn "Cannot retrieve the request", error = getCurrentExceptionMsg() return none StorageRequest - except EthersError, AsyncLockError: - error "Cannot retrieve the request", error = getCurrentExceptionMsg() + except EthersError as e: + error 
"Cannot retrieve the request", error = e.msg return none StorageRequest method requestState*( market: OnChainMarket, requestId: RequestId ): Future[?RequestState] {.async.} = - convertEthersError: + convertEthersError("Failed to get request state"): try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return some await market.contract.requestState(requestId, overrides) @@ -191,31 +197,26 @@ method requestState*( method slotState*( market: OnChainMarket, slotId: SlotId ): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} = - convertEthersError: - try: - let overrides = CallOverrides(blockTag: some BlockTag.pending) - return await market.contract.slotState(slotId, overrides) - except AsyncLockError as err: - raiseMarketError( - "Failed to fetch the slot state from the Marketplace contract: " & err.msg - ) + convertEthersError("Failed to fetch the slot state from the Marketplace contract"): + let overrides = CallOverrides(blockTag: some BlockTag.pending) + return await market.contract.slotState(slotId, overrides) method getRequestEnd*( market: OnChainMarket, id: RequestId ): Future[SecondsSince1970] {.async.} = - convertEthersError: + convertEthersError("Failed to get request end"): return await market.contract.requestEnd(id) method requestExpiresAt*( market: OnChainMarket, id: RequestId ): Future[SecondsSince1970] {.async.} = - convertEthersError: + convertEthersError("Failed to get request expiry"): return await market.contract.requestExpiry(id) method getHost( market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ): Future[?Address] {.async.} = - convertEthersError: + convertEthersError("Failed to get slot's host"): let slotId = slotId(requestId, slotIndex) let address = await market.contract.getHost(slotId) if address != Address.default: @@ -226,11 +227,11 @@ method getHost( method currentCollateral*( market: OnChainMarket, slotId: SlotId ): Future[UInt256] {.async.} = - convertEthersError: + convertEthersError("Failed to 
get slot's current collateral"): return await market.contract.currentCollateral(slotId) method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} = - convertEthersError: + convertEthersError("Failed to get active slot"): try: return some await market.contract.getActiveSlot(slotId) except Marketplace_SlotIsFree: @@ -243,7 +244,7 @@ method fillSlot( proof: Groth16Proof, collateral: UInt256, ) {.async.} = - convertEthersError: + convertEthersError("Failed to fill slot"): logScope: requestId slotIndex @@ -254,7 +255,7 @@ method fillSlot( trace "fillSlot transaction completed" method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = - convertEthersError: + convertEthersError("Failed to free slot"): var freeSlot: Future[Confirmable] if rewardRecipient =? market.rewardRecipient: # If --reward-recipient specified, use it as the reward recipient, and use @@ -273,11 +274,11 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = discard await freeSlot.confirm(1) method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} = - convertEthersError: + convertEthersError("Failed to withdraw funds"): discard await market.contract.withdrawFunds(requestId).confirm(1) method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = - convertEthersError: + convertEthersError("Failed to get proof requirement"): try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.isProofRequired(id, overrides) @@ -285,7 +286,7 @@ method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async return false method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = - convertEthersError: + convertEthersError("Failed to get future proof requirement"): try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.willProofBeRequired(id, overrides) @@ -295,18 +296,18 @@ method 
willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.a method getChallenge*( market: OnChainMarket, id: SlotId ): Future[ProofChallenge] {.async.} = - convertEthersError: + convertEthersError("Failed to get proof challenge"): let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.getChallenge(id, overrides) method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} = - convertEthersError: + convertEthersError("Failed to submit proof"): discard await market.contract.submitProof(id, proof).confirm(1) method markProofAsMissing*( market: OnChainMarket, id: SlotId, period: Period ) {.async.} = - convertEthersError: + convertEthersError("Failed to mark proof as missing"): discard await market.contract.markProofAsMissing(id, period).confirm(1) method canProofBeMarkedAsMissing*( @@ -325,7 +326,7 @@ method canProofBeMarkedAsMissing*( method reserveSlot*( market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ) {.async.} = - convertEthersError: + convertEthersError("Failed to reserve slot"): discard await market.contract .reserveSlot( requestId, @@ -338,7 +339,7 @@ method reserveSlot*( method canReserveSlot*( market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ): Future[bool] {.async.} = - convertEthersError: + convertEthersError("Unable to determine if slot can be reserved"): return await market.contract.canReserveSlot(requestId, slotIndex) method subscribeRequests*( @@ -351,7 +352,7 @@ method subscribeRequests*( callback(event.requestId, event.ask, event.expiry) - convertEthersError: + convertEthersError("Failed to subscribe to StorageRequested events"): let subscription = await market.contract.subscribe(StorageRequested, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -365,7 +366,7 @@ method subscribeSlotFilled*( callback(event.requestId, event.slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotFilled events"): 
let subscription = await market.contract.subscribe(SlotFilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -379,7 +380,7 @@ method subscribeSlotFilled*( if eventRequestId == requestId and eventSlotIndex == slotIndex: callback(requestId, slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotFilled events"): return await market.subscribeSlotFilled(onSlotFilled) method subscribeSlotFreed*( @@ -392,7 +393,7 @@ method subscribeSlotFreed*( callback(event.requestId, event.slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotFreed events"): let subscription = await market.contract.subscribe(SlotFreed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -407,7 +408,7 @@ method subscribeSlotReservationsFull*( callback(event.requestId, event.slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotReservationsFull events"): let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -421,7 +422,7 @@ method subscribeFulfillment( callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFulfilled events"): let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -436,7 +437,7 @@ method subscribeFulfillment( if event.requestId == requestId: callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFulfilled events"): let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -450,7 +451,7 @@ method subscribeRequestCancelled*( callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestCancelled events"): let subscription = await 
market.contract.subscribe(RequestCancelled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -465,7 +466,7 @@ method subscribeRequestCancelled*( if event.requestId == requestId: callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestCancelled events"): let subscription = await market.contract.subscribe(RequestCancelled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -479,7 +480,7 @@ method subscribeRequestFailed*( callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFailed events"): let subscription = await market.contract.subscribe(RequestFailed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -494,7 +495,7 @@ method subscribeRequestFailed*( if event.requestId == requestId: callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFailed events"): let subscription = await market.contract.subscribe(RequestFailed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -508,7 +509,7 @@ method subscribeProofSubmission*( callback(event.id) - convertEthersError: + convertEthersError("Failed to subscribe to ProofSubmitted events"): let subscription = await market.contract.subscribe(ProofSubmitted, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -518,13 +519,13 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} = method queryPastSlotFilledEvents*( market: OnChainMarket, fromBlock: BlockTag ): Future[seq[SlotFilled]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past SlotFilled events from block"): return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest) method queryPastSlotFilledEvents*( market: OnChainMarket, blocksAgo: int ): Future[seq[SlotFilled]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past 
SlotFilled events"): let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) return await market.queryPastSlotFilledEvents(fromBlock) @@ -532,21 +533,21 @@ method queryPastSlotFilledEvents*( method queryPastSlotFilledEvents*( market: OnChainMarket, fromTime: SecondsSince1970 ): Future[seq[SlotFilled]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past SlotFilled events from time"): let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime) return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock)) method queryPastStorageRequestedEvents*( market: OnChainMarket, fromBlock: BlockTag ): Future[seq[StorageRequested]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past StorageRequested events from block"): return await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest) method queryPastStorageRequestedEvents*( market: OnChainMarket, blocksAgo: int ): Future[seq[StorageRequested]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past StorageRequested events"): let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) return await market.queryPastStorageRequestedEvents(fromBlock) diff --git a/vendor/nim-ethers b/vendor/nim-ethers index d2b11a86..b505ef1a 160000 --- a/vendor/nim-ethers +++ b/vendor/nim-ethers @@ -1 +1 @@ -Subproject commit d2b11a865796a55296027f8ffba68398035ad435 +Subproject commit b505ef1ab889be8161bb1efb4908e3dfde5bc1c9 From f1b84dc6d1b295a59e6060f8a1026a5b1e280a9e Mon Sep 17 00:00:00 2001 From: tianzedavid <168427849+tianzedavid@users.noreply.github.com> Date: Fri, 14 Mar 2025 06:46:44 +0800 Subject: [PATCH 30/40] chore: fix some typos (#1110) Signed-off-by: tianzedavid Co-authored-by: Dmitriy Ryajov --- README.md | 4 ++-- nix/default.nix | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index d073057f..2a15051f 100644 --- a/README.md +++ b/README.md @@ -59,8 +59,8 @@ 
Feel free to dive in, contributions are welcomed! Open an issue or submit PRs. ### Linting and formatting -`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is requrired to adhere to its styling. +`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling. If you are setting up fresh setup, in order to get `nph` run `make build-nph`. In order to format files run `make nph/`. -If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior commiting them. +If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them. If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension you can enable "Format On Save" (eq. the `nim.formatOnSave` property) that will format the files using `nph`. \ No newline at end of file diff --git a/nix/default.nix b/nix/default.nix index 691e2af3..b5823f86 100644 --- a/nix/default.nix +++ b/nix/default.nix @@ -56,7 +56,7 @@ in pkgs.gcc13Stdenv.mkDerivation rec { fakeCargo ]; - # Disable CPU optmizations that make binary not portable. + # Disable CPU optimizations that make binary not portable. NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}"; # Avoid Nim cache permission errors. 
XDG_CACHE_HOME = "/tmp"; From 75db491d84116f4cf9e38550d0bc52763d42a81f Mon Sep 17 00:00:00 2001 From: munna0908 <88337208+munna0908@users.noreply.github.com> Date: Fri, 14 Mar 2025 18:39:18 +0530 Subject: [PATCH 31/40] fix: optimise erasure encode/decode (#1123) * avoid copying block,parity data to shared memory * use alloc instead of allocShared * code cleanup --- codex/erasure/erasure.nim | 82 +++++++++++++------------------------ codex/utils/arrayutils.nim | 13 ++++++ tests/codex/testerasure.nim | 16 ++++---- 3 files changed, 49 insertions(+), 62 deletions(-) diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim index 78ce3971..884969d0 100644 --- a/codex/erasure/erasure.nim +++ b/codex/erasure/erasure.nim @@ -310,10 +310,10 @@ proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} = else: task[].success.store(true) -proc encodeAsync*( +proc asyncEncode*( self: Erasure, blockSize, blocksLen, parityLen: int, - data: ref seq[seq[byte]], + blocks: ref seq[seq[byte]], parity: ptr UncheckedArray[ptr UncheckedArray[byte]], ): Future[?!void] {.async: (raises: [CancelledError]).} = without threadPtr =? 
ThreadSignalPtr.new(): @@ -322,13 +322,10 @@ proc encodeAsync*( defer: threadPtr.close().expect("closing once works") - var blockData = createDoubleArray(blocksLen, blockSize) - - for i in 0 ..< data[].len: - copyMem(blockData[i], addr data[i][0], blockSize) + var data = makeUncheckedArray(blocks) defer: - freeDoubleArray(blockData, blocksLen) + dealloc(data) ## Create an ecode task with block data var task = EncodeTask( @@ -336,7 +333,7 @@ proc encodeAsync*( blockSize: blockSize, blocksLen: blocksLen, parityLen: parityLen, - blocks: blockData, + blocks: data, parity: parity, signal: threadPtr, ) @@ -348,18 +345,13 @@ proc encodeAsync*( self.taskPool.spawn leopardEncodeTask(self.taskPool, t) let threadFut = threadPtr.wait() - try: - await threadFut.join() - except CatchableError as exc: - try: - await threadFut - except AsyncError as asyncExc: - return failure(asyncExc.msg) - finally: - if exc of CancelledError: - raise (ref CancelledError) exc - else: - return failure(exc.msg) + if joinErr =? catch(await threadFut.join()).errorOption: + if err =? catch(await noCancel threadFut).errorOption: + return failure(err) + if joinErr of CancelledError: + raise (ref CancelledError) joinErr + else: + return failure(joinErr) if not t.success.load(): return failure("Leopard encoding failed") @@ -409,7 +401,7 @@ proc encodeData( try: if err =? 
( - await self.encodeAsync( + await self.asyncEncode( manifest.blockSize.int, params.ecK, params.ecM, data, parity ) ).errorOption: @@ -489,6 +481,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} = task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen) defer: decoder.release() + discard task[].signal.fireSync() if ( let res = decoder.decode( @@ -506,9 +499,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} = else: task[].success.store(true) - discard task[].signal.fireSync() - -proc decodeAsync*( +proc asyncDecode*( self: Erasure, blockSize, blocksLen, parityLen: int, blocks, parity: ref seq[seq[byte]], @@ -521,24 +512,12 @@ proc decodeAsync*( threadPtr.close().expect("closing once works") var - blocksData = createDoubleArray(blocksLen, blockSize) - parityData = createDoubleArray(parityLen, blockSize) - - for i in 0 ..< blocks[].len: - if blocks[i].len > 0: - copyMem(blocksData[i], addr blocks[i][0], blockSize) - else: - blocksData[i] = nil - - for i in 0 ..< parity[].len: - if parity[i].len > 0: - copyMem(parityData[i], addr parity[i][0], blockSize) - else: - parityData[i] = nil + blockData = makeUncheckedArray(blocks) + parityData = makeUncheckedArray(parity) defer: - freeDoubleArray(blocksData, blocksLen) - freeDoubleArray(parityData, parityLen) + dealloc(blockData) + dealloc(parityData) ## Create an decode task with block data var task = DecodeTask( @@ -547,7 +526,7 @@ proc decodeAsync*( blocksLen: blocksLen, parityLen: parityLen, recoveredLen: blocksLen, - blocks: blocksData, + blocks: blockData, parity: parityData, recovered: recovered, signal: threadPtr, @@ -560,18 +539,13 @@ proc decodeAsync*( self.taskPool.spawn leopardDecodeTask(self.taskPool, t) let threadFut = threadPtr.wait() - try: - await threadFut.join() - except CatchableError as exc: - try: - await threadFut - except AsyncError as asyncExc: - return failure(asyncExc.msg) - finally: - if exc of CancelledError: - raise 
(ref CancelledError) exc - else: - return failure(exc.msg) + if joinErr =? catch(await threadFut.join()).errorOption: + if err =? catch(await noCancel threadFut).errorOption: + return failure(err) + if joinErr of CancelledError: + raise (ref CancelledError) joinErr + else: + return failure(joinErr) if not t.success.load(): return failure("Leopard encoding failed") @@ -627,7 +601,7 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = trace "Erasure decoding data" try: if err =? ( - await self.decodeAsync( + await self.asyncDecode( encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered ) ).errorOption: diff --git a/codex/utils/arrayutils.nim b/codex/utils/arrayutils.nim index c398921f..e36a0cb3 100644 --- a/codex/utils/arrayutils.nim +++ b/codex/utils/arrayutils.nim @@ -23,3 +23,16 @@ proc freeDoubleArray*( # Free outer array if not arr.isNil: deallocShared(arr) + +proc makeUncheckedArray*( + data: ref seq[seq[byte]] +): ptr UncheckedArray[ptr UncheckedArray[byte]] = + result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](alloc0( + sizeof(ptr UncheckedArray[byte]) * data[].len + )) + + for i, blk in data[]: + if blk.len > 0: + result[i] = cast[ptr UncheckedArray[byte]](addr blk[0]) + else: + result[i] = nil diff --git a/tests/codex/testerasure.nim b/tests/codex/testerasure.nim index d469b379..5046bac2 100644 --- a/tests/codex/testerasure.nim +++ b/tests/codex/testerasure.nim @@ -228,7 +228,7 @@ suite "Erasure encode/decode": discard (await erasure.decode(encoded)).tryGet() test "Should concurrently encode/decode multiple datasets": - const iterations = 2 + const iterations = 5 let datasetSize = 1.MiBs @@ -335,18 +335,18 @@ suite "Erasure encode/decode": for i in 0 ..< parityLen: paritySeq[i] = cast[seq[byte]](parity[i]) - # call encodeAsync to get the parity + # call asyncEncode to get the parity let encFut = - await erasure.encodeAsync(BlockSize.int, blocksLen, parityLen, data, parity) + await 
erasure.asyncEncode(BlockSize.int, blocksLen, parityLen, data, parity) check encFut.isOk - let decFut = await erasure.decodeAsync( + let decFut = await erasure.asyncDecode( BlockSize.int, blocksLen, parityLen, data, paritySeq, recovered ) check decFut.isOk - # call encodeAsync and cancel the task - let encodeFut = erasure.encodeAsync( + # call asyncEncode and cancel the task + let encodeFut = erasure.asyncEncode( BlockSize.int, blocksLen, parityLen, data, cancelledTaskParity ) encodeFut.cancel() @@ -359,8 +359,8 @@ suite "Erasure encode/decode": for i in 0 ..< parityLen: check equalMem(parity[i], cancelledTaskParity[i], BlockSize.int) - # call decodeAsync and cancel the task - let decodeFut = erasure.decodeAsync( + # call asyncDecode and cancel the task + let decodeFut = erasure.asyncDecode( BlockSize.int, blocksLen, parityLen, data, paritySeq, cancelledTaskRecovered ) decodeFut.cancel() From 54177e9fbfd143534b51131d7893459fe7469f4b Mon Sep 17 00:00:00 2001 From: Giuliano Mega Date: Mon, 17 Mar 2025 17:08:24 -0300 Subject: [PATCH 32/40] feat(integration): use async client instead of standard Nim HTTP client (#1159) * WiP: migrating CodexClient to chronos http client * fix(api): fixes #1163 * feat: fully working API integration tests * convert most of the tests in testupdownload * feat: working updownload tests on async client * feat: make testsales work with async codexclient * feat: make testpurchasing work with async codexclient * feat: make testblockexpiration work with async codexclient * feat: make marketplacesuite work with async codexclient * make testproofs work with async codexclient * chore: refactor client to express higher level in terms of lower level operations * fix: set correct content-length for erasure-coded datasets * feat: make testecbug work with async client * feat: make testvalidator work with async client * refactor: simplify request aliases, add close operation * wire back client.close at node shutdown * refactor: remove unused exception * 
fix: use await instead of waitFor on async call sites --- codex/rest/api.nim | 11 +- tests/integration/codexclient.nim | 357 ++++++++++++++-------- tests/integration/codexprocess.nim | 2 +- tests/integration/marketplacesuite.nim | 26 +- tests/integration/multinodes.nim | 10 +- tests/integration/testblockexpiration.nim | 10 +- tests/integration/testecbug.nim | 31 +- tests/integration/testmarketplace.nim | 52 ++-- tests/integration/testproofs.nim | 38 +-- tests/integration/testpurchasing.nim | 104 ++++--- tests/integration/testrestapi.nim | 302 +++++++++--------- tests/integration/testsales.nim | 153 ++++++---- tests/integration/testupdownload.nim | 41 +-- tests/integration/testvalidator.nim | 16 +- tests/testTaiko.nim | 2 +- 15 files changed, 656 insertions(+), 499 deletions(-) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 054e1c2b..553cb91c 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -114,9 +114,14 @@ proc retrieveCid( else: resp.setHeader("Content-Disposition", "attachment") - resp.setHeader("Content-Length", $manifest.datasetSize.int) + # For erasure-coded datasets, we need to return the _original_ length; i.e., + # the length of the non-erasure-coded dataset, as that's what we will be + # returning to the client. 
+ let contentLength = + if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize + resp.setHeader("Content-Length", $(contentLength.int)) - await resp.prepareChunked() + await resp.prepare(HttpResponseStreamType.Plain) while not stream.atEof: var @@ -129,7 +134,7 @@ proc retrieveCid( bytes += buff.len - await resp.sendChunk(addr buff[0], buff.len) + await resp.send(addr buff[0], buff.len) await resp.finish() codex_api_downloads.inc() except CancelledError as exc: diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index 4a106253..ef76b577 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -4,119 +4,216 @@ import std/strutils from pkg/libp2p import Cid, `$`, init import pkg/stint import pkg/questionable/results -import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient] +import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient, httptable] import pkg/codex/logutils import pkg/codex/rest/json import pkg/codex/purchasing import pkg/codex/errors import pkg/codex/sales/reservations -export purchasing +export purchasing, httptable, httpclient type CodexClient* = ref object baseurl: string - httpClients: seq[HttpClient] - -type CodexClientError* = object of CatchableError - -const HttpClientTimeoutMs = 60 * 1000 + session: HttpSessionRef proc new*(_: type CodexClient, baseurl: string): CodexClient = - CodexClient(baseurl: baseurl, httpClients: newSeq[HttpClient]()) + CodexClient(session: HttpSessionRef.new(), baseurl: baseurl) -proc http*(client: CodexClient): HttpClient = - let httpClient = newHttpClient(timeout = HttpClientTimeoutMs) - client.httpClients.insert(httpClient) - return httpClient +proc close*(self: CodexClient): Future[void] {.async: (raises: []).} = + await self.session.closeWait() -proc close*(client: CodexClient): void = - for httpClient in client.httpClients: - httpClient.close() +proc request( + self: CodexClient, + httpMethod: 
httputils.HttpMethod, + url: string, + body: openArray[char] = [], + headers: openArray[HttpHeaderTuple] = [], +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + HttpClientRequestRef + .new( + self.session, + url, + httpMethod, + version = HttpVersion11, + flags = {}, + maxResponseHeadersSize = HttpMaxHeadersSize, + headers = headers, + body = body.toOpenArrayByte(0, len(body) - 1), + ).get + .send() -proc info*(client: CodexClient): ?!JsonNode = - let url = client.baseurl & "/debug/info" - JsonNode.parse(client.http().getContent(url)) +proc post( + self: CodexClient, + url: string, + body: string = "", + headers: seq[HttpHeaderTuple] = @[], +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodPost, url, headers = headers, body = body) -proc setLogLevel*(client: CodexClient, level: string) = - let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level - let headers = newHttpHeaders({"Content-Type": "text/plain"}) - let response = client.http().request(url, httpMethod = HttpPost, headers = headers) - assert response.status == "200 OK" +proc get( + self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[] +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodGet, url, headers = headers) -proc upload*(client: CodexClient, contents: string): ?!Cid = - let response = client.http().post(client.baseurl & "/data", contents) - assert response.status == "200 OK" - Cid.init(response.body).mapFailure +proc delete( + self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[] +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodDelete, url, headers = headers) -proc upload*(client: CodexClient, bytes: seq[byte]): ?!Cid = - client.upload(string.fromBytes(bytes)) +proc patch( + self: CodexClient, + url: string, + body: string = "", + headers: seq[HttpHeaderTuple] = @[], +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodPatch, url, headers = headers, body = body) -proc download*(client: CodexClient, cid: Cid, local = false): ?!string = - let response = client.http().get( - client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") - ) +proc body*( + response: HttpClientResponseRef +): Future[string] {.async: (raises: [CancelledError, HttpError]).} = + return bytesToString (await response.getBodyBytes()) - if response.status != "200 OK": - return failure(response.status) +proc getContent( + client: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[] +): Future[string] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.get(url, headers) + return await response.body - success response.body +proc info*( + client: CodexClient +): Future[?!JsonNode] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.get(client.baseurl & "/debug/info") + return JsonNode.parse(await response.body) -proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string = - let response = - client.http().get(client.baseurl & "/data/" & $cid & "/network/manifest") +proc setLogLevel*( + client: CodexClient, level: string +): Future[void] {.async: (raises: [CancelledError, HttpError]).} = + let + url = client.baseurl & "/debug/chronicles/loglevel?level=" & level + headers = @[("Content-Type", "text/plain")] + response = await client.post(url, headers = headers, body = "") + assert response.status == 200 - if response.status != "200 OK": - return failure(response.status) 
+proc uploadRaw*( + client: CodexClient, contents: string, headers: seq[HttpHeaderTuple] = @[] +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return client.post(client.baseurl & "/data", body = contents, headers = headers) - success response.body +proc upload*( + client: CodexClient, contents: string +): Future[?!Cid] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.uploadRaw(contents) + assert response.status == 200 + Cid.init(await response.body).mapFailure -proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = - let response = client.http().post(client.baseurl & "/data/" & $cid & "/network") +proc upload*( + client: CodexClient, bytes: seq[byte] +): Future[?!Cid] {.async: (raw: true).} = + return client.upload(string.fromBytes(bytes)) - if response.status != "200 OK": - return failure(response.status) - - success response.body +proc downloadRaw*( + client: CodexClient, cid: string, local = false +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return + client.get(client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream")) proc downloadBytes*( client: CodexClient, cid: Cid, local = false -): Future[?!seq[byte]] {.async.} = - let uri = client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") +): Future[?!seq[byte]] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.downloadRaw($cid, local = local) - let response = client.http().get(uri) + if response.status != 200: + return failure($response.status) - if response.status != "200 OK": - return failure("fetch failed with status " & $response.status) + success await response.getBodyBytes() - success response.body.toBytes +proc download*( + client: CodexClient, cid: Cid, local = false +): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} = + without response =? 
await client.downloadBytes(cid, local = local), err: + return failure(err) + return success bytesToString(response) -proc delete*(client: CodexClient, cid: Cid): ?!void = - let - url = client.baseurl & "/data/" & $cid - response = client.http().delete(url) +proc downloadNoStream*( + client: CodexClient, cid: Cid +): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.post(client.baseurl & "/data/" & $cid & "/network") - if response.status != "204 No Content": - return failure(response.status) + if response.status != 200: + return failure($response.status) + + success await response.body + +proc downloadManifestOnly*( + client: CodexClient, cid: Cid +): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} = + let response = + await client.get(client.baseurl & "/data/" & $cid & "/network/manifest") + + if response.status != 200: + return failure($response.status) + + success await response.body + +proc deleteRaw*( + client: CodexClient, cid: string +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return client.delete(client.baseurl & "/data/" & cid) + +proc delete*( + client: CodexClient, cid: Cid +): Future[?!void] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.deleteRaw($cid) + + if response.status != 204: + return failure($response.status) success() -proc list*(client: CodexClient): ?!RestContentList = - let url = client.baseurl & "/data" - let response = client.http().get(url) +proc listRaw*( + client: CodexClient +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return client.get(client.baseurl & "/data") - if response.status != "200 OK": - return failure(response.status) +proc list*( + client: CodexClient +): Future[?!RestContentList] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.listRaw() - RestContentList.fromJson(response.body) + if response.status != 200: + return failure($response.status) -proc space*(client: CodexClient): ?!RestRepoStore = + RestContentList.fromJson(await response.body) + +proc space*( + client: CodexClient +): Future[?!RestRepoStore] {.async: (raises: [CancelledError, HttpError]).} = let url = client.baseurl & "/space" - let response = client.http().get(url) + let response = await client.get(url) - if response.status != "200 OK": - return failure(response.status) + if response.status != 200: + return failure($response.status) - RestRepoStore.fromJson(response.body) + RestRepoStore.fromJson(await response.body) proc requestStorageRaw*( client: CodexClient, @@ -128,7 +225,9 @@ proc requestStorageRaw*( expiry: uint64 = 0, nodes: uint = 3, tolerance: uint = 1, -): Response = +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = ## Call request storage REST endpoint ## let url = client.baseurl & "/storage/request/" & $cid @@ -145,7 +244,7 @@ proc requestStorageRaw*( if expiry != 0: json["expiry"] = %($expiry) - return client.http().post(url, $json) + return client.post(url, $json) proc requestStorage*( client: CodexClient, @@ -157,43 +256,45 @@ proc requestStorage*( collateralPerByte: UInt256, nodes: uint = 3, tolerance: uint = 1, -): ?!PurchaseId = +): Future[?!PurchaseId] {.async: (raises: [CancelledError, HttpError]).} = ## Call request storage REST endpoint ## - let response = client.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes, tolerance, - ) - if response.status != "200 OK": - doAssert(false, response.body) - PurchaseId.fromHex(response.body).catch + let + response = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes, tolerance, + ) + body = await response.body -proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase = + if response.status != 200: + doAssert(false, body) + PurchaseId.fromHex(body).catch + +proc getPurchase*( + client: CodexClient, purchaseId: PurchaseId +): Future[?!RestPurchase] {.async: (raises: [CancelledError, HttpError]).} = let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex try: - let body = client.http().getContent(url) + let body = await client.getContent(url) return RestPurchase.fromJson(body) except CatchableError as e: return failure e.msg -proc getSalesAgent*(client: CodexClient, slotId: SlotId): ?!RestSalesAgent = +proc getSalesAgent*( + client: CodexClient, slotId: SlotId +): Future[?!RestSalesAgent] {.async: (raises: [CancelledError, HttpError]).} = let url = client.baseurl & "/sales/slots/" & slotId.toHex try: - let body = client.http().getContent(url) + let body = await client.getContent(url) return 
RestSalesAgent.fromJson(body) except CatchableError as e: return failure e.msg -proc getSlots*(client: CodexClient): ?!seq[Slot] = - let url = client.baseurl & "/sales/slots" - let body = client.http().getContent(url) - seq[Slot].fromJson(body) - proc postAvailability*( client: CodexClient, totalSize, duration: uint64, minPricePerBytePerSecond, totalCollateral: UInt256, -): ?!Availability = +): Future[?!Availability] {.async: (raises: [CancelledError, HttpError]).} = ## Post sales availability endpoint ## let url = client.baseurl & "/sales/availability" @@ -204,17 +305,21 @@ proc postAvailability*( "minPricePerBytePerSecond": minPricePerBytePerSecond, "totalCollateral": totalCollateral, } - let response = client.http().post(url, $json) - doAssert response.status == "201 Created", - "expected 201 Created, got " & response.status & ", body: " & response.body - Availability.fromJson(response.body) + let response = await client.post(url, $json) + let body = await response.body + + doAssert response.status == 201, + "expected 201 Created, got " & $response.status & ", body: " & body + Availability.fromJson(body) proc patchAvailabilityRaw*( client: CodexClient, availabilityId: AvailabilityId, totalSize, freeSize, duration: ?uint64 = uint64.none, minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, -): Response = +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = ## Updates availability ## let url = client.baseurl & "/sales/availability/" & $availabilityId @@ -237,66 +342,50 @@ proc patchAvailabilityRaw*( if totalCollateral =? 
totalCollateral: json["totalCollateral"] = %totalCollateral - client.http().patch(url, $json) + client.patch(url, $json) proc patchAvailability*( client: CodexClient, availabilityId: AvailabilityId, totalSize, duration: ?uint64 = uint64.none, minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, -): void = - let response = client.patchAvailabilityRaw( +): Future[void] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.patchAvailabilityRaw( availabilityId, totalSize = totalSize, duration = duration, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, ) - doAssert response.status == "200 OK", "expected 200 OK, got " & response.status + doAssert response.status == 200, "expected 200 OK, got " & $response.status -proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = +proc getAvailabilities*( + client: CodexClient +): Future[?!seq[Availability]] {.async: (raises: [CancelledError, HttpError]).} = ## Call sales availability REST endpoint let url = client.baseurl & "/sales/availability" - let body = client.http().getContent(url) + let body = await client.getContent(url) seq[Availability].fromJson(body) proc getAvailabilityReservations*( client: CodexClient, availabilityId: AvailabilityId -): ?!seq[Reservation] = +): Future[?!seq[Reservation]] {.async: (raises: [CancelledError, HttpError]).} = ## Retrieves Availability's Reservations let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations" - let body = client.http().getContent(url) + let body = await client.getContent(url) seq[Reservation].fromJson(body) -proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool = - client.getPurchase(id).option .? state == some state +proc purchaseStateIs*( + client: CodexClient, id: PurchaseId, state: string +): Future[bool] {.async: (raises: [CancelledError, HttpError]).} = + (await client.getPurchase(id)).option .? 
state == some state -proc saleStateIs*(client: CodexClient, id: SlotId, state: string): bool = - client.getSalesAgent(id).option .? state == some state +proc saleStateIs*( + client: CodexClient, id: SlotId, state: string +): Future[bool] {.async: (raises: [CancelledError, HttpError]).} = + (await client.getSalesAgent(id)).option .? state == some state -proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId = - return client.getPurchase(id).option .? requestId - -proc uploadRaw*( - client: CodexClient, contents: string, headers = newHttpHeaders() -): Response = - return client.http().request( - client.baseurl & "/data", - body = contents, - httpMethod = HttpPost, - headers = headers, - ) - -proc listRaw*(client: CodexClient): Response = - return client.http().request(client.baseurl & "/data", httpMethod = HttpGet) - -proc downloadRaw*( - client: CodexClient, cid: string, local = false, httpClient = client.http() -): Response = - return httpClient.request( - client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"), - httpMethod = HttpGet, - ) - -proc deleteRaw*(client: CodexClient, cid: string): Response = - return client.http().request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete) +proc requestId*( + client: CodexClient, id: PurchaseId +): Future[?RequestId] {.async: (raises: [CancelledError, HttpError]).} = + return (await client.getPurchase(id)).option .? requestId diff --git a/tests/integration/codexprocess.nim b/tests/integration/codexprocess.nim index 79d4b040..3eca5b04 100644 --- a/tests/integration/codexprocess.nim +++ b/tests/integration/codexprocess.nim @@ -68,7 +68,7 @@ method stop*(node: CodexProcess) {.async.} = trace "stopping codex client" if client =? 
node.client: - client.close() + await client.close() node.client = none CodexClient method removeDataDir*(node: CodexProcess) = diff --git a/tests/integration/marketplacesuite.nim b/tests/integration/marketplacesuite.nim index d7502bf4..1e09963b 100644 --- a/tests/integration/marketplacesuite.nim +++ b/tests/integration/marketplacesuite.nim @@ -60,13 +60,13 @@ template marketplacesuite*(name: string, body: untyped) = duration: uint64, collateralPerByte: UInt256, minPricePerBytePerSecond: UInt256, - ) = + ): Future[void] {.async: (raises: [CancelledError, HttpError, ConfigurationError]).} = let totalCollateral = datasetSize.u256 * collateralPerByte # post availability to each provider for i in 0 ..< providers().len: let provider = providers()[i].client - discard provider.postAvailability( + discard await provider.postAvailability( totalSize = datasetSize, duration = duration.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, @@ -83,16 +83,18 @@ template marketplacesuite*(name: string, body: untyped) = expiry: uint64 = 4.periods, nodes = providers().len, tolerance = 0, - ): Future[PurchaseId] {.async.} = - let id = client.requestStorage( - cid, - expiry = expiry, - duration = duration, - proofProbability = proofProbability, - collateralPerByte = collateralPerByte, - pricePerBytePerSecond = pricePerBytePerSecond, - nodes = nodes.uint, - tolerance = tolerance.uint, + ): Future[PurchaseId] {.async: (raises: [CancelledError, HttpError]).} = + let id = ( + await client.requestStorage( + cid, + expiry = expiry, + duration = duration, + proofProbability = proofProbability, + collateralPerByte = collateralPerByte, + pricePerBytePerSecond = pricePerBytePerSecond, + nodes = nodes.uint, + tolerance = tolerance.uint, + ) ).get return id diff --git a/tests/integration/multinodes.nim b/tests/integration/multinodes.nim index bade6899..0003b216 100644 --- a/tests/integration/multinodes.nim +++ b/tests/integration/multinodes.nim @@ -275,8 +275,10 @@ template 
multinodesuite*(name: string, body: untyped) = fail() quit(1) - proc updateBootstrapNodes(node: CodexProcess) = - without ninfo =? node.client.info(): + proc updateBootstrapNodes( + node: CodexProcess + ): Future[void] {.async: (raises: [CatchableError]).} = + without ninfo =? await node.client.info(): # raise CatchableError instead of Defect (with .get or !) so we # can gracefully shutdown and prevent zombies raiseMultiNodeSuiteError "Failed to get node info" @@ -315,14 +317,14 @@ template multinodesuite*(name: string, body: untyped) = for config in clients.configs: let node = await startClientNode(config) running.add RunningNode(role: Role.Client, node: node) - CodexProcess(node).updateBootstrapNodes() + await CodexProcess(node).updateBootstrapNodes() if var providers =? nodeConfigs.providers: failAndTeardownOnError "failed to start provider nodes": for config in providers.configs.mitems: let node = await startProviderNode(config) running.add RunningNode(role: Role.Provider, node: node) - CodexProcess(node).updateBootstrapNodes() + await CodexProcess(node).updateBootstrapNodes() if var validators =? 
nodeConfigs.validators: failAndTeardownOnError "failed to start validator nodes": diff --git a/tests/integration/testblockexpiration.nim b/tests/integration/testblockexpiration.nim index 7e742c2a..6a33f3c6 100644 --- a/tests/integration/testblockexpiration.nim +++ b/tests/integration/testblockexpiration.nim @@ -18,11 +18,11 @@ multinodesuite "Node block expiration tests": let client = clients()[0] let clientApi = client.client - let contentId = clientApi.upload(content).get + let contentId = (await clientApi.upload(content)).get await sleepAsync(2.seconds) - let download = clientApi.download(contentId, local = true) + let download = await clientApi.download(contentId, local = true) check: download.isOk @@ -39,12 +39,12 @@ multinodesuite "Node block expiration tests": let client = clients()[0] let clientApi = client.client - let contentId = clientApi.upload(content).get + let contentId = (await clientApi.upload(content)).get await sleepAsync(3.seconds) - let download = clientApi.download(contentId, local = true) + let download = await clientApi.download(contentId, local = true) check: download.isFailure - download.error.msg == "404 Not Found" + download.error.msg == "404" diff --git a/tests/integration/testecbug.nim b/tests/integration/testecbug.nim index 29a3bc6f..6b86fd29 100644 --- a/tests/integration/testecbug.nim +++ b/tests/integration/testecbug.nim @@ -13,21 +13,18 @@ marketplacesuite "Bug #821 - node crashes during erasure coding": .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log .withLogTopics("node", "erasure", "marketplace").some, - providers: CodexConfigs.init(nodes = 0) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") - .some, + providers: CodexConfigs.init(nodes = 0).some, ): - let pricePerBytePerSecond = 1.u256 - let duration = 20.periods 
- let collateralPerByte = 1.u256 - let expiry = 10.periods - let data = await RandomChunker.example(blocks = 8) - let client = clients()[0] - let clientApi = client.client + let + pricePerBytePerSecond = 1.u256 + duration = 20.periods + collateralPerByte = 1.u256 + expiry = 10.periods + data = await RandomChunker.example(blocks = 8) + client = clients()[0] + clientApi = client.client - let cid = clientApi.upload(data).get + let cid = (await clientApi.upload(data)).get var requestId = none RequestId proc onStorageRequested(eventResult: ?!StorageRequested) = @@ -49,9 +46,11 @@ marketplacesuite "Bug #821 - node crashes during erasure coding": check eventually(requestId.isSome, timeout = expiry.int * 1000) - let request = await marketplace.getRequest(requestId.get) - let cidFromRequest = request.content.cid - let downloaded = await clientApi.downloadBytes(cidFromRequest, local = true) + let + request = await marketplace.getRequest(requestId.get) + cidFromRequest = request.content.cid + downloaded = await clientApi.downloadBytes(cidFromRequest, local = true) + check downloaded.isOk check downloaded.get.toHex == data.toHex diff --git a/tests/integration/testmarketplace.nim b/tests/integration/testmarketplace.nim index 727f3fad..dee3645e 100644 --- a/tests/integration/testmarketplace.nim +++ b/tests/integration/testmarketplace.nim @@ -37,15 +37,17 @@ marketplacesuite "Marketplace": let size = 0xFFFFFF.uint64 let data = await RandomChunker.example(blocks = blocks) # host makes storage available - let availability = host.postAvailability( - totalSize = size, - duration = 20 * 60.uint64, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = size.u256 * minPricePerBytePerSecond, + let availability = ( + await host.postAvailability( + totalSize = size, + duration = 20 * 60.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, + ) ).get # client requests storage - let cid = 
client.upload(data).get + let cid = (await client.upload(data)).get let id = await client.requestStorage( cid, duration = 20 * 60.uint64, @@ -57,15 +59,17 @@ marketplacesuite "Marketplace": tolerance = ecTolerance, ) - check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) - let purchase = client.getPurchase(id).get + check eventually( + await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000 + ) + let purchase = (await client.getPurchase(id)).get check purchase.error == none string - let availabilities = host.getAvailabilities().get + let availabilities = (await host.getAvailabilities()).get check availabilities.len == 1 let newSize = availabilities[0].freeSize check newSize > 0 and newSize < size - let reservations = host.getAvailabilityReservations(availability.id).get + let reservations = (await host.getAvailabilityReservations(availability.id)).get check reservations.len == 3 check reservations[0].requestId == purchase.requestId @@ -80,15 +84,17 @@ marketplacesuite "Marketplace": # host makes storage available let startBalanceHost = await token.balanceOf(hostAccount) - discard host.postAvailability( - totalSize = size, - duration = 20 * 60.uint64, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = size.u256 * minPricePerBytePerSecond, + discard ( + await host.postAvailability( + totalSize = size, + duration = 20 * 60.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, + ) ).get # client requests storage - let cid = client.upload(data).get + let cid = (await client.upload(data)).get let id = await client.requestStorage( cid, duration = duration, @@ -100,8 +106,10 @@ marketplacesuite "Marketplace": tolerance = ecTolerance, ) - check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) - let purchase = client.getPurchase(id).get + check eventually( + await client.purchaseStateIs(id, "started"), timeout = 10 * 
60 * 1000 + ) + let purchase = (await client.getPurchase(id)).get check purchase.error == none string let clientBalanceBeforeFinished = await token.balanceOf(clientAccount) @@ -158,7 +166,7 @@ marketplacesuite "Marketplace payouts": # provider makes storage available let datasetSize = datasetSize(blocks, ecNodes, ecTolerance) let totalAvailabilitySize = (datasetSize div 2).truncate(uint64) - discard providerApi.postAvailability( + discard await providerApi.postAvailability( # make availability size small enough that we can't fill all the slots, # thus causing a cancellation totalSize = totalAvailabilitySize, @@ -167,7 +175,7 @@ marketplacesuite "Marketplace payouts": totalCollateral = collateralPerByte * totalAvailabilitySize.u256, ) - let cid = clientApi.upload(data).get + let cid = (await clientApi.upload(data)).get var slotIdxFilled = none uint64 proc onSlotFilled(eventResult: ?!SlotFilled) = @@ -189,11 +197,11 @@ marketplacesuite "Marketplace payouts": # wait until one slot is filled check eventually(slotIdxFilled.isSome, timeout = expiry.int * 1000) - let slotId = slotId(!clientApi.requestId(id), !slotIdxFilled) + let slotId = slotId(!(await clientApi.requestId(id)), !slotIdxFilled) # wait until sale is cancelled await ethProvider.advanceTime(expiry.u256) - check eventually providerApi.saleStateIs(slotId, "SaleCancelled") + check eventually await providerApi.saleStateIs(slotId, "SaleCancelled") await advanceToNextPeriod() diff --git a/tests/integration/testproofs.nim b/tests/integration/testproofs.nim index ab29ca4e..b0ede765 100644 --- a/tests/integration/testproofs.nim +++ b/tests/integration/testproofs.nim @@ -42,14 +42,14 @@ marketplacesuite "Hosts submit regular proofs": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( + await createAvailabilities( datasetSize.truncate(uint64), duration, collateralPerByte, minPricePerBytePerSecond, ) - 
let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, @@ -59,13 +59,13 @@ marketplacesuite "Hosts submit regular proofs": tolerance = ecTolerance, ) - let purchase = client0.getPurchase(purchaseId).get + let purchase = (await client0.getPurchase(purchaseId)).get check purchase.error == none string let slotSize = slotSize(blocks, ecNodes, ecTolerance) check eventually( - client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 + await client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 ) var proofWasSubmitted = false @@ -119,27 +119,29 @@ marketplacesuite "Simulate invalid proofs": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( + await createAvailabilities( datasetSize.truncate(uint64), duration, collateralPerByte, minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get - let purchaseId = await client0.requestStorage( - cid, - expiry = expiry, - duration = duration, - nodes = ecNodes, - tolerance = ecTolerance, - proofProbability = 1.u256, + let purchaseId = ( + await client0.requestStorage( + cid, + expiry = expiry, + duration = duration, + nodes = ecNodes, + tolerance = ecTolerance, + proofProbability = 1.u256, + ) ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get check eventually( - client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 + await client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 ) var slotWasFreed = false @@ -182,14 +184,14 @@ marketplacesuite "Simulate invalid proofs": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( + await 
createAvailabilities( datasetSize.truncate(uint64), duration, collateralPerByte, minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, @@ -199,7 +201,7 @@ marketplacesuite "Simulate invalid proofs": tolerance = ecTolerance, proofProbability = 1.u256, ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get var slotWasFilled = false proc onSlotFilled(eventResult: ?!SlotFilled) = diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim index 4eb5c775..e5adebe2 100644 --- a/tests/integration/testpurchasing.nim +++ b/tests/integration/testpurchasing.nim @@ -8,22 +8,26 @@ import ../examples twonodessuite "Purchasing": test "node handles storage request", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let id1 = client1.requestStorage( - cid, - duration = 100.uint64, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - expiry = 10.uint64, - collateralPerByte = 1.u256, + let cid = (await client1.upload(data)).get + let id1 = ( + await client1.requestStorage( + cid, + duration = 100.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 10.uint64, + collateralPerByte = 1.u256, + ) ).get - let id2 = client1.requestStorage( - cid, - duration = 400.uint64, - pricePerBytePerSecond = 2.u256, - proofProbability = 6.u256, - expiry = 10.uint64, - collateralPerByte = 2.u256, + let id2 = ( + await client1.requestStorage( + cid, + duration = 400.uint64, + pricePerBytePerSecond = 2.u256, + proofProbability = 6.u256, + expiry = 10.uint64, + collateralPerByte = 2.u256, + ) ).get check id1 != id2 @@ -34,19 +38,21 @@ twonodessuite "Purchasing": rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2 ) let data = await chunker.getBytes() - let cid = client1.upload(byteutils.toHex(data)).get 
- let id = client1.requestStorage( - cid, - duration = 100.uint64, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - expiry = 30.uint64, - collateralPerByte = 1.u256, - nodes = 3, - tolerance = 1, + let cid = (await client1.upload(byteutils.toHex(data))).get + let id = ( + await client1.requestStorage( + cid, + duration = 100.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 30.uint64, + collateralPerByte = 1.u256, + nodes = 3, + tolerance = 1, + ) ).get - let request = client1.getPurchase(id).get.request.get + let request = (await client1.getPurchase(id)).get.request.get check request.content.cid.data.buffer.len > 0 check request.ask.duration == 100.uint64 @@ -75,23 +81,29 @@ twonodessuite "Purchasing": test "node remembers purchase status after restart", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let id = client1.requestStorage( - cid, - duration = 10 * 60.uint64, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - expiry = 5 * 60.uint64, - collateralPerByte = 1.u256, - nodes = 3.uint, - tolerance = 1.uint, + let cid = (await client1.upload(data)).get + let id = ( + await client1.requestStorage( + cid, + duration = 10 * 60.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 5 * 60.uint64, + collateralPerByte = 1.u256, + nodes = 3.uint, + tolerance = 1.uint, + ) ).get - check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) + check eventually( + await client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000 + ) await node1.restart() - check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) - let request = client1.getPurchase(id).get.request.get + check eventually( + await client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000 + ) + let request = (await client1.getPurchase(id)).get.request.get check request.ask.duration == (10 * 60).uint64 
check request.ask.pricePerBytePerSecond == 1.u256 check request.ask.proofProbability == 3.u256 @@ -102,19 +114,19 @@ twonodessuite "Purchasing": test "node requires expiry and its value to be in future", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get - let responseMissing = client1.requestStorageRaw( + let responseMissing = await client1.requestStorageRaw( cid, duration = 1.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, ) - check responseMissing.status == "400 Bad Request" - check responseMissing.body == "Expiry required" + check responseMissing.status == 400 + check (await responseMissing.body) == "Expiry required" - let responseBefore = client1.requestStorageRaw( + let responseBefore = await client1.requestStorageRaw( cid, duration = 10.uint64, pricePerBytePerSecond = 1.u256, @@ -122,6 +134,6 @@ twonodessuite "Purchasing": collateralPerByte = 1.u256, expiry = 10.uint64, ) - check responseBefore.status == "400 Bad Request" + check responseBefore.status == 400 check "Expiry needs value bigger then zero and smaller then the request's duration" in - responseBefore.body + (await responseBefore.body) diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 7164372b..761eda31 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -1,4 +1,3 @@ -import std/httpclient import std/importutils import std/net import std/sequtils @@ -14,29 +13,31 @@ import json twonodessuite "REST API": test "nodes can print their peer information", twoNodesConfig: - check !client1.info() != !client2.info() + check !(await client1.info()) != !(await client2.info()) test "nodes can set chronicles log level", twoNodesConfig: - client1.setLogLevel("DEBUG;TRACE:codex") + await client1.setLogLevel("DEBUG;TRACE:codex") test "node accepts file uploads", twoNodesConfig: - let cid1 = 
client1.upload("some file contents").get - let cid2 = client1.upload("some other contents").get + let cid1 = (await client1.upload("some file contents")).get + let cid2 = (await client1.upload("some other contents")).get check cid1 != cid2 test "node shows used and available space", twoNodesConfig: - discard client1.upload("some file contents").get + discard (await client1.upload("some file contents")).get let totalSize = 12.uint64 let minPricePerBytePerSecond = 1.u256 let totalCollateral = totalSize.u256 * minPricePerBytePerSecond - discard client1.postAvailability( - totalSize = totalSize, - duration = 2.uint64, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = totalCollateral, + discard ( + await client1.postAvailability( + totalSize = totalSize, + duration = 2.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + ) ).get - let space = client1.space().tryGet() + let space = (await client1.space()).tryGet() check: space.totalBlocks == 2 space.quotaMaxBytes == 21474836480.NBytes @@ -47,48 +48,52 @@ twonodessuite "REST API": let content1 = "some file contents" let content2 = "some other contents" - let cid1 = client1.upload(content1).get - let cid2 = client1.upload(content2).get - let list = client1.list().get + let cid1 = (await client1.upload(content1)).get + let cid2 = (await client1.upload(content2)).get + let list = (await client1.list()).get check: [cid1, cid2].allIt(it in list.content.mapIt(it.cid)) test "request storage fails for datasets that are too small", twoNodesConfig: - let cid = client1.upload("some file contents").get - let response = client1.requestStorageRaw( - cid, - duration = 10.uint64, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - collateralPerByte = 1.u256, - expiry = 9.uint64, + let cid = (await client1.upload("some file contents")).get + let response = ( + await client1.requestStorageRaw( + cid, + duration = 10.uint64, + pricePerBytePerSecond = 
1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + expiry = 9.uint64, + ) ) check: - response.status == "400 Bad Request" - response.body == + response.status == 400 + (await response.body) == "Dataset too small for erasure parameters, need at least " & $(2 * DefaultBlockSize.int) & " bytes" test "request storage succeeds for sufficiently sized datasets", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let response = client1.requestStorageRaw( - cid, - duration = 10.uint64, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - collateralPerByte = 1.u256, - expiry = 9.uint64, + let cid = (await client1.upload(data)).get + let response = ( + await client1.requestStorageRaw( + cid, + duration = 10.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + expiry = 9.uint64, + ) ) check: - response.status == "200 OK" + response.status == 200 test "request storage fails if tolerance is zero", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 @@ -97,17 +102,19 @@ twonodessuite "REST API": let nodes = 3 let tolerance = 0 - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes.uint, tolerance.uint, + var responseBefore = ( + await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) ) - check responseBefore.status == "400 Bad Request" - check responseBefore.body == "Tolerance needs to be bigger then zero" + check responseBefore.status == 400 + check (await responseBefore.body) == "Tolerance needs to be bigger then zero" test "request storage fails if duration exceeds limit", 
twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get let duration = (31 * 24 * 60 * 60).uint64 # 31 days TODO: this should not be hardcoded, but waits for https://github.com/codex-storage/nim-codex/issues/1056 let proofProbability = 3.u256 @@ -117,17 +124,19 @@ twonodessuite "REST API": let tolerance = 2 let pricePerBytePerSecond = 1.u256 - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes.uint, tolerance.uint, + var responseBefore = ( + await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) ) - check responseBefore.status == "400 Bad Request" - check "Duration exceeds limit of" in responseBefore.body + check responseBefore.status == 400 + check "Duration exceeds limit of" in (await responseBefore.body) test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 @@ -138,19 +147,21 @@ twonodessuite "REST API": for ecParam in ecParams: let (nodes, tolerance) = ecParam - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, + var responseBefore = ( + await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) ) - check responseBefore.status == "400 Bad Request" - check responseBefore.body == + check responseBefore.status == 400 + check (await responseBefore.body) == "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" 
test "request storage fails if tolerance > nodes (underflow protection)", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 @@ -161,13 +172,15 @@ twonodessuite "REST API": for ecParam in ecParams: let (nodes, tolerance) = ecParam - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, + var responseBefore = ( + await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) ) - check responseBefore.status == "400 Bad Request" - check responseBefore.body == + check responseBefore.status == 400 + check (await responseBefore.body) == "Invalid parameters: `tolerance` cannot be greater than `nodes`" for ecParams in @[ @@ -177,70 +190,69 @@ twonodessuite "REST API": test "request storage succeeds if nodes and tolerance within range " & fmt"({minBlocks=}, {nodes=}, {tolerance=})", twoNodesConfig: let data = await RandomChunker.example(blocks = minBlocks) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 let expiry = 30.uint64 let collateralPerByte = 1.u256 - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, + var responseBefore = ( + await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) ) - check responseBefore.status == "200 OK" + check responseBefore.status == 200 test "node accepts file uploads with content type", twoNodesConfig: - let headers = 
newHttpHeaders({"Content-Type": "text/plain"}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Type", "text/plain")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "200 OK" - check response.body != "" + check response.status == 200 + check (await response.body) != "" test "node accepts file uploads with content disposition", twoNodesConfig: - let headers = - newHttpHeaders({"Content-Disposition": "attachment; filename=\"example.txt\""}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Disposition", "attachment; filename=\"example.txt\"")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "200 OK" - check response.body != "" + check response.status == 200 + check (await response.body) != "" test "node accepts file uploads with content disposition without filename", twoNodesConfig: - let headers = newHttpHeaders({"Content-Disposition": "attachment"}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Disposition", "attachment")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "200 OK" - check response.body != "" + check response.status == 200 + check (await response.body) != "" test "upload fails if content disposition contains bad filename", twoNodesConfig: - let headers = - newHttpHeaders({"Content-Disposition": "attachment; filename=\"exam*ple.txt\""}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Disposition", "attachment; filename=\"exam*ple.txt\"")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "422 Unprocessable Entity" - check response.body == "The filename is not valid." + check response.status == 422 + check (await response.body) == "The filename is not valid." 
test "upload fails if content type is invalid", twoNodesConfig: - let headers = newHttpHeaders({"Content-Type": "hello/world"}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Type", "hello/world")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "422 Unprocessable Entity" - check response.body == "The MIME type 'hello/world' is not valid." + check response.status == 422 + check (await response.body) == "The MIME type 'hello/world' is not valid." test "node retrieve the metadata", twoNodesConfig: - let headers = newHttpHeaders( - { - "Content-Type": "text/plain", - "Content-Disposition": "attachment; filename=\"example.txt\"", - } - ) - let uploadResponse = client1.uploadRaw("some file contents", headers) - let cid = uploadResponse.body - let listResponse = client1.listRaw() + let headers = + @[ + ("Content-Type", "text/plain"), + ("Content-Disposition", "attachment; filename=\"example.txt\""), + ] + let uploadResponse = await client1.uploadRaw("some file contents", headers) + let cid = await uploadResponse.body + let listResponse = await client1.listRaw() - let jsonData = parseJson(listResponse.body) + let jsonData = parseJson(await listResponse.body) check jsonData.hasKey("content") == true @@ -256,83 +268,79 @@ twonodessuite "REST API": check manifest["mimetype"].getStr() == "text/plain" test "node set the headers when for download", twoNodesConfig: - let headers = newHttpHeaders( - { - "Content-Disposition": "attachment; filename=\"example.txt\"", - "Content-Type": "text/plain", - } - ) + let headers = + @[ + ("Content-Disposition", "attachment; filename=\"example.txt\""), + ("Content-Type", "text/plain"), + ] - let uploadResponse = client1.uploadRaw("some file contents", headers) - let cid = uploadResponse.body + let uploadResponse = await client1.uploadRaw("some file contents", headers) + let cid = await uploadResponse.body - check uploadResponse.status == "200 
OK" + check uploadResponse.status == 200 - let response = client1.downloadRaw(cid) + let response = await client1.downloadRaw(cid) - check response.status == "200 OK" - check response.headers.hasKey("Content-Type") == true - check response.headers["Content-Type"] == "text/plain" - check response.headers.hasKey("Content-Disposition") == true - check response.headers["Content-Disposition"] == + check response.status == 200 + check "Content-Type" in response.headers + check response.headers.getString("Content-Type") == "text/plain" + check "Content-Disposition" in response.headers + check response.headers.getString("Content-Disposition") == "attachment; filename=\"example.txt\"" let local = true - let localResponse = client1.downloadRaw(cid, local) + let localResponse = await client1.downloadRaw(cid, local) - check localResponse.status == "200 OK" - check localResponse.headers.hasKey("Content-Type") == true - check localResponse.headers["Content-Type"] == "text/plain" - check localResponse.headers.hasKey("Content-Disposition") == true - check localResponse.headers["Content-Disposition"] == + check localResponse.status == 200 + check "Content-Type" in localResponse.headers + check localResponse.headers.getString("Content-Type") == "text/plain" + check "Content-Disposition" in localResponse.headers + check localResponse.headers.getString("Content-Disposition") == "attachment; filename=\"example.txt\"" test "should delete a dataset when requested", twoNodesConfig: - let cid = client1.upload("some file contents").get + let cid = (await client1.upload("some file contents")).get - var response = client1.downloadRaw($cid, local = true) - check response.body == "some file contents" + var response = await client1.downloadRaw($cid, local = true) + check (await response.body) == "some file contents" - client1.delete(cid).get + (await client1.delete(cid)).get - response = client1.downloadRaw($cid, local = true) - check response.status == "404 Not Found" + response = await 
client1.downloadRaw($cid, local = true) + check response.status == 404 test "should return 200 when attempting delete of non-existing block", twoNodesConfig: - let response = client1.deleteRaw($(Cid.example())) - check response.status == "204 No Content" + let response = await client1.deleteRaw($(Cid.example())) + check response.status == 204 test "should return 200 when attempting delete of non-existing dataset", twoNodesConfig: let cid = Manifest.example().makeManifestBlock().get.cid - let response = client1.deleteRaw($cid) - check response.status == "204 No Content" + let response = await client1.deleteRaw($cid) + check response.status == 204 test "should not crash if the download stream is closed before download completes", twoNodesConfig: - privateAccess(client1.type) - privateAccess(client1.http.type) + # FIXME this is not a good test. For some reason, to get this to fail, I have to + # store content that is several times the default stream buffer size, otherwise + # the test will succeed even when the bug is present. Since this is probably some + # setting that is internal to chronos, it might change in future versions, + # invalidating this test. Works on Chronos 4.0.3. - let cid = client1.upload(repeat("some file contents", 1000)).get - let httpClient = client1.http() + let + contents = repeat("b", DefaultStreamBufferSize * 10) + cid = (await client1.upload(contents)).get + response = await client1.downloadRaw($cid) - try: - # Sadly, there's no high level API for preventing the client from - # consuming the whole response, and we need to close the socket - # before that happens if we want to trigger the bug, so we need to - # resort to this. - httpClient.getBody = false - let response = client1.downloadRaw($cid, httpClient = httpClient) + let reader = response.getBodyReader() - # Read 4 bytes from the stream just to make sure we actually - # receive some data. 
- let data = httpClient.socket.recv(4) - check data.len == 4 + # Read 4 bytes from the stream just to make sure we actually + # receive some data. + check (bytesToString await reader.read(4)) == "bbbb" - # Prematurely closes the connection. - httpClient.close() - finally: - httpClient.getBody = true + # Abruptly closes the stream (we have to dig all the way to the transport + # or Chronos will close things "nicely"). + response.connection.reader.tsource.close() - let response = client1.downloadRaw($cid, httpClient = httpClient) - check response.body == repeat("some file contents", 1000) + let response2 = await client1.downloadRaw($cid) + check (await response2.body) == contents diff --git a/tests/integration/testsales.nim b/tests/integration/testsales.nim index 6c5c30d5..2d7a199c 100644 --- a/tests/integration/testsales.nim +++ b/tests/integration/testsales.nim @@ -30,54 +30,63 @@ multinodesuite "Sales": client = clients()[0].client test "node handles new storage availability", salesConfig: - let availability1 = host.postAvailability( - totalSize = 1.uint64, - duration = 2.uint64, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 4.u256, + let availability1 = ( + await host.postAvailability( + totalSize = 1.uint64, + duration = 2.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 4.u256, + ) ).get - let availability2 = host.postAvailability( - totalSize = 4.uint64, - duration = 5.uint64, - minPricePerBytePerSecond = 6.u256, - totalCollateral = 7.u256, + let availability2 = ( + await host.postAvailability( + totalSize = 4.uint64, + duration = 5.uint64, + minPricePerBytePerSecond = 6.u256, + totalCollateral = 7.u256, + ) ).get check availability1 != availability2 test "node lists storage that is for sale", salesConfig: - let availability = host.postAvailability( - totalSize = 1.uint64, - duration = 2.uint64, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 4.u256, + let availability = ( + await host.postAvailability( + totalSize = 
1.uint64, + duration = 2.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 4.u256, + ) ).get - check availability in host.getAvailabilities().get + check availability in (await host.getAvailabilities()).get test "updating non-existing availability", salesConfig: - let nonExistingResponse = host.patchAvailabilityRaw( + let nonExistingResponse = await host.patchAvailabilityRaw( AvailabilityId.example, duration = 100.uint64.some, minPricePerBytePerSecond = 2.u256.some, totalCollateral = 200.u256.some, ) - check nonExistingResponse.status == "404 Not Found" + check nonExistingResponse.status == 404 test "updating availability", salesConfig: - let availability = host.postAvailability( - totalSize = 140000.uint64, - duration = 200.uint64, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 300.u256, + let availability = ( + await host.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) ).get - host.patchAvailability( + await host.patchAvailability( availability.id, duration = 100.uint64.some, minPricePerBytePerSecond = 2.u256.some, totalCollateral = 200.u256.some, ) - let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get + let updatedAvailability = + ((await host.getAvailabilities()).get).findItem(availability).get check updatedAvailability.duration == 100.uint64 check updatedAvailability.minPricePerBytePerSecond == 2 check updatedAvailability.totalCollateral == 200 @@ -85,26 +94,31 @@ multinodesuite "Sales": check updatedAvailability.freeSize == 140000.uint64 test "updating availability - freeSize is not allowed to be changed", salesConfig: - let availability = host.postAvailability( - totalSize = 140000.uint64, - duration = 200.uint64, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 300.u256, + let availability = ( + await host.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + 
minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) ).get let freeSizeResponse = - host.patchAvailabilityRaw(availability.id, freeSize = 110000.uint64.some) - check freeSizeResponse.status == "400 Bad Request" - check "not allowed" in freeSizeResponse.body + await host.patchAvailabilityRaw(availability.id, freeSize = 110000.uint64.some) + check freeSizeResponse.status == 400 + check "not allowed" in (await freeSizeResponse.body) test "updating availability - updating totalSize", salesConfig: - let availability = host.postAvailability( - totalSize = 140000.uint64, - duration = 200.uint64, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 300.u256, + let availability = ( + await host.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) ).get - host.patchAvailability(availability.id, totalSize = 100000.uint64.some) - let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get + await host.patchAvailability(availability.id, totalSize = 100000.uint64.some) + let updatedAvailability = + ((await host.getAvailabilities()).get).findItem(availability).get check updatedAvailability.totalSize == 100000 check updatedAvailability.freeSize == 100000 @@ -115,38 +129,51 @@ multinodesuite "Sales": let minPricePerBytePerSecond = 3.u256 let collateralPerByte = 1.u256 let totalCollateral = originalSize.u256 * collateralPerByte - let availability = host.postAvailability( - totalSize = originalSize, - duration = 20 * 60.uint64, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = totalCollateral, + let availability = ( + await host.postAvailability( + totalSize = originalSize, + duration = 20 * 60.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + ) ).get # Lets create storage request that will utilize some of the availability's space - let cid = client.upload(data).get - let id = 
client.requestStorage( - cid, - duration = 20 * 60.uint64, - pricePerBytePerSecond = minPricePerBytePerSecond, - proofProbability = 3.u256, - expiry = (10 * 60).uint64, - collateralPerByte = collateralPerByte, - nodes = 3, - tolerance = 1, + let cid = (await client.upload(data)).get + let id = ( + await client.requestStorage( + cid, + duration = 20 * 60.uint64, + pricePerBytePerSecond = minPricePerBytePerSecond, + proofProbability = 3.u256, + expiry = (10 * 60).uint64, + collateralPerByte = collateralPerByte, + nodes = 3, + tolerance = 1, + ) ).get - check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) - let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get + check eventually( + await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000 + ) + let updatedAvailability = + ((await host.getAvailabilities()).get).findItem(availability).get check updatedAvailability.totalSize != updatedAvailability.freeSize let utilizedSize = updatedAvailability.totalSize - updatedAvailability.freeSize - let totalSizeResponse = - host.patchAvailabilityRaw(availability.id, totalSize = (utilizedSize - 1).some) - check totalSizeResponse.status == "400 Bad Request" - check "totalSize must be larger then current totalSize" in totalSizeResponse.body + let totalSizeResponse = ( + await host.patchAvailabilityRaw( + availability.id, totalSize = (utilizedSize - 1).some + ) + ) + check totalSizeResponse.status == 400 + check "totalSize must be larger then current totalSize" in + (await totalSizeResponse.body) - host.patchAvailability(availability.id, totalSize = (originalSize + 20000).some) + await host.patchAvailability( + availability.id, totalSize = (originalSize + 20000).some + ) let newUpdatedAvailability = - (host.getAvailabilities().get).findItem(availability).get + ((await host.getAvailabilities()).get).findItem(availability).get check newUpdatedAvailability.totalSize == originalSize + 20000 check 
newUpdatedAvailability.freeSize - updatedAvailability.freeSize == 20000 diff --git a/tests/integration/testupdownload.nim b/tests/integration/testupdownload.nim index 05d3a496..24e6039c 100644 --- a/tests/integration/testupdownload.nim +++ b/tests/integration/testupdownload.nim @@ -9,11 +9,11 @@ twonodessuite "Uploads and downloads": let content1 = "some file contents" let content2 = "some other contents" - let cid1 = client1.upload(content1).get - let cid2 = client2.upload(content2).get + let cid1 = (await client1.upload(content1)).get + let cid2 = (await client2.upload(content2)).get - let resp1 = client1.download(cid1, local = true).get - let resp2 = client2.download(cid2, local = true).get + let resp1 = (await client1.download(cid1, local = true)).get + let resp2 = (await client2.download(cid2, local = true)).get check: content1 == resp1 @@ -23,11 +23,11 @@ twonodessuite "Uploads and downloads": let content1 = "some file contents" let content2 = "some other contents" - let cid1 = client1.upload(content1).get - let cid2 = client2.upload(content2).get + let cid1 = (await client1.upload(content1)).get + let cid2 = (await client2.upload(content2)).get - let resp2 = client1.download(cid2, local = false).get - let resp1 = client2.download(cid1, local = false).get + let resp2 = (await client1.download(cid2, local = false)).get + let resp1 = (await client2.download(cid1, local = false)).get check: content1 == resp1 @@ -35,11 +35,12 @@ twonodessuite "Uploads and downloads": test "node fails retrieving non-existing local file", twoNodesConfig: let content1 = "some file contents" - let cid1 = client1.upload(content1).get # upload to first node - let resp2 = client2.download(cid1, local = true) # try retrieving from second node + let cid1 = (await client1.upload(content1)).get # upload to first node + let resp2 = + await client2.download(cid1, local = true) # try retrieving from second node check: - resp2.error.msg == "404 Not Found" + resp2.error.msg == "404" proc 
checkRestContent(cid: Cid, content: ?!string) = let c = content.tryGet() @@ -67,26 +68,28 @@ twonodessuite "Uploads and downloads": test "node allows downloading only manifest", twoNodesConfig: let content1 = "some file contents" - let cid1 = client1.upload(content1).get + let cid1 = (await client1.upload(content1)).get - let resp2 = client1.downloadManifestOnly(cid1) + let resp2 = await client1.downloadManifestOnly(cid1) checkRestContent(cid1, resp2) test "node allows downloading content without stream", twoNodesConfig: - let content1 = "some file contents" - let cid1 = client1.upload(content1).get + let + content1 = "some file contents" + cid1 = (await client1.upload(content1)).get + resp1 = await client2.downloadNoStream(cid1) - let resp1 = client2.downloadNoStream(cid1) checkRestContent(cid1, resp1) - let resp2 = client2.download(cid1, local = true).get + + let resp2 = (await client2.download(cid1, local = true)).get check: content1 == resp2 test "reliable transfer test", twoNodesConfig: proc transferTest(a: CodexClient, b: CodexClient) {.async.} = let data = await RandomChunker.example(blocks = 8) - let cid = a.upload(data).get - let response = b.download(cid).get + let cid = (await a.upload(data)).get + let response = (await b.download(cid)).get check: @response.mapIt(it.byte) == data diff --git a/tests/integration/testvalidator.nim b/tests/integration/testvalidator.nim index 7f4bc851..0d1a50e8 100644 --- a/tests/integration/testvalidator.nim +++ b/tests/integration/testvalidator.nim @@ -99,14 +99,14 @@ marketplacesuite "Validation": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( + await createAvailabilities( datasetSize.truncate(uint64), duration, collateralPerByte, minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, expiry = 
expiry, @@ -115,12 +115,12 @@ marketplacesuite "Validation": tolerance = ecTolerance, proofProbability = proofProbability, ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId if not eventuallyS( - client0.purchaseStateIs(purchaseId, "started"), + await client0.purchaseStateIs(purchaseId, "started"), timeout = (expiry + 60).int, step = 5, ): @@ -169,14 +169,14 @@ marketplacesuite "Validation": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( + await createAvailabilities( datasetSize.truncate(uint64), duration, collateralPerByte, minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, expiry = expiry, @@ -185,12 +185,12 @@ marketplacesuite "Validation": tolerance = ecTolerance, proofProbability = proofProbability, ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId if not eventuallyS( - client0.purchaseStateIs(purchaseId, "started"), + await client0.purchaseStateIs(purchaseId, "started"), timeout = (expiry + 60).int, step = 5, ): diff --git a/tests/testTaiko.nim b/tests/testTaiko.nim index 8036e8a3..b1555bfb 100644 --- a/tests/testTaiko.nim +++ b/tests/testTaiko.nim @@ -24,7 +24,7 @@ suite "Taiko L2 Integration Tests": ) node1.waitUntilStarted() - let bootstrap = (!node1.client.info())["spr"].getStr() + let bootstrap = (!(await node1.client.info()))["spr"].getStr() node2 = startNode( [ From 9d7b521519329766cee675ece4013614adc7c6ad Mon Sep 17 00:00:00 2001 From: Arnaud Date: Tue, 18 Mar 2025 08:06:46 +0100 Subject: [PATCH 33/40] chore: add missing custom errors (#1134) 
* Add missing custom errors * Separate mock state errors * Remove the Option in the error setters * Wrap the contract errors in MarketError * Remove async raises (needs to address it in another PR) * Wrap contract errors into specific error types * Rename SlotNotFreeError to SlotStateMismatchError --- codex/contracts/market.nim | 36 +++++++++++------ codex/contracts/marketplace.nim | 1 + codex/market.nim | 2 + codex/sales/states/filling.nim | 12 +++--- codex/sales/states/slotreserving.nim | 9 ++--- tests/codex/helpers/mockmarket.nim | 23 +++++++++-- tests/codex/sales/states/testfilling.nim | 40 ++++++++++++++++++- .../codex/sales/states/testslotreserving.nim | 9 +++-- 8 files changed, 100 insertions(+), 32 deletions(-) diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 58495b45..0b846099 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -249,10 +249,16 @@ method fillSlot( requestId slotIndex - await market.approveFunds(collateral) - trace "calling fillSlot on contract" - discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1) - trace "fillSlot transaction completed" + try: + await market.approveFunds(collateral) + trace "calling fillSlot on contract" + discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1) + trace "fillSlot transaction completed" + except Marketplace_SlotNotFree as parent: + raise newException( + SlotStateMismatchError, "Failed to fill slot because the slot is not free", + parent, + ) method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = convertEthersError("Failed to free slot"): @@ -327,14 +333,20 @@ method reserveSlot*( market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ) {.async.} = convertEthersError("Failed to reserve slot"): - discard await market.contract - .reserveSlot( - requestId, - slotIndex, - # reserveSlot runs out of gas for unknown reason, but 100k gas covers it - TransactionOverrides(gasLimit: some 100000.u256), 
- ) - .confirm(1) + try: + discard await market.contract + .reserveSlot( + requestId, + slotIndex, + # reserveSlot runs out of gas for unknown reason, but 100k gas covers it + TransactionOverrides(gasLimit: some 100000.u256), + ) + .confirm(1) + except SlotReservations_ReservationNotAllowed: + raise newException( + SlotReservationNotAllowedError, + "Failed to reserve slot because reservation is not allowed", + ) method canReserveSlot*( market: OnChainMarket, requestId: RequestId, slotIndex: uint64 diff --git a/codex/contracts/marketplace.nim b/codex/contracts/marketplace.nim index 761caada..686414fb 100644 --- a/codex/contracts/marketplace.nim +++ b/codex/contracts/marketplace.nim @@ -53,6 +53,7 @@ type Proofs_ProofAlreadyMarkedMissing* = object of SolidityError Proofs_InvalidProbability* = object of SolidityError Periods_InvalidSecondsPerPeriod* = object of SolidityError + SlotReservations_ReservationNotAllowed* = object of SolidityError proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.} proc token*(marketplace: Marketplace): Address {.contract, view.} diff --git a/codex/market.nim b/codex/market.nim index c5177aeb..dd8e14ba 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -18,6 +18,8 @@ export periods type Market* = ref object of RootObj MarketError* = object of CodexError + SlotStateMismatchError* = object of MarketError + SlotReservationNotAllowedError* = object of MarketError Subscription* = ref object of RootObj OnRequest* = proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].} diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim index 03e2ef2b..13644223 100644 --- a/codex/sales/states/filling.nim +++ b/codex/sales/states/filling.nim @@ -30,6 +30,7 @@ method run*( ): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let market = SalesAgent(machine).context.market + without (request =? 
data.request): raiseAssert "Request not set" @@ -42,17 +43,16 @@ method run*( err: error "Failure attempting to fill slot: unable to calculate collateral", error = err.msg - return + return some State(SaleErrored(error: err)) debug "Filling slot" try: await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) + except SlotStateMismatchError as e: + debug "Slot is already filled, ignoring slot" + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) except MarketError as e: - if e.msg.contains "Slot is not free": - debug "Slot is already filled, ignoring slot" - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) - else: - return some State(SaleErrored(error: e)) + return some State(SaleErrored(error: e)) # other CatchableErrors are handled "automatically" by the SaleState return some State(SaleFilled()) diff --git a/codex/sales/states/slotreserving.nim b/codex/sales/states/slotreserving.nim index a67c51a0..e9ac8dcd 100644 --- a/codex/sales/states/slotreserving.nim +++ b/codex/sales/states/slotreserving.nim @@ -44,12 +44,11 @@ method run*( try: trace "Reserving slot" await market.reserveSlot(data.requestId, data.slotIndex) + except SlotReservationNotAllowedError as e: + debug "Slot cannot be reserved, ignoring", error = e.msg + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) except MarketError as e: - if e.msg.contains "SlotReservations_ReservationNotAllowed": - debug "Slot cannot be reserved, ignoring", error = e.msg - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) - else: - return some State(SaleErrored(error: e)) + return some State(SaleErrored(error: e)) # other CatchableErrors are handled "automatically" by the SaleState trace "Slot successfully reserved" diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index 16806cb2..edf8a62d 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ 
-46,7 +46,8 @@ type subscriptions: Subscriptions config*: MarketplaceConfig canReserveSlot*: bool - reserveSlotThrowError*: ?(ref MarketError) + errorOnReserveSlot*: ?(ref MarketError) + errorOnFillSlot*: ?(ref CatchableError) clock: ?Clock Fulfillment* = object @@ -289,6 +290,9 @@ proc fillSlot*( host: Address, collateral = 0.u256, ) = + if error =? market.errorOnFillSlot: + raise error + let slot = MockSlot( requestId: requestId, slotIndex: slotIndex, @@ -370,7 +374,7 @@ method canProofBeMarkedAsMissing*( method reserveSlot*( market: MockMarket, requestId: RequestId, slotIndex: uint64 ) {.async.} = - if error =? market.reserveSlotThrowError: + if error =? market.errorOnReserveSlot: raise error method canReserveSlot*( @@ -381,8 +385,19 @@ method canReserveSlot*( func setCanReserveSlot*(market: MockMarket, canReserveSlot: bool) = market.canReserveSlot = canReserveSlot -func setReserveSlotThrowError*(market: MockMarket, error: ?(ref MarketError)) = - market.reserveSlotThrowError = error +func setErrorOnReserveSlot*(market: MockMarket, error: ref MarketError) = + market.errorOnReserveSlot = + if error.isNil: + none (ref MarketError) + else: + some error + +func setErrorOnFillSlot*(market: MockMarket, error: ref CatchableError) = + market.errorOnFillSlot = + if error.isNil: + none (ref CatchableError) + else: + some error method subscribeRequests*( market: MockMarket, callback: OnRequest diff --git a/tests/codex/sales/states/testfilling.nim b/tests/codex/sales/states/testfilling.nim index 1a26753d..f746b5a8 100644 --- a/tests/codex/sales/states/testfilling.nim +++ b/tests/codex/sales/states/testfilling.nim @@ -1,18 +1,31 @@ -import pkg/unittest2 import pkg/questionable import pkg/codex/contracts/requests import pkg/codex/sales/states/filling import pkg/codex/sales/states/cancelled import pkg/codex/sales/states/failed +import pkg/codex/sales/states/ignored +import pkg/codex/sales/states/errored +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext 
+import ../../../asynctest import ../../examples import ../../helpers +import ../../helpers/mockmarket +import ../../helpers/mockclock suite "sales state 'filling'": let request = StorageRequest.example let slotIndex = request.ask.slots div 2 var state: SaleFilling + var market: MockMarket + var clock: MockClock + var agent: SalesAgent setup: + clock = MockClock.new() + market = MockMarket.new() + let context = SalesContext(market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) state = SaleFilling.new() test "switches to cancelled state when request expires": @@ -22,3 +35,28 @@ suite "sales state 'filling'": test "switches to failed state when request fails": let next = state.onFailed(request) check !next of SaleFailed + + test "run switches to ignored when slot is not free": + let error = newException( + SlotStateMismatchError, "Failed to fill slot because the slot is not free" + ) + market.setErrorOnFillSlot(error) + market.requested.add(request) + market.slotState[request.slotId(slotIndex)] = SlotState.Filled + + let next = !(await state.run(agent)) + check next of SaleIgnored + check SaleIgnored(next).reprocessSlot == false + check SaleIgnored(next).returnBytes + + test "run switches to errored with other error ": + let error = newException(MarketError, "some error") + market.setErrorOnFillSlot(error) + market.requested.add(request) + market.slotState[request.slotId(slotIndex)] = SlotState.Filled + + let next = !(await state.run(agent)) + check next of SaleErrored + + let errored = SaleErrored(next) + check errored.error == error diff --git a/tests/codex/sales/states/testslotreserving.nim b/tests/codex/sales/states/testslotreserving.nim index d9ecdfc8..0e2e2cc7 100644 --- a/tests/codex/sales/states/testslotreserving.nim +++ b/tests/codex/sales/states/testslotreserving.nim @@ -54,15 +54,16 @@ asyncchecksuite "sales state 'SlotReserving'": test "run switches to errored when slot reservation errors": let error = 
newException(MarketError, "some error") - market.setReserveSlotThrowError(some error) + market.setErrorOnReserveSlot(error) let next = !(await state.run(agent)) check next of SaleErrored let errored = SaleErrored(next) check errored.error == error - test "catches reservation not allowed error": - let error = newException(MarketError, "SlotReservations_ReservationNotAllowed") - market.setReserveSlotThrowError(some error) + test "run switches to ignored when reservation is not allowed": + let error = + newException(SlotReservationNotAllowedError, "Reservation is not allowed") + market.setErrorOnReserveSlot(error) let next = !(await state.run(agent)) check next of SaleIgnored check SaleIgnored(next).reprocessSlot == false From 3a312596bf1b7cc6842047112777488bc9f0e4f8 Mon Sep 17 00:00:00 2001 From: munna0908 <88337208+munna0908@users.noreply.github.com> Date: Fri, 21 Mar 2025 07:41:00 +0530 Subject: [PATCH 34/40] deps: upgrade libp2p & constantine (#1167) * upgrade libp2p and constantine * fix libp2p update issues * add missing vendor package * add missing vendor package --- .gitmodules | 10 ++++++++++ codex/blockexchange/engine/engine.nim | 4 +++- codex/blockexchange/network/network.nim | 8 ++++++-- codex/blockexchange/protobuf/message.nim | 10 ++++------ codex/merkletree/codex/coders.nim | 12 ++++++------ vendor/constantine | 2 +- vendor/nim-codex-dht | 2 +- vendor/nim-libp2p | 2 +- vendor/nim-ngtcp2 | 1 + vendor/nim-quic | 1 + 10 files changed, 34 insertions(+), 18 deletions(-) create mode 160000 vendor/nim-ngtcp2 create mode 160000 vendor/nim-quic diff --git a/.gitmodules b/.gitmodules index ece88749..5cc2bfab 100644 --- a/.gitmodules +++ b/.gitmodules @@ -221,3 +221,13 @@ [submodule "vendor/nph"] path = vendor/nph url = https://github.com/arnetheduck/nph.git +[submodule "vendor/nim-quic"] + path = vendor/nim-quic + url = https://github.com/vacp2p/nim-quic.git + ignore = untracked + branch = master +[submodule "vendor/nim-ngtcp2"] + path = vendor/nim-ngtcp2 + url = 
https://github.com/vacp2p/nim-ngtcp2.git + ignore = untracked + branch = master diff --git a/codex/blockexchange/engine/engine.nim b/codex/blockexchange/engine/engine.nim index befb8ae9..35785cfe 100644 --- a/codex/blockexchange/engine/engine.nim +++ b/codex/blockexchange/engine/engine.nim @@ -678,7 +678,9 @@ proc new*( advertiser: advertiser, ) - proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = + proc peerEventHandler( + peerId: PeerId, event: PeerEvent + ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = if event.kind == PeerEventKind.Joined: await self.setupPeer(peerId) else: diff --git a/codex/blockexchange/network/network.nim b/codex/blockexchange/network/network.nim index 26c07445..d4754110 100644 --- a/codex/blockexchange/network/network.nim +++ b/codex/blockexchange/network/network.nim @@ -323,7 +323,9 @@ method init*(self: BlockExcNetwork) = ## Perform protocol initialization ## - proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = + proc peerEventHandler( + peerId: PeerId, event: PeerEvent + ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = if event.kind == PeerEventKind.Joined: self.setupPeer(peerId) else: @@ -332,7 +334,9 @@ method init*(self: BlockExcNetwork) = self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) - proc handler(conn: Connection, proto: string) {.async.} = + proc handler( + conn: Connection, proto: string + ): Future[void] {.async: (raises: [CancelledError]).} = let peerId = conn.peerId let blockexcPeer = self.getOrCreatePeer(peerId) await blockexcPeer.readLoop(conn) # attach read loop diff --git a/codex/blockexchange/protobuf/message.nim b/codex/blockexchange/protobuf/message.nim index 73cb60f1..4db89729 100644 --- a/codex/blockexchange/protobuf/message.nim +++ b/codex/blockexchange/protobuf/message.nim @@ -97,7 +97,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) = 
pb.write(field, ipb) proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) = - var ipb = initProtoBuffer(maxSize = MaxBlockSize) + var ipb = initProtoBuffer() ipb.write(1, value.blk.cid.data.buffer) ipb.write(2, value.blk.data) ipb.write(3, value.address) @@ -128,7 +128,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) = pb.write(field, ipb) proc protobufEncode*(value: Message): seq[byte] = - var ipb = initProtoBuffer(maxSize = MaxMessageSize) + var ipb = initProtoBuffer() ipb.write(1, value.wantList) for v in value.payload: ipb.write(3, v) @@ -254,16 +254,14 @@ proc decode*( proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] = var value = Message() - pb = initProtoBuffer(msg, maxSize = MaxMessageSize) + pb = initProtoBuffer(msg) ipb: ProtoBuffer sublist: seq[seq[byte]] if ?pb.getField(1, ipb): value.wantList = ?WantList.decode(ipb) if ?pb.getRepeatedField(3, sublist): for item in sublist: - value.payload.add( - ?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)) - ) + value.payload.add(?BlockDelivery.decode(initProtoBuffer(item))) if ?pb.getRepeatedField(4, sublist): for item in sublist: value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item))) diff --git a/codex/merkletree/codex/coders.nim b/codex/merkletree/codex/coders.nim index b8209991..1d50707c 100644 --- a/codex/merkletree/codex/coders.nim +++ b/codex/merkletree/codex/coders.nim @@ -27,11 +27,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint const MaxMerkleProofSize = 1.MiBs.uint proc encode*(self: CodexTree): seq[byte] = - var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize) + var pb = initProtoBuffer() pb.write(1, self.mcodec.uint64) pb.write(2, self.leavesCount.uint64) for node in self.nodes: - var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize) + var nodesPb = initProtoBuffer() nodesPb.write(1, node) nodesPb.finish() pb.write(3, nodesPb) @@ -40,7 +40,7 @@ proc encode*(self: CodexTree): seq[byte] = 
pb.buffer proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree = - var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize) + var pb = initProtoBuffer(data) var mcodecCode: uint64 var leavesCount: uint64 discard ?pb.getField(1, mcodecCode).mapFailure @@ -63,13 +63,13 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree = CodexTree.fromNodes(mcodec, nodes, leavesCount.int) proc encode*(self: CodexProof): seq[byte] = - var pb = initProtoBuffer(maxSize = MaxMerkleProofSize) + var pb = initProtoBuffer() pb.write(1, self.mcodec.uint64) pb.write(2, self.index.uint64) pb.write(3, self.nleaves.uint64) for node in self.path: - var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize) + var nodesPb = initProtoBuffer() nodesPb.write(1, node) nodesPb.finish() pb.write(4, nodesPb) @@ -78,7 +78,7 @@ proc encode*(self: CodexProof): seq[byte] = pb.buffer proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof = - var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize) + var pb = initProtoBuffer(data) var mcodecCode: uint64 var index: uint64 var nleaves: uint64 diff --git a/vendor/constantine b/vendor/constantine index bc3845aa..8d6a6a38 160000 --- a/vendor/constantine +++ b/vendor/constantine @@ -1 +1 @@ -Subproject commit bc3845aa492b52f7fef047503b1592e830d1a774 +Subproject commit 8d6a6a38b90fb8ee3ec2230839773e69aab36d80 diff --git a/vendor/nim-codex-dht b/vendor/nim-codex-dht index 4bd3a39e..f6eef1ac 160000 --- a/vendor/nim-codex-dht +++ b/vendor/nim-codex-dht @@ -1 +1 @@ -Subproject commit 4bd3a39e0030f8ee269ef217344b6b59ec2be6dc +Subproject commit f6eef1ac95c70053b2518f1e3909c909ed8701a6 diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index 036e110a..c08d8073 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit 036e110a6080fba1a1662c58cfd8c21f9a548021 +Subproject commit c08d80734989b028b3d1705f2188d783a343aac0 diff --git a/vendor/nim-ngtcp2 b/vendor/nim-ngtcp2 new file mode 160000 index 
00000000..6834f475 --- /dev/null +++ b/vendor/nim-ngtcp2 @@ -0,0 +1 @@ +Subproject commit 6834f4756b6af58356ac9c4fef3d71db3c3ae5fe diff --git a/vendor/nim-quic b/vendor/nim-quic new file mode 160000 index 00000000..ddcb31ff --- /dev/null +++ b/vendor/nim-quic @@ -0,0 +1 @@ +Subproject commit ddcb31ffb74b5460ab37fd13547eca90594248bc From 110147d8efbb1411fa4cb393125ada1b2e461be1 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Fri, 21 Mar 2025 11:23:07 -0600 Subject: [PATCH 35/40] monitor background tasks on streaming dataset (#1164) --- codex/node.nim | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/codex/node.nim b/codex/node.nim index 203e034a..9932deb6 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -271,6 +271,8 @@ proc streamEntireDataset( ## trace "Retrieving blocks from manifest", manifestCid + var jobs: seq[Future[void]] + let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false)) if manifest.protected: # Retrieve, decode and save to the local store all EС groups proc erasureJob(): Future[void] {.async: (raises: []).} = @@ -284,14 +286,25 @@ proc streamEntireDataset( except CatchableError as exc: trace "Error erasure decoding manifest", manifestCid, exc = exc.msg - self.trackedFutures.track(erasureJob()) + jobs.add(erasureJob()) - self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false)) - # prefetch task should not fetch from local store + jobs.add(self.fetchDatasetAsync(manifest)) + + # Monitor stream completion and cancel background jobs when done + proc monitorStream() {.async: (raises: []).} = + try: + await stream.join() + except CatchableError as exc: + warn "Stream failed", exc = exc.msg + finally: + await noCancel allFutures(jobs.mapIt(it.cancelAndWait)) + + self.trackedFutures.track(monitorStream()) # Retrieve all blocks of the dataset sequentially from the local store or network trace "Creating store stream for manifest", manifestCid - 
LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success + + stream.success proc retrieve*( self: CodexNodeRef, cid: Cid, local: bool = true From 709a8648fd9854253b5fc0c55adef1c5177c5ae3 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 24 Mar 2025 12:53:34 +0100 Subject: [PATCH 36/40] chore: add request validations (#1144) * Add request validations * Define expiry as required field in storage request params and fix tests * Fix error messages * Enable logs to figure out the issue with recurring failing test on macos * Add custom errors raised by contract * Remove custom error non existing anymore * Update asynctest module * Update timer tests after updating asynctest --- codex/contracts/marketplace.nim | 5 +- codex/rest/api.nim | 46 ++++++++++----- codex/rest/json.nim | 2 +- openapi.yaml | 2 + tests/codex/utils/testtimer.nim | 10 ++-- tests/integration/testpurchasing.nim | 9 +-- tests/integration/testrestapi.nim | 88 ++++++++++++++++++++++++++-- tests/integration/testsales.nim | 14 ++++- vendor/asynctest | 2 +- 9 files changed, 143 insertions(+), 35 deletions(-) diff --git a/codex/contracts/marketplace.nim b/codex/contracts/marketplace.nim index 686414fb..11eca5be 100644 --- a/codex/contracts/marketplace.nim +++ b/codex/contracts/marketplace.nim @@ -51,7 +51,6 @@ type Proofs_ProofNotMissing* = object of SolidityError Proofs_ProofNotRequired* = object of SolidityError Proofs_ProofAlreadyMarkedMissing* = object of SolidityError - Proofs_InvalidProbability* = object of SolidityError Periods_InvalidSecondsPerPeriod* = object of SolidityError SlotReservations_ReservationNotAllowed* = object of SolidityError @@ -68,7 +67,9 @@ proc requestStorage*( errors: [ Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists, Marketplace_InvalidExpiry, Marketplace_InsufficientSlots, - Marketplace_InvalidMaxSlotLoss, + Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration, + Marketplace_InsufficientProofProbability, 
Marketplace_InsufficientCollateral, + Marketplace_InsufficientReward, Marketplace_InvalidCid, ] .} diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 553cb91c..5d813188 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -652,10 +652,36 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = without params =? StorageRequestParams.fromJson(body), error: return RestApiResponse.error(Http400, error.msg, headers = headers) + let expiry = params.expiry + + if expiry <= 0 or expiry >= params.duration: + return RestApiResponse.error( + Http422, + "Expiry must be greater than zero and less than the request's duration", + headers = headers, + ) + + if params.proofProbability <= 0: + return RestApiResponse.error( + Http422, "Proof probability must be greater than zero", headers = headers + ) + + if params.collateralPerByte <= 0: + return RestApiResponse.error( + Http422, "Collateral per byte must be greater than zero", headers = headers + ) + + if params.pricePerBytePerSecond <= 0: + return RestApiResponse.error( + Http422, + "Price per byte per second must be greater than zero", + headers = headers, + ) + let requestDurationLimit = await contracts.purchasing.market.requestDurationLimit if params.duration > requestDurationLimit: return RestApiResponse.error( - Http400, + Http422, "Duration exceeds limit of " & $requestDurationLimit & " seconds", headers = headers, ) @@ -665,13 +691,13 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = if tolerance == 0: return RestApiResponse.error( - Http400, "Tolerance needs to be bigger then zero", headers = headers + Http422, "Tolerance needs to be bigger then zero", headers = headers ) # prevent underflow if tolerance > nodes: return RestApiResponse.error( - Http400, + Http422, "Invalid parameters: `tolerance` cannot be greater than `nodes`", headers = headers, ) @@ -682,21 +708,11 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = # ensure leopard constrainst of 
1 < K ≥ M if ecK <= 1 or ecK < ecM: return RestApiResponse.error( - Http400, + Http422, "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`", headers = headers, ) - without expiry =? params.expiry: - return RestApiResponse.error(Http400, "Expiry required", headers = headers) - - if expiry <= 0 or expiry >= params.duration: - return RestApiResponse.error( - Http400, - "Expiry needs value bigger then zero and smaller then the request's duration", - headers = headers, - ) - without purchaseId =? await node.requestStorage( cid, params.duration, params.proofProbability, nodes, tolerance, @@ -704,7 +720,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) = ), error: if error of InsufficientBlocksError: return RestApiResponse.error( - Http400, + Http422, "Dataset too small for erasure parameters, need at least " & $(ref InsufficientBlocksError)(error).minSize.int & " bytes", headers = headers, diff --git a/codex/rest/json.nim b/codex/rest/json.nim index c221ba73..50c8b514 100644 --- a/codex/rest/json.nim +++ b/codex/rest/json.nim @@ -17,7 +17,7 @@ type proofProbability* {.serialize.}: UInt256 pricePerBytePerSecond* {.serialize.}: UInt256 collateralPerByte* {.serialize.}: UInt256 - expiry* {.serialize.}: ?uint64 + expiry* {.serialize.}: uint64 nodes* {.serialize.}: ?uint tolerance* {.serialize.}: ?uint diff --git a/openapi.yaml b/openapi.yaml index 53a908a3..ad1b166b 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -800,6 +800,8 @@ paths: type: string "400": description: Invalid or missing Request ID + "422": + description: The storage request parameters are not valid "404": description: Request ID not found "503": diff --git a/tests/codex/utils/testtimer.nim b/tests/codex/utils/testtimer.nim index 303c43fb..2f356df9 100644 --- a/tests/codex/utils/testtimer.nim +++ b/tests/codex/utils/testtimer.nim @@ -52,21 +52,21 @@ asyncchecksuite "Timer": test "Start timer1 should execute callback": startNumbersTimer() - check eventually 
output == "0" + check eventually(output == "0", pollInterval = 10) test "Start timer1 should execute callback multiple times": startNumbersTimer() - check eventually output == "012" + check eventually(output == "012", pollInterval = 10) test "Starting timer1 multiple times has no impact": startNumbersTimer() startNumbersTimer() startNumbersTimer() - check eventually output == "01234" + check eventually(output == "01234", pollInterval = 10) test "Stop timer1 should stop execution of the callback": startNumbersTimer() - check eventually output == "012" + check eventually(output == "012", pollInterval = 10) await timer1.stop() await sleepAsync(30.milliseconds) let stoppedOutput = output @@ -81,4 +81,4 @@ asyncchecksuite "Timer": test "Starting both timers should execute callbacks sequentially": startNumbersTimer() startLettersTimer() - check eventually output == "0a1b2c3d4e" + check eventually(output == "0a1b2c3d4e", pollInterval = 10) diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim index e5adebe2..ba8dd190 100644 --- a/tests/integration/testpurchasing.nim +++ b/tests/integration/testpurchasing.nim @@ -123,8 +123,9 @@ twonodessuite "Purchasing": proofProbability = 3.u256, collateralPerByte = 1.u256, ) - check responseMissing.status == 400 - check (await responseMissing.body) == "Expiry required" + check responseMissing.status == 422 + check (await responseMissing.body) == + "Expiry must be greater than zero and less than the request's duration" let responseBefore = await client1.requestStorageRaw( cid, @@ -134,6 +135,6 @@ twonodessuite "Purchasing": collateralPerByte = 1.u256, expiry = 10.uint64, ) - check responseBefore.status == 400 - check "Expiry needs value bigger then zero and smaller then the request's duration" in + check responseBefore.status == 422 + check "Expiry must be greater than zero and less than the request's duration" in (await responseBefore.body) diff --git a/tests/integration/testrestapi.nim 
b/tests/integration/testrestapi.nim index 761eda31..e7e185b8 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -69,7 +69,7 @@ twonodessuite "REST API": ) check: - response.status == 400 + response.status == 422 (await response.body) == "Dataset too small for erasure parameters, need at least " & $(2 * DefaultBlockSize.int) & " bytes" @@ -109,7 +109,7 @@ twonodessuite "REST API": ) ) - check responseBefore.status == 400 + check responseBefore.status == 422 check (await responseBefore.body) == "Tolerance needs to be bigger then zero" test "request storage fails if duration exceeds limit", twoNodesConfig: @@ -131,7 +131,7 @@ twonodessuite "REST API": ) ) - check responseBefore.status == 400 + check responseBefore.status == 422 check "Duration exceeds limit of" in (await responseBefore.body) test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig: @@ -154,7 +154,7 @@ twonodessuite "REST API": ) ) - check responseBefore.status == 400 + check responseBefore.status == 422 check (await responseBefore.body) == "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" @@ -179,10 +179,88 @@ twonodessuite "REST API": ) ) - check responseBefore.status == 400 + check responseBefore.status == 422 check (await responseBefore.body) == "Invalid parameters: `tolerance` cannot be greater than `nodes`" + test "request storage fails if expiry is zero", twoNodesConfig: + let data = await RandomChunker.example(blocks = 2) + let cid = (await client1.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 0.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 1 + + var responseBefore = await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await 
responseBefore.body) == + "Expiry must be greater than zero and less than the request's duration" + + test "request storage fails if proof probability is zero", twoNodesConfig: + let data = await RandomChunker.example(blocks = 2) + let cid = (await client1.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 0.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 1 + + var responseBefore = await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Proof probability must be greater than zero" + + test "request storage fails if collareral per byte is zero", twoNodesConfig: + let data = await RandomChunker.example(blocks = 2) + let cid = (await client1.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 0.u256 + let nodes = 3 + let tolerance = 1 + + var responseBefore = await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Collateral per byte must be greater than zero" + + test "request storage fails if price per byte per second is zero", twoNodesConfig: + let data = await RandomChunker.example(blocks = 2) + let cid = (await client1.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 0.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 1 + + var responseBefore = await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + 
check responseBefore.status == 422 + check (await responseBefore.body) == + "Price per byte per second must be greater than zero" + for ecParams in @[ (minBlocks: 2, nodes: 3, tolerance: 1), (minBlocks: 3, nodes: 5, tolerance: 2) ]: diff --git a/tests/integration/testsales.nim b/tests/integration/testsales.nim index 2d7a199c..9bf8a97c 100644 --- a/tests/integration/testsales.nim +++ b/tests/integration/testsales.nim @@ -16,8 +16,18 @@ proc findItem[T](items: seq[T], item: T): ?!T = multinodesuite "Sales": let salesConfig = NodeConfigs( - clients: CodexConfigs.init(nodes = 1).some, - providers: CodexConfigs.init(nodes = 1).some, + clients: CodexConfigs + .init(nodes = 1) + .withLogFile() + .withLogTopics( + "node", "marketplace", "sales", "reservations", "node", "proving", "clock" + ).some, + providers: CodexConfigs + .init(nodes = 1) + .withLogFile() + .withLogTopics( + "node", "marketplace", "sales", "reservations", "node", "proving", "clock" + ).some, ) let minPricePerBytePerSecond = 1.u256 diff --git a/vendor/asynctest b/vendor/asynctest index 5154c0d7..73c08f77 160000 --- a/vendor/asynctest +++ b/vendor/asynctest @@ -1 +1 @@ -Subproject commit 5154c0d79dd8bb086ab418cc659e923330ac24f2 +Subproject commit 73c08f77afc5cc2a5628d00f915b97bf72f70c9b From a0d6fbaf0248c36782a14bf95a6771861ae1034e Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 24 Mar 2025 16:47:05 +0100 Subject: [PATCH 37/40] chore(marketplace) - fix the http error codes when validating the availability requests (#1104) * Use 422 http code when there is a validation error * Update the open api description * Fix typo * Add more tests for total size * Catch CancelledError because TrackedFuture raise no error * Split rest api validation test to a new file * Change the way of testing negative numbers * Rename client variable and fix test status code * Try to reduce the number of requests in CI when asserting in tests * Fix rebase and remove safeEventually --- codex/rest/api.nim | 12 +- codex/sales.nim | 8 
+- openapi.yaml | 4 +- tests/helpers.nim | 2 +- tests/integration/codexclient.nim | 23 +- tests/integration/multinodes.nim | 17 +- tests/integration/testrestapi.nim | 203 ----------- tests/integration/testrestapivalidation.nim | 368 ++++++++++++++++++++ tests/integration/testsales.nim | 26 +- tests/testIntegration.nim | 1 + 10 files changed, 418 insertions(+), 246 deletions(-) create mode 100644 tests/integration/testrestapivalidation.nim diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 5d813188..243d4ed6 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -475,7 +475,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = if restAv.totalSize == 0: return RestApiResponse.error( - Http400, "Total size must be larger then zero", headers = headers + Http422, "Total size must be larger then zero", headers = headers ) if not reservations.hasAvailable(restAv.totalSize): @@ -548,17 +548,23 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = return RestApiResponse.error(Http500, error.msg) if isSome restAv.freeSize: - return RestApiResponse.error(Http400, "Updating freeSize is not allowed") + return RestApiResponse.error(Http422, "Updating freeSize is not allowed") if size =? 
restAv.totalSize: + if size == 0: + return RestApiResponse.error(Http422, "Total size must be larger then zero") + # we don't allow lowering the totalSize bellow currently utilized size if size < (availability.totalSize - availability.freeSize): return RestApiResponse.error( - Http400, + Http422, "New totalSize must be larger then current totalSize - freeSize, which is currently: " & $(availability.totalSize - availability.freeSize), ) + if not reservations.hasAvailable(size): + return RestApiResponse.error(Http422, "Not enough storage quota") + availability.freeSize += size - availability.totalSize availability.totalSize = size diff --git a/codex/sales.nim b/codex/sales.nim index 998a2967..a4a174c1 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -374,13 +374,13 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) = if err =? queue.push(slotQueueItem).errorOption: if err of SlotQueueItemExistsError: - error "Failed to push item to queue becaue it already exists", + error "Failed to push item to queue because it already exists", error = err.msgDetail elif err of QueueNotRunningError: - warn "Failed to push item to queue becaue queue is not running", + warn "Failed to push item to queue because queue is not running", error = err.msgDetail - except CatchableError as e: - warn "Failed to add slot to queue", error = e.msg + except CancelledError as e: + trace "sales.addSlotToQueue was cancelled" # We could get rid of this by adding the storage ask in the SlotFreed event, # so we would not need to call getRequest to get the collateralPerSlot. 
diff --git a/openapi.yaml b/openapi.yaml index ad1b166b..c2088cc5 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -704,7 +704,7 @@ paths: "400": description: Invalid data input "422": - description: Not enough node's storage quota available + description: The provided parameters did not pass validation "500": description: Error reserving availability "503": @@ -737,7 +737,7 @@ paths: "404": description: Availability not found "422": - description: Not enough node's storage quota available + description: The provided parameters did not pass validation "500": description: Error reserving availability "503": diff --git a/tests/helpers.nim b/tests/helpers.nim index 82b544f1..b48b787e 100644 --- a/tests/helpers.nim +++ b/tests/helpers.nim @@ -1,7 +1,7 @@ import helpers/multisetup import helpers/trackers import helpers/templeveldb - +import std/times import std/sequtils, chronos export multisetup, trackers, templeveldb diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index ef76b577..5d5f0cc2 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -45,7 +45,7 @@ proc request( ).get .send() -proc post( +proc post*( self: CodexClient, url: string, body: string = "", @@ -69,7 +69,7 @@ proc delete( .} = return self.request(MethodDelete, url, headers = headers) -proc patch( +proc patch*( self: CodexClient, url: string, body: string = "", @@ -290,11 +290,11 @@ proc getSalesAgent*( except CatchableError as e: return failure e.msg -proc postAvailability*( +proc postAvailabilityRaw*( client: CodexClient, totalSize, duration: uint64, minPricePerBytePerSecond, totalCollateral: UInt256, -): Future[?!Availability] {.async: (raises: [CancelledError, HttpError]).} = +): Future[HttpClientResponseRef] {.async: (raises: [CancelledError, HttpError]).} = ## Post sales availability endpoint ## let url = client.baseurl & "/sales/availability" @@ -305,7 +305,17 @@ proc postAvailability*( "minPricePerBytePerSecond": 
minPricePerBytePerSecond, "totalCollateral": totalCollateral, } - let response = await client.post(url, $json) + + return await client.post(url, $json) + +proc postAvailability*( + client: CodexClient, + totalSize, duration: uint64, + minPricePerBytePerSecond, totalCollateral: UInt256, +): Future[?!Availability] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.postAvailabilityRaw( + totalSize, duration, minPricePerBytePerSecond, totalCollateral + ) let body = await response.body doAssert response.status == 201, @@ -389,3 +399,6 @@ proc requestId*( client: CodexClient, id: PurchaseId ): Future[?RequestId] {.async: (raises: [CancelledError, HttpError]).} = return (await client.getPurchase(id)).option .? requestId + +proc buildUrl*(client: CodexClient, path: string): string = + return client.baseurl & path diff --git a/tests/integration/multinodes.nim b/tests/integration/multinodes.nim index 0003b216..4b183674 100644 --- a/tests/integration/multinodes.nim +++ b/tests/integration/multinodes.nim @@ -37,10 +37,12 @@ type MultiNodeSuiteError = object of CatchableError +const jsonRpcProviderUrl* = "http://127.0.0.1:8545" + proc raiseMultiNodeSuiteError(msg: string) = raise newException(MultiNodeSuiteError, msg) -proc nextFreePort(startPort: int): Future[int] {.async.} = +proc nextFreePort*(startPort: int): Future[int] {.async.} = proc client(server: StreamServer, transp: StreamTransport) {.async.} = await transp.closeWait() @@ -60,6 +62,15 @@ proc nextFreePort(startPort: int): Future[int] {.async.} = trace "port is not free", port inc port +proc sanitize(pathSegment: string): string = + var sanitized = pathSegment + for invalid in invalidFilenameChars.items: + sanitized = sanitized.replace(invalid, '_').replace(' ', '_') + sanitized + +proc getTempDirName*(starttime: string, role: Role, roleIdx: int): string = + getTempDir() / "Codex" / sanitize($starttime) / sanitize($role & "_" & $roleIdx) + template multinodesuite*(name: string, body: 
untyped) = asyncchecksuite name: # Following the problem described here: @@ -82,7 +93,6 @@ template multinodesuite*(name: string, body: untyped) = # .withEthProvider("ws://localhost:8545") # .some, # ... - let jsonRpcProviderUrl = "http://127.0.0.1:8545" var running {.inject, used.}: seq[RunningNode] var bootstrapNodes: seq[string] let starttime = now().format("yyyy-MM-dd'_'HH:mm:ss") @@ -148,8 +158,7 @@ template multinodesuite*(name: string, body: untyped) = raiseMultiNodeSuiteError "Cannot start node at nodeIdx " & $nodeIdx & ", not enough eth accounts." - let datadir = - getTempDir() / "Codex" / sanitize($starttime) / sanitize($role & "_" & $roleIdx) + let datadir = getTempDirName(starttime, role, roleIdx) try: if config.logFile.isSome: diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index e7e185b8..415658c1 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -55,25 +55,6 @@ twonodessuite "REST API": check: [cid1, cid2].allIt(it in list.content.mapIt(it.cid)) - test "request storage fails for datasets that are too small", twoNodesConfig: - let cid = (await client1.upload("some file contents")).get - let response = ( - await client1.requestStorageRaw( - cid, - duration = 10.uint64, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - collateralPerByte = 1.u256, - expiry = 9.uint64, - ) - ) - - check: - response.status == 422 - (await response.body) == - "Dataset too small for erasure parameters, need at least " & - $(2 * DefaultBlockSize.int) & " bytes" - test "request storage succeeds for sufficiently sized datasets", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) let cid = (await client1.upload(data)).get @@ -91,176 +72,6 @@ twonodessuite "REST API": check: response.status == 200 - test "request storage fails if tolerance is zero", twoNodesConfig: - let data = await RandomChunker.example(blocks = 2) - let cid = (await client1.upload(data)).get - let duration 
= 100.uint64 - let pricePerBytePerSecond = 1.u256 - let proofProbability = 3.u256 - let expiry = 30.uint64 - let collateralPerByte = 1.u256 - let nodes = 3 - let tolerance = 0 - - var responseBefore = ( - await client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, - ) - ) - - check responseBefore.status == 422 - check (await responseBefore.body) == "Tolerance needs to be bigger then zero" - - test "request storage fails if duration exceeds limit", twoNodesConfig: - let data = await RandomChunker.example(blocks = 2) - let cid = (await client1.upload(data)).get - let duration = (31 * 24 * 60 * 60).uint64 - # 31 days TODO: this should not be hardcoded, but waits for https://github.com/codex-storage/nim-codex/issues/1056 - let proofProbability = 3.u256 - let expiry = 30.uint - let collateralPerByte = 1.u256 - let nodes = 3 - let tolerance = 2 - let pricePerBytePerSecond = 1.u256 - - var responseBefore = ( - await client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, - ) - ) - - check responseBefore.status == 422 - check "Duration exceeds limit of" in (await responseBefore.body) - - test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig: - let data = await RandomChunker.example(blocks = 2) - let cid = (await client1.upload(data)).get - let duration = 100.uint64 - let pricePerBytePerSecond = 1.u256 - let proofProbability = 3.u256 - let expiry = 30.uint64 - let collateralPerByte = 1.u256 - let ecParams = @[(1, 1), (2, 1), (3, 2), (3, 3)] - - for ecParam in ecParams: - let (nodes, tolerance) = ecParam - - var responseBefore = ( - await client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, - ) - ) - - check responseBefore.status == 422 - check (await responseBefore.body) == - "Invalid 
parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" - - test "request storage fails if tolerance > nodes (underflow protection)", - twoNodesConfig: - let data = await RandomChunker.example(blocks = 2) - let cid = (await client1.upload(data)).get - let duration = 100.uint64 - let pricePerBytePerSecond = 1.u256 - let proofProbability = 3.u256 - let expiry = 30.uint64 - let collateralPerByte = 1.u256 - let ecParams = @[(0, 1), (1, 2), (2, 3)] - - for ecParam in ecParams: - let (nodes, tolerance) = ecParam - - var responseBefore = ( - await client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, - ) - ) - - check responseBefore.status == 422 - check (await responseBefore.body) == - "Invalid parameters: `tolerance` cannot be greater than `nodes`" - - test "request storage fails if expiry is zero", twoNodesConfig: - let data = await RandomChunker.example(blocks = 2) - let cid = (await client1.upload(data)).get - let duration = 100.uint64 - let pricePerBytePerSecond = 1.u256 - let proofProbability = 3.u256 - let expiry = 0.uint64 - let collateralPerByte = 1.u256 - let nodes = 3 - let tolerance = 1 - - var responseBefore = await client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes.uint, tolerance.uint, - ) - - check responseBefore.status == 422 - check (await responseBefore.body) == - "Expiry must be greater than zero and less than the request's duration" - - test "request storage fails if proof probability is zero", twoNodesConfig: - let data = await RandomChunker.example(blocks = 2) - let cid = (await client1.upload(data)).get - let duration = 100.uint64 - let pricePerBytePerSecond = 1.u256 - let proofProbability = 0.u256 - let expiry = 30.uint64 - let collateralPerByte = 1.u256 - let nodes = 3 - let tolerance = 1 - - var responseBefore = await client1.requestStorageRaw( - cid, duration, 
pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes.uint, tolerance.uint, - ) - - check responseBefore.status == 422 - check (await responseBefore.body) == "Proof probability must be greater than zero" - - test "request storage fails if collareral per byte is zero", twoNodesConfig: - let data = await RandomChunker.example(blocks = 2) - let cid = (await client1.upload(data)).get - let duration = 100.uint64 - let pricePerBytePerSecond = 1.u256 - let proofProbability = 3.u256 - let expiry = 30.uint64 - let collateralPerByte = 0.u256 - let nodes = 3 - let tolerance = 1 - - var responseBefore = await client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes.uint, tolerance.uint, - ) - - check responseBefore.status == 422 - check (await responseBefore.body) == "Collateral per byte must be greater than zero" - - test "request storage fails if price per byte per second is zero", twoNodesConfig: - let data = await RandomChunker.example(blocks = 2) - let cid = (await client1.upload(data)).get - let duration = 100.uint64 - let pricePerBytePerSecond = 0.u256 - let proofProbability = 3.u256 - let expiry = 30.uint64 - let collateralPerByte = 1.u256 - let nodes = 3 - let tolerance = 1 - - var responseBefore = await client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes.uint, tolerance.uint, - ) - - check responseBefore.status == 422 - check (await responseBefore.body) == - "Price per byte per second must be greater than zero" - for ecParams in @[ (minBlocks: 2, nodes: 3, tolerance: 1), (minBlocks: 3, nodes: 5, tolerance: 2) ]: @@ -306,20 +117,6 @@ twonodessuite "REST API": check response.status == 200 check (await response.body) != "" - test "upload fails if content disposition contains bad filename", twoNodesConfig: - let headers = @[("Content-Disposition", "attachment; filename=\"exam*ple.txt\"")] - let response = await 
client1.uploadRaw("some file contents", headers) - - check response.status == 422 - check (await response.body) == "The filename is not valid." - - test "upload fails if content type is invalid", twoNodesConfig: - let headers = @[("Content-Type", "hello/world")] - let response = await client1.uploadRaw("some file contents", headers) - - check response.status == 422 - check (await response.body) == "The MIME type 'hello/world' is not valid." - test "node retrieve the metadata", twoNodesConfig: let headers = @[ diff --git a/tests/integration/testrestapivalidation.nim b/tests/integration/testrestapivalidation.nim new file mode 100644 index 00000000..00caefdd --- /dev/null +++ b/tests/integration/testrestapivalidation.nim @@ -0,0 +1,368 @@ +import std/httpclient +import std/times +import pkg/ethers +import pkg/codex/manifest +import pkg/codex/conf +import pkg/codex/contracts +from pkg/codex/stores/repostore/types import DefaultQuotaBytes +import ../asynctest +import ../checktest +import ../examples +import ../codex/examples +import ./codexconfig +import ./codexprocess + +from ./multinodes import Role, getTempDirName, jsonRpcProviderUrl, nextFreePort + +# This suite allows to run fast the basic rest api validation. +# It starts only one node for all the checks in order to speed up +# the execution. 
+asyncchecksuite "Rest API validation": + var node: CodexProcess + var config = CodexConfigs.init(nodes = 1).configs[0] + let starttime = now().format("yyyy-MM-dd'_'HH:mm:ss") + let nodexIdx = 0 + let datadir = getTempDirName(starttime, Role.Client, nodexIdx) + + config.addCliOption("--api-port", $(waitFor nextFreePort(8081))) + config.addCliOption("--data-dir", datadir) + config.addCliOption("--nat", "none") + config.addCliOption("--listen-addrs", "/ip4/127.0.0.1/tcp/0") + config.addCliOption("--disc-port", $(waitFor nextFreePort(8081))) + config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl) + config.addCliOption(StartUpCmd.persistence, "--eth-account", $EthAddress.example) + + node = + waitFor CodexProcess.startNode(config.cliArgs, config.debugEnabled, $Role.Client) + + waitFor node.waitUntilStarted() + + let client = node.client() + + test "should return 422 when attempting delete of non-existing dataset": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 0 + + var responseBefore = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Tolerance needs to be bigger then zero" + + test "request storage fails for datasets that are too small": + let cid = (await client.upload("some file contents")).get + let response = ( + await client.requestStorageRaw( + cid, + duration = 10.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + expiry = 9.uint64, + ) + ) + + check: + response.status == 422 + (await response.body) == + "Dataset too small for erasure parameters, need at least " & + $(2 
* DefaultBlockSize.int) & " bytes" + + test "request storage fails if nodes and tolerance aren't correct": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let ecParams = @[(1, 1), (2, 1), (3, 2), (3, 3)] + + for ecParam in ecParams: + let (nodes, tolerance) = ecParam + + var responseBefore = ( + await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == + "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" + + test "request storage fails if tolerance > nodes (underflow protection)": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 0 + + var responseBefore = ( + await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Tolerance needs to be bigger then zero" + + test "upload fails if content disposition contains bad filename": + let headers = @[("Content-Disposition", "attachment; filename=\"exam*ple.txt\"")] + let response = await client.uploadRaw("some file contents", headers) + + check response.status == 422 + check (await response.body) == "The filename is not valid." 
+ + test "upload fails if content type is invalid": + let headers = @[("Content-Type", "hello/world")] + let response = await client.uploadRaw("some file contents", headers) + + check response.status == 422 + check (await response.body) == "The MIME type 'hello/world' is not valid." + + test "updating non-existing availability": + let nonExistingResponse = await client.patchAvailabilityRaw( + AvailabilityId.example, + duration = 100.uint64.some, + minPricePerBytePerSecond = 2.u256.some, + totalCollateral = 200.u256.some, + ) + check nonExistingResponse.status == 404 + + test "updating availability - freeSize is not allowed to be changed": + let availability = ( + await client.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + ).get + let freeSizeResponse = + await client.patchAvailabilityRaw(availability.id, freeSize = 110000.uint64.some) + check freeSizeResponse.status == 422 + check "not allowed" in (await freeSizeResponse.body) + + test "creating availability above the node quota returns 422": + let response = await client.postAvailabilityRaw( + totalSize = 24000000000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + + check response.status == 422 + check (await response.body) == "Not enough storage quota" + + test "updating availability above the node quota returns 422": + let availability = ( + await client.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + ).get + let response = await client.patchAvailabilityRaw( + availability.id, totalSize = 24000000000.uint64.some + ) + + check response.status == 422 + check (await response.body) == "Not enough storage quota" + + test "creating availability when total size is zero returns 422": + let response = await client.postAvailabilityRaw( + totalSize = 0.uint64, + duration = 
200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + + check response.status == 422 + check (await response.body) == "Total size must be larger then zero" + + test "updating availability when total size is zero returns 422": + let availability = ( + await client.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + ).get + let response = + await client.patchAvailabilityRaw(availability.id, totalSize = 0.uint64.some) + + check response.status == 422 + check (await response.body) == "Total size must be larger then zero" + + test "creating availability when total size is negative returns 422": + let json = + %*{ + "totalSize": "-1", + "duration": "200", + "minPricePerBytePerSecond": "3", + "totalCollateral": "300", + } + let response = await client.post(client.buildUrl("/sales/availability"), $json) + + check response.status == 400 + check (await response.body) == "Parsed integer outside of valid range" + + test "updating availability when total size is negative returns 422": + let availability = ( + await client.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + ).get + + let json = %*{"totalSize": "-1"} + let response = await client.patch( + client.buildUrl("/sales/availability/") & $availability.id, $json + ) + + check response.status == 400 + check (await response.body) == "Parsed integer outside of valid range" + + test "request storage fails if tolerance is zero": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 0 + + var responseBefore = ( + await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, 
proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Tolerance needs to be bigger then zero" + + test "request storage fails if duration exceeds limit": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = (31 * 24 * 60 * 60).uint64 + # 31 days TODO: this should not be hardcoded, but waits for https://github.com/codex-storage/nim-codex/issues/1056 + let proofProbability = 3.u256 + let expiry = 30.uint + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 2 + let pricePerBytePerSecond = 1.u256 + + var responseBefore = ( + await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) + ) + + check responseBefore.status == 422 + check "Duration exceeds limit of" in (await responseBefore.body) + + test "request storage fails if expiry is zero": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 0.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 1 + + var responseBefore = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == + "Expiry must be greater than zero and less than the request's duration" + + test "request storage fails if proof probability is zero": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 0.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 1 + + var 
responseBefore = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Proof probability must be greater than zero" + + test "request storage fails if price per byte per second is zero": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 0.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 1.u256 + let nodes = 3 + let tolerance = 1 + + var responseBefore = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == + "Price per byte per second must be greater than zero" + + test "request storage fails if collareral per byte is zero": + let data = await RandomChunker.example(blocks = 2) + let cid = (await client.upload(data)).get + let duration = 100.uint64 + let pricePerBytePerSecond = 1.u256 + let proofProbability = 3.u256 + let expiry = 30.uint64 + let collateralPerByte = 0.u256 + let nodes = 3 + let tolerance = 1 + + var responseBefore = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) + + check responseBefore.status == 422 + check (await responseBefore.body) == "Collateral per byte must be greater than zero" + + waitFor node.stop() + node.removeDataDir() diff --git a/tests/integration/testsales.nim b/tests/integration/testsales.nim index 9bf8a97c..5e9b26df 100644 --- a/tests/integration/testsales.nim +++ b/tests/integration/testsales.nim @@ -1,5 +1,6 @@ import std/httpclient import pkg/codex/contracts +from pkg/codex/stores/repostore/types import DefaultQuotaBytes import ./twonodes 
import ../codex/examples import ../contracts/time @@ -69,15 +70,6 @@ multinodesuite "Sales": ).get check availability in (await host.getAvailabilities()).get - test "updating non-existing availability", salesConfig: - let nonExistingResponse = await host.patchAvailabilityRaw( - AvailabilityId.example, - duration = 100.uint64.some, - minPricePerBytePerSecond = 2.u256.some, - totalCollateral = 200.u256.some, - ) - check nonExistingResponse.status == 404 - test "updating availability", salesConfig: let availability = ( await host.postAvailability( @@ -103,20 +95,6 @@ multinodesuite "Sales": check updatedAvailability.totalSize == 140000.uint64 check updatedAvailability.freeSize == 140000.uint64 - test "updating availability - freeSize is not allowed to be changed", salesConfig: - let availability = ( - await host.postAvailability( - totalSize = 140000.uint64, - duration = 200.uint64, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 300.u256, - ) - ).get - let freeSizeResponse = - await host.patchAvailabilityRaw(availability.id, freeSize = 110000.uint64.some) - check freeSizeResponse.status == 400 - check "not allowed" in (await freeSizeResponse.body) - test "updating availability - updating totalSize", salesConfig: let availability = ( await host.postAvailability( @@ -176,7 +154,7 @@ multinodesuite "Sales": availability.id, totalSize = (utilizedSize - 1).some ) ) - check totalSizeResponse.status == 400 + check totalSizeResponse.status == 422 check "totalSize must be larger then current totalSize" in (await totalSizeResponse.body) diff --git a/tests/testIntegration.nim b/tests/testIntegration.nim index 9a2dc472..152d22dd 100644 --- a/tests/testIntegration.nim +++ b/tests/testIntegration.nim @@ -1,5 +1,6 @@ import ./integration/testcli import ./integration/testrestapi +import ./integration/testrestapivalidation import ./integration/testupdownload import ./integration/testsales import ./integration/testpurchasing From 60b6996eb0c4c2b85e49c65c7464d83222d7cdf2 Mon 
Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 26 Mar 2025 09:06:37 +0100 Subject: [PATCH 38/40] chore(marketplace): define raises for async pragma (#1165) * Define raises for async pragma * Update nim ethers * Replace CatchableError by MarketError --- codex/contracts/market.nim | 30 ++++++++++++++++++-------- codex/market.nim | 28 +++++++++++++++++------- tests/codex/helpers/mockmarket.nim | 34 ++++++++++++++++++++---------- vendor/nim-ethers | 2 +- 4 files changed, 65 insertions(+), 29 deletions(-) diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 0b846099..74694285 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -76,7 +76,9 @@ proc config( return resolvedConfig -proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} = +proc approveFunds( + market: OnChainMarket, amount: UInt256 +) {.async: (raises: [CancelledError, MarketError]).} = debug "Approving tokens", amount convertEthersError("Failed to approve funds"): let tokenAddress = await market.contract.token() @@ -105,7 +107,9 @@ method getZkeyHash*( let config = await market.config() return some config.proofs.zkeyHash -method getSigner*(market: OnChainMarket): Future[Address] {.async.} = +method getSigner*( + market: OnChainMarket +): Future[Address] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError("Failed to get signer address"): return await market.signer.getAddress() @@ -159,7 +163,9 @@ method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} = return slots -method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} = +method requestStorage( + market: OnChainMarket, request: StorageRequest +) {.async: (raises: [CancelledError, MarketError]).} = convertEthersError("Failed to request storage"): debug "Requesting storage" await market.approveFunds(request.totalPrice()) @@ -243,7 +249,7 @@ method fillSlot( slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, -) {.async.} = +) {.async: 
(raises: [CancelledError, MarketError]).} = convertEthersError("Failed to fill slot"): logScope: requestId @@ -260,7 +266,9 @@ method fillSlot( parent, ) -method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = +method freeSlot*( + market: OnChainMarket, slotId: SlotId +) {.async: (raises: [CancelledError, MarketError]).} = convertEthersError("Failed to free slot"): var freeSlot: Future[Confirmable] if rewardRecipient =? market.rewardRecipient: @@ -279,7 +287,9 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = discard await freeSlot.confirm(1) -method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} = +method withdrawFunds( + market: OnChainMarket, requestId: RequestId +) {.async: (raises: [CancelledError, MarketError]).} = convertEthersError("Failed to withdraw funds"): discard await market.contract.withdrawFunds(requestId).confirm(1) @@ -306,13 +316,15 @@ method getChallenge*( let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.getChallenge(id, overrides) -method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} = +method submitProof*( + market: OnChainMarket, id: SlotId, proof: Groth16Proof +) {.async: (raises: [CancelledError, MarketError]).} = convertEthersError("Failed to submit proof"): discard await market.contract.submitProof(id, proof).confirm(1) method markProofAsMissing*( market: OnChainMarket, id: SlotId, period: Period -) {.async.} = +) {.async: (raises: [CancelledError, MarketError]).} = convertEthersError("Failed to mark proof as missing"): discard await market.contract.markProofAsMissing(id, period).confirm(1) @@ -331,7 +343,7 @@ method canProofBeMarkedAsMissing*( method reserveSlot*( market: OnChainMarket, requestId: RequestId, slotIndex: uint64 -) {.async.} = +) {.async: (raises: [CancelledError, MarketError]).} = convertEthersError("Failed to reserve slot"): try: discard await market.contract diff --git a/codex/market.nim 
b/codex/market.nim index dd8e14ba..71cad9a9 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -74,7 +74,9 @@ method getZkeyHash*( ): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method getSigner*(market: Market): Future[Address] {.base, async.} = +method getSigner*( + market: Market +): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method periodicity*( @@ -108,7 +110,9 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} = let pntr = await market.getPointer(slotId) return pntr < downtime -method requestStorage*(market: Market, request: StorageRequest) {.base, async.} = +method requestStorage*( + market: Market, request: StorageRequest +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} = @@ -161,13 +165,17 @@ method fillSlot*( slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, -) {.base, async.} = +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method freeSlot*(market: Market, slotId: SlotId) {.base, async.} = +method freeSlot*( + market: Market, slotId: SlotId +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method withdrawFunds*(market: Market, requestId: RequestId) {.base, async.} = +method withdrawFunds*( + market: Market, requestId: RequestId +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method subscribeRequests*( @@ -186,10 +194,14 @@ method getChallenge*( ): Future[ProofChallenge] {.base, async.} = raiseAssert("not implemented") -method submitProof*(market: Market, id: SlotId, proof: Groth16Proof) {.base, async.} = +method submitProof*( + market: Market, id: SlotId, proof: Groth16Proof +) {.base, async: (raises: [CancelledError, MarketError]).} = 
raiseAssert("not implemented") -method markProofAsMissing*(market: Market, id: SlotId, period: Period) {.base, async.} = +method markProofAsMissing*( + market: Market, id: SlotId, period: Period +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method canProofBeMarkedAsMissing*( @@ -199,7 +211,7 @@ method canProofBeMarkedAsMissing*( method reserveSlot*( market: Market, requestId: RequestId, slotIndex: uint64 -) {.base, async.} = +) {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method canReserveSlot*( diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index edf8a62d..03e76762 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -47,7 +47,7 @@ type config*: MarketplaceConfig canReserveSlot*: bool errorOnReserveSlot*: ?(ref MarketError) - errorOnFillSlot*: ?(ref CatchableError) + errorOnFillSlot*: ?(ref MarketError) clock: ?Clock Fulfillment* = object @@ -144,7 +144,9 @@ method loadConfig*( ): Future[?!void] {.async: (raises: [CancelledError]).} = discard -method getSigner*(market: MockMarket): Future[Address] {.async.} = +method getSigner*( + market: MockMarket +): Future[Address] {.async: (raises: [CancelledError, MarketError]).} = return market.signer method periodicity*( @@ -173,7 +175,9 @@ method repairRewardPercentage*( method getPointer*(market: MockMarket, slotId: SlotId): Future[uint8] {.async.} = return market.proofPointer -method requestStorage*(market: MockMarket, request: StorageRequest) {.async.} = +method requestStorage*( + market: MockMarket, request: StorageRequest +) {.async: (raises: [CancelledError, MarketError]).} = market.requested.add(request) var subscriptions = market.subscriptions.onRequest for subscription in subscriptions: @@ -311,10 +315,12 @@ method fillSlot*( slotIndex: uint64, proof: Groth16Proof, collateral: UInt256, -) {.async.} = +) {.async: (raises: [CancelledError, 
MarketError]).} = market.fillSlot(requestId, slotIndex, proof, market.signer, collateral) -method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} = +method freeSlot*( + market: MockMarket, slotId: SlotId +) {.async: (raises: [CancelledError, MarketError]).} = market.freed.add(slotId) for s in market.filled: if slotId(s.requestId, s.slotIndex) == slotId: @@ -322,7 +328,9 @@ method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} = break market.slotState[slotId] = SlotState.Free -method withdrawFunds*(market: MockMarket, requestId: RequestId) {.async.} = +method withdrawFunds*( + market: MockMarket, requestId: RequestId +) {.async: (raises: [CancelledError, MarketError]).} = market.withdrawn.add(requestId) if state =? market.requestState .? [requestId] and state == RequestState.Cancelled: @@ -352,12 +360,16 @@ method getChallenge*(mock: MockMarket, id: SlotId): Future[ProofChallenge] {.asy proc setProofEnd*(mock: MockMarket, id: SlotId, proofEnd: UInt256) = mock.proofEnds[id] = proofEnd -method submitProof*(mock: MockMarket, id: SlotId, proof: Groth16Proof) {.async.} = +method submitProof*( + mock: MockMarket, id: SlotId, proof: Groth16Proof +) {.async: (raises: [CancelledError, MarketError]).} = mock.submitted.add(proof) for subscription in mock.subscriptions.onProofSubmitted: subscription.callback(id) -method markProofAsMissing*(market: MockMarket, id: SlotId, period: Period) {.async.} = +method markProofAsMissing*( + market: MockMarket, id: SlotId, period: Period +) {.async: (raises: [CancelledError, MarketError]).} = market.markedAsMissingProofs.add(id) proc setCanProofBeMarkedAsMissing*(mock: MockMarket, id: SlotId, required: bool) = @@ -373,7 +385,7 @@ method canProofBeMarkedAsMissing*( method reserveSlot*( market: MockMarket, requestId: RequestId, slotIndex: uint64 -) {.async.} = +) {.async: (raises: [CancelledError, MarketError]).} = if error =? 
market.errorOnReserveSlot: raise error @@ -392,10 +404,10 @@ func setErrorOnReserveSlot*(market: MockMarket, error: ref MarketError) = else: some error -func setErrorOnFillSlot*(market: MockMarket, error: ref CatchableError) = +func setErrorOnFillSlot*(market: MockMarket, error: ref MarketError) = market.errorOnFillSlot = if error.isNil: - none (ref CatchableError) + none (ref MarketError) else: some error diff --git a/vendor/nim-ethers b/vendor/nim-ethers index b505ef1a..5d07b5db 160000 --- a/vendor/nim-ethers +++ b/vendor/nim-ethers @@ -1 +1 @@ -Subproject commit b505ef1ab889be8161bb1efb4908e3dfde5bc1c9 +Subproject commit 5d07b5dbcf584b020c732e84cc8b7229ab3e1083 From 7deeb7d2b34862889e5bc30e31e44709ca60ff9f Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 26 Mar 2025 12:45:22 +0100 Subject: [PATCH 39/40] feat(marketplace): persistent availabilities (#1099) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add availability enabled parameter * Return bytes to availability when finished * Add until parameter * Remove debug message * Clean up and fix tests * Update documentations and cleanup * Avoid swallowing CancelledError * Move until validation to reservations module * Call onAvailabilityAdded callback when the availability is enabled in sales * Remove until validation in restapi when creating an availability * Add openapi documentation * Use results instead of stew/results (#1112) * feat: request duration limit (#1057) * feat: request duration limit * Fix tests and duration type * Add custom error * Remove merge issue * Update codex contracts eth * Update market config and fix test * Fix SlotReservationsConfig syntax * Update dependencies * test: remove doubled test * chore: update contracts repo --------- Co-authored-by: Arnaud * fix(statemachine): do not raise from state.run (#1115) * fix(statemachine): do not raise from state.run * fix rebase * fix exception handling in SaleProvingSimulated.prove - re-raise 
CancelledError - don't return State on CatchableError - expect the Proofs_InvalidProof custom error instead of checking a string * asyncSpawn salesagent.onCancelled This was swallowing a KeyError in one of the tests (fixed in the previous commit) * remove error handling states in asyncstatemachine * revert unneeded changes * formatting * PR feedback, logging updates * chore(integration): simplify block expiration integration test (#1100) * chore(integration): simplify block expiration integration test * clean up * fix after rebase * perf: contract storage optimizations (#1094) * perf: contract storage optimizations * Apply optimization changes * Apply optimizing parameters sizing * Update codex-contracts-eth * bump latest changes in contracts branch * Change requestDurationLimit to uint64 * fix tests * fix tests --------- Co-authored-by: Arnaud Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com> * bump contracts to master (#1122) * Add availability enabled parameter * Return bytes to availability when finished * Add until parameter * Clean up and fix tests * Move until validation to reservations module * Apply suggestion changes: return the reservation module error * Apply suggestion changes for until dates * Apply suggestion changes: reorganize tests * Fix indent * Remove test related to timing issue * Add raises errors to async pragma and remove useless try except * Update open api documentation * Fix wording * Remove the httpClient restart statements * Use market.getRequestEnd to set validUntil * Remove returnBytes * Use clock.now in testing * Move the api validation file to the right file --------- Co-authored-by: Adam Uhlíř Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com> --- codex/rest/api.nim | 29 ++- codex/rest/json.nim | 2 + codex/sales.nim | 20 +- codex/sales/reservations.nim | 195 ++++++++++++------ codex/sales/salesagent.nim | 2 +- codex/sales/states/cancelled.nim | 4 +- codex/sales/states/errored.nim | 2 +- 
codex/sales/states/filling.nim | 2 +- codex/sales/states/finished.nim | 3 + codex/sales/states/ignored.nim | 5 +- codex/sales/states/preparing.nim | 8 +- codex/sales/states/slotreserving.nim | 4 +- codex/stores/repostore/operations.nim | 2 +- codex/stores/repostore/store.nim | 8 +- openapi.yaml | 13 +- tests/codex/examples.nim | 2 + tests/codex/helpers/mockreservations.nim | 3 + tests/codex/sales/states/testcancelled.nim | 7 +- tests/codex/sales/states/testerrored.nim | 7 +- tests/codex/sales/states/testfilling.nim | 1 - tests/codex/sales/states/testfinished.nim | 11 +- tests/codex/sales/states/testignored.nim | 7 +- tests/codex/sales/states/testpreparing.nim | 22 +- .../codex/sales/states/testslotreserving.nim | 1 - tests/codex/sales/testreservations.nim | 161 +++++++++++++-- tests/codex/sales/testsales.nim | 64 +++++- tests/integration/codexclient.nim | 29 ++- tests/integration/testmarketplace.nim | 2 + tests/integration/testproofs.nim | 4 +- tests/integration/testrestapi.nim | 1 + tests/integration/testrestapivalidation.nim | 16 ++ tests/integration/testsales.nim | 97 +++++++-- vendor/nim-datastore | 2 +- 33 files changed, 564 insertions(+), 172 deletions(-) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 243d4ed6..ee493e03 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -484,10 +484,19 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = without availability =? ( await reservations.createAvailability( - restAv.totalSize, restAv.duration, restAv.minPricePerBytePerSecond, + restAv.totalSize, + restAv.duration, + restAv.minPricePerBytePerSecond, restAv.totalCollateral, + enabled = restAv.enabled |? true, + until = restAv.until |? 
0, ) ), error: + if error of CancelledError: + raise error + if error of UntilOutOfBoundsError: + return RestApiResponse.error(Http422, error.msg) + return RestApiResponse.error(Http500, error.msg, headers = headers) return RestApiResponse.response( @@ -524,6 +533,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = ## tokens) to be matched against the request's pricePerBytePerSecond ## totalCollateral - total collateral (in amount of ## tokens) that can be distributed among matching requests + try: without contracts =? node.contracts.host: return RestApiResponse.error(Http503, "Persistence is not enabled") @@ -577,10 +587,21 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = if totalCollateral =? restAv.totalCollateral: availability.totalCollateral = totalCollateral - if err =? (await reservations.update(availability)).errorOption: - return RestApiResponse.error(Http500, err.msg) + if until =? restAv.until: + availability.until = until - return RestApiResponse.response(Http200) + if enabled =? restAv.enabled: + availability.enabled = enabled + + if err =? 
(await reservations.update(availability)).errorOption: + if err of CancelledError: + raise err + if err of UntilOutOfBoundsError: + return RestApiResponse.error(Http422, err.msg) + else: + return RestApiResponse.error(Http500, err.msg) + + return RestApiResponse.response(Http204) except CatchableError as exc: trace "Excepting processing request", exc = exc.msg return RestApiResponse.error(Http500) diff --git a/codex/rest/json.nim b/codex/rest/json.nim index 50c8b514..1b9459c1 100644 --- a/codex/rest/json.nim +++ b/codex/rest/json.nim @@ -33,6 +33,8 @@ type minPricePerBytePerSecond* {.serialize.}: UInt256 totalCollateral* {.serialize.}: UInt256 freeSize* {.serialize.}: ?uint64 + enabled* {.serialize.}: ?bool + until* {.serialize.}: ?SecondsSince1970 RestSalesAgent* = object state* {.serialize.}: string diff --git a/codex/sales.nim b/codex/sales.nim index a4a174c1..37e2c06a 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -113,7 +113,6 @@ proc remove(sales: Sales, agent: SalesAgent) {.async.} = proc cleanUp( sales: Sales, agent: SalesAgent, - returnBytes: bool, reprocessSlot: bool, returnedCollateral: ?UInt256, processing: Future[void], @@ -132,7 +131,7 @@ proc cleanUp( # if reservation for the SalesAgent was not created, then it means # that the cleanUp was called before the sales process really started, so # there are not really any bytes to be returned - if returnBytes and request =? data.request and reservation =? data.reservation: + if request =? data.request and reservation =? data.reservation: if returnErr =? 
( await sales.context.reservations.returnBytesToAvailability( reservation.availabilityId, reservation.id, request.ask.slotSize @@ -203,9 +202,9 @@ proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) = newSalesAgent(sales.context, item.requestId, item.slotIndex, none StorageRequest) agent.onCleanUp = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = - await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done) + await sales.cleanUp(agent, reprocessSlot, returnedCollateral, done) agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) = sales.filled(request, slotIndex, done) @@ -271,12 +270,12 @@ proc load*(sales: Sales) {.async.} = newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request) agent.onCleanUp = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = # since workers are not being dispatched, this future has not been created # by a worker. Create a dummy one here so we can call sales.cleanUp let done: Future[void] = nil - await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done) + await sales.cleanUp(agent, reprocessSlot, returnedCollateral, done) # There is no need to assign agent.onFilled as slots loaded from `mySlots` # are inherently already filled and so assigning agent.onFilled would be @@ -285,7 +284,9 @@ proc load*(sales: Sales) {.async.} = agent.start(SaleUnknown()) sales.agents.add agent -proc OnAvailabilitySaved(sales: Sales, availability: Availability) {.async.} = +proc OnAvailabilitySaved( + sales: Sales, availability: Availability +) {.async: (raises: []).} = ## When availabilities are modified or added, the queue should be unpaused if ## it was paused and any slots in the queue should have their `seen` flag ## cleared. 
@@ -533,8 +534,9 @@ proc startSlotQueue(sales: Sales) = slotQueue.start() - proc OnAvailabilitySaved(availability: Availability) {.async.} = - await sales.OnAvailabilitySaved(availability) + proc OnAvailabilitySaved(availability: Availability) {.async: (raises: []).} = + if availability.enabled: + await sales.OnAvailabilitySaved(availability) reservations.OnAvailabilitySaved = OnAvailabilitySaved diff --git a/codex/sales/reservations.nim b/codex/sales/reservations.nim index 25ee2b99..b717cc1c 100644 --- a/codex/sales/reservations.nim +++ b/codex/sales/reservations.nim @@ -35,6 +35,7 @@ import std/sequtils import std/sugar import std/typetraits import std/sequtils +import std/times import pkg/chronos import pkg/datastore import pkg/nimcrypto @@ -70,6 +71,12 @@ type minPricePerBytePerSecond* {.serialize.}: UInt256 totalCollateral {.serialize.}: UInt256 totalRemainingCollateral* {.serialize.}: UInt256 + # If set to false, the availability will not accept new slots. + # If enabled, it will not impact any existing slots that are already being hosted. + enabled* {.serialize.}: bool + # Specifies the latest timestamp after which the availability will no longer host any slots. + # If set to 0, there will be no restrictions. + until* {.serialize.}: SecondsSince1970 Reservation* = ref object id* {.serialize.}: ReservationId @@ -77,6 +84,7 @@ type size* {.serialize.}: uint64 requestId* {.serialize.}: RequestId slotIndex* {.serialize.}: uint64 + validUntil* {.serialize.}: SecondsSince1970 Reservations* = ref object of RootObj availabilityLock: AsyncLock @@ -84,10 +92,14 @@ type repo: RepoStore OnAvailabilitySaved: ?OnAvailabilitySaved - GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.} - IterDispose* = proc(): Future[?!void] {.gcsafe, closure.} - OnAvailabilitySaved* = - proc(availability: Availability): Future[void] {.upraises: [], gcsafe.} + GetNext* = proc(): Future[?seq[byte]] {. 
+ upraises: [], gcsafe, async: (raises: [CancelledError]), closure + .} + IterDispose* = + proc(): Future[?!void] {.gcsafe, async: (raises: [CancelledError]), closure.} + OnAvailabilitySaved* = proc(availability: Availability): Future[void] {. + upraises: [], gcsafe, async: (raises: []) + .} StorableIter* = ref object finished*: bool next*: GetNext @@ -102,13 +114,20 @@ type SerializationError* = object of ReservationsError UpdateFailedError* = object of ReservationsError BytesOutOfBoundsError* = object of ReservationsError + UntilOutOfBoundsError* = object of ReservationsError const SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module ReservationsKey = (SalesKey / "reservations").tryGet proc hash*(x: AvailabilityId): Hash {.borrow.} -proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} +proc all*( + self: Reservations, T: type SomeStorableObject +): Future[?!seq[T]] {.async: (raises: [CancelledError]).} + +proc all*( + self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId +): Future[?!seq[T]] {.async: (raises: [CancelledError]).} template withLock(lock, body) = try: @@ -128,6 +147,8 @@ proc init*( duration: uint64, minPricePerBytePerSecond: UInt256, totalCollateral: UInt256, + enabled: bool, + until: SecondsSince1970, ): Availability = var id: array[32, byte] doAssert randomBytes(id) == 32 @@ -139,6 +160,8 @@ proc init*( minPricePerBytePerSecond: minPricePerBytePerSecond, totalCollateral: totalCollateral, totalRemainingCollateral: totalCollateral, + enabled: enabled, + until: until, ) func totalCollateral*(self: Availability): UInt256 {.inline.} = @@ -154,6 +177,7 @@ proc init*( size: uint64, requestId: RequestId, slotIndex: uint64, + validUntil: SecondsSince1970, ): Reservation = var id: array[32, byte] doAssert randomBytes(id) == 32 @@ -163,6 +187,7 @@ proc init*( size: size, requestId: requestId, slotIndex: slotIndex, + validUntil: validUntil, ) func toArray(id: SomeStorableId): 
array[32, byte] = @@ -217,11 +242,19 @@ func available*(self: Reservations): uint = func hasAvailable*(self: Reservations, bytes: uint): bool = self.repo.available(bytes.NBytes) -proc exists*(self: Reservations, key: Key): Future[bool] {.async.} = +proc exists*( + self: Reservations, key: Key +): Future[bool] {.async: (raises: [CancelledError]).} = let exists = await self.repo.metaDs.ds.contains(key) return exists -proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} = +iterator items(self: StorableIter): Future[?seq[byte]] = + while not self.finished: + yield self.next() + +proc getImpl( + self: Reservations, key: Key +): Future[?!seq[byte]] {.async: (raises: [CancelledError]).} = if not await self.exists(key): let err = newException(NotExistsError, "object with key " & $key & " does not exist") @@ -234,7 +267,7 @@ proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} = proc get*( self: Reservations, key: Key, T: type SomeStorableObject -): Future[?!T] {.async.} = +): Future[?!T] {.async: (raises: [CancelledError]).} = without serialized =? await self.getImpl(key), error: return failure(error) @@ -243,7 +276,9 @@ proc get*( return success obj -proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.async.} = +proc updateImpl( + self: Reservations, obj: SomeStorableObject +): Future[?!void] {.async: (raises: [CancelledError]).} = trace "updating " & $(obj.type), id = obj.id without key =? obj.key, error: @@ -256,10 +291,15 @@ proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.a proc updateAvailability( self: Reservations, obj: Availability -): Future[?!void] {.async.} = +): Future[?!void] {.async: (raises: [CancelledError]).} = logScope: availabilityId = obj.id + if obj.until < 0: + let error = + newException(UntilOutOfBoundsError, "Cannot set until to a negative value") + return failure(error) + without key =? 
obj.key, error: return failure(error) @@ -269,21 +309,25 @@ proc updateAvailability( let res = await self.updateImpl(obj) # inform subscribers that Availability has been added if OnAvailabilitySaved =? self.OnAvailabilitySaved: - # when chronos v4 is implemented, and OnAvailabilitySaved is annotated - # with async:(raises:[]), we can remove this try/catch as we know, with - # certainty, that nothing will be raised - try: - await OnAvailabilitySaved(obj) - except CancelledError as e: - raise e - except CatchableError as e: - # we don't have any insight into types of exceptions that - # `OnAvailabilitySaved` can raise because it is caller-defined - warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg + await OnAvailabilitySaved(obj) return res else: return failure(err) + if obj.until > 0: + without allReservations =? await self.all(Reservation, obj.id), error: + error.msg = "Error updating reservation: " & error.msg + return failure(error) + + let requestEnds = allReservations.mapIt(it.validUntil) + + if requestEnds.len > 0 and requestEnds.max > obj.until: + let error = newException( + UntilOutOfBoundsError, + "Until parameter must be greater or equal to the longest currently hosted slot", + ) + return failure(error) + # Sizing of the availability changed, we need to adjust the repo reservation accordingly if oldAvailability.totalSize != obj.totalSize: trace "totalSize changed, updating repo reservation" @@ -306,26 +350,23 @@ proc updateAvailability( # inform subscribers that Availability has been modified (with increased # size) if OnAvailabilitySaved =? 
self.OnAvailabilitySaved: - # when chronos v4 is implemented, and OnAvailabilitySaved is annotated - # with async:(raises:[]), we can remove this try/catch as we know, with - # certainty, that nothing will be raised - try: - await OnAvailabilitySaved(obj) - except CancelledError as e: - raise e - except CatchableError as e: - # we don't have any insight into types of exceptions that - # `OnAvailabilitySaved` can raise because it is caller-defined - warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg - + await OnAvailabilitySaved(obj) return res -proc update*(self: Reservations, obj: Reservation): Future[?!void] {.async.} = +proc update*( + self: Reservations, obj: Reservation +): Future[?!void] {.async: (raises: [CancelledError]).} = return await self.updateImpl(obj) -proc update*(self: Reservations, obj: Availability): Future[?!void] {.async.} = - withLock(self.availabilityLock): - return await self.updateAvailability(obj) +proc update*( + self: Reservations, obj: Availability +): Future[?!void] {.async: (raises: [CancelledError]).} = + try: + withLock(self.availabilityLock): + return await self.updateAvailability(obj) + except AsyncLockError as e: + error "Lock error when trying to update the availability", err = e.msg + return failure(e) proc delete(self: Reservations, key: Key): Future[?!void] {.async.} = trace "deleting object", key @@ -391,12 +432,20 @@ proc createAvailability*( duration: uint64, minPricePerBytePerSecond: UInt256, totalCollateral: UInt256, + enabled: bool, + until: SecondsSince1970, ): Future[?!Availability] {.async.} = trace "creating availability", - size, duration, minPricePerBytePerSecond, totalCollateral + size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until - let availability = - Availability.init(size, size, duration, minPricePerBytePerSecond, totalCollateral) + if until < 0: + let error = + newException(UntilOutOfBoundsError, "Cannot set until to a negative value") + return failure(error) + + 
let availability = Availability.init( + size, size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until + ) let bytes = availability.freeSize if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption: @@ -420,6 +469,7 @@ method createReservation*( requestId: RequestId, slotIndex: uint64, collateralPerByte: UInt256, + validUntil: SecondsSince1970, ): Future[?!Reservation] {.async, base.} = withLock(self.availabilityLock): without availabilityKey =? availabilityId.key, error: @@ -436,9 +486,11 @@ method createReservation*( ) return failure(error) - trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex + trace "Creating reservation", + availabilityId, slotSize, requestId, slotIndex, validUntil = validUntil - let reservation = Reservation.init(availabilityId, slotSize, requestId, slotIndex) + let reservation = + Reservation.init(availabilityId, slotSize, requestId, slotIndex, validUntil) if createResErr =? (await self.update(reservation)).errorOption: return failure(createResErr) @@ -448,7 +500,7 @@ method createReservation*( availability.freeSize -= slotSize # adjust the remaining totalRemainingCollateral - availability.totalRemainingCollateral -= slotSize.stuint(256) * collateralPerByte + availability.totalRemainingCollateral -= slotSize.u256 * collateralPerByte # update availability with reduced size trace "Updating availability with reduced size" @@ -527,7 +579,7 @@ proc release*( reservationId: ReservationId, availabilityId: AvailabilityId, bytes: uint, -): Future[?!void] {.async.} = +): Future[?!void] {.async: (raises: [CancelledError]).} = logScope: topics = "release" bytes @@ -565,13 +617,9 @@ proc release*( return success() -iterator items(self: StorableIter): Future[?seq[byte]] = - while not self.finished: - yield self.next() - proc storables( self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey -): Future[?!StorableIter] {.async.} = +): Future[?!StorableIter] {.async: (raises: 
[CancelledError]).} = var iter = StorableIter() let query = Query.init(queryKey) when T is Availability: @@ -589,7 +637,7 @@ proc storables( return failure(error) # /sales/reservations - proc next(): Future[?seq[byte]] {.async.} = + proc next(): Future[?seq[byte]] {.async: (raises: [CancelledError]).} = await idleAsync() iter.finished = results.finished if not results.finished and res =? (await results.next()) and res.data.len > 0 and @@ -598,7 +646,7 @@ proc storables( return none seq[byte] - proc dispose(): Future[?!void] {.async.} = + proc dispose(): Future[?!void] {.async: (raises: [CancelledError]).} = return await results.dispose() iter.next = next @@ -607,32 +655,40 @@ proc storables( proc allImpl( self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey -): Future[?!seq[T]] {.async.} = +): Future[?!seq[T]] {.async: (raises: [CancelledError]).} = var ret: seq[T] = @[] without storables =? (await self.storables(T, queryKey)), error: return failure(error) for storable in storables.items: - without bytes =? (await storable): - continue + try: + without bytes =? (await storable): + continue - without obj =? T.fromJson(bytes), error: - error "json deserialization error", - json = string.fromBytes(bytes), error = error.msg - continue + without obj =? T.fromJson(bytes), error: + error "json deserialization error", + json = string.fromBytes(bytes), error = error.msg + continue - ret.add obj + ret.add obj + except CancelledError as err: + raise err + except CatchableError as err: + error "Error when retrieving storable", error = err.msg + continue return success(ret) -proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} = +proc all*( + self: Reservations, T: type SomeStorableObject +): Future[?!seq[T]] {.async: (raises: [CancelledError]).} = return await self.allImpl(T) proc all*( self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId -): Future[?!seq[T]] {.async.} = - without key =? 
(ReservationsKey / $availabilityId): +): Future[?!seq[T]] {.async: (raises: [CancelledError]).} = + without key =? key(availabilityId): return failure("no key") return await self.allImpl(T, key) @@ -641,6 +697,7 @@ proc findAvailability*( self: Reservations, size, duration: uint64, pricePerBytePerSecond, collateralPerByte: UInt256, + validUntil: SecondsSince1970, ): Future[?Availability] {.async.} = without storables =? (await self.storables(Availability)), e: error "failed to get all storables", error = e.msg @@ -648,11 +705,14 @@ proc findAvailability*( for item in storables.items: if bytes =? (await item) and availability =? Availability.fromJson(bytes): - if size <= availability.freeSize and duration <= availability.duration and + if availability.enabled and size <= availability.freeSize and + duration <= availability.duration and collateralPerByte <= availability.maxCollateralPerByte and - pricePerBytePerSecond >= availability.minPricePerBytePerSecond: + pricePerBytePerSecond >= availability.minPricePerBytePerSecond and + (availability.until == 0 or availability.until >= validUntil): trace "availability matched", id = availability.id, + enabled = availability.enabled, size, availFreeSize = availability.freeSize, duration, @@ -660,7 +720,8 @@ proc findAvailability*( pricePerBytePerSecond, availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond, collateralPerByte, - availMaxCollateralPerByte = availability.maxCollateralPerByte + availMaxCollateralPerByte = availability.maxCollateralPerByte, + until = availability.until # TODO: As soon as we're on ARC-ORC, we can use destructors # to automatically dispose our iterators when they fall out of scope. 
@@ -672,6 +733,7 @@ proc findAvailability*( trace "availability did not match", id = availability.id, + enabled = availability.enabled, size, availFreeSize = availability.freeSize, duration, @@ -679,4 +741,5 @@ proc findAvailability*( pricePerBytePerSecond, availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond, collateralPerByte, - availMaxCollateralPerByte = availability.maxCollateralPerByte + availMaxCollateralPerByte = availability.maxCollateralPerByte, + until = availability.until diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index f0abf3ee..61f3a9d3 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -27,7 +27,7 @@ type onFilled*: ?OnFilled OnCleanUp* = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ): Future[void] {.gcsafe, upraises: [].} OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].} diff --git a/codex/sales/states/cancelled.nim b/codex/sales/states/cancelled.nim index 3bdf8c2f..2c240e15 100644 --- a/codex/sales/states/cancelled.nim +++ b/codex/sales/states/cancelled.nim @@ -34,9 +34,7 @@ method run*( if onCleanUp =? agent.onCleanUp: await onCleanUp( - returnBytes = true, - reprocessSlot = false, - returnedCollateral = some currentCollateral, + reprocessSlot = false, returnedCollateral = some currentCollateral ) warn "Sale cancelled due to timeout", diff --git a/codex/sales/states/errored.nim b/codex/sales/states/errored.nim index 77bf08d3..95848fd3 100644 --- a/codex/sales/states/errored.nim +++ b/codex/sales/states/errored.nim @@ -34,7 +34,7 @@ method run*( onClear(request, data.slotIndex) if onCleanUp =? 
agent.onCleanUp: - await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot) + await onCleanUp(reprocessSlot = state.reprocessSlot) except CancelledError as e: trace "SaleErrored.run was cancelled", error = e.msgDetail except CatchableError as e: diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim index 13644223..1b76150a 100644 --- a/codex/sales/states/filling.nim +++ b/codex/sales/states/filling.nim @@ -50,7 +50,7 @@ method run*( await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) except SlotStateMismatchError as e: debug "Slot is already filled, ignoring slot" - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + return some State(SaleIgnored(reprocessSlot: false)) except MarketError as e: return some State(SaleErrored(error: e)) # other CatchableErrors are handled "automatically" by the SaleState diff --git a/codex/sales/states/finished.nim b/codex/sales/states/finished.nim index 2aba69eb..16e66d27 100644 --- a/codex/sales/states/finished.nim +++ b/codex/sales/states/finished.nim @@ -36,6 +36,9 @@ method run*( requestId = data.requestId, slotIndex = data.slotIndex try: + if onClear =? agent.context.onClear: + onClear(request, data.slotIndex) + if onCleanUp =? agent.onCleanUp: await onCleanUp(returnedCollateral = state.returnedCollateral) except CancelledError as e: diff --git a/codex/sales/states/ignored.nim b/codex/sales/states/ignored.nim index b07a201c..7f2ae5b1 100644 --- a/codex/sales/states/ignored.nim +++ b/codex/sales/states/ignored.nim @@ -14,7 +14,6 @@ logScope: type SaleIgnored* = ref object of SaleState reprocessSlot*: bool # readd slot to queue with `seen` flag - returnBytes*: bool # return unreleased bytes from Reservation to Availability method `$`*(state: SaleIgnored): string = "SaleIgnored" @@ -26,9 +25,7 @@ method run*( try: if onCleanUp =? 
agent.onCleanUp: - await onCleanUp( - reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes - ) + await onCleanUp(reprocessSlot = state.reprocessSlot) except CancelledError as e: trace "SaleIgnored.run was cancelled", error = e.msgDetail except CatchableError as e: diff --git a/codex/sales/states/preparing.nim b/codex/sales/states/preparing.nim index 443aee0b..a3aee4c9 100644 --- a/codex/sales/states/preparing.nim +++ b/codex/sales/states/preparing.nim @@ -56,7 +56,7 @@ method run*( let slotId = slotId(data.requestId, data.slotIndex) let state = await market.slotState(slotId) if state != SlotState.Free and state != SlotState.Repair: - return some State(SaleIgnored(reprocessSlot: false, returnBytes: false)) + return some State(SaleIgnored(reprocessSlot: false)) # TODO: Once implemented, check to ensure the host is allowed to fill the slot, # due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal) @@ -68,10 +68,12 @@ method run*( pricePerBytePerSecond = request.ask.pricePerBytePerSecond collateralPerByte = request.ask.collateralPerByte + let requestEnd = await market.getRequestEnd(data.requestId) + without availability =? await reservations.findAvailability( request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond, - request.ask.collateralPerByte, + request.ask.collateralPerByte, requestEnd, ): debug "No availability found for request, ignoring" @@ -82,7 +84,7 @@ method run*( without reservation =? 
await reservations.createReservation( availability.id, request.ask.slotSize, request.id, data.slotIndex, - request.ask.collateralPerByte, + request.ask.collateralPerByte, requestEnd, ), error: trace "Creation of reservation failed" # Race condition: diff --git a/codex/sales/states/slotreserving.nim b/codex/sales/states/slotreserving.nim index e9ac8dcd..780dadfc 100644 --- a/codex/sales/states/slotreserving.nim +++ b/codex/sales/states/slotreserving.nim @@ -46,7 +46,7 @@ method run*( await market.reserveSlot(data.requestId, data.slotIndex) except SlotReservationNotAllowedError as e: debug "Slot cannot be reserved, ignoring", error = e.msg - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + return some State(SaleIgnored(reprocessSlot: false)) except MarketError as e: return some State(SaleErrored(error: e)) # other CatchableErrors are handled "automatically" by the SaleState @@ -57,7 +57,7 @@ method run*( # do not re-add this slot to the queue, and return bytes from Reservation to # the Availability debug "Slot cannot be reserved, ignoring" - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) + return some State(SaleIgnored(reprocessSlot: false)) except CancelledError as e: trace "SaleSlotReserving.run was cancelled", error = e.msgDetail except CatchableError as e: diff --git a/codex/stores/repostore/operations.nim b/codex/stores/repostore/operations.nim index 125741e1..cc488240 100644 --- a/codex/stores/repostore/operations.nim +++ b/codex/stores/repostore/operations.nim @@ -105,7 +105,7 @@ proc updateQuotaUsage*( minusUsed: NBytes = 0.NBytes, plusReserved: NBytes = 0.NBytes, minusReserved: NBytes = 0.NBytes, -): Future[?!void] {.async.} = +): Future[?!void] {.async: (raises: [CancelledError]).} = await self.metaDs.modify( QuotaUsedKey, proc(maybeCurrUsage: ?QuotaUsage): Future[?QuotaUsage] {.async.} = diff --git a/codex/stores/repostore/store.nim b/codex/stores/repostore/store.nim index d7305107..130ab15e 100644 --- 
a/codex/stores/repostore/store.nim +++ b/codex/stores/repostore/store.nim @@ -380,7 +380,9 @@ method close*(self: RepoStore): Future[void] {.async.} = # RepoStore procs ########################################################### -proc reserve*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} = +proc reserve*( + self: RepoStore, bytes: NBytes +): Future[?!void] {.async: (raises: [CancelledError]).} = ## Reserve bytes ## @@ -388,7 +390,9 @@ proc reserve*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} = await self.updateQuotaUsage(plusReserved = bytes) -proc release*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} = +proc release*( + self: RepoStore, bytes: NBytes +): Future[?!void] {.async: (raises: [CancelledError]).} = ## Release bytes ## diff --git a/openapi.yaml b/openapi.yaml index c2088cc5..8bae1b10 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -163,6 +163,14 @@ components: totalCollateral: type: string description: Total collateral (in amount of tokens) that can be used for matching requests + enabled: + type: boolean + description: Enable the ability to receive sales on this availability. + default: true + until: + type: integer + description: Specifies the latest timestamp, after which the availability will no longer host any slots. If set to 0, there will be no restrictions. + default: 0 SalesAvailabilityREAD: allOf: @@ -239,6 +247,9 @@ components: slotIndex: type: string description: Slot Index as decimal string + validUntil: + type: integer + description: Timestamp after which the reservation will no longer be valid. 
StorageRequestCreation: type: object @@ -704,7 +715,7 @@ paths: "400": description: Invalid data input "422": - description: The provided parameters did not pass validation + description: Not enough node's storage quota available or the provided parameters did not pass validation "500": description: Error reserving availability "503": diff --git a/tests/codex/examples.nim b/tests/codex/examples.nim index ed1dd52a..52b8a0b8 100644 --- a/tests/codex/examples.nim +++ b/tests/codex/examples.nim @@ -75,6 +75,8 @@ proc example*( duration = uint16.example.uint64, minPricePerBytePerSecond = uint8.example.u256, totalCollateral = totalSize.u256 * collateralPerByte, + enabled = true, + until = 0.SecondsSince1970, ) proc example*(_: type Reservation): Reservation = diff --git a/tests/codex/helpers/mockreservations.nim b/tests/codex/helpers/mockreservations.nim index 1bc76a09..91ed04ec 100644 --- a/tests/codex/helpers/mockreservations.nim +++ b/tests/codex/helpers/mockreservations.nim @@ -2,6 +2,7 @@ import pkg/chronos import pkg/codex/sales import pkg/codex/stores import pkg/questionable/results +import pkg/codex/clock type MockReservations* = ref object of Reservations createReservationThrowBytesOutOfBoundsError: bool @@ -28,6 +29,7 @@ method createReservation*( requestId: RequestId, slotIndex: uint64, collateralPerByte: UInt256, + validUntil: SecondsSince1970, ): Future[?!Reservation] {.async.} = if self.createReservationThrowBytesOutOfBoundsError: let error = newException( @@ -45,4 +47,5 @@ method createReservation*( requestId, slotIndex, collateralPerByte, + validUntil, ) diff --git a/tests/codex/sales/states/testcancelled.nim b/tests/codex/sales/states/testcancelled.nim index 48f3e8a0..ab450200 100644 --- a/tests/codex/sales/states/testcancelled.nim +++ b/tests/codex/sales/states/testcancelled.nim @@ -22,16 +22,14 @@ asyncchecksuite "sales state 'cancelled'": var market: MockMarket var state: SaleCancelled var agent: SalesAgent - var returnBytesWas = bool.none var 
reprocessSlotWas = bool.none var returnedCollateralValue = UInt256.none setup: market = MockMarket.new() let onCleanUp = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = - returnBytesWas = some returnBytes reprocessSlotWas = some reprocessSlot returnedCollateralValue = returnedCollateral @@ -40,7 +38,7 @@ asyncchecksuite "sales state 'cancelled'": agent.onCleanUp = onCleanUp state = SaleCancelled.new() - test "calls onCleanUp with returnBytes = false, reprocessSlot = true, and returnedCollateral = currentCollateral": + test "calls onCleanUp with reprocessSlot = true, and returnedCollateral = currentCollateral": market.fillSlot( requestId = request.id, slotIndex = slotIndex, @@ -49,6 +47,5 @@ asyncchecksuite "sales state 'cancelled'": collateral = currentCollateral, ) discard await state.run(agent) - check eventually returnBytesWas == some true check eventually reprocessSlotWas == some false check eventually returnedCollateralValue == some currentCollateral diff --git a/tests/codex/sales/states/testerrored.nim b/tests/codex/sales/states/testerrored.nim index 07e325e3..0cc26cf8 100644 --- a/tests/codex/sales/states/testerrored.nim +++ b/tests/codex/sales/states/testerrored.nim @@ -20,14 +20,12 @@ asyncchecksuite "sales state 'errored'": var state: SaleErrored var agent: SalesAgent - var returnBytesWas = false var reprocessSlotWas = false setup: let onCleanUp = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = - returnBytesWas = returnBytes reprocessSlotWas = reprocessSlot let context = SalesContext(market: market, clock: clock) @@ -35,8 +33,7 @@ asyncchecksuite "sales state 'errored'": agent.onCleanUp = onCleanUp state = SaleErrored(error: newException(ValueError, "oh no!")) - test "calls onCleanUp with returnBytes = false and reprocessSlot = true": + 
test "calls onCleanUp with reprocessSlot = true": state = SaleErrored(error: newException(ValueError, "oh no!"), reprocessSlot: true) discard await state.run(agent) - check eventually returnBytesWas == true check eventually reprocessSlotWas == true diff --git a/tests/codex/sales/states/testfilling.nim b/tests/codex/sales/states/testfilling.nim index f746b5a8..54536a4c 100644 --- a/tests/codex/sales/states/testfilling.nim +++ b/tests/codex/sales/states/testfilling.nim @@ -47,7 +47,6 @@ suite "sales state 'filling'": let next = !(await state.run(agent)) check next of SaleIgnored check SaleIgnored(next).reprocessSlot == false - check SaleIgnored(next).returnBytes test "run switches to errored with other error ": let error = newException(MarketError, "some error") diff --git a/tests/codex/sales/states/testfinished.nim b/tests/codex/sales/states/testfinished.nim index 0c33a7b3..1648df3a 100644 --- a/tests/codex/sales/states/testfinished.nim +++ b/tests/codex/sales/states/testfinished.nim @@ -23,22 +23,23 @@ asyncchecksuite "sales state 'finished'": var market: MockMarket var state: SaleFinished var agent: SalesAgent - var returnBytesWas = bool.none var reprocessSlotWas = bool.none var returnedCollateralValue = UInt256.none + var saleCleared = bool.none setup: market = MockMarket.new() let onCleanUp = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = - returnBytesWas = some returnBytes reprocessSlotWas = some reprocessSlot returnedCollateralValue = returnedCollateral let context = SalesContext(market: market, clock: clock) agent = newSalesAgent(context, request.id, slotIndex, request.some) agent.onCleanUp = onCleanUp + agent.context.onClear = some proc(request: StorageRequest, idx: uint64) = + saleCleared = some true state = SaleFinished(returnedCollateral: some currentCollateral) test "switches to cancelled state when request expires": @@ -49,8 +50,8 @@ 
asyncchecksuite "sales state 'finished'": let next = state.onFailed(request) check !next of SaleFailed - test "calls onCleanUp with returnBytes = false, reprocessSlot = true, and returnedCollateral = currentCollateral": + test "calls onCleanUp with reprocessSlot = true, and returnedCollateral = currentCollateral": discard await state.run(agent) - check eventually returnBytesWas == some false check eventually reprocessSlotWas == some false check eventually returnedCollateralValue == some currentCollateral + check eventually saleCleared == some true diff --git a/tests/codex/sales/states/testignored.nim b/tests/codex/sales/states/testignored.nim index 2e1c6e91..5eea7d16 100644 --- a/tests/codex/sales/states/testignored.nim +++ b/tests/codex/sales/states/testignored.nim @@ -20,14 +20,12 @@ asyncchecksuite "sales state 'ignored'": var state: SaleIgnored var agent: SalesAgent - var returnBytesWas = false var reprocessSlotWas = false setup: let onCleanUp = proc( - returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + reprocessSlot = false, returnedCollateral = UInt256.none ) {.async.} = - returnBytesWas = returnBytes reprocessSlotWas = reprocessSlot let context = SalesContext(market: market, clock: clock) @@ -36,7 +34,6 @@ asyncchecksuite "sales state 'ignored'": state = SaleIgnored.new() test "calls onCleanUp with values assigned to SaleIgnored": - state = SaleIgnored(reprocessSlot: true, returnBytes: true) + state = SaleIgnored(reprocessSlot: true) discard await state.run(agent) - check eventually returnBytesWas == true check eventually reprocessSlotWas == true diff --git a/tests/codex/sales/states/testpreparing.nim b/tests/codex/sales/states/testpreparing.nim index 99d9c7fe..802489a1 100644 --- a/tests/codex/sales/states/testpreparing.nim +++ b/tests/codex/sales/states/testpreparing.nim @@ -13,6 +13,7 @@ import pkg/codex/sales/salesagent import pkg/codex/sales/salescontext import pkg/codex/sales/reservations import pkg/codex/stores/repostore 
+import times import ../../../asynctest import ../../helpers import ../../examples @@ -39,6 +40,8 @@ asyncchecksuite "sales state 'preparing'": duration = request.ask.duration + 60.uint64, minPricePerBytePerSecond = request.ask.pricePerBytePerSecond, totalCollateral = request.ask.collateralPerSlot * request.ask.slots.u256, + enabled = true, + until = 0.SecondsSince1970, ) let repoDs = SQLiteDatastore.new(Memory).tryGet() let metaDs = SQLiteDatastore.new(Memory).tryGet() @@ -52,6 +55,8 @@ asyncchecksuite "sales state 'preparing'": context.reservations = reservations agent = newSalesAgent(context, request.id, slotIndex, request.some) + market.requestEnds[request.id] = clock.now() + cast[int64](request.ask.duration) + teardown: await repo.stop() @@ -67,10 +72,14 @@ asyncchecksuite "sales state 'preparing'": let next = state.onSlotFilled(request.id, slotIndex) check !next of SaleFilled - proc createAvailability() {.async.} = + proc createAvailability(enabled = true) {.async.} = let a = await reservations.createAvailability( - availability.totalSize, availability.duration, - availability.minPricePerBytePerSecond, availability.totalCollateral, + availability.totalSize, + availability.duration, + availability.minPricePerBytePerSecond, + availability.totalCollateral, + enabled, + until = 0.SecondsSince1970, ) availability = a.get @@ -79,7 +88,11 @@ asyncchecksuite "sales state 'preparing'": check next of SaleIgnored let ignored = SaleIgnored(next) check ignored.reprocessSlot - check ignored.returnBytes == false + + test "run switches to ignored when availability is not enabled": + await createAvailability(enabled = false) + let next = !(await state.run(agent)) + check next of SaleIgnored test "run switches to slot reserving state after reservation created": await createAvailability() @@ -94,7 +107,6 @@ asyncchecksuite "sales state 'preparing'": check next of SaleIgnored let ignored = SaleIgnored(next) check ignored.reprocessSlot - check ignored.returnBytes == false test 
"run switches to errored when reserve fails with other error": await createAvailability() diff --git a/tests/codex/sales/states/testslotreserving.nim b/tests/codex/sales/states/testslotreserving.nim index 0e2e2cc7..b223338a 100644 --- a/tests/codex/sales/states/testslotreserving.nim +++ b/tests/codex/sales/states/testslotreserving.nim @@ -67,4 +67,3 @@ asyncchecksuite "sales state 'SlotReserving'": let next = !(await state.run(agent)) check next of SaleIgnored check SaleIgnored(next).reprocessSlot == false - check SaleIgnored(next).returnBytes diff --git a/tests/codex/sales/testreservations.nim b/tests/codex/sales/testreservations.nim index 49df059d..ff5e153c 100644 --- a/tests/codex/sales/testreservations.nim +++ b/tests/codex/sales/testreservations.nim @@ -1,5 +1,5 @@ import std/random - +import std/times import pkg/questionable import pkg/questionable/results import pkg/chronos @@ -8,6 +8,7 @@ import pkg/datastore import pkg/codex/stores import pkg/codex/errors import pkg/codex/sales +import pkg/codex/clock import pkg/codex/utils/json import ../../asynctest @@ -39,19 +40,22 @@ asyncchecksuite "Reservations module": await repoTmp.destroyDb() await metaTmp.destroyDb() - proc createAvailability(): Availability = + proc createAvailability(enabled = true, until = 0.SecondsSince1970): Availability = let example = Availability.example(collateralPerByte) let totalSize = rand(100000 .. 
200000).uint64 let totalCollateral = totalSize.u256 * collateralPerByte let availability = waitFor reservations.createAvailability( - totalSize, example.duration, example.minPricePerBytePerSecond, totalCollateral + totalSize, example.duration, example.minPricePerBytePerSecond, totalCollateral, + enabled, until, ) return availability.get proc createReservation(availability: Availability): Reservation = let size = rand(1 ..< availability.freeSize.int) + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let reservation = waitFor reservations.createReservation( - availability.id, size.uint64, RequestId.example, uint64.example, 1.u256 + availability.id, size.uint64, RequestId.example, uint64.example, 1.u256, + validUntil, ) return reservation.get @@ -64,8 +68,12 @@ asyncchecksuite "Reservations module": check (await reservations.all(Availability)).get.len == 0 test "generates unique ids for storage availability": - let availability1 = Availability.init(1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256) - let availability2 = Availability.init(1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256) + let availability1 = Availability.init( + 1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256, true, 0.SecondsSince1970 + ) + let availability2 = Availability.init( + 1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256, true, 0.SecondsSince1970 + ) check availability1.id != availability2.id test "can reserve available storage": @@ -128,20 +136,24 @@ asyncchecksuite "Reservations module": test "cannot create reservation with non-existant availability": let availability = Availability.example + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let created = await reservations.createReservation( - availability.id, uint64.example, RequestId.example, uint64.example, 1.u256 + availability.id, uint64.example, RequestId.example, uint64.example, 1.u256, + validUntil, ) check created.isErr check created.error of NotExistsError test "cannot create reservation larger than availability size": let 
availability = createAvailability() + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let created = await reservations.createReservation( availability.id, availability.totalSize + 1, RequestId.example, uint64.example, UInt256.example, + validUntil, ) check created.isErr check created.error of BytesOutOfBoundsError @@ -149,23 +161,26 @@ test "cannot create reservation larger than availability size - concurrency test": proc concurrencyTest(): Future[void] {.async.} = let availability = createAvailability() + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let one = reservations.createReservation( availability.id, availability.totalSize - 1, RequestId.example, uint64.example, UInt256.example, + validUntil, ) let two = reservations.createReservation( availability.id, availability.totalSize, RequestId.example, uint64.example, - UInt256.example, + UInt256.example, validUntil, ) let oneResult = await one let twoResult = await two check oneResult.isErr or twoResult.isErr + if oneResult.isErr: check oneResult.error of BytesOutOfBoundsError if twoResult.isErr: @@ -259,6 +274,48 @@ asyncchecksuite "Reservations module": check isOk await reservations.update(availability) check (repo.quotaReservedBytes - origQuota) == 100.NBytes + test "create availability set enabled to true by default": + let availability = createAvailability() + check availability.enabled == true + + test "create availability set until to 0 by default": + let availability = createAvailability() + check availability.until == 0.SecondsSince1970 + + test "create availability with correct values": + var until = getTime().toUnix() + + let availability = createAvailability(enabled = false, until = until) + check availability.enabled == false + check availability.until == until + + test "create an availability fails when trying to set until with a negative value": + let totalSize = rand(100000 .. 
200000).uint64 + let example = Availability.example(collateralPerByte) + let totalCollateral = totalSize.u256 * collateralPerByte + + let result = await reservations.createAvailability( + totalSize, + example.duration, + example.minPricePerBytePerSecond, + totalCollateral, + enabled = true, + until = -1.SecondsSince1970, + ) + + check result.isErr + check result.error of UntilOutOfBoundsError + + test "update an availability fails when trying set until with a negative value": + let until = getTime().toUnix() + let availability = createAvailability(until = until) + + availability.until = -1 + + let result = await reservations.update(availability) + check result.isErr + check result.error of UntilOutOfBoundsError + test "reservation can be partially released": let availability = createAvailability() let reservation = createReservation(availability) @@ -285,7 +342,9 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved called when availability is created": var added: Availability - reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc( + a: Availability + ) {.gcsafe, async: (raises: []).} = added = a let availability = createAvailability() @@ -295,7 +354,9 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved called when availability size is increased": var availability = createAvailability() var added: Availability - reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc( + a: Availability + ) {.gcsafe, async: (raises: []).} = added = a availability.freeSize += 1 discard await reservations.update(availability) @@ -305,7 +366,21 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved is not called when availability size is decreased": var availability = createAvailability() var called = false - reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc( + a: 
Availability + ) {.gcsafe, async: (raises: []).} = + called = true + availability.freeSize -= 1.uint64 + discard await reservations.update(availability) + + check not called + + test "OnAvailabilitySaved is not called when availability is disabled": + var availability = createAvailability(enabled = false) + var called = false + reservations.OnAvailabilitySaved = proc( + a: Availability + ) {.gcsafe, async: (raises: []).} = called = true availability.freeSize -= 1 discard await reservations.update(availability) @@ -315,7 +390,7 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved called when availability duration is increased": var availability = createAvailability() var added: Availability - reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = added = a availability.duration += 1 discard await reservations.update(availability) @@ -325,7 +400,7 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved is not called when availability duration is decreased": var availability = createAvailability() var called = false - reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = called = true availability.duration -= 1 discard await reservations.update(availability) @@ -335,7 +410,7 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved called when availability minPricePerBytePerSecond is increased": var availability = createAvailability() var added: Availability - reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = added = a availability.minPricePerBytePerSecond += 1.u256 discard await reservations.update(availability) @@ -345,7 +420,7 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved is not called when availability 
minPricePerBytePerSecond is decreased": var availability = createAvailability() var called = false - reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = called = true availability.minPricePerBytePerSecond -= 1.u256 discard await reservations.update(availability) @@ -355,7 +430,7 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved called when availability totalCollateral is increased": var availability = createAvailability() var added: Availability - reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = added = a availability.totalCollateral = availability.totalCollateral + 1.u256 discard await reservations.update(availability) @@ -365,7 +440,7 @@ asyncchecksuite "Reservations module": test "OnAvailabilitySaved is not called when availability totalCollateral is decreased": var availability = createAvailability() var called = false - reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} = + reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} = called = true availability.totalCollateral = availability.totalCollateral - 1.u256 discard await reservations.update(availability) @@ -374,32 +449,69 @@ asyncchecksuite "Reservations module": test "availabilities can be found": let availability = createAvailability() - + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let found = await reservations.findAvailability( availability.freeSize, availability.duration, - availability.minPricePerBytePerSecond, collateralPerByte, + availability.minPricePerBytePerSecond, collateralPerByte, validUntil, ) check found.isSome check found.get == availability + test "does not find an availability when is it disabled": + let availability = createAvailability(enabled = false) + let validUntil = getTime().toUnix() + 
30.SecondsSince1970 + let found = await reservations.findAvailability( + availability.freeSize, availability.duration, + availability.minPricePerBytePerSecond, collateralPerByte, validUntil, + ) + + check found.isNone + + test "finds an availability when the until date is after the duration": + let example = Availability.example(collateralPerByte) + let until = getTime().toUnix() + example.duration.SecondsSince1970 + let availability = createAvailability(until = until) + let validUntil = getTime().toUnix() + 30.SecondsSince1970 + let found = await reservations.findAvailability( + availability.freeSize, availability.duration, + availability.minPricePerBytePerSecond, collateralPerByte, validUntil, + ) + + check found.isSome + check found.get == availability + + test "does not find an availability when the until date is before the duration": + let example = Availability.example(collateralPerByte) + let until = getTime().toUnix() + 1.SecondsSince1970 + let availability = createAvailability(until = until) + let validUntil = getTime().toUnix() + 30.SecondsSince1970 + let found = await reservations.findAvailability( + availability.freeSize, availability.duration, + availability.minPricePerBytePerSecond, collateralPerByte, validUntil, + ) + + check found.isNone + test "non-matching availabilities are not found": let availability = createAvailability() - + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let found = await reservations.findAvailability( availability.freeSize + 1, availability.duration, availability.minPricePerBytePerSecond, collateralPerByte, + validUntil, ) check found.isNone test "non-existent availability cannot be found": let availability = Availability.example + let validUntil = getTime().toUnix() + 30.SecondsSince1970 let found = await reservations.findAvailability( availability.freeSize, availability.duration, - availability.minPricePerBytePerSecond, collateralPerByte, + availability.minPricePerBytePerSecond, collateralPerByte, validUntil, ) 
check found.isNone @@ -420,7 +532,12 @@ asyncchecksuite "Reservations module": test "fails to create availability with size that is larger than available quota": let created = await reservations.createAvailability( - DefaultQuotaBytes.uint64 + 1, uint64.example, UInt256.example, UInt256.example + DefaultQuotaBytes.uint64 + 1, + uint64.example, + UInt256.example, + UInt256.example, + enabled = true, + until = 0.SecondsSince1970, ) check created.isErr check created.error of ReserveFailedError diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index 74ea8a2b..f4d9cbae 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -14,6 +14,7 @@ import pkg/codex/stores/repostore import pkg/codex/blocktype as bt import pkg/codex/node import pkg/codex/utils/asyncstatemachine +import times import ../../asynctest import ../helpers import ../helpers/mockmarket @@ -152,6 +153,8 @@ asyncchecksuite "Sales": duration = 60.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, + enabled = true, + until = 0.SecondsSince1970, ) request = StorageRequest( ask: StorageAsk( @@ -221,10 +224,11 @@ asyncchecksuite "Sales": let key = availability.id.key.get (waitFor reservations.get(key, Availability)).get - proc createAvailability() = + proc createAvailability(enabled = true, until = 0.SecondsSince1970) = let a = waitFor reservations.createAvailability( availability.totalSize, availability.duration, - availability.minPricePerBytePerSecond, availability.totalCollateral, + availability.minPricePerBytePerSecond, availability.totalCollateral, enabled, + until, ) availability = a.get # update id @@ -380,14 +384,14 @@ asyncchecksuite "Sales": check eventually getAvailability().freeSize == availability.freeSize - request.ask.slotSize - test "non-downloaded bytes are returned to availability once finished": + test "bytes are returned to availability once finished": var slotIndex = 0.uint64 sales.onStore = 
proc( request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = slotIndex = slot let blk = bt.Block.new(@[1.byte]).get - await onBatch(@[blk]) + await onBatch(blk.repeat(request.ask.slotSize)) let sold = newFuture[void]() sales.onSale = proc(request: StorageRequest, slotIndex: uint64) = @@ -403,7 +407,7 @@ asyncchecksuite "Sales": market.slotState[request.slotId(slotIndex)] = SlotState.Finished clock.advance(request.ask.duration.int64) - check eventually getAvailability().freeSize == origSize - 1 + check eventually getAvailability().freeSize == origSize test "ignores download when duration not long enough": availability.duration = request.ask.duration - 1 @@ -439,6 +443,34 @@ asyncchecksuite "Sales": market.slotState[request.slotId(3.uint64)] = SlotState.Filled check wasIgnored() + test "ignores request when availability is not enabled": + createAvailability(enabled = false) + await market.requestStorage(request) + check wasIgnored() + + test "ignores request when availability until terminates before the duration": + let until = getTime().toUnix() + createAvailability(until = until) + await market.requestStorage(request) + + check wasIgnored() + + test "retrieves request when availability until terminates after the duration": + let requestEnd = getTime().toUnix() + cast[int64](request.ask.duration) + let until = requestEnd + 1 + createAvailability(until = until) + + var storingRequest: StorageRequest + sales.onStore = proc( + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false + ): Future[?!void] {.async.} = + storingRequest = request + return success() + + market.requestEnds[request.id] = requestEnd + await market.requestStorage(request) + check eventually storingRequest == request + test "retrieves and stores data locally": var storingRequest: StorageRequest var storingSlot: uint64 @@ -563,6 +595,8 @@ asyncchecksuite "Sales": # by other slots request.ask.slots = 1 
market.requestExpiry[request.id] = expiry + market.requestEnds[request.id] = + getTime().toUnix() + cast[int64](request.ask.duration) let origSize = availability.freeSize sales.onStore = proc( @@ -621,10 +655,28 @@ asyncchecksuite "Sales": test "deletes inactive reservations on load": createAvailability() + let validUntil = getTime().toUnix() + 30.SecondsSince1970 discard await reservations.createReservation( - availability.id, 100.uint64, RequestId.example, 0.uint64, UInt256.example + availability.id, 100.uint64, RequestId.example, 0.uint64, UInt256.example, + validUntil, ) check (await reservations.all(Reservation)).get.len == 1 await sales.load() check (await reservations.all(Reservation)).get.len == 0 check getAvailability().freeSize == availability.freeSize # was restored + + test "update an availability fails when trying change the until date before an existing reservation": + let until = getTime().toUnix() + 300.SecondsSince1970 + createAvailability(until = until) + + market.requestEnds[request.id] = + getTime().toUnix() + cast[int64](request.ask.duration) + + await market.requestStorage(request) + await allowRequestToStart() + + availability.until = getTime().toUnix() + + let result = await reservations.update(availability) + check result.isErr + check result.error of UntilOutOfBoundsError diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index 5d5f0cc2..d7ed3df2 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -294,6 +294,8 @@ proc postAvailabilityRaw*( client: CodexClient, totalSize, duration: uint64, minPricePerBytePerSecond, totalCollateral: UInt256, + enabled: ?bool = bool.none, + until: ?SecondsSince1970 = SecondsSince1970.none, ): Future[HttpClientResponseRef] {.async: (raises: [CancelledError, HttpError]).} = ## Post sales availability endpoint ## @@ -304,18 +306,27 @@ proc postAvailabilityRaw*( "duration": duration, "minPricePerBytePerSecond": minPricePerBytePerSecond, 
"totalCollateral": totalCollateral, + "enabled": enabled, + "until": until, } - return await client.post(url, $json) proc postAvailability*( client: CodexClient, totalSize, duration: uint64, minPricePerBytePerSecond, totalCollateral: UInt256, + enabled: ?bool = bool.none, + until: ?SecondsSince1970 = SecondsSince1970.none, ): Future[?!Availability] {.async: (raises: [CancelledError, HttpError]).} = let response = await client.postAvailabilityRaw( - totalSize, duration, minPricePerBytePerSecond, totalCollateral + totalSize = totalSize, + duration = duration, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + enabled = enabled, + until = until, ) + let body = await response.body doAssert response.status == 201, @@ -327,6 +338,8 @@ proc patchAvailabilityRaw*( availabilityId: AvailabilityId, totalSize, freeSize, duration: ?uint64 = uint64.none, minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, + enabled: ?bool = bool.none, + until: ?SecondsSince1970 = SecondsSince1970.none, ): Future[HttpClientResponseRef] {. async: (raw: true, raises: [CancelledError, HttpError]) .} = @@ -352,6 +365,12 @@ proc patchAvailabilityRaw*( if totalCollateral =? totalCollateral: json["totalCollateral"] = %totalCollateral + if enabled =? enabled: + json["enabled"] = %enabled + + if until =? 
until: + json["until"] = %until + client.patch(url, $json) proc patchAvailability*( @@ -359,6 +378,8 @@ proc patchAvailability*( availabilityId: AvailabilityId, totalSize, duration: ?uint64 = uint64.none, minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, + enabled: ?bool = bool.none, + until: ?SecondsSince1970 = SecondsSince1970.none, ): Future[void] {.async: (raises: [CancelledError, HttpError]).} = let response = await client.patchAvailabilityRaw( availabilityId, @@ -366,8 +387,10 @@ proc patchAvailability*( duration = duration, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, + enabled = enabled, + until = until, ) - doAssert response.status == 200, "expected 200 OK, got " & $response.status + doAssert response.status == 204, "expected No Content, got " & $response.status proc getAvailabilities*( client: CodexClient diff --git a/tests/integration/testmarketplace.nim b/tests/integration/testmarketplace.nim index dee3645e..40f394e0 100644 --- a/tests/integration/testmarketplace.nim +++ b/tests/integration/testmarketplace.nim @@ -1,3 +1,5 @@ +import std/times +import std/httpclient import ../examples import ../contracts/time import ../contracts/deployment diff --git a/tests/integration/testproofs.nim b/tests/integration/testproofs.nim index b0ede765..c49b7b6f 100644 --- a/tests/integration/testproofs.nim +++ b/tests/integration/testproofs.nim @@ -275,7 +275,9 @@ marketplacesuite "Simulate invalid proofs": # totalSize=slotSize, # should match 1 slot only # duration=totalPeriods.periods.u256, # minPricePerBytePerSecond=minPricePerBytePerSecond, - # totalCollateral=slotSize * minPricePerBytePerSecond + # totalCollateral=slotSize * minPricePerBytePerSecond, + # enabled = true.some, + # until = 0.SecondsSince1970.some, # ) # let cid = client0.upload(data).get diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 415658c1..57e38b39 100644 --- a/tests/integration/testrestapi.nim 
+++ b/tests/integration/testrestapi.nim @@ -35,6 +35,7 @@ twonodessuite "REST API": duration = 2.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, + enabled = true.some, ) ).get let space = (await client1.space()).tryGet() diff --git a/tests/integration/testrestapivalidation.nim b/tests/integration/testrestapivalidation.nim index 00caefdd..adeffa77 100644 --- a/tests/integration/testrestapivalidation.nim +++ b/tests/integration/testrestapivalidation.nim @@ -364,5 +364,21 @@ asyncchecksuite "Rest API validation": check responseBefore.status == 422 check (await responseBefore.body) == "Collateral per byte must be greater than zero" + test "creating availability fails when until is negative": + let totalSize = 12.uint64 + let minPricePerBytePerSecond = 1.u256 + let totalCollateral = totalSize.u256 * minPricePerBytePerSecond + let response = await client.postAvailabilityRaw( + totalSize = totalSize, + duration = 2.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + until = -1.SecondsSince1970.some, + ) + + check: + response.status == 422 + (await response.body) == "Cannot set until to a negative value" + waitFor node.stop() node.removeDataDir() diff --git a/tests/integration/testsales.nim b/tests/integration/testsales.nim index 5e9b26df..ef999990 100644 --- a/tests/integration/testsales.nim +++ b/tests/integration/testsales.nim @@ -1,4 +1,5 @@ import std/httpclient +import std/times import pkg/codex/contracts from pkg/codex/stores/repostore/types import DefaultQuotaBytes import ./twonodes @@ -17,22 +18,14 @@ proc findItem[T](items: seq[T], item: T): ?!T = multinodesuite "Sales": let salesConfig = NodeConfigs( - clients: CodexConfigs - .init(nodes = 1) - .withLogFile() - .withLogTopics( - "node", "marketplace", "sales", "reservations", "node", "proving", "clock" - ).some, - providers: CodexConfigs - .init(nodes = 1) - .withLogFile() - .withLogTopics( - "node", "marketplace", 
"sales", "reservations", "node", "proving", "clock" - ).some, + clients: CodexConfigs.init(nodes = 1).some, + providers: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") + .some, ) - let minPricePerBytePerSecond = 1.u256 - var host: CodexClient var client: CodexClient @@ -80,11 +73,15 @@ multinodesuite "Sales": ) ).get + var until = getTime().toUnix() + await host.patchAvailability( availability.id, duration = 100.uint64.some, minPricePerBytePerSecond = 2.u256.some, totalCollateral = 200.u256.some, + enabled = false.some, + until = until.some, ) let updatedAvailability = @@ -94,6 +91,8 @@ multinodesuite "Sales": check updatedAvailability.totalCollateral == 200 check updatedAvailability.totalSize == 140000.uint64 check updatedAvailability.freeSize == 140000.uint64 + check updatedAvailability.enabled == false + check updatedAvailability.until == until test "updating availability - updating totalSize", salesConfig: let availability = ( @@ -105,6 +104,7 @@ multinodesuite "Sales": ) ).get await host.patchAvailability(availability.id, totalSize = 100000.uint64.some) + let updatedAvailability = ((await host.getAvailabilities()).get).findItem(availability).get check updatedAvailability.totalSize == 100000 @@ -165,3 +165,72 @@ multinodesuite "Sales": ((await host.getAvailabilities()).get).findItem(availability).get check newUpdatedAvailability.totalSize == originalSize + 20000 check newUpdatedAvailability.freeSize - updatedAvailability.freeSize == 20000 + + test "updating availability fails with until negative", salesConfig: + let availability = ( + await host.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) + ).get + + let response = + await 
host.patchAvailabilityRaw(availability.id, until = -1.SecondsSince1970.some) + + check: + (await response.body) == "Cannot set until to a negative value" + + test "returns an error when trying to update the until date before an existing request is finished", + salesConfig: + let size = 0xFFFFFF.uint64 + let data = await RandomChunker.example(blocks = 8) + let duration = 20 * 60.uint64 + let minPricePerBytePerSecond = 3.u256 + let collateralPerByte = 1.u256 + let ecNodes = 3.uint + let ecTolerance = 1.uint + + # host makes storage available + let availability = ( + await host.postAvailability( + totalSize = size, + duration = duration, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, + ) + ).get + + # client requests storage + let cid = (await client.upload(data)).get + let id = ( + await client.requestStorage( + cid, + duration = duration, + pricePerBytePerSecond = minPricePerBytePerSecond, + proofProbability = 3.u256, + expiry = 10 * 60.uint64, + collateralPerByte = collateralPerByte, + nodes = ecNodes, + tolerance = ecTolerance, + ) + ).get + + check eventually( + await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000 + ) + let purchase = (await client.getPurchase(id)).get + check purchase.error == none string + + let unixNow = getTime().toUnix() + let until = unixNow + 1.SecondsSince1970 + + let response = await host.patchAvailabilityRaw( + availabilityId = availability.id, until = until.some + ) + + check: + response.status == 422 + (await response.body) == + "Until parameter must be greater or equal to the longest currently hosted slot" diff --git a/vendor/nim-datastore b/vendor/nim-datastore index d67860ad..5778e373 160000 --- a/vendor/nim-datastore +++ b/vendor/nim-datastore @@ -1 +1 @@ -Subproject commit d67860add63fd23cdacde1d3da8f4739c2660c2d +Subproject commit 5778e373fa97286f389e0aef61f1e8f30a934dab From 0032e60398f800527e30315ed6c39985cc79e11c Mon Sep 17 00:00:00 2001 
From: Arnaud Date: Wed, 26 Mar 2025 16:17:39 +0100 Subject: [PATCH 40/40] fix(marketplace): catch Marketplace_SlotIsFree and continue the cancelled process (#1139) * Catch Marketplace_SlotIsFree and continue the cancelled process * Add log message when the slot is free during failed state * Reduce log level to debug for slot free error * Separate slot mock errors * Initialize variable in setup * Improve tests * Remove non-meaningful checks and rename test * Remove the Option in the error setters * Return collateral when the state is cancelled only if the slot is filled by the host * Do not propagate AsyncLockError * Wrap contract error into specific error type * Remove debug message * Catch only SlotStateMismatchError in cancelled * Fix error * Remove returnBytesWas * Use MarketError after raises pragma were defined * Fix typo * Fix lint --- codex/contracts/market.nim | 39 ++++++------ codex/market.nim | 4 +- codex/sales/states/cancelled.nim | 32 +++++++--- codex/sales/states/failed.nim | 1 + tests/codex/helpers/mockmarket.nim | 27 ++++++++- tests/codex/sales/states/testcancelled.nim | 69 ++++++++++++++++++++-- 6 files changed, 139 insertions(+), 33 deletions(-) diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 74694285..8b235876 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -221,7 +221,7 @@ method requestExpiresAt*( method getHost( market: OnChainMarket, requestId: RequestId, slotIndex: uint64 -): Future[?Address] {.async.} = +): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError("Failed to get slot's host"): let slotId = slotId(requestId, slotIndex) let address = await market.contract.getHost(slotId) @@ -232,7 +232,7 @@ method getHost( method currentCollateral*( market: OnChainMarket, slotId: SlotId -): Future[UInt256] {.async.} = +): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} = convertEthersError("Failed to get slot's current collateral"): return 
await market.contract.currentCollateral(slotId) @@ -270,22 +270,27 @@ method freeSlot*( market: OnChainMarket, slotId: SlotId ) {.async: (raises: [CancelledError, MarketError]).} = convertEthersError("Failed to free slot"): - var freeSlot: Future[Confirmable] - if rewardRecipient =? market.rewardRecipient: - # If --reward-recipient specified, use it as the reward recipient, and use - # the SP's address as the collateral recipient - let collateralRecipient = await market.getSigner() - freeSlot = market.contract.freeSlot( - slotId, - rewardRecipient, # --reward-recipient - collateralRecipient, - ) # SP's address - else: - # Otherwise, use the SP's address as both the reward and collateral - # recipient (the contract will use msg.sender for both) - freeSlot = market.contract.freeSlot(slotId) + try: + var freeSlot: Future[Confirmable] + if rewardRecipient =? market.rewardRecipient: + # If --reward-recipient specified, use it as the reward recipient, and use + # the SP's address as the collateral recipient + let collateralRecipient = await market.getSigner() + freeSlot = market.contract.freeSlot( + slotId, + rewardRecipient, # --reward-recipient + collateralRecipient, + ) # SP's address + else: + # Otherwise, use the SP's address as both the reward and collateral + # recipient (the contract will use msg.sender for both) + freeSlot = market.contract.freeSlot(slotId) - discard await freeSlot.confirm(1) + discard await freeSlot.confirm(1) + except Marketplace_SlotIsFree as parent: + raise newException( + SlotStateMismatchError, "Failed to free slot, slot is already free", parent + ) method withdrawFunds( market: OnChainMarket, requestId: RequestId diff --git a/codex/market.nim b/codex/market.nim index 71cad9a9..31c0687f 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -148,12 +148,12 @@ method requestExpiresAt*( method getHost*( market: Market, requestId: RequestId, slotIndex: uint64 -): Future[?Address] {.base, async.} = +): Future[?Address] {.base, async: (raises: 
[CancelledError, MarketError]).} = raiseAssert("not implemented") method currentCollateral*( market: Market, slotId: SlotId -): Future[UInt256] {.base, async.} = +): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} = raiseAssert("not implemented") method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} = diff --git a/codex/sales/states/cancelled.nim b/codex/sales/states/cancelled.nim index 2c240e15..f3c755a3 100644 --- a/codex/sales/states/cancelled.nim +++ b/codex/sales/states/cancelled.nim @@ -12,6 +12,14 @@ type SaleCancelled* = ref object of SaleState method `$`*(state: SaleCancelled): string = "SaleCancelled" +proc slotIsFilledByMe( + market: Market, requestId: RequestId, slotIndex: uint64 +): Future[bool] {.async: (raises: [CancelledError, MarketError]).} = + let host = await market.getHost(requestId, slotIndex) + let me = await market.getSigner() + + return host == me.some + method run*( state: SaleCancelled, machine: Machine ): Future[?State] {.async: (raises: []).} = @@ -23,19 +31,27 @@ method run*( raiseAssert "no sale request" try: - let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Collecting collateral and partial payout", - requestId = data.requestId, slotIndex = data.slotIndex - let currentCollateral = await market.currentCollateral(slot.id) - await market.freeSlot(slot.id) + var returnedCollateral = UInt256.none + + if await slotIsFilledByMe(market, data.requestId, data.slotIndex): + debug "Collecting collateral and partial payout", + requestId = data.requestId, slotIndex = data.slotIndex + + let slot = Slot(request: request, slotIndex: data.slotIndex) + let currentCollateral = await market.currentCollateral(slot.id) + + try: + await market.freeSlot(slot.id) + except SlotStateMismatchError as e: + warn "Failed to free slot because slot is already free", error = e.msg + + returnedCollateral = currentCollateral.some if onClear =? agent.context.onClear and request =? 
data.request: onClear(request, data.slotIndex) if onCleanUp =? agent.onCleanUp: - await onCleanUp( - reprocessSlot = false, returnedCollateral = some currentCollateral - ) + await onCleanUp(reprocessSlot = false, returnedCollateral = returnedCollateral) warn "Sale cancelled due to timeout", requestId = data.requestId, slotIndex = data.slotIndex diff --git a/codex/sales/states/failed.nim b/codex/sales/states/failed.nim index b0d6a7cd..f1490d20 100644 --- a/codex/sales/states/failed.nim +++ b/codex/sales/states/failed.nim @@ -28,6 +28,7 @@ method run*( let slot = Slot(request: request, slotIndex: data.slotIndex) debug "Removing slot from mySlots", requestId = data.requestId, slotIndex = data.slotIndex + await market.freeSlot(slot.id) let error = newException(SaleFailedError, "Sale failed") diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index 03e76762..55abeb14 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -8,6 +8,7 @@ import pkg/codex/market import pkg/codex/contracts/requests import pkg/codex/contracts/proofs import pkg/codex/contracts/config +import pkg/questionable/results from pkg/ethers import BlockTag import codex/clock @@ -48,6 +49,8 @@ type canReserveSlot*: bool errorOnReserveSlot*: ?(ref MarketError) errorOnFillSlot*: ?(ref MarketError) + errorOnFreeSlot*: ?(ref MarketError) + errorOnGetHost*: ?(ref MarketError) clock: ?Clock Fulfillment* = object @@ -232,7 +235,10 @@ method requestExpiresAt*( method getHost*( market: MockMarket, requestId: RequestId, slotIndex: uint64 -): Future[?Address] {.async.} = +): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} = + if error =? 
market.errorOnGetHost: + raise error + for slot in market.filled: if slot.requestId == requestId and slot.slotIndex == slotIndex: return some slot.host @@ -240,7 +246,7 @@ method getHost*( method currentCollateral*( market: MockMarket, slotId: SlotId -): Future[UInt256] {.async.} = +): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} = for slot in market.filled: if slotId == slotId(slot.requestId, slot.slotIndex): return slot.collateral @@ -321,6 +327,9 @@ method fillSlot*( method freeSlot*( market: MockMarket, slotId: SlotId ) {.async: (raises: [CancelledError, MarketError]).} = + if error =? market.errorOnFreeSlot: + raise error + market.freed.add(slotId) for s in market.filled: if slotId(s.requestId, s.slotIndex) == slotId: @@ -411,6 +420,20 @@ func setErrorOnFillSlot*(market: MockMarket, error: ref MarketError) = else: some error +func setErrorOnFreeSlot*(market: MockMarket, error: ref MarketError) = + market.errorOnFreeSlot = + if error.isNil: + none (ref MarketError) + else: + some error + +func setErrorOnGetHost*(market: MockMarket, error: ref MarketError) = + market.errorOnGetHost = + if error.isNil: + none (ref MarketError) + else: + some error + method subscribeRequests*( market: MockMarket, callback: OnRequest ): Future[Subscription] {.async.} = diff --git a/tests/codex/sales/states/testcancelled.nim b/tests/codex/sales/states/testcancelled.nim index ab450200..6eaf1f5a 100644 --- a/tests/codex/sales/states/testcancelled.nim +++ b/tests/codex/sales/states/testcancelled.nim @@ -2,9 +2,11 @@ import pkg/questionable import pkg/chronos import pkg/codex/contracts/requests import pkg/codex/sales/states/cancelled +import pkg/codex/sales/states/errored import pkg/codex/sales/salesagent import pkg/codex/sales/salescontext import pkg/codex/market +from pkg/codex/utils/asyncstatemachine import State import ../../../asynctest import ../../examples @@ -22,8 +24,8 @@ asyncchecksuite "sales state 'cancelled'": var market: MockMarket var state: 
SaleCancelled var agent: SalesAgent - var reprocessSlotWas = bool.none - var returnedCollateralValue = UInt256.none + var reprocessSlotWas: ?bool + var returnedCollateralValue: ?UInt256 setup: market = MockMarket.new() @@ -37,8 +39,43 @@ asyncchecksuite "sales state 'cancelled'": agent = newSalesAgent(context, request.id, slotIndex, request.some) agent.onCleanUp = onCleanUp state = SaleCancelled.new() + reprocessSlotWas = bool.none + returnedCollateralValue = UInt256.none + teardown: + reprocessSlotWas = bool.none + returnedCollateralValue = UInt256.none test "calls onCleanUp with reprocessSlot = true, and returnedCollateral = currentCollateral": + market.fillSlot( + requestId = request.id, + slotIndex = slotIndex, + proof = Groth16Proof.default, + host = await market.getSigner(), + collateral = currentCollateral, + ) + discard await state.run(agent) + check eventually reprocessSlotWas == some false + check eventually returnedCollateralValue == some currentCollateral + + test "completes the cancelled state when free slot error is raised and the collateral is returned when a host is hosting a slot": + market.fillSlot( + requestId = request.id, + slotIndex = slotIndex, + proof = Groth16Proof.default, + host = await market.getSigner(), + collateral = currentCollateral, + ) + + let error = + newException(SlotStateMismatchError, "Failed to free slot, slot is already free") + market.setErrorOnFreeSlot(error) + + let next = await state.run(agent) + check next == none State + check eventually reprocessSlotWas == some false + check eventually returnedCollateralValue == some currentCollateral + + test "completes the cancelled state when free slot error is raised and the collateral is not returned when a host is not hosting a slot": market.fillSlot( requestId = request.id, slotIndex = slotIndex, @@ -46,6 +83,30 @@ asyncchecksuite "sales state 'cancelled'": host = Address.example, collateral = currentCollateral, ) - discard await state.run(agent) + + let error = + 
newException(SlotStateMismatchError, "Failed to free slot, slot is already free") + market.setErrorOnFreeSlot(error) + + let next = await state.run(agent) + check next == none State check eventually reprocessSlotWas == some false - check eventually returnedCollateralValue == some currentCollateral + check eventually returnedCollateralValue == UInt256.none + + test "calls onCleanUp and returns the collateral when an error is raised": + market.fillSlot( + requestId = request.id, + slotIndex = slotIndex, + proof = Groth16Proof.default, + host = Address.example, + collateral = currentCollateral, + ) + + let error = newException(MarketError, "") + market.setErrorOnGetHost(error) + + let next = !(await state.run(agent)) + + check next of SaleErrored + let errored = SaleErrored(next) + check errored.error == error