From 6708202a5f922086566bb0a8d5dc5d7994907a6d Mon Sep 17 00:00:00 2001 From: Ben Bierens <39762930+benbierens@users.noreply.github.com> Date: Wed, 19 Jul 2023 16:06:59 +0200 Subject: [PATCH 1/9] Makes rootHash private to Manifest object. (#488) * Makes rootHash private to Manifest object. * Locks down all fields of Manifest. * Review comments by Mark --- codex/manifest/coders.nim | 48 +++++++++------- codex/manifest/manifest.nim | 104 ++++++++++++++++++++++++++++++++++- codex/manifest/types.nim | 18 ------ codex/node.nim | 6 +- tests/codex/testmanifest.nim | 4 +- 5 files changed, 136 insertions(+), 44 deletions(-) diff --git a/codex/manifest/coders.nim b/codex/manifest/coders.nim index e0515ba0..0a3c894d 100644 --- a/codex/manifest/coders.nim +++ b/codex/manifest/coders.nim @@ -26,7 +26,7 @@ import ../errors import ../blocktype import ./types -func encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] = +proc encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] = ## Encode the manifest into a ``ManifestCodec`` ## multicodec container (Dag-pb) for now ## @@ -60,7 +60,7 @@ func encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] = # ``` # - let cid = !manifest.rootHash + let cid = ? manifest.cid var header = initProtoBuffer() header.write(1, cid.data.buffer) header.write(2, manifest.blockSize.uint32) @@ -145,22 +145,31 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest = if blocksLen.int != blocks.len: return failure("Total blocks and length of blocks in header don't match!") - var - self = Manifest( - rootHash: rootHashCid.some, - originalBytes: originalBytes.NBytes, - blockSize: blockSize.NBytes, - blocks: blocks, - hcodec: (? rootHashCid.mhash.mapFailure).mcodec, - codec: rootHashCid.mcodec, - version: rootHashCid.cidver, - protected: pbErasureInfo.buffer.len > 0) - - if self.protected: - self.ecK = ecK.int - self.ecM = ecM.int - self.originalCid = ? 
Cid.init(originalCid).mapFailure - self.originalLen = originalLen.int + let + self = if pbErasureInfo.buffer.len > 0: + Manifest.new( + rootHash = rootHashCid, + originalBytes = originalBytes.NBytes, + blockSize = blockSize.NBytes, + blocks = blocks, + version = rootHashCid.cidver, + hcodec = (? rootHashCid.mhash.mapFailure).mcodec, + codec = rootHashCid.mcodec, + ecK = ecK.int, + ecM = ecM.int, + originalCid = ? Cid.init(originalCid).mapFailure, + originalLen = originalLen.int + ) + else: + Manifest.new( + rootHash = rootHashCid, + originalBytes = originalBytes.NBytes, + blockSize = blockSize.NBytes, + blocks = blocks, + version = rootHashCid.cidver, + hcodec = (? rootHashCid.mhash.mapFailure).mcodec, + codec = rootHashCid.mcodec + ) ? self.verify() self.success @@ -172,9 +181,6 @@ proc encode*( ## Encode a manifest using `encoder` ## - if self.rootHash.isNone: - ? self.makeRoot() - encoder.encode(self) func decode*( diff --git a/codex/manifest/manifest.nim b/codex/manifest/manifest.nim index d29b8fea..d231f69c 100644 --- a/codex/manifest/manifest.nim +++ b/codex/manifest/manifest.nim @@ -27,6 +27,58 @@ import ./types export types +type + Manifest* = ref object of RootObj + rootHash: ?Cid # Root (tree) hash of the contained data set + originalBytes*: NBytes # Exact size of the original (uploaded) file + blockSize: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed) + blocks: seq[Cid] # Block Cid + version: CidVersion # Cid version + hcodec: MultiCodec # Multihash codec + codec: MultiCodec # Data set codec + case protected: bool # Protected datasets have erasure coded info + of true: + ecK: int # Number of blocks to encode + ecM: int # Number of resulting parity blocks + originalCid: Cid # The original Cid of the dataset being erasure coded + originalLen: int # The length of the original manifest + else: + discard + +############################################################ +# Accessors 
+############################################################ + +proc blockSize*(self: Manifest): NBytes = + self.blockSize + +proc blocks*(self: Manifest): seq[Cid] = + self.blocks + +proc version*(self: Manifest): CidVersion = + self.version + +proc hcodec*(self: Manifest): MultiCodec = + self.hcodec + +proc codec*(self: Manifest): MultiCodec = + self.codec + +proc protected*(self: Manifest): bool = + self.protected + +proc ecK*(self: Manifest): int = + self.ecK + +proc ecM*(self: Manifest): int = + self.ecM + +proc originalCid*(self: Manifest): Cid = + self.originalCid + +proc originalLen*(self: Manifest): int = + self.originalLen + ############################################################ # Operations on block list ############################################################ @@ -231,5 +283,55 @@ proc new*( decoder = ManifestContainers[$DagPBCodec] ): ?!Manifest = ## Create a manifest instance from given data - ## + ## Manifest.decode(data, decoder) + +proc new*( + T: type Manifest, + rootHash: Cid, + originalBytes: NBytes, + blockSize: NBytes, + blocks: seq[Cid], + version: CidVersion, + hcodec: MultiCodec, + codec: MultiCodec, + ecK: int, + ecM: int, + originalCid: Cid, + originalLen: int +): Manifest = + Manifest( + rootHash: rootHash.some, + originalBytes: originalBytes, + blockSize: blockSize, + blocks: blocks, + version: version, + hcodec: hcodec, + codec: codec, + protected: true, + ecK: ecK, + ecM: ecM, + originalCid: originalCid, + originalLen: originalLen + ) + +proc new*( + T: type Manifest, + rootHash: Cid, + originalBytes: NBytes, + blockSize: NBytes, + blocks: seq[Cid], + version: CidVersion, + hcodec: MultiCodec, + codec: MultiCodec +): Manifest = + Manifest( + rootHash: rootHash.some, + originalBytes: originalBytes, + blockSize: blockSize, + blocks: blocks, + version: version, + hcodec: hcodec, + codec: codec, + protected: false, + ) diff --git a/codex/manifest/types.nim b/codex/manifest/types.nim index f37c995f..cf940601 100644 --- 
a/codex/manifest/types.nim +++ b/codex/manifest/types.nim @@ -28,21 +28,3 @@ const ManifestContainers* = { $DagPBCodec: DagPBCoder() }.toTable - -type - Manifest* = ref object of RootObj - rootHash*: ?Cid # Root (tree) hash of the contained data set - originalBytes*: NBytes # Exact size of the original (uploaded) file - blockSize*: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed) - blocks*: seq[Cid] # Block Cid - version*: CidVersion # Cid version - hcodec*: MultiCodec # Multihash codec - codec*: MultiCodec # Data set codec - case protected*: bool # Protected datasets have erasure coded info - of true: - ecK*: int # Number of blocks to encode - ecM*: int # Number of resulting parity blocks - originalCid*: Cid # The original Cid of the dataset being erasure coded - originalLen*: int # The length of the original manifest - else: - discard diff --git a/codex/node.nim b/codex/node.nim index f580c33c..ebc3c540 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -63,7 +63,7 @@ proc findPeer*( peerId: PeerId ): Future[?PeerRecord] {.async.} = ## Find peer using the discovery service from the given CodexNode - ## + ## return await node.discovery.findPeer(peerId) proc connect*( @@ -221,7 +221,7 @@ proc store*( await stream.close() # Generate manifest - blockManifest.originalBytes = NBytes chunker.offset # store the exact file size + blockManifest.originalBytes = NBytes(chunker.offset) # store the exact file size without data =? 
blockManifest.encode(): return failure( newException(CodexError, "Could not generate dataset manifest!")) @@ -332,7 +332,7 @@ proc new*( contracts = Contracts.default ): CodexNodeRef = ## Create new instance of a Codex node, call `start` to run it - ## + ## CodexNodeRef( switch: switch, blockStore: store, diff --git a/tests/codex/testmanifest.nim b/tests/codex/testmanifest.nim index 07147008..5c43897e 100644 --- a/tests/codex/testmanifest.nim +++ b/tests/codex/testmanifest.nim @@ -62,6 +62,8 @@ checksuite "Manifest": Block.new(("Block " & $it).toBytes).tryGet().cid ) manifest = Manifest.new(blocks).tryGet() + + var protected = Manifest.new(manifest, 2, 2).tryGet() check: @@ -72,7 +74,7 @@ checksuite "Manifest": # fill up with empty Cid's for i in protected.rounded.. Date: Wed, 19 Jul 2023 17:35:16 +0200 Subject: [PATCH 2/9] Fix a typo in docs/TWOCLIENTTEST.md (#470) --- docs/TWOCLIENTTEST.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/TWOCLIENTTEST.md b/docs/TWOCLIENTTEST.md index 1f888fc6..be8fc040 100644 --- a/docs/TWOCLIENTTEST.md +++ b/docs/TWOCLIENTTEST.md @@ -78,7 +78,7 @@ This GET request will return the node's debug information. The response JSON sho Replace `` in the next command with the string value for `spr`, returned by the first node's `debug/info` response. Open a new terminal and run: -- Mac/Unx: `"build/codex" --data-dir="$(pwd)\Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=` +- Mac/Unx: `"build/codex" --data-dir="$(pwd)/Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=` - Windows: `"build/codex.exe" --data-dir="Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=` Notice we're using a new data-dir, and we've increased each port number by one. This is needed so that the new node won't try to open ports already in use by the first node. 
From 8bebc9042890cc0e28213a896981970ded9cac5d Mon Sep 17 00:00:00 2001 From: Jaremy Creechley Date: Wed, 19 Jul 2023 16:00:34 -0700 Subject: [PATCH 3/9] split windows CI tests into parts (#485) * setup to use env variable for parallel tests * use explicit targets * use target in includes * test windows split * try simpler logic * only use include in ci.yml * add cpu * fix name * re-add unit tests! * restore coverage * not sure why env broke there * startup node for part 1 & 2 tests * fixup part 1 & 2 tests * split windows into 3! --- .github/workflows/ci.yml | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 65080675..e8dbbe46 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,23 +12,38 @@ jobs: build: strategy: matrix: - os: [linux, macos, windows] include: - os: linux + cpu: amd64 builder: ubuntu-latest shell: bash --noprofile --norc -e -o pipefail + tests: all - os: macos + cpu: amd64 builder: macos-latest shell: bash --noprofile --norc -e -o pipefail + tests: all - os: windows + cpu: amd64 builder: windows-latest shell: msys2 + tests: unittest + - os: windows + cpu: amd64 + builder: windows-latest + shell: msys2 + tests: contract + - os: windows + cpu: amd64 + builder: windows-latest + shell: msys2 + tests: integration defaults: run: shell: ${{ matrix.shell }} {0} - name: '${{ matrix.os }}' + name: '${{ matrix.os }}-${{ matrix.cpu }}-tests-${{ matrix.tests }}' runs-on: ${{ matrix.builder }} timeout-minutes: 80 steps: @@ -44,7 +59,9 @@ jobs: shell: ${{ matrix.shell }} nim_version: ${{ env.nim_version }} + ## Part 1 Tests ## - name: Unit tests + if: matrix.tests == 'unittest' || matrix.tests == 'all' run: make -j${ncpu} test # workaround for https://github.com/NomicFoundation/hardhat/issues/3877 @@ -54,6 +71,7 @@ jobs: node-version: 18.15 - name: Start Ethereum node with Codex contracts + if: matrix.tests == 'contract' || matrix.tests == 
'integration' || matrix.tests == 'all' working-directory: vendor/codex-contracts-eth env: MSYS2_PATH_TYPE: inherit @@ -61,10 +79,14 @@ jobs: npm install npm start & + ## Part 2 Tests ## - name: Contract tests + if: matrix.tests == 'contract' || matrix.tests == 'all' run: make -j${ncpu} testContracts + ## Part 3 Tests ## - name: Integration tests + if: matrix.tests == 'integration' || matrix.tests == 'all' run: make -j${ncpu} testIntegration coverage: From 14c5270e836011f8168d2da6a389080d0324645b Mon Sep 17 00:00:00 2001 From: Ben Bierens <39762930+benbierens@users.noreply.github.com> Date: Thu, 20 Jul 2023 09:56:28 +0200 Subject: [PATCH 4/9] Add metrics (#478) * Adds metrics to block exchange. * Adds metrics to purchasing * Adds metrics to upload and download API * Adds metrics to the repo store * Fixes exception in repostore start. * Merge managed to mess up indentation. --- codex/blockexchange/engine/engine.nim | 23 +++++++++- codex/blockexchange/engine/pendingblocks.nim | 8 ++++ codex/namespaces.nim | 1 + codex/purchasing/states/cancelled.nim | 4 ++ codex/purchasing/states/error.nim | 4 ++ codex/purchasing/states/failed.nim | 4 ++ codex/purchasing/states/finished.nim | 4 ++ codex/purchasing/states/pending.nim | 4 ++ codex/purchasing/states/started.nim | 4 ++ codex/purchasing/states/submitted.nim | 4 ++ codex/purchasing/states/unknown.nim | 4 ++ codex/rest/api.nim | 6 +++ codex/stores/keyutils.nim | 1 + codex/stores/repostore.nim | 48 ++++++++++++++++++-- 14 files changed, 115 insertions(+), 4 deletions(-) diff --git a/codex/blockexchange/engine/engine.nim b/codex/blockexchange/engine/engine.nim index c517c72a..be4f3da3 100644 --- a/codex/blockexchange/engine/engine.nim +++ b/codex/blockexchange/engine/engine.nim @@ -15,6 +15,7 @@ import std/algorithm import pkg/chronos import pkg/chronicles import pkg/libp2p +import pkg/metrics import pkg/stint import ../../stores/blockstore @@ -36,6 +37,13 @@ export peers, pendingblocks, payments, discovery logScope: topics = 
"codex blockexcengine" +declareCounter(codexBlockExchangeWantHaveListsSent, "codex blockexchange wantHave lists sent") +declareCounter(codexBlockExchangeWantHaveListsReceived, "codex blockexchange wantHave lists received") +declareCounter(codexBlockExchangeWantBlockListsSent, "codex blockexchange wantBlock lists sent") +declareCounter(codexBlockExchangeWantBlockListsReceived, "codex blockexchange wantBlock lists received") +declareCounter(codexBlockExchangeBlocksSent, "codex blockexchange blocks sent") +declareCounter(codexBlockExchangeBlocksReceived, "codex blockexchange blocks received") + const DefaultMaxPeersPerRequest* = 10 DefaultTaskQueueSize = 100 @@ -190,6 +198,8 @@ proc requestBlock*( await b.sendWantBlock(cid, blockPeer) + codexBlockExchangeWantBlockListsSent.inc() + if (peers.len - 1) == 0: trace "No peers to send want list to", cid b.discovery.queueFindBlocksReq(@[cid]) @@ -197,6 +207,8 @@ proc requestBlock*( await b.sendWantHave(cid, blockPeer, toSeq(b.peers)) + codexBlockExchangeWantHaveListsSent.inc() + return await blk proc blockPresenceHandler*( @@ -297,6 +309,8 @@ proc blocksHandler*( trace "Unable to store block", cid = blk.cid await b.resolveBlocks(blocks) + codexBlockExchangeBlocksReceived.inc(blocks.len.int64) + let peerCtx = b.peers.get(peer) @@ -336,6 +350,9 @@ proc wantListHandler*( b.pricing.get(Pricing(price: 0.u256)) .price.toBytesBE) + if e.wantType == WantType.WantHave: + codexBlockExchangeWantHaveListsReceived.inc() + if not have and e.sendDontHave: trace "Adding dont have entry to presence response", cid = e.cid presence.add( @@ -353,6 +370,7 @@ proc wantListHandler*( elif e.wantType == WantType.WantBlock: trace "Added entry to peer's want blocks list", cid = e.cid peerCtx.peerWants.add(e) + codexBlockExchangeWantBlockListsReceived.inc() else: # peer doesn't want this block anymore if e.cancel: @@ -467,6 +485,9 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = task.id, blocks) + 
codexBlockExchangeBlocksSent.inc(blocks.len.int64) + + trace "About to remove entries from peerWants", blocks = blocks.len, items = task.peerWants.len # Remove successfully sent blocks task.peerWants.keepIf( proc(e: Entry): bool = @@ -500,7 +521,7 @@ proc new*( peersPerRequest = DefaultMaxPeersPerRequest ): BlockExcEngine = ## Create new block exchange engine instance - ## + ## let engine = BlockExcEngine( diff --git a/codex/blockexchange/engine/pendingblocks.nim b/codex/blockexchange/engine/pendingblocks.nim index 171311f4..e897a66f 100644 --- a/codex/blockexchange/engine/pendingblocks.nim +++ b/codex/blockexchange/engine/pendingblocks.nim @@ -16,12 +16,15 @@ push: {.upraises: [].} import pkg/chronicles import pkg/chronos import pkg/libp2p +import pkg/metrics import ../../blocktype logScope: topics = "codex pendingblocks" +declareGauge(codexBlockExchangePendingBlockRequests, "codex blockexchange pending block requests") + const DefaultBlockTimeout* = 10.minutes @@ -33,6 +36,9 @@ type PendingBlocksManager* = ref object of RootObj blocks*: Table[Cid, BlockReq] # pending Block requests +proc updatePendingBlockGauge(p: PendingBlocksManager) = + codexBlockExchangePendingBlockRequests.set(p.blocks.len.int64) + proc getWantHandle*( p: PendingBlocksManager, cid: Cid, @@ -50,6 +56,7 @@ proc getWantHandle*( trace "Adding pending future for block", cid, inFlight = p.blocks[cid].inFlight + p.updatePendingBlockGauge() return await p.blocks[cid].handle.wait(timeout) except CancelledError as exc: trace "Blocks cancelled", exc = exc.msg, cid @@ -60,6 +67,7 @@ proc getWantHandle*( raise exc finally: p.blocks.del(cid) + p.updatePendingBlockGauge() proc resolve*(p: PendingBlocksManager, blocks: seq[Block]) = diff --git a/codex/namespaces.nim b/codex/namespaces.nim index 93d7902c..b7988cdc 100644 --- a/codex/namespaces.nim +++ b/codex/namespaces.nim @@ -11,6 +11,7 @@ const # Namespaces CodexMetaNamespace* = "meta" # meta info stored here CodexRepoNamespace* = "repo" # repository 
namespace, blocks and manifests are subkeys + CodexBlockTotalNamespace* = CodexMetaNamespace & "/total" # number of blocks in the repo CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace CodexManifestNamespace* = CodexRepoNamespace & "/manifests" # manifest namespace CodexBlocksTtlNamespace* = # Cid TTL diff --git a/codex/purchasing/states/cancelled.nim b/codex/purchasing/states/cancelled.nim index a0d8315b..cb0be05d 100644 --- a/codex/purchasing/states/cancelled.nim +++ b/codex/purchasing/states/cancelled.nim @@ -1,13 +1,17 @@ +import pkg/metrics import ../statemachine import ./errorhandling import ./error +declareCounter(codexPurchasesCancelled, "codex purchases cancelled") + type PurchaseCancelled* = ref object of ErrorHandlingState method `$`*(state: PurchaseCancelled): string = "cancelled" method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.} = + codexPurchasesCancelled.inc() let purchase = Purchase(machine) await purchase.market.withdrawFunds(purchase.requestId) let error = newException(Timeout, "Purchase cancelled due to timeout") diff --git a/codex/purchasing/states/error.nim b/codex/purchasing/states/error.nim index df1c8d5c..c1fbbc6a 100644 --- a/codex/purchasing/states/error.nim +++ b/codex/purchasing/states/error.nim @@ -1,5 +1,8 @@ +import pkg/metrics import ../statemachine +declareCounter(codexPurchasesError, "codex purchases error") + type PurchaseErrored* = ref object of PurchaseState error*: ref CatchableError @@ -7,5 +10,6 @@ method `$`*(state: PurchaseErrored): string = "errored" method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} = + codexPurchasesError.inc() let purchase = Purchase(machine) purchase.future.fail(state.error) diff --git a/codex/purchasing/states/failed.nim b/codex/purchasing/states/failed.nim index 3fbe36f7..8a814147 100644 --- a/codex/purchasing/states/failed.nim +++ b/codex/purchasing/states/failed.nim @@ -1,6 +1,9 @@ +import pkg/metrics import 
../statemachine import ./error +declareCounter(codexPurchasesFailed, "codex purchases failed") + type PurchaseFailed* = ref object of PurchaseState @@ -8,5 +11,6 @@ method `$`*(state: PurchaseFailed): string = "failed" method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} = + codexPurchasesFailed.inc() let error = newException(PurchaseError, "Purchase failed") return some State(PurchaseErrored(error: error)) diff --git a/codex/purchasing/states/finished.nim b/codex/purchasing/states/finished.nim index 93e8b4f0..6e28bec2 100644 --- a/codex/purchasing/states/finished.nim +++ b/codex/purchasing/states/finished.nim @@ -1,10 +1,14 @@ +import pkg/metrics import ../statemachine +declareCounter(codexPurchasesFinished, "codex purchases finished") + type PurchaseFinished* = ref object of PurchaseState method `$`*(state: PurchaseFinished): string = "finished" method run*(state: PurchaseFinished, machine: Machine): Future[?State] {.async.} = + codexPurchasesFinished.inc() let purchase = Purchase(machine) purchase.future.complete() diff --git a/codex/purchasing/states/pending.nim b/codex/purchasing/states/pending.nim index 64a7fdd5..14d1240b 100644 --- a/codex/purchasing/states/pending.nim +++ b/codex/purchasing/states/pending.nim @@ -1,13 +1,17 @@ +import pkg/metrics import ../statemachine import ./errorhandling import ./submitted +declareCounter(codexPurchasesPending, "codex purchases pending") + type PurchasePending* = ref object of ErrorHandlingState method `$`*(state: PurchasePending): string = "pending" method run*(state: PurchasePending, machine: Machine): Future[?State] {.async.} = + codexPurchasesPending.inc() let purchase = Purchase(machine) let request = !purchase.request await purchase.market.requestStorage(request) diff --git a/codex/purchasing/states/started.nim b/codex/purchasing/states/started.nim index 27d28ddf..26cc871d 100644 --- a/codex/purchasing/states/started.nim +++ b/codex/purchasing/states/started.nim @@ -1,14 +1,18 @@ +import 
pkg/metrics import ../statemachine import ./errorhandling import ./finished import ./failed +declareCounter(codexPurchasesStarted, "codex purchases started") + type PurchaseStarted* = ref object of ErrorHandlingState method `$`*(state: PurchaseStarted): string = "started" method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} = + codexPurchasesStarted.inc() let purchase = Purchase(machine) let clock = purchase.clock diff --git a/codex/purchasing/states/submitted.nim b/codex/purchasing/states/submitted.nim index 5e6dd892..4505ae91 100644 --- a/codex/purchasing/states/submitted.nim +++ b/codex/purchasing/states/submitted.nim @@ -1,14 +1,18 @@ +import pkg/metrics import ../statemachine import ./errorhandling import ./started import ./cancelled +declareCounter(codexPurchasesSubmitted, "codex purchases submitted") + type PurchaseSubmitted* = ref object of ErrorHandlingState method `$`*(state: PurchaseSubmitted): string = "submitted" method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.} = + codexPurchasesSubmitted.inc() let purchase = Purchase(machine) let request = !purchase.request let market = purchase.market diff --git a/codex/purchasing/states/unknown.nim b/codex/purchasing/states/unknown.nim index 38628334..0431396e 100644 --- a/codex/purchasing/states/unknown.nim +++ b/codex/purchasing/states/unknown.nim @@ -1,3 +1,4 @@ +import pkg/metrics import ../statemachine import ./errorhandling import ./submitted @@ -6,12 +7,15 @@ import ./cancelled import ./finished import ./failed +declareCounter(codexPurchasesUnknown, "codex purchases unknown") + type PurchaseUnknown* = ref object of ErrorHandlingState method `$`*(state: PurchaseUnknown): string = "unknown" method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} = + codexPurchasesUnknown.inc() let purchase = Purchase(machine) if (request =? await purchase.market.getRequest(purchase.requestId)) and (requestState =? 
await purchase.market.requestState(purchase.requestId)): diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 77d36f63..651d370d 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -20,6 +20,7 @@ import pkg/chronicles import pkg/chronos import pkg/presto import pkg/libp2p +import pkg/metrics import pkg/stew/base10 import pkg/stew/byteutils import pkg/confutils @@ -42,6 +43,9 @@ import ./json logScope: topics = "codex restapi" +declareCounter(codexApiUploads, "codex API uploads") +declareCounter(codexApiDownloads, "codex API downloads") + proc validate( pattern: string, value: string): int @@ -164,6 +168,7 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter = trace "Sending chunk", size = buff.len await resp.sendChunk(addr buff[0], buff.len) await resp.finish() + codexApiDownloads.inc() except CatchableError as exc: trace "Excepting streaming blocks", exc = exc.msg return RestApiResponse.error(Http500) @@ -238,6 +243,7 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter = trace "Error uploading file", exc = error.msg return RestApiResponse.error(Http500, error.msg) + codexApiUploads.inc() trace "Uploaded file", cid return RestApiResponse.response($cid) except CancelledError: diff --git a/codex/stores/keyutils.nim b/codex/stores/keyutils.nim index e4a6c98a..4b8507d0 100644 --- a/codex/stores/keyutils.nim +++ b/codex/stores/keyutils.nim @@ -20,6 +20,7 @@ const CodexMetaKey* = Key.init(CodexMetaNamespace).tryGet CodexRepoKey* = Key.init(CodexRepoNamespace).tryGet CodexBlocksKey* = Key.init(CodexBlocksNamespace).tryGet + CodexTotalBlocksKey* = Key.init(CodexBlockTotalNamespace).tryGet CodexManifestKey* = Key.init(CodexManifestNamespace).tryGet BlocksTtlKey* = Key.init(CodexBlocksTtlNamespace).tryGet QuotaKey* = Key.init(CodexQuotaNamespace).tryGet diff --git a/codex/stores/repostore.nim b/codex/stores/repostore.nim index 6e2731a4..c9c79f36 100644 --- a/codex/stores/repostore.nim +++ b/codex/stores/repostore.nim @@ -14,6 
+14,7 @@ push: {.upraises: [].} import pkg/chronos import pkg/chronicles import pkg/libp2p +import pkg/metrics import pkg/questionable import pkg/questionable/results import pkg/datastore @@ -30,6 +31,10 @@ export blocktype, libp2p logScope: topics = "codex repostore" +declareGauge(codexRepostoreBlocks, "codex repostore blocks") +declareGauge(codexRepostoreBytesUsed, "codex repostore bytes used") +declareGauge(codexRepostoreBytesReserved, "codex repostore bytes reserved") + const DefaultBlockTtl* = 24.hours DefaultQuotaBytes* = 1'u shl 33'u # ~8GB @@ -43,6 +48,7 @@ type repoDs*: Datastore metaDs*: Datastore clock: Clock + totalBlocks*: uint # number of blocks in the store quotaMaxBytes*: uint # maximum available bytes quotaUsedBytes*: uint # bytes used by the repo quotaReservedBytes*: uint # bytes reserved by the repo @@ -61,6 +67,11 @@ iterator items*(q: BlockExpirationIter): Future[?BlockExpiration] = while not q.finished: yield q.next() +proc updateMetrics(self: RepoStore) = + codexRepostoreBlocks.set(self.totalBlocks.int64) + codexRepostoreBytesUsed.set(self.quotaUsedBytes.int64) + codexRepostoreBytesReserved.set(self.quotaReservedBytes.int64) + func totalUsed*(self: RepoStore): uint = (self.quotaUsedBytes + self.quotaReservedBytes) @@ -105,6 +116,14 @@ proc getBlockExpirationEntry( let value = self.getBlockExpirationTimestamp(ttl).toBytes return success((key, value)) +proc persistTotalBlocksCount(self: RepoStore): Future[?!void] {.async.} = + if err =? 
(await self.metaDs.put( + CodexTotalBlocksKey, + @(self.totalBlocks.uint64.toBytesBE))).errorOption: + trace "Error total blocks key!", err = err.msg + return failure(err) + return success() + method putBlock*( self: RepoStore, blk: Block, @@ -156,6 +175,12 @@ method putBlock*( return failure(err) self.quotaUsedBytes = used + inc self.totalBlocks + if isErr (await self.persistTotalBlocksCount()): + trace "Unable to update block total metadata" + return failure("Unable to update block total metadata") + + self.updateMetrics() return success() proc updateQuotaBytesUsed(self: RepoStore, blk: Block): Future[?!void] {.async.} = @@ -166,6 +191,7 @@ proc updateQuotaBytesUsed(self: RepoStore, blk: Block): Future[?!void] {.async.} trace "Error updating quota key!", err = err.msg return failure(err) self.quotaUsedBytes = used + self.updateMetrics() return success() proc removeBlockExpirationEntry(self: RepoStore, cid: Cid): Future[?!void] {.async.} = @@ -195,6 +221,12 @@ method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = trace "Deleted block", cid, totalUsed = self.totalUsed + dec self.totalBlocks + if isErr (await self.persistTotalBlocksCount()): + trace "Unable to update block total metadata" + return failure("Unable to update block total metadata") + + self.updateMetrics() return success() method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} = @@ -251,7 +283,7 @@ method getBlockExpirations*( offset: int ): Future[?!BlockExpirationIter] {.async, base.} = ## Get block experiartions from the given RepoStore - ## + ## without query =? 
createBlockExpirationQuery(maxNumber, offset), err: trace "Unable to format block expirations query" return failure(err) @@ -346,6 +378,7 @@ proc release*(self: RepoStore, bytes: uint): Future[?!void] {.async.} = return failure(err) trace "Released bytes", bytes + self.updateMetrics() return success() proc start*(self: RepoStore): Future[void] {.async.} = @@ -358,6 +391,14 @@ proc start*(self: RepoStore): Future[void] {.async.} = trace "Starting repo" + without total =? await self.metaDs.get(CodexTotalBlocksKey), err: + if not (err of DatastoreKeyNotFound): + error "Unable to read total number of blocks from metadata store", err = err.msg, key = $CodexTotalBlocksKey + + if total.len > 0: + self.totalBlocks = uint64.fromBytesBE(total).uint + trace "Number of blocks in store at start", total = self.totalBlocks + ## load current persist and cache bytes from meta ds without quotaUsedBytes =? await self.metaDs.get(QuotaUsedKey), err: if not (err of DatastoreKeyNotFound): @@ -386,6 +427,7 @@ proc start*(self: RepoStore): Future[void] {.async.} = notice "Current bytes used for persist quota", bytes = self.quotaReservedBytes + self.updateMetrics() self.started = true proc stop*(self: RepoStore): Future[void] {.async.} = @@ -410,8 +452,8 @@ func new*( quotaMaxBytes = DefaultQuotaBytes, blockTtl = DefaultBlockTtl ): RepoStore = - ## Create new instance of a RepoStore - ## + ## Create new instance of a RepoStore + ## RepoStore( repoDs: repoDs, metaDs: metaDs, From 1d161d383ec2738cdc3a0eac16e8b0788f218edb Mon Sep 17 00:00:00 2001 From: Eric <5089238+emizzle@users.noreply.github.com> Date: Tue, 25 Jul 2023 12:50:30 +1000 Subject: [PATCH 5/9] Slot queue (#455) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Slot queue Adds a slot queue, as per the [slot queue design](https://github.com/codex-storage/codex-research/blob/master/design/sales.md#slot-queue). 
Any time storage is requested, all slots from that request are immediately added to the queue. Finished, Cancelled, Failed requests remove all slots with that request id from the queue. SlotFreed events add a new slot to the queue and SlotFilled events remove the slot from the queue. This allows popping of a slot each time one is processed, making things much simpler. When an entire request of slots is added to the queue, the slot indices are shuffled randomly to hopefully prevent nodes that pick up the same storage requested event from clashing on the first processed slot index. This allowed removal of assigning a random slot index in the SalePreparing state and it also ensured that all SalesAgents will have a slot index assigned to them at the start thus the removal of the optional slotIndex. Remove slotId from SlotFreed event as it was not being used. RequestId and slotIndex were added to the SlotFreed event earlier and those are now being used The slot queue invariant that prioritises queue items added to the queue relies on a scoring mechanism to sort them based on the [sort order in the design document](https://github.com/codex-storage/codex-research/blob/master/design/sales.md#sort-order). When a storage request is handled by the sales module, a slot index was randomly assigned and then the slot was filled. Now, a random slot index is only assigned when adding an entire request to the slot queue. Additionally, the slot is checked that its state is `SlotState.Free` before continuing with the download process. SlotQueue should always ensure the underlying AsyncHeapQueue has one less than the maximum items, ensuring the SlotQueue can always have space to add an additional item regardless if it’s full or not. Constructing `SlotQueue.workers` in `SlotQueue.new` calls `newAsyncQueue` which causes side effects, so the construction call had to be moved to `SlotQueue.start`. 
Prevent loading request from contract (network request) if there is an existing item in queue for that request. Check availability before adding request to queue. Add ability to query market contract for past events. When new availabilities are added, the `onReservationAdded` callback is triggered in which past `StorageRequested` events are queried, and those slots are added to the queue (filtered by availability on `push` and filtered by state in `SalePreparing`). #### Request Workers Limit the concurrent requests being processed in the queue by using a limited pool of workers (default = 3). Workers are in a data structure of type `AsyncQueue[SlotQueueWorker]`. This allows us to await a `popFirst` for available workers inside of the main SlotQueue event loop. Add an `onCleanUp` that stops the agents and removes them from the sales module agent list. `onCleanUp` is called from sales end states (eg ignored, cancelled, finished, failed, errored). Add a `doneProcessing` future to `SlotQueueWorker` to be completed in the `OnProcessSlot` callback. Each `doneProcessing` future created is cancelled and awaited in `SlotQueue.stop` (thanks to `TrackableFutures`), which forced `stop` to become async. - Cancel dispatched workers and the `onProcessSlot` callbacks, prevents zombie callbacks #### Add TrackableFutures Allow tracking of futures in a module so they can be cancelled at a later time. Useful for asyncSpawned futures, but works for any future. ### Sales module The sales module needed to subscribe to request events to ensure that the request queue was managed correctly on each event. In the process of doing this, the sales agents were updated to avoid subscribing to events in each agent, and instead dispatch received events from the sales module to all created sales agents. This would prevent memory leaks on having too many eventemitters subscribed to. 
- prevent removal of agents from sales module while stopping, otherwise the agents seq len is modified while iterating An additional sales agent state was added, `SalePreparing`, that handles all state machine setup, such as retrieving the request and subscribing to events that were previously in the `SaleDownloading` state. Once agents have parked in an end state (eg ignored, cancelled, finished, failed, errored), they were not getting cleaned up and the sales module was keeping a handle on their reference. An `onCleanUp` callback was created to be called after the state machine enters an end state, which could prevent a memory leak if the number of requests coming in is high. Move the SalesAgent callback raises pragmas from the Sales module to the proc definition in SalesAgent. This avoids having to catch `Exception`. - remove unneeded error handling as pragmas were moved Move sales.subscriptions from an object containing named subscriptions to a `seq[Subscription]` directly on the sales object. 
Sales tests: shut down repo after sales stop, to fix SIGABRT in CI ### Add async Promise API - modelled after JavaScript Promise API - alternative to `asyncSpawn` that allows handling of async calls in a synchronous context (including access to the synchronous closure) with less additional procs to be declared - Write less code, catch errors that would otherwise defect in asyncspawn, and execute a callback after completion - Add cancellation callbacks to utils/then, ensuring cancellations are handled properly ## Dependencies - bump codex-contracts-eth to support slot queue (https://github.com/codex-storage/codex-contracts-eth/pull/61) - bump nim-ethers to 0.5.0 - Bump nim-json-rpc submodule to 0bf2bcb --------- Co-authored-by: Jaremy Creechley --- codex.nimble | 2 +- codex/contracts/clock.nim | 4 +- codex/contracts/market.nim | 58 ++- codex/contracts/marketplace.nim | 6 +- codex/contracts/requests.nim | 16 + codex/market.nim | 30 +- codex/sales.nim | 339 ++++++++++++++-- codex/sales/reservations.nim | 21 +- codex/sales/salesagent.nim | 82 ++-- codex/sales/salescontext.nim | 6 +- codex/sales/salesdata.nim | 3 - codex/sales/slotqueue.nim | 396 ++++++++++++++++++ codex/sales/states/downloading.nim | 25 +- codex/sales/states/errored.nim | 6 +- codex/sales/states/filled.nim | 5 +- codex/sales/states/filling.nim | 7 +- codex/sales/states/finished.nim | 3 +- codex/sales/states/ignored.nim | 6 +- codex/sales/states/preparing.nim | 69 ++++ codex/sales/states/proving.nim | 5 +- codex/sales/states/unknown.nim | 5 +- codex/sales/trackedfutures.nim | 46 +++ codex/utils/asyncheapqueue.nim | 2 +- codex/utils/then.nim | 226 +++++++++++ tests/codex/helpers/eventually.nim | 12 + tests/codex/helpers/mockmarket.nim | 89 +++- tests/codex/helpers/mocksalesagent.nim | 16 + tests/codex/sales/states/testpreparing.nim | 29 ++ tests/codex/sales/testsales.nim | 130 +++++- tests/codex/sales/testsalesagent.nim | 39 +- tests/codex/sales/testslotqueue.nim | 450 +++++++++++++++++++++ 
tests/codex/testsales.nim | 1 + tests/codex/testutils.nim | 1 + tests/codex/utils/testthen.nim | 344 ++++++++++++++++ tests/contracts/testContracts.nim | 6 +- tests/contracts/testMarket.nim | 48 ++- tests/examples.nim | 6 + tests/integration/testproofs.nim | 4 +- vendor/codex-contracts-eth | 2 +- vendor/nim-ethers | 2 +- vendor/nim-json-rpc | 2 +- 41 files changed, 2331 insertions(+), 218 deletions(-) create mode 100644 codex/sales/slotqueue.nim create mode 100644 codex/sales/states/preparing.nim create mode 100644 codex/sales/trackedfutures.nim create mode 100644 codex/utils/then.nim create mode 100644 tests/codex/helpers/mocksalesagent.nim create mode 100644 tests/codex/sales/states/testpreparing.nim create mode 100644 tests/codex/sales/testslotqueue.nim create mode 100644 tests/codex/utils/testthen.nim diff --git a/codex.nimble b/codex.nimble index 1cc86e40..d57c5b91 100644 --- a/codex.nimble +++ b/codex.nimble @@ -13,7 +13,7 @@ requires "bearssl >= 0.1.4" requires "chronicles >= 0.7.2" requires "chronos >= 2.5.2" requires "confutils" -requires "ethers >= 0.2.4 & < 0.3.0" +requires "ethers >= 0.5.0 & < 0.6.0" requires "libbacktrace" requires "libp2p" requires "metrics" diff --git a/codex/contracts/clock.nim b/codex/contracts/clock.nim index d7136573..7b5187c8 100644 --- a/codex/contracts/clock.nim +++ b/codex/contracts/clock.nim @@ -22,14 +22,14 @@ proc start*(clock: OnChainClock) {.async.} = return clock.started = true - proc onBlock(blck: Block) {.async, upraises:[].} = + proc onBlock(blck: Block) {.upraises:[].} = let blockTime = initTime(blck.timestamp.truncate(int64), 0) let computerTime = getTime() clock.offset = blockTime - computerTime clock.newBlock.fire() if latestBlock =? 
(await clock.provider.getBlock(BlockTag.latest)): - await onBlock(latestBlock) + onBlock(latestBlock) clock.subscription = await clock.provider.subscribe(onBlock) diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index b7b7c94f..90f9f5b8 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -1,5 +1,6 @@ +import std/sequtils import std/strutils -import std/strformat +import std/sugar import pkg/chronicles import pkg/ethers import pkg/ethers/testing @@ -36,7 +37,7 @@ proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} = let tokenAddress = await market.contract.token() let token = Erc20Token.new(tokenAddress, market.signer) - await token.approve(market.contract.address(), amount) + discard await token.approve(market.contract.address(), amount) method getSigner*(market: OnChainMarket): Future[Address] {.async.} = return await market.signer.getAddress() @@ -168,11 +169,13 @@ method canProofBeMarkedAsMissing*( trace "Proof can not be marked as missing", msg = e.msg return false -method subscribeRequests(market: OnChainMarket, +method subscribeRequests*(market: OnChainMarket, callback: OnRequest): Future[MarketSubscription] {.async.} = proc onEvent(event: StorageRequested) {.upraises:[].} = - callback(event.requestId, event.ask) + callback(event.requestId, + event.ask, + event.expiry) let subscription = await market.contract.subscribe(StorageRequested, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -198,10 +201,18 @@ method subscribeSlotFreed*(market: OnChainMarket, callback: OnSlotFreed): Future[MarketSubscription] {.async.} = proc onEvent(event: SlotFreed) {.upraises:[].} = - callback(event.slotId) + callback(event.requestId, event.slotIndex) let subscription = await market.contract.subscribe(SlotFreed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) +method subscribeFulfillment(market: OnChainMarket, + callback: OnFulfillment): + Future[MarketSubscription] 
{.async.} = + proc onEvent(event: RequestFulfilled) {.upraises:[].} = + callback(event.requestId) + let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) + method subscribeFulfillment(market: OnChainMarket, requestId: RequestId, callback: OnFulfillment): @@ -212,6 +223,14 @@ method subscribeFulfillment(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) +method subscribeRequestCancelled*(market: OnChainMarket, + callback: OnRequestCancelled): + Future[MarketSubscription] {.async.} = + proc onEvent(event: RequestCancelled) {.upraises:[].} = + callback(event.requestId) + let subscription = await market.contract.subscribe(RequestCancelled, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) + method subscribeRequestCancelled*(market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled): @@ -222,6 +241,14 @@ method subscribeRequestCancelled*(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestCancelled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) +method subscribeRequestFailed*(market: OnChainMarket, + callback: OnRequestFailed): + Future[MarketSubscription] {.async.} = + proc onEvent(event: RequestFailed) {.upraises:[]} = + callback(event.requestId) + let subscription = await market.contract.subscribe(RequestFailed, onEvent) + return OnChainMarketSubscription(eventSubscription: subscription) + method subscribeRequestFailed*(market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed): @@ -242,3 +269,24 @@ method subscribeProofSubmission*(market: OnChainMarket, method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} = await subscription.eventSubscription.unsubscribe() + +method queryPastStorageRequests*(market: OnChainMarket, + blocksAgo: 
int): + Future[seq[PastStorageRequest]] {.async.} = + + let contract = market.contract + let provider = contract.provider + + let head = await provider.getBlockNumber() + let fromBlock = BlockTag.init(head - blocksAgo.abs.u256) + + let events = await contract.queryFilter(StorageRequested, + fromBlock, + BlockTag.latest) + return events.map(event => + PastStorageRequest( + requestId: event.requestId, + ask: event.ask, + expiry: event.expiry + ) + ) diff --git a/codex/contracts/marketplace.nim b/codex/contracts/marketplace.nim index 45d92335..f09e8720 100644 --- a/codex/contracts/marketplace.nim +++ b/codex/contracts/marketplace.nim @@ -18,13 +18,13 @@ type StorageRequested* = object of Event requestId*: RequestId ask*: StorageAsk + expiry*: UInt256 SlotFilled* = object of Event requestId* {.indexed.}: RequestId - slotIndex* {.indexed.}: UInt256 - slotId*: SlotId + slotIndex*: UInt256 SlotFreed* = object of Event requestId* {.indexed.}: RequestId - slotId*: SlotId + slotIndex*: UInt256 RequestFulfilled* = object of Event requestId* {.indexed.}: RequestId RequestCancelled* = object of Event diff --git a/codex/contracts/requests.nim b/codex/contracts/requests.nim index 4c6e8b10..7393f278 100644 --- a/codex/contracts/requests.nim +++ b/codex/contracts/requests.nim @@ -4,6 +4,8 @@ import pkg/nimcrypto import pkg/ethers/fields import pkg/questionable/results import pkg/stew/byteutils +import pkg/json_serialization +import pkg/upraises export contractabi @@ -203,3 +205,17 @@ func price*(request: StorageRequest): UInt256 = func size*(ask: StorageAsk): UInt256 = ask.slots.u256 * ask.slotSize + +proc writeValue*( + writer: var JsonWriter, + value: SlotId | RequestId) {.upraises:[IOError].} = + + mixin writeValue + writer.writeValue value.toArray + +proc readValue*[T: SlotId | RequestId]( + reader: var JsonReader, + value: var T) {.upraises: [SerializationError, IOError].} = + + mixin readValue + value = T reader.readValue(T.distinctBase) diff --git a/codex/market.nim 
b/codex/market.nim index e2a233a6..be0d06fc 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -15,13 +15,19 @@ export periods type Market* = ref object of RootObj Subscription* = ref object of RootObj - OnRequest* = proc(id: RequestId, ask: StorageAsk) {.gcsafe, upraises:[].} + OnRequest* = proc(id: RequestId, + ask: StorageAsk, + expiry: UInt256) {.gcsafe, upraises:[].} OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].} OnSlotFilled* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises:[].} - OnSlotFreed* = proc(slotId: SlotId) {.gcsafe, upraises: [].} + OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises:[].} OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises:[].} OnProofSubmitted* = proc(id: SlotId, proof: seq[byte]) {.gcsafe, upraises:[].} + PastStorageRequest* = object + requestId*: RequestId + ask*: StorageAsk + expiry*: UInt256 method getSigner*(market: Market): Future[Address] {.base, async.} = raiseAssert("not implemented") @@ -112,6 +118,11 @@ method canProofBeMarkedAsMissing*(market: Market, period: Period): Future[bool] {.base, async.} = raiseAssert("not implemented") +method subscribeFulfillment*(market: Market, + callback: OnFulfillment): + Future[Subscription] {.base, async.} = + raiseAssert("not implemented") + method subscribeFulfillment*(market: Market, requestId: RequestId, callback: OnFulfillment): @@ -135,12 +146,22 @@ method subscribeSlotFreed*(market: Market, Future[Subscription] {.base, async.} = raiseAssert("not implemented") +method subscribeRequestCancelled*(market: Market, + callback: OnRequestCancelled): + Future[Subscription] {.base, async.} = + raiseAssert("not implemented") + method subscribeRequestCancelled*(market: Market, requestId: RequestId, callback: OnRequestCancelled): Future[Subscription] {.base, async.} = raiseAssert("not implemented") +method 
subscribeRequestFailed*(market: Market, + callback: OnRequestFailed): + Future[Subscription] {.base, async.} = + raiseAssert("not implemented") + method subscribeRequestFailed*(market: Market, requestId: RequestId, callback: OnRequestFailed): @@ -154,3 +175,8 @@ method subscribeProofSubmission*(market: Market, method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} = raiseAssert("not implemented") + +method queryPastStorageRequests*(market: Market, + blocksAgo: int): + Future[seq[PastStorageRequest]] {.base, async.} = + raiseAssert("not implemented") diff --git a/codex/sales.nim b/codex/sales.nim index 8efeaf61..8a7f0cb5 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -1,20 +1,24 @@ import std/sequtils +import std/sugar +import std/tables import pkg/questionable -import pkg/upraises import pkg/stint import pkg/chronicles import pkg/datastore -import ./rng import ./market import ./clock import ./proving import ./stores import ./contracts/requests +import ./contracts/marketplace import ./sales/salescontext import ./sales/salesagent import ./sales/statemachine -import ./sales/states/downloading +import ./sales/slotqueue +import ./sales/trackedfutures +import ./sales/states/preparing import ./sales/states/unknown +import ./utils/then ## Sales holds a list of available storage that it may sell. 
## @@ -43,8 +47,10 @@ logScope: type Sales* = ref object context*: SalesContext - subscription*: ?market.Subscription agents*: seq[SalesAgent] + running: bool + subscriptions: seq[market.Subscription] + trackedFutures: TrackedFutures proc `onStore=`*(sales: Sales, onStore: OnStore) = sales.context.onStore = some onStore @@ -67,37 +73,47 @@ func new*(_: type Sales, proving: Proving, repo: RepoStore): Sales = - Sales(context: SalesContext( - market: market, - clock: clock, - proving: proving, - reservations: Reservations.new(repo) - )) + let reservations = Reservations.new(repo) + Sales( + context: SalesContext( + market: market, + clock: clock, + proving: proving, + reservations: reservations, + slotQueue: SlotQueue.new(reservations) + ), + trackedFutures: TrackedFutures.new(), + subscriptions: @[] + ) -proc randomSlotIndex(numSlots: uint64): UInt256 = - let rng = Rng.instance - let slotIndex = rng.rand(numSlots - 1) - return slotIndex.u256 +proc remove(sales: Sales, agent: SalesAgent) {.async.} = + await agent.stop() + if sales.running: + sales.agents.keepItIf(it != agent) -proc handleRequest(sales: Sales, - requestId: RequestId, - ask: StorageAsk) = +proc cleanUp(sales: Sales, + agent: SalesAgent, + processing: Future[void]) {.async.} = + await sales.remove(agent) + # signal back to the slot queue to cycle a worker + if not processing.isNil and not processing.finished(): + processing.complete() - debug "handling storage requested", - slots = ask.slots, slotSize = ask.slotSize, duration = ask.duration, - reward = ask.reward, maxSlotLoss = ask.maxSlotLoss +proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) = + debug "processing slot from queue", requestId = $item.requestId, + slot = item.slotIndex - # TODO: check if random slot is actually available (not already filled) - let slotIndex = randomSlotIndex(ask.slots) let agent = newSalesAgent( sales.context, - requestId, - slotIndex, + item.requestId, + item.slotIndex.u256, none StorageRequest ) - 
agent.context.onIgnored = proc {.gcsafe, upraises:[].} = - sales.agents.keepItIf(it != agent) - agent.start(SaleDownloading()) + + agent.context.onCleanUp = proc {.async.} = + await sales.cleanUp(agent, done) + + agent.start(SalePreparing()) sales.agents.add agent proc mySlots*(sales: Sales): Future[seq[Slot]] {.async.} = @@ -120,27 +136,272 @@ proc load*(sales: Sales) {.async.} = slot.request.id, slot.slotIndex, some slot.request) + + agent.context.onCleanUp = proc {.async.} = await sales.remove(agent) + agent.start(SaleUnknown()) sales.agents.add agent -proc start*(sales: Sales) {.async.} = - doAssert sales.subscription.isNone, "Sales already started" +proc onReservationAdded(sales: Sales, availability: Availability) {.async.} = + ## Query last 256 blocks for new requests, adding them to the queue. `push` + ## checks for availability before adding to the queue. If processed, the + ## sales agent will check if the slot is free. + let context = sales.context + let market = context.market + let queue = context.slotQueue - proc onRequest(requestId: RequestId, ask: StorageAsk) {.gcsafe, upraises:[].} = - sales.handleRequest(requestId, ask) + logScope: + topics = "sales onReservationAdded callback" + + trace "reservation added, querying past storage requests to add to queue" try: - sales.subscription = some await sales.context.market.subscribeRequests(onRequest) + let events = await market.queryPastStorageRequests(256) + let requests = events.map(event => + SlotQueueItem.init(event.requestId, event.ask, event.expiry) + ) + + trace "found past storage requested events to add to queue", + events = events.len + + for slots in requests: + for slot in slots: + if err =? 
(await queue.push(slot)).errorOption: + # continue on error + if err of QueueNotRunningError: + warn "cannot push items to queue, queue is not running" + elif err of NoMatchingAvailabilityError: + info "slot in queue had no matching availabilities, ignoring" + elif err of SlotsOutOfRangeError: + warn "Too many slots, cannot add to queue" + elif err of SlotQueueItemExistsError: + trace "item already exists, ignoring" + discard + else: raise err + except CatchableError as e: - error "Unable to start sales", msg = e.msg + warn "Error adding request to SlotQueue", error = e.msg + discard + +proc onStorageRequested(sales: Sales, + requestId: RequestId, + ask: StorageAsk, + expiry: UInt256) = + + logScope: + topics = "sales onStorageRequested" + requestId + slots = ask.slots + expiry + + let slotQueue = sales.context.slotQueue + + trace "storage requested, adding slots to queue" + + without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err: + if err of SlotsOutOfRangeError: + warn "Too many slots, cannot add to queue" + else: + warn "Failed to create slot queue items from request", error = err.msg + + for item in items: + # continue on failure + slotQueue.push(item) + .track(sales) + .catch(proc(err: ref CatchableError) = + if err of NoMatchingAvailabilityError: + info "slot in queue had no matching availabilities, ignoring" + elif err of SlotQueueItemExistsError: + error "Failed to push item to queue becaue it already exists" + elif err of QueueNotRunningError: + warn "Failed to push item to queue becaue queue is not running" + else: + warn "Error adding request to SlotQueue", error = err.msg + ) + +proc onSlotFreed(sales: Sales, + requestId: RequestId, + slotIndex: UInt256) = + + logScope: + topics = "sales onSlotFreed" + requestId + slotIndex + + trace "slot freed, adding to queue" + + proc addSlotToQueue() {.async.} = + let context = sales.context + let market = context.market + let queue = context.slotQueue + + # first attempt to populate request using 
existing slot metadata in queue + without var found =? queue.populateItem(requestId, + slotIndex.truncate(uint16)): + trace "no existing request metadata, getting request info from contract" + # if there's no existing slot for that request, retrieve the request + # from the contract. + without request =? await market.getRequest(requestId): + error "unknown request in contract" + return + + found = SlotQueueItem.init(request, slotIndex.truncate(uint16)) + + if err =? (await queue.push(found)).errorOption: + raise err + + addSlotToQueue() + .track(sales) + .catch(proc(err: ref CatchableError) = + if err of NoMatchingAvailabilityError: + info "slot in queue had no matching availabilities, ignoring" + elif err of SlotQueueItemExistsError: + error "Failed to push item to queue becaue it already exists" + elif err of QueueNotRunningError: + warn "Failed to push item to queue becaue queue is not running" + else: + warn "Error adding request to SlotQueue", error = err.msg + ) + +proc subscribeRequested(sales: Sales) {.async.} = + let context = sales.context + let market = context.market + + proc onStorageRequested(requestId: RequestId, + ask: StorageAsk, + expiry: UInt256) = + sales.onStorageRequested(requestId, ask, expiry) + + try: + let sub = await market.subscribeRequests(onStorageRequested) + sales.subscriptions.add(sub) + except CatchableError as e: + error "Unable to subscribe to storage request events", msg = e.msg + +proc subscribeCancellation(sales: Sales) {.async.} = + let context = sales.context + let market = context.market + let queue = context.slotQueue + + proc onCancelled(requestId: RequestId) = + trace "request cancelled, removing all request slots from queue" + queue.delete(requestId) + + try: + let sub = await market.subscribeRequestCancelled(onCancelled) + sales.subscriptions.add(sub) + except CatchableError as e: + error "Unable to subscribe to cancellation events", msg = e.msg + +proc subscribeFulfilled*(sales: Sales) {.async.} = + let context = 
sales.context + let market = context.market + let queue = context.slotQueue + + proc onFulfilled(requestId: RequestId) = + trace "request fulfilled, removing all request slots from queue" + queue.delete(requestId) + + for agent in sales.agents: + agent.onFulfilled(requestId) + + try: + let sub = await market.subscribeFulfillment(onFulfilled) + sales.subscriptions.add(sub) + except CatchableError as e: + error "Unable to subscribe to storage fulfilled events", msg = e.msg + +proc subscribeFailure(sales: Sales) {.async.} = + let context = sales.context + let market = context.market + let queue = context.slotQueue + + proc onFailed(requestId: RequestId) = + trace "request failed, removing all request slots from queue" + queue.delete(requestId) + + for agent in sales.agents: + agent.onFailed(requestId) + + try: + let sub = await market.subscribeRequestFailed(onFailed) + sales.subscriptions.add(sub) + except CatchableError as e: + error "Unable to subscribe to storage failure events", msg = e.msg + +proc subscribeSlotFilled(sales: Sales) {.async.} = + let context = sales.context + let market = context.market + let queue = context.slotQueue + + proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = + trace "slot filled, removing from slot queue", requestId, slotIndex + queue.delete(requestId, slotIndex.truncate(uint16)) + + for agent in sales.agents: + agent.onSlotFilled(requestId, slotIndex) + + try: + let sub = await market.subscribeSlotFilled(onSlotFilled) + sales.subscriptions.add(sub) + except CatchableError as e: + error "Unable to subscribe to slot filled events", msg = e.msg + +proc subscribeSlotFreed(sales: Sales) {.async.} = + let context = sales.context + let market = context.market + + proc onSlotFreed(requestId: RequestId, slotIndex: UInt256) = + sales.onSlotFreed(requestId, slotIndex) + + try: + let sub = await market.subscribeSlotFreed(onSlotFreed) + sales.subscriptions.add(sub) + except CatchableError as e: + error "Unable to subscribe to slot 
freed events", msg = e.msg + +proc startSlotQueue(sales: Sales) {.async.} = + let slotQueue = sales.context.slotQueue + let reservations = sales.context.reservations + + slotQueue.onProcessSlot = + proc(item: SlotQueueItem, done: Future[void]) {.async.} = + sales.processSlot(item, done) + + asyncSpawn slotQueue.start() + + reservations.onReservationAdded = + proc(availability: Availability) {.async.} = + await sales.onReservationAdded(availability) + + +proc subscribe(sales: Sales) {.async.} = + await sales.subscribeRequested() + await sales.subscribeFulfilled() + await sales.subscribeFailure() + await sales.subscribeSlotFilled() + await sales.subscribeSlotFreed() + await sales.subscribeCancellation() + +proc unsubscribe(sales: Sales) {.async.} = + for sub in sales.subscriptions: + try: + await sub.unsubscribe() + except CatchableError as e: + error "Unable to unsubscribe from subscription", error = e.msg + +proc start*(sales: Sales) {.async.} = + await sales.startSlotQueue() + await sales.subscribe() proc stop*(sales: Sales) {.async.} = - if subscription =? 
sales.subscription: - sales.subscription = market.Subscription.none - try: - await subscription.unsubscribe() - except CatchableError as e: - warn "Unsubscribe failed", msg = e.msg + trace "stopping sales" + sales.running = false + await sales.context.slotQueue.stop() + await sales.unsubscribe() + await sales.trackedFutures.cancelTracked() for agent in sales.agents: await agent.stop() + + sales.agents = @[] diff --git a/codex/sales/reservations.nim b/codex/sales/reservations.nim index 86aacd64..0895307c 100644 --- a/codex/sales/reservations.nim +++ b/codex/sales/reservations.nim @@ -42,7 +42,9 @@ type used*: bool Reservations* = ref object repo: RepoStore + onReservationAdded: ?OnReservationAdded GetNext* = proc(): Future[?Availability] {.upraises: [], gcsafe, closure.} + OnReservationAdded* = proc(availability: Availability): Future[void] {.upraises: [], gcsafe.} AvailabilityIter* = ref object finished*: bool next*: GetNext @@ -96,18 +98,22 @@ proc toErr[E1: ref CatchableError, E2: AvailabilityError]( proc writeValue*( writer: var JsonWriter, - value: SlotId | AvailabilityId) {.upraises:[IOError].} = + value: AvailabilityId) {.upraises:[IOError].} = mixin writeValue writer.writeValue value.toArray -proc readValue*[T: SlotId | AvailabilityId]( +proc readValue*[T: AvailabilityId]( reader: var JsonReader, value: var T) {.upraises: [SerializationError, IOError].} = mixin readValue value = T reader.readValue(T.distinctBase) +proc `onReservationAdded=`*(self: Reservations, + onReservationAdded: OnReservationAdded) = + self.onReservationAdded = some onReservationAdded + func key(id: AvailabilityId): ?!Key = (ReservationsKey / id.toArray.toHex) @@ -210,6 +216,15 @@ proc reserve*( return failure(updateErr) + if onReservationAdded =? 
self.onReservationAdded: + try: + await onReservationAdded(availability) + except CatchableError as e: + # we don't have any insight into types of errors that `onProcessSlot` can + # throw because it is caller-defined + warn "Unknown error during 'onReservationAdded' callback", + availabilityId = availability.id, error = e.msg + return success() proc release*( @@ -320,7 +335,7 @@ proc unused*(r: Reservations): Future[?!seq[Availability]] {.async.} = proc find*( self: Reservations, - size, duration, minPrice: UInt256, collateral: UInt256, + size, duration, minPrice, collateral: UInt256, used: bool): Future[?Availability] {.async.} = diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index 7a1f7876..ef7b255b 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -1,8 +1,11 @@ import pkg/chronos import pkg/chronicles +import pkg/questionable +import pkg/questionable/results import pkg/stint +import pkg/upraises import ../contracts/requests -import ../utils/asyncspawn +import ../errors import ./statemachine import ./salescontext import ./salesdata @@ -13,10 +16,13 @@ export reservations logScope: topics = "marketplace sales" -type SalesAgent* = ref object of Machine - context*: SalesContext - data*: SalesData - subscribed: bool +type + SalesAgent* = ref object of Machine + context*: SalesContext + data*: SalesData + subscribed: bool + SalesAgentError = object of CodexError + AllSlotsFilledError* = object of SalesAgentError func `==`*(a, b: SalesAgent): bool = a.data.requestId == b.data.requestId and @@ -41,7 +47,6 @@ proc retrieveRequest*(agent: SalesAgent) {.async.} = proc subscribeCancellation(agent: SalesAgent) {.async.} = let data = agent.data - let market = agent.context.market let clock = agent.context.clock proc onCancelled() {.async.} = @@ -49,51 +54,34 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} = return await clock.waitUntil(request.expiry.truncate(int64)) - if not data.fulfilled.isNil: - asyncSpawn 
data.fulfilled.unsubscribe(), ignore = CatchableError agent.schedule(cancelledEvent(request)) data.cancelled = onCancelled() - proc onFulfilled(_: RequestId) = - data.cancelled.cancel() +method onFulfilled*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} = + if agent.data.requestId == requestId and + not agent.data.cancelled.isNil: + agent.data.cancelled.cancel() - data.fulfilled = - await market.subscribeFulfillment(data.requestId, onFulfilled) - -proc subscribeFailure(agent: SalesAgent) {.async.} = - let data = agent.data - let market = agent.context.market - - proc onFailed(_: RequestId) = - without request =? data.request: - return - asyncSpawn data.failed.unsubscribe(), ignore = CatchableError +method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} = + without request =? agent.data.request: + return + if agent.data.requestId == requestId: agent.schedule(failedEvent(request)) - data.failed = - await market.subscribeRequestFailed(data.requestId, onFailed) +method onSlotFilled*(agent: SalesAgent, + requestId: RequestId, + slotIndex: UInt256) {.base, gcsafe, upraises: [].} = -proc subscribeSlotFilled(agent: SalesAgent) {.async.} = - let data = agent.data - let market = agent.context.market - - proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = - asyncSpawn data.slotFilled.unsubscribe(), ignore = CatchableError - agent.schedule(slotFilledEvent(requestId, data.slotIndex)) - - data.slotFilled = - await market.subscribeSlotFilled(data.requestId, - data.slotIndex, - onSlotFilled) + if agent.data.requestId == requestId and + agent.data.slotIndex == slotIndex: + agent.schedule(slotFilledEvent(requestId, slotIndex)) proc subscribe*(agent: SalesAgent) {.async.} = if agent.subscribed: return await agent.subscribeCancellation() - await agent.subscribeFailure() - await agent.subscribeSlotFilled() agent.subscribed = true proc unsubscribe*(agent: SalesAgent) {.async.} = @@ -101,25 +89,7 @@ proc 
unsubscribe*(agent: SalesAgent) {.async.} = return let data = agent.data - try: - if not data.fulfilled.isNil: - await data.fulfilled.unsubscribe() - data.fulfilled = nil - except CatchableError: - discard - try: - if not data.failed.isNil: - await data.failed.unsubscribe() - data.failed = nil - except CatchableError: - discard - try: - if not data.slotFilled.isNil: - await data.slotFilled.unsubscribe() - data.slotFilled = nil - except CatchableError: - discard - if not data.cancelled.isNil: + if not data.cancelled.isNil and not data.cancelled.finished: await data.cancelled.cancelAndWait() data.cancelled = nil diff --git a/codex/sales/salescontext.nim b/codex/sales/salescontext.nim index ede2b1a6..9063ba31 100644 --- a/codex/sales/salescontext.nim +++ b/codex/sales/salescontext.nim @@ -5,6 +5,7 @@ import ../node/batch import ../market import ../clock import ../proving +import ./slotqueue import ./reservations type @@ -14,9 +15,10 @@ type onStore*: ?OnStore onClear*: ?OnClear onSale*: ?OnSale - onIgnored*: OnIgnored + onCleanUp*: OnCleanUp proving*: Proving reservations*: Reservations + slotQueue*: SlotQueue OnStore* = proc(request: StorageRequest, slot: UInt256, @@ -27,4 +29,4 @@ type slotIndex: UInt256) {.gcsafe, upraises: [].} OnSale* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} - OnIgnored* = proc() {.gcsafe, upraises: [].} + OnCleanUp* = proc: Future[void] {.gcsafe, upraises: [].} diff --git a/codex/sales/salesdata.nim b/codex/sales/salesdata.nim index d8226877..0e975ac1 100644 --- a/codex/sales/salesdata.nim +++ b/codex/sales/salesdata.nim @@ -9,7 +9,4 @@ type ask*: StorageAsk request*: ?StorageRequest slotIndex*: UInt256 - failed*: market.Subscription - fulfilled*: market.Subscription - slotFilled*: market.Subscription cancelled*: Future[void] diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim new file mode 100644 index 00000000..b512360d --- /dev/null +++ b/codex/sales/slotqueue.nim @@ -0,0 +1,396 @@ +import 
std/sequtils +import std/sugar +import std/tables +import pkg/chronicles +import pkg/chronos +import pkg/questionable +import pkg/questionable/results +import pkg/upraises +import ./reservations +import ./trackedfutures +import ../errors +import ../rng +import ../utils +import ../contracts/requests +import ../utils/asyncheapqueue +import ../utils/then + +logScope: + topics = "marketplace slotqueue" + +type + OnProcessSlot* = + proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises:[].} + + # Non-ref obj copies value when assigned, preventing accidental modification + # of values which could cause an incorrect order (eg + # ``slotQueue[1].collateral = 1`` would cause ``collateral`` to be updated, + # but the heap invariant would no longer be honoured. When non-ref, the + # compiler can ensure that statement will fail). + SlotQueueWorker = object + doneProcessing*: Future[void] + + SlotQueueItem* = object + requestId: RequestId + slotIndex: uint16 + slotSize: UInt256 + duration: UInt256 + reward: UInt256 + collateral: UInt256 + expiry: UInt256 + + # don't need to -1 to prevent overflow when adding 1 (to always allow push) + # because AsyncHeapQueue size is of type `int`, which is larger than `uint16` + SlotQueueSize = range[1'u16..uint16.high] + + SlotQueue* = ref object + maxWorkers: int + onProcessSlot: ?OnProcessSlot + queue: AsyncHeapQueue[SlotQueueItem] + reservations: Reservations + running: bool + workers: AsyncQueue[SlotQueueWorker] + trackedFutures: TrackedFutures + + SlotQueueError = object of CodexError + SlotQueueItemExistsError* = object of SlotQueueError + SlotQueueItemNotExistsError* = object of SlotQueueError + SlotsOutOfRangeError* = object of SlotQueueError + NoMatchingAvailabilityError* = object of SlotQueueError + QueueNotRunningError* = object of SlotQueueError + +# Number of concurrent workers used for processing SlotQueueItems +const DefaultMaxWorkers = 3 + +# Cap slot queue size to prevent unbounded growth and make 
sifting more +# efficient. Max size is not equivalent to the number of slots a host can +# service, which is limited by host availabilities and new requests circulating +# the network. Additionally, each new request/slot in the network will be +# included in the queue if it is higher priority than any of the exisiting +# items. Older slots should be unfillable over time as other hosts fill the +# slots. +const DefaultMaxSize = 64'u16 + +proc profitability(item: SlotQueueItem): UInt256 = + StorageAsk(collateral: item.collateral, + duration: item.duration, + reward: item.reward, + slotSize: item.slotSize).pricePerSlot + +proc `<`*(a, b: SlotQueueItem): bool = + # for A to have a higher priority than B (in a min queue), A must be less than + # B. + var scoreA: uint8 = 0 + var scoreB: uint8 = 0 + + proc addIf(score: var uint8, condition: bool, addition: int) = + if condition: + score += 1'u8 shl addition + + scoreA.addIf(a.profitability > b.profitability, 3) + scoreB.addIf(a.profitability < b.profitability, 3) + + scoreA.addIf(a.collateral < b.collateral, 2) + scoreB.addIf(a.collateral > b.collateral, 2) + + scoreA.addIf(a.expiry > b.expiry, 1) + scoreB.addIf(a.expiry < b.expiry, 1) + + scoreA.addIf(a.slotSize < b.slotSize, 0) + scoreB.addIf(a.slotSize > b.slotSize, 0) + + return scoreA > scoreB + +proc `==`*(a, b: SlotQueueItem): bool = + a.requestId == b.requestId and + a.slotIndex == b.slotIndex + +proc new*(_: type SlotQueue, + reservations: Reservations, + maxWorkers = DefaultMaxWorkers, + maxSize: SlotQueueSize = DefaultMaxSize): SlotQueue = + + if maxWorkers <= 0: + raise newException(ValueError, "maxWorkers must be positive") + if maxWorkers.uint16 > maxSize: + raise newException(ValueError, "maxWorkers must be less than maxSize") + + SlotQueue( + maxWorkers: maxWorkers, + # Add 1 to always allow for an extra item to be pushed onto the queue + # temporarily. 
After push (and sort), the bottom-most item will be deleted + queue: newAsyncHeapQueue[SlotQueueItem](maxSize.int + 1), + reservations: reservations, + running: false, + trackedFutures: TrackedFutures.new() + ) + # avoid instantiating `workers` in constructor to avoid side effects in + # `newAsyncQueue` procedure + +proc init*(_: type SlotQueueWorker): SlotQueueWorker = + SlotQueueWorker( + doneProcessing: newFuture[void]("slotqueue.worker.processing") + ) + +proc init*(_: type SlotQueueItem, + requestId: RequestId, + slotIndex: uint16, + ask: StorageAsk, + expiry: UInt256): SlotQueueItem = + + SlotQueueItem( + requestId: requestId, + slotIndex: slotIndex, + slotSize: ask.slotSize, + duration: ask.duration, + reward: ask.reward, + collateral: ask.collateral, + expiry: expiry + ) + +proc init*(_: type SlotQueueItem, + request: StorageRequest, + slotIndex: uint16): SlotQueueItem = + + SlotQueueItem.init(request.id, + slotIndex, + request.ask, + request.expiry) + +proc init*(_: type SlotQueueItem, + requestId: RequestId, + ask: StorageAsk, + expiry: UInt256): seq[SlotQueueItem] = + + if not ask.slots.inRange: + raise newException(SlotsOutOfRangeError, "Too many slots") + + var i = 0'u16 + proc initSlotQueueItem: SlotQueueItem = + let item = SlotQueueItem.init(requestId, i, ask, expiry) + inc i + return item + + var items = newSeqWith(ask.slots.int, initSlotQueueItem()) + Rng.instance.shuffle(items) + return items + +proc init*(_: type SlotQueueItem, + request: StorageRequest): seq[SlotQueueItem] = + + return SlotQueueItem.init(request.id, request.ask, request.expiry) + +proc inRange*(val: SomeUnsignedInt): bool = + val.uint16 in SlotQueueSize.low..SlotQueueSize.high + +proc requestId*(self: SlotQueueItem): RequestId = self.requestId +proc slotIndex*(self: SlotQueueItem): uint16 = self.slotIndex +proc slotSize*(self: SlotQueueItem): UInt256 = self.slotSize +proc duration*(self: SlotQueueItem): UInt256 = self.duration +proc reward*(self: SlotQueueItem): UInt256 = 
self.reward +proc collateral*(self: SlotQueueItem): UInt256 = self.collateral + +proc running*(self: SlotQueue): bool = self.running + +proc len*(self: SlotQueue): int = self.queue.len + +proc size*(self: SlotQueue): int = self.queue.size - 1 + +proc `$`*(self: SlotQueue): string = $self.queue + +proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) = + self.onProcessSlot = some onProcessSlot + +proc activeWorkers*(self: SlotQueue): int = + if not self.running: return 0 + + # active = capacity - available + self.maxWorkers - self.workers.len + +proc contains*(self: SlotQueue, item: SlotQueueItem): bool = + self.queue.contains(item) + +proc populateItem*(self: SlotQueue, + requestId: RequestId, + slotIndex: uint16): ?SlotQueueItem = + + trace "populate item, items in queue", len = self.queue.len + for item in self.queue.items: + trace "populate item search", itemRequestId = item.requestId, requestId + if item.requestId == requestId: + return some SlotQueueItem( + requestId: requestId, + slotIndex: slotIndex, + slotSize: item.slotSize, + duration: item.duration, + reward: item.reward, + collateral: item.collateral, + expiry: item.expiry + ) + return none SlotQueueItem + +proc push*(self: SlotQueue, item: SlotQueueItem): Future[?!void] {.async.} = + + trace "pushing item to queue", + requestId = item.requestId, slotIndex = item.slotIndex + + if not self.running: + let err = newException(QueueNotRunningError, "queue not running") + return failure(err) + + without availability =? await self.reservations.find(item.slotSize, + item.duration, + item.profitability, + item.collateral, + used = false): + let err = newException(NoMatchingAvailabilityError, "no availability") + return failure(err) + + if self.contains(item): + let err = newException(SlotQueueItemExistsError, "item already exists") + return failure(err) + + if err =? 
self.queue.pushNoWait(item).mapFailure.errorOption: + return failure(err) + + if self.queue.full(): + # delete the last item + self.queue.del(self.queue.size - 1) + + doAssert self.queue.len <= self.queue.size - 1 + return success() + +proc push*(self: SlotQueue, items: seq[SlotQueueItem]): Future[?!void] {.async.} = + for item in items: + if err =? (await self.push(item)).errorOption: + return failure(err) + + return success() + +proc findByRequest(self: SlotQueue, requestId: RequestId): seq[SlotQueueItem] = + var items: seq[SlotQueueItem] = @[] + for item in self.queue.items: + if item.requestId == requestId: + items.add item + return items + +proc delete*(self: SlotQueue, item: SlotQueueItem) = + logScope: + requestId = item.requestId + slotIndex = item.slotIndex + + trace "removing item from queue" + + if not self.running: + trace "cannot delete item from queue, queue not running" + return + + self.queue.delete(item) + +proc delete*(self: SlotQueue, requestId: RequestId, slotIndex: uint16) = + let item = SlotQueueItem(requestId: requestId, slotIndex: slotIndex) + self.delete(item) + +proc delete*(self: SlotQueue, requestId: RequestId) = + let items = self.findByRequest(requestId) + for item in items: + self.delete(item) + +proc `[]`*(self: SlotQueue, i: Natural): SlotQueueItem = + self.queue[i] + +proc addWorker(self: SlotQueue): ?!void = + if not self.running: + let err = newException(QueueNotRunningError, "queue must be running") + return failure(err) + + trace "adding new worker to worker queue" + + let worker = SlotQueueWorker.init() + try: + self.workers.addLastNoWait(worker) + except AsyncQueueFullError: + return failure("failed to add worker, worker queue full") + + return success() + +proc dispatch(self: SlotQueue, + worker: SlotQueueWorker, + item: SlotQueueItem) {.async.} = + logScope: + requestId = item.requestId + slotIndex = item.slotIndex + + if not self.running: + warn "Could not dispatch worker because queue is not running" + return + + if 
onProcessSlot =? self.onProcessSlot: + try: + await onProcessSlot(item, worker.doneProcessing) + await worker.doneProcessing + + if err =? self.addWorker().errorOption: + raise err # catch below + + except QueueNotRunningError as e: + info "could not re-add worker to worker queue, queue not running", + error = e.msg + except CancelledError: + # do not bubble exception up as it is called with `asyncSpawn` which would + # convert the exception into a `FutureDefect` + discard + except CatchableError as e: + # we don't have any insight into types of errors that `onProcessSlot` can + # throw because it is caller-defined + warn "Unknown error processing slot in worker", error = e.msg + +proc start*(self: SlotQueue) {.async.} = + if self.running: + return + + trace "starting slot queue" + + self.running = true + + # must be called in `start` to avoid sideeffects in `new` + self.workers = newAsyncQueue[SlotQueueWorker](self.maxWorkers) + + # Add initial workers to the `AsyncHeapQueue`. Once a worker has completed its + # task, a new worker will be pushed to the queue + for i in 0.. removeFuture()) + .catch((e: ref CatchableError) => removeFuture()) + + trace "tracking future" + self.futures[fut.id] = FutureBase(fut) + return fut + +proc track*[T, U](future: Future[T], self: U): Future[T] = + ## Convenience method that allows chaining future, eg: + ## `await someFut().track(sales)`, where `sales` has declared a + ## `trackedFutures` property. 
+ self.trackedFutures.track(future) + +proc cancelTracked*(self: TrackedFutures) {.async.} = + self.cancelling = true + + for future in self.futures.values: + if not future.isNil and not future.finished: + trace "cancelling tracked future", id = future.id + await future.cancelAndWait() + + self.cancelling = false diff --git a/codex/utils/asyncheapqueue.nim b/codex/utils/asyncheapqueue.nim index 17ca1f78..e7d7edad 100644 --- a/codex/utils/asyncheapqueue.nim +++ b/codex/utils/asyncheapqueue.nim @@ -283,7 +283,7 @@ proc len*[T](heap: AsyncHeapQueue[T]): int {.inline.} = proc size*[T](heap: AsyncHeapQueue[T]): int {.inline.} = ## Return the maximum number of elements in ``heap``. - len(heap.maxsize) + heap.maxsize proc `[]`*[T](heap: AsyncHeapQueue[T], i: Natural) : T {.inline.} = ## Access the i-th element of ``heap`` by order from first to last. diff --git a/codex/utils/then.nim b/codex/utils/then.nim new file mode 100644 index 00000000..2bb5699e --- /dev/null +++ b/codex/utils/then.nim @@ -0,0 +1,226 @@ +import pkg/chronos +import pkg/questionable +import pkg/questionable/results +import pkg/upraises + +# Similar to JavaScript's Promise API, `.then` and `.catch` can be used to +# handle results and errors of async `Futures` within a synchronous closure. +# They can be used as an alternative to `asyncSpawn` which does not return a +# value and will raise a `FutureDefect` if there are unhandled errors +# encountered. Both `.then` and `.catch` act as callbacks that do not block the +# synchronous closure's flow. + +# `.then` is called when the `Future` is successfully completed and can be +# chained as many times as desired, calling each `.then` callback in order. When +# the `Future` returns `Result[T, ref CatchableError]` (or `?!T`), the value +# called in the `.then` callback will be unpacked from the `Result` as a +# convenience. In other words, for `Future[?!T]`, the `.then` callback will take +# a single parameter `T`. 
See `tests/utils/testthen.nim` for more examples. To +# allow for chaining, `.then` returns its future. If the future is already +# complete, the `.then` callback will be executed immediately. + +# `.catch` is called when the `Future` fails. In the case when the `Future` +# returns a `Result[T, ref CatchableError` (or `?!T`), `.catch` will be called +# if the `Result` contains an error. If the `Future` is already failed (or +# `Future[?!T]` contains an error), the `.catch` callback will be excuted +# immediately. + +# NOTE: Cancelled `Futures` are discarded as bubbling the `CancelledError` to +# the synchronous closure will likely cause an unintended and unhandled +# exception. + +# More info on JavaScript's Promise API can be found at: +# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise + +runnableExamples: + proc asyncProc(): Future[int] {.async.} = + await sleepAsync(1.millis) + return 1 + + asyncProc() + .then(proc(i: int) = echo "returned ", i) + .catch(proc(e: ref CatchableError) = doAssert false, "will not be triggered") + + # outputs "returned 1" + + proc asyncProcWithError(): Future[int] {.async.} = + await sleepAsync(1.millis) + raise newException(ValueError, "some error") + + asyncProcWithError() + .then(proc(i: int) = doAssert false, "will not be triggered") + .catch(proc(e: ref CatchableError) = echo "errored: ", e.msg) + + # outputs "errored: some error" + +type + OnSuccess*[T] = proc(val: T) {.gcsafe, upraises: [].} + OnError* = proc(err: ref CatchableError) {.gcsafe, upraises: [].} + +proc ignoreError(err: ref CatchableError) = discard + +template returnOrError(future: FutureBase, onError: OnError) = + if not future.finished: + return + + if future.cancelled: + # do not bubble as closure is synchronous + return + + if future.failed: + onError(future.error) + return + + +proc then*(future: Future[void], + onError: OnError): + Future[void] = + + proc cb(udata: pointer) = + future.returnOrError(onError) + + proc 
cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc then*(future: Future[void], + onSuccess: OnSuccess[void], + onError: OnError = ignoreError): + Future[void] = + + proc cb(udata: pointer) = + future.returnOrError(onError) + onSuccess() + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc then*[T](future: Future[T], + onSuccess: OnSuccess[T], + onError: OnError = ignoreError): + Future[T] = + + proc cb(udata: pointer) = + future.returnOrError(onError) + + without val =? future.read.catch, err: + onError(err) + return + onSuccess(val) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc then*[T](future: Future[?!T], + onSuccess: OnSuccess[T], + onError: OnError = ignoreError): + Future[?!T] = + + proc cb(udata: pointer) = + future.returnOrError(onError) + + try: + without val =? future.read, err: + onError(err) + return + onSuccess(val) + except CatchableError as e: + onError(e) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc then*(future: Future[?!void], + onError: OnError = ignoreError): + Future[?!void] = + + proc cb(udata: pointer) = + future.returnOrError(onError) + + try: + if err =? 
future.read.errorOption: + onError(err) + except CatchableError as e: + onError(e) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc then*(future: Future[?!void], + onSuccess: OnSuccess[void], + onError: OnError = ignoreError): + Future[?!void] = + + proc cb(udata: pointer) = + future.returnOrError(onError) + + try: + if err =? future.read.errorOption: + onError(err) + return + except CatchableError as e: + onError(e) + return + onSuccess() + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc catch*[T](future: Future[T], onError: OnError) = + + proc cb(udata: pointer) = + future.returnOrError(onError) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation + +proc catch*[T](future: Future[?!T], onError: OnError) = + + proc cb(udata: pointer) = + future.returnOrError(onError) + + try: + if err =? 
future.read.errorOption: + onError(err) + except CatchableError as e: + onError(e) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + + future.addCallback(cb) + future.cancelCallback = cancellation diff --git a/tests/codex/helpers/eventually.nim b/tests/codex/helpers/eventually.nim index bbeef3be..3d68fc62 100644 --- a/tests/codex/helpers/eventually.nim +++ b/tests/codex/helpers/eventually.nim @@ -11,3 +11,15 @@ template eventually*(condition: untyped, timeout = 5.seconds): bool = else: await sleepAsync(1.millis) await loop() + +template always*(condition: untyped, timeout = 50.millis): bool = + proc loop: Future[bool] {.async.} = + let start = Moment.now() + while true: + if not condition: + return false + if Moment.now() > (start + timeout): + return true + else: + await sleepAsync(1.millis) + await loop() diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index 6616ef57..867a3ef5 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -2,6 +2,8 @@ import std/sequtils import std/tables import std/hashes import std/sets +import std/sugar +import pkg/questionable import pkg/codex/market import pkg/codex/contracts/requests import pkg/codex/contracts/config @@ -53,7 +55,7 @@ type callback: OnRequest FulfillmentSubscription* = ref object of Subscription market: MockMarket - requestId: RequestId + requestId: ?RequestId callback: OnFulfillment SlotFilledSubscription* = ref object of Subscription market: MockMarket @@ -65,11 +67,11 @@ type callback: OnSlotFreed RequestCancelledSubscription* = ref object of Subscription market: MockMarket - requestId: RequestId + requestId: ?RequestId callback: OnRequestCancelled RequestFailedSubscription* = ref object of Subscription market: MockMarket - requestId: RequestId + requestId: ?RequestId callback: OnRequestCancelled ProofSubmittedSubscription = ref object of Subscription market: MockMarket @@ -83,7 +85,7 @@ proc 
hash*(requestId: RequestId): Hash = proc new*(_: type MockMarket): MockMarket = ## Create a new mocked Market instance - ## + ## let config = MarketplaceConfig( collateral: CollateralConfig( repairRewardPercentage: 10, @@ -112,7 +114,9 @@ method requestStorage*(market: MockMarket, request: StorageRequest) {.async.} = market.requested.add(request) var subscriptions = market.subscriptions.onRequest for subscription in subscriptions: - subscription.callback(request.id, request.ask) + subscription.callback(request.id, + request.ask, + request.expiry) method myRequests*(market: MockMarket): Future[seq[RequestId]] {.async.} = return market.activeRequests[market.signer] @@ -173,28 +177,32 @@ proc emitSlotFilled*(market: MockMarket, if requestMatches and slotMatches: subscription.callback(requestId, slotIndex) -proc emitSlotFreed*(market: MockMarket, slotId: SlotId) = +proc emitSlotFreed*(market: MockMarket, + requestId: RequestId, + slotIndex: UInt256) = var subscriptions = market.subscriptions.onSlotFreed for subscription in subscriptions: - subscription.callback(slotId) + subscription.callback(requestId, slotIndex) -proc emitRequestCancelled*(market: MockMarket, - requestId: RequestId) = +proc emitRequestCancelled*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onRequestCancelled for subscription in subscriptions: - if subscription.requestId == requestId: + if subscription.requestId == requestId.some or + subscription.requestId.isNone: subscription.callback(requestId) proc emitRequestFulfilled*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onFulfillment for subscription in subscriptions: - if subscription.requestId == requestId: + if subscription.requestId == requestId.some or + subscription.requestId.isNone: subscription.callback(requestId) proc emitRequestFailed*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onRequestFailed for subscription in 
subscriptions: - if subscription.requestId == requestId: + if subscription.requestId == requestId.some or + subscription.requestId.isNone: subscription.callback(requestId) proc fillSlot*(market: MockMarket, @@ -221,7 +229,12 @@ method fillSlot*(market: MockMarket, method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} = market.freed.add(slotId) - market.emitSlotFreed(slotId) + for s in market.filled: + if slotId(s.requestId, s.slotIndex) == slotId: + market.emitSlotFreed(s.requestId, s.slotIndex) + break + market.slotState[slotId] = SlotState.Free + method withdrawFunds*(market: MockMarket, requestId: RequestId) {.async.} = @@ -281,13 +294,24 @@ method subscribeRequests*(market: MockMarket, market.subscriptions.onRequest.add(subscription) return subscription +method subscribeFulfillment*(market: MockMarket, + callback: OnFulfillment): + Future[Subscription] {.async.} = + let subscription = FulfillmentSubscription( + market: market, + requestId: none RequestId, + callback: callback + ) + market.subscriptions.onFulfillment.add(subscription) + return subscription + method subscribeFulfillment*(market: MockMarket, requestId: RequestId, callback: OnFulfillment): Future[Subscription] {.async.} = let subscription = FulfillmentSubscription( market: market, - requestId: requestId, + requestId: some requestId, callback: callback ) market.subscriptions.onFulfillment.add(subscription) @@ -321,25 +345,47 @@ method subscribeSlotFreed*(market: MockMarket, market.subscriptions.onSlotFreed.add(subscription) return subscription +method subscribeRequestCancelled*(market: MockMarket, + callback: OnRequestCancelled): + Future[Subscription] {.async.} = + let subscription = RequestCancelledSubscription( + market: market, + requestId: none RequestId, + callback: callback + ) + market.subscriptions.onRequestCancelled.add(subscription) + return subscription + method subscribeRequestCancelled*(market: MockMarket, requestId: RequestId, callback: OnRequestCancelled): 
Future[Subscription] {.async.} = let subscription = RequestCancelledSubscription( market: market, - requestId: requestId, + requestId: some requestId, callback: callback ) market.subscriptions.onRequestCancelled.add(subscription) return subscription +method subscribeRequestFailed*(market: MockMarket, + callback: OnRequestFailed): + Future[Subscription] {.async.} = + let subscription = RequestFailedSubscription( + market: market, + requestId: none RequestId, + callback: callback + ) + market.subscriptions.onRequestFailed.add(subscription) + return subscription + method subscribeRequestFailed*(market: MockMarket, requestId: RequestId, callback: OnRequestFailed): Future[Subscription] {.async.} = let subscription = RequestFailedSubscription( market: market, - requestId: requestId, + requestId: some requestId, callback: callback ) market.subscriptions.onRequestFailed.add(subscription) @@ -355,6 +401,17 @@ method subscribeProofSubmission*(mock: MockMarket, mock.subscriptions.onProofSubmitted.add(subscription) return subscription +method queryPastStorageRequests*(market: MockMarket, + blocksAgo: int): + Future[seq[PastStorageRequest]] {.async.} = + # MockMarket does not have the concept of blocks, so simply return all + # previous events + return market.requested.map(request => + PastStorageRequest(requestId: request.id, + ask: request.ask, + expiry: request.expiry) + ) + method unsubscribe*(subscription: RequestSubscription) {.async.} = subscription.market.subscriptions.onRequest.keepItIf(it != subscription) diff --git a/tests/codex/helpers/mocksalesagent.nim b/tests/codex/helpers/mocksalesagent.nim new file mode 100644 index 00000000..43b0be87 --- /dev/null +++ b/tests/codex/helpers/mocksalesagent.nim @@ -0,0 +1,16 @@ +import pkg/codex/sales/salesagent + +type + MockSalesAgent = ref object of SalesAgent + fulfilledCalled*: bool + failedCalled*: bool + slotFilledCalled*: bool + +method onFulfilled*(agent: SalesAgent, requestId: RequestId) = + fulfilledCalled = true + 
+method onFailed*(agent: SalesAgent, requestId: RequestId) = + failedCalled = true + +method onSlotFilled*(agent: SalesAgent, requestId: RequestId, slotIndex: UInt256) {.base.} = + slotFilledCalled = true diff --git a/tests/codex/sales/states/testpreparing.nim b/tests/codex/sales/states/testpreparing.nim new file mode 100644 index 00000000..6f5d8c7f --- /dev/null +++ b/tests/codex/sales/states/testpreparing.nim @@ -0,0 +1,29 @@ +import std/unittest +import pkg/questionable +import pkg/codex/contracts/requests +import pkg/codex/sales/states/downloading +import pkg/codex/sales/states/cancelled +import pkg/codex/sales/states/failed +import pkg/codex/sales/states/filled +import ../../examples + +suite "sales state 'preparing'": + + let request = StorageRequest.example + let slotIndex = (request.ask.slots div 2).u256 + var state: SalePreparing + + setup: + state = SalePreparing.new() + + test "switches to cancelled state when request expires": + let next = state.onCancelled(request) + check !next of SaleCancelled + + test "switches to failed state when request fails": + let next = state.onFailed(request) + check !next of SaleFailed + + test "switches to filled state when slot is filled": + let next = state.onSlotFilled(request.id, slotIndex) + check !next of SaleFilled diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index a595676c..e8d890d3 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -11,6 +11,7 @@ import pkg/codex/sales import pkg/codex/sales/salesdata import pkg/codex/sales/salescontext import pkg/codex/sales/reservations +import pkg/codex/sales/slotqueue import pkg/codex/stores/repostore import pkg/codex/proving import pkg/codex/blocktype as bt @@ -32,6 +33,8 @@ asyncchecksuite "Sales": var proving: Proving var reservations: Reservations var repo: RepoStore + var queue: SlotQueue + var itemsProcessed: seq[SlotQueueItem] setup: availability = Availability.init( @@ -67,22 +70,122 @@ 
asyncchecksuite "Sales": slot: UInt256, onBatch: BatchProc): Future[?!void] {.async.} = return success() + queue = sales.context.slotQueue proving.onProve = proc(slot: Slot): Future[seq[byte]] {.async.} = return proof await sales.start() request.expiry = (clock.now() + 42).u256 + itemsProcessed = @[] teardown: - await repo.stop() await sales.stop() + await repo.stop() proc getAvailability: ?!Availability = waitFor reservations.get(availability.id) - proc wasIgnored: Future[bool] {.async.} = - return - eventually sales.agents.len == 1 and # agent created at first - eventually sales.agents.len == 0 # then removed once ignored + proc notProcessed(itemsProcessed: seq[SlotQueueItem], + request: StorageRequest): bool = + let items = SlotQueueItem.init(request) + for i in 0.. itemB + + test "expands available all possible slot indices on init": + let request = StorageRequest.example + let items = SlotQueueItem.init(request) + check items.len.uint64 == request.ask.slots + var checked = 0 + for slotIndex in 0'u16.. Date: Mon, 31 Jul 2023 15:09:34 +1000 Subject: [PATCH 6/9] Cancel and wait for asyncstatemachine futures when stopping (#493) * Simplify `.then` (promise api) and tests * Remove tracked future when cancelled. Add tracked future tests * Track and cancel statemachine futures The futures created in each asyncstatemachine instance are tracked, and each future is cancelled and waited in `stop`. Change `asyncstatemachine.stop` to be async so `machine.trackedFutures.cancelAndWait` could be called. Add a constructor for `asyncstatemachine` that initialises the `trackedFutures` instance, and call the constructor from derived class constructors. 
--- codex/purchasing/purchase.nim | 15 +- codex/sales.nim | 2 +- codex/sales/salesagent.nim | 15 +- codex/sales/slotqueue.nim | 3 +- codex/utils/asyncstatemachine.nim | 46 +- codex/utils/then.nim | 137 ++--- codex/{sales => utils}/trackedfutures.nim | 27 +- tests/codex/sales/testsalesagent.nim | 1 + tests/codex/testutils.nim | 1 + tests/codex/utils/testasyncstatemachine.nim | 4 +- tests/codex/utils/testthen.nim | 621 +++++++++++--------- tests/codex/utils/testtrackedfutures.nim | 67 +++ 12 files changed, 536 insertions(+), 403 deletions(-) rename codex/{sales => utils}/trackedfutures.nim (64%) create mode 100644 tests/codex/utils/testtrackedfutures.nim diff --git a/codex/purchasing/purchase.nim b/codex/purchasing/purchase.nim index 07498044..add60a2f 100644 --- a/codex/purchasing/purchase.nim +++ b/codex/purchasing/purchase.nim @@ -31,13 +31,14 @@ func new*( clock: Clock ): Purchase = ## create a new instance of a Purchase - ## - Purchase( - future: Future[void].new(), - requestId: requestId, - market: market, - clock: clock - ) + ## + var purchase = Purchase.new() + purchase.future = Future[void].new() + purchase.requestId = requestId + purchase.market = market + purchase.clock = clock + + return purchase func new*( _: type Purchase, diff --git a/codex/sales.nim b/codex/sales.nim index 8a7f0cb5..c025d867 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -15,10 +15,10 @@ import ./sales/salescontext import ./sales/salesagent import ./sales/statemachine import ./sales/slotqueue -import ./sales/trackedfutures import ./sales/states/preparing import ./sales/states/unknown import ./utils/then +import ./utils/trackedfutures ## Sales holds a list of available storage that it may sell. 
## diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index ef7b255b..3f84ff9b 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -32,12 +32,13 @@ proc newSalesAgent*(context: SalesContext, requestId: RequestId, slotIndex: UInt256, request: ?StorageRequest): SalesAgent = - SalesAgent( - context: context, - data: SalesData( - requestId: requestId, - slotIndex: slotIndex, - request: request)) + var agent = SalesAgent.new() + agent.context = context + agent.data = SalesData( + requestId: requestId, + slotIndex: slotIndex, + request: request) + return agent proc retrieveRequest*(agent: SalesAgent) {.async.} = let data = agent.data @@ -96,5 +97,5 @@ proc unsubscribe*(agent: SalesAgent) {.async.} = agent.subscribed = false proc stop*(agent: SalesAgent) {.async.} = - procCall Machine(agent).stop() + await Machine(agent).stop() await agent.unsubscribe() diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim index b512360d..80e95aec 100644 --- a/codex/sales/slotqueue.nim +++ b/codex/sales/slotqueue.nim @@ -1,5 +1,4 @@ import std/sequtils -import std/sugar import std/tables import pkg/chronicles import pkg/chronos @@ -7,13 +6,13 @@ import pkg/questionable import pkg/questionable/results import pkg/upraises import ./reservations -import ./trackedfutures import ../errors import ../rng import ../utils import ../contracts/requests import ../utils/asyncheapqueue import ../utils/then +import ../utils/trackedfutures logScope: topics = "marketplace slotqueue" diff --git a/codex/utils/asyncstatemachine.nim b/codex/utils/asyncstatemachine.nim index 13392008..3d49f741 100644 --- a/codex/utils/asyncstatemachine.nim +++ b/codex/utils/asyncstatemachine.nim @@ -1,7 +1,10 @@ +import std/sugar import pkg/questionable import pkg/chronos import pkg/chronicles import pkg/upraises +import ./trackedfutures +import ./then push: {.upraises:[].} @@ -10,8 +13,8 @@ type state: State running: Future[void] scheduled: AsyncQueue[Event] - scheduling: 
Future[void] started: bool + trackedFutures: TrackedFutures State* = ref object of RootObj Query*[T] = proc(state: State): T Event* = proc(state: State): ?State {.gcsafe, upraises:[].} @@ -19,6 +22,9 @@ type logScope: topics = "statemachine" +proc new*[T: Machine](_: type T): T = + T(trackedFutures: TrackedFutures.new()) + method `$`*(state: State): string {.base.} = raiseAssert "not implemented" @@ -60,21 +66,21 @@ proc run(machine: Machine, state: State) {.async.} = discard proc scheduler(machine: Machine) {.async.} = - proc onRunComplete(udata: pointer) {.gcsafe.} = - var fut = cast[FutureBase](udata) - if fut.failed(): - machine.schedule(machine.onError(fut.error)) - + var running: Future[void] try: - while true: - let event = await machine.scheduled.get() + while machine.started: + let event = await machine.scheduled.get().track(machine) if next =? event(machine.state): - if not machine.running.isNil: - await machine.running.cancelAndWait() + if not running.isNil and not running.finished: + await running.cancelAndWait() machine.state = next debug "enter state", state = machine.state - machine.running = machine.run(machine.state) - machine.running.addCallback(onRunComplete) + running = machine.run(machine.state) + running + .track(machine) + .catch((err: ref CatchableError) => + machine.schedule(machine.onError(err)) + ) except CancelledError: discard @@ -84,18 +90,20 @@ proc start*(machine: Machine, initialState: State) = if machine.scheduled.isNil: machine.scheduled = newAsyncQueue[Event]() - machine.scheduling = machine.scheduler() + machine.started = true + machine.scheduler() + .track(machine) + .catch((err: ref CatchableError) => + error("Error in scheduler", error = err.msg) + ) machine.schedule(Event.transition(machine.state, initialState)) -proc stop*(machine: Machine) = +proc stop*(machine: Machine) {.async.} = if not machine.started: return - if not machine.scheduling.isNil: - machine.scheduling.cancel() - if not machine.running.isNil: - 
machine.running.cancel() + machine.started = false + await machine.trackedFutures.cancelTracked() machine.state = nil - machine.started = false diff --git a/codex/utils/then.nim b/codex/utils/then.nim index 2bb5699e..fbcf7bf3 100644 --- a/codex/utils/then.nim +++ b/codex/utils/then.nim @@ -22,12 +22,11 @@ import pkg/upraises # `.catch` is called when the `Future` fails. In the case when the `Future` # returns a `Result[T, ref CatchableError` (or `?!T`), `.catch` will be called # if the `Result` contains an error. If the `Future` is already failed (or -# `Future[?!T]` contains an error), the `.catch` callback will be excuted +# `Future[?!T]` contains an error), the `.catch` callback will be executed # immediately. -# NOTE: Cancelled `Futures` are discarded as bubbling the `CancelledError` to -# the synchronous closure will likely cause an unintended and unhandled -# exception. +# `.cancelled` is called when the `Future` is cancelled. If the `Future` is +# already cancelled, the `.cancelled` callback will be executed immediately. 
# More info on JavaScript's Promise API can be found at: # https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise @@ -56,44 +55,30 @@ runnableExamples: type OnSuccess*[T] = proc(val: T) {.gcsafe, upraises: [].} OnError* = proc(err: ref CatchableError) {.gcsafe, upraises: [].} + OnCancelled* = proc() {.gcsafe, upraises: [].} proc ignoreError(err: ref CatchableError) = discard +proc ignoreCancelled() = discard + +template handleFinished(future: FutureBase, + onError: OnError, + onCancelled: OnCancelled) = -template returnOrError(future: FutureBase, onError: OnError) = if not future.finished: return if future.cancelled: - # do not bubble as closure is synchronous + onCancelled() return if future.failed: onError(future.error) return - -proc then*(future: Future[void], - onError: OnError): - Future[void] = +proc then*(future: Future[void], onSuccess: OnSuccess[void]): Future[void] = proc cb(udata: pointer) = - future.returnOrError(onError) - - proc cancellation(udata: pointer) = - if not future.finished(): - future.removeCallback(cb) - - future.addCallback(cb) - future.cancelCallback = cancellation - return future - -proc then*(future: Future[void], - onSuccess: OnSuccess[void], - onError: OnError = ignoreError): - Future[void] = - - proc cb(udata: pointer) = - future.returnOrError(onError) + future.handleFinished(ignoreError, ignoreCancelled) onSuccess() proc cancellation(udata: pointer) = @@ -104,42 +89,13 @@ proc then*(future: Future[void], future.cancelCallback = cancellation return future -proc then*[T](future: Future[T], - onSuccess: OnSuccess[T], - onError: OnError = ignoreError): - Future[T] = +proc then*[T](future: Future[T], onSuccess: OnSuccess[T]): Future[T] = proc cb(udata: pointer) = - future.returnOrError(onError) + future.handleFinished(ignoreError, ignoreCancelled) - without val =? 
future.read.catch, err: - onError(err) - return - onSuccess(val) - - proc cancellation(udata: pointer) = - if not future.finished(): - future.removeCallback(cb) - - future.addCallback(cb) - future.cancelCallback = cancellation - return future - -proc then*[T](future: Future[?!T], - onSuccess: OnSuccess[T], - onError: OnError = ignoreError): - Future[?!T] = - - proc cb(udata: pointer) = - future.returnOrError(onError) - - try: - without val =? future.read, err: - onError(err) - return + if val =? future.read.catch: onSuccess(val) - except CatchableError as e: - onError(e) proc cancellation(udata: pointer) = if not future.finished(): @@ -149,18 +105,16 @@ proc then*[T](future: Future[?!T], future.cancelCallback = cancellation return future -proc then*(future: Future[?!void], - onError: OnError = ignoreError): - Future[?!void] = +proc then*[T](future: Future[?!T], onSuccess: OnSuccess[T]): Future[?!T] = proc cb(udata: pointer) = - future.returnOrError(onError) + future.handleFinished(ignoreError, ignoreCancelled) try: - if err =? future.read.errorOption: - onError(err) + if val =? future.read: + onSuccess(val) except CatchableError as e: - onError(e) + ignoreError(e) proc cancellation(udata: pointer) = if not future.finished(): @@ -170,22 +124,17 @@ proc then*(future: Future[?!void], future.cancelCallback = cancellation return future -proc then*(future: Future[?!void], - onSuccess: OnSuccess[void], - onError: OnError = ignoreError): - Future[?!void] = +proc then*(future: Future[?!void], onSuccess: OnSuccess[void]): Future[?!void] = proc cb(udata: pointer) = - future.returnOrError(onError) + future.handleFinished(ignoreError, ignoreCancelled) try: - if err =? 
future.read.errorOption: - onError(err) - return + if future.read.isOk: + onSuccess() except CatchableError as e: - onError(e) + ignoreError(e) return - onSuccess() proc cancellation(udata: pointer) = if not future.finished(): @@ -197,8 +146,10 @@ proc then*(future: Future[?!void], proc catch*[T](future: Future[T], onError: OnError) = + if future.isNil: return + proc cb(udata: pointer) = - future.returnOrError(onError) + future.handleFinished(onError, ignoreCancelled) proc cancellation(udata: pointer) = if not future.finished(): @@ -209,8 +160,10 @@ proc catch*[T](future: Future[T], onError: OnError) = proc catch*[T](future: Future[?!T], onError: OnError) = + if future.isNil: return + proc cb(udata: pointer) = - future.returnOrError(onError) + future.handleFinished(onError, ignoreCancelled) try: if err =? future.read.errorOption: @@ -224,3 +177,31 @@ proc catch*[T](future: Future[?!T], onError: OnError) = future.addCallback(cb) future.cancelCallback = cancellation + +proc cancelled*[T](future: Future[T], onCancelled: OnCancelled): Future[T] = + + proc cb(udata: pointer) = + future.handleFinished(ignoreError, onCancelled) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + onCancelled() + + future.addCallback(cb) + future.cancelCallback = cancellation + return future + +proc cancelled*[T](future: Future[?!T], onCancelled: OnCancelled): Future[?!T] = + + proc cb(udata: pointer) = + future.handleFinished(ignoreError, onCancelled) + + proc cancellation(udata: pointer) = + if not future.finished(): + future.removeCallback(cb) + onCancelled() + + future.addCallback(cb) + future.cancelCallback = cancellation + return future diff --git a/codex/sales/trackedfutures.nim b/codex/utils/trackedfutures.nim similarity index 64% rename from codex/sales/trackedfutures.nim rename to codex/utils/trackedfutures.nim index b9d9a565..ea26c4ae 100644 --- a/codex/sales/trackedfutures.nim +++ b/codex/utils/trackedfutures.nim @@ -12,21 +12,25 @@ 
type logScope: topics = "trackable futures" -proc track*[T](self: TrackedFutures, fut: Future[T]): Future[T] = - logScope: - id = fut.id +proc len*(self: TrackedFutures): int = self.futures.len - proc removeFuture() = - if not self.cancelling and not fut.isNil: - trace "removing tracked future" - self.futures.del(fut.id) +proc removeFuture(self: TrackedFutures, future: FutureBase) = + if not self.cancelling and not future.isNil: + trace "removing tracked future" + self.futures.del(future.id) + +proc track*[T](self: TrackedFutures, fut: Future[T]): Future[T] = + if self.cancelling: + return fut + + trace "tracking future", id = fut.id + self.futures[fut.id] = FutureBase(fut) fut - .then((val: T) => removeFuture()) - .catch((e: ref CatchableError) => removeFuture()) + .then((val: T) => self.removeFuture(fut)) + .cancelled(() => self.removeFuture(fut)) + .catch((e: ref CatchableError) => self.removeFuture(fut)) - trace "tracking future" - self.futures[fut.id] = FutureBase(fut) return fut proc track*[T, U](future: Future[T], self: U): Future[T] = @@ -43,4 +47,5 @@ proc cancelTracked*(self: TrackedFutures) {.async.} = trace "cancelling tracked future", id = future.id await future.cancelAndWait() + self.futures.clear() self.cancelling = false diff --git a/tests/codex/sales/testsalesagent.nim b/tests/codex/sales/testsalesagent.nim index 90951ff6..690d7902 100644 --- a/tests/codex/sales/testsalesagent.nim +++ b/tests/codex/sales/testsalesagent.nim @@ -10,6 +10,7 @@ import pkg/codex/proving import ../helpers/mockmarket import ../helpers/mockclock import ../helpers/eventually +import ../helpers import ../examples var onCancelCalled = false diff --git a/tests/codex/testutils.nim b/tests/codex/testutils.nim index 0e602db2..6b4b2366 100644 --- a/tests/codex/testutils.nim +++ b/tests/codex/testutils.nim @@ -3,5 +3,6 @@ import ./utils/testkeyutils import ./utils/testasyncstatemachine import ./utils/testtimer import ./utils/testthen +import ./utils/testtrackedfutures 
{.warning[UnusedImport]: off.} diff --git a/tests/codex/utils/testasyncstatemachine.nim b/tests/codex/utils/testasyncstatemachine.nim index eb0de264..4336657a 100644 --- a/tests/codex/utils/testasyncstatemachine.nim +++ b/tests/codex/utils/testasyncstatemachine.nim @@ -99,7 +99,7 @@ asyncchecksuite "async state machines": test "stops scheduling and current state": machine.start(State2.new()) await sleepAsync(1.millis) - machine.stop() + await machine.stop() machine.schedule(moveToNextStateEvent) await sleepAsync(1.millis) check runs == [0, 1, 0, 0] @@ -130,5 +130,5 @@ asyncchecksuite "async state machines": machine.start(State2.new()) check eventually machine.query(description).isSome - machine.stop() + await machine.stop() check machine.query(description).isNone diff --git a/tests/codex/utils/testthen.nim b/tests/codex/utils/testthen.nim index 7a5037bf..0b73d9ef 100644 --- a/tests/codex/utils/testthen.nim +++ b/tests/codex/utils/testthen.nim @@ -5,340 +5,409 @@ import pkg/questionable/results import codex/utils/then import ../helpers +proc newError(): ref CatchableError = + (ref CatchableError)(msg: "some error") + asyncchecksuite "then - Future[void]": - var returnsVoidWasRun: bool - var error = (ref CatchableError)(msg: "some error") + var error = newError() + var future: Future[void] setup: - returnsVoidWasRun = false + future = newFuture[void]("test void") - proc returnsVoid() {.async.} = - await sleepAsync 1.millis - returnsVoidWasRun = true + teardown: + if not future.finished: + raiseAssert "test should finish future" - proc returnsVoidError() {.async.} = - raise error + test "then callback is fired when future is already finished": + var firedImmediately = false + future.complete() + discard future.then(proc() = firedImmediately = true) + check eventually firedImmediately - proc returnsVoidCancelled() {.async.} = - await sleepAsync(1.seconds) + test "then callback is fired after future is finished": + var fired = false + discard future.then(proc() = fired 
= true) + future.complete() + check eventually fired - proc wasCancelled(error: ref CancelledError): bool = - not error.isNil and error.msg == "Future operation cancelled!" + test "catch callback is fired when future is already failed": + var actual: ref CatchableError + future.fail(error) + future.catch(proc(err: ref CatchableError) = actual = err) + check eventually actual == error - test "calls async proc when returns Future[void]": - discard returnsVoid().then( - proc(err: ref CatchableError) = discard - ) - check eventually returnsVoidWasRun + test "catch callback is fired after future is failed": + var actual: ref CatchableError + future.catch(proc(err: ref CatchableError) = actual = err) + future.fail(error) + check eventually actual == error - test "calls onSuccess when Future[void] complete": + test "cancelled callback is fired when future is already cancelled": + var fired = false + await future.cancelAndWait() + discard future.cancelled(proc() = fired = true) + check eventually fired + + test "cancelled callback is fired after future is cancelled": + var fired = false + discard future.cancelled(proc() = fired = true) + await future.cancelAndWait() + check eventually fired + + test "does not fire other callbacks when successful": var onSuccessCalled = false - discard returnsVoid().then( - proc() = onSuccessCalled = true, - proc(err: ref CatchableError) = discard - ) - check eventually returnsVoidWasRun - check eventually onSuccessCalled + var onCancelledCalled = false + var onCatchCalled = false - test "can pass only onSuccess for Future[void]": + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.complete() + + check eventually onSuccessCalled + check always (not onCancelledCalled and not onCatchCalled) + + test "does not fire other callbacks when fails": var onSuccessCalled = false - discard returnsVoid().then( - proc() = onSuccessCalled 
= true - ) - check eventually returnsVoidWasRun - check eventually onSuccessCalled + var onCancelledCalled = false + var onCatchCalled = false - test "can chain onSuccess when Future[void] complete": + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.fail(error) + + check eventually onCatchCalled + check always (not onCancelledCalled and not onSuccessCalled) + + test "does not fire other callbacks when cancelled": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + await future.cancelAndWait() + + check eventually onCancelledCalled + check always (not onSuccessCalled and not onCatchCalled) + + test "can chain onSuccess when future completes": var onSuccessCalledTimes = 0 - discard returnsVoid() + discard future .then(proc() = inc onSuccessCalledTimes) .then(proc() = inc onSuccessCalledTimes) .then(proc() = inc onSuccessCalledTimes) + future.complete() check eventually onSuccessCalledTimes == 3 - test "calls onError when Future[void] fails": - var errorActual: ref CatchableError - discard returnsVoidError().then( - proc() = discard, - proc(e: ref CatchableError) = errorActual = e - ) - check eventually error == errorActual - - test "calls onError when Future[void] fails": - var errorActual: ref CatchableError - discard returnsVoidError().then( - proc(e: ref CatchableError) = errorActual = e - ) - check eventually error == errorActual - - test "catch callback fired when Future[void] fails": - var errorActual: ref CatchableError - returnsVoidError().catch( - proc(e: ref CatchableError) = errorActual = e - ) - check eventually error == errorActual - - test "does not fire onSuccess callback when Future[void] fails": - var onSuccessCalled = false - - 
returnsVoidError() - .then(proc() = onSuccessCalled = true) - .then(proc() = onSuccessCalled = true) - .catch(proc(e: ref CatchableError) = discard) - - check always (not onSuccessCalled) - asyncchecksuite "then - Future[T]": - var returnsValWasRun: bool - var error = (ref CatchableError)(msg: "some error") + var error = newError() + var future: Future[int] setup: - returnsValWasRun = false + future = newFuture[int]("test void") - proc returnsVal(): Future[int] {.async.} = - await sleepAsync 1.millis - returnsValWasRun = true - return 1 + teardown: + if not future.finished: + raiseAssert "test should finish future" - proc returnsValError(): Future[int] {.async.} = - raise error + test "then callback is fired when future is already finished": + var cbVal = 0 + future.complete(1) + discard future.then(proc(val: int) = cbVal = val) + check eventually cbVal == 1 - proc returnsValCancelled(): Future[int] {.async.} = - await sleepAsync(1.seconds) + test "then callback is fired after future is finished": + var cbVal = 0 + discard future.then(proc(val: int) = cbVal = val) + future.complete(1) + check eventually cbVal == 1 - proc wasCancelled(error: ref CancelledError): bool = - not error.isNil and error.msg == "Future operation cancelled!" 
+ test "catch callback is fired when future is already failed": + var actual: ref CatchableError + future.fail(error) + future.catch(proc(err: ref CatchableError) = actual = err) + check eventually actual == error - test "calls onSuccess when Future[T] complete": - var returnedVal = 0 - discard returnsVal().then( - proc(val: int) = returnedVal = val, - proc(err: ref CatchableError) = discard - ) - check eventually returnsValWasRun - check eventually returnedVal == 1 + test "catch callback is fired after future is failed": + var actual: ref CatchableError + future.catch(proc(err: ref CatchableError) = actual = err) + future.fail(error) + check eventually actual == error - test "can pass only onSuccess for Future[T]": - var returnedVal = 0 - discard returnsVal().then( - proc(val: int) = returnedVal = val - ) - check eventually returnsValWasRun - check eventually returnedVal == 1 + test "cancelled callback is fired when future is already cancelled": + var fired = false + await future.cancelAndWait() + discard future.cancelled(proc() = fired = true) + check eventually fired - test "can chain onSuccess when Future[T] complete": - var onSuccessCalledWith: seq[int] = @[] - discard returnsVal() - .then(proc(val: int) = onSuccessCalledWith.add(val)) - .then(proc(val: int) = onSuccessCalledWith.add(val)) - .then(proc(val: int) = onSuccessCalledWith.add(val)) - check eventually onSuccessCalledWith == @[1, 1, 1] + test "cancelled callback is fired after future is cancelled": + var fired = false + discard future.cancelled(proc() = fired = true) + await future.cancelAndWait() + check eventually fired - test "calls onError when Future[T] fails": - var errorActual: ref CatchableError - discard returnsValError().then( - proc(val: int) = discard, - proc(e: ref CatchableError) = errorActual = e - ) - check eventually error == errorActual - - test "catch callback fired when Future[T] fails": - var errorActual: ref CatchableError - returnsValError().catch( - proc(e: ref CatchableError) 
= errorActual = e - ) - check eventually error == errorActual - - test "does not fire onSuccess callback when Future[T] fails": + test "does not fire other callbacks when successful": var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false - returnsValError() + future .then(proc(val: int) = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.complete(1) + + check eventually onSuccessCalled + check always (not onCancelledCalled and not onCatchCalled) + + test "does not fire other callbacks when fails": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future .then(proc(val: int) = onSuccessCalled = true) - .catch(proc(e: ref CatchableError) = discard) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) - check always (not onSuccessCalled) + future.fail(error) -asyncchecksuite "then - Future[?!void]": - var returnsResultVoidWasRun: bool - var error = (ref CatchableError)(msg: "some error") + check eventually onCatchCalled + check always (not onCancelledCalled and not onSuccessCalled) - setup: - returnsResultVoidWasRun = false - - proc returnsResultVoid(): Future[?!void] {.async.} = - await sleepAsync 1.millis - returnsResultVoidWasRun = true - return success() - - proc returnsResultVoidError(): Future[?!void] {.async.} = - return failure(error) - - - proc returnsResultVoidErrorUncaught(): Future[?!void] {.async.} = - raise error - - proc returnsResultVoidCancelled(): Future[?!void] {.async.} = - await sleepAsync(1.seconds) - return success() - - proc wasCancelled(error: ref CancelledError): bool = - not error.isNil and error.msg == "Future operation cancelled!" 
- - test "calls onSuccess when Future[?!void] complete": + test "does not fire other callbacks when cancelled": var onSuccessCalled = false - discard returnsResultVoid().then( - proc() = onSuccessCalled = true, - proc(err: ref CatchableError) = discard - ) - check eventually returnsResultVoidWasRun - check eventually onSuccessCalled + var onCancelledCalled = false + var onCatchCalled = false - test "can pass only onSuccess for Future[?!void]": - var onSuccessCalled = false - discard returnsResultVoid().then( - proc() = onSuccessCalled = true - ) - check eventually returnsResultVoidWasRun - check eventually onSuccessCalled + future + .then(proc(val: int) = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) - test "can chain onSuccess when Future[?!void] complete": + await future.cancelAndWait() + + check eventually onCancelledCalled + check always (not onSuccessCalled and not onCatchCalled) + + test "can chain onSuccess when future completes": var onSuccessCalledTimes = 0 - discard returnsResultVoid() - .then(proc() = inc onSuccessCalledTimes) - .then(proc() = inc onSuccessCalledTimes) - .then(proc() = inc onSuccessCalledTimes) + discard future + .then(proc(val: int) = inc onSuccessCalledTimes) + .then(proc(val: int) = inc onSuccessCalledTimes) + .then(proc(val: int) = inc onSuccessCalledTimes) + future.complete(1) check eventually onSuccessCalledTimes == 3 - test "calls onError when Future[?!void] fails": - var errorActual: ref CatchableError - discard returnsResultVoidError().then( - proc() = discard, - proc(e: ref CatchableError) = errorActual = e - ) - await sleepAsync(10.millis) - check eventually error == errorActual - - test "calls onError when Future[?!void] fails": - var errorActual: ref CatchableError - discard returnsResultVoidError().then( - proc(e: ref CatchableError) = errorActual = e - ) - check eventually error == errorActual - - test "catch callback fired when 
Future[?!void] fails": - var errorActual: ref CatchableError - returnsResultVoidError().catch( - proc(e: ref CatchableError) = errorActual = e - ) - check eventually error == errorActual - - test "does not fire onSuccess callback when Future[?!void] fails": - var onSuccessCalled = false - - returnsResultVoidError() - .then(proc() = onSuccessCalled = true) - .then(proc() = onSuccessCalled = true) - .catch(proc(e: ref CatchableError) = discard) - - check always (not onSuccessCalled) - - test "catch callback fired when Future[?!void] fails with uncaught error": - var errorActual: ref CatchableError - returnsResultVoidErrorUncaught().catch( - proc(e: ref CatchableError) = errorActual = e - ) - check eventually error == errorActual - -asyncchecksuite "then - Future[?!T]": - var returnsResultValWasRun: bool - var error = (ref CatchableError)(msg: "some error") +asyncchecksuite "then - Future[?!void]": + var error = newError() + var future: Future[?!void] setup: - returnsResultValWasRun = false + future = newFuture[?!void]("test void") - proc returnsResultVal(): Future[?!int] {.async.} = - await sleepAsync 1.millis - returnsResultValWasRun = true - return success(2) + teardown: + if not future.finished: + raiseAssert "test should finish future" - proc returnsResultValError(): Future[?!int] {.async.} = - return failure(error) + test "then callback is fired when future is already finished": + var firedImmediately = false + future.complete(success()) + discard future.then(proc() = firedImmediately = true) + check eventually firedImmediately - proc returnsResultValErrorUncaught(): Future[?!int] {.async.} = - raise error + test "then callback is fired after future is finished": + var fired = false + discard future.then(proc() = fired = true) + future.complete(success()) + check eventually fired - proc returnsResultValCancelled(): Future[?!int] {.async.} = - await sleepAsync(1.seconds) - return success(3) + test "catch callback is fired when future is already failed": + var 
actual: ref CatchableError + future.fail(error) + future.catch(proc(err: ref CatchableError) = actual = err) + check eventually actual == error - proc wasCancelled(error: ref CancelledError): bool = - not error.isNil and error.msg == "Future operation cancelled!" + test "catch callback is fired after future is failed": + var actual: ref CatchableError + future.catch(proc(err: ref CatchableError) = actual = err) + future.fail(error) + check eventually actual == error - test "calls onSuccess when Future[?!T] completes": - var actualVal = 0 - discard returnsResultVal().then( - proc(val: int) = actualVal = val, - proc(err: ref CatchableError) = discard - ) - check eventually returnsResultValWasRun - check eventually actualVal == 2 + test "cancelled callback is fired when future is already cancelled": + var fired = false + await future.cancelAndWait() + discard future.cancelled(proc() = fired = true) + check eventually fired - test "can pass only onSuccess for Future[?!T]": - var actualVal = 0 - discard returnsResultVal().then( - proc(val: int) = actualVal = val - ) - check eventually returnsResultValWasRun - check eventually actualVal == 2 + test "cancelled callback is fired after future is cancelled": + var fired = false + discard future.cancelled(proc() = fired = true) + await future.cancelAndWait() + check eventually fired - test "can chain onSuccess when Future[?!T] complete": - var onSuccessCalledWith: seq[int] = @[] - discard returnsResultVal() - .then(proc(val: int) = onSuccessCalledWith.add val) - .then(proc(val: int) = onSuccessCalledWith.add val) - .then(proc(val: int) = onSuccessCalledWith.add val) - check eventually onSuccessCalledWith == @[2, 2, 2] - - test "calls onError when Future[?!T] fails": - var errorActual: ref CatchableError - discard returnsResultValError().then( - proc(val: int) = discard, - proc(e: ref CatchableError) = errorActual = e - ) - check eventually error == errorActual - - test "calls onError when Future[?!T] fails": - var 
errorActual: ref CatchableError - discard returnsResultValError().then( - proc(val: int) = discard, - proc(e: ref CatchableError) = errorActual = e - ) - check eventually error == errorActual - - test "catch callback fired when Future[?!T] fails": - var errorActual: ref CatchableError - returnsResultValError().catch( - proc(e: ref CatchableError) = errorActual = e - ) - check eventually error == errorActual - - test "does not fire onSuccess callback when Future[?!T] fails": + test "does not fire other callbacks when successful": var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false - returnsResultValError() + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.complete(success()) + + check eventually onSuccessCalled + check always (not onCancelledCalled and not onCatchCalled) + + test "does not fire other callbacks when fails": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.fail(error) + + check eventually onCatchCalled + check always (not onCancelledCalled and not onSuccessCalled) + + test "does not fire other callbacks when cancelled": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future + .then(proc() = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + await future.cancelAndWait() + + check eventually onCancelledCalled + check always (not onSuccessCalled and not onCatchCalled) + + test "can chain onSuccess when future completes": + var onSuccessCalledTimes = 0 + discard future + .then(proc() = inc onSuccessCalledTimes) + .then(proc() = inc onSuccessCalledTimes) + 
.then(proc() = inc onSuccessCalledTimes) + future.complete(success()) + check eventually onSuccessCalledTimes == 3 + +asyncchecksuite "then - Future[?!T]": + var error = newError() + var future: Future[?!int] + + setup: + future = newFuture[?!int]("test void") + + teardown: + if not future.finished: + raiseAssert "test should finish future" + + test "then callback is fired when future is already finished": + var cbVal = 0 + future.complete(success(1)) + discard future.then(proc(val: int) = cbVal = val) + check eventually cbVal == 1 + + test "then callback is fired after future is finished": + var cbVal = 0 + discard future.then(proc(val: int) = cbVal = val) + future.complete(success(1)) + check eventually cbVal == 1 + + test "catch callback is fired when future is already failed": + var actual: ref CatchableError + future.fail(error) + future.catch(proc(err: ref CatchableError) = actual = err) + check eventually actual == error + + test "catch callback is fired after future is failed": + var actual: ref CatchableError + future.catch(proc(err: ref CatchableError) = actual = err) + future.fail(error) + check eventually actual == error + + test "cancelled callback is fired when future is already cancelled": + var fired = false + await future.cancelAndWait() + discard future.cancelled(proc() = fired = true) + check eventually fired + + test "cancelled callback is fired after future is cancelled": + var fired = false + discard future.cancelled(proc() = fired = true) + await future.cancelAndWait() + check eventually fired + + test "does not fire other callbacks when successful": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future .then(proc(val: int) = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + future.complete(success(1)) + + check eventually onSuccessCalled + check always (not onCancelledCalled and not onCatchCalled) + + test 
"does not fire other callbacks when fails": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false + + future .then(proc(val: int) = onSuccessCalled = true) - .catch(proc(e: ref CatchableError) = discard) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) - check always (not onSuccessCalled) + future.fail(error) - test "catch callback fired when Future[?!T] fails with uncaught error": - var errorActual: ref CatchableError + check eventually onCatchCalled + check always (not onCancelledCalled and not onSuccessCalled) - returnsResultValErrorUncaught() - .then(proc(val: int) = discard) - .then(proc(val: int) = discard) - .catch(proc(e: ref CatchableError) = errorActual = e) + test "does not fire other callbacks when cancelled": + var onSuccessCalled = false + var onCancelledCalled = false + var onCatchCalled = false - check eventually error == errorActual + future + .then(proc(val: int) = onSuccessCalled = true) + .cancelled(proc() = onCancelledCalled = true) + .catch(proc(e: ref CatchableError) = onCatchCalled = true) + + await future.cancelAndWait() + + check eventually onCancelledCalled + check always (not onSuccessCalled and not onCatchCalled) + + test "can chain onSuccess when future completes": + var onSuccessCalledTimes = 0 + discard future + .then(proc(val: int) = inc onSuccessCalledTimes) + .then(proc(val: int) = inc onSuccessCalledTimes) + .then(proc(val: int) = inc onSuccessCalledTimes) + future.complete(success(1)) + check eventually onSuccessCalledTimes == 3 diff --git a/tests/codex/utils/testtrackedfutures.nim b/tests/codex/utils/testtrackedfutures.nim new file mode 100644 index 00000000..78756a8a --- /dev/null +++ b/tests/codex/utils/testtrackedfutures.nim @@ -0,0 +1,67 @@ +import pkg/asynctest +import pkg/chronos +import codex/utils/trackedfutures +import ../helpers/eventually +import ../helpers + +type Module = object + trackedFutures: TrackedFutures + 
+asyncchecksuite "tracked futures": + var module: Module + + setup: + module = Module(trackedFutures: TrackedFutures.new()) + + test "starts with zero tracked futures": + check module.trackedFutures.len == 0 + + test "tracks unfinished futures": + let fut = newFuture[void]("test") + discard fut.track(module) + check module.trackedFutures.len == 1 + + test "does not track completed futures": + let fut = newFuture[void]("test") + fut.complete() + discard fut.track(module) + check eventually module.trackedFutures.len == 0 + + test "does not track failed futures": + let fut = newFuture[void]("test") + fut.fail((ref CatchableError)(msg: "some error")) + discard fut.track(module) + check eventually module.trackedFutures.len == 0 + + test "does not track cancelled futures": + let fut = newFuture[void]("test") + await fut.cancelAndWait() + discard fut.track(module) + check eventually module.trackedFutures.len == 0 + + test "removes tracked future when finished": + let fut = newFuture[void]("test") + discard fut.track(module) + fut.complete() + check eventually module.trackedFutures.len == 0 + + test "removes tracked future when cancelled": + let fut = newFuture[void]("test") + discard fut.track(module) + await fut.cancelAndWait() + check eventually module.trackedFutures.len == 0 + + test "cancels and removes all tracked futures": + let fut1 = newFuture[void]("test1") + let fut2 = newFuture[void]("test2") + let fut3 = newFuture[void]("test3") + discard fut1.track(module) + discard fut2.track(module) + discard fut3.track(module) + await module.trackedFutures.cancelTracked() + check eventually fut1.cancelled + check eventually fut2.cancelled + check eventually fut3.cancelled + check eventually module.trackedFutures.len == 0 + + From 7efa9177dfc35c2581a1f733c58aa0eaf0ab2692 Mon Sep 17 00:00:00 2001 From: Jaremy Creechley Date: Tue, 1 Aug 2023 16:47:57 -0700 Subject: [PATCH 7/9] Bump deps take2 (#492) * extra utilities and tweaks * add atlas lock * update ignores * break build 
into it's own script * update url rules * base off codexdht's * compile fixes for Nim 1.6.14 * update submodules * convert mapFailure to procs to work around type resolution issues * add toml parser for multiaddress * change error type on keyutils * bump nimbus build to use 1.6.14 * update gitignore * adding new deps submodules * bump nim ci version * even more fixes * more libp2p changes * update keys * fix eventually function * adding coverage test file * move coverage to build.nims * use nimcache/coverage * move libp2p import for tests into helper.nim * remove named bin * bug fixes for networkpeers (from Dmitriy) --------- Co-authored-by: Dmitriy Ryajov --- .github/workflows/ci.yml | 6 +- .gitignore | 3 + .gitmodules | 12 + Makefile | 26 +- atlas.lock | 209 +++ build.nims | 87 ++ codex.nimble | 48 +- codex/blockexchange/engine/discovery.nim | 2 +- codex/blockexchange/engine/engine.nim | 2 +- codex/blockexchange/network/network.nim | 2 +- codex/blocktype.nim | 2 +- codex/conf.nim | 25 +- codex/discovery.nim | 6 +- codex/errors.nim | 15 +- codex/formats.nim | 2 +- codex/manifest/manifest.nim | 14 +- codex/node.nim | 120 +- codex/rest/api.nim | 6 +- codex/storageproofs/stpstore.nim | 2 +- codex/stores/blockstore.nim | 2 +- codex/stores/repostore.nim | 4 +- codex/streams/seekablestream.nim | 4 +- codex/streams/storestream.nim | 1 - codex/utils/keyutils.nim | 11 +- config.nims | 18 +- .../blockexchange/discovery/testdiscovery.nim | 1 - .../discovery/testdiscoveryengine.nim | 1 - .../blockexchange/engine/testblockexc.nim | 3 - .../codex/blockexchange/engine/testengine.nim | 4 +- .../blockexchange/protobuf/testpresence.nim | 1 - tests/codex/blockexchange/testnetwork.nim | 2 - .../codex/blockexchange/testpendingblocks.nim | 1 - tests/codex/helpers.nim | 2 + tests/codex/helpers/eventually.nim | 2 +- tests/codex/helpers/nodeutils.nim | 1 + tests/codex/sales/testsales.nim | 32 +- tests/codex/sales/testslotqueue.nim | 1 + tests/codex/storageproofs/testnetwork.nim | 1 - 
tests/codex/stores/commonstoretests.nim | 5 +- tests/codex/stores/testcachestore.nim | 1 - tests/codex/stores/testkeyutils.nim | 1 - tests/codex/stores/testmaintenance.nim | 1 - tests/codex/stores/testrepostore.nim | 1 - tests/codex/testchunking.nim | 2 +- tests/codex/testerasure.nim | 1 - tests/codex/testmanifest.nim | 1 - tests/codex/testnode.nim | 4 +- tests/codex/teststorestream.nim | 1 - tests/codex/utils/testkeyutils.nim | 11 +- tests/coverage.nim | 2 + tests/coverage.nims | 12 + tests/integration/nodes.nim | 6 +- tests/integration/testblockexpiration.nim | 1 + tests/logging.nim | 13 +- tests/nimlldb.py | 1381 +++++++++++++++++ vendor/asynctest | 2 +- vendor/atlas.workspace | 3 + vendor/dnsclient.nim | 2 +- vendor/nim-bearssl | 2 +- vendor/nim-chronicles | 2 +- vendor/nim-chronos | 2 +- vendor/nim-eth | 2 +- vendor/nim-faststreams | 2 +- vendor/nim-http-utils | 2 +- vendor/nim-json-serialization | 2 +- vendor/nim-libp2p | 2 +- vendor/nim-libp2p-dht | 2 +- vendor/nim-metrics | 2 +- vendor/nim-protobuf-serialization | 1 + vendor/nim-results | 1 + vendor/nim-secp256k1 | 2 +- vendor/nim-serialization | 2 +- vendor/nim-sqlite3-abi | 2 +- vendor/nim-stew | 2 +- vendor/nim-testutils | 1 + vendor/nim-unittest2 | 2 +- vendor/nim-websock | 2 +- vendor/nim-zlib | 2 +- vendor/nimbus-build-system | 2 +- vendor/nimcrypto | 2 +- vendor/npeg | 1 + vendor/questionable | 2 +- vendor/stint | 2 +- vendor/urls.rules | 8 + 84 files changed, 1931 insertions(+), 255 deletions(-) create mode 100644 atlas.lock create mode 100644 build.nims create mode 100644 tests/coverage.nim create mode 100644 tests/coverage.nims create mode 100644 tests/nimlldb.py create mode 100644 vendor/atlas.workspace create mode 160000 vendor/nim-protobuf-serialization create mode 160000 vendor/nim-results create mode 160000 vendor/nim-testutils create mode 160000 vendor/npeg create mode 100644 vendor/urls.rules diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e8dbbe46..9fcb1de7 100644 
--- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,7 +7,7 @@ on: workflow_dispatch: env: cache_nonce: 0 # Allows for easily busting actions/cache caches - nim_version: v1.6.10 + nim_version: v1.6.14 jobs: build: strategy: @@ -105,7 +105,9 @@ jobs: nim_version: ${{ env.nim_version }} - name: Generate coverage data - run: make -j${ncpu} coverage + run: | + # make -j${ncpu} coverage + make -j${ncpu} coverage-script shell: bash - name: Upload coverage data to Codecov diff --git a/.gitignore b/.gitignore index 05d53c20..c85aa931 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,8 @@ coverage/ # Nimble packages /vendor/.nimble +/vendor/packages/ +# /vendor/*/ # Nimble user files nimble.develop @@ -36,3 +38,4 @@ nimbus-build-system.paths docker/hostdatadir docker/prometheus-data .DS_Store +nim.cfg diff --git a/.gitmodules b/.gitmodules index b74ec86f..8cc85d0e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -181,3 +181,15 @@ [submodule "vendor/codex-contracts-eth"] path = vendor/codex-contracts-eth url = https://github.com/status-im/codex-contracts-eth +[submodule "vendor/nim-protobuf-serialization"] + path = vendor/nim-protobuf-serialization + url = https://github.com/status-im/nim-protobuf-serialization +[submodule "vendor/nim-results"] + path = vendor/nim-results + url = https://github.com/arnetheduck/nim-results +[submodule "vendor/nim-testutils"] + path = vendor/nim-testutils + url = https://github.com/status-im/nim-testutils +[submodule "vendor/npeg"] + path = vendor/npeg + url = https://github.com/zevv/npeg diff --git a/Makefile b/Makefile index e629d710..0f7545f4 100644 --- a/Makefile +++ b/Makefile @@ -48,7 +48,7 @@ else # "variables.mk" was included. 
Business as usual until the end of this file # Builds the codex binary all: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim codex $(NIM_PARAMS) codex.nims + $(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims # must be included after the default target -include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk @@ -60,15 +60,12 @@ else NIM_PARAMS := $(NIM_PARAMS) -d:release endif -deps: | deps-common nat-libs codex.nims +deps: | deps-common nat-libs ifneq ($(USE_LIBBACKTRACE), 0) deps: | libbacktrace endif -#- deletes and recreates "codex.nims" which on Windows is a copy instead of a proper symlink update: | update-common - rm -rf codex.nims && \ - $(MAKE) codex.nims $(HANDLE_OUTPUT) # detecting the os ifeq ($(OS),Windows_NT) # is Windows_NT on XP, 2000, 7, Vista, 10... @@ -83,26 +80,22 @@ endif # Builds and run a part of the test suite test: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim test $(NIM_PARAMS) codex.nims + $(ENV_SCRIPT) nim test $(NIM_PARAMS) build.nims # Builds and runs the smart contract tests testContracts: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) codex.nims + $(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) build.nims # Builds and runs the integration tests testIntegration: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) codex.nims + $(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) build.nims # Builds and runs all tests testAll: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim testAll $(NIM_PARAMS) codex.nims - -# symlink -codex.nims: - ln -s codex.nimble $@ + $(ENV_SCRIPT) nim testAll $(NIM_PARAMS) build.nims # nim-libbacktrace LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0 @@ -127,8 +120,15 @@ coverage: shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info echo -e $(BUILD_MSG) 
"coverage/report/index.html" genhtml coverage/coverage.f.info --output-directory coverage/report + +show-coverage: if which open >/dev/null; then (echo -e "\e[92mOpening\e[39m HTML coverage report in browser..." && open coverage/report/index.html) || true; fi +coverage-script: build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim coverage $(NIM_PARAMS) build.nims + echo "Run `make show-coverage` to view coverage results" + # usual cleaning clean: | clean-common rm -rf build diff --git a/atlas.lock b/atlas.lock new file mode 100644 index 00000000..b37d03e3 --- /dev/null +++ b/atlas.lock @@ -0,0 +1,209 @@ +{ + "clangVersion": "", + "gccVersion": "", + "hostCPU": "arm64", + "hostOS": "macosx", + "items": { + "asynctest": { + "commit": "fe1a34caf572b05f8bdba3b650f1871af9fce31e", + "dir": "vendor/asynctest", + "url": "https://github.com/codex-storage/asynctest" + }, + "dnsclient.nim": { + "commit": "23214235d4784d24aceed99bbfe153379ea557c8", + "dir": "vendor/dnsclient.nim", + "url": "https://github.com/ba0f3/dnsclient.nim" + }, + "lrucache.nim": { + "commit": "8767ade0b76ea5b5d4ce24a52d0c58a6ebeb66cd", + "dir": "vendor/lrucache.nim", + "url": "https://github.com/status-im/lrucache.nim" + }, + "nim-bearssl": { + "commit": "99fcb3405c55b27cfffbf60f5368c55da7346f23", + "dir": "vendor/nim-bearssl", + "url": "https://github.com/status-im/nim-bearssl" + }, + "nim-blscurve": { + "commit": "48d8668c5a9a350d3a7ee0c3713ef9a11980a40d", + "dir": "vendor/nim-blscurve", + "url": "https://github.com/status-im/nim-blscurve" + }, + "nim-chronicles": { + "commit": "c9c8e58ec3f89b655a046c485f622f9021c68b61", + "dir": "vendor/nim-chronicles", + "url": "https://github.com/status-im/nim-chronicles" + }, + "nim-chronos": { + "commit": "0277b65be2c7a365ac13df002fba6e172be55537", + "dir": "vendor/nim-chronos", + "url": "https://github.com/status-im/nim-chronos" + }, + "nim-confutils": { + "commit": "2028b41602b3abf7c9bf450744efde7b296707a2", + "dir": "vendor/nim-confutils", + 
"url": "https://github.com/status-im/nim-confutils" + }, + "nim-contract-abi": { + "commit": "61f8f59b3917d8e27c6eb4330a6d8cf428e98b2d", + "dir": "vendor/nim-contract-abi", + "url": "https://github.com/status-im/nim-contract-abi" + }, + "nim-datastore": { + "commit": "0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa", + "dir": "vendor/nim-datastore", + "url": "https://github.com/codex-storage/nim-datastore" + }, + "nim-faststreams": { + "commit": "720fc5e5c8e428d9d0af618e1e27c44b42350309", + "dir": "vendor/nim-faststreams", + "url": "https://github.com/status-im/nim-faststreams" + }, + "nim-http-utils": { + "commit": "3b491a40c60aad9e8d3407443f46f62511e63b18", + "dir": "vendor/nim-http-utils", + "url": "https://github.com/status-im/nim-http-utils" + }, + "nim-json-rpc": { + "commit": "0bf2bcbe74a18a3c7a709d57108bb7b51e748a92", + "dir": "vendor/nim-json-rpc", + "url": "https://github.com/status-im/nim-json-rpc" + }, + "nim-json-serialization": { + "commit": "bb53d49caf2a6c6cf1df365ba84af93cdcfa7aa3", + "dir": "vendor/nim-json-serialization", + "url": "https://github.com/status-im/nim-json-serialization" + }, + "nim-leopard": { + "commit": "1a6f2ab7252426a6ac01482a68b75d0c3b134cf0", + "dir": "vendor/nim-leopard", + "url": "https://github.com/status-im/nim-leopard" + }, + "nim-libbacktrace": { + "commit": "b29c22ba0ef13de50b779c776830dbea1d50cd33", + "dir": "vendor/nim-libbacktrace", + "url": "https://github.com/status-im/nim-libbacktrace" + }, + "nim-libp2p": { + "commit": "440461b24b9e66542b34d26a0b908c17f6549d05", + "dir": "vendor/nim-libp2p", + "url": "https://github.com/status-im/nim-libp2p" + }, + "nim-libp2p-dht": { + "commit": "fdd02450aa6979add7dabd29a3ba0f8738bf89f8", + "dir": "vendor/nim-libp2p-dht", + "url": "https://github.com/status-im/nim-libp2p-dht" + }, + "nim-metrics": { + "commit": "6142e433fc8ea9b73379770a788017ac528d46ff", + "dir": "vendor/nim-metrics", + "url": "https://github.com/status-im/nim-metrics" + }, + "nim-nat-traversal": { + "commit": 
"27d314d65c9078924b3239fe4e2f5af0c512b28c", + "dir": "vendor/nim-nat-traversal", + "url": "https://github.com/status-im/nim-nat-traversal" + }, + "nim-nitro": { + "commit": "6b4c455bf4dad7449c1580055733a1738fcd5aec", + "dir": "vendor/nim-nitro", + "url": "https://github.com/status-im/nim-nitro" + }, + "nim-presto": { + "commit": "3984431dc0fc829eb668e12e57e90542b041d298", + "dir": "vendor/nim-presto", + "url": "https://github.com/status-im/nim-presto" + }, + "nim-protobuf-serialization": { + "commit": "28214b3e40c755a9886d2ec8f261ec48fbb6bec6", + "dir": "vendor/nim-protobuf-serialization", + "url": "https://github.com/status-im/nim-protobuf-serialization" + }, + "nim-results": { + "commit": "f3c666a272c69d70cb41e7245e7f6844797303ad", + "dir": "vendor/nim-results", + "url": "https://github.com/arnetheduck/nim-results" + }, + "nim-secp256k1": { + "commit": "2acbbdcc0e63002a013fff49f015708522875832", + "dir": "vendor/nim-secp256k1", + "url": "https://github.com/status-im/nim-secp256k1" + }, + "nim-serialization": { + "commit": "384eb2561ee755446cff512a8e057325848b86a7", + "dir": "vendor/nim-serialization", + "url": "https://github.com/status-im/nim-serialization" + }, + "nim-sqlite3-abi": { + "commit": "362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3", + "dir": "vendor/nim-sqlite3-abi", + "url": "https://github.com/arnetheduck/nim-sqlite3-abi" + }, + "nim-stew": { + "commit": "7afe7e3c070758cac1f628e4330109f3ef6fc853", + "dir": "vendor/nim-stew", + "url": "https://github.com/status-im/nim-stew" + }, + "nim-taskpools": { + "commit": "b3673c7a7a959ccacb393bd9b47e997bbd177f5a", + "dir": "vendor/nim-taskpools", + "url": "https://github.com/status-im/nim-taskpools" + }, + "nim-testutils": { + "commit": "b56a5953e37fc5117bd6ea6dfa18418c5e112815", + "dir": "vendor/nim-testutils", + "url": "https://github.com/status-im/nim-testutils" + }, + "nim-toml-serialization": { + "commit": "86d477136f105f04bfd0dd7c0e939593d81fc581", + "dir": "vendor/nim-toml-serialization", + "url": 
"https://github.com/status-im/nim-toml-serialization" + }, + "nim-unittest2": { + "commit": "b178f47527074964f76c395ad0dfc81cf118f379", + "dir": "vendor/nim-unittest2", + "url": "https://github.com/status-im/nim-unittest2" + }, + "nim-websock": { + "commit": "2c3ae3137f3c9cb48134285bd4a47186fa51f0e8", + "dir": "vendor/nim-websock", + "url": "https://github.com/status-im/nim-websock" + }, + "nim-zlib": { + "commit": "f34ca261efd90f118dc1647beefd2f7a69b05d93", + "dir": "vendor/nim-zlib", + "url": "https://github.com/status-im/nim-zlib" + }, + "nim-stint": { + "dir": "vendor/stint", + "url": "https://github.com/status-im/nim-stint", + "commit": "86621eced1dcfb5e25903019ebcfc76ed9128ec5" + }, + "nimcrypto": { + "commit": "24e006df85927f64916e60511620583b11403178", + "dir": "vendor/nimcrypto", + "url": "https://github.com/status-im/nimcrypto" + }, + "npeg": { + "commit": "b15a10e388b91b898c581dbbcb6a718d46b27d2f", + "dir": "vendor/npeg", + "url": "https://github.com/zevv/npeg" + }, + "questionable": { + "commit": "b3cf35ac450fd42c9ea83dc084f5cba2efc55da3", + "dir": "vendor/questionable", + "url": "https://github.com/codex-storage/questionable" + }, + "upraises": { + "commit": "ff4f8108e44fba9b35cac535ab63d3927e8fd3c2", + "dir": "vendor/upraises", + "url": "https://github.com/markspanbroek/upraises" + } + }, + "nimVersion": "1.6.14", + "nimbleFile": { + "content": "# Package\n\nversion = \"0.3.2\"\nauthor = \"Status Research & Development GmbH\"\ndescription = \"DHT based on the libp2p Kademlia spec\"\nlicense = \"MIT\"\nskipDirs = @[\"tests\"]\n\n\n# Dependencies\nrequires \"nim >= 1.2.0\"\nrequires \"secp256k1#2acbbdcc0e63002a013fff49f015708522875832\" # >= 0.5.2 & < 0.6.0\nrequires \"protobuf_serialization\" # >= 0.2.0 & < 0.3.0\nrequires \"nimcrypto == 0.5.4\"\nrequires \"bearssl#head\"\nrequires \"chronicles >= 0.10.2 & < 0.11.0\"\nrequires \"chronos == 3.2.0\" # >= 3.0.11 & < 3.1.0\nrequires \"libp2p#unstable\"\nrequires \"metrics\"\nrequires 
\"stew#head\"\nrequires \"stint\"\nrequires \"asynctest >= 0.3.1 & < 0.4.0\"\nrequires \"https://github.com/codex-storage/nim-datastore#head\"\nrequires \"questionable\"\n\ninclude \"build.nims\"\n\n", + "filename": "" + }, + "nimcfg": "############# begin Atlas config section ##########\n--noNimblePath\n--path:\"vendor/nim-secp256k1\"\n--path:\"vendor/nim-protobuf-serialization\"\n--path:\"vendor/nimcrypto\"\n--path:\"vendor/nim-bearssl\"\n--path:\"vendor/nim-chronicles\"\n--path:\"vendor/nim-chronos\"\n--path:\"vendor/nim-libp2p\"\n--path:\"vendor/nim-metrics\"\n--path:\"vendor/nim-stew\"\n--path:\"vendor/nim-stint\"\n--path:\"vendor/asynctest\"\n--path:\"vendor/nim-datastore\"\n--path:\"vendor/questionable\"\n--path:\"vendor/nim-faststreams\"\n--path:\"vendor/nim-serialization\"\n--path:\"vendor/npeg/src\"\n--path:\"vendor/nim-unittest2\"\n--path:\"vendor/nim-testutils\"\n--path:\"vendor/nim-json-serialization\"\n--path:\"vendor/nim-http-utils\"\n--path:\"vendor/dnsclient.nim/src\"\n--path:\"vendor/nim-websock\"\n--path:\"vendor/nim-results\"\n--path:\"vendor/nim-sqlite3-abi\"\n--path:\"vendor/upraises\"\n--path:\"vendor/nim-zlib\"\n############# end Atlas config section ##########\n" +} diff --git a/build.nims b/build.nims new file mode 100644 index 00000000..bf89c0f0 --- /dev/null +++ b/build.nims @@ -0,0 +1,87 @@ +mode = ScriptMode.Verbose + + +### Helper functions +proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = + if not dirExists "build": + mkDir "build" + # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" + var extra_params = params + when compiles(commandLineParams): + for param in commandLineParams(): + extra_params &= " " & param + else: + for i in 2..= 1.2.0" requires "asynctest >= 0.3.2 & < 0.4.0" @@ -32,47 +31,4 @@ requires "blscurve" requires "libp2pdht" requires "eth" -when declared(namedBin): - namedBin = { - "codex/codex": "codex" - }.toTable() - -### Helper functions -proc buildBinary(name: string, 
srcDir = "./", params = "", lang = "c") = - if not dirExists "build": - mkDir "build" - # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" - var extra_params = params - when compiles(commandLineParams): - for param in commandLineParams: - extra_params &= " " & param - else: - for i in 2.. ref CatchableError: (ref exc)(msg: $e))) + exp.mapErr(proc (e: V): ref CatchableError = (ref exc)(msg: $e)) + +template mapFailure*[T, V](exp: Result[T, V]): Result[T, ref CatchableError] = + mapFailure(exp, CodexError) diff --git a/codex/formats.nim b/codex/formats.nim index ec79dabe..38881bc9 100644 --- a/codex/formats.nim +++ b/codex/formats.nim @@ -10,7 +10,7 @@ import std/strutils import pkg/chronicles -import pkg/libp2p +import pkg/libp2p/cid func shortLog*(cid: Cid): string = ## Returns compact string representation of ``pid``. diff --git a/codex/manifest/manifest.nim b/codex/manifest/manifest.nim index d231f69c..1bf307ae 100644 --- a/codex/manifest/manifest.nim +++ b/codex/manifest/manifest.nim @@ -101,7 +101,8 @@ func `[]=`*(self: Manifest, i: BackwardsIndex, item: Cid) = self.blocks[self.len - i.int] = item func isManifest*(cid: Cid): ?!bool = - ($(?cid.contentType().mapFailure) in ManifestContainers).success + let res = ?cid.contentType().mapFailure(CodexError) + ($(res) in ManifestContainers).success func isManifest*(mc: MultiCodec): ?!bool = ($mc in ManifestContainers).success @@ -189,11 +190,8 @@ proc makeRoot*(self: Manifest): ?!void = stack.add(mh) if stack.len == 1: - let cid = ? Cid.init( - self.version, - self.codec, - (? EmptyDigests[self.version][self.hcodec].catch)) - .mapFailure + let digest = ? EmptyDigests[self.version][self.hcodec].catch + let cid = ? 
Cid.init(self.version, self.codec, digest).mapFailure self.rootHash = cid.some @@ -225,8 +223,8 @@ proc new*( ## Create a manifest using an array of `Cid`s ## - if hcodec notin EmptyDigests[version]: - return failure("Unsupported manifest hash codec!") + # if hcodec notin EmptyDigests[version]: + # return failure("Unsupported manifest hash codec!") T( blocks: @blocks, diff --git a/codex/node.nim b/codex/node.nim index ebc3c540..0fe78dc8 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -16,7 +16,9 @@ import pkg/questionable import pkg/questionable/results import pkg/chronicles import pkg/chronos -import pkg/libp2p + +import pkg/libp2p/switch +import pkg/libp2p/stream/bufferstream # TODO: remove once exported by libp2p import pkg/libp2p/routing_record @@ -60,23 +62,21 @@ type proc findPeer*( node: CodexNodeRef, - peerId: PeerId -): Future[?PeerRecord] {.async.} = + peerId: PeerId): Future[?PeerRecord] {.async.} = ## Find peer using the discovery service from the given CodexNode ## return await node.discovery.findPeer(peerId) proc connect*( - node: CodexNodeRef, - peerId: PeerId, - addrs: seq[MultiAddress] + node: CodexNodeRef, + peerId: PeerId, + addrs: seq[MultiAddress] ): Future[void] = node.switch.connect(peerId, addrs) proc fetchManifest*( - node: CodexNodeRef, - cid: Cid -): Future[?!Manifest] {.async.} = + node: CodexNodeRef, + cid: Cid): Future[?!Manifest] {.async.} = ## Fetch and decode a manifest block ## @@ -100,11 +100,10 @@ proc fetchManifest*( return manifest.success proc fetchBatched*( - node: CodexNodeRef, - manifest: Manifest, - batchSize = FetchBatch, - onBatch: BatchProc = nil -): Future[?!void] {.async, gcsafe.} = + node: CodexNodeRef, + manifest: Manifest, + batchSize = FetchBatch, + onBatch: BatchProc = nil): Future[?!void] {.async, gcsafe.} = ## Fetch manifest in batches of `batchSize` ## @@ -130,9 +129,8 @@ proc fetchBatched*( return success() proc retrieve*( - node: CodexNodeRef, - cid: Cid -): Future[?!LPStream] {.async.} = + node: 
CodexNodeRef, + cid: Cid): Future[?!LPStream] {.async.} = ## Retrieve by Cid a single block or an entire dataset described by manifest ## @@ -147,47 +145,35 @@ proc retrieve*( trace "Unable to erasure decode manifest", cid, exc = error.msg except CatchableError as exc: trace "Exception decoding manifest", cid, exc = exc.msg - # + asyncSpawn erasureJob() - # else: - # # Prefetch the entire dataset into the local store - # proc prefetchBlocks() {.async, raises: [Defect].} = - # try: - # discard await node.fetchBatched(manifest) - # except CatchableError as exc: - # trace "Exception prefetching blocks", exc = exc.msg - # # - # # asyncSpawn prefetchBlocks() - temporarily commented out - # + # Retrieve all blocks of the dataset sequentially from the local store or network trace "Creating store stream for manifest", cid - return LPStream(StoreStream.new(node.blockStore, manifest, pad = false)).success + LPStream(StoreStream.new(node.blockStore, manifest, pad = false)).success + else: + let + stream = BufferStream.new() - let - stream = BufferStream.new() + without blk =? (await node.blockStore.getBlock(cid)), err: + return failure(err) - without blk =? 
(await node.blockStore.getBlock(cid)), err: - return failure(err) + proc streamOneBlock(): Future[void] {.async.} = + try: + await stream.pushData(blk.data) + except CatchableError as exc: + trace "Unable to send block", cid, exc = exc.msg + discard + finally: + await stream.pushEof() - proc streamOneBlock(): Future[void] {.async.} = - try: - await stream.pushData(blk.data) - except CatchableError as exc: - trace "Unable to send block", cid, exc = exc.msg - discard - finally: - await stream.pushEof() - - asyncSpawn streamOneBlock() - return LPStream(stream).success() - - return failure("Unable to retrieve Cid!") + asyncSpawn streamOneBlock() + LPStream(stream).success() proc store*( - self: CodexNodeRef, - stream: LPStream, - blockSize = DefaultBlockSize -): Future[?!Cid] {.async.} = + self: CodexNodeRef, + stream: LPStream, + blockSize = DefaultBlockSize): Future[?!Cid] {.async.} = ## Save stream contents as dataset with given blockSize ## to nodes's BlockStore, and return Cid of its manifest ## @@ -249,16 +235,15 @@ proc store*( return manifest.cid.success proc requestStorage*( - self: CodexNodeRef, - cid: Cid, - duration: UInt256, - proofProbability: UInt256, - nodes: uint, - tolerance: uint, - reward: UInt256, - collateral: UInt256, - expiry = UInt256.none -): Future[?!PurchaseId] {.async.} = + self: CodexNodeRef, + cid: Cid, + duration: UInt256, + proofProbability: UInt256, + nodes: uint, + tolerance: uint, + reward: UInt256, + collateral: UInt256, + expiry = UInt256.none): Future[?!PurchaseId] {.async.} = ## Initiate a request for storage sequence, this might ## be a multistep procedure. 
## @@ -323,14 +308,13 @@ proc requestStorage*( return success purchase.id proc new*( - T: type CodexNodeRef, - switch: Switch, - store: BlockStore, - engine: BlockExcEngine, - erasure: Erasure, - discovery: Discovery, - contracts = Contracts.default -): CodexNodeRef = + T: type CodexNodeRef, + switch: Switch, + store: BlockStore, + engine: BlockExcEngine, + erasure: Erasure, + discovery: Discovery, + contracts = Contracts.default): CodexNodeRef = ## Create new instance of a Codex node, call `start` to run it ## CodexNodeRef( diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 651d370d..db4e0d9c 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -27,9 +27,9 @@ import pkg/confutils import pkg/libp2p import pkg/libp2p/routing_record -import pkg/libp2pdht/discv5/spr as spr -import pkg/libp2pdht/discv5/routing_table as rt -import pkg/libp2pdht/discv5/node as dn +import pkg/codexdht/discv5/spr as spr +import pkg/codexdht/discv5/routing_table as rt +import pkg/codexdht/discv5/node as dn import ../node import ../blocktype diff --git a/codex/storageproofs/stpstore.nim b/codex/storageproofs/stpstore.nim index ff9ceb54..b5f979c3 100644 --- a/codex/storageproofs/stpstore.nim +++ b/codex/storageproofs/stpstore.nim @@ -47,7 +47,7 @@ proc retrieve*( trace "Cannot retrieve storage proof data from fs", path , error return failure("Cannot retrieve storage proof data from fs") - return PorMessage.decode(data).mapFailure + return PorMessage.decode(data).mapFailure(CatchableError) proc store*( self: StpStore, diff --git a/codex/stores/blockstore.nim b/codex/stores/blockstore.nim index 32ed91e8..bba95eac 100644 --- a/codex/stores/blockstore.nim +++ b/codex/stores/blockstore.nim @@ -18,7 +18,7 @@ import pkg/questionable/results import ../blocktype -export blocktype, libp2p +export blocktype type BlockNotFoundError* = object of CodexError diff --git a/codex/stores/repostore.nim b/codex/stores/repostore.nim index c9c79f36..75400296 100644 --- a/codex/stores/repostore.nim 
+++ b/codex/stores/repostore.nim @@ -13,7 +13,7 @@ push: {.upraises: [].} import pkg/chronos import pkg/chronicles -import pkg/libp2p +import pkg/libp2p/cid import pkg/metrics import pkg/questionable import pkg/questionable/results @@ -26,7 +26,7 @@ import ../blocktype import ../clock import ../systemclock -export blocktype, libp2p +export blocktype, cid logScope: topics = "codex repostore" diff --git a/codex/streams/seekablestream.nim b/codex/streams/seekablestream.nim index 54c13380..785d9afe 100644 --- a/codex/streams/seekablestream.nim +++ b/codex/streams/seekablestream.nim @@ -7,11 +7,11 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. -import pkg/libp2p +import pkg/libp2p/stream/lpstream import pkg/chronos import pkg/chronicles -export libp2p, chronos, chronicles +export lpstream, chronos, chronicles logScope: topics = "codex seekablestream" diff --git a/codex/streams/storestream.nim b/codex/streams/storestream.nim index 0f174aee..809d961c 100644 --- a/codex/streams/storestream.nim +++ b/codex/streams/storestream.nim @@ -13,7 +13,6 @@ import pkg/upraises push: {.upraises: [].} -import pkg/libp2p import pkg/chronos import pkg/chronicles import pkg/stew/ptrops diff --git a/codex/utils/keyutils.nim b/codex/utils/keyutils.nim index 6e14f0b0..ef6f6246 100644 --- a/codex/utils/keyutils.nim +++ b/codex/utils/keyutils.nim @@ -12,12 +12,14 @@ push: {.upraises: [].} import pkg/chronicles import pkg/questionable/results -import pkg/libp2p +import pkg/libp2p/crypto/crypto import ./fileutils import ../errors import ../rng +export crypto + type CodexKeyError = object of CodexError CodexKeyUnsafeError = object of CodexKeyError @@ -37,7 +39,6 @@ proc setupKey*(path: string): ?!PrivateKey = warn "The network private key file is not safe, aborting" return failure newException( CodexKeyUnsafeError, "The network private key file is not safe") - - return PrivateKey.init( - ? 
path.readAllBytes().mapFailure(CodexKeyError)) - .mapFailure(CodexKeyError) + + let kb = ? path.readAllBytes().mapFailure(CodexKeyError) + return PrivateKey.init(kb).mapFailure(CodexKeyError) diff --git a/config.nims b/config.nims index 15a17563..aa0c1f0f 100644 --- a/config.nims +++ b/config.nims @@ -1,8 +1,10 @@ -import std/os +include "build.nims" + +import std/os const currentDir = currentSourcePath()[0 .. ^(len("config.nims") + 1)] -if getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and +when getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and # BEWARE # In Nim 1.6, config files are evaluated with a working directory # matching where the Nim command was invocated. This means that we @@ -10,12 +12,12 @@ if getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and system.fileExists(currentDir & "nimbus-build-system.paths"): include "nimbus-build-system.paths" -if defined(release): +when defined(release): switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/release/$projectName")) else: switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/debug/$projectName")) -if defined(limitStackUsage): +when defined(limitStackUsage): # This limits stack usage of each individual function to 1MB - the option is # available on some GCC versions but not all - run with `-d:limitStackUsage` # and look for .su files in "./build/", "./nimcache/" or $TMPDIR that list the @@ -23,7 +25,7 @@ if defined(limitStackUsage): switch("passC", "-fstack-usage -Werror=stack-usage=1048576") switch("passL", "-fstack-usage -Werror=stack-usage=1048576") -if defined(windows): +when defined(windows): # https://github.com/nim-lang/Nim/pull/19891 switch("define", "nimRawSetjmp") @@ -47,8 +49,8 @@ if defined(windows): # engineering a more portable binary release, this should be tweaked but still # use at least -msse2 or -msse3. 
-if defined(disableMarchNative): - if defined(i386) or defined(amd64): +when defined(disableMarchNative): + when defined(i386) or defined(amd64): switch("passC", "-mssse3") elif defined(macosx) and defined(arm64): # Apple's Clang can't handle "-march=native" on M1: https://github.com/status-im/nimbus-eth2/issues/2758 @@ -93,7 +95,7 @@ if not defined(macosx): --define:nimStackTraceOverride switch("import", "libbacktrace") ---define:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9 +switch("define", "codex_enable_proof_failures=true") # `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'" switch("warning", "CaseTransition:off") diff --git a/tests/codex/blockexchange/discovery/testdiscovery.nim b/tests/codex/blockexchange/discovery/testdiscovery.nim index 94c8ebd2..6a64255a 100644 --- a/tests/codex/blockexchange/discovery/testdiscovery.nim +++ b/tests/codex/blockexchange/discovery/testdiscovery.nim @@ -5,7 +5,6 @@ import std/tables import pkg/asynctest import pkg/chronos -import pkg/libp2p import pkg/libp2p/errors import pkg/codex/rng diff --git a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim index 4273bbb8..c9259768 100644 --- a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim +++ b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim @@ -5,7 +5,6 @@ import std/tables import pkg/asynctest import pkg/chronos -import pkg/libp2p import pkg/codex/rng import pkg/codex/stores diff --git a/tests/codex/blockexchange/engine/testblockexc.nim b/tests/codex/blockexchange/engine/testblockexc.nim index 4b9e7de4..2a4c9d76 100644 --- a/tests/codex/blockexchange/engine/testblockexc.nim +++ b/tests/codex/blockexchange/engine/testblockexc.nim @@ -5,9 +5,6 @@ import pkg/asynctest import pkg/chronos import pkg/stew/byteutils -import pkg/libp2p -import pkg/libp2p/errors - import pkg/codex/rng import 
pkg/codex/stores import pkg/codex/blockexchange diff --git a/tests/codex/blockexchange/engine/testengine.nim b/tests/codex/blockexchange/engine/testengine.nim index a7c8786a..a4e496ed 100644 --- a/tests/codex/blockexchange/engine/testengine.nim +++ b/tests/codex/blockexchange/engine/testengine.nim @@ -5,9 +5,9 @@ import std/algorithm import pkg/stew/byteutils import pkg/asynctest import pkg/chronos -import pkg/libp2p +import pkg/libp2p/errors import pkg/libp2p/routing_record -import pkg/libp2pdht/discv5/protocol as discv5 +import pkg/codexdht/discv5/protocol as discv5 import pkg/codex/rng import pkg/codex/blockexchange diff --git a/tests/codex/blockexchange/protobuf/testpresence.nim b/tests/codex/blockexchange/protobuf/testpresence.nim index 1eaf476e..e23a51dc 100644 --- a/tests/codex/blockexchange/protobuf/testpresence.nim +++ b/tests/codex/blockexchange/protobuf/testpresence.nim @@ -1,6 +1,5 @@ import pkg/asynctest import pkg/chronos -import pkg/libp2p import pkg/codex/blockexchange/protobuf/presence import ../../examples diff --git a/tests/codex/blockexchange/testnetwork.nim b/tests/codex/blockexchange/testnetwork.nim index 51d197de..6e791032 100644 --- a/tests/codex/blockexchange/testnetwork.nim +++ b/tests/codex/blockexchange/testnetwork.nim @@ -3,8 +3,6 @@ import std/tables import pkg/asynctest import pkg/chronos -import pkg/libp2p -import pkg/libp2p/errors import pkg/codex/rng import pkg/codex/chunker diff --git a/tests/codex/blockexchange/testpendingblocks.nim b/tests/codex/blockexchange/testpendingblocks.nim index f319b562..03634f9b 100644 --- a/tests/codex/blockexchange/testpendingblocks.nim +++ b/tests/codex/blockexchange/testpendingblocks.nim @@ -3,7 +3,6 @@ import std/algorithm import pkg/chronos import pkg/asynctest -import pkg/libp2p import pkg/stew/byteutils import pkg/codex/blocktype as bt diff --git a/tests/codex/helpers.nim b/tests/codex/helpers.nim index 60b1cd14..20734109 100644 --- a/tests/codex/helpers.nim +++ b/tests/codex/helpers.nim @@ 
-14,6 +14,8 @@ import ../checktest export randomchunker, nodeutils, mockdiscovery, eventually, checktest, manifest +export libp2p except setup, eventually + # NOTE: The meaning of equality for blocks # is changed here, because blocks are now `ref` # types. This is only in tests!!! diff --git a/tests/codex/helpers/eventually.nim b/tests/codex/helpers/eventually.nim index 3d68fc62..20a957e7 100644 --- a/tests/codex/helpers/eventually.nim +++ b/tests/codex/helpers/eventually.nim @@ -1,6 +1,6 @@ import pkg/chronos -template eventually*(condition: untyped, timeout = 5.seconds): bool = +template eventuallyCheck*(condition: untyped, timeout = 5.seconds): bool = proc loop: Future[bool] {.async.} = let start = Moment.now() while true: diff --git a/tests/codex/helpers/nodeutils.nim b/tests/codex/helpers/nodeutils.nim index df4b470e..83e59a69 100644 --- a/tests/codex/helpers/nodeutils.nim +++ b/tests/codex/helpers/nodeutils.nim @@ -2,6 +2,7 @@ import std/sequtils import pkg/chronos import pkg/libp2p +import pkg/libp2p/errors import pkg/codex/discovery import pkg/codex/stores diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index e8d890d3..332e8e25 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -113,7 +113,7 @@ asyncchecksuite "Sales": check isOk await reservations.reserve(availability) await market.requestStorage(request) let items = SlotQueueItem.init(request) - check eventually items.allIt(itemsProcessed.contains(it)) + check eventuallyCheck items.allIt(itemsProcessed.contains(it)) test "removes slots from slot queue once RequestCancelled emitted": let request1 = await addRequestToSaturatedQueue() @@ -146,7 +146,7 @@ asyncchecksuite "Sales": market.emitSlotFreed(request.id, 2.u256) let expected = SlotQueueItem.init(request, 2.uint16) - check eventually itemsProcessed.contains(expected) + check eventuallyCheck itemsProcessed.contains(expected) test "request slots are not added to the slot queue when no 
availabilities exist": var itemsProcessed: seq[SlotQueueItem] = @[] @@ -185,7 +185,7 @@ asyncchecksuite "Sales": # now add matching availability check isOk await reservations.reserve(availability) - check eventually itemsProcessed.len == request.ask.slots.int + check eventuallyCheck itemsProcessed.len == request.ask.slots.int test "makes storage unavailable when downloading a matched request": var used = false @@ -199,7 +199,7 @@ asyncchecksuite "Sales": check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventually used + check eventuallyCheck used test "reduces remaining availability size after download": let blk = bt.Block.example @@ -212,7 +212,7 @@ asyncchecksuite "Sales": return success() check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventually getAvailability().?size == success 1.u256 + check eventuallyCheck getAvailability().?size == success 1.u256 test "ignores download when duration not long enough": availability.duration = request.ask.duration - 1 @@ -265,7 +265,7 @@ asyncchecksuite "Sales": return success() check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventually storingRequest == request + check eventuallyCheck storingRequest == request check storingSlot < request.ask.slots.u256 test "handles errors during state run": @@ -280,7 +280,7 @@ asyncchecksuite "Sales": saleFailed = true check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventually saleFailed + check eventuallyCheck saleFailed test "makes storage available again when data retrieval fails": let error = newException(IOError, "data retrieval failed") @@ -290,7 +290,7 @@ asyncchecksuite "Sales": return failure(error) check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventually getAvailability().?used == success false + check eventuallyCheck 
getAvailability().?used == success false check getAvailability().?size == success availability.size test "generates proof of storage": @@ -301,13 +301,13 @@ asyncchecksuite "Sales": provingSlot = slot.slotIndex check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventually provingRequest == request + check eventuallyCheck provingRequest == request check provingSlot < request.ask.slots.u256 test "fills a slot": check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventually market.filled.len == 1 + check eventuallyCheck market.filled.len == 1 check market.filled[0].requestId == request.id check market.filled[0].slotIndex < request.ask.slots.u256 check market.filled[0].proof == proof @@ -325,7 +325,7 @@ asyncchecksuite "Sales": soldSlotIndex = slotIndex check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventually soldAvailability == availability + check eventuallyCheck soldAvailability == availability check soldRequest == request check soldSlotIndex < request.ask.slots.u256 @@ -342,7 +342,7 @@ asyncchecksuite "Sales": clearedSlotIndex = slotIndex check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventually clearedRequest == request + check eventuallyCheck clearedRequest == request check clearedSlotIndex < request.ask.slots.u256 test "makes storage available again when other host fills the slot": @@ -356,7 +356,7 @@ asyncchecksuite "Sales": await market.requestStorage(request) for slotIndex in 0.. 
agent.data == expected) diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim index 1d2760ff..b7cc2059 100644 --- a/tests/codex/sales/testslotqueue.nim +++ b/tests/codex/sales/testslotqueue.nim @@ -10,6 +10,7 @@ import pkg/codex/sales/reservations import pkg/codex/sales/slotqueue import pkg/codex/stores +import ../helpers import ../helpers/mockmarket import ../helpers/eventually import ../examples diff --git a/tests/codex/storageproofs/testnetwork.nim b/tests/codex/storageproofs/testnetwork.nim index a56d7d63..538cbfdf 100644 --- a/tests/codex/storageproofs/testnetwork.nim +++ b/tests/codex/storageproofs/testnetwork.nim @@ -2,7 +2,6 @@ import std/sequtils import pkg/asynctest import pkg/chronos -import pkg/libp2p import pkg/libp2p/errors import pkg/contractabi as ca diff --git a/tests/codex/stores/commonstoretests.nim b/tests/codex/stores/commonstoretests.nim index 6aa280b3..d9c66e2b 100644 --- a/tests/codex/stores/commonstoretests.nim +++ b/tests/codex/stores/commonstoretests.nim @@ -4,8 +4,9 @@ import std/options import pkg/chronos import pkg/asynctest -import pkg/libp2p +import pkg/libp2p/multicodec import pkg/stew/byteutils +import pkg/questionable import pkg/questionable/results import pkg/codex/stores/cachestore import pkg/codex/chunker @@ -94,7 +95,7 @@ proc commonBlockStoreTests*(name: string, var count = 0 for c in cids: - if cid =? (await c): + if cid =? 
await c: check (await store.hasBlock(cid)).tryGet() count.inc diff --git a/tests/codex/stores/testcachestore.nim b/tests/codex/stores/testcachestore.nim index 7f36741c..8c135c11 100644 --- a/tests/codex/stores/testcachestore.nim +++ b/tests/codex/stores/testcachestore.nim @@ -2,7 +2,6 @@ import std/strutils import pkg/chronos import pkg/asynctest -import pkg/libp2p import pkg/stew/byteutils import pkg/questionable/results import pkg/codex/stores/cachestore diff --git a/tests/codex/stores/testkeyutils.nim b/tests/codex/stores/testkeyutils.nim index 70bf5e2a..7b1da450 100644 --- a/tests/codex/stores/testkeyutils.nim +++ b/tests/codex/stores/testkeyutils.nim @@ -10,7 +10,6 @@ import std/random import std/sequtils import pkg/chronos -import pkg/libp2p import pkg/asynctest import pkg/questionable import pkg/questionable/results diff --git a/tests/codex/stores/testmaintenance.nim b/tests/codex/stores/testmaintenance.nim index e4fc2f85..18806721 100644 --- a/tests/codex/stores/testmaintenance.nim +++ b/tests/codex/stores/testmaintenance.nim @@ -8,7 +8,6 @@ ## those terms. 
import pkg/chronos -import pkg/libp2p import pkg/asynctest import pkg/questionable/results import pkg/codex/blocktype as bt diff --git a/tests/codex/stores/testrepostore.nim b/tests/codex/stores/testrepostore.nim index 7b2f8daa..523f65f0 100644 --- a/tests/codex/stores/testrepostore.nim +++ b/tests/codex/stores/testrepostore.nim @@ -7,7 +7,6 @@ import pkg/questionable/results import pkg/chronos import pkg/asynctest -import pkg/libp2p import pkg/stew/byteutils import pkg/stew/endians2 import pkg/datastore diff --git a/tests/codex/testchunking.nim b/tests/codex/testchunking.nim index efeada27..860e52b6 100644 --- a/tests/codex/testchunking.nim +++ b/tests/codex/testchunking.nim @@ -1,9 +1,9 @@ + import pkg/asynctest import pkg/stew/byteutils import pkg/codex/chunker import pkg/chronicles import pkg/chronos -import pkg/libp2p import ./helpers diff --git a/tests/codex/testerasure.nim b/tests/codex/testerasure.nim index 96dd4c47..94f7c0e8 100644 --- a/tests/codex/testerasure.nim +++ b/tests/codex/testerasure.nim @@ -2,7 +2,6 @@ import std/sequtils import pkg/asynctest import pkg/chronos -import pkg/libp2p import pkg/questionable/results import pkg/codex/erasure diff --git a/tests/codex/testmanifest.nim b/tests/codex/testmanifest.nim index 5c43897e..89cea9c9 100644 --- a/tests/codex/testmanifest.nim +++ b/tests/codex/testmanifest.nim @@ -3,7 +3,6 @@ import std/sequtils import pkg/chronos import pkg/questionable/results import pkg/asynctest -import pkg/libp2p import pkg/stew/byteutils import pkg/codex/chunker diff --git a/tests/codex/testnode.nim b/tests/codex/testnode.nim index abe33763..c61c95aa 100644 --- a/tests/codex/testnode.nim +++ b/tests/codex/testnode.nim @@ -4,11 +4,11 @@ import std/math import pkg/asynctest import pkg/chronos +import pkg/chronicles import pkg/stew/byteutils import pkg/nitro -import pkg/libp2p -import pkg/libp2pdht/discv5/protocol as discv5 +import pkg/codexdht/discv5/protocol as discv5 import pkg/codex/stores import pkg/codex/blockexchange 
diff --git a/tests/codex/teststorestream.nim b/tests/codex/teststorestream.nim index 4edcc1ff..4226f5eb 100644 --- a/tests/codex/teststorestream.nim +++ b/tests/codex/teststorestream.nim @@ -1,6 +1,5 @@ import pkg/chronos import pkg/asynctest -import pkg/libp2p import pkg/questionable/results import ./helpers diff --git a/tests/codex/utils/testkeyutils.nim b/tests/codex/utils/testkeyutils.nim index be370374..2a33818a 100644 --- a/tests/codex/utils/testkeyutils.nim +++ b/tests/codex/utils/testkeyutils.nim @@ -1,7 +1,6 @@ import std/unittest import std/os -import pkg/libp2p -import pkg/questionable/results +import pkg/questionable import codex/utils/keyutils import ../helpers @@ -18,17 +17,17 @@ checksuite "keyutils": os.removeDir(path) test "creates a key file when it does not exist yet": - check setupKey(path / "keyfile").isSuccess + check setupKey(path / "keyfile").isOk check fileExists(path / "keyfile") test "stores key in a file that's only readable by the user": - discard !setupKey(path / "keyfile") + discard setupKey(path / "keyfile").get() when defined(posix): check getFilePermissions(path / "keyfile") == {fpUserRead, fpUserWrite} when defined(windows): check checkCurrentUserOnlyACL(path / "keyfile").get() test "reads key file when it does exist": - let key = !setupKey(path / "keyfile") - check !setupKey(path / "keyfile") == key + let key = setupKey(path / "keyfile").get() + check setupKey(path / "keyfile").get() == key diff --git a/tests/coverage.nim b/tests/coverage.nim new file mode 100644 index 00000000..832ee0d3 --- /dev/null +++ b/tests/coverage.nim @@ -0,0 +1,2 @@ + +include ./testCodex diff --git a/tests/coverage.nims b/tests/coverage.nims new file mode 100644 index 00000000..19297b60 --- /dev/null +++ b/tests/coverage.nims @@ -0,0 +1,12 @@ + +switch("debugger", "native") +switch("lineDir", "on") +switch("define", "debug") +switch("verbosity", "0") +switch("hints", "off") +switch("warnings", "off") +# switches for compiling with coverage 
+switch("passC", "-fprofile-arcs") +switch("passC", "-ftest-coverage") +switch("passL", "-fprofile-arcs") +switch("passL", "-ftest-coverage") diff --git a/tests/integration/nodes.nim b/tests/integration/nodes.nim index 513901d2..8257faa4 100644 --- a/tests/integration/nodes.nim +++ b/tests/integration/nodes.nim @@ -13,7 +13,7 @@ type process: Process arguments: seq[string] debug: bool - Role* = enum + Role* {.pure.} = enum Client, Provider, Validator @@ -57,7 +57,7 @@ proc init*(_: type DebugNodes, proc start(node: NodeProcess) = if node.debug: - node.process = startProcess( + node.process = osproc.startProcess( executable, workingDir, node.arguments, @@ -65,7 +65,7 @@ proc start(node: NodeProcess) = ) sleep(1000) else: - node.process = startProcess( + node.process = osproc.startProcess( executable, workingDir, node.arguments diff --git a/tests/integration/testblockexpiration.nim b/tests/integration/testblockexpiration.nim index 3193b470..e1c417e3 100644 --- a/tests/integration/testblockexpiration.nim +++ b/tests/integration/testblockexpiration.nim @@ -1,5 +1,6 @@ import std/os import std/httpclient +from std/net import TimeoutError import pkg/chronos import ../ethertest diff --git a/tests/logging.nim b/tests/logging.nim index 3b1c3771..cf2633d3 100644 --- a/tests/logging.nim +++ b/tests/logging.nim @@ -1,9 +1,10 @@ -import pkg/chronicles +when not defined(nimscript): + import pkg/chronicles -proc ignoreLogging(level: LogLevel, message: LogOutputStr) = - discard + proc ignoreLogging(level: LogLevel, message: LogOutputStr) = + discard -defaultChroniclesStream.output.writer = ignoreLogging + defaultChroniclesStream.output.writer = ignoreLogging -{.warning[UnusedImport]:off.} -{.used.} + {.warning[UnusedImport]:off.} + {.used.} diff --git a/tests/nimlldb.py b/tests/nimlldb.py new file mode 100644 index 00000000..bcc96e0d --- /dev/null +++ b/tests/nimlldb.py @@ -0,0 +1,1381 @@ +import lldb +from collections import OrderedDict +from typing import Union + + +def 
sbvaluegetitem(self: lldb.SBValue, name: Union[int, str]) -> lldb.SBValue: + if isinstance(name, str): + return self.GetChildMemberWithName(name) + else: + return self.GetChildAtIndex(name) + + +# Make this easier to work with +lldb.SBValue.__getitem__ = sbvaluegetitem + +NIM_IS_V2 = True + + +def get_nti(value: lldb.SBValue, nim_name=None): + name_split = value.type.name.split("_") + type_nim_name = nim_name or name_split[1] + id_string = name_split[-1].split(" ")[0] + + type_info_name = "NTI" + type_nim_name.lower() + "__" + id_string + "_" + nti = value.target.FindFirstGlobalVariable(type_info_name) + if not nti.IsValid(): + type_info_name = "NTI" + "__" + id_string + "_" + nti = value.target.FindFirstGlobalVariable(type_info_name) + if not nti.IsValid(): + print(f"NimEnumPrinter: lookup global symbol: '{type_info_name}' failed for {value.type.name}.\n") + return type_nim_name, nti + + +def enum_to_string(value: lldb.SBValue, int_val=None, nim_name=None): + tname = nim_name or value.type.name.split("_")[1] + + enum_val = value.signed + if int_val is not None: + enum_val = int_val + + default_val = f"{tname}.{str(enum_val)}" + + fn_syms = value.target.FindFunctions("reprEnum") + if not fn_syms.GetSize() > 0: + return default_val + + fn_sym: lldb.SBSymbolContext = fn_syms.GetContextAtIndex(0) + + fn: lldb.SBFunction = fn_sym.function + + fn_type: lldb.SBType = fn.type + arg_types: lldb.SBTypeList = fn_type.GetFunctionArgumentTypes() + if arg_types.GetSize() < 2: + return default_val + + arg1_type: lldb.SBType = arg_types.GetTypeAtIndex(0) + arg2_type: lldb.SBType = arg_types.GetTypeAtIndex(1) + + ty_info_name, nti = get_nti(value, nim_name=tname) + + if not nti.IsValid(): + return default_val + + call = f"{fn.name}(({arg1_type.name}){enum_val}, ({arg2_type.name})" + str(nti.GetLoadAddress()) + ");" + + res = executeCommand(call) + + if res.error.fail: + return default_val + + return f"{tname}.{res.summary[1:-1]}" + + +def to_string(value: lldb.SBValue): + # For 
getting NimStringDesc * value + value = value.GetNonSyntheticValue() + + # Check if data pointer is Null + if value.type.is_pointer and value.unsigned == 0: + return None + + size = int(value["Sup"]["len"].unsigned) + + if size == 0: + return "" + + if size > 2**14: + return "... (too long) ..." + + data = value["data"] + + # Check if first element is NULL + base_data_type = value.target.FindFirstType("char") + cast = data.Cast(base_data_type) + + if cast.unsigned == 0: + return None + + cast = data.Cast(value.target.FindFirstType("char").GetArrayType(size)) + return bytearray(cast.data.uint8s).decode("utf-8") + + +def to_stringV2(value: lldb.SBValue): + # For getting NimStringV2 value + value = value.GetNonSyntheticValue() + + data = value["p"]["data"] + + # Check if data pointer is Null + if value["p"].unsigned == 0: + return None + + size = int(value["len"].signed) + + if size == 0: + return "" + + if size > 2**14: + return "... (too long) ..." + + # Check if first element is NULL + base_data_type = data.type.GetArrayElementType().GetTypedefedType() + cast = data.Cast(base_data_type) + + if cast.unsigned == 0: + return None + + cast = data.Cast(base_data_type.GetArrayType(size)) + return bytearray(cast.data.uint8s).decode("utf-8") + + +def NimString(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + if NIM_IS_V2: + res = to_stringV2(value) + else: + res = to_string(value) + + if res is not None: + return f'"{res}"' + else: + return "nil" + + +def rope_helper(value: lldb.SBValue) -> str: + value = value.GetNonSyntheticValue() + if value.type.is_pointer and value.unsigned == 0: + return "" + + if value["length"].unsigned == 0: + return "" + + if NIM_IS_V2: + str_val = to_stringV2(value["data"]) + else: + str_val = to_string(value["data"]) + + if str_val is None: + str_val = "" + + return 
rope_helper(value["left"]) + str_val + rope_helper(value["right"]) + + +def Rope(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + rope_str = rope_helper(value) + + if len(rope_str) == 0: + rope_str = "nil" + else: + rope_str = f'"{rope_str}"' + + return f"Rope({rope_str})" + + +def NCSTRING(value: lldb.SBValue, internal_dict=None): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + ty = value.Dereference().type + val = value.target.CreateValueFromAddress( + value.name or "temp", lldb.SBAddress(value.unsigned, value.target), ty + ).AddressOf() + return val.summary + + +def ObjectV2(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + orig_value = value.GetNonSyntheticValue() + if orig_value.type.is_pointer and orig_value.unsigned == 0: + return "nil" + + custom_summary = get_custom_summary(value) + if custom_summary is not None: + return custom_summary + + while orig_value.type.is_pointer: + orig_value = orig_value.Dereference() + + if "_" in orig_value.type.name: + obj_name = orig_value.type.name.split("_")[1].replace("colonObjectType", "") + else: + obj_name = orig_value.type.name + + num_children = value.num_children + fields = [] + + for i in range(num_children): + fields.append(f"{value[i].name}: {value[i].summary}") + + res = f"{obj_name}(" + ", ".join(fields) + ")" + return res + + +def Number(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + if value.type.is_pointer and value.signed == 0: + return "nil" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + return str(value.signed) + + +def Float(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return 
"undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + return str(value.value) + + +def UnsignedNumber(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + return str(value.unsigned) + + +def Bool(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + return str(value.value) + + +def CharArray(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + return str([f"'{char}'" for char in value.uint8s]) + + +def Array(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + value = value.GetNonSyntheticValue() + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + value = value.GetNonSyntheticValue() + return "[" + ", ".join([value[i].summary for i in range(value.num_children)]) + "]" + + +def Tuple(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + while value.type.is_pointer: + value = value.Dereference() + + num_children = value.num_children + + fields = [] + + for i in range(num_children): + key = value[i].name + val = value[i].summary + if key.startswith("Field"): + fields.append(f"{val}") + else: + fields.append(f"{key}: {val}") + + return "(" + ", ".join(fields) + f")" + + +def is_local(value: lldb.SBValue) -> bool: + line: lldb.SBLineEntry = 
value.frame.GetLineEntry() + decl: lldb.SBDeclaration = value.GetDeclaration() + + if line.file == decl.file and decl.line != 0: + return True + + return False + + +def is_in_scope(value: lldb.SBValue) -> bool: + line: lldb.SBLineEntry = value.frame.GetLineEntry() + decl: lldb.SBDeclaration = value.GetDeclaration() + + if is_local(value) and decl.line < line.line: + return True + + return False + + +def Enum(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_value_summary(value) + if custom_summary is not None: + return custom_summary + + return enum_to_string(value) + + +def EnumSet(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + vals = [] + max_vals = 7 + for child in value.children: + vals.append(child.summary) + if len(vals) > max_vals: + vals.append("...") + break + + return "{" + ", ".join(vals) + "}" + + +def Set(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if custom_summary is not None: + return custom_summary + + vals = [] + max_vals = 7 + for child in value.children: + vals.append(child.value) + if len(vals) > max_vals: + vals.append("...") + break + + return "{" + ", ".join(vals) + "}" + + +def Table(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if custom_summary is not None: + return custom_summary + + fields = [] + + for i in range(value.num_children): + key = value[i].name + val = value[i].summary + fields.append(f"{key}: {val}") + + return "Table({" + ", ".join(fields) + "})" + + +def HashSet(value: lldb.SBValue, internal_dict): + if is_local(value): + if not 
is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if custom_summary is not None: + return custom_summary + + fields = [] + + for i in range(value.num_children): + fields.append(f"{value[i].summary}") + + return "HashSet({" + ", ".join(fields) + "})" + + +def StringTable(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + fields = [] + + for i in range(value.num_children - 1): + key = value[i].name + val = value[i].summary + fields.append(f"{key}: {val}") + + mode = value[value.num_children - 1].summary + + return "StringTable({" + ", ".join(fields) + f"}}, mode={mode})" + + +def Sequence(value: lldb.SBValue, internal_dict): + if is_local(value): + if not is_in_scope(value): + return "undefined" + + custom_summary = get_custom_summary(value) + if not custom_summary is None: + return custom_summary + + return "@[" + ", ".join([value[i].summary for i in range(value.num_children)]) + "]" + + +class StringChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.data_type: lldb.SBType + if not NIM_IS_V2: + self.data_type = self.value.target.FindFirstType("char") + + self.first_element: lldb.SBValue + self.update() + self.count = 0 + + def num_children(self): + return self.count + + def get_child_index(self, name): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + offset = index * self.data_size + return self.first_element.CreateChildAtOffset("[" + str(index) + "]", offset, self.data_type) + + def get_data(self) -> lldb.SBValue: + return self.value["p"]["data"] if NIM_IS_V2 else self.value["data"] + + def get_len(self) -> int: + if NIM_IS_V2: + if self.value["p"].unsigned == 0: + return 0 + + size = int(self.value["len"].signed) + + if size == 0: + return 0 + + data = 
self.value["p"]["data"] + + # Check if first element is NULL + base_data_type = data.type.GetArrayElementType().GetTypedefedType() + cast = data.Cast(base_data_type) + + if cast.unsigned == 0: + return 0 + else: + if self.value.type.is_pointer and self.value.unsigned == 0: + return 0 + + size = int(self.value["Sup"]["len"].unsigned) + + if size == 0: + return 0 + + data = self.value["data"] + + # Check if first element is NULL + base_data_type = self.value.target.FindFirstType("char") + cast = data.Cast(base_data_type) + + if cast.unsigned == 0: + return 0 + + return size + + def update(self): + if is_local(self.value): + if not is_in_scope(self.value): + return + + data = self.get_data() + size = self.get_len() + + self.count = size + self.first_element = data + + if NIM_IS_V2: + self.data_type = data.type.GetArrayElementType().GetTypedefedType() + + self.data_size = self.data_type.GetByteSize() + + def has_children(self): + return bool(self.num_children()) + + +class ArrayChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.data_type: lldb.SBType + self.first_element: lldb.SBValue + self.update() + + def num_children(self): + return self.has_children() and self.value.num_children + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + offset = index * self.value[index].GetByteSize() + return self.first_element.CreateChildAtOffset("[" + str(index) + "]", offset, self.data_type) + + def update(self): + if not self.has_children(): + return + + self.first_element = self.value[0] + self.data_type = self.value.type.GetArrayElementType() + + def has_children(self): + if is_local(self.value): + if not is_in_scope(self.value): + return False + return bool(self.value.num_children) + + +class SeqChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.data_type: lldb.SBType + self.first_element: lldb.SBValue 
+ self.data: lldb.SBValue + self.count = 0 + self.update() + + def num_children(self): + return self.count + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + offset = index * self.data[index].GetByteSize() + return self.first_element.CreateChildAtOffset("[" + str(index) + "]", offset, self.data_type) + + def get_data(self) -> lldb.SBValue: + return self.value["p"]["data"] if NIM_IS_V2 else self.value["data"] + + def get_len(self) -> lldb.SBValue: + return self.value["len"] if NIM_IS_V2 else self.value["Sup"]["len"] + + def update(self): + self.count = 0 + + if is_local(self.value): + if not is_in_scope(self.value): + return + + self.count = self.get_len().unsigned + + if not self.has_children(): + return + + data = self.get_data() + self.data_type = data.type.GetArrayElementType() + + self.data = data.Cast(self.data_type.GetArrayType(self.num_children())) + self.first_element = self.data + + def has_children(self): + return bool(self.num_children()) + + +class ObjectChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.data_type: lldb.SBType + self.first_element: lldb.SBValue + self.data: lldb.SBValue + self.children: OrderedDict[str, int] = OrderedDict() + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.children) + + def get_child_index(self, name: str): + return self.children[name] + + def get_child_at_index(self, index): + return self.child_list[index] + + def populate_children(self): + self.children.clear() + self.child_list = [] + + if is_local(self.value): + if not is_in_scope(self.value): + return + + stack = [self.value.GetNonSyntheticValue()] + + index = 0 + + while stack: + cur_val = stack.pop() + if cur_val.type.is_pointer and cur_val.unsigned == 0: + continue + + while cur_val.type.is_pointer: + cur_val = cur_val.Dereference() + + # Add super objects if they exist + if 
cur_val.num_children > 0 and cur_val[0].name == "Sup" and cur_val[0].type.name.startswith("tyObject"): + stack.append(cur_val[0]) + + for child in cur_val.children: + child = child.GetNonSyntheticValue() + if child.name == "Sup": + continue + self.children[child.name] = index + self.child_list.append(child) + index += 1 + + def update(self): + self.populate_children() + + def has_children(self): + return bool(self.num_children()) + + +class HashSetChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + return self.child_list[index] + + def get_data(self) -> lldb.SBValue: + return self.value["data"]["p"]["data"] if NIM_IS_V2 else self.value["data"]["data"] + + def get_len(self) -> lldb.SBValue: + return self.value["data"]["len"] if NIM_IS_V2 else self.value["data"]["Sup"]["len"] + + def update(self): + self.child_list = [] + + if is_local(self.value): + if not is_in_scope(self.value): + return + + tuple_len = int(self.get_len().unsigned) + tuple = self.get_data() + + base_data_type = tuple.type.GetArrayElementType() + + cast = tuple.Cast(base_data_type.GetArrayType(tuple_len)) + + index = 0 + for i in range(tuple_len): + el = cast[i] + field0 = int(el[0].unsigned) + if field0 == 0: + continue + key = el[1] + child = key.CreateValueFromAddress(f"[{str(index)}]", key.GetLoadAddress(), key.GetType()) + index += 1 + + self.child_list.append(child) + + def has_children(self): + return bool(self.num_children()) + + +class SetCharChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.ty = self.value.target.FindFirstType("char") + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + 
def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + return self.child_list[index] + + def update(self): + self.child_list = [] + if is_local(self.value): + if not is_in_scope(self.value): + return + + cur_pos = 0 + for child in self.value.children: + child_val = child.signed + if child_val != 0: + temp = child_val + num_bits = 8 + while temp != 0: + is_set = temp & 1 + if is_set == 1: + data = lldb.SBData.CreateDataFromInt(cur_pos) + child = self.value.synthetic_child_from_data(f"[{len(self.child_list)}]", data, self.ty) + self.child_list.append(child) + temp = temp >> 1 + cur_pos += 1 + num_bits -= 1 + cur_pos += num_bits + else: + cur_pos += 8 + + def has_children(self): + return bool(self.num_children()) + + +def create_set_children(value: lldb.SBValue, child_type: lldb.SBType, starting_pos: int) -> list[lldb.SBValue]: + child_list: list[lldb.SBValue] = [] + cur_pos = starting_pos + + if value.num_children > 0: + children = value.children + else: + children = [value] + + for child in children: + child_val = child.signed + if child_val != 0: + temp = child_val + num_bits = 8 + while temp != 0: + is_set = temp & 1 + if is_set == 1: + data = lldb.SBData.CreateDataFromInt(cur_pos) + child = value.synthetic_child_from_data(f"[{len(child_list)}]", data, child_type) + child_list.append(child) + temp = temp >> 1 + cur_pos += 1 + num_bits -= 1 + cur_pos += num_bits + else: + cur_pos += 8 + + return child_list + + +class SetIntChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.ty = self.value.target.FindFirstType(f"NI64") + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + return self.child_list[index] + + def update(self): + self.child_list = [] + if 
is_local(self.value): + if not is_in_scope(self.value): + return + bits = self.value.GetByteSize() * 8 + starting_pos = -(bits // 2) + self.child_list = create_set_children(self.value, self.ty, starting_pos) + + def has_children(self): + return bool(self.num_children()) + + +class SetUIntChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.ty = self.value.target.FindFirstType(f"NU64") + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + return self.child_list[index] + + def update(self): + self.child_list = [] + if is_local(self.value): + if not is_in_scope(self.value): + return + self.child_list = create_set_children(self.value, self.ty, starting_pos=0) + + def has_children(self): + return bool(self.num_children()) + + +class SetEnumChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.ty = self.value.target.FindFirstType(self.value.type.name.replace("tySet_", "")) + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return int(name.lstrip("[").rstrip("]")) + + def get_child_at_index(self, index): + return self.child_list[index] + + def update(self): + if is_local(self.value): + if not is_in_scope(self.value): + return + self.child_list = create_set_children(self.value, self.ty, starting_pos=0) + + def has_children(self): + return bool(self.num_children()) + + +class TableChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.children: OrderedDict[str, int] = OrderedDict() + self.child_list: list[lldb.SBValue] = [] + + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: 
str): + return self.children[name] + + def get_child_at_index(self, index): + return self.child_list[index] + + def get_data(self) -> lldb.SBValue: + return self.value["data"]["p"]["data"] if NIM_IS_V2 else self.value["data"]["data"] + + def get_len(self) -> lldb.SBValue: + return self.value["data"]["len"] if NIM_IS_V2 else self.value["data"]["Sup"]["len"] + + def update(self): + self.child_list = [] + if is_local(self.value): + if not is_in_scope(self.value): + return + + tuple_len = int(self.get_len().unsigned) + tuple = self.get_data() + + base_data_type = tuple.type.GetArrayElementType() + + cast = tuple.Cast(base_data_type.GetArrayType(tuple_len)) + + index = 0 + for i in range(tuple_len): + el = cast[i] + field0 = int(el[0].unsigned) + if field0 == 0: + continue + key = el[1] + val = el[2] + key_summary = key.summary + child = self.value.CreateValueFromAddress(key_summary, val.GetLoadAddress(), val.GetType()) + self.child_list.append(child) + self.children[key_summary] = index + index += 1 + + def has_children(self): + return bool(self.num_children()) + + +class StringTableChildrenProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value = value + self.children: OrderedDict[str, int] = OrderedDict() + self.child_list: list[lldb.SBValue] = [] + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return self.children[name] + + def get_child_at_index(self, index): + return self.child_list[index] + + def get_data(self) -> lldb.SBValue: + return self.value["data"]["p"]["data"] if NIM_IS_V2 else self.value["data"]["data"] + + def get_len(self) -> lldb.SBValue: + return self.value["data"]["len"] if NIM_IS_V2 else self.value["data"]["Sup"]["len"] + + def update(self): + self.children.clear() + self.child_list = [] + + if is_local(self.value): + if not is_in_scope(self.value): + return + + tuple_len = int(self.get_len().unsigned) + tuple = self.get_data() + + base_data_type = 
tuple.type.GetArrayElementType() + + cast = tuple.Cast(base_data_type.GetArrayType(tuple_len)) + + index = 0 + for i in range(tuple_len): + el = cast[i] + field0 = int(el[2].unsigned) + if field0 == 0: + continue + key = el[0] + val = el[1] + child = val.CreateValueFromAddress(key.summary, val.GetLoadAddress(), val.GetType()) + self.child_list.append(child) + self.children[key.summary] = index + index += 1 + + self.child_list.append(self.value["mode"]) + self.children["mode"] = index + + def has_children(self): + return bool(self.num_children()) + + +class LLDBDynamicObjectProvider: + def __init__(self, value: lldb.SBValue, internalDict): + value = value.GetNonSyntheticValue() + self.value: lldb.SBValue = value[0] + self.children: OrderedDict[str, int] = OrderedDict() + self.child_list: list[lldb.SBValue] = [] + + while self.value.type.is_pointer: + self.value = self.value.Dereference() + + self.update() + + def num_children(self): + return len(self.child_list) + + def get_child_index(self, name: str): + return self.children[name] + + def get_child_at_index(self, index): + return self.child_list[index] + + def update(self): + self.children.clear() + self.child_list = [] + + for i, child in enumerate(self.value.children): + name = child.name.strip('"') + new_child = child.CreateValueFromAddress(name, child.GetLoadAddress(), child.GetType()) + + self.children[name] = i + self.child_list.append(new_child) + + def has_children(self): + return bool(self.num_children()) + + +class LLDBBasicObjectProvider: + def __init__(self, value: lldb.SBValue, internalDict): + self.value: lldb.SBValue = value + + def num_children(self): + if self.value is not None: + return self.value.num_children + return 0 + + def get_child_index(self, name: str): + return self.value.GetIndexOfChildWithName(name) + + def get_child_at_index(self, index): + return self.value.GetChildAtIndex(index) + + def update(self): + pass + + def has_children(self): + return self.num_children() > 0 + + +class 
CustomObjectChildrenProvider: + """ + This children provider handles values returned from lldbDebugSynthetic* + Nim procedures + """ + + def __init__(self, value: lldb.SBValue, internalDict): + self.value: lldb.SBValue = get_custom_synthetic(value) or value + if "lldbdynamicobject" in self.value.type.name.lower(): + self.provider = LLDBDynamicObjectProvider(self.value, internalDict) + else: + self.provider = LLDBBasicObjectProvider(self.value, internalDict) + + def num_children(self): + return self.provider.num_children() + + def get_child_index(self, name: str): + return self.provider.get_child_index(name) + + def get_child_at_index(self, index): + return self.provider.get_child_at_index(index) + + def update(self): + self.provider.update() + + def has_children(self): + return self.provider.has_children() + + +def echo(debugger: lldb.SBDebugger, command: str, result, internal_dict): + debugger.HandleCommand("po " + command) + + +SUMMARY_FUNCTIONS: dict[str, lldb.SBFunction] = {} +SYNTHETIC_FUNCTIONS: dict[str, lldb.SBFunction] = {} + + +def get_custom_summary(value: lldb.SBValue) -> Union[str, None]: + """Get a custom summary if a function exists for it""" + value = value.GetNonSyntheticValue() + if value.GetAddress().GetOffset() == 0: + return None + + base_type = get_base_type(value.type) + + fn = SUMMARY_FUNCTIONS.get(base_type.name) + if fn is None: + return None + + fn_type: lldb.SBType = fn.type + + arg_types: lldb.SBTypeList = fn_type.GetFunctionArgumentTypes() + first_type = arg_types.GetTypeAtIndex(0) + + while value.type.is_pointer: + value = value.Dereference() + + if first_type.is_pointer: + command = f"{fn.name}(({first_type.name})" + str(value.GetLoadAddress()) + ");" + else: + command = f"{fn.name}(*({first_type.GetPointerType().name})" + str(value.GetLoadAddress()) + ");" + + res = executeCommand(command) + + if res.error.fail: + return None + + return res.summary.strip('"') + + +def get_custom_value_summary(value: lldb.SBValue) -> Union[str, 
None]: + """Get a custom summary if a function exists for it""" + + fn: lldb.SBFunction = SUMMARY_FUNCTIONS.get(value.type.name) + if fn is None: + return None + + command = f"{fn.name}(({value.type.name})" + str(value.signed) + ");" + res = executeCommand(command) + + if res.error.fail: + return None + + return res.summary.strip('"') + + +def get_custom_synthetic(value: lldb.SBValue) -> Union[lldb.SBValue, None]: + """Get a custom synthetic object if a function exists for it""" + value = value.GetNonSyntheticValue() + if value.GetAddress().GetOffset() == 0: + return None + + base_type = get_base_type(value.type) + + fn = SYNTHETIC_FUNCTIONS.get(base_type.name) + if fn is None: + return None + + fn_type: lldb.SBType = fn.type + + arg_types: lldb.SBTypeList = fn_type.GetFunctionArgumentTypes() + first_type = arg_types.GetTypeAtIndex(0) + + while value.type.is_pointer: + value = value.Dereference() + + if first_type.is_pointer: + first_arg = f"({first_type.name}){value.GetLoadAddress()}" + else: + first_arg = f"*({first_type.GetPointerType().name}){value.GetLoadAddress()}" + + if arg_types.GetSize() > 1 and fn.GetArgumentName(1) == "Result": + ret_type = arg_types.GetTypeAtIndex(1) + ret_type = get_base_type(ret_type) + + command = f""" + {ret_type.name} lldbT; + nimZeroMem((void*)(&lldbT), sizeof({ret_type.name})); + {fn.name}(({first_arg}), (&lldbT)); + lldbT; + """ + else: + command = f"{fn.name}({first_arg});" + + res = executeCommand(command) + + if res.error.fail: + print(res.error) + return None + + return res + + +def get_base_type(ty: lldb.SBType) -> lldb.SBType: + """Get the base type of the type""" + temp = ty + while temp.IsPointerType(): + temp = temp.GetPointeeType() + return temp + + +def use_base_type(ty: lldb.SBType) -> bool: + types_to_check = [ + "NF", + "NF32", + "NF64", + "NI", + "NI8", + "NI16", + "NI32", + "NI64", + "bool", + "NIM_BOOL", + "NU", + "NU8", + "NU16", + "NU32", + "NU64", + ] + + for type_to_check in types_to_check: + if 
ty.name.startswith(type_to_check): + return False + + return True + + +def breakpoint_function_wrapper(frame: lldb.SBFrame, bp_loc, internal_dict): + """This allows function calls to Nim for custom object summaries and synthetic children""" + debugger = lldb.debugger + + global SUMMARY_FUNCTIONS + global SYNTHETIC_FUNCTIONS + + global NIM_IS_V2 + + for tname, fn in SYNTHETIC_FUNCTIONS.items(): + debugger.HandleCommand(f"type synthetic delete -w nim {tname}") + + SUMMARY_FUNCTIONS = {} + SYNTHETIC_FUNCTIONS = {} + + target: lldb.SBTarget = debugger.GetSelectedTarget() + + NIM_IS_V2 = target.FindFirstType("TNimTypeV2").IsValid() + + module = frame.GetSymbolContext(lldb.eSymbolContextModule).module + + for sym in module: + if ( + not sym.name.startswith("lldbDebugSummary") + and not sym.name.startswith("lldbDebugSynthetic") + and not sym.name.startswith("dollar___") + ): + continue + + fn_syms: lldb.SBSymbolContextList = target.FindFunctions(sym.name) + if not fn_syms.GetSize() > 0: + continue + + fn_sym: lldb.SBSymbolContext = fn_syms.GetContextAtIndex(0) + + fn: lldb.SBFunction = fn_sym.function + fn_type: lldb.SBType = fn.type + arg_types: lldb.SBTypeList = fn_type.GetFunctionArgumentTypes() + + if arg_types.GetSize() > 1 and fn.GetArgumentName(1) == "Result": + pass # don't continue + elif arg_types.GetSize() != 1: + continue + + arg_type: lldb.SBType = arg_types.GetTypeAtIndex(0) + if use_base_type(arg_type): + arg_type = get_base_type(arg_type) + + if sym.name.startswith("lldbDebugSummary") or sym.name.startswith("dollar___"): + SUMMARY_FUNCTIONS[arg_type.name] = fn + elif sym.name.startswith("lldbDebugSynthetic"): + SYNTHETIC_FUNCTIONS[arg_type.name] = fn + debugger.HandleCommand( + f"type synthetic add -w nim -l {__name__}.CustomObjectChildrenProvider {arg_type.name}" + ) + + +def executeCommand(command, *args): + debugger = lldb.debugger + process = debugger.GetSelectedTarget().GetProcess() + frame: lldb.SBFrame = 
process.GetSelectedThread().GetSelectedFrame() + + expr_options = lldb.SBExpressionOptions() + expr_options.SetIgnoreBreakpoints(False) + expr_options.SetFetchDynamicValue(lldb.eDynamicCanRunTarget) + expr_options.SetTimeoutInMicroSeconds(30 * 1000 * 1000) # 30 second timeout + expr_options.SetTryAllThreads(True) + expr_options.SetUnwindOnError(False) + expr_options.SetGenerateDebugInfo(True) + expr_options.SetLanguage(lldb.eLanguageTypeC) + expr_options.SetCoerceResultToId(True) + res = frame.EvaluateExpression(command, expr_options) + + return res + + +def __lldb_init_module(debugger, internal_dict): + # fmt: off + print("internal_dict: ", internal_dict.keys()) + debugger.HandleCommand(f"breakpoint command add -F {__name__}.breakpoint_function_wrapper --script-type python 1") + debugger.HandleCommand(f"type summary add -w nim -n sequence -F {__name__}.Sequence -x tySequence_+[[:alnum:]]+$") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SeqChildrenProvider -x tySequence_+[[:alnum:]]+$") + + debugger.HandleCommand(f"type summary add -w nim -n chararray -F {__name__}.CharArray -x char\s+[\d+]") + debugger.HandleCommand(f"type summary add -w nim -n array -F {__name__}.Array -x tyArray_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.ArrayChildrenProvider -x tyArray_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n string -F {__name__}.NimString NimStringDesc") + + debugger.HandleCommand(f"type summary add -w nim -n stringv2 -F {__name__}.NimString -x NimStringV2$") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.StringChildrenProvider -x NimStringV2$") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.StringChildrenProvider -x NimStringDesc$") + + debugger.HandleCommand(f"type summary add -w nim -n cstring -F {__name__}.NCSTRING NCSTRING") + + debugger.HandleCommand(f"type summary add -w nim -n object -F {__name__}.ObjectV2 -x 
tyObject_+[[:alnum:]]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.ObjectChildrenProvider -x tyObject_+[[:alnum:]]+_+[[:alnum:]]+$") + + debugger.HandleCommand(f"type summary add -w nim -n tframe -F {__name__}.ObjectV2 -x TFrame$") + + debugger.HandleCommand(f"type summary add -w nim -n rootobj -F {__name__}.ObjectV2 -x RootObj$") + + debugger.HandleCommand(f"type summary add -w nim -n enum -F {__name__}.Enum -x tyEnum_+[[:alnum:]]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n hashset -F {__name__}.HashSet -x tyObject_+HashSet_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.HashSetChildrenProvider -x tyObject_+HashSet_+[[:alnum:]]+") + + debugger.HandleCommand(f"type summary add -w nim -n rope -F {__name__}.Rope -x tyObject_+Rope[[:alnum:]]+_+[[:alnum:]]+") + + debugger.HandleCommand(f"type summary add -w nim -n setuint -F {__name__}.Set -x tySet_+tyInt_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SetIntChildrenProvider -x tySet_+tyInt[0-9]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n setint -F {__name__}.Set -x tySet_+tyInt[0-9]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n setuint2 -F {__name__}.Set -x tySet_+tyUInt[0-9]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SetUIntChildrenProvider -x tySet_+tyUInt[0-9]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SetUIntChildrenProvider -x tySet_+tyInt_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n setenum -F {__name__}.EnumSet -x tySet_+tyEnum_+[[:alnum:]]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SetEnumChildrenProvider -x tySet_+tyEnum_+[[:alnum:]]+_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n setchar -F {__name__}.Set -x tySet_+tyChar_+[[:alnum:]]+") + 
debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.SetCharChildrenProvider -x tySet_+tyChar_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n table -F {__name__}.Table -x tyObject_+Table_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.TableChildrenProvider -x tyObject_+Table_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n stringtable -F {__name__}.StringTable -x tyObject_+StringTableObj_+[[:alnum:]]+") + debugger.HandleCommand(f"type synthetic add -w nim -l {__name__}.StringTableChildrenProvider -x tyObject_+StringTableObj_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n tuple2 -F {__name__}.Tuple -x tyObject_+Tuple_+[[:alnum:]]+") + debugger.HandleCommand(f"type summary add -w nim -n tuple -F {__name__}.Tuple -x tyTuple_+[[:alnum:]]+") + + debugger.HandleCommand(f"type summary add -w nim -n float -F {__name__}.Float NF") + debugger.HandleCommand(f"type summary add -w nim -n float32 -F {__name__}.Float NF32") + debugger.HandleCommand(f"type summary add -w nim -n float64 -F {__name__}.Float NF64") + debugger.HandleCommand(f"type summary add -w nim -n integer -F {__name__}.Number -x NI") + debugger.HandleCommand(f"type summary add -w nim -n integer8 -F {__name__}.Number -x NI8") + debugger.HandleCommand(f"type summary add -w nim -n integer16 -F {__name__}.Number -x NI16") + debugger.HandleCommand(f"type summary add -w nim -n integer32 -F {__name__}.Number -x NI32") + debugger.HandleCommand(f"type summary add -w nim -n integer64 -F {__name__}.Number -x NI64") + debugger.HandleCommand(f"type summary add -w nim -n bool -F {__name__}.Bool -x bool") + debugger.HandleCommand(f"type summary add -w nim -n bool2 -F {__name__}.Bool -x NIM_BOOL") + debugger.HandleCommand(f"type summary add -w nim -n uinteger -F {__name__}.UnsignedNumber -x NU") + debugger.HandleCommand(f"type summary add -w nim -n uinteger8 -F {__name__}.UnsignedNumber -x NU8") + 
debugger.HandleCommand(f"type summary add -w nim -n uinteger16 -F {__name__}.UnsignedNumber -x NU16") + debugger.HandleCommand(f"type summary add -w nim -n uinteger32 -F {__name__}.UnsignedNumber -x NU32") + debugger.HandleCommand(f"type summary add -w nim -n uinteger64 -F {__name__}.UnsignedNumber -x NU64") + debugger.HandleCommand("type category enable nim") + debugger.HandleCommand(f"command script add -f {__name__}.echo echo") + # fmt: on diff --git a/vendor/asynctest b/vendor/asynctest index a236a5f0..fe1a34ca 160000 --- a/vendor/asynctest +++ b/vendor/asynctest @@ -1 +1 @@ -Subproject commit a236a5f0f3031573ac2cb082b63dbf6e170e06e7 +Subproject commit fe1a34caf572b05f8bdba3b650f1871af9fce31e diff --git a/vendor/atlas.workspace b/vendor/atlas.workspace new file mode 100644 index 00000000..812bfb2d --- /dev/null +++ b/vendor/atlas.workspace @@ -0,0 +1,3 @@ +deps="" +resolver="MaxVer" +overrides="urls.rules" diff --git a/vendor/dnsclient.nim b/vendor/dnsclient.nim index fbb76f8a..23214235 160000 --- a/vendor/dnsclient.nim +++ b/vendor/dnsclient.nim @@ -1 +1 @@ -Subproject commit fbb76f8af8a33ab818184a7d4406d9fee20993be +Subproject commit 23214235d4784d24aceed99bbfe153379ea557c8 diff --git a/vendor/nim-bearssl b/vendor/nim-bearssl index f4c4233d..99fcb340 160000 --- a/vendor/nim-bearssl +++ b/vendor/nim-bearssl @@ -1 +1 @@ -Subproject commit f4c4233de453cb7eac0ce3f3ffad6496295f83ab +Subproject commit 99fcb3405c55b27cfffbf60f5368c55da7346f23 diff --git a/vendor/nim-chronicles b/vendor/nim-chronicles index 7631f7b2..c9c8e58e 160000 --- a/vendor/nim-chronicles +++ b/vendor/nim-chronicles @@ -1 +1 @@ -Subproject commit 7631f7b2ee03398cb1512a79923264e8f9410af6 +Subproject commit c9c8e58ec3f89b655a046c485f622f9021c68b61 diff --git a/vendor/nim-chronos b/vendor/nim-chronos index 6525f4ce..0277b65b 160000 --- a/vendor/nim-chronos +++ b/vendor/nim-chronos @@ -1 +1 @@ -Subproject commit 6525f4ce1d1a7eba146e5f1a53f6f105077ae686 +Subproject commit 
0277b65be2c7a365ac13df002fba6e172be55537 diff --git a/vendor/nim-eth b/vendor/nim-eth index 5885f638..15a09fab 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit 5885f638e47b8607683ef9e1e77fc21ce1aede44 +Subproject commit 15a09fab737d08a2545284c727199c377bb0f4b7 diff --git a/vendor/nim-faststreams b/vendor/nim-faststreams index 1b561a9e..720fc5e5 160000 --- a/vendor/nim-faststreams +++ b/vendor/nim-faststreams @@ -1 +1 @@ -Subproject commit 1b561a9e71b6bdad1c1cdff753418906037e9d09 +Subproject commit 720fc5e5c8e428d9d0af618e1e27c44b42350309 diff --git a/vendor/nim-http-utils b/vendor/nim-http-utils index e88e231d..3b491a40 160000 --- a/vendor/nim-http-utils +++ b/vendor/nim-http-utils @@ -1 +1 @@ -Subproject commit e88e231dfcef4585fe3b2fbd9b664dbd28a88040 +Subproject commit 3b491a40c60aad9e8d3407443f46f62511e63b18 diff --git a/vendor/nim-json-serialization b/vendor/nim-json-serialization index e5b18fb7..bb53d49c 160000 --- a/vendor/nim-json-serialization +++ b/vendor/nim-json-serialization @@ -1 +1 @@ -Subproject commit e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4 +Subproject commit bb53d49caf2a6c6cf1df365ba84af93cdcfa7aa3 diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index 8c2eca18..440461b2 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit 8c2eca18dcc538c57a8fbc0c53fd0b9d24d56cff +Subproject commit 440461b24b9e66542b34d26a0b908c17f6549d05 diff --git a/vendor/nim-libp2p-dht b/vendor/nim-libp2p-dht index bd517f0e..3c940ea8 160000 --- a/vendor/nim-libp2p-dht +++ b/vendor/nim-libp2p-dht @@ -1 +1 @@ -Subproject commit bd517f0e8da38a1b5da15f7deb2d5c652ca389f1 +Subproject commit 3c940ea8901ae6118e66cc4df423b8ff53699eb4 diff --git a/vendor/nim-metrics b/vendor/nim-metrics index 743f81d4..6142e433 160000 --- a/vendor/nim-metrics +++ b/vendor/nim-metrics @@ -1 +1 @@ -Subproject commit 743f81d4f6c6ebf0ac02389f2392ff8b4235bee5 +Subproject commit 6142e433fc8ea9b73379770a788017ac528d46ff diff --git 
a/vendor/nim-protobuf-serialization b/vendor/nim-protobuf-serialization new file mode 160000 index 00000000..28214b3e --- /dev/null +++ b/vendor/nim-protobuf-serialization @@ -0,0 +1 @@ +Subproject commit 28214b3e40c755a9886d2ec8f261ec48fbb6bec6 diff --git a/vendor/nim-results b/vendor/nim-results new file mode 160000 index 00000000..f3c666a2 --- /dev/null +++ b/vendor/nim-results @@ -0,0 +1 @@ +Subproject commit f3c666a272c69d70cb41e7245e7f6844797303ad diff --git a/vendor/nim-secp256k1 b/vendor/nim-secp256k1 index 5340cf18..2acbbdcc 160000 --- a/vendor/nim-secp256k1 +++ b/vendor/nim-secp256k1 @@ -1 +1 @@ -Subproject commit 5340cf188168d6afcafc8023770d880f067c0b2f +Subproject commit 2acbbdcc0e63002a013fff49f015708522875832 diff --git a/vendor/nim-serialization b/vendor/nim-serialization index 493d18b8..384eb256 160000 --- a/vendor/nim-serialization +++ b/vendor/nim-serialization @@ -1 +1 @@ -Subproject commit 493d18b8292fc03aa4f835fd825dea1183f97466 +Subproject commit 384eb2561ee755446cff512a8e057325848b86a7 diff --git a/vendor/nim-sqlite3-abi b/vendor/nim-sqlite3-abi index fda455cf..362e1bd9 160000 --- a/vendor/nim-sqlite3-abi +++ b/vendor/nim-sqlite3-abi @@ -1 +1 @@ -Subproject commit fda455cfea2df707dde052034411ce63de218453 +Subproject commit 362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3 diff --git a/vendor/nim-stew b/vendor/nim-stew index e18f5a62..7afe7e3c 160000 --- a/vendor/nim-stew +++ b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit e18f5a62af2ade7a1fd1d39635d4e04d944def08 +Subproject commit 7afe7e3c070758cac1f628e4330109f3ef6fc853 diff --git a/vendor/nim-testutils b/vendor/nim-testutils new file mode 160000 index 00000000..b56a5953 --- /dev/null +++ b/vendor/nim-testutils @@ -0,0 +1 @@ +Subproject commit b56a5953e37fc5117bd6ea6dfa18418c5e112815 diff --git a/vendor/nim-unittest2 b/vendor/nim-unittest2 index 02c49b8a..b178f475 160000 --- a/vendor/nim-unittest2 +++ b/vendor/nim-unittest2 @@ -1 +1 @@ -Subproject commit 02c49b8a994dd3f9eddfaab45262f9b8fa507f8e 
+Subproject commit b178f47527074964f76c395ad0dfc81cf118f379 diff --git a/vendor/nim-websock b/vendor/nim-websock index 7b2ed397..2c3ae313 160000 --- a/vendor/nim-websock +++ b/vendor/nim-websock @@ -1 +1 @@ -Subproject commit 7b2ed397d6e4c37ea4df08ae82aeac7ff04cd180 +Subproject commit 2c3ae3137f3c9cb48134285bd4a47186fa51f0e8 diff --git a/vendor/nim-zlib b/vendor/nim-zlib index 74cdeb54..f34ca261 160000 --- a/vendor/nim-zlib +++ b/vendor/nim-zlib @@ -1 +1 @@ -Subproject commit 74cdeb54b21bededb5a515d36f608bc1850555a2 +Subproject commit f34ca261efd90f118dc1647beefd2f7a69b05d93 diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system index 1cf6a1b1..fe9bc3f3 160000 --- a/vendor/nimbus-build-system +++ b/vendor/nimbus-build-system @@ -1 +1 @@ -Subproject commit 1cf6a1b18ca5aa0d24e7a2861dd85d79ad9cb0cd +Subproject commit fe9bc3f3759ae1add6bf8c899db2e75327f03782 diff --git a/vendor/nimcrypto b/vendor/nimcrypto index a5742a9a..24e006df 160000 --- a/vendor/nimcrypto +++ b/vendor/nimcrypto @@ -1 +1 @@ -Subproject commit a5742a9a214ac33f91615f3862c7b099aec43b00 +Subproject commit 24e006df85927f64916e60511620583b11403178 diff --git a/vendor/npeg b/vendor/npeg new file mode 160000 index 00000000..b15a10e3 --- /dev/null +++ b/vendor/npeg @@ -0,0 +1 @@ +Subproject commit b15a10e388b91b898c581dbbcb6a718d46b27d2f diff --git a/vendor/questionable b/vendor/questionable index 30e4184a..1569ef45 160000 --- a/vendor/questionable +++ b/vendor/questionable @@ -1 +1 @@ -Subproject commit 30e4184a99c8c1ba329925912d2c5d4b09acf8cc +Subproject commit 1569ef4526d118c1bd1c31d8882eb9de6193a096 diff --git a/vendor/stint b/vendor/stint index 036c71d0..86621ece 160000 --- a/vendor/stint +++ b/vendor/stint @@ -1 +1 @@ -Subproject commit 036c71d06a6b22f8f967ba9d54afd2189c3872ca +Subproject commit 86621eced1dcfb5e25903019ebcfc76ed9128ec5 diff --git a/vendor/urls.rules b/vendor/urls.rules new file mode 100644 index 00000000..7636ff34 --- /dev/null +++ b/vendor/urls.rules @@ -0,0 +1,8 @@ 
+https://github.com/status-im/nim-libp2p-dht.git -> https://github.com/codex-storage/nim-codex-dht.git +https://github.com/markspanbroek/questionable -> https://github.com/codex-storage/questionable +https://github.com/status-im/questionable -> https://github.com/codex-storage/questionable +https://github.com/status-im/asynctest -> https://github.com/codex-storage/asynctest +https://github.com/status-im/nim-datastore -> https://github.com/codex-storage/nim-datastore +https://github.com/cheatfate/nimcrypto -> https://github.com/status-im/nimcrypto +protobufserialization -> protobuf_serialization +protobufserialization -> https://github.com/status-im/nim-protobuf-serialization From 39efac1a97c34aff80b9500da0b03d8446d78c26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adam=20Uhl=C3=AD=C5=99?= Date: Tue, 15 Aug 2023 11:39:49 +0200 Subject: [PATCH 8/9] fix: load slots on sales module start (#510) --- codex/sales.nim | 4 +- tests/codex/sales/testsales.nim | 132 ++++++++++++++++++++++++++------ 2 files changed, 111 insertions(+), 25 deletions(-) diff --git a/codex/sales.nim b/codex/sales.nim index c025d867..42da851f 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -42,7 +42,7 @@ export stint export reservations logScope: - topics = "sales" + topics = "sales marketplace" type Sales* = ref object @@ -121,6 +121,7 @@ proc mySlots*(sales: Sales): Future[seq[Slot]] {.async.} = let slotIds = await market.mySlots() var slots: seq[Slot] = @[] + info "Loading active slots", slotsCount = len(slots) for slotId in slotIds: if slot =? 
(await market.getActiveSlot(slotId)): slots.add slot @@ -393,6 +394,7 @@ proc unsubscribe(sales: Sales) {.async.} = proc start*(sales: Sales) {.async.} = await sales.startSlotQueue() await sales.subscribe() + await sales.load() proc stop*(sales: Sales) {.async.} = trace "stopping sales" diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index 332e8e25..2f5bc627 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -22,6 +22,95 @@ import ../helpers/eventually import ../examples import ./helpers +asyncchecksuite "Sales - start": + let proof = exampleProof() + + var request: StorageRequest + var sales: Sales + var market: MockMarket + var clock: MockClock + var proving: Proving + var reservations: Reservations + var repo: RepoStore + var queue: SlotQueue + var itemsProcessed: seq[SlotQueueItem] + + setup: + request = StorageRequest( + ask: StorageAsk( + slots: 4, + slotSize: 100.u256, + duration: 60.u256, + reward: 10.u256, + collateral: 200.u256, + ), + content: StorageContent( + cid: "some cid" + ), + expiry: (getTime() + initDuration(hours=1)).toUnix.u256 + ) + + market = MockMarket.new() + clock = MockClock.new() + proving = Proving.new() + let repoDs = SQLiteDatastore.new(Memory).tryGet() + let metaDs = SQLiteDatastore.new(Memory).tryGet() + repo = RepoStore.new(repoDs, metaDs) + await repo.start() + sales = Sales.new(market, clock, proving, repo) + reservations = sales.context.reservations + sales.onStore = proc(request: StorageRequest, + slot: UInt256, + onBatch: BatchProc): Future[?!void] {.async.} = + return success() + queue = sales.context.slotQueue + proving.onProve = proc(slot: Slot): Future[seq[byte]] {.async.} = + return proof + itemsProcessed = @[] + request.expiry = (clock.now() + 42).u256 + + teardown: + await sales.stop() + await repo.stop() + + proc fillSlot(slotIdx: UInt256 = 0.u256) {.async.} = + let address = await market.getSigner() + let slot = MockSlot(requestId: request.id, + 
slotIndex: slotIdx, + proof: proof, + host: address) + market.filled.add slot + market.slotState[slotId(request.id, slotIdx)] = SlotState.Filled + + test "load slots when Sales module starts": + let me = await market.getSigner() + + request.ask.slots = 2 + market.requested = @[request] + market.requestState[request.id] = RequestState.New + + let slot0 = MockSlot(requestId: request.id, + slotIndex: 0.u256, + proof: proof, + host: me) + await fillSlot(slot0.slotIndex) + + let slot1 = MockSlot(requestId: request.id, + slotIndex: 1.u256, + proof: proof, + host: me) + await fillSlot(slot1.slotIndex) + + market.activeSlots[me] = @[request.slotId(0.u256), request.slotId(1.u256)] + market.requested = @[request] + market.activeRequests[me] = @[request.id] + + await sales.start() + + check eventually sales.agents.len == 2 + check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256) + check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256) + asyncchecksuite "Sales": let proof = exampleProof() @@ -58,6 +147,10 @@ asyncchecksuite "Sales": ) market = MockMarket.new() + + let me = await market.getSigner() + market.activeSlots[me] = @[] + clock = MockClock.new() proving = Proving.new() let repoDs = SQLiteDatastore.new(Memory).tryGet() @@ -113,7 +206,7 @@ asyncchecksuite "Sales": check isOk await reservations.reserve(availability) await market.requestStorage(request) let items = SlotQueueItem.init(request) - check eventuallyCheck items.allIt(itemsProcessed.contains(it)) + check eventually items.allIt(itemsProcessed.contains(it)) test "removes slots from slot queue once RequestCancelled emitted": let request1 = await addRequestToSaturatedQueue() @@ -146,7 +239,7 @@ asyncchecksuite "Sales": market.emitSlotFreed(request.id, 2.u256) let expected = SlotQueueItem.init(request, 2.uint16) - check eventuallyCheck itemsProcessed.contains(expected) + check eventually itemsProcessed.contains(expected) 
test "request slots are not added to the slot queue when no availabilities exist": var itemsProcessed: seq[SlotQueueItem] = @[] @@ -199,7 +292,7 @@ asyncchecksuite "Sales": check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventuallyCheck used + check eventually used test "reduces remaining availability size after download": let blk = bt.Block.example @@ -212,7 +305,7 @@ asyncchecksuite "Sales": return success() check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventuallyCheck getAvailability().?size == success 1.u256 + check eventually getAvailability().?size == success 1.u256 test "ignores download when duration not long enough": availability.duration = request.ask.duration - 1 @@ -265,7 +358,7 @@ asyncchecksuite "Sales": return success() check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventuallyCheck storingRequest == request + check eventually storingRequest == request check storingSlot < request.ask.slots.u256 test "handles errors during state run": @@ -280,7 +373,7 @@ asyncchecksuite "Sales": saleFailed = true check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventuallyCheck saleFailed + check eventually saleFailed test "makes storage available again when data retrieval fails": let error = newException(IOError, "data retrieval failed") @@ -290,7 +383,7 @@ asyncchecksuite "Sales": return failure(error) check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventuallyCheck getAvailability().?used == success false + check eventually getAvailability().?used == success false check getAvailability().?size == success availability.size test "generates proof of storage": @@ -301,7 +394,7 @@ asyncchecksuite "Sales": provingSlot = slot.slotIndex check isOk await reservations.reserve(availability) await market.requestStorage(request) - check 
eventuallyCheck provingRequest == request + check eventually provingRequest == request check provingSlot < request.ask.slots.u256 test "fills a slot": @@ -325,7 +418,7 @@ asyncchecksuite "Sales": soldSlotIndex = slotIndex check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventuallyCheck soldAvailability == availability + check eventually soldAvailability == availability check soldRequest == request check soldSlotIndex < request.ask.slots.u256 @@ -342,7 +435,7 @@ asyncchecksuite "Sales": clearedSlotIndex = slotIndex check isOk await reservations.reserve(availability) await market.requestStorage(request) - check eventuallyCheck clearedRequest == request + check eventually clearedRequest == request check clearedSlotIndex < request.ask.slots.u256 test "makes storage available again when other host fills the slot": @@ -356,7 +449,7 @@ asyncchecksuite "Sales": await market.requestStorage(request) for slotIndex in 0.. agent.data == expected) + check eventually sales.agents.len == 2 + check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256) + check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256) From e8601274b9f7be9a7f50c9e3da178f9f3d1a34bc Mon Sep 17 00:00:00 2001 From: Tomasz Bekas Date: Tue, 15 Aug 2023 13:23:35 +0200 Subject: [PATCH 9/9] Merkle tree construction (#504) * Building a merkle tree * Obtaining merkle proof from a tree --------- Co-authored-by: benbierens --- codex/merkletree/merkletree.nim | 189 ++++++++++++++++++++++ tests/codex/merkletree/testmerkletree.nim | 108 +++++++++++++ tests/codex/testmerkletree.nim | 3 + tests/testCodex.nim | 1 + vendor/questionable | 2 +- 5 files changed, 302 insertions(+), 1 deletion(-) create mode 100644 codex/merkletree/merkletree.nim create mode 100644 tests/codex/merkletree/testmerkletree.nim create mode 100644 tests/codex/testmerkletree.nim diff --git 
a/codex/merkletree/merkletree.nim b/codex/merkletree/merkletree.nim new file mode 100644 index 00000000..6fcb4682 --- /dev/null +++ b/codex/merkletree/merkletree.nim @@ -0,0 +1,189 @@ +## Nim-Codex +## Copyright (c) 2022 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +import std/sequtils +import std/math +import std/bitops +import std/sugar + +import pkg/libp2p +import pkg/stew/byteutils +import pkg/questionable +import pkg/questionable/results + +type + MerkleHash* = MultiHash + MerkleTree* = object + leavesCount: int + nodes: seq[MerkleHash] + MerkleProof* = object + index: int + path: seq[MerkleHash] + +# Tree constructed from leaves H0..H2 is +# H5=H(H3 & H4) +# / \ +# H3=H(H0 & H1) H4=H(H2 & H2) +# / \ / +# H0=H(A) H1=H(B) H2=H(C) +# | | | +# A B C +# +# Memory layout is [H0, H1, H2, H3, H4, H5] +# +# Proofs of inclusion are +# - [H1, H4] for A +# - [H0, H4] for B +# - [H2, H3] for C + + +func computeTreeHeight(leavesCount: int): int = + if isPowerOfTwo(leavesCount): + fastLog2(leavesCount) + 1 + else: + fastLog2(leavesCount) + 2 + +func getLowHigh(leavesCount, level: int): (int, int) = + var width = leavesCount + var low = 0 + for _ in 0..= self.leavesCount or index < 0: + return failure("Index " & $index & " out of range [0.." & $self.leaves.high & "]" ) + + var path = newSeq[MerkleHash](self.height - 1) + for level in 0.. newException(CatchableError, "Error calculating hash using codec " & $mcodec & ": " & $c) + ) + + # copy leaves + for i in 0..