diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000..ecfa5d7d --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# Formatted with nph v0.6.1-0-g0d8000e +e5df8c50d3b6e70e6eec1ff031657d2b7bb6fe63 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9b865de6..d660a029 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,7 +24,7 @@ jobs: steps: - name: Compute matrix id: matrix - uses: fabiocaccamo/create-matrix-action@v4 + uses: fabiocaccamo/create-matrix-action@v5 with: matrix: | os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} @@ -47,6 +47,19 @@ jobs: matrix: ${{ needs.matrix.outputs.matrix }} cache_nonce: ${{ needs.matrix.outputs.cache_nonce }} + linting: + runs-on: ubuntu-latest + if: github.event_name == 'pull_request' + steps: + - uses: actions/checkout@v4 + - name: Check `nph` formatting + uses: arnetheduck/nph-action@v1 + with: + version: 0.6.1 + options: "codex/ tests/" + fail: true + suggest: true + coverage: # Force to stick to ubuntu 20.04 for coverage because # lcov was updated to 2.x version in ubuntu-latest diff --git a/.github/workflows/docker-reusable.yml b/.github/workflows/docker-reusable.yml index 14e2222c..f0e46d95 100644 --- a/.github/workflows/docker-reusable.yml +++ b/.github/workflows/docker-reusable.yml @@ -98,7 +98,7 @@ jobs: - target: os: linux arch: arm64 - builder: buildjet-8vcpu-ubuntu-2204-arm + builder: ubuntu-22.04-arm name: Build ${{ matrix.target.os }}/${{ matrix.target.arch }} runs-on: ${{ matrix.builder }} diff --git a/.github/workflows/nim-matrix.yml b/.github/workflows/nim-matrix.yml index 4cddc971..4d86d3bb 100644 --- a/.github/workflows/nim-matrix.yml +++ b/.github/workflows/nim-matrix.yml @@ -17,7 +17,7 @@ jobs: steps: - name: Compute matrix id: matrix - uses: fabiocaccamo/create-matrix-action@v4 + uses: fabiocaccamo/create-matrix-action@v5 with: matrix: | os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index dd42f740..19170528 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -25,14 +25,14 @@ jobs: steps: - name: Compute matrix id: matrix - uses: fabiocaccamo/create-matrix-action@v4 + uses: fabiocaccamo/create-matrix-action@v5 with: matrix: | - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {linux}, cpu {arm64}, builder {buildjet-4vcpu-ubuntu-2204-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2} + os {linux}, cpu {amd64}, builder {ubuntu-20.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e 
-o pipefail} + os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2} # Build build: diff --git a/.gitignore b/.gitignore index 1b8885e0..0e1f27db 100644 --- a/.gitignore +++ b/.gitignore @@ -5,9 +5,13 @@ !LICENSE* !Makefile +!Jenkinsfile nimcache/ +# Executables built with nix shall be stored in the result/ directory +result/ + # Executables shall be put in an ignored build/ directory build/ diff --git a/.gitmodules b/.gitmodules index 6842ddea..ece88749 100644 --- a/.gitmodules +++ b/.gitmodules @@ -218,3 +218,6 @@ [submodule "vendor/nim-zippy"] path = vendor/nim-zippy url = https://github.com/status-im/nim-zippy.git +[submodule "vendor/nph"] + path = vendor/nph + url = https://github.com/arnetheduck/nph.git diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 00000000..c7e54c92 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,37 @@ +#!/usr/bin/env groovy +library 'status-jenkins-lib@v1.9.13' + +pipeline { + agent { label 'linux && x86_64 && nix-2.24' } + + options { + disableConcurrentBuilds() + /* manage how many builds we keep */ + buildDiscarder(logRotator( + numToKeepStr: '20', + daysToKeepStr: '30', + )) + } + + stages { + stage('Build') { + steps { + script { + nix.flake("default") + } + } + } + + stage('Check') { + steps { + script { + sh './result/bin/codex --version' + } + } + } + } + + post { + cleanup { cleanWs() } + } +} diff --git a/Makefile b/Makefile index 22cb2b31..3dfe8e7e 100644 --- a/Makefile +++ b/Makefile @@ -17,6 +17,7 @@ # version pinned by nimbus-build-system. #PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21 PINNED_NIM_VERSION := v2.0.14 + ifeq ($(NIM_COMMIT),) NIM_COMMIT := $(PINNED_NIM_VERSION) else ifeq ($(NIM_COMMIT),pinned) @@ -199,4 +200,42 @@ ifneq ($(USE_LIBBACKTRACE), 0) + $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT) endif +############ +## Format ## +############ +.PHONY: build-nph install-nph-hook clean-nph print-nph-path + +# The default location for the nph binary is next to the nim binary, to make it available on the path. 
+NPH:=$(shell dirname $(NIM_BINARY))/nph + +build-nph: +ifeq ("$(wildcard $(NPH))","") + $(ENV_SCRIPT) nim c vendor/nph/src/nph.nim && \ + mv vendor/nph/src/nph $(shell dirname $(NPH)) + echo "nph utility is available at " $(NPH) +endif + +GIT_PRE_COMMIT_HOOK := .git/hooks/pre-commit + +install-nph-hook: build-nph +ifeq ("$(wildcard $(GIT_PRE_COMMIT_HOOK))","") + cp ./tools/scripts/git_pre_commit_format.sh $(GIT_PRE_COMMIT_HOOK) +else + echo "$(GIT_PRE_COMMIT_HOOK) already present, will NOT overwrite" + exit 1 +endif + +nph/%: build-nph + echo -e $(FORMAT_MSG) "nph/$*" && \ + $(NPH) $* + +clean-nph: + rm -f $(NPH) + +# To avoid hardcoding the nph binary location in several places +print-nph-path: + echo "$(NPH)" + +clean: | clean-nph + endif # "variables.mk" was not included diff --git a/README.md b/README.md index e1fb1e25..d073057f 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,7 @@ Run the client with: ```bash build/codex ``` + ## Configuration It is possible to configure a Codex node in several ways: @@ -51,3 +52,23 @@ To get acquainted with Codex, consider: ## API The client exposes a REST API that can be used to interact with the clients. Overview of the API can be found on [api.codex.storage](https://api.codex.storage). + +## Contributing and development + +Feel free to dive in; contributions are welcome! Open an issue or submit a PR. + +### Linting and formatting + +`nim-codex` uses [nph](https://github.com/arnetheduck/nph) to format its code, and adherence to its styling is required. +On a fresh setup, run `make build-nph` to build the `nph` binary. +To format files, run `make nph/<file-or-dir>`, as shown in the example below. +If you want, you can install a Git pre-commit hook with `make install-nph-hook`, which will format modified files prior to committing them. +If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension, you can enable "Format On Save" (i.e. the `nim.formatOnSave` property) to format files with `nph`. 
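+
+For example, to build `nph` and format the `codex` and `tests` directories (the same paths the CI lint job checks):
+
+```bash
+make build-nph   # one-time: builds the nph binary next to the nim binary
+make nph/codex   # formats everything under codex/
+make nph/tests   # formats everything under tests/
+```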
\ No newline at end of file diff --git a/codex/blockexchange.nim b/codex/blockexchange.nim index 1c90ae4d..ff33f406 100644 --- a/codex/blockexchange.nim +++ b/codex/blockexchange.nim @@ -1,10 +1,5 @@ -import ./blockexchange/[ - network, - engine, - peers] +import ./blockexchange/[network, engine, peers] -import ./blockexchange/protobuf/[ - blockexc, - presence] +import ./blockexchange/protobuf/[blockexc, presence] export network, engine, blockexc, presence, peers diff --git a/codex/blockexchange/engine/advertiser.nim b/codex/blockexchange/engine/advertiser.nim index 20baaf58..f5f28bc1 100644 --- a/codex/blockexchange/engine/advertiser.nim +++ b/codex/blockexchange/engine/advertiser.nim @@ -34,20 +34,19 @@ const DefaultConcurrentAdvertRequests = 10 DefaultAdvertiseLoopSleep = 30.minutes -type - Advertiser* = ref object of RootObj - localStore*: BlockStore # Local block store for this instance - discovery*: Discovery # Discovery interface +type Advertiser* = ref object of RootObj + localStore*: BlockStore # Local block store for this instance + discovery*: Discovery # Discovery interface - advertiserRunning*: bool # Indicates if discovery is running - concurrentAdvReqs: int # Concurrent advertise requests + advertiserRunning*: bool # Indicates if discovery is running + concurrentAdvReqs: int # Concurrent advertise requests - advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle - advertiseQueue*: AsyncQueue[Cid] # Advertise queue - trackedFutures*: TrackedFutures # Advertise tasks futures + advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle + advertiseQueue*: AsyncQueue[Cid] # Advertise queue + trackedFutures*: TrackedFutures # Advertise tasks futures - advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep - inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests + advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep + inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} = if cid notin b.advertiseQueue: @@ -83,7 +82,6 @@ proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} = trace "Advertiser iterating blocks finished." await sleepAsync(b.advertiseLocalStoreLoopSleep) - except CancelledError: break # do not propagate as advertiseLocalStoreLoop was asyncSpawned except CatchableError as e: @@ -94,20 +92,17 @@ proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} = proc processQueueLoop(b: Advertiser) {.async: (raises: []).} = while b.advertiserRunning: try: - let - cid = await b.advertiseQueue.get() + let cid = await b.advertiseQueue.get() if cid in b.inFlightAdvReqs: continue try: - let - request = b.discovery.provide(cid) + let request = b.discovery.provide(cid) b.inFlightAdvReqs[cid] = request codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64) await request - finally: b.inFlightAdvReqs.del(cid) codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64) @@ -125,7 +120,7 @@ proc start*(b: Advertiser) {.async.} = trace "Advertiser start" - proc onBlock(cid: Cid) {.async.} = + proc onBlock(cid: Cid) {.async.} = await b.advertiseBlock(cid) doAssert(b.localStore.onBlockStored.isNone()) @@ -136,7 +131,7 @@ proc start*(b: Advertiser) {.async.} = return b.advertiserRunning = true - for i in 0.. 
0: peerCtx.cleanPresence(dontWantCids) - let - wantCids = wantList.filterIt( - it in peerHave - ) + let wantCids = wantList.filterIt(it in peerHave) if wantCids.len > 0: trace "Peer has blocks in our wantList", peer, wants = wantCids @@ -246,13 +242,12 @@ proc blockPresenceHandler*( # if none of the connected peers report our wants in their have list, # fire up discovery b.discovery.queueFindBlocksReq( - toSeq(b.pendingBlocks.wantListCids) - .filter do(cid: Cid) -> bool: - not b.peers.anyIt( cid in it.peerHaveCids )) + toSeq(b.pendingBlocks.wantListCids).filter do(cid: Cid) -> bool: + not b.peers.anyIt(cid in it.peerHaveCids) + ) proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = - let - cids = blocksDelivery.mapIt( it.blk.cid ) + let cids = blocksDelivery.mapIt(it.blk.cid) # schedule any new peers to provide blocks to for p in b.peers: @@ -270,14 +265,16 @@ proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.asyn proc cancelBlocks(b: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} = ## Tells neighboring peers that we're no longer interested in a block. - trace "Sending block request cancellations to peers", addrs, peers = b.peers.mapIt($it.id) + trace "Sending block request cancellations to peers", + addrs, peers = b.peers.mapIt($it.id) - let failed = (await allFinished( - b.peers.mapIt( - b.network.request.sendWantCancellations( - peer = it.id, - addresses = addrs)))) - .filterIt(it.failed) + let failed = ( + await allFinished( + b.peers.mapIt( + b.network.request.sendWantCancellations(peer = it.id, addresses = addrs) + ) + ) + ).filterIt(it.failed) if failed.len > 0: warn "Failed to send block request cancellations to peers", peers = failed.len @@ -290,12 +287,13 @@ proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.asy proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} = await b.resolveBlocks( blocks.mapIt( - BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid) - ))) + BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)) + ) + ) -proc payForBlocks(engine: BlockExcEngine, - peer: BlockExcPeerCtx, - blocksDelivery: seq[BlockDelivery]) {.async.} = +proc payForBlocks( + engine: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery] +) {.async.} = let sendPayment = engine.network.request.sendPayment price = peer.price(blocksDelivery.mapIt(it.address)) @@ -304,9 +302,7 @@ proc payForBlocks(engine: BlockExcEngine, trace "Sending payment for blocks", price, len = blocksDelivery.len await sendPayment(peer.id, payment) -proc validateBlockDelivery( - b: BlockExcEngine, - bd: BlockDelivery): ?!void = +proc validateBlockDelivery(b: BlockExcEngine, bd: BlockDelivery): ?!void = if bd.address notin b.pendingBlocks: return failure("Received block is not currently a pending block") @@ -315,33 +311,36 @@ proc validateBlockDelivery( return failure("Missing proof") if proof.index != bd.address.index: - return failure("Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index) + return failure( + "Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index + ) without leaf =? bd.blk.cid.mhash.mapFailure, err: return failure("Unable to get mhash from cid for block, nested err: " & err.msg) without treeRoot =? 
bd.address.treeCid.mhash.mapFailure, err: - return failure("Unable to get mhash from treeCid for block, nested err: " & err.msg) + return + failure("Unable to get mhash from treeCid for block, nested err: " & err.msg) if err =? proof.verify(leaf, treeRoot).errorOption: return failure("Unable to verify proof for block, nested err: " & err.msg) - else: # not leaf if bd.address.cid != bd.blk.cid: - return failure("Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid) + return failure( + "Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid + ) return success() proc blocksDeliveryHandler*( - b: BlockExcEngine, - peer: PeerId, - blocksDelivery: seq[BlockDelivery]) {.async.} = + b: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery] +) {.async.} = trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address)) var validatedBlocksDelivery: seq[BlockDelivery] for bd in blocksDelivery: logScope: - peer = peer + peer = peer address = bd.address if err =? b.validateBlockDelivery(bd).errorOption: @@ -356,12 +355,11 @@ proc blocksDeliveryHandler*( without proof =? bd.proof: error "Proof expected for a leaf block delivery" continue - if err =? (await b.localStore.putCidAndProof( - bd.address.treeCid, - bd.address.index, - bd.blk.cid, - proof)).errorOption: - + if err =? ( + await b.localStore.putCidAndProof( + bd.address.treeCid, bd.address.index, bd.blk.cid, proof + ) + ).errorOption: error "Unable to store proof and cid for a block" continue @@ -370,20 +368,15 @@ proc blocksDeliveryHandler*( await b.resolveBlocks(validatedBlocksDelivery) codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64) - let - peerCtx = b.peers.get(peer) + let peerCtx = b.peers.get(peer) if peerCtx != nil: await b.payForBlocks(peerCtx, blocksDelivery) ## shouldn't we remove them from the want-list instead of this: - peerCtx.cleanPresence(blocksDelivery.mapIt( it.address )) + peerCtx.cleanPresence(blocksDelivery.mapIt(it.address)) -proc wantListHandler*( - b: BlockExcEngine, - peer: PeerId, - wantList: WantList) {.async.} = - let - peerCtx = b.peers.get(peer) +proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.async.} = + let peerCtx = b.peers.get(peer) if peerCtx.isNil: return @@ -393,35 +386,32 @@ proc wantListHandler*( schedulePeer = false for e in wantList.entries: - let - idx = peerCtx.peerWants.findIt(it.address == e.address) + let idx = peerCtx.peerWants.findIt(it.address == e.address) logScope: - peer = peerCtx.id - address = e.address - wantType = $e.wantType + peer = peerCtx.id + address = e.address + wantType = $e.wantType if idx < 0: # Adding new entry to peer wants let have = await e.address in b.localStore - price = @( - b.pricing.get(Pricing(price: 0.u256)) - .price.toBytesBE) + price = @(b.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) if e.wantType == WantType.WantHave: if have: presence.add( BlockPresence( - address: e.address, - `type`: BlockPresenceType.Have, - price: price)) + address: e.address, `type`: BlockPresenceType.Have, price: price + ) + ) else: if e.sendDontHave: presence.add( BlockPresence( - address: e.address, - `type`: BlockPresenceType.DontHave, - price: price)) + address: e.address, `type`: BlockPresenceType.DontHave, price: price + ) + ) peerCtx.peerWants.add(e) codex_block_exchange_want_have_lists_received.inc() @@ -446,31 +436,24 @@ proc wantListHandler*( if not b.scheduleTask(peerCtx): warn "Unable to schedule task for peer", peer -proc accountHandler*( - 
engine: BlockExcEngine, - peer: PeerId, - account: Account) {.async.} = - let - context = engine.peers.get(peer) +proc accountHandler*(engine: BlockExcEngine, peer: PeerId, account: Account) {.async.} = + let context = engine.peers.get(peer) if context.isNil: return context.account = account.some proc paymentHandler*( - engine: BlockExcEngine, - peer: PeerId, - payment: SignedState) {.async.} = + engine: BlockExcEngine, peer: PeerId, payment: SignedState +) {.async.} = trace "Handling payments", peer - without context =? engine.peers.get(peer).option and - account =? context.account: + without context =? engine.peers.get(peer).option and account =? context.account: trace "No context or account for peer", peer return if channel =? context.paymentChannel: - let - sender = account.address + let sender = account.address discard engine.wallet.acceptPayment(channel, Asset, sender, payment) else: context.paymentChannel = engine.wallet.acceptChannel(payment).option @@ -484,19 +467,16 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} = if peer notin b.peers: trace "Setting up new peer", peer - b.peers.add(BlockExcPeerCtx( - id: peer - )) + b.peers.add(BlockExcPeerCtx(id: peer)) trace "Added peer", peers = b.peers.len # broadcast our want list, the other peer will do the same if b.pendingBlocks.wantListLen > 0: trace "Sending our want list to a peer", peer let cids = toSeq(b.pendingBlocks.wantList) - await b.network.request.sendWantList( - peer, cids, full = true) + await b.network.request.sendWantList(peer, cids, full = true) - if address =? b.pricing.?address: + if address =? b.pricing .? address: await b.network.request.sendAccount(peer, Account(address: address)) proc dropPeer*(b: BlockExcEngine, peer: PeerId) = @@ -515,10 +495,8 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = # TODO: There should be all sorts of accounting of # bytes sent/received here - var - wantsBlocks = task.peerWants.filterIt( - it.wantType == WantType.WantBlock and not it.inFlight - ) + var wantsBlocks = + task.peerWants.filterIt(it.wantType == WantType.WantBlock and not it.inFlight) proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) = for peerWant in task.peerWants.mitems: @@ -535,18 +513,20 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = if e.address.leaf: (await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map( (blkAndProof: (Block, CodexProof)) => - BlockDelivery(address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some) + BlockDelivery( + address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some + ) ) else: (await b.localStore.getBlock(e.address)).map( - (blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none) + (blk: Block) => + BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none) ) let blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup)) - blocksDelivery = blocksDeliveryFut - .filterIt(it.completed and it.read.isOk) - .mapIt(it.read.get) + blocksDelivery = + blocksDeliveryFut.filterIt(it.completed and it.read.isOk).mapIt(it.read.get) # All the wants that failed local lookup must be set to not-in-flight again. 
let @@ -555,11 +535,9 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = updateInFlight(failedAddresses, false) if blocksDelivery.len > 0: - trace "Sending blocks to peer", peer = task.id, blocks = (blocksDelivery.mapIt(it.address)) - await b.network.request.sendBlocksDelivery( - task.id, - blocksDelivery - ) + trace "Sending blocks to peer", + peer = task.id, blocks = (blocksDelivery.mapIt(it.address)) + await b.network.request.sendBlocksDelivery(task.id, blocksDelivery) codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64) @@ -572,8 +550,7 @@ proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).} = trace "Starting blockexc task runner" while b.blockexcRunning: try: - let - peerCtx = await b.taskQueue.pop() + let peerCtx = await b.taskQueue.pop() await b.taskHandler(peerCtx) except CancelledError: @@ -599,20 +576,20 @@ proc new*( ## Create new block exchange engine instance ## - let - engine = BlockExcEngine( - localStore: localStore, - peers: peerStore, - pendingBlocks: pendingBlocks, - peersPerRequest: peersPerRequest, - network: network, - wallet: wallet, - concurrentTasks: concurrentTasks, - trackedFutures: TrackedFutures.new(), - taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize), - discovery: discovery, - advertiser: advertiser, - blockFetchTimeout: blockFetchTimeout) + let engine = BlockExcEngine( + localStore: localStore, + peers: peerStore, + pendingBlocks: pendingBlocks, + peersPerRequest: peersPerRequest, + network: network, + wallet: wallet, + concurrentTasks: concurrentTasks, + trackedFutures: TrackedFutures.new(), + taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize), + discovery: discovery, + advertiser: advertiser, + blockFetchTimeout: blockFetchTimeout, + ) proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = if event.kind == PeerEventKind.Joined: @@ -624,19 +601,17 @@ proc new*( network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) - proc blockWantListHandler( - peer: PeerId, - wantList: WantList): Future[void] {.gcsafe.} = + proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} = engine.wantListHandler(peer, wantList) proc blockPresenceHandler( - peer: PeerId, - presence: seq[BlockPresence]): Future[void] {.gcsafe.} = + peer: PeerId, presence: seq[BlockPresence] + ): Future[void] {.gcsafe.} = engine.blockPresenceHandler(peer, presence) proc blocksDeliveryHandler( - peer: PeerId, - blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} = + peer: PeerId, blocksDelivery: seq[BlockDelivery] + ): Future[void] {.gcsafe.} = engine.blocksDeliveryHandler(peer, blocksDelivery) proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} = @@ -650,6 +625,7 @@ proc new*( onBlocksDelivery: blocksDeliveryHandler, onPresence: blockPresenceHandler, onAccount: accountHandler, - onPayment: paymentHandler) + onPayment: paymentHandler, + ) return engine diff --git a/codex/blockexchange/engine/payments.nim b/codex/blockexchange/engine/payments.nim index 35d38e29..88953976 100644 --- a/codex/blockexchange/engine/payments.nim +++ b/codex/blockexchange/engine/payments.nim @@ -15,15 +15,16 @@ import ../peers export nitro export results -push: {.upraises: [].} +push: + {.upraises: [].} const ChainId* = 0.u256 # invalid chain id for now const Asset* = EthAddress.zero # invalid ERC20 asset address for now -const AmountPerChannel = (10'u64^18).u256 # 1 
asset, ERC20 default is 18 decimals +const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals -func openLedgerChannel*(wallet: WalletRef, - hub: EthAddress, - asset: EthAddress): ?!ChannelId = +func openLedgerChannel*( + wallet: WalletRef, hub: EthAddress, asset: EthAddress +): ?!ChannelId = wallet.openLedgerChannel(hub, ChainId, asset, AmountPerChannel) func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId = @@ -36,9 +37,7 @@ func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId = else: failure "no account set for peer" -func pay*(wallet: WalletRef, - peer: BlockExcPeerCtx, - amount: UInt256): ?!SignedState = +func pay*(wallet: WalletRef, peer: BlockExcPeerCtx, amount: UInt256): ?!SignedState = if account =? peer.account: let asset = Asset let receiver = account.address diff --git a/codex/blockexchange/engine/pendingblocks.nim b/codex/blockexchange/engine/pendingblocks.nim index 9c5efc0b..3b69e2d2 100644 --- a/codex/blockexchange/engine/pendingblocks.nim +++ b/codex/blockexchange/engine/pendingblocks.nim @@ -12,7 +12,8 @@ import std/monotimes import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/chronos import pkg/libp2p @@ -25,11 +26,15 @@ import ../../logutils logScope: topics = "codex pendingblocks" -declareGauge(codex_block_exchange_pending_block_requests, "codex blockexchange pending block requests") -declareGauge(codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us") +declareGauge( + codex_block_exchange_pending_block_requests, + "codex blockexchange pending block requests", +) +declareGauge( + codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us" +) -const - DefaultBlockTimeout* = 10.minutes +const DefaultBlockTimeout* = 10.minutes type BlockReq* = object @@ -44,10 +49,11 @@ proc updatePendingBlockGauge(p: PendingBlocksManager) = codex_block_exchange_pending_block_requests.set(p.blocks.len.int64) proc getWantHandle*( - p: PendingBlocksManager, - address: BlockAddress, - timeout = DefaultBlockTimeout, - inFlight = false): Future[Block] {.async.} = + p: PendingBlocksManager, + address: BlockAddress, + timeout = DefaultBlockTimeout, + inFlight = false, +): Future[Block] {.async.} = ## Add an event for a block ## @@ -56,7 +62,8 @@ proc getWantHandle*( p.blocks[address] = BlockReq( handle: newFuture[Block]("pendingBlocks.getWantHandle"), inFlight: inFlight, - startTime: getMonoTime().ticks) + startTime: getMonoTime().ticks, + ) p.updatePendingBlockGauge() return await p.blocks[address].handle.wait(timeout) @@ -72,15 +79,13 @@ proc getWantHandle*( p.updatePendingBlockGauge() proc getWantHandle*( - p: PendingBlocksManager, - cid: Cid, - timeout = DefaultBlockTimeout, - inFlight = false): Future[Block] = + p: PendingBlocksManager, cid: Cid, timeout = DefaultBlockTimeout, inFlight = false +): Future[Block] = p.getWantHandle(BlockAddress.init(cid), timeout, inFlight) proc resolve*( - p: PendingBlocksManager, - blocksDelivery: seq[BlockDelivery]) {.gcsafe, raises: [].} = + p: PendingBlocksManager, blocksDelivery: seq[BlockDelivery] +) {.gcsafe, raises: [].} = ## Resolve pending blocks ## @@ -101,19 +106,14 @@ proc resolve*( else: trace "Block handle already finished", address = bd.address -proc setInFlight*( - p: PendingBlocksManager, - address: BlockAddress, - inFlight = true) = +proc setInFlight*(p: PendingBlocksManager, address: BlockAddress, inFlight = true) = ## Set inflight status for a block ## p.blocks.withValue(address, 
pending): pending[].inFlight = inFlight -proc isInFlight*( - p: PendingBlocksManager, - address: BlockAddress): bool = +proc isInFlight*(p: PendingBlocksManager, address: BlockAddress): bool = ## Check if a block is in flight ## diff --git a/codex/blockexchange/network/network.nim b/codex/blockexchange/network/network.nim index b6195473..ecb72890 100644 --- a/codex/blockexchange/network/network.nim +++ b/codex/blockexchange/network/network.nim @@ -35,8 +35,10 @@ const type WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} - BlocksDeliveryHandler* = proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.} - BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.} + BlocksDeliveryHandler* = + proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.} + BlockPresenceHandler* = + proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.} AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} @@ -54,10 +56,14 @@ type cancel: bool = false, wantType: WantType = WantType.WantHave, full: bool = false, - sendDontHave: bool = false): Future[void] {.gcsafe.} - WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} - BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} - PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} + sendDontHave: bool = false, + ): Future[void] {.gcsafe.} + WantCancellationSender* = + proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} + BlocksDeliverySender* = + proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} + PresenceSender* = + proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} @@ -108,10 +114,7 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = finally: b.inflightSema.release() -proc handleWantList( - b: BlockExcNetwork, - peer: NetworkPeer, - list: WantList) {.async.} = +proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} = ## Handle incoming want list ## @@ -119,14 +122,15 @@ proc handleWantList( await b.handlers.onWantList(peer.id, list) proc sendWantList*( - b: BlockExcNetwork, - id: PeerId, - addresses: seq[BlockAddress], - priority: int32 = 0, - cancel: bool = false, - wantType: WantType = WantType.WantHave, - full: bool = false, - sendDontHave: bool = false): Future[void] = + b: BlockExcNetwork, + id: PeerId, + addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, +): Future[void] = ## Send a want message to peer ## @@ -137,43 +141,41 @@ proc sendWantList*( priority: priority, cancel: cancel, wantType: wantType, - sendDontHave: sendDontHave) ), - full: full) + sendDontHave: sendDontHave, + ) + ), + full: full, + ) b.send(id, Message(wantlist: msg)) proc sendWantCancellations*( - b: BlockExcNetwork, - id: PeerId, - addresses: seq[BlockAddress]): Future[void] {.async.} = + b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress] +): Future[void] {.async.} = ## Informs a remote peer that we're no longer 
interested in a set of blocks ## await b.sendWantList(id = id, addresses = addresses, cancel = true) proc handleBlocksDelivery( - b: BlockExcNetwork, - peer: NetworkPeer, - blocksDelivery: seq[BlockDelivery]) {.async.} = + b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery] +) {.async.} = ## Handle incoming blocks ## if not b.handlers.onBlocksDelivery.isNil: await b.handlers.onBlocksDelivery(peer.id, blocksDelivery) - proc sendBlocksDelivery*( - b: BlockExcNetwork, - id: PeerId, - blocksDelivery: seq[BlockDelivery]): Future[void] = + b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery] +): Future[void] = ## Send blocks to remote ## b.send(id, pb.Message(payload: blocksDelivery)) proc handleBlockPresence( - b: BlockExcNetwork, - peer: NetworkPeer, - presence: seq[BlockPresence]) {.async.} = + b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence] +) {.async.} = ## Handle block presence ## @@ -181,56 +183,44 @@ proc handleBlockPresence( await b.handlers.onPresence(peer.id, presence) proc sendBlockPresence*( - b: BlockExcNetwork, - id: PeerId, - presence: seq[BlockPresence]): Future[void] = + b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence] +): Future[void] = ## Send presence to remote ## b.send(id, Message(blockPresences: @presence)) proc handleAccount( - network: BlockExcNetwork, - peer: NetworkPeer, - account: Account) {.async.} = + network: BlockExcNetwork, peer: NetworkPeer, account: Account +) {.async.} = ## Handle account info ## if not network.handlers.onAccount.isNil: await network.handlers.onAccount(peer.id, account) -proc sendAccount*( - b: BlockExcNetwork, - id: PeerId, - account: Account): Future[void] = +proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] = ## Send account info to remote ## b.send(id, Message(account: AccountMessage.init(account))) -proc sendPayment*( - b: BlockExcNetwork, - id: PeerId, - payment: SignedState): Future[void] = +proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] = ## Send payment to remote ## b.send(id, Message(payment: StateChannelUpdate.init(payment))) proc handlePayment( - network: BlockExcNetwork, - peer: NetworkPeer, - payment: SignedState) {.async.} = + network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState +) {.async.} = ## Handle payment ## if not network.handlers.onPayment.isNil: await network.handlers.onPayment(peer.id, payment) -proc rpcHandler( - b: BlockExcNetwork, - peer: NetworkPeer, - msg: Message) {.raises: [].} = +proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.raises: [].} = ## handle rpc messages ## if msg.wantList.entries.len > 0: @@ -266,7 +256,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = if not isNil(b.getConn): getConn = b.getConn - let rpcHandler = proc (p: NetworkPeer, msg: Message) {.async.} = + let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async.} = b.rpcHandler(p, msg) # create new pubsub peer @@ -316,41 +306,43 @@ method init*(b: BlockExcNetwork) = proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} = let peerId = conn.peerId let blockexcPeer = b.getOrCreatePeer(peerId) - await blockexcPeer.readLoop(conn) # attach read loop + await blockexcPeer.readLoop(conn) # attach read loop b.handler = handle b.codec = Codec proc new*( - T: type BlockExcNetwork, - switch: Switch, - connProvider: ConnProvider = nil, - maxInflight = MaxInflight): BlockExcNetwork = + T: type BlockExcNetwork, + switch: Switch, + 
connProvider: ConnProvider = nil, + maxInflight = MaxInflight, +): BlockExcNetwork = ## Create a new BlockExcNetwork instance ## - let - self = BlockExcNetwork( - switch: switch, - getConn: connProvider, - inflightSema: newAsyncSemaphore(maxInflight)) + let self = BlockExcNetwork( + switch: switch, getConn: connProvider, inflightSema: newAsyncSemaphore(maxInflight) + ) proc sendWantList( - id: PeerId, - cids: seq[BlockAddress], - priority: int32 = 0, - cancel: bool = false, - wantType: WantType = WantType.WantHave, - full: bool = false, - sendDontHave: bool = false): Future[void] {.gcsafe.} = - self.sendWantList( - id, cids, priority, cancel, - wantType, full, sendDontHave) + id: PeerId, + cids: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ): Future[void] {.gcsafe.} = + self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave) - proc sendWantCancellations(id: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} = + proc sendWantCancellations( + id: PeerId, addresses: seq[BlockAddress] + ): Future[void] {.gcsafe.} = self.sendWantCancellations(id, addresses) - proc sendBlocksDelivery(id: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} = + proc sendBlocksDelivery( + id: PeerId, blocksDelivery: seq[BlockDelivery] + ): Future[void] {.gcsafe.} = self.sendBlocksDelivery(id, blocksDelivery) proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} = @@ -368,7 +360,8 @@ proc new*( sendBlocksDelivery: sendBlocksDelivery, sendPresence: sendPresence, sendAccount: sendAccount, - sendPayment: sendPayment) + sendPayment: sendPayment, + ) self.init() return self diff --git a/codex/blockexchange/network/networkpeer.nim b/codex/blockexchange/network/networkpeer.nim index 133d8c7c..90c538ea 100644 --- a/codex/blockexchange/network/networkpeer.nim +++ b/codex/blockexchange/network/networkpeer.nim @@ -8,7 +8,8 @@ ## those terms. 
import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/chronos import pkg/libp2p @@ -33,8 +34,7 @@ type getConn: ConnProvider proc connected*(b: NetworkPeer): bool = - not(isNil(b.sendConn)) and - not(b.sendConn.closed or b.sendConn.atEof) + not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof) proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} = if isNil(conn): @@ -80,15 +80,11 @@ proc broadcast*(b: NetworkPeer, msg: Message) = asyncSpawn sendAwaiter() func new*( - T: type NetworkPeer, - peer: PeerId, - connProvider: ConnProvider, - rpcHandler: RPCHandler): NetworkPeer = + T: type NetworkPeer, + peer: PeerId, + connProvider: ConnProvider, + rpcHandler: RPCHandler, +): NetworkPeer = + doAssert(not isNil(connProvider), "should supply connection provider") - doAssert(not isNil(connProvider), - "should supply connection provider") - - NetworkPeer( - id: peer, - getConn: connProvider, - handler: rpcHandler) + NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler) diff --git a/codex/blockexchange/peers/peercontext.nim b/codex/blockexchange/peers/peercontext.nim index 727676de..7a299b6b 100644 --- a/codex/blockexchange/peers/peercontext.nim +++ b/codex/blockexchange/peers/peercontext.nim @@ -25,15 +25,14 @@ import ../../logutils export payments, nitro -type - BlockExcPeerCtx* = ref object of RootObj - id*: PeerId - blocks*: Table[BlockAddress, Presence] # remote peer have list including price - peerWants*: seq[WantListEntry] # remote peers want lists - exchanged*: int # times peer has exchanged with us - lastExchange*: Moment # last time peer has exchanged with us - account*: ?Account # ethereum account of this peer - paymentChannel*: ?ChannelId # payment channel id +type BlockExcPeerCtx* = ref object of RootObj + id*: PeerId + blocks*: Table[BlockAddress, Presence] # remote peer have list including price + peerWants*: seq[WantListEntry] # remote peers want lists + exchanged*: int # times peer has exchanged with us + lastExchange*: Moment # last time peer has exchanged with us + account*: ?Account # ethereum account of this peer + paymentChannel*: ?ChannelId # payment channel id proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] = toSeq(self.blocks.keys) diff --git a/codex/blockexchange/peers/peerctxstore.nim b/codex/blockexchange/peers/peerctxstore.nim index 4b65d849..7cf167b4 100644 --- a/codex/blockexchange/peers/peerctxstore.nim +++ b/codex/blockexchange/peers/peerctxstore.nim @@ -13,7 +13,8 @@ import std/algorithm import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/chronos import pkg/libp2p @@ -22,7 +23,6 @@ import ../protobuf/blockexc import ../../blocktype import ../../logutils - import ./peercontext export peercontext @@ -32,6 +32,7 @@ logScope: type PeerCtxStore* = ref object of RootObj peers*: OrderedTable[PeerId, BlockExcPeerCtx] + PeersForBlock* = object of RootObj with*: seq[BlockExcPeerCtx] without*: seq[BlockExcPeerCtx] @@ -44,7 +45,7 @@ proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool = ## Convenience method to check for peer precense ## - a.anyIt( it.id == b ) + a.anyIt(it.id == b) func contains*(self: PeerCtxStore, peerId: PeerId): bool = peerId in self.peers @@ -62,21 +63,21 @@ func len*(self: PeerCtxStore): int = self.peers.len func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == address ) ) + toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address)) func 
peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it.cidOrTreeCid == cid ) ) + toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid)) func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it == address ) ) + toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address)) func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.address.cidOrTreeCid == cid ) ) + toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid)) proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock = var res = PeersForBlock() for peer in self: - if peer.peerHave.anyIt( it == address ): + if peer.peerHave.anyIt(it == address): res.with.add(peer) else: res.without.add(peer) diff --git a/codex/blockexchange/protobuf/blockexc.nim b/codex/blockexchange/protobuf/blockexc.nim index 12049853..69868681 100644 --- a/codex/blockexchange/protobuf/blockexc.nim +++ b/codex/blockexchange/protobuf/blockexc.nim @@ -42,7 +42,6 @@ proc `==`*(a: WantListEntry, b: BlockAddress): bool = proc `<`*(a, b: WantListEntry): bool = a.priority < b.priority - proc `==`*(a: BlockPresence, b: BlockAddress): bool = return a.address == b diff --git a/codex/blockexchange/protobuf/message.nim b/codex/blockexchange/protobuf/message.nim index 61488b40..73cb60f1 100644 --- a/codex/blockexchange/protobuf/message.nim +++ b/codex/blockexchange/protobuf/message.nim @@ -20,40 +20,40 @@ const type WantType* = enum - WantBlock = 0, + WantBlock = 0 WantHave = 1 WantListEntry* = object address*: BlockAddress - priority*: int32 # The priority (normalized). default to 1 - cancel*: bool # Whether this revokes an entry - wantType*: WantType # Note: defaults to enum 0, ie Block - sendDontHave*: bool # Note: defaults to false - inFlight*: bool # Whether block sending is in progress. Not serialized. + priority*: int32 # The priority (normalized). default to 1 + cancel*: bool # Whether this revokes an entry + wantType*: WantType # Note: defaults to enum 0, ie Block + sendDontHave*: bool # Note: defaults to false + inFlight*: bool # Whether block sending is in progress. Not serialized. WantList* = object - entries*: seq[WantListEntry] # A list of wantList entries - full*: bool # Whether this is the full wantList. default to false + entries*: seq[WantListEntry] # A list of wantList entries + full*: bool # Whether this is the full wantList. 
default to false BlockDelivery* = object blk*: Block address*: BlockAddress - proof*: ?CodexProof # Present only if `address.leaf` is true + proof*: ?CodexProof # Present only if `address.leaf` is true BlockPresenceType* = enum - Have = 0, + Have = 0 DontHave = 1 BlockPresence* = object address*: BlockAddress `type`*: BlockPresenceType - price*: seq[byte] # Amount of assets to pay for the block (UInt256) + price*: seq[byte] # Amount of assets to pay for the block (UInt256) AccountMessage* = object - address*: seq[byte] # Ethereum address to which payments should be made + address*: seq[byte] # Ethereum address to which payments should be made StateChannelUpdate* = object - update*: seq[byte] # Signed Nitro state, serialized as JSON + update*: seq[byte] # Signed Nitro state, serialized as JSON Message* = object wantList*: WantList @@ -140,7 +140,6 @@ proc protobufEncode*(value: Message): seq[byte] = ipb.finish() ipb.buffer - # # Decoding Message from seq[byte] in Protobuf format # @@ -151,22 +150,22 @@ proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] = field: uint64 cidBuf = newSeq[byte]() - if ? pb.getField(1, field): + if ?pb.getField(1, field): leaf = bool(field) if leaf: var treeCid: Cid index: Natural - if ? pb.getField(2, cidBuf): - treeCid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) - if ? pb.getField(3, field): + if ?pb.getField(2, cidBuf): + treeCid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) + if ?pb.getField(3, field): index = field value = BlockAddress(leaf: true, treeCid: treeCid, index: index) else: var cid: Cid - if ? pb.getField(4, cidBuf): - cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) + if ?pb.getField(4, cidBuf): + cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) value = BlockAddress(leaf: false, cid: cid) ok(value) @@ -176,15 +175,15 @@ proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry] value = WantListEntry() field: uint64 ipb: ProtoBuffer - if ? pb.getField(1, ipb): - value.address = ? BlockAddress.decode(ipb) - if ? pb.getField(2, field): + if ?pb.getField(1, ipb): + value.address = ?BlockAddress.decode(ipb) + if ?pb.getField(2, field): value.priority = int32(field) - if ? pb.getField(3, field): + if ?pb.getField(3, field): value.cancel = bool(field) - if ? pb.getField(4, field): + if ?pb.getField(4, field): value.wantType = WantType(field) - if ? pb.getField(5, field): + if ?pb.getField(5, field): value.sendDontHave = bool(field) ok(value) @@ -193,10 +192,10 @@ proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] = value = WantList() field: uint64 sublist: seq[seq[byte]] - if ? pb.getRepeatedField(1, sublist): + if ?pb.getRepeatedField(1, sublist): for item in sublist: - value.entries.add(? WantListEntry.decode(initProtoBuffer(item))) - if ? pb.getField(2, field): + value.entries.add(?WantListEntry.decode(initProtoBuffer(item))) + if ?pb.getField(2, field): value.full = bool(field) ok(value) @@ -208,17 +207,18 @@ proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery] cid: Cid ipb: ProtoBuffer - if ? pb.getField(1, cidBuf): - cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) - if ? pb.getField(2, dataBuf): - value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob) - if ? pb.getField(3, ipb): - value.address = ? 
BlockAddress.decode(ipb) + if ?pb.getField(1, cidBuf): + cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) + if ?pb.getField(2, dataBuf): + value.blk = + ?Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob) + if ?pb.getField(3, ipb): + value.address = ?BlockAddress.decode(ipb) if value.address.leaf: var proofBuf = newSeq[byte]() - if ? pb.getField(4, proofBuf): - let proof = ? CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob) + if ?pb.getField(4, proofBuf): + let proof = ?CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob) value.proof = proof.some else: value.proof = CodexProof.none @@ -232,23 +232,23 @@ proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence] value = BlockPresence() field: uint64 ipb: ProtoBuffer - if ? pb.getField(1, ipb): - value.address = ? BlockAddress.decode(ipb) - if ? pb.getField(2, field): + if ?pb.getField(1, ipb): + value.address = ?BlockAddress.decode(ipb) + if ?pb.getField(2, field): value.`type` = BlockPresenceType(field) - discard ? pb.getField(3, value.price) + discard ?pb.getField(3, value.price) ok(value) proc decode*(_: type AccountMessage, pb: ProtoBuffer): ProtoResult[AccountMessage] = - var - value = AccountMessage() - discard ? pb.getField(1, value.address) + var value = AccountMessage() + discard ?pb.getField(1, value.address) ok(value) -proc decode*(_: type StateChannelUpdate, pb: ProtoBuffer): ProtoResult[StateChannelUpdate] = - var - value = StateChannelUpdate() - discard ? pb.getField(1, value.update) +proc decode*( + _: type StateChannelUpdate, pb: ProtoBuffer +): ProtoResult[StateChannelUpdate] = + var value = StateChannelUpdate() + discard ?pb.getField(1, value.update) ok(value) proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] = @@ -257,17 +257,19 @@ proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] = pb = initProtoBuffer(msg, maxSize = MaxMessageSize) ipb: ProtoBuffer sublist: seq[seq[byte]] - if ? pb.getField(1, ipb): - value.wantList = ? WantList.decode(ipb) - if ? pb.getRepeatedField(3, sublist): + if ?pb.getField(1, ipb): + value.wantList = ?WantList.decode(ipb) + if ?pb.getRepeatedField(3, sublist): for item in sublist: - value.payload.add(? BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))) - if ? pb.getRepeatedField(4, sublist): + value.payload.add( + ?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)) + ) + if ?pb.getRepeatedField(4, sublist): for item in sublist: - value.blockPresences.add(? BlockPresence.decode(initProtoBuffer(item))) - discard ? pb.getField(5, value.pendingBytes) - if ? pb.getField(6, ipb): - value.account = ? AccountMessage.decode(ipb) - if ? pb.getField(7, ipb): - value.payment = ? 
StateChannelUpdate.decode(ipb) + value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item))) + discard ?pb.getField(5, value.pendingBytes) + if ?pb.getField(6, ipb): + value.account = ?AccountMessage.decode(ipb) + if ?pb.getField(7, ipb): + value.payment = ?StateChannelUpdate.decode(ipb) ok(value) diff --git a/codex/blockexchange/protobuf/payments.nim b/codex/blockexchange/protobuf/payments.nim index 61321d2e..5d010a81 100644 --- a/codex/blockexchange/protobuf/payments.nim +++ b/codex/blockexchange/protobuf/payments.nim @@ -11,11 +11,11 @@ export StateChannelUpdate export stint export nitro -push: {.upraises: [].} +push: + {.upraises: [].} -type - Account* = object - address*: EthAddress +type Account* = object + address*: EthAddress func init*(_: type AccountMessage, account: Account): AccountMessage = AccountMessage(address: @(account.address.toArray)) @@ -24,7 +24,7 @@ func parse(_: type EthAddress, bytes: seq[byte]): ?EthAddress = var address: array[20, byte] if bytes.len != address.len: return EthAddress.none - for i in 0..=` to the data, # use the Cid as a container! - Block( - cid: cid, - data: @data).success + + Block(cid: cid, data: @data).success proc new*( - T: type Block, - cid: Cid, - data: openArray[byte], - verify: bool = true + T: type Block, cid: Cid, data: openArray[byte], verify: bool = true ): ?!Block = ## creates a new block for both storage and network IO ## if verify: let - mhash = ? cid.mhash.mapFailure - computedMhash = ? MultiHash.digest($mhash.mcodec, data).mapFailure - computedCid = ? Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure + mhash = ?cid.mhash.mapFailure + computedMhash = ?MultiHash.digest($mhash.mcodec, data).mapFailure + computedCid = ?Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure if computedCid != cid: return "Cid doesn't match the data".failure - return Block( - cid: cid, - data: @data - ).success + return Block(cid: cid, data: @data).success proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block = - emptyCid(version, hcodec, BlockCodec) - .flatMap((cid: Cid) => Block.new(cid = cid, data = @[])) + emptyCid(version, hcodec, BlockCodec).flatMap( + (cid: Cid) => Block.new(cid = cid, data = @[]) + ) proc emptyBlock*(cid: Cid): ?!Block = - cid.mhash.mapFailure.flatMap((mhash: MultiHash) => - emptyBlock(cid.cidver, mhash.mcodec)) + cid.mhash.mapFailure.flatMap( + (mhash: MultiHash) => emptyBlock(cid.cidver, mhash.mcodec) + ) proc isEmpty*(cid: Cid): bool = - success(cid) == cid.mhash.mapFailure.flatMap((mhash: MultiHash) => - emptyCid(cid.cidver, mhash.mcodec, cid.mcodec)) + success(cid) == + cid.mhash.mapFailure.flatMap( + (mhash: MultiHash) => emptyCid(cid.cidver, mhash.mcodec, cid.mcodec) + ) proc isEmpty*(blk: Block): bool = blk.cid.isEmpty diff --git a/codex/chunker.nim b/codex/chunker.nim index ad256538..f735aa4b 100644 --- a/codex/chunker.nim +++ b/codex/chunker.nim @@ -11,7 +11,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/questionable import pkg/questionable/results @@ -23,8 +24,7 @@ import ./logutils export blocktype -const - DefaultChunkSize* = DefaultBlockSize +const DefaultChunkSize* = DefaultBlockSize type # default reader type @@ -33,10 +33,10 @@ type # Reader that splits input data into fixed-size chunks Chunker* = ref object - reader*: Reader # Procedure called to actually read the data - offset*: int # Bytes read so far (position in the stream) - chunkSize*: NBytes # Size of each chunk - pad*: bool # Pad last chunk to chunkSize? 
+ reader*: Reader # Procedure called to actually read the data + offset*: int # Bytes read so far (position in the stream) + chunkSize*: NBytes # Size of each chunk + pad*: bool # Pad last chunk to chunkSize? FileChunker* = Chunker LPStreamChunker* = Chunker @@ -60,30 +60,21 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} = return move buff proc new*( - T: type Chunker, - reader: Reader, - chunkSize = DefaultChunkSize, - pad = true + T: type Chunker, reader: Reader, chunkSize = DefaultChunkSize, pad = true ): Chunker = ## create a new Chunker instance ## - Chunker( - reader: reader, - offset: 0, - chunkSize: chunkSize, - pad: pad) + Chunker(reader: reader, offset: 0, chunkSize: chunkSize, pad: pad) proc new*( - T: type LPStreamChunker, - stream: LPStream, - chunkSize = DefaultChunkSize, - pad = true + T: type LPStreamChunker, stream: LPStream, chunkSize = DefaultChunkSize, pad = true ): LPStreamChunker = ## create the default File chunker ## - proc reader(data: ChunkBuffer, len: int): Future[int] - {.gcsafe, async, raises: [Defect].} = + proc reader( + data: ChunkBuffer, len: int + ): Future[int] {.gcsafe, async, raises: [Defect].} = var res = 0 try: while res < len: @@ -101,22 +92,17 @@ proc new*( return res - LPStreamChunker.new( - reader = reader, - chunkSize = chunkSize, - pad = pad) + LPStreamChunker.new(reader = reader, chunkSize = chunkSize, pad = pad) proc new*( - T: type FileChunker, - file: File, - chunkSize = DefaultChunkSize, - pad = true + T: type FileChunker, file: File, chunkSize = DefaultChunkSize, pad = true ): FileChunker = ## create the default File chunker ## - proc reader(data: ChunkBuffer, len: int): Future[int] - {.gcsafe, async, raises: [Defect].} = + proc reader( + data: ChunkBuffer, len: int + ): Future[int] {.gcsafe, async, raises: [Defect].} = var total = 0 try: while total < len: @@ -135,7 +121,4 @@ proc new*( return total - FileChunker.new( - reader = reader, - chunkSize = chunkSize, - pad = pad) + FileChunker.new(reader = reader, chunkSize = chunkSize, pad = pad) diff --git a/codex/clock.nim b/codex/clock.nim index 933cd199..98db22f7 100644 --- a/codex/clock.nim +++ b/codex/clock.nim @@ -20,9 +20,9 @@ method start*(clock: Clock) {.base, async.} = method stop*(clock: Clock) {.base, async.} = discard -proc withTimeout*(future: Future[void], - clock: Clock, - expiry: SecondsSince1970) {.async.} = +proc withTimeout*( + future: Future[void], clock: Clock, expiry: SecondsSince1970 +) {.async.} = let timeout = clock.waitUntil(expiry) try: await future or timeout diff --git a/codex/codex.nim b/codex/codex.nim index b22bf3d4..13985254 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -68,8 +68,7 @@ proc waitForSync(provider: Provider): Future[void] {.async.} = inc sleepTime trace "Ethereum provider is synced." -proc bootstrapInteractions( - s: CodexServer): Future[void] {.async.} = +proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} = ## bootstrap interactions and return contracts ## using clients, hosts, validators pairings ## @@ -137,12 +136,12 @@ proc bootstrapInteractions( host = some HostInteractions.new(clock, sales) if config.validator: - without validationConfig =? ValidationConfig.init( - config.validatorMaxSlots, - config.validatorGroups, - config.validatorGroupIndex), err: - error "Invalid validation parameters", err = err.msg - quit QuitFailure + without validationConfig =? 
+ ValidationConfig.init( + config.validatorMaxSlots, config.validatorGroups, config.validatorGroupIndex + ), err: + error "Invalid validation parameters", err = err.msg + quit QuitFailure let validation = Validation.new(clock, market, validationConfig) validator = some ValidatorInteractions.new(clock, validation) @@ -156,10 +155,9 @@ proc start*(s: CodexServer) {.async.} = await s.codexNode.switch.start() - let (announceAddrs,discoveryAddrs)= nattedAddress( - s.config.nat, - s.codexNode.switch.peerInfo.addrs, - s.config.discoveryPort) + let (announceAddrs, discoveryAddrs) = nattedAddress( + s.config.nat, s.codexNode.switch.peerInfo.addrs, s.config.discoveryPort + ) s.codexNode.discovery.updateAnnounceRecord(announceAddrs) s.codexNode.discovery.updateDhtRecord(discoveryAddrs) @@ -176,15 +174,14 @@ proc stop*(s: CodexServer) {.async.} = s.codexNode.switch.stop(), s.codexNode.stop(), s.repoStore.stop(), - s.maintenance.stop()) + s.maintenance.stop(), + ) proc new*( - T: type CodexServer, - config: CodexConf, - privateKey: CodexPrivateKey): CodexServer = + T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey +): CodexServer = ## create CodexServer including setting up datastore, repostore, etc - let - switch = SwitchBuilder + let switch = SwitchBuilder .new() .withPrivateKey(privateKey) .withAddresses(config.listenAddrs) @@ -197,80 +194,107 @@ proc new*( .withTcpTransport({ServerFlags.ReuseAddr}) .build() - var - cache: CacheStore = nil + var cache: CacheStore = nil if config.cacheSize > 0'nb: cache = CacheStore.new(cacheSize = config.cacheSize) ## Is unused? - let - discoveryDir = config.dataDir / CodexDhtNamespace + let discoveryDir = config.dataDir / CodexDhtNamespace if io2.createPath(discoveryDir).isErr: - trace "Unable to create discovery directory for block store", discoveryDir = discoveryDir + trace "Unable to create discovery directory for block store", + discoveryDir = discoveryDir raise (ref Defect)( - msg: "Unable to create discovery directory for block store: " & discoveryDir) + msg: "Unable to create discovery directory for block store: " & discoveryDir + ) let discoveryStore = Datastore( - LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace) - .expect("Should create discovery datastore!")) + LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace).expect( + "Should create discovery datastore!" + ) + ) discovery = Discovery.new( switch.peerInfo.privateKey, announceAddrs = config.listenAddrs, bindPort = config.discoveryPort, bootstrapNodes = config.bootstrapNodes, - store = discoveryStore) + store = discoveryStore, + ) wallet = WalletRef.new(EthPrivateKey.random()) network = BlockExcNetwork.new(switch) - repoData = case config.repoKind - of repoFS: Datastore(FSDatastore.new($config.dataDir, depth = 5) - .expect("Should create repo file data store!")) - of repoSQLite: Datastore(SQLiteDatastore.new($config.dataDir) - .expect("Should create repo SQLite data store!")) - of repoLevelDb: Datastore(LevelDbDatastore.new($config.dataDir) - .expect("Should create repo LevelDB data store!")) + repoData = + case config.repoKind + of repoFS: + Datastore( + FSDatastore.new($config.dataDir, depth = 5).expect( + "Should create repo file data store!" + ) + ) + of repoSQLite: + Datastore( + SQLiteDatastore.new($config.dataDir).expect( + "Should create repo SQLite data store!" + ) + ) + of repoLevelDb: + Datastore( + LevelDbDatastore.new($config.dataDir).expect( + "Should create repo LevelDB data store!" 
+ ) + ) repoStore = RepoStore.new( repoDs = repoData, - metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace) - .expect("Should create metadata store!"), + metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace).expect( + "Should create metadata store!" + ), quotaMaxBytes = config.storageQuota, - blockTtl = config.blockTtl) + blockTtl = config.blockTtl, + ) maintenance = BlockMaintainer.new( repoStore, interval = config.blockMaintenanceInterval, - numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks) + numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks, + ) peerStore = PeerCtxStore.new() pendingBlocks = PendingBlocksManager.new() advertiser = Advertiser.new(repoStore, discovery) - blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks) - engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks) + blockDiscovery = + DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks) + engine = BlockExcEngine.new( + repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks + ) store = NetworkStore.new(engine, repoStore) - prover = if config.prover: - let backend = config.initializeBackend().expect("Unable to create prover backend.") - some Prover.new(store, backend, config.numProofSamples) - else: - none Prover + prover = + if config.prover: + let backend = + config.initializeBackend().expect("Unable to create prover backend.") + some Prover.new(store, backend, config.numProofSamples) + else: + none Prover codexNode = CodexNodeRef.new( switch = switch, networkStore = store, engine = engine, discovery = discovery, - prover = prover) + prover = prover, + ) - restServer = RestServerRef.new( - codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin), - initTAddress(config.apiBindAddress , config.apiPort), - bufferSize = (1024 * 64), - maxRequestBodySize = int.high) + restServer = RestServerRef + .new( + codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin), + initTAddress(config.apiBindAddress, config.apiPort), + bufferSize = (1024 * 64), + maxRequestBodySize = int.high, + ) .expect("Should start rest server!") switch.mount(network) @@ -280,4 +304,5 @@ proc new*( codexNode: codexNode, restServer: restServer, repoStore: repoStore, - maintenance: maintenance) + maintenance: maintenance, + ) diff --git a/codex/codextypes.nim b/codex/codextypes.nim index 2fd15d1e..274b9be0 100644 --- a/codex/codextypes.nim +++ b/codex/codextypes.nim @@ -25,15 +25,15 @@ export tables const # Size of blocks for storage / network exchange, - DefaultBlockSize* = NBytes 1024*64 + DefaultBlockSize* = NBytes 1024 * 64 DefaultCellSize* = NBytes 2048 # Proving defaults - DefaultMaxSlotDepth* = 32 + DefaultMaxSlotDepth* = 32 DefaultMaxDatasetDepth* = 8 - DefaultBlockDepth* = 5 - DefaultCellElms* = 67 - DefaultSamplesNum* = 5 + DefaultBlockDepth* = 5 + DefaultCellElms* = 67 + DefaultSamplesNum* = 5 # hashes Sha256HashCodec* = multiCodec("sha2-256") @@ -48,18 +48,10 @@ const SlotProvingRootCodec* = multiCodec("codex-proving-root") CodexSlotCellCodec* = multiCodec("codex-slot-cell") - CodexHashesCodecs* = [ - Sha256HashCodec, - Pos2Bn128SpngCodec, - Pos2Bn128MrklCodec - ] + CodexHashesCodecs* = [Sha256HashCodec, Pos2Bn128SpngCodec, Pos2Bn128MrklCodec] CodexPrimitivesCodecs* = [ - ManifestCodec, - DatasetRootCodec, - BlockCodec, - SlotRootCodec, - SlotProvingRootCodec, + ManifestCodec, DatasetRootCodec, BlockCodec, SlotRootCodec, 
SlotProvingRootCodec, CodexSlotCellCodec, ] @@ -74,40 +66,34 @@ proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] = let emptyData: seq[byte] = @[] PadHashes = { - Sha256HashCodec: ? MultiHash.digest($Sha256HashCodec, emptyData).mapFailure, - Sha512HashCodec: ? MultiHash.digest($Sha512HashCodec, emptyData).mapFailure, + Sha256HashCodec: ?MultiHash.digest($Sha256HashCodec, emptyData).mapFailure, + Sha512HashCodec: ?MultiHash.digest($Sha512HashCodec, emptyData).mapFailure, }.toTable - var - table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]() + var table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]() for hcodec, mhash in PadHashes.pairs: - table[(CIDv1, hcodec, BlockCodec)] = ? Cid.init(CIDv1, BlockCodec, mhash).mapFailure + table[(CIDv1, hcodec, BlockCodec)] = ?Cid.init(CIDv1, BlockCodec, mhash).mapFailure success table -proc emptyCid*( - version: CidVersion, - hcodec: MultiCodec, - dcodec: MultiCodec): ?!Cid = +proc emptyCid*(version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec): ?!Cid = ## Returns cid representing empty content, ## given cid version, hash codec and data codec ## - var - table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid] + var table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid] once: - table = ? initEmptyCidTable() + table = ?initEmptyCidTable() table[(version, hcodec, dcodec)].catch proc emptyDigest*( - version: CidVersion, - hcodec: MultiCodec, - dcodec: MultiCodec): ?!MultiHash = + version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec +): ?!MultiHash = ## Returns hash representing empty content, ## given cid version, hash codec and data codec ## - emptyCid(version, hcodec, dcodec) - .flatMap((cid: Cid) => cid.mhash.mapFailure) + + emptyCid(version, hcodec, dcodec).flatMap((cid: Cid) => cid.mhash.mapFailure) diff --git a/codex/conf.nim b/codex/conf.nim index 41ee628e..6d47f8f4 100644 --- a/codex/conf.nim +++ b/codex/conf.nim @@ -50,18 +50,17 @@ export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig export ValidationGroups, MaxSlots export - DefaultQuotaBytes, - DefaultBlockTtl, - DefaultBlockMaintenanceInterval, + DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockMaintenanceInterval, DefaultNumberOfBlocksToMaintainPerInterval proc defaultDataDir*(): string = - let dataDir = when defined(windows): - "AppData" / "Roaming" / "Codex" - elif defined(macosx): - "Library" / "Application Support" / "Codex" - else: - ".cache" / "codex" + let dataDir = + when defined(windows): + "AppData" / "Roaming" / "Codex" + elif defined(macosx): + "Library" / "Application Support" / "Codex" + else: + ".cache" / "codex" getHomeDir() / dataDir @@ -96,320 +95,341 @@ type CodexConf* = object configFile* {. - desc: "Loads the configuration from a TOML file" - defaultValueDesc: "none" - defaultValue: InputFile.none - name: "config-file" }: Option[InputFile] + desc: "Loads the configuration from a TOML file", + defaultValueDesc: "none", + defaultValue: InputFile.none, + name: "config-file" + .}: Option[InputFile] - logLevel* {. - defaultValue: "info" - desc: "Sets the log level", - name: "log-level" }: string + logLevel* {.defaultValue: "info", desc: "Sets the log level", name: "log-level".}: + string logFormat* {. 
- desc: "Specifies what kind of logs should be written to stdout (auto, " & - "colors, nocolors, json)" - defaultValueDesc: "auto" - defaultValue: LogKind.Auto - name: "log-format" }: LogKind + desc: + "Specifies what kind of logs should be written to stdout (auto, " & + "colors, nocolors, json)", + defaultValueDesc: "auto", + defaultValue: LogKind.Auto, + name: "log-format" + .}: LogKind metricsEnabled* {. - desc: "Enable the metrics server" - defaultValue: false - name: "metrics" }: bool + desc: "Enable the metrics server", defaultValue: false, name: "metrics" + .}: bool metricsAddress* {. - desc: "Listening address of the metrics server" - defaultValue: defaultAddress(config) - defaultValueDesc: "127.0.0.1" - name: "metrics-address" }: IpAddress + desc: "Listening address of the metrics server", + defaultValue: defaultAddress(config), + defaultValueDesc: "127.0.0.1", + name: "metrics-address" + .}: IpAddress metricsPort* {. - desc: "Listening HTTP port of the metrics server" - defaultValue: 8008 - name: "metrics-port" }: Port + desc: "Listening HTTP port of the metrics server", + defaultValue: 8008, + name: "metrics-port" + .}: Port dataDir* {. - desc: "The directory where codex will store configuration and data" - defaultValue: DefaultDataDir - defaultValueDesc: $DefaultDataDir - abbr: "d" - name: "data-dir" }: OutDir + desc: "The directory where codex will store configuration and data", + defaultValue: DefaultDataDir, + defaultValueDesc: $DefaultDataDir, + abbr: "d", + name: "data-dir" + .}: OutDir listenAddrs* {. - desc: "Multi Addresses to listen on" - defaultValue: @[ - MultiAddress.init("/ip4/0.0.0.0/tcp/0") - .expect("Should init multiaddress")] - defaultValueDesc: "/ip4/0.0.0.0/tcp/0" - abbr: "i" - name: "listen-addrs" }: seq[MultiAddress] + desc: "Multi Addresses to listen on", + defaultValue: + @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").expect("Should init multiaddress")], + defaultValueDesc: "/ip4/0.0.0.0/tcp/0", + abbr: "i", + name: "listen-addrs" + .}: seq[MultiAddress] nat* {. - desc: "Specify method to use for determining public address. " & - "Must be one of: any, none, upnp, pmp, extip:" - defaultValue: defaultNatConfig() - defaultValueDesc: "any" - name: "nat" }: NatConfig + desc: + "Specify method to use for determining public address. " & + "Must be one of: any, none, upnp, pmp, extip:", + defaultValue: defaultNatConfig(), + defaultValueDesc: "any", + name: "nat" + .}: NatConfig discoveryPort* {. - desc: "Discovery (UDP) port" - defaultValue: 8090.Port - defaultValueDesc: "8090" - abbr: "u" - name: "disc-port" }: Port + desc: "Discovery (UDP) port", + defaultValue: 8090.Port, + defaultValueDesc: "8090", + abbr: "u", + name: "disc-port" + .}: Port netPrivKeyFile* {. - desc: "Source of network (secp256k1) private key file path or name" - defaultValue: "key" - name: "net-privkey" }: string + desc: "Source of network (secp256k1) private key file path or name", + defaultValue: "key", + name: "net-privkey" + .}: string bootstrapNodes* {. - desc: "Specifies one or more bootstrap nodes to use when " & - "connecting to the network" - abbr: "b" - name: "bootstrap-node" }: seq[SignedPeerRecord] + desc: + "Specifies one or more bootstrap nodes to use when " & + "connecting to the network", + abbr: "b", + name: "bootstrap-node" + .}: seq[SignedPeerRecord] maxPeers* {. 
- desc: "The maximum number of peers to connect to" - defaultValue: 160 - name: "max-peers" }: int + desc: "The maximum number of peers to connect to", + defaultValue: 160, + name: "max-peers" + .}: int agentString* {. - defaultValue: "Codex" - desc: "Node agent string which is used as identifier in network" - name: "agent-string" }: string + defaultValue: "Codex", + desc: "Node agent string which is used as identifier in network", + name: "agent-string" + .}: string apiBindAddress* {. - desc: "The REST API bind address" - defaultValue: "127.0.0.1" - name: "api-bindaddr" - }: string + desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr" + .}: string apiPort* {. desc: "The REST Api port", - defaultValue: 8080.Port - defaultValueDesc: "8080" - name: "api-port" - abbr: "p" }: Port + defaultValue: 8080.Port, + defaultValueDesc: "8080", + name: "api-port", + abbr: "p" + .}: Port apiCorsAllowedOrigin* {. - desc: "The REST Api CORS allowed origin for downloading data. " & + desc: + "The REST Api CORS allowed origin for downloading data. " & "'*' will allow all origins, '' will allow none.", - defaultValue: string.none - defaultValueDesc: "Disallow all cross origin requests to download data" - name: "api-cors-origin" }: Option[string] + defaultValue: string.none, + defaultValueDesc: "Disallow all cross origin requests to download data", + name: "api-cors-origin" + .}: Option[string] repoKind* {. - desc: "Backend for main repo store (fs, sqlite, leveldb)" - defaultValueDesc: "fs" - defaultValue: repoFS - name: "repo-kind" }: RepoKind + desc: "Backend for main repo store (fs, sqlite, leveldb)", + defaultValueDesc: "fs", + defaultValue: repoFS, + name: "repo-kind" + .}: RepoKind storageQuota* {. - desc: "The size of the total storage quota dedicated to the node" - defaultValue: DefaultQuotaBytes - defaultValueDesc: $DefaultQuotaBytes - name: "storage-quota" - abbr: "q" }: NBytes + desc: "The size of the total storage quota dedicated to the node", + defaultValue: DefaultQuotaBytes, + defaultValueDesc: $DefaultQuotaBytes, + name: "storage-quota", + abbr: "q" + .}: NBytes blockTtl* {. - desc: "Default block timeout in seconds - 0 disables the ttl" - defaultValue: DefaultBlockTtl - defaultValueDesc: $DefaultBlockTtl - name: "block-ttl" - abbr: "t" }: Duration + desc: "Default block timeout in seconds - 0 disables the ttl", + defaultValue: DefaultBlockTtl, + defaultValueDesc: $DefaultBlockTtl, + name: "block-ttl", + abbr: "t" + .}: Duration blockMaintenanceInterval* {. - desc: "Time interval in seconds - determines frequency of block " & - "maintenance cycle: how often blocks are checked " & - "for expiration and cleanup" - defaultValue: DefaultBlockMaintenanceInterval - defaultValueDesc: $DefaultBlockMaintenanceInterval - name: "block-mi" }: Duration + desc: + "Time interval in seconds - determines frequency of block " & + "maintenance cycle: how often blocks are checked " & "for expiration and cleanup", + defaultValue: DefaultBlockMaintenanceInterval, + defaultValueDesc: $DefaultBlockMaintenanceInterval, + name: "block-mi" + .}: Duration blockMaintenanceNumberOfBlocks* {. 
- desc: "Number of blocks to check every maintenance cycle" - defaultValue: DefaultNumberOfBlocksToMaintainPerInterval - defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval - name: "block-mn" }: int + desc: "Number of blocks to check every maintenance cycle", + defaultValue: DefaultNumberOfBlocksToMaintainPerInterval, + defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval, + name: "block-mn" + .}: int cacheSize* {. - desc: "The size of the block cache, 0 disables the cache - " & - "might help on slow hardrives" - defaultValue: 0 - defaultValueDesc: "0" - name: "cache-size" - abbr: "c" }: NBytes + desc: + "The size of the block cache, 0 disables the cache - " & + "might help on slow hardrives", + defaultValue: 0, + defaultValueDesc: "0", + name: "cache-size", + abbr: "c" + .}: NBytes logFile* {. - desc: "Logs to file" - defaultValue: string.none - name: "log-file" - hidden - .}: Option[string] + desc: "Logs to file", defaultValue: string.none, name: "log-file", hidden + .}: Option[string] - case cmd* {. - defaultValue: noCmd - command }: StartUpCmd + case cmd* {.defaultValue: noCmd, command.}: StartUpCmd of persistence: ethProvider* {. - desc: "The URL of the JSON-RPC API of the Ethereum node" - defaultValue: "ws://localhost:8545" + desc: "The URL of the JSON-RPC API of the Ethereum node", + defaultValue: "ws://localhost:8545", name: "eth-provider" .}: string ethAccount* {. - desc: "The Ethereum account that is used for storage contracts" - defaultValue: EthAddress.none - defaultValueDesc: "" + desc: "The Ethereum account that is used for storage contracts", + defaultValue: EthAddress.none, + defaultValueDesc: "", name: "eth-account" .}: Option[EthAddress] ethPrivateKey* {. - desc: "File containing Ethereum private key for storage contracts" - defaultValue: string.none - defaultValueDesc: "" + desc: "File containing Ethereum private key for storage contracts", + defaultValue: string.none, + defaultValueDesc: "", name: "eth-private-key" .}: Option[string] marketplaceAddress* {. - desc: "Address of deployed Marketplace contract" - defaultValue: EthAddress.none - defaultValueDesc: "" + desc: "Address of deployed Marketplace contract", + defaultValue: EthAddress.none, + defaultValueDesc: "", name: "marketplace-address" .}: Option[EthAddress] # TODO: should go behind a feature flag simulateProofFailures* {. - desc: "Simulates proof failures once every N proofs. 0 = disabled." - defaultValue: 0 - name: "simulate-proof-failures" - hidden - .}: int + desc: "Simulates proof failures once every N proofs. 0 = disabled.", + defaultValue: 0, + name: "simulate-proof-failures", + hidden + .}: int validator* {. - desc: "Enables validator, requires an Ethereum node" - defaultValue: false + desc: "Enables validator, requires an Ethereum node", + defaultValue: false, name: "validator" .}: bool validatorMaxSlots* {. - desc: "Maximum number of slots that the validator monitors" - longDesc: "If set to 0, the validator will not limit " & - "the maximum number of slots it monitors" - defaultValue: 1000 + desc: "Maximum number of slots that the validator monitors", + longDesc: + "If set to 0, the validator will not limit " & + "the maximum number of slots it monitors", + defaultValue: 1000, name: "validator-max-slots" .}: MaxSlots validatorGroups* {. - desc: "Slot validation groups" - longDesc: "A number indicating total number of groups into " & + desc: "Slot validation groups", + longDesc: + "A number indicating total number of groups into " & "which the whole slot id space will be divided. 
" & "The value must be in the range [2, 65535]. " & "If not provided, the validator will observe " & "the whole slot id space and the value of " & "the --validator-group-index parameter will be ignored. " & - "Powers of twos are advised for even distribution" - defaultValue: ValidationGroups.none + "Powers of twos are advised for even distribution", + defaultValue: ValidationGroups.none, name: "validator-groups" .}: Option[ValidationGroups] validatorGroupIndex* {. - desc: "Slot validation group index" - longDesc: "The value provided must be in the range " & + desc: "Slot validation group index", + longDesc: + "The value provided must be in the range " & "[0, validatorGroups). Ignored when --validator-groups " & "is not provided. Only slot ids satisfying condition " & "[(slotId mod validationGroups) == groupIndex] will be " & - "observed by the validator" - defaultValue: 0 + "observed by the validator", + defaultValue: 0, name: "validator-group-index" .}: uint16 rewardRecipient* {. - desc: "Address to send payouts to (eg rewards and refunds)" + desc: "Address to send payouts to (eg rewards and refunds)", name: "reward-recipient" .}: Option[EthAddress] - case persistenceCmd* {. - defaultValue: noCmd - command }: PersistenceCmd - + case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd of PersistenceCmd.prover: circuitDir* {. - desc: "Directory where Codex will store proof circuit data" - defaultValue: DefaultCircuitDir - defaultValueDesc: $DefaultCircuitDir - abbr: "cd" - name: "circuit-dir" }: OutDir + desc: "Directory where Codex will store proof circuit data", + defaultValue: DefaultCircuitDir, + defaultValueDesc: $DefaultCircuitDir, + abbr: "cd", + name: "circuit-dir" + .}: OutDir circomR1cs* {. - desc: "The r1cs file for the storage circuit" - defaultValue: $DefaultCircuitDir / "proof_main.r1cs" - defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs" + desc: "The r1cs file for the storage circuit", + defaultValue: $DefaultCircuitDir / "proof_main.r1cs", + defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs", name: "circom-r1cs" .}: InputFile circomWasm* {. - desc: "The wasm file for the storage circuit" - defaultValue: $DefaultCircuitDir / "proof_main.wasm" - defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm" + desc: "The wasm file for the storage circuit", + defaultValue: $DefaultCircuitDir / "proof_main.wasm", + defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm", name: "circom-wasm" .}: InputFile circomZkey* {. - desc: "The zkey file for the storage circuit" - defaultValue: $DefaultCircuitDir / "proof_main.zkey" - defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey" + desc: "The zkey file for the storage circuit", + defaultValue: $DefaultCircuitDir / "proof_main.zkey", + defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey", name: "circom-zkey" .}: InputFile # TODO: should probably be hidden and behind a feature flag circomNoZkey* {. - desc: "Ignore the zkey file - use only for testing!" - defaultValue: false + desc: "Ignore the zkey file - use only for testing!", + defaultValue: false, name: "circom-no-zkey" .}: bool numProofSamples* {. - desc: "Number of samples to prove" - defaultValue: DefaultSamplesNum - defaultValueDesc: $DefaultSamplesNum - name: "proof-samples" }: int + desc: "Number of samples to prove", + defaultValue: DefaultSamplesNum, + defaultValueDesc: $DefaultSamplesNum, + name: "proof-samples" + .}: int maxSlotDepth* {. 
- desc: "The maximum depth of the slot tree" - defaultValue: DefaultMaxSlotDepth - defaultValueDesc: $DefaultMaxSlotDepth - name: "max-slot-depth" }: int + desc: "The maximum depth of the slot tree", + defaultValue: DefaultMaxSlotDepth, + defaultValueDesc: $DefaultMaxSlotDepth, + name: "max-slot-depth" + .}: int maxDatasetDepth* {. - desc: "The maximum depth of the dataset tree" - defaultValue: DefaultMaxDatasetDepth - defaultValueDesc: $DefaultMaxDatasetDepth - name: "max-dataset-depth" }: int + desc: "The maximum depth of the dataset tree", + defaultValue: DefaultMaxDatasetDepth, + defaultValueDesc: $DefaultMaxDatasetDepth, + name: "max-dataset-depth" + .}: int maxBlockDepth* {. - desc: "The maximum depth of the network block merkle tree" - defaultValue: DefaultBlockDepth - defaultValueDesc: $DefaultBlockDepth - name: "max-block-depth" }: int + desc: "The maximum depth of the network block merkle tree", + defaultValue: DefaultBlockDepth, + defaultValueDesc: $DefaultBlockDepth, + name: "max-block-depth" + .}: int maxCellElms* {. - desc: "The maximum number of elements in a cell" - defaultValue: DefaultCellElms - defaultValueDesc: $DefaultCellElms - name: "max-cell-elements" }: int + desc: "The maximum number of elements in a cell", + defaultValue: DefaultCellElms, + defaultValueDesc: $DefaultCellElms, + name: "max-cell-elements" + .}: int of PersistenceCmd.noCmd: discard - of StartUpCmd.noCmd: discard # end of persistence EthAddress* = ethers.Address -logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog -logutils.formatIt(LogFormat.json, EthAddress): %it +logutils.formatIt(LogFormat.textLines, EthAddress): + it.short0xHexLog +logutils.formatIt(LogFormat.json, EthAddress): + %it func defaultAddress*(conf: CodexConf): IpAddress = result = static parseIpAddress("127.0.0.1") @@ -443,20 +463,19 @@ const nimBanner* = getNimBanner() codexFullVersion* = - "Codex version: " & codexVersion & "\p" & - "Codex revision: " & codexRevision & "\p" & + "Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" & nimBanner -proc parseCmdArg*(T: typedesc[MultiAddress], - input: string): MultiAddress - {.upraises: [ValueError] .} = +proc parseCmdArg*( + T: typedesc[MultiAddress], input: string +): MultiAddress {.upraises: [ValueError].} = var ma: MultiAddress try: let res = MultiAddress.init(input) if res.isOk: ma = res.get() else: - warn "Invalid MultiAddress", input=input, error = res.error() + warn "Invalid MultiAddress", input = input, error = res.error() quit QuitFailure except LPError as exc: warn "Invalid MultiAddress uri", uri = input, error = exc.msg @@ -478,28 +497,28 @@ proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = res func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} = - case p.toLowerAscii: - of "any": - NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) - of "none": - NatConfig(hasExtIp: false, nat: NatStrategy.NatNone) - of "upnp": - NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp) - of "pmp": - NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp) - else: - if p.startsWith("extip:"): - try: - let ip = parseIpAddress(p[6..^1]) - NatConfig(hasExtIp: true, extIp: ip) - except ValueError: - let error = "Not a valid IP address: " & p[6..^1] - raise newException(ValueError, error) - else: - let error = "Not a valid NAT option: " & p + case p.toLowerAscii + of "any": + NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) + of "none": + NatConfig(hasExtIp: false, nat: NatStrategy.NatNone) + of "upnp": + 
NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp) + of "pmp": + NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp) + else: + if p.startsWith("extip:"): + try: + let ip = parseIpAddress(p[6 ..^ 1]) + NatConfig(hasExtIp: true, extIp: ip) + except ValueError: + let error = "Not a valid IP address: " & p[6 ..^ 1] raise newException(ValueError, error) + else: + let error = "Not a valid NAT option: " & p + raise newException(ValueError, error) -proc completeCmdArg*(T: type NatConfig; val: string): seq[string] = +proc completeCmdArg*(T: type NatConfig, val: string): seq[string] = return @[] proc parseCmdArg*(T: type EthAddress, address: string): T = @@ -509,20 +528,21 @@ proc parseCmdArg*(T: type NBytes, val: string): T = var num = 0'i64 let count = parseSize(val, num, alwaysBin = true) if count == 0: - warn "Invalid number of bytes", nbytes = val - quit QuitFailure + warn "Invalid number of bytes", nbytes = val + quit QuitFailure NBytes(num) proc parseCmdArg*(T: type Duration, val: string): T = var dur: Duration let count = parseDuration(val, dur) if count == 0: - warn "Cannot parse duration", dur = dur - quit QuitFailure + warn "Cannot parse duration", dur = dur + quit QuitFailure dur -proc readValue*(r: var TomlReader, val: var EthAddress) - {.upraises: [SerializationError, IOError].} = +proc readValue*( + r: var TomlReader, val: var EthAddress +) {.upraises: [SerializationError, IOError].} = val = EthAddress.init(r.readValue(string)).get() proc readValue*(r: var TomlReader, val: var SignedPeerRecord) = @@ -545,11 +565,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) = if res.isOk: val = res.get() else: - warn "Invalid MultiAddress", input=input, error=res.error() + warn "Invalid MultiAddress", input = input, error = res.error() quit QuitFailure -proc readValue*(r: var TomlReader, val: var NBytes) - {.upraises: [SerializationError, IOError].} = +proc readValue*( + r: var TomlReader, val: var NBytes +) {.upraises: [SerializationError, IOError].} = var value = 0'i64 var str = r.readValue(string) let count = parseSize(str, value, alwaysBin = true) @@ -558,8 +579,9 @@ proc readValue*(r: var TomlReader, val: var NBytes) quit QuitFailure val = NBytes(value) -proc readValue*(r: var TomlReader, val: var Duration) - {.upraises: [SerializationError, IOError].} = +proc readValue*( + r: var TomlReader, val: var Duration +) {.upraises: [SerializationError, IOError].} = var str = r.readValue(string) var dur: Duration let count = parseDuration(str, dur) @@ -568,20 +590,23 @@ proc readValue*(r: var TomlReader, val: var Duration) quit QuitFailure val = dur -proc readValue*(r: var TomlReader, val: var NatConfig) - {.raises: [SerializationError].} = - val = try: parseCmdArg(NatConfig, r.readValue(string)) - except CatchableError as err: - raise newException(SerializationError, err.msg) +proc readValue*( + r: var TomlReader, val: var NatConfig +) {.raises: [SerializationError].} = + val = + try: + parseCmdArg(NatConfig, r.readValue(string)) + except CatchableError as err: + raise newException(SerializationError, err.msg) # no idea why confutils needs this: -proc completeCmdArg*(T: type EthAddress; val: string): seq[string] = +proc completeCmdArg*(T: type EthAddress, val: string): seq[string] = discard -proc completeCmdArg*(T: type NBytes; val: string): seq[string] = +proc completeCmdArg*(T: type NBytes, val: string): seq[string] = discard -proc completeCmdArg*(T: type Duration; val: string): seq[string] = +proc completeCmdArg*(T: type Duration, val: string): seq[string] = discard # silly 
chronicles, colors is a compile-time property @@ -603,7 +628,7 @@ proc stripAnsi*(v: string): string = if c2 != '[': break else: - if c2 in {'0'..'9'} + {';'}: + if c2 in {'0' .. '9'} + {';'}: discard # keep looking elif c2 == 'm': i = x + 1 @@ -627,12 +652,12 @@ proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} = setLogLevel(parseEnum[LogLevel](directives[0].toUpperAscii)) except ValueError: raise (ref ValueError)( - msg: "Please specify one of: trace, debug, " & - "info, notice, warn, error or fatal" + msg: + "Please specify one of: trace, debug, " & "info, notice, warn, error or fatal" ) if directives.len > 1: - for topicName, settings in parseTopicDirectives(directives[1..^1]): + for topicName, settings in parseTopicDirectives(directives[1 ..^ 1]): if not setTopicState(topicName, settings.state, settings.logLevel): warn "Unrecognized logging topic", topic = topicName @@ -641,7 +666,9 @@ proc setupLogging*(conf: CodexConf) = warn "Logging configuration options not enabled in the current build" else: var logFile: ?IoHandle - proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard + proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = + discard + proc writeAndFlush(f: File, msg: LogOutputStr) = try: f.write(msg) @@ -662,14 +689,11 @@ proc setupLogging*(conf: CodexConf) = defaultChroniclesStream.outputs[2].writer = noOutput if logFilePath =? conf.logFile and logFilePath.len > 0: - let logFileHandle = openFile( - logFilePath, - {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate} - ) + let logFileHandle = + openFile(logFilePath, {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}) if logFileHandle.isErr: error "failed to open log file", - path = logFilePath, - errorCode = $logFileHandle.error + path = logFilePath, errorCode = $logFileHandle.error else: logFile = logFileHandle.option defaultChroniclesStream.outputs[2].writer = fileFlush @@ -677,14 +701,13 @@ proc setupLogging*(conf: CodexConf) = defaultChroniclesStream.outputs[1].writer = noOutput let writer = - case conf.logFormat: + case conf.logFormat of LogKind.Auto: - if isatty(stdout): - stdoutFlush - else: - noColorsFlush - of LogKind.Colors: stdoutFlush - of LogKind.NoColors: noColorsFlush + if isatty(stdout): stdoutFlush else: noColorsFlush + of LogKind.Colors: + stdoutFlush + of LogKind.NoColors: + noColorsFlush of LogKind.Json: defaultChroniclesStream.outputs[1].writer = stdoutFlush noOutput @@ -695,8 +718,9 @@ proc setupLogging*(conf: CodexConf) = var counter = 0.uint64 proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) = inc(counter) - let withoutNewLine = msg[0..^2] + let withoutNewLine = msg[0 ..^ 2] writer(logLevel, withoutNewLine & " count=" & $counter & "\n") + defaultChroniclesStream.outputs[0].writer = numberedWriter else: defaultChroniclesStream.outputs[0].writer = writer diff --git a/codex/contracts/clock.nim b/codex/contracts/clock.nim index 284996ff..b5bf7ebb 100644 --- a/codex/contracts/clock.nim +++ b/codex/contracts/clock.nim @@ -11,14 +11,13 @@ export clock logScope: topics = "contracts clock" -type - OnChainClock* = ref object of Clock - provider: Provider - subscription: Subscription - offset: times.Duration - blockNumber: UInt256 - started: bool - newBlock: AsyncEvent +type OnChainClock* = ref object of Clock + provider: Provider + subscription: Subscription + offset: times.Duration + blockNumber: UInt256 + started: bool + newBlock: AsyncEvent proc new*(_: type OnChainClock, provider: Provider): OnChainClock = OnChainClock(provider: provider, newBlock: newAsyncEvent()) @@ 
-29,7 +28,8 @@ proc update(clock: OnChainClock, blck: Block) = let computerTime = getTime() clock.offset = blockTime - computerTime clock.blockNumber = number - trace "updated clock", blockTime=blck.timestamp, blockNumber=number, offset=clock.offset + trace "updated clock", + blockTime = blck.timestamp, blockNumber = number, offset = clock.offset clock.newBlock.fire() proc update(clock: OnChainClock) {.async.} = @@ -39,7 +39,7 @@ proc update(clock: OnChainClock) {.async.} = except CancelledError as error: raise error except CatchableError as error: - debug "error updating clock: ", error=error.msg + debug "error updating clock: ", error = error.msg discard method start*(clock: OnChainClock) {.async.} = @@ -48,7 +48,7 @@ method start*(clock: OnChainClock) {.async.} = proc onBlock(blckResult: ?!Block) = if eventError =? blckResult.errorOption: - error "There was an error in block subscription", msg=eventError.msg + error "There was an error in block subscription", msg = eventError.msg return # ignore block parameter; hardhat may call this with pending blocks diff --git a/codex/contracts/config.nim b/codex/contracts/config.nim index 76e00207..87cd1f2a 100644 --- a/codex/contracts/config.nim +++ b/codex/contracts/config.nim @@ -8,11 +8,14 @@ type MarketplaceConfig* = object collateral*: CollateralConfig proofs*: ProofConfig + CollateralConfig* = object - repairRewardPercentage*: uint8 # percentage of remaining collateral slot has after it has been freed + repairRewardPercentage*: uint8 + # percentage of remaining collateral slot has after it has been freed maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value slashCriterion*: uint16 # amount of proofs missed that lead to slashing slashPercentage*: uint8 # percentage of the collateral that is slashed + ProofConfig* = object period*: UInt256 # proofs requirements are calculated per period (in seconds) timeout*: UInt256 # mark proofs as missing before the timeout (in seconds) @@ -23,14 +26,13 @@ type # blocks. Should be a prime number to ensure there are no cycles. 
downtimeProduct*: uint8 - func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig = ProofConfig( period: tupl[0], timeout: tupl[1], downtime: tupl[2], zkeyHash: tupl[3], - downtimeProduct: tupl[4] + downtimeProduct: tupl[4], ) func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig = @@ -38,14 +40,11 @@ func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig = repairRewardPercentage: tupl[0], maxNumberOfSlashes: tupl[1], slashCriterion: tupl[2], - slashPercentage: tupl[3] + slashPercentage: tupl[3], ) func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig = - MarketplaceConfig( - collateral: tupl[0], - proofs: tupl[1] - ) + MarketplaceConfig(collateral: tupl[0], proofs: tupl[1]) func solidityType*(_: type ProofConfig): string = solidityType(ProofConfig.fieldTypes) diff --git a/codex/contracts/deployment.nim b/codex/contracts/deployment.nim index 611aa359..c4e59b80 100644 --- a/codex/contracts/deployment.nim +++ b/codex/contracts/deployment.nim @@ -12,23 +12,20 @@ type Deployment* = ref object config: CodexConf const knownAddresses = { - # Hardhat localhost network - "31337": { - "Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44"), - }.toTable, - # Taiko Alpha-3 Testnet - "167005": { - "Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F") - }.toTable, - # Codex Testnet - Nov 25 2024 18:41:29 PM (+00:00 UTC) - "789987": { - "Marketplace": Address.init("0xAB03b6a58C5262f530D54146DA2a552B1C0F7648") - }.toTable + # Hardhat localhost network + "31337": + {"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44")}.toTable, + # Taiko Alpha-3 Testnet + "167005": + {"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable, + # Codex Testnet - Nov 25 2024 18:41:29 PM (+00:00 UTC) + "789987": + {"Marketplace": Address.init("0xAB03b6a58C5262f530D54146DA2a552B1C0F7648")}.toTable, }.toTable proc getKnownAddress(T: type, chainId: UInt256): ?Address = let id = chainId.toString(10) - notice "Looking for well-known contract address with ChainID ", chainId=id + notice "Looking for well-known contract address with ChainID ", chainId = id if not (id in knownAddresses): return none Address diff --git a/codex/contracts/interactions/clientinteractions.nim b/codex/contracts/interactions/clientinteractions.nim index 78b3bedf..df81da11 100644 --- a/codex/contracts/interactions/clientinteractions.nim +++ b/codex/contracts/interactions/clientinteractions.nim @@ -9,13 +9,12 @@ import ./interactions export purchasing export logutils -type - ClientInteractions* = ref object of ContractInteractions - purchasing*: Purchasing +type ClientInteractions* = ref object of ContractInteractions + purchasing*: Purchasing -proc new*(_: type ClientInteractions, - clock: OnChainClock, - purchasing: Purchasing): ClientInteractions = +proc new*( + _: type ClientInteractions, clock: OnChainClock, purchasing: Purchasing +): ClientInteractions = ClientInteractions(clock: clock, purchasing: purchasing) proc start*(self: ClientInteractions) {.async.} = diff --git a/codex/contracts/interactions/hostinteractions.nim b/codex/contracts/interactions/hostinteractions.nim index 2decfa44..dd311746 100644 --- a/codex/contracts/interactions/hostinteractions.nim +++ b/codex/contracts/interactions/hostinteractions.nim @@ -7,15 +7,10 @@ import ./interactions export sales export logutils -type - HostInteractions* = ref object of ContractInteractions - sales*: Sales +type HostInteractions* = ref object of 
ContractInteractions + sales*: Sales -proc new*( - _: type HostInteractions, - clock: Clock, - sales: Sales -): HostInteractions = +proc new*(_: type HostInteractions, clock: Clock, sales: Sales): HostInteractions = ## Create a new HostInteractions instance ## HostInteractions(clock: clock, sales: sales) diff --git a/codex/contracts/interactions/interactions.nim b/codex/contracts/interactions/interactions.nim index d4fddf54..1006eb3f 100644 --- a/codex/contracts/interactions/interactions.nim +++ b/codex/contracts/interactions/interactions.nim @@ -5,9 +5,8 @@ import ../market export clock -type - ContractInteractions* = ref object of RootObj - clock*: Clock +type ContractInteractions* = ref object of RootObj + clock*: Clock method start*(self: ContractInteractions) {.async, base.} = discard diff --git a/codex/contracts/interactions/validatorinteractions.nim b/codex/contracts/interactions/validatorinteractions.nim index 1aa4026c..aae28202 100644 --- a/codex/contracts/interactions/validatorinteractions.nim +++ b/codex/contracts/interactions/validatorinteractions.nim @@ -3,13 +3,12 @@ import ../../validation export validation -type - ValidatorInteractions* = ref object of ContractInteractions - validation: Validation +type ValidatorInteractions* = ref object of ContractInteractions + validation: Validation -proc new*(_: type ValidatorInteractions, - clock: OnChainClock, - validation: Validation): ValidatorInteractions = +proc new*( + _: type ValidatorInteractions, clock: OnChainClock, validation: Validation +): ValidatorInteractions = ValidatorInteractions(clock: clock, validation: validation) proc start*(self: ValidatorInteractions) {.async.} = diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 06902868..35557050 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -27,18 +27,12 @@ type eventSubscription: EventSubscription func new*( - _: type OnChainMarket, - contract: Marketplace, - rewardRecipient = Address.none): OnChainMarket = - + _: type OnChainMarket, contract: Marketplace, rewardRecipient = Address.none +): OnChainMarket = without signer =? 
contract.signer: raiseAssert("Marketplace contract should have a signer") - OnChainMarket( - contract: contract, - signer: signer, - rewardRecipient: rewardRecipient - ) + OnChainMarket(contract: contract, signer: signer, rewardRecipient: rewardRecipient) proc raiseMarketError(message: string) {.raises: [MarketError].} = raise newException(MarketError, message) @@ -105,56 +99,55 @@ method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} = method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} = convertEthersError: let slots = await market.contract.mySlots() - debug "Fetched my slots", numSlots=len(slots) + debug "Fetched my slots", numSlots = len(slots) return slots -method requestStorage(market: OnChainMarket, request: StorageRequest){.async.} = +method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} = convertEthersError: debug "Requesting storage" - await market.approveFunds(request.price()) + await market.approveFunds(request.totalPrice()) discard await market.contract.requestStorage(request).confirm(1) -method getRequest*(market: OnChainMarket, - id: RequestId): Future[?StorageRequest] {.async.} = +method getRequest*( + market: OnChainMarket, id: RequestId +): Future[?StorageRequest] {.async.} = convertEthersError: try: return some await market.contract.getRequest(id) - except ProviderError as e: - if e.msgDetail.contains("Unknown request"): - return none StorageRequest - raise e + except Marketplace_UnknownRequest: + return none StorageRequest -method requestState*(market: OnChainMarket, - requestId: RequestId): Future[?RequestState] {.async.} = +method requestState*( + market: OnChainMarket, requestId: RequestId +): Future[?RequestState] {.async.} = convertEthersError: try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return some await market.contract.requestState(requestId, overrides) - except ProviderError as e: - if e.msgDetail.contains("Unknown request"): - return none RequestState - raise e + except Marketplace_UnknownRequest: + return none RequestState -method slotState*(market: OnChainMarket, - slotId: SlotId): Future[SlotState] {.async.} = +method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} = convertEthersError: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.slotState(slotId, overrides) -method getRequestEnd*(market: OnChainMarket, - id: RequestId): Future[SecondsSince1970] {.async.} = +method getRequestEnd*( + market: OnChainMarket, id: RequestId +): Future[SecondsSince1970] {.async.} = convertEthersError: return await market.contract.requestEnd(id) -method requestExpiresAt*(market: OnChainMarket, - id: RequestId): Future[SecondsSince1970] {.async.} = +method requestExpiresAt*( + market: OnChainMarket, id: RequestId +): Future[SecondsSince1970] {.async.} = convertEthersError: return await market.contract.requestExpiry(id) -method getHost(market: OnChainMarket, - requestId: RequestId, - slotIndex: UInt256): Future[?Address] {.async.} = +method getHost( + market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 +): Future[?Address] {.async.} = convertEthersError: let slotId = slotId(requestId, slotIndex) let address = await market.contract.getHost(slotId) @@ -163,21 +156,26 @@ method getHost(market: OnChainMarket, else: return none Address -method getActiveSlot*(market: OnChainMarket, - slotId: SlotId): Future[?Slot] {.async.} = +method currentCollateral*( + market: OnChainMarket, slotId: SlotId +): Future[UInt256] 
{.async.} = + convertEthersError: + return await market.contract.currentCollateral(slotId) + +method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} = convertEthersError: try: return some await market.contract.getActiveSlot(slotId) - except ProviderError as e: - if e.msgDetail.contains("Slot is free"): - return none Slot - raise e + except Marketplace_SlotIsFree: + return none Slot -method fillSlot(market: OnChainMarket, - requestId: RequestId, - slotIndex: UInt256, - proof: Groth16Proof, - collateral: UInt256) {.async.} = +method fillSlot( + market: OnChainMarket, + requestId: RequestId, + slotIndex: UInt256, + proof: Groth16Proof, + collateral: UInt256, +) {.async.} = convertEthersError: logScope: requestId @@ -197,9 +195,9 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = let collateralRecipient = await market.getSigner() freeSlot = market.contract.freeSlot( slotId, - rewardRecipient, # --reward-recipient - collateralRecipient) # SP's address - + rewardRecipient, # --reward-recipient + collateralRecipient, + ) # SP's address else: # Otherwise, use the SP's address as both the reward and collateral # recipient (the contract will use msg.sender for both) @@ -207,55 +205,45 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = discard await freeSlot.confirm(1) - -method withdrawFunds(market: OnChainMarket, - requestId: RequestId) {.async.} = +method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} = convertEthersError: discard await market.contract.withdrawFunds(requestId).confirm(1) -method isProofRequired*(market: OnChainMarket, - id: SlotId): Future[bool] {.async.} = +method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = convertEthersError: try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.isProofRequired(id, overrides) - except ProviderError as e: - if e.msgDetail.contains("Slot is free"): - return false - raise e + except Marketplace_SlotIsFree: + return false -method willProofBeRequired*(market: OnChainMarket, - id: SlotId): Future[bool] {.async.} = +method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = convertEthersError: try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.willProofBeRequired(id, overrides) - except ProviderError as e: - if e.msgDetail.contains("Slot is free"): - return false - raise e + except Marketplace_SlotIsFree: + return false -method getChallenge*(market: OnChainMarket, id: SlotId): Future[ProofChallenge] {.async.} = +method getChallenge*( + market: OnChainMarket, id: SlotId +): Future[ProofChallenge] {.async.} = convertEthersError: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.getChallenge(id, overrides) -method submitProof*(market: OnChainMarket, - id: SlotId, - proof: Groth16Proof) {.async.} = +method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} = convertEthersError: discard await market.contract.submitProof(id, proof).confirm(1) -method markProofAsMissing*(market: OnChainMarket, - id: SlotId, - period: Period) {.async.} = +method markProofAsMissing*( + market: OnChainMarket, id: SlotId, period: Period +) {.async.} = convertEthersError: discard await market.contract.markProofAsMissing(id, period).confirm(1) method canProofBeMarkedAsMissing*( - market: OnChainMarket, - id: SlotId, - period: Period + market: OnChainMarket, id: SlotId, 
period: Period ): Future[bool] {.async.} = let provider = market.contract.provider let contractWithoutSigner = market.contract.connect(provider) @@ -268,46 +256,42 @@ method canProofBeMarkedAsMissing*( return false method reserveSlot*( - market: OnChainMarket, - requestId: RequestId, - slotIndex: UInt256) {.async.} = - + market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 +) {.async.} = convertEthersError: - discard await market.contract.reserveSlot( + discard await market.contract + .reserveSlot( requestId, slotIndex, # reserveSlot runs out of gas for unknown reason, but 100k gas covers it - TransactionOverrides(gasLimit: some 100000.u256) - ).confirm(1) + TransactionOverrides(gasLimit: some 100000.u256), + ) + .confirm(1) method canReserveSlot*( - market: OnChainMarket, - requestId: RequestId, - slotIndex: UInt256): Future[bool] {.async.} = - + market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 +): Future[bool] {.async.} = convertEthersError: return await market.contract.canReserveSlot(requestId, slotIndex) -method subscribeRequests*(market: OnChainMarket, - callback: OnRequest): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!StorageRequested) {.upraises:[].} = +method subscribeRequests*( + market: OnChainMarket, callback: OnRequest +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in Request subscription", msg = eventErr.msg return - callback(event.requestId, - event.ask, - event.expiry) + callback(event.requestId, event.ask, event.expiry) convertEthersError: let subscription = await market.contract.subscribe(StorageRequested, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeSlotFilled*(market: OnChainMarket, - callback: OnSlotFilled): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!SlotFilled) {.upraises:[].} = +method subscribeSlotFilled*( + market: OnChainMarket, callback: OnSlotFilled +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in SlotFilled subscription", msg = eventErr.msg return @@ -318,11 +302,12 @@ method subscribeSlotFilled*(market: OnChainMarket, let subscription = await market.contract.subscribe(SlotFilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeSlotFilled*(market: OnChainMarket, - requestId: RequestId, - slotIndex: UInt256, - callback: OnSlotFilled): - Future[MarketSubscription] {.async.} = +method subscribeSlotFilled*( + market: OnChainMarket, + requestId: RequestId, + slotIndex: UInt256, + callback: OnSlotFilled, +): Future[MarketSubscription] {.async.} = proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) = if eventRequestId == requestId and eventSlotIndex == slotIndex: callback(requestId, slotIndex) @@ -330,10 +315,10 @@ method subscribeSlotFilled*(market: OnChainMarket, convertEthersError: return await market.subscribeSlotFilled(onSlotFilled) -method subscribeSlotFreed*(market: OnChainMarket, - callback: OnSlotFreed): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!SlotFreed) {.upraises:[].} = +method subscribeSlotFreed*( + market: OnChainMarket, callback: OnSlotFreed +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} = without event =? 
eventResult, eventErr: error "There was an error in SlotFreed subscription", msg = eventErr.msg return @@ -345,12 +330,12 @@ method subscribeSlotFreed*(market: OnChainMarket, return OnChainMarketSubscription(eventSubscription: subscription) method subscribeSlotReservationsFull*( - market: OnChainMarket, - callback: OnSlotReservationsFull): Future[MarketSubscription] {.async.} = - - proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises:[].} = + market: OnChainMarket, callback: OnSlotReservationsFull +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} = without event =? eventResult, eventErr: - error "There was an error in SlotReservationsFull subscription", msg = eventErr.msg + error "There was an error in SlotReservationsFull subscription", + msg = eventErr.msg return callback(event.requestId, event.slotIndex) @@ -359,10 +344,10 @@ method subscribeSlotReservationsFull*( let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeFulfillment(market: OnChainMarket, - callback: OnFulfillment): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFulfilled) {.upraises:[].} = +method subscribeFulfillment( + market: OnChainMarket, callback: OnFulfillment +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFulfillment subscription", msg = eventErr.msg return @@ -373,11 +358,10 @@ method subscribeFulfillment(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeFulfillment(market: OnChainMarket, - requestId: RequestId, - callback: OnFulfillment): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFulfilled) {.upraises:[].} = +method subscribeFulfillment( + market: OnChainMarket, requestId: RequestId, callback: OnFulfillment +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFulfillment subscription", msg = eventErr.msg return @@ -389,10 +373,10 @@ method subscribeFulfillment(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeRequestCancelled*(market: OnChainMarket, - callback: OnRequestCancelled): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestCancelled) {.upraises:[].} = +method subscribeRequestCancelled*( + market: OnChainMarket, callback: OnRequestCancelled +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} = without event =? 
eventResult, eventErr: error "There was an error in RequestCancelled subscription", msg = eventErr.msg return @@ -403,11 +387,10 @@ method subscribeRequestCancelled*(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestCancelled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeRequestCancelled*(market: OnChainMarket, - requestId: RequestId, - callback: OnRequestCancelled): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestCancelled) {.upraises:[].} = +method subscribeRequestCancelled*( + market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestCancelled subscription", msg = eventErr.msg return @@ -419,10 +402,10 @@ method subscribeRequestCancelled*(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestCancelled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeRequestFailed*(market: OnChainMarket, - callback: OnRequestFailed): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFailed) {.upraises:[]} = +method subscribeRequestFailed*( + market: OnChainMarket, callback: OnRequestFailed +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFailed subscription", msg = eventErr.msg return @@ -433,11 +416,10 @@ method subscribeRequestFailed*(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestFailed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeRequestFailed*(market: OnChainMarket, - requestId: RequestId, - callback: OnRequestFailed): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFailed) {.upraises:[]} = +method subscribeRequestFailed*( + market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFailed subscription", msg = eventErr.msg return @@ -449,9 +431,9 @@ method subscribeRequestFailed*(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestFailed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeProofSubmission*(market: OnChainMarket, - callback: OnProofSubmitted): - Future[MarketSubscription] {.async.} = +method subscribeProofSubmission*( + market: OnChainMarket, callback: OnProofSubmitted +): Future[MarketSubscription] {.async.} = proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} = without event =? 
@@ -467,48 +449,37 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
   await subscription.eventSubscription.unsubscribe()

 method queryPastSlotFilledEvents*(
-  market: OnChainMarket,
-  fromBlock: BlockTag): Future[seq[SlotFilled]] {.async.} =
-
+    market: OnChainMarket, fromBlock: BlockTag
+): Future[seq[SlotFilled]] {.async.} =
   convertEthersError:
-    return await market.contract.queryFilter(SlotFilled,
-                                             fromBlock,
-                                             BlockTag.latest)
+    return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)

 method queryPastSlotFilledEvents*(
-  market: OnChainMarket,
-  blocksAgo: int): Future[seq[SlotFilled]] {.async.} =
-
+    market: OnChainMarket, blocksAgo: int
+): Future[seq[SlotFilled]] {.async.} =
   convertEthersError:
-    let fromBlock =
-      await market.contract.provider.pastBlockTag(blocksAgo)
+    let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
     return await market.queryPastSlotFilledEvents(fromBlock)

 method queryPastSlotFilledEvents*(
-  market: OnChainMarket,
-  fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.async.} =
-
+    market: OnChainMarket, fromTime: SecondsSince1970
+): Future[seq[SlotFilled]] {.async.} =
   convertEthersError:
-    let fromBlock =
-      await market.contract.provider.blockNumberForEpoch(fromTime)
+    let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
     return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))

 method queryPastStorageRequestedEvents*(
-  market: OnChainMarket,
-  fromBlock: BlockTag): Future[seq[StorageRequested]] {.async.} =
-
+    market: OnChainMarket, fromBlock: BlockTag
+): Future[seq[StorageRequested]] {.async.} =
   convertEthersError:
-    return await market.contract.queryFilter(StorageRequested,
-                                             fromBlock,
-                                             BlockTag.latest)
+    return
+      await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)

 method queryPastStorageRequestedEvents*(
-  market: OnChainMarket,
-  blocksAgo: int): Future[seq[StorageRequested]] {.async.} =
-
+    market: OnChainMarket, blocksAgo: int
+): Future[seq[StorageRequested]] {.async.} =
   convertEthersError:
-    let fromBlock =
-      await market.contract.provider.pastBlockTag(blocksAgo)
+    let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
     return await market.queryPastStorageRequestedEvents(fromBlock)
diff --git a/codex/contracts/marketplace.nim b/codex/contracts/marketplace.nim
index 020f501e..87fd1e47 100644
--- a/codex/contracts/marketplace.nim
+++ b/codex/contracts/marketplace.nim
@@ -17,28 +17,142 @@ export requests

 type
   Marketplace* = ref object of Contract
+  Marketplace_RepairRewardPercentageTooHigh* = object of SolidityError
+  Marketplace_SlashPercentageTooHigh* = object of SolidityError
+  Marketplace_MaximumSlashingTooHigh* = object of SolidityError
+  Marketplace_InvalidExpiry* = object of SolidityError
+  Marketplace_InvalidMaxSlotLoss* = object of SolidityError
+  Marketplace_InsufficientSlots* = object of SolidityError
+  Marketplace_InvalidClientAddress* = object of SolidityError
+  Marketplace_RequestAlreadyExists* = object of SolidityError
+  Marketplace_InvalidSlot* = object of SolidityError
+  Marketplace_SlotNotFree* = object of SolidityError
+  Marketplace_InvalidSlotHost* = object of SolidityError
+  Marketplace_AlreadyPaid* = object of SolidityError
+  Marketplace_TransferFailed* = object of SolidityError
+  Marketplace_UnknownRequest* = object of SolidityError
+  Marketplace_InvalidState* = object of SolidityError
+  Marketplace_StartNotBeforeExpiry* = object of SolidityError
+  Marketplace_SlotNotAcceptingProofs* = object of SolidityError
+  Marketplace_SlotIsFree* = object of SolidityError
+  Marketplace_ReservationRequired* = object of SolidityError
+  Marketplace_NothingToWithdraw* = object of SolidityError
+  Marketplace_InsufficientDuration* = object of SolidityError
+  Marketplace_InsufficientProofProbability* = object of SolidityError
+  Marketplace_InsufficientCollateral* = object of SolidityError
+  Marketplace_InsufficientReward* = object of SolidityError
+  Marketplace_InvalidCid* = object of SolidityError
+  Proofs_InsufficientBlockHeight* = object of SolidityError
+  Proofs_InvalidProof* = object of SolidityError
+  Proofs_ProofAlreadySubmitted* = object of SolidityError
+  Proofs_PeriodNotEnded* = object of SolidityError
+  Proofs_ValidationTimedOut* = object of SolidityError
+  Proofs_ProofNotMissing* = object of SolidityError
+  Proofs_ProofNotRequired* = object of SolidityError
+  Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
+  Periods_InvalidSecondsPerPeriod* = object of SolidityError
+
 proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
 proc token*(marketplace: Marketplace): Address {.contract, view.}
+proc currentCollateral*(
+  marketplace: Marketplace, id: SlotId
+): UInt256 {.contract, view.}
+
 proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.}
 proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.}
 proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.}

-proc requestStorage*(marketplace: Marketplace, request: StorageRequest): Confirmable {.contract.}
-proc fillSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256, proof: Groth16Proof): Confirmable {.contract.}
-proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId): Confirmable {.contract.}
-proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address): Confirmable {.contract.}
-proc freeSlot*(marketplace: Marketplace, id: SlotId): Confirmable {.contract.}
-proc freeSlot*(marketplace: Marketplace, id: SlotId, rewardRecipient: Address, collateralRecipient: Address): Confirmable {.contract.}
-proc getRequest*(marketplace: Marketplace, id: RequestId): StorageRequest {.contract, view.}
+proc requestStorage*(
+  marketplace: Marketplace, request: StorageRequest
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
+    Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
+    Marketplace_InvalidMaxSlotLoss,
+  ]
+.}
+
+proc fillSlot*(
+  marketplace: Marketplace,
+  requestId: RequestId,
+  slotIndex: UInt256,
+  proof: Groth16Proof,
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidSlot, Marketplace_ReservationRequired, Marketplace_SlotNotFree,
+    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest,
+  ]
+.}
+
+proc withdrawFunds*(
+  marketplace: Marketplace, requestId: RequestId
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidClientAddress, Marketplace_InvalidState,
+    Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
+  ]
+.}
+
+proc withdrawFunds*(
+  marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidClientAddress, Marketplace_InvalidState,
+    Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
+  ]
+.}
+
+proc freeSlot*(
+  marketplace: Marketplace, id: SlotId
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
+    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
+  ]
+.}
+
+proc freeSlot*(
+  marketplace: Marketplace,
+  id: SlotId,
+  rewardRecipient: Address,
+  collateralRecipient: Address,
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
+    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
+  ]
+.}
+
+proc getRequest*(
+  marketplace: Marketplace, id: RequestId
+): StorageRequest {.contract, view, errors: [Marketplace_UnknownRequest].}
+
 proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.}
-proc getActiveSlot*(marketplace: Marketplace, id: SlotId): Slot {.contract, view.}
+proc getActiveSlot*(
+  marketplace: Marketplace, id: SlotId
+): Slot {.contract, view, errors: [Marketplace_SlotIsFree].}

 proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.}
 proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.}
-proc requestState*(marketplace: Marketplace, requestId: RequestId): RequestState {.contract, view.}
+proc requestState*(
+  marketplace: Marketplace, requestId: RequestId
+): RequestState {.contract, view, errors: [Marketplace_UnknownRequest].}
+
 proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.}
-proc requestEnd*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
-proc requestExpiry*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
+proc requestEnd*(
+  marketplace: Marketplace, requestId: RequestId
+): SecondsSince1970 {.contract, view.}
+
+proc requestExpiry*(
+  marketplace: Marketplace, requestId: RequestId
+): SecondsSince1970 {.contract, view.}

 proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.}

@@ -46,11 +160,35 @@ proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
 proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
 proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
 proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
-proc getChallenge*(marketplace: Marketplace, id: SlotId): array[32, byte] {.contract, view.}
+proc getChallenge*(
+  marketplace: Marketplace, id: SlotId
+): array[32, byte] {.contract, view.}
+
 proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.}

-proc submitProof*(marketplace: Marketplace, id: SlotId, proof: Groth16Proof): Confirmable {.contract.}
-proc markProofAsMissing*(marketplace: Marketplace, id: SlotId, period: UInt256): Confirmable {.contract.}
+proc submitProof*(
+  marketplace: Marketplace, id: SlotId, proof: Groth16Proof
+): Confirmable {.
+  contract,
+  errors:
+    [Proofs_ProofAlreadySubmitted, Proofs_InvalidProof, Marketplace_UnknownRequest]
+.}

-proc reserveSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256): Confirmable {.contract.}
-proc canReserveSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256): bool {.contract, view.}
+proc markProofAsMissing*(
+  marketplace: Marketplace, id: SlotId, period: UInt256
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_SlotNotAcceptingProofs, Marketplace_StartNotBeforeExpiry,
+    Proofs_PeriodNotEnded, Proofs_ValidationTimedOut, Proofs_ProofNotMissing,
+    Proofs_ProofNotRequired, Proofs_ProofAlreadyMarkedMissing,
+  ]
+.}
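# --- Illustrative sketch, not part of the patch: with the `errors` pragma
# annotations above, nim-ethers can surface the declared SolidityError
# subtypes as typed exceptions, so call sites may catch them by name instead
# of inspecting a revert string. The error types are real (declared above);
# the handler itself is hypothetical.
proc tryWithdraw(marketplace: Marketplace, id: RequestId) {.async.} =
  try:
    discard await marketplace.withdrawFunds(id)
  except Marketplace_NothingToWithdraw:
    echo "nothing to withdraw yet"
  except Marketplace_UnknownRequest:
    echo "no such request"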
+
+proc reserveSlot*(
+  marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256
+): Confirmable {.contract.}
+
+proc canReserveSlot*(
+  marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256
+): bool {.contract, view.}
diff --git a/codex/contracts/proofs.nim b/codex/contracts/proofs.nim
index a7a59351..771d685b 100644
--- a/codex/contracts/proofs.nim
+++ b/codex/contracts/proofs.nim
@@ -7,13 +7,16 @@ type
     a*: G1Point
     b*: G2Point
     c*: G1Point
+
   G1Point* = object
     x*: UInt256
    y*: UInt256
+
   # A field element F_{p^2} encoded as `real + i * imag`
   Fp2Element* = object
     real*: UInt256
     imag*: UInt256
+
   G2Point* = object
     x*: Fp2Element
     y*: Fp2Element
diff --git a/codex/contracts/provider.nim b/codex/contracts/provider.nim
index 62098fb5..b7fc5602 100644
--- a/codex/contracts/provider.nim
+++ b/codex/contracts/provider.nim
@@ -12,8 +12,9 @@ logScope:
 proc raiseProviderError(message: string) {.raises: [ProviderError].} =
   raise newException(ProviderError, message)

-proc blockNumberAndTimestamp*(provider: Provider, blockTag: BlockTag):
-    Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} =
+proc blockNumberAndTimestamp*(
+    provider: Provider, blockTag: BlockTag
+): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} =
   without latestBlock =? await provider.getBlock(blockTag):
     raiseProviderError("Could not get latest block")

@@ -23,14 +24,10 @@ proc blockNumberAndTimestamp*(provider: Provider, blockTag: BlockTag):
   return (latestBlockNumber, latestBlock.timestamp)

 proc binarySearchFindClosestBlock(
-  provider: Provider,
-  epochTime: int,
-  low: UInt256,
-  high: UInt256): Future[UInt256] {.async: (raises: [ProviderError]).} =
-  let (_, lowTimestamp) =
-    await provider.blockNumberAndTimestamp(BlockTag.init(low))
-  let (_, highTimestamp) =
-    await provider.blockNumberAndTimestamp(BlockTag.init(high))
+    provider: Provider, epochTime: int, low: UInt256, high: UInt256
+): Future[UInt256] {.async: (raises: [ProviderError]).} =
+  let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low))
+  let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high))
   if abs(lowTimestamp.truncate(int) - epochTime) <
       abs(highTimestamp.truncate(int) - epochTime):
     return low
@@ -41,8 +38,8 @@ proc binarySearchBlockNumberForEpoch(
   provider: Provider,
   epochTime: UInt256,
   latestBlockNumber: UInt256,
-  earliestBlockNumber: UInt256): Future[UInt256]
-    {.async: (raises: [ProviderError]).} =
+  earliestBlockNumber: UInt256,
+): Future[UInt256] {.async: (raises: [ProviderError]).} =
   var low = earliestBlockNumber
   var high = latestBlockNumber

@@ -52,7 +49,7 @@ proc binarySearchBlockNumberForEpoch(
     let mid = (low + high) div 2
     let (midBlockNumber, midBlockTimestamp) =
       await provider.blockNumberAndTimestamp(BlockTag.init(mid))
-
+
     if midBlockTimestamp < epochTime:
       low = mid + 1
     elif midBlockTimestamp > epochTime:
@@ -63,16 +60,16 @@
   # low is always greater than high - this is why we use high, where
   # intuitively we would use low:
   await provider.binarySearchFindClosestBlock(
-    epochTime.truncate(int), low=high, high=low)
+    epochTime.truncate(int), low = high, high = low
+  )

 proc blockNumberForEpoch*(
-  provider: Provider,
-  epochTime: SecondsSince1970): Future[UInt256]
-    {.async: (raises: [ProviderError]).} =
+    provider: Provider, epochTime: SecondsSince1970
+): Future[UInt256] {.async: (raises: [ProviderError]).} =
   let epochTimeUInt256 = epochTime.u256
-  let (latestBlockNumber, latestBlockTimestamp) =
+  let (latestBlockNumber, latestBlockTimestamp) =
     await provider.blockNumberAndTimestamp(BlockTag.latest)
-  let (earliestBlockNumber, earliestBlockTimestamp) =
+  let (earliestBlockNumber, earliestBlockTimestamp) =
     await provider.blockNumberAndTimestamp(BlockTag.earliest)

   # Initially we used the average block time to predict
@@ -109,18 +106,18 @@ proc blockNumberForEpoch*(
     return latestBlockNumber

   if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256:
-    let availableHistoryInDays =
-      (latestBlockTimestamp - earliestBlockTimestamp) div
-        1.days.secs.u256
-    warn "Short block history detected.", earliestBlockTimestamp =
-      earliestBlockTimestamp, days = availableHistoryInDays
+    let availableHistoryInDays =
+      (latestBlockTimestamp - earliestBlockTimestamp) div 1.days.secs.u256
+    warn "Short block history detected.",
+      earliestBlockTimestamp = earliestBlockTimestamp, days = availableHistoryInDays
     return earliestBlockNumber

   return await provider.binarySearchBlockNumberForEpoch(
-    epochTimeUInt256, latestBlockNumber, earliestBlockNumber)
+    epochTimeUInt256, latestBlockNumber, earliestBlockNumber
+  )

-proc pastBlockTag*(provider: Provider,
-                   blocksAgo: int):
-                  Future[BlockTag] {.async: (raises: [ProviderError]).} =
+proc pastBlockTag*(
+    provider: Provider, blocksAgo: int
+): Future[BlockTag] {.async: (raises: [ProviderError]).} =
   let head = await provider.getBlockNumber()
   return BlockTag.init(head - blocksAgo.abs.u256)
diff --git a/codex/contracts/requests.nim b/codex/contracts/requests.nim
index 70434197..48947602 100644
--- a/codex/contracts/requests.nim
+++ b/codex/contracts/requests.nim
@@ -18,20 +18,24 @@ type
     content* {.serialize.}: StorageContent
     expiry* {.serialize.}: UInt256
     nonce*: Nonce
+
   StorageAsk* = object
     slots* {.serialize.}: uint64
     slotSize* {.serialize.}: UInt256
     duration* {.serialize.}: UInt256
     proofProbability* {.serialize.}: UInt256
-    reward* {.serialize.}: UInt256
-    collateral* {.serialize.}: UInt256
+    pricePerBytePerSecond* {.serialize.}: UInt256
+    collateralPerByte* {.serialize.}: UInt256
     maxSlotLoss* {.serialize.}: uint64
+
   StorageContent* = object
     cid* {.serialize.}: string
     merkleRoot*: array[32, byte]
+
   Slot* = object
     request* {.serialize.}: StorageRequest
     slotIndex* {.serialize.}: UInt256
+
   SlotId* = distinct array[32, byte]
   RequestId* = distinct array[32, byte]
   Nonce* = distinct array[32, byte]
@@ -41,6 +45,7 @@ type
     Cancelled
     Finished
     Failed
+
   SlotState* {.pure.} = enum
     Free
     Filled
@@ -80,27 +85,26 @@ proc toHex*[T: distinct](id: T): string =
   type baseType = T.distinctBase
   baseType(id).toHex

-logutils.formatIt(LogFormat.textLines, Nonce): it.short0xHexLog
-logutils.formatIt(LogFormat.textLines, RequestId): it.short0xHexLog
-logutils.formatIt(LogFormat.textLines, SlotId): it.short0xHexLog
-logutils.formatIt(LogFormat.json, Nonce): it.to0xHexLog
-logutils.formatIt(LogFormat.json, RequestId): it.to0xHexLog
-logutils.formatIt(LogFormat.json, SlotId): it.to0xHexLog
+logutils.formatIt(LogFormat.textLines, Nonce):
+  it.short0xHexLog
+logutils.formatIt(LogFormat.textLines, RequestId):
+  it.short0xHexLog
+logutils.formatIt(LogFormat.textLines, SlotId):
+  it.short0xHexLog
+logutils.formatIt(LogFormat.json, Nonce):
+  it.to0xHexLog
+logutils.formatIt(LogFormat.json, RequestId):
+  it.to0xHexLog
+logutils.formatIt(LogFormat.json, SlotId):
+  it.to0xHexLog
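# --- Illustrative sketch, not part of the patch: a new distinct id type
# would opt into both log formats the same way as Nonce/RequestId/SlotId
# above. `ExampleId` is hypothetical, and the helpers are assumed to resolve
# for it the same way they do for the distinct array types above.
type ExampleId = distinct array[32, byte]

logutils.formatIt(LogFormat.textLines, ExampleId):
  it.short0xHexLog
logutils.formatIt(LogFormat.json, ExampleId):
  it.to0xHexLog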
 func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
   StorageRequest(
-    client: tupl[0],
-    ask: tupl[1],
-    content: tupl[2],
-    expiry: tupl[3],
-    nonce: tupl[4]
+    client: tupl[0], ask: tupl[1], content: tupl[2], expiry: tupl[3], nonce: tupl[4]
   )

 func fromTuple(_: type Slot, tupl: tuple): Slot =
-  Slot(
-    request: tupl[0],
-    slotIndex: tupl[1]
-  )
+  Slot(request: tupl[0], slotIndex: tupl[1])

 func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
   StorageAsk(
@@ -108,16 +112,13 @@ func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
     slotSize: tupl[1],
     duration: tupl[2],
     proofProbability: tupl[3],
-    reward: tupl[4],
-    collateral: tupl[5],
-    maxSlotLoss: tupl[6]
+    pricePerBytePerSecond: tupl[4],
+    collateralPerByte: tupl[5],
+    maxSlotLoss: tupl[6],
   )

 func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
-  StorageContent(
-    cid: tupl[0],
-    merkleRoot: tupl[1]
-  )
+  StorageContent(cid: tupl[0], merkleRoot: tupl[1])

 func solidityType*(_: type StorageContent): string =
   solidityType(StorageContent.fieldTypes)
@@ -160,7 +161,7 @@ func decode*(decoder: var AbiDecoder, T: type Slot): ?!T =
   success Slot.fromTuple(tupl)

 func id*(request: StorageRequest): RequestId =
-  let encoding = AbiEncoder.encode((request, ))
+  let encoding = AbiEncoder.encode((request,))
   RequestId(keccak256.digest(encoding).data)

 func slotId*(requestId: RequestId, slotIndex: UInt256): SlotId =
@@ -173,14 +174,20 @@ func slotId*(request: StorageRequest, slotIndex: UInt256): SlotId =
 func id*(slot: Slot): SlotId =
   slotId(slot.request, slot.slotIndex)

-func pricePerSlot*(ask: StorageAsk): UInt256 =
-  ask.duration * ask.reward
+func pricePerSlotPerSecond*(ask: StorageAsk): UInt256 =
+  ask.pricePerBytePerSecond * ask.slotSize

-func price*(ask: StorageAsk): UInt256 =
+func pricePerSlot*(ask: StorageAsk): UInt256 =
+  ask.duration * ask.pricePerSlotPerSecond
+
+func totalPrice*(ask: StorageAsk): UInt256 =
   ask.slots.u256 * ask.pricePerSlot

-func price*(request: StorageRequest): UInt256 =
-  request.ask.price
+func totalPrice*(request: StorageRequest): UInt256 =
+  request.ask.totalPrice
+
+func collateralPerSlot*(ask: StorageAsk): UInt256 =
+  ask.collateralPerByte * ask.slotSize

 func size*(ask: StorageAsk): UInt256 =
   ask.slots.u256 * ask.slotSize
diff --git a/codex/discovery.nim b/codex/discovery.nim
index e3e37d61..9aa8c7d8 100644
--- a/codex/discovery.nim
+++ b/codex/discovery.nim
@@ -32,15 +32,15 @@ export discv5
 logScope:
   topics = "codex discovery"

-type
-  Discovery* = ref object of RootObj
-    protocol*: discv5.Protocol # dht protocol
-    key: PrivateKey # private key
-    peerId: PeerId # the peer id of the local node
-    announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records
-    providerRecord*: ?SignedPeerRecord # record to advertice node connection information, this carry any
-                                       # address that the node can be connected on
-    dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information
+type Discovery* = ref object of RootObj
+  protocol*: discv5.Protocol # dht protocol
+  key: PrivateKey # private key
+  peerId: PeerId # the peer id of the local node
+  announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records
+  providerRecord*: ?SignedPeerRecord
+    # record to advertise node connection information; this carries any
+    # address that the node can be connected on
+  dhtRecord*: ?SignedPeerRecord # record to advertise DHT connection information

 proc toNodeId*(cid: Cid): NodeId =
   ## Cid to discovery id
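# --- Illustrative sketch, not part of the patch: worked numbers for the
# renamed per-byte pricing helpers in codex/contracts/requests.nim above
# (values are arbitrary).
#   pricePerSlotPerSecond = pricePerBytePerSecond * slotSize
#   pricePerSlot          = duration * pricePerSlotPerSecond
#   totalPrice            = slots * pricePerSlot
#   collateralPerSlot     = collateralPerByte * slotSize
let ask = StorageAsk(
  slots: 3,
  slotSize: 1024.u256,
  duration: 600.u256,
  pricePerBytePerSecond: 1.u256,
  collateralPerByte: 2.u256,
)
assert ask.pricePerSlotPerSecond == 1024.u256
assert ask.pricePerSlot == 614_400.u256 # 600 * 1024
assert ask.totalPrice == 1_843_200.u256 # 3 * 614_400
assert ask.collateralPerSlot == 2048.u256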
@@ -54,14 +54,11 @@ proc toNodeId*(host: ca.Address): NodeId =
   readUintBE[256](keccak256.digest(host.toArray).data)

-proc findPeer*(
-  d: Discovery,
-  peerId: PeerId): Future[?PeerRecord] {.async.} =
+proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} =
   trace "protocol.resolve..."
   ## Find peer using the given Discovery object
   ##
-  let
-    node = await d.protocol.resolve(toNodeId(peerId))
+  let node = await d.protocol.resolve(toNodeId(peerId))

   return
     if node.isSome():
@@ -69,37 +66,31 @@ proc findPeer*(
     else:
       PeerRecord.none

-method find*(
-  d: Discovery,
-  cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
+method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
   ## Find block providers
   ##
-  without providers =?
-    (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
+  without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
     warn "Error finding providers for block", cid, error = error.msg

-  return providers.filterIt( not (it.data.peerId == d.peerId) )
+  return providers.filterIt(not (it.data.peerId == d.peerId))

 method provide*(d: Discovery, cid: Cid) {.async, base.} =
   ## Provide a block Cid
   ##
-  let
-    nodes = await d.protocol.addProvider(
-      cid.toNodeId(), d.providerRecord.get)
+  let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)

   if nodes.len <= 0:
     warn "Couldn't provide to any nodes!"

-
 method find*(
-  d: Discovery,
-  host: ca.Address): Future[seq[SignedPeerRecord]] {.async, base.} =
+    d: Discovery, host: ca.Address
+): Future[seq[SignedPeerRecord]] {.async, base.} =
   ## Find host providers
   ##

   trace "Finding providers for host", host = $host
-  without var providers =?
-    (await d.protocol.getProviders(host.toNodeId())).mapFailure, error:
+  without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
+    error:
     trace "Error finding providers for host", host = $host, exc = error.msg
     return
@@ -117,15 +108,11 @@ method provide*(d: Discovery, host: ca.Address) {.async, base.} =
   ##
   trace "Providing host", host = $host
-  let
-    nodes = await d.protocol.addProvider(
-      host.toNodeId(), d.providerRecord.get)
+  let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
   if nodes.len > 0:
     trace "Provided to nodes", nodes = nodes.len

-method removeProvider*(
-  d: Discovery,
-  peerId: PeerId): Future[void] {.base, gcsafe.} =
+method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} =
   ## Remove provider from providers table
   ##

@@ -139,26 +126,24 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
   d.announceAddrs = @addrs

   trace "Updating announce record", addrs = d.announceAddrs
-  d.providerRecord = SignedPeerRecord.init(
-    d.key, PeerRecord.init(d.peerId, d.announceAddrs))
-    .expect("Should construct signed record").some
+  d.providerRecord = SignedPeerRecord
+    .init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
+    .expect("Should construct signed record").some

   if not d.protocol.isNil:
-    d.protocol.updateRecord(d.providerRecord)
-      .expect("Should update SPR")
+    d.protocol.updateRecord(d.providerRecord).expect("Should update SPR")

 proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
   ## Update providers record
   ##

   trace "Updating Dht record", addrs = addrs
-  d.dhtRecord = SignedPeerRecord.init(
-    d.key, PeerRecord.init(d.peerId, @addrs))
-    .expect("Should construct signed record").some
+  d.dhtRecord = SignedPeerRecord
+    .init(d.key, PeerRecord.init(d.peerId, @addrs))
+    .expect("Should construct signed record").some

   if not d.protocol.isNil:
-    d.protocol.updateRecord(d.dhtRecord)
-      .expect("Should update SPR")
+    d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR")

 proc start*(d: Discovery) {.async.} =
   d.protocol.open()
@@ -174,15 +159,13 @@ proc new*(
     bindPort = 0.Port,
     announceAddrs: openArray[MultiAddress],
     bootstrapNodes: openArray[SignedPeerRecord] = [],
-    store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!")
+    store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!"),
 ): Discovery =
   ## Create a new Discovery node instance for the given key and datastore
   ##

-  var
-    self = Discovery(
-      key: key,
-      peerId: PeerId.init(key).expect("Should construct PeerId"))
+  var self =
+    Discovery(key: key, peerId: PeerId.init(key).expect("Should construct PeerId"))

   self.updateAnnounceRecord(announceAddrs)
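# --- Illustrative sketch, not part of the patch: updating the signed peer
# records through the helpers reformatted above. Assumes a constructed
# `d: Discovery`; the multiaddress literal is arbitrary.
let addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/8090").expect("valid address")]
d.updateAnnounceRecord(addrs) # SPR carried in provider records
d.updateDhtRecord(addrs) # SPR advertised for DHT connectivity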
@@ -190,11 +173,8 @@ proc new*(
   # FIXME disable IP limits temporarily so we can run our workshop. Re-enable
   # and figure out proper solution.
   let discoveryConfig = DiscoveryConfig(
-    tableIpLimits: TableIpLimits(
-      tableIpLimit: high(uint),
-      bucketIpLimit:high(uint)
-    ),
-    bitsPerHop: DefaultBitsPerHop
+    tableIpLimits: TableIpLimits(tableIpLimit: high(uint), bucketIpLimit: high(uint)),
+    bitsPerHop: DefaultBitsPerHop,
   )

   # --------------------------------------------------------------------------
@@ -206,6 +186,7 @@ proc new*(
     bootstrapRecords = bootstrapNodes,
     rng = Rng.instance(),
     providers = ProvidersManager.new(store),
-    config = discoveryConfig)
+    config = discoveryConfig,
+  )

   self
diff --git a/codex/erasure/asyncbackend.nim b/codex/erasure/asyncbackend.nim
deleted file mode 100644
index 4827806a..00000000
--- a/codex/erasure/asyncbackend.nim
+++ /dev/null
@@ -1,225 +0,0 @@
-## Nim-Codex
-## Copyright (c) 2024 Status Research & Development GmbH
-## Licensed under either of
-##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
-##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
-## at your option.
-## This file may not be copied, modified, or distributed except according to
-## those terms.
-
-import std/sequtils
-
-import pkg/taskpools
-import pkg/taskpools/flowvars
-import pkg/chronos
-import pkg/chronos/threadsync
-import pkg/questionable/results
-
-import ./backend
-import ../errors
-import ../logutils
-
-logScope:
-  topics = "codex asyncerasure"
-
-const
-  CompletitionTimeout = 1.seconds # Maximum await time for completition after receiving a signal
-  CompletitionRetryDelay = 10.millis
-
-type
-  EncoderBackendPtr = ptr EncoderBackend
-  DecoderBackendPtr = ptr DecoderBackend
-
-  # Args objects are missing seq[seq[byte]] field, to avoid unnecessary data copy
-  EncodeTaskArgs = object
-    signal: ThreadSignalPtr
-    backend: EncoderBackendPtr
-    blockSize: int
-    ecM: int
-
-  DecodeTaskArgs = object
-    signal: ThreadSignalPtr
-    backend: DecoderBackendPtr
-    blockSize: int
-    ecK: int
-
-  SharedArrayHolder*[T] = object
-    data: ptr UncheckedArray[T]
-    size: int
-
-  EncodeTaskResult = Result[SharedArrayHolder[byte], cstring]
-  DecodeTaskResult = Result[SharedArrayHolder[byte], cstring]
-
-proc encodeTask(args: EncodeTaskArgs, data: seq[seq[byte]]): EncodeTaskResult =
-  var
-    data = data.unsafeAddr
-    parity = newSeqWith[seq[byte]](args.ecM, newSeq[byte](args.blockSize))
-
-  try:
-    let res = args.backend[].encode(data[], parity)
-
-    if res.isOk:
-      let
-        resDataSize = parity.len * args.blockSize
-        resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
-        arrHolder = SharedArrayHolder[byte](
-          data: resData,
-          size: resDataSize
-        )
-
-      for i in 0..
-      self.store.getBlock(
-        BlockAddress.init(manifest.treeCid, i)
-      ).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K)
+    pendingBlocks = indicies.map(
+      (i: int) =>
+        self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map(
+          (r: ?!bt.Block) => (r, i)
+        ) # Get the data blocks (first K)
     )

-  proc isFinished(): bool = pendingBlocks.len == 0
+  proc isFinished(): bool =
+    pendingBlocks.len == 0

   proc genNext(): Future[(?!bt.Block, int)] {.async.} =
     let completedFut = await one(pendingBlocks)
@@ -123,29 +124,31 @@ proc getPendingBlocks(
       let (_, index) = await completedFut
       raise newException(
         CatchableError,
-        "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & $index)
+        "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " &
+          $index,
+      )

   AsyncIter[(?!bt.Block, int)].new(genNext, isFinished)

 proc prepareEncodingData(
-  self: Erasure,
-  manifest: Manifest,
-  params: EncodingParams,
-  step: Natural,
-  data: ref seq[seq[byte]],
-  cids: ref seq[Cid],
-  emptyBlock: seq[byte]): Future[?!Natural] {.async.} =
+    self: Erasure,
+    manifest: Manifest,
+    params: EncodingParams,
+    step: Natural,
+    data: ref seq[seq[byte]],
+    cids: ref seq[Cid],
+    emptyBlock: seq[byte],
+): Future[?!Natural] {.async.} =
   ## Prepare data for encoding
   ##

   let
     strategy = params.strategy.init(
-      firstIndex = 0,
-      lastIndex = params.rounded - 1,
-      iterations = params.steps
+      firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
     )
     indicies = toSeq(strategy.getIndicies(step))
-    pendingBlocksIter = self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
+    pendingBlocksIter =
+      self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))

   var resolved = 0
   for fut in pendingBlocksIter:
@@ -164,20 +167,22 @@ proc prepareEncodingData(
     let pos = indexToPos(params.steps, idx, step)
     trace "Padding with empty block", idx
     shallowCopy(data[pos], emptyBlock)
-    without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), err:
+    without emptyBlockCid =?
+        emptyCid(manifest.version, manifest.hcodec, manifest.codec),
+      err:
      return failure(err)
     cids[idx] = emptyBlockCid

   success(resolved.Natural)
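# --- Illustrative sketch, not part of the patch: consuming the AsyncIter
# returned by getPendingBlocks, mirroring the `for fut in ...` pattern used
# in prepareEncodingData above. Each future resolves, in completion order,
# to a (?!bt.Block, int) pair; `erasure`, `manifest` and `k` are assumed to
# exist in the caller.
proc fetchFirst(erasure: Erasure, manifest: Manifest, k: int) {.async.} =
  let iter = erasure.getPendingBlocks(manifest, toSeq(0 ..< k))
  for fut in iter:
    let (blkRes, idx) = await fut
    without blk =? blkRes, err:
      trace "Block retrieval failed", idx, msg = err.msg
      continue
    trace "Retrieved block", idx, cid = blk.cid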

 proc prepareDecodingData(
-  self: Erasure,
-  encoded: Manifest,
-  step: Natural,
-  data: ref seq[seq[byte]],
-  parityData: ref seq[seq[byte]],
-  cids: ref seq[Cid],
-  emptyBlock: seq[byte]): Future[?!(Natural, Natural)] {.async.} =
+    self: Erasure,
+    encoded: Manifest,
+    step: Natural,
+    data: ref seq[seq[byte]],
+    parityData: ref seq[seq[byte]],
+    cids: ref seq[Cid],
+    emptyBlock: seq[byte],
+): Future[?!(Natural, Natural)] {.async.} =
   ## Prepare data for decoding
   ## `encoded`    - the encoded manifest
   ## `step`       - the current step
@@ -189,9 +194,7 @@ proc prepareDecodingData(
   let
     strategy = encoded.protectedStrategy.init(
-      firstIndex = 0,
-      lastIndex = encoded.blocksCount - 1,
-      iterations = encoded.steps
+      firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
     )
     indicies = toSeq(strategy.getIndicies(step))
     pendingBlocksIter = self.getPendingBlocks(encoded, indicies)
@@ -211,20 +214,21 @@ proc prepareDecodingData(
       trace "Failed retrieving a block", idx, treeCid = encoded.treeCid, msg = err.msg
       continue

-    let
-      pos = indexToPos(encoded.steps, idx, step)
+    let pos = indexToPos(encoded.steps, idx, step)

     logScope:
-      cid   = blk.cid
-      idx   = idx
-      pos   = pos
-      step  = step
+      cid = blk.cid
+      idx = idx
+      pos = pos
+      step = step
       empty = blk.isEmpty

     cids[idx] = blk.cid
     if idx >= encoded.rounded:
       trace "Retrieved parity block"
-      shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
+      shallowCopy(
+        parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data
+      )
       parityPieces.inc
     else:
       trace "Retrieved data block"
@@ -236,17 +240,19 @@ proc prepareDecodingData(
   return success (dataPieces.Natural, parityPieces.Natural)

 proc init*(
-  _: type EncodingParams,
-  manifest: Manifest,
-  ecK: Natural, ecM: Natural,
-  strategy: StrategyType): ?!EncodingParams =
+    _: type EncodingParams,
+    manifest: Manifest,
+    ecK: Natural,
+    ecM: Natural,
+    strategy: StrategyType,
+): ?!EncodingParams =
   if ecK > manifest.blocksCount:
     let exc = (ref InsufficientBlocksError)(
-      msg: "Unable to encode manifest, not enough blocks, ecK = " &
-        $ecK &
-        ", blocksCount = " &
-        $manifest.blocksCount,
-      minSize: ecK.NBytes * manifest.blockSize)
+      msg:
+        "Unable to encode manifest, not enough blocks, ecK = " & $ecK &
+        ", blocksCount = " & $manifest.blocksCount,
+      minSize: ecK.NBytes * manifest.blockSize,
+    )
     return failure(exc)

   let
@@ -260,25 +266,23 @@
     rounded: rounded,
     steps: steps,
     blocksCount: blocksCount,
-    strategy: strategy
+    strategy: strategy,
   )
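# --- Illustrative numbers, not part of the patch: for the EncodingParams
# built by `init` above, a 10-block dataset with ecK = 4, ecM = 2 works out
# to (assuming the usual round-up-to-a-multiple-of-ecK layout computed in
# the elided `let` block):
#   rounded     = 12  # 10 rounded up to a multiple of ecK
#   steps       = 3   # rounded div ecK interleaved encoding steps
#   blocksCount = 18  # 12 data/padded blocks + steps * ecM = 6 parity blocks
# Decoding later needs any ecK of the ecK + ecM blocks per step.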

 proc encodeData(
-  self: Erasure,
-  manifest: Manifest,
-  params: EncodingParams
-): Future[?!Manifest] {.async.} =
+    self: Erasure, manifest: Manifest, params: EncodingParams
+): Future[?!Manifest] {.async.} =
   ## Encode blocks pointed to by the protected manifest
   ##
   ## `manifest` - the manifest to encode
   ##

   logScope:
-    steps           = params.steps
-    rounded_blocks  = params.rounded
-    blocks_count    = params.blocksCount
-    ecK             = params.ecK
-    ecM             = params.ecM
+    steps = params.steps
+    rounded_blocks = params.rounded
+    blocks_count = params.blocksCount
+    ecK = params.ecK
+    ecM = params.ecM

   var
     cids = seq[Cid].new()
@@ -288,11 +292,12 @@ proc encodeData(
     cids[].setLen(params.blocksCount)

   try:
-    for step in 0..
i < tree.leavesCount)
+    let idxIter =
+      Iter[Natural].new(recoveredIndices).filter((i: Natural) => i < tree.leavesCount)
     if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
-        return failure(err)
+      return failure(err)
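# --- Illustrative sketch, not part of the patch: expected outputs for the
# two strategies in codex/indexingstrategy.nim, matching the comment tables
# at the top of that file (9 indices split over 3 iterations).
let
  linear = LinearStrategy.init(0, 8, 3)
  stepped = SteppedStrategy.init(0, 8, 3)
assert toSeq(linear.getIndicies(1)) == @[3, 4, 5] # contiguous runs
assert toSeq(stepped.getIndicies(1)) == @[1, 4, 7] # strided by iteration count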

     let decoded = Manifest.new(encoded)
@@ -479,14 +486,14 @@ proc stop*(self: Erasure) {.async.} =
   return

 proc new*(
-  T: type Erasure,
-  store: BlockStore,
-  encoderProvider: EncoderProvider,
-  decoderProvider: DecoderProvider): Erasure =
+    T: type Erasure,
+    store: BlockStore,
+    encoderProvider: EncoderProvider,
+    decoderProvider: DecoderProvider,
+): Erasure =
   ## Create a new Erasure instance for encoding and decoding manifests
   ##
   Erasure(
-    store: store,
-    encoderProvider: encoderProvider,
-    decoderProvider: decoderProvider)
+    store: store, encoderProvider: encoderProvider, decoderProvider: decoderProvider
+  )
diff --git a/codex/errors.nim b/codex/errors.nim
index d98bfc04..f7c2fa6b 100644
--- a/codex/errors.nim
+++ b/codex/errors.nim
@@ -20,13 +20,15 @@ type CodexResult*[T] = Result[T, ref CodexError]

 template mapFailure*[T, V, E](
-  exp: Result[T, V],
-  exc: typedesc[E],
+  exp: Result[T, V], exc: typedesc[E]
 ): Result[T, ref CatchableError] =
   ## Convert `Result[T, E]` to `Result[E, ref CatchableError]`
   ##
-  exp.mapErr(proc (e: V): ref CatchableError = (ref exc)(msg: $e))
+  exp.mapErr(
+    proc(e: V): ref CatchableError =
+      (ref exc)(msg: $e)
+  )

 template mapFailure*[T, V](exp: Result[T, V]): Result[T, ref CatchableError] =
   mapFailure(exp, CodexError)
diff --git a/codex/indexingstrategy.nim b/codex/indexingstrategy.nim
index d8eeba58..063ecd98 100644
--- a/codex/indexingstrategy.nim
+++ b/codex/indexingstrategy.nim
@@ -10,7 +10,7 @@ type
   # 0 => 0, 1, 2
   # 1 => 3, 4, 5
   # 2 => 6, 7, 8
-  LinearStrategy,
+  LinearStrategy

   # Stepped indexing:
   # 0 => 0, 3, 6
@@ -21,31 +21,32 @@ type
   # Representing a strategy for grouping indices (of blocks usually)
   # Given an iteration-count as input, will produce a seq of
   # selected indices.
-
   IndexingError* = object of CodexError
   IndexingWrongIndexError* = object of IndexingError
   IndexingWrongIterationsError* = object of IndexingError

   IndexingStrategy* = object
     strategyType*: StrategyType
-    firstIndex*: int         # Lowest index that can be returned
-    lastIndex*: int          # Highest index that can be returned
-    iterations*: int         # getIndices(iteration) will run from 0 ..< iterations
+    firstIndex*: int # Lowest index that can be returned
+    lastIndex*: int # Highest index that can be returned
+    iterations*: int # getIndices(iteration) will run from 0 ..< iterations
     step*: int

-func checkIteration(self: IndexingStrategy, iteration: int): void {.raises: [IndexingError].} =
+func checkIteration(
+    self: IndexingStrategy, iteration: int
+): void {.raises: [IndexingError].} =
   if iteration >= self.iterations:
     raise newException(
-      IndexingError,
-      "Indexing iteration can't be greater than or equal to iterations.")
+      IndexingError, "Indexing iteration can't be greater than or equal to iterations."
+    )

 func getIter(first, last, step: int): Iter[int] =
   {.cast(noSideEffect).}:
     Iter[int].new(first, last, step)

 func getLinearIndicies(
-  self: IndexingStrategy,
-  iteration: int): Iter[int] {.raises: [IndexingError].} =
+    self: IndexingStrategy, iteration: int
+): Iter[int] {.raises: [IndexingError].} =
   self.checkIteration(iteration)

   let
@@ -55,8 +56,8 @@ func getLinearIndicies(
   getIter(first, last, 1)

 func getSteppedIndicies(
-  self: IndexingStrategy,
-  iteration: int): Iter[int] {.raises: [IndexingError].} =
+    self: IndexingStrategy, iteration: int
+): Iter[int] {.raises: [IndexingError].} =
   self.checkIteration(iteration)

   let
@@ -66,9 +67,8 @@ func getSteppedIndicies(
   getIter(first, last, self.iterations)

 func getIndicies*(
-  self: IndexingStrategy,
-  iteration: int): Iter[int] {.raises: [IndexingError].} =
-
+    self: IndexingStrategy, iteration: int
+): Iter[int] {.raises: [IndexingError].} =
   case self.strategyType
   of StrategyType.LinearStrategy:
     self.getLinearIndicies(iteration)
@@ -76,22 +76,25 @@ func getIndicies*(
     self.getSteppedIndicies(iteration)

 func init*(
-  strategy: StrategyType,
-  firstIndex, lastIndex, iterations: int): IndexingStrategy {.raises: [IndexingError].} =
-
+    strategy: StrategyType, firstIndex, lastIndex, iterations: int
+): IndexingStrategy {.raises: [IndexingError].} =
   if firstIndex > lastIndex:
     raise newException(
       IndexingWrongIndexError,
-      "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex & ")")
+      "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex &
+        ")",
+    )

   if iterations <= 0:
     raise newException(
       IndexingWrongIterationsError,
-      "iterations (" & $iterations & ") must be greater than zero.")
+      "iterations (" & $iterations & ") must be greater than zero.",
+    )

   IndexingStrategy(
     strategyType: strategy,
     firstIndex: firstIndex,
     lastIndex: lastIndex,
     iterations: iterations,
-    step: divUp((lastIndex - firstIndex + 1), iterations))
+    step: divUp((lastIndex - firstIndex + 1), iterations),
+  )
diff --git a/codex/logutils.nim b/codex/logutils.nim
index e24b52d2..b37f6952 100644
--- a/codex/logutils.nim
+++ b/codex/logutils.nim
@@ -123,8 +123,9 @@ func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string =
   short

 func shortHexLog*(long: string): string =
-  if long[0..1] == "0x": result &= "0x"
-  result &= long[2..long.high].shortLog("..", 4, 4)
+  if long[0 .. 1] == "0x":
+    result &= "0x"
+  result &= long[2 .. long.high].shortLog("..", 4, 4)
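# --- Illustrative sketch, not part of the patch: what shortHexLog above
# produces, assuming shortLog keeps `start`/`stop` characters around the
# ellipsis. The input value is arbitrary.
assert "0x2b5e4c3f9a6d8e7f10aa34cc".shortHexLog == "0x2b5e..34cc"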

 func short0xHexLog*[N: static[int], T: array[N, byte]](v: T): string =
   v.to0xHex.shortHexLog

@@ -182,12 +183,16 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
       let v = opts.map(opt => opt.formatJsonOption)
       setProperty(r, key, json.`%`(v))

-    proc setProperty*(r: var JsonRecord, key: string, val: seq[T]) {.raises:[ValueError, IOError].} =
+    proc setProperty*(
+        r: var JsonRecord, key: string, val: seq[T]
+    ) {.raises: [ValueError, IOError].} =
       var it {.inject, used.}: T
       let v = val.map(it => body)
       setProperty(r, key, json.`%`(v))

-    proc setProperty*(r: var JsonRecord, key: string, val: T) {.raises:[ValueError, IOError].} =
+    proc setProperty*(
+        r: var JsonRecord, key: string, val: T
+    ) {.raises: [ValueError, IOError].} =
       var it {.inject, used.}: T = val
       let v = body
       setProperty(r, key, json.`%`(v))
@@ -218,23 +223,35 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
       let v = opts.map(opt => opt.formatTextLineOption)
       setProperty(r, key, v.formatTextLineSeq)

-    proc setProperty*(r: var TextLineRecord, key: string, val: seq[T]) {.raises:[ValueError, IOError].} =
+    proc setProperty*(
+        r: var TextLineRecord, key: string, val: seq[T]
+    ) {.raises: [ValueError, IOError].} =
       var it {.inject, used.}: T
       let v = val.map(it => body)
       setProperty(r, key, v.formatTextLineSeq)

-    proc setProperty*(r: var TextLineRecord, key: string, val: T) {.raises:[ValueError, IOError].} =
+    proc setProperty*(
+        r: var TextLineRecord, key: string, val: T
+    ) {.raises: [ValueError, IOError].} =
       var it {.inject, used.}: T = val
       let v = body
       setProperty(r, key, v)

 template formatIt*(T: type, body: untyped) {.dirty.} =
-  formatIt(LogFormat.textLines, T): body
-  formatIt(LogFormat.json, T): body
+  formatIt(LogFormat.textLines, T):
+    body
+  formatIt(LogFormat.json, T):
+    body

-formatIt(LogFormat.textLines, Cid): shortLog($it)
-formatIt(LogFormat.json, Cid): $it
-formatIt(UInt256): $it
-formatIt(MultiAddress): $it
-formatIt(LogFormat.textLines, array[32, byte]): it.short0xHexLog
-formatIt(LogFormat.json, array[32, byte]): it.to0xHex
+formatIt(LogFormat.textLines, Cid):
+  shortLog($it)
+formatIt(LogFormat.json, Cid):
+  $it
+formatIt(UInt256):
+  $it
+formatIt(MultiAddress):
+  $it
+formatIt(LogFormat.textLines, array[32, byte]):
+  it.short0xHexLog
+formatIt(LogFormat.json, array[32, byte]):
+  it.to0xHex
diff --git a/codex/manifest/coders.nim b/codex/manifest/coders.nim
index 4eed4299..0c461e45 100644
--- a/codex/manifest/coders.nim
+++ b/codex/manifest/coders.nim
@@ -10,9 +10,10 @@
 # This module implements serialization and deserialization of Manifest

 import pkg/upraises
-import times
+import times

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import std/tables
 import std/sequtils
@@ -33,7 +34,7 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
   ## multicodec container (Dag-pb) for now
   ##

-  ? manifest.verify()
+  ?manifest.verify()
   var pbNode = initProtoBuffer()

   # NOTE: The `Data` field in the the `dag-pb`
@@ -98,7 +99,7 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
   if manifest.filename.isSome:
     header.write(8, manifest.filename.get())

-  if manifest.mimetype.isSome:
+  if manifest.mimetype.isSome:
     header.write(9, manifest.mimetype.get())

   if manifest.uploadedAt.isSome:
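# --- Illustrative sketch, not part of the patch: the encode/decode pair in
# this module round-trips a manifest, which is what the equality below
# relies on; `manifest` is assumed to be a valid Manifest instance.
without bytes =? manifest.encode(), err:
  raise newException(CodexError, "encode failed: " & err.msg)
without decoded =? Manifest.decode(bytes), err:
  raise newException(CodexError, "decode failed: " & err.msg)
assert decoded == manifest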
@@ -206,15 +207,14 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
   if pbVerificationInfo.getField(4, verifiableStrategy).isErr:
     return failure("Unable to decode `verifiableStrategy` from manifest!")

-  let
-    treeCid = ? Cid.init(treeCidBuf).mapFailure
+  let treeCid = ?Cid.init(treeCidBuf).mapFailure

   var filenameOption = if filename.len == 0: string.none else: filename.some
   var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some
   var uploadedAtOption = if uploadedAt == 0: int64.none else: uploadedAt.int64.some

-  let
-    self = if protected:
+  let self =
+    if protected:
       Manifest.new(
         treeCid = treeCid,
         datasetSize = datasetSize.NBytes,
@@ -224,37 +224,39 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
         codec = codec.MultiCodec,
         ecK = ecK.int,
         ecM = ecM.int,
-        originalTreeCid = ? Cid.init(originalTreeCid).mapFailure,
+        originalTreeCid = ?Cid.init(originalTreeCid).mapFailure,
         originalDatasetSize = originalDatasetSize.NBytes,
         strategy = StrategyType(protectedStrategy),
         filename = filenameOption,
         mimetype = mimetypeOption,
-        uploadedAt = uploadedAtOption)
-      else:
-        Manifest.new(
-          treeCid = treeCid,
-          datasetSize = datasetSize.NBytes,
-          blockSize = blockSize.NBytes,
-          version = CidVersion(version),
-          hcodec = hcodec.MultiCodec,
-          codec = codec.MultiCodec,
-          filename = filenameOption,
-          mimetype = mimetypeOption,
-          uploadedAt = uploadedAtOption)
+        uploadedAt = uploadedAtOption,
+      )
+    else:
+      Manifest.new(
+        treeCid = treeCid,
+        datasetSize = datasetSize.NBytes,
+        blockSize = blockSize.NBytes,
+        version = CidVersion(version),
+        hcodec = hcodec.MultiCodec,
+        codec = codec.MultiCodec,
+        filename = filenameOption,
+        mimetype = mimetypeOption,
+        uploadedAt = uploadedAtOption,
+      )

-  ? self.verify()
+  ?self.verify()

   if verifiable:
     let
-      verifyRootCid = ? Cid.init(verifyRoot).mapFailure
-      slotRootCids = slotRoots.mapIt(? Cid.init(it).mapFailure)
+      verifyRootCid = ?Cid.init(verifyRoot).mapFailure
+      slotRootCids = slotRoots.mapIt(?Cid.init(it).mapFailure)

     return Manifest.new(
       manifest = self,
       verifyRoot = verifyRootCid,
       slotRoots = slotRootCids,
       cellSize = cellSize.NBytes,
-      strategy = StrategyType(verifiableStrategy)
+      strategy = StrategyType(verifiableStrategy),
     )

   self.success
@@ -263,7 +265,7 @@ func decode*(_: type Manifest, blk: Block): ?!Manifest =
   ## Decode a manifest using `decoder`
   ##

-  if not ? blk.cid.isManifest:
+  if not ?blk.cid.isManifest:
     return failure "Cid not a manifest codec"

   Manifest.decode(blk.data)
diff --git a/codex/manifest/manifest.nim b/codex/manifest/manifest.nim
index 73644dd2..6e0d1b80 100644
--- a/codex/manifest/manifest.nim
+++ b/codex/manifest/manifest.nim
@@ -11,7 +11,8 @@

 import pkg/upraises

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import pkg/libp2p/protobuf/minprotobuf
 import pkg/libp2p/[cid, multihash, multicodec]
@@ -25,37 +26,37 @@ import ../blocktype
 import ../indexingstrategy
 import ../logutils

 # TODO: Manifest should be reworked to more concrete types,
 # perhaps using inheritance
-type
-  Manifest* = ref object of RootObj
-    treeCid {.serialize.}: Cid # Root of the merkle tree
-    datasetSize {.serialize.}: NBytes # Total size of all blocks
-    blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
-    codec: MultiCodec # Dataset codec
-    hcodec: MultiCodec # Multihash codec
-    version: CidVersion # Cid version
-    filename {.serialize.}: ?string # The filename of the content uploaded (optional)
-    mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
-    uploadedAt {.serialize.}: ?int64 # The UTC creation timestamp in seconds
-    case protected {.serialize.}: bool # Protected datasets have erasure coded info
-    of true:
-      ecK: int # Number of blocks to encode
-      ecM: int # Number of resulting parity blocks
-      originalTreeCid: Cid # The original root of the dataset being erasure coded
-      originalDatasetSize: NBytes
-      protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
-      case verifiable {.serialize.}: bool # Verifiable datasets can be used to generate storage proofs
-      of true:
-        verifyRoot: Cid # Root of the top level merkle tree built from slot roots
-        slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks
-        cellSize: NBytes # Size of each slot cell
-        verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots
-      else:
-        discard
-    else:
-      discard
+type Manifest* = ref object of RootObj
+  treeCid {.serialize.}: Cid # Root of the merkle tree
+  datasetSize {.serialize.}: NBytes # Total size of all blocks
+  blockSize {.serialize.}: NBytes
+    # Size of each contained block (might not be needed if blocks are len-prefixed)
+  codec: MultiCodec # Dataset codec
+  hcodec: MultiCodec # Multihash codec
+  version: CidVersion # Cid version
+  filename {.serialize.}: ?string # The filename of the content uploaded (optional)
+  mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
+  uploadedAt {.serialize.}: ?int64 # The UTC creation timestamp in seconds
+  case protected {.serialize.}: bool # Protected datasets have erasure coded info
+  of true:
+    ecK: int # Number of blocks to encode
+    ecM: int # Number of resulting parity blocks
+    originalTreeCid: Cid # The original root of the dataset being erasure coded
+    originalDatasetSize: NBytes
+    protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
+    case verifiable {.serialize.}: bool
+    # Verifiable datasets can be used to generate storage proofs
+    of true:
+      verifyRoot: Cid # Root of the top level merkle tree built from slot roots
+      slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks
+      cellSize: NBytes # Size of each slot cell
+      verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots
+    else:
+      discard
+  else:
+    discard
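# --- Illustrative note, not part of the patch: `Manifest` is a Nim case
# object, so the erasure-coding fields only exist when `protected` is true,
# and touching them on an unprotected manifest raises FieldDefect at
# runtime. `manifest` below is assumed to be unprotected.
assert not manifest.protected
doAssertRaises(FieldDefect):
  discard manifest.ecK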

 ############################################################
 # Accessors
@@ -137,7 +138,7 @@ func uploadedAt*(self: Manifest): ?int64 =
 ############################################################

 func isManifest*(cid: Cid): ?!bool =
-  success (ManifestCodec == ? cid.contentType().mapFailure(CodexError))
+  success (ManifestCodec == ?cid.contentType().mapFailure(CodexError))

 func isManifest*(mc: MultiCodec): ?!bool =
   success mc == ManifestCodec
@@ -159,7 +160,8 @@ func verify*(self: Manifest): ?!void =
   ##

   if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
-    return failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
+    return
+      failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")

   return success()

@@ -167,41 +169,32 @@ func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} =
   self.treeCid.success

 func `==`*(a, b: Manifest): bool =
-  (a.treeCid == b.treeCid) and
-  (a.datasetSize == b.datasetSize) and
-  (a.blockSize == b.blockSize) and
-  (a.version == b.version) and
-  (a.hcodec == b.hcodec) and
-  (a.codec == b.codec) and
-  (a.protected == b.protected) and
-  (a.filename == b.filename) and
-  (a.mimetype == b.mimetype) and
-  (a.uploadedAt == b.uploadedAt) and
-  (if a.protected:
-    (a.ecK == b.ecK) and
-    (a.ecM == b.ecM) and
-    (a.originalTreeCid == b.originalTreeCid) and
-    (a.originalDatasetSize == b.originalDatasetSize) and
-    (a.protectedStrategy == b.protectedStrategy) and
-    (a.verifiable == b.verifiable) and
-    (if a.verifiable:
-      (a.verifyRoot == b.verifyRoot) and
-      (a.slotRoots == b.slotRoots) and
-      (a.cellSize == b.cellSize) and
-      (a.verifiableStrategy == b.verifiableStrategy)
-    else:
-      true)
-  else:
-    true)
+  (a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and
+    (a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and
+    (a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and
+    (a.mimetype == b.mimetype) and (a.uploadedAt == b.uploadedAt) and (
+      if a.protected:
+        (a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and
+          (a.originalDatasetSize == b.originalDatasetSize) and
+          (a.protectedStrategy == b.protectedStrategy) and (a.verifiable == b.verifiable) and
+          (
+            if a.verifiable:
+              (a.verifyRoot == b.verifyRoot) and (a.slotRoots == b.slotRoots) and
+                (a.cellSize == b.cellSize) and (
+                  a.verifiableStrategy == b.verifiableStrategy
+                )
+            else:
+              true
+          )
+      else:
+        true
+    )

 func `$`*(self: Manifest): string =
-  result = "treeCid: " & $self.treeCid &
-    ", datasetSize: " & $self.datasetSize &
-    ", blockSize: " & $self.blockSize &
-    ", version: " & $self.version &
-    ", hcodec: " & $self.hcodec &
-    ", codec: " & $self.codec &
-    ", protected: " & $self.protected
+  result =
+    "treeCid: " & $self.treeCid & ", datasetSize: " & $self.datasetSize & ", blockSize: " &
+    $self.blockSize & ", version: " & $self.version & ", hcodec: " & $self.hcodec &
+    ", codec: " & $self.codec & ", protected: " & $self.protected

   if self.filename.isSome:
     result &= ", filename: " & $self.filename
@@ -212,19 +205,19 @@ func `$`*(self: Manifest): string =
   if self.uploadedAt.isSome:
     result &= ", uploadedAt: " & $self.uploadedAt

-  result &= (if self.protected:
-    ", ecK: " & $self.ecK &
-    ", ecM: " & $self.ecM &
-    ", originalTreeCid: " & $self.originalTreeCid &
-    ", originalDatasetSize: " & $self.originalDatasetSize &
-    ", verifiable: " & $self.verifiable &
-    (if self.verifiable:
-      ", verifyRoot: " & $self.verifyRoot &
-      ", slotRoots: " & $self.slotRoots
-    else:
-      "")
-  else:
-    "")
+  result &= (
+    if self.protected:
+      ", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " &
+        $self.originalTreeCid & ", originalDatasetSize: " & $self.originalDatasetSize &
+        ", verifiable: " & $self.verifiable & (
+          if self.verifiable:
+            ", verifyRoot: " & $self.verifyRoot & ", slotRoots: " & $self.slotRoots
+          else:
+            ""
+        )
+    else:
+      ""
+  )

   return result

@@ -233,18 +226,18 @@ func `$`*(self: Manifest): string =
 ############################################################

 func new*(
-  T: type Manifest,
-  treeCid: Cid,
-  blockSize: NBytes,
-  datasetSize: NBytes,
-  version: CidVersion = CIDv1,
-  hcodec = Sha256HashCodec,
-  codec = BlockCodec,
-  protected = false,
-  filename: ?string = string.none,
-  mimetype: ?string = string.none,
-  uploadedAt: ?int64 = int64.none): Manifest =
-
+    T: type Manifest,
+    treeCid: Cid,
+    blockSize: NBytes,
+    datasetSize: NBytes,
+    version: CidVersion = CIDv1,
+    hcodec = Sha256HashCodec,
+    codec = BlockCodec,
+    protected = false,
+    filename: ?string = string.none,
+    mimetype: ?string = string.none,
+    uploadedAt: ?int64 = int64.none,
+): Manifest =
   T(
     treeCid: treeCid,
     blockSize: blockSize,
@@ -255,15 +248,17 @@ func new*(
     protected: protected,
     filename: filename,
     mimetype: mimetype,
-    uploadedAt: uploadedAt)
+    uploadedAt: uploadedAt,
+  )

 func new*(
-  T: type Manifest,
-  manifest: Manifest,
-  treeCid: Cid,
-  datasetSize: NBytes,
-  ecK, ecM: int,
-  strategy = SteppedStrategy): Manifest =
+    T: type Manifest,
+    manifest: Manifest,
+    treeCid: Cid,
+    datasetSize: NBytes,
+    ecK, ecM: int,
+    strategy = SteppedStrategy,
+): Manifest =
   ## Create an erasure protected dataset from an
   ## unprotected one
   ##
@@ -276,18 +271,17 @@ func new*(
     hcodec: manifest.hcodec,
     blockSize: manifest.blockSize,
     protected: true,
-    ecK: ecK, ecM: ecM,
+    ecK: ecK,
+    ecM: ecM,
     originalTreeCid: manifest.treeCid,
     originalDatasetSize: manifest.datasetSize,
     protectedStrategy: strategy,
     filename: manifest.filename,
     mimetype: manifest.mimetype,
-    uploadedAt: manifest.uploadedAt
-  )
+    uploadedAt: manifest.uploadedAt,
+  )

-func new*(
-  T: type Manifest,
-  manifest: Manifest): Manifest =
+func new*(T: type Manifest, manifest: Manifest): Manifest =
   ## Create an unprotected dataset from an
   ## erasure protected one
   ##
@@ -302,25 +296,26 @@ func new*(
     protected: false,
     filename: manifest.filename,
     mimetype: manifest.mimetype,
-    uploadedAt: manifest.uploadedAt)
+    uploadedAt: manifest.uploadedAt,
+  )

 func new*(
-  T: type Manifest,
-  treeCid: Cid,
-  datasetSize: NBytes,
-  blockSize: NBytes,
-  version: CidVersion,
-  hcodec: MultiCodec,
-  codec: MultiCodec,
-  ecK: int,
-  ecM: int,
-  originalTreeCid: Cid,
-  originalDatasetSize: NBytes,
-  strategy = SteppedStrategy,
-  filename: ?string = string.none,
-  mimetype: ?string = string.none,
-  uploadedAt: ?int64 = int64.none): Manifest =
-
+    T: type Manifest,
+    treeCid: Cid,
+    datasetSize: NBytes,
+    blockSize: NBytes,
+    version: CidVersion,
+    hcodec: MultiCodec,
+    codec: MultiCodec,
+    ecK: int,
+    ecM: int,
+    originalTreeCid: Cid,
+    originalDatasetSize: NBytes,
+    strategy = SteppedStrategy,
+    filename: ?string = string.none,
+    mimetype: ?string = string.none,
+    uploadedAt: ?int64 = int64.none,
+): Manifest =
   Manifest(
     treeCid: treeCid,
     datasetSize: datasetSize,
@@ -334,28 +329,30 @@ func new*(
     originalTreeCid: originalTreeCid,
     originalDatasetSize: originalDatasetSize,
     protectedStrategy: strategy,
-    filename: filename,
+    filename: filename,
     mimetype: mimetype,
-    uploadedAt: uploadedAt)
+    uploadedAt: uploadedAt,
+  )

 func new*(
-  T: type Manifest,
-  manifest: Manifest,
-  verifyRoot: Cid,
-  slotRoots: openArray[Cid],
-  cellSize = DefaultCellSize,
-  strategy = LinearStrategy): ?!Manifest =
+    T: type Manifest,
+    manifest: Manifest,
+    verifyRoot: Cid,
+    slotRoots: openArray[Cid],
+    cellSize = DefaultCellSize,
+    strategy = LinearStrategy,
+): ?!Manifest =
   ## Create a verifiable dataset from an
   ## protected one
   ##
   if not manifest.protected:
     return failure newException(
-      CodexError, "Can create verifiable manifest only from protected manifest.")
+      CodexError, "Can create verifiable manifest only from protected manifest."
+    )

   if slotRoots.len != manifest.numSlots:
-    return failure newException(
-      CodexError, "Wrong number of slot roots.")
+    return failure newException(CodexError, "Wrong number of slot roots.")

   success Manifest(
     treeCid: manifest.treeCid,
@@ -377,12 +374,10 @@ func new*(
     verifiableStrategy: strategy,
     filename: manifest.filename,
     mimetype: manifest.mimetype,
-    uploadedAt: manifest.uploadedAt
-  )
+    uploadedAt: manifest.uploadedAt,
+  )

-func new*(
-  T: type Manifest,
-  data: openArray[byte]): ?!Manifest =
+func new*(T: type Manifest, data: openArray[byte]): ?!Manifest =
   ## Create a manifest instance from given data
   ##

diff --git a/codex/market.nim b/codex/market.nim
index 38df9669..bc325cd9 100644
--- a/codex/market.nim
+++ b/codex/market.nim
@@ -19,16 +19,17 @@ type
   Market* = ref object of RootObj
   MarketError* = object of CodexError
   Subscription* = ref object of RootObj
-  OnRequest* = proc(id: RequestId,
-                    ask: StorageAsk,
-                    expiry: UInt256) {.gcsafe, upraises:[].}
+  OnRequest* =
+    proc(id: RequestId, ask: StorageAsk, expiry: UInt256) {.gcsafe, upraises: [].}
   OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
-  OnSlotFilled* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises:[].}
+  OnSlotFilled* =
+    proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
   OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
-  OnSlotReservationsFull* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
-  OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
-  OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
-  OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises:[].}
+  OnSlotReservationsFull* =
+    proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
+  OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
+  OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
+  OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].}
   ProofChallenge* = array[32, byte]

   # Marketplace events -- located here due to the Market abstraction
@@ -37,21 +38,28 @@ type
     requestId*: RequestId
     ask*: StorageAsk
     expiry*: UInt256
+
   SlotFilled* = object of MarketplaceEvent
     requestId* {.indexed.}: RequestId
     slotIndex*: UInt256
+
   SlotFreed* = object of MarketplaceEvent
     requestId* {.indexed.}: RequestId
     slotIndex*: UInt256
+
   SlotReservationsFull* = object of MarketplaceEvent
     requestId* {.indexed.}: RequestId
     slotIndex*: UInt256
+
   RequestFulfilled* = object of MarketplaceEvent
     requestId* {.indexed.}: RequestId
+
   RequestCancelled* = object of MarketplaceEvent
     requestId* {.indexed.}: RequestId
+
   RequestFailed* = object of MarketplaceEvent
     requestId* {.indexed.}: RequestId
+
   ProofSubmitted* = object of MarketplaceEvent
     id*: SlotId

@@ -81,8 +89,7 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
   let pntr = await market.getPointer(slotId)
   return pntr < downtime

-method requestStorage*(market: Market,
-                       request: StorageRequest) {.base, async.} =
+method requestStorage*(market: Market, request: StorageRequest) {.base, async.} =
   raiseAssert("not implemented")

 method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
@@ -91,182 +98,173 @@ method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
 method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
   raiseAssert("not implemented")

-method getRequest*(market: Market,
-                   id: RequestId):
-                  Future[?StorageRequest] {.base, async.} =
+method getRequest*(
+    market: Market, id: RequestId
+): Future[?StorageRequest] {.base, async.} =
   raiseAssert("not implemented")

-method requestState*(market: Market,
-                     requestId: RequestId): Future[?RequestState] {.base, async.} =
+method requestState*(
+    market: Market, requestId: RequestId
+): Future[?RequestState] {.base, async.} =
   raiseAssert("not implemented")

-method slotState*(market: Market,
-                  slotId: SlotId): Future[SlotState] {.base, async.} =
+method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} =
   raiseAssert("not implemented")

-method getRequestEnd*(market: Market,
-                      id: RequestId): Future[SecondsSince1970] {.base, async.} =
+method getRequestEnd*(
+    market: Market, id: RequestId
+): Future[SecondsSince1970] {.base, async.} =
   raiseAssert("not implemented")

-method requestExpiresAt*(market: Market,
-                         id: RequestId): Future[SecondsSince1970] {.base, async.} =
+method requestExpiresAt*(
+    market: Market, id: RequestId
+): Future[SecondsSince1970] {.base, async.} =
   raiseAssert("not implemented")

-method getHost*(market: Market,
-                requestId: RequestId,
-                slotIndex: UInt256): Future[?Address] {.base, async.} =
+method getHost*(
+    market: Market, requestId: RequestId, slotIndex: UInt256
+): Future[?Address] {.base, async.} =
   raiseAssert("not implemented")

-method getActiveSlot*(
-  market: Market,
-  slotId: SlotId): Future[?Slot] {.base, async.} =
-
+method currentCollateral*(
+    market: Market, slotId: SlotId
+): Future[UInt256] {.base, async.} =
   raiseAssert("not implemented")

-method fillSlot*(market: Market,
-                 requestId: RequestId,
-                 slotIndex: UInt256,
-                 proof: Groth16Proof,
-                 collateral: UInt256) {.base, async.} =
+method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
+  raiseAssert("not implemented")
+
+method fillSlot*(
+    market: Market,
+    requestId: RequestId,
+    slotIndex: UInt256,
+    proof: Groth16Proof,
+    collateral: UInt256,
+) {.base, async.} =
   raiseAssert("not implemented")

 method freeSlot*(market: Market, slotId: SlotId) {.base, async.} =
   raiseAssert("not implemented")

-method withdrawFunds*(market: Market,
-                      requestId: RequestId) {.base, async.} =
+method withdrawFunds*(market: Market, requestId: RequestId) {.base, async.} =
   raiseAssert("not implemented")

-method subscribeRequests*(market: Market,
-                          callback: OnRequest):
-                         Future[Subscription] {.base, async.} =
+method subscribeRequests*(
+    market: Market, callback: OnRequest
+): Future[Subscription] {.base, async.} =
   raiseAssert("not implemented")

-method isProofRequired*(market: Market,
-                        id: SlotId): Future[bool] {.base, async.} =
+method isProofRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
   raiseAssert("not implemented")

-method willProofBeRequired*(market: Market,
-                            id: SlotId): Future[bool] {.base, async.} =
+method willProofBeRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
   raiseAssert("not implemented")

-method getChallenge*(market: Market, id: SlotId): Future[ProofChallenge] {.base,
async.} = +method getChallenge*( + market: Market, id: SlotId +): Future[ProofChallenge] {.base, async.} = raiseAssert("not implemented") -method submitProof*(market: Market, - id: SlotId, - proof: Groth16Proof) {.base, async.} = +method submitProof*(market: Market, id: SlotId, proof: Groth16Proof) {.base, async.} = raiseAssert("not implemented") -method markProofAsMissing*(market: Market, - id: SlotId, - period: Period) {.base, async.} = +method markProofAsMissing*(market: Market, id: SlotId, period: Period) {.base, async.} = raiseAssert("not implemented") -method canProofBeMarkedAsMissing*(market: Market, - id: SlotId, - period: Period): Future[bool] {.base, async.} = +method canProofBeMarkedAsMissing*( + market: Market, id: SlotId, period: Period +): Future[bool] {.base, async.} = raiseAssert("not implemented") method reserveSlot*( - market: Market, - requestId: RequestId, - slotIndex: UInt256) {.base, async.} = - + market: Market, requestId: RequestId, slotIndex: UInt256 +) {.base, async.} = raiseAssert("not implemented") method canReserveSlot*( - market: Market, - requestId: RequestId, - slotIndex: UInt256): Future[bool] {.base, async.} = - + market: Market, requestId: RequestId, slotIndex: UInt256 +): Future[bool] {.base, async.} = raiseAssert("not implemented") -method subscribeFulfillment*(market: Market, - callback: OnFulfillment): - Future[Subscription] {.base, async.} = +method subscribeFulfillment*( + market: Market, callback: OnFulfillment +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeFulfillment*(market: Market, - requestId: RequestId, - callback: OnFulfillment): - Future[Subscription] {.base, async.} = +method subscribeFulfillment*( + market: Market, requestId: RequestId, callback: OnFulfillment +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeSlotFilled*(market: Market, - callback: OnSlotFilled): - Future[Subscription] {.base, async.} = +method subscribeSlotFilled*( + market: Market, callback: OnSlotFilled +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeSlotFilled*(market: Market, - requestId: RequestId, - slotIndex: UInt256, - callback: OnSlotFilled): - Future[Subscription] {.base, async.} = +method subscribeSlotFilled*( + market: Market, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeSlotFreed*(market: Market, - callback: OnSlotFreed): - Future[Subscription] {.base, async.} = +method subscribeSlotFreed*( + market: Market, callback: OnSlotFreed +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") method subscribeSlotReservationsFull*( - market: Market, - callback: OnSlotReservationsFull): Future[Subscription] {.base, async.} = - + market: Market, callback: OnSlotReservationsFull +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeRequestCancelled*(market: Market, - callback: OnRequestCancelled): - Future[Subscription] {.base, async.} = +method subscribeRequestCancelled*( + market: Market, callback: OnRequestCancelled +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeRequestCancelled*(market: Market, - requestId: RequestId, - callback: OnRequestCancelled): - Future[Subscription] {.base, async.} = +method subscribeRequestCancelled*( + market: Market, requestId: RequestId, callback: OnRequestCancelled +): Future[Subscription] {.base, async.} 
= raiseAssert("not implemented") -method subscribeRequestFailed*(market: Market, - callback: OnRequestFailed): - Future[Subscription] {.base, async.} = +method subscribeRequestFailed*( + market: Market, callback: OnRequestFailed +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeRequestFailed*(market: Market, - requestId: RequestId, - callback: OnRequestFailed): - Future[Subscription] {.base, async.} = +method subscribeRequestFailed*( + market: Market, requestId: RequestId, callback: OnRequestFailed +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeProofSubmission*(market: Market, - callback: OnProofSubmitted): - Future[Subscription] {.base, async.} = +method subscribeProofSubmission*( + market: Market, callback: OnProofSubmitted +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} = +method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} = raiseAssert("not implemented") method queryPastSlotFilledEvents*( - market: Market, - fromBlock: BlockTag): Future[seq[SlotFilled]] {.base, async.} = + market: Market, fromBlock: BlockTag +): Future[seq[SlotFilled]] {.base, async.} = raiseAssert("not implemented") method queryPastSlotFilledEvents*( - market: Market, - blocksAgo: int): Future[seq[SlotFilled]] {.base, async.} = + market: Market, blocksAgo: int +): Future[seq[SlotFilled]] {.base, async.} = raiseAssert("not implemented") method queryPastSlotFilledEvents*( - market: Market, - fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.base, async.} = + market: Market, fromTime: SecondsSince1970 +): Future[seq[SlotFilled]] {.base, async.} = raiseAssert("not implemented") method queryPastStorageRequestedEvents*( - market: Market, - fromBlock: BlockTag): Future[seq[StorageRequested]] {.base, async.} = + market: Market, fromBlock: BlockTag +): Future[seq[StorageRequested]] {.base, async.} = raiseAssert("not implemented") method queryPastStorageRequestedEvents*( - market: Market, - blocksAgo: int): Future[seq[StorageRequested]] {.base, async.} = + market: Market, blocksAgo: int +): Future[seq[StorageRequested]] {.base, async.} = raiseAssert("not implemented") diff --git a/codex/merkletree/codex/coders.nim b/codex/merkletree/codex/coders.nim index a2d5a24b..b8209991 100644 --- a/codex/merkletree/codex/coders.nim +++ b/codex/merkletree/codex/coders.nim @@ -9,7 +9,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/libp2p import pkg/questionable @@ -42,8 +43,8 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree = var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize) var mcodecCode: uint64 var leavesCount: uint64 - discard ? pb.getField(1, mcodecCode).mapFailure - discard ? pb.getField(2, leavesCount).mapFailure + discard ?pb.getField(1, mcodecCode).mapFailure + discard ?pb.getField(2, leavesCount).mapFailure let mcodec = MultiCodec.codec(mcodecCode.int) if mcodec == InvalidMultiCodec: @@ -53,10 +54,10 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree = nodesBuff: seq[seq[byte]] nodes: seq[ByteHash] - if ? pb.getRepeatedField(3, nodesBuff).mapFailure: + if ?pb.getRepeatedField(3, nodesBuff).mapFailure: for nodeBuff in nodesBuff: var node: ByteHash - discard ? 
initProtoBuffer(nodeBuff).getField(1, node).mapFailure + discard ?initProtoBuffer(nodeBuff).getField(1, node).mapFailure nodes.add node CodexTree.fromNodes(mcodec, nodes, leavesCount.int) @@ -81,32 +82,29 @@ proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof = var mcodecCode: uint64 var index: uint64 var nleaves: uint64 - discard ? pb.getField(1, mcodecCode).mapFailure + discard ?pb.getField(1, mcodecCode).mapFailure let mcodec = MultiCodec.codec(mcodecCode.int) if mcodec == InvalidMultiCodec: return failure("Invalid MultiCodec code " & $mcodecCode) - discard ? pb.getField(2, index).mapFailure - discard ? pb.getField(3, nleaves).mapFailure + discard ?pb.getField(2, index).mapFailure + discard ?pb.getField(3, nleaves).mapFailure var nodesBuff: seq[seq[byte]] nodes: seq[ByteHash] - if ? pb.getRepeatedField(4, nodesBuff).mapFailure: + if ?pb.getRepeatedField(4, nodesBuff).mapFailure: for nodeBuff in nodesBuff: var node: ByteHash let nodePb = initProtoBuffer(nodeBuff) - discard ? nodePb.getField(1, node).mapFailure + discard ?nodePb.getField(1, node).mapFailure nodes.add node CodexProof.init(mcodec, index.int, nleaves.int, nodes) -proc fromJson*( - _: type CodexProof, - json: JsonNode -): ?!CodexProof = +proc fromJson*(_: type CodexProof, json: JsonNode): ?!CodexProof = expectJsonKind(Cid, JString, json) var bytes: seq[byte] try: @@ -116,4 +114,5 @@ proc fromJson*( CodexProof.decode(bytes) -func `%`*(proof: CodexProof): JsonNode = % byteutils.toHex(proof.encode()) +func `%`*(proof: CodexProof): JsonNode = + %byteutils.toHex(proof.encode()) diff --git a/codex/merkletree/codex/codex.nim b/codex/merkletree/codex/codex.nim index 72b044f2..e287dfac 100644 --- a/codex/merkletree/codex/codex.nim +++ b/codex/merkletree/codex/codex.nim @@ -32,10 +32,10 @@ logScope: type ByteTreeKey* {.pure.} = enum - KeyNone = 0x0.byte - KeyBottomLayer = 0x1.byte - KeyOdd = 0x2.byte - KeyOddAndBottomLayer = 0x3.byte + KeyNone = 0x0.byte + KeyBottomLayer = 0x1.byte + KeyOdd = 0x2.byte + KeyOddAndBottomLayer = 0x3.byte ByteHash* = seq[byte] ByteTree* = MerkleTree[ByteHash, ByteTreeKey] @@ -56,8 +56,7 @@ proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} = const CodeHashes = initMultiHashCodeTable() func mhash*(mcodec: MultiCodec): ?!MHash = - let - mhash = CodeHashes.getOrDefault(mcodec) + let mhash = CodeHashes.getOrDefault(mcodec) if isNil(mhash.coder): return failure "Invalid multihash codec" @@ -71,10 +70,9 @@ func digestSize*(self: (CodexTree or CodexProof)): int = self.mhash.size func getProof*(self: CodexTree, index: int): ?!CodexProof = - var - proof = CodexProof(mcodec: self.mcodec) + var proof = CodexProof(mcodec: self.mcodec) - ? self.getProof(index, proof) + ?self.getProof(index, proof) success proof @@ -86,83 +84,66 @@ func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!bool = rootBytes = root.digestBytes leafBytes = leaf.digestBytes - if self.mcodec != root.mcodec or - self.mcodec != leaf.mcodec: + if self.mcodec != root.mcodec or self.mcodec != leaf.mcodec: return failure "Hash codec mismatch" - if rootBytes.len != root.size and - leafBytes.len != leaf.size: + if rootBytes.len != root.size and leafBytes.len != leaf.size: return failure "Invalid hash length" self.verify(leafBytes, rootBytes) func verify*(self: CodexProof, leaf: Cid, root: Cid): ?!bool = - self.verify(? leaf.mhash.mapFailure, ? 
leaf.mhash.mapFailure)
+  self.verify(?leaf.mhash.mapFailure, ?root.mhash.mapFailure)

-proc rootCid*(
-    self: CodexTree,
-    version = CIDv1,
-    dataCodec = DatasetRootCodec): ?!Cid =
-
-  if (? self.root).len == 0:
+proc rootCid*(self: CodexTree, version = CIDv1, dataCodec = DatasetRootCodec): ?!Cid =
+  if (?self.root).len == 0:
     return failure "Empty root"

-  let
-    mhash = ? MultiHash.init(self.mcodec, ? self.root).mapFailure
+  let mhash = ?MultiHash.init(self.mcodec, ?self.root).mapFailure

   Cid.init(version, DatasetRootCodec, mhash).mapFailure

 func getLeafCid*(
-  self: CodexTree,
-  i: Natural,
-  version = CIDv1,
-  dataCodec = BlockCodec): ?!Cid =
-
+    self: CodexTree, i: Natural, version = CIDv1, dataCodec = BlockCodec
+): ?!Cid =
   if i >= self.leavesCount:
     return failure "Invalid leaf index " & $i

   let
     leaf = self.leaves[i]
-    mhash = ? MultiHash.init($self.mcodec, leaf).mapFailure
+    mhash = ?MultiHash.init($self.mcodec, leaf).mapFailure

   Cid.init(version, dataCodec, mhash).mapFailure

 proc `$`*(self: CodexTree): string =
-  let root = if self.root.isOk: byteutils.toHex(self.root.get) else: "none"
-  "CodexTree(" &
-    " root: " & root &
-    ", leavesCount: " & $self.leavesCount &
-    ", levels: " & $self.levels &
-    ", mcodec: " & $self.mcodec & " )"
+  let root =
+    if self.root.isOk:
+      byteutils.toHex(self.root.get)
+    else:
+      "none"
+  "CodexTree(" & " root: " & root & ", leavesCount: " & $self.leavesCount & ", levels: " &
+    $self.levels & ", mcodec: " & $self.mcodec & " )"

 proc `$`*(self: CodexProof): string =
-  "CodexProof(" &
-    " nleaves: " & $self.nleaves &
-    ", index: " & $self.index &
-    ", path: " & $self.path.mapIt( byteutils.toHex(it) ) &
-    ", mcodec: " & $self.mcodec & " )"
+  "CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
+    $self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"

-func compress*(
-  x, y: openArray[byte],
-  key: ByteTreeKey,
-  mhash: MHash): ?!ByteHash =
+func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash =
   ## Compress two hashes
   ##

   var digest = newSeq[byte](mhash.size)
-  mhash.coder(@x & @y & @[ key.byte ], digest)
+  mhash.coder(@x & @y & @[key.byte], digest)

   success digest

 func init*(
-  _: type CodexTree,
-  mcodec: MultiCodec = Sha256HashCodec,
-  leaves: openArray[ByteHash]): ?!CodexTree =
-
+    _: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
+): ?!CodexTree =
   if leaves.len == 0:
     return failure "Empty leaves"

   let
-    mhash = ? mcodec.mhash()
+    mhash = ?mcodec.mhash()
     compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
       compress(x, y, key, mhash)
     Zero: ByteHash = newSeq[byte](mhash.size)
@@ -170,48 +151,42 @@ func init*(
   if mhash.size != leaves[0].len:
     return failure "Invalid hash length"

-  var
-    self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
+  var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)

-  self.layers = ?
merkleTreeWorker(self, leaves, isBottomLayer = true) + self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true) success self -func init*( - _: type CodexTree, - leaves: openArray[MultiHash]): ?!CodexTree = - +func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree = if leaves.len == 0: return failure "Empty leaves" let mcodec = leaves[0].mcodec - leaves = leaves.mapIt( it.digestBytes ) + leaves = leaves.mapIt(it.digestBytes) CodexTree.init(mcodec, leaves) -func init*( - _: type CodexTree, - leaves: openArray[Cid]): ?!CodexTree = +func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree = if leaves.len == 0: return failure "Empty leaves" let - mcodec = (? leaves[0].mhash.mapFailure).mcodec - leaves = leaves.mapIt( (? it.mhash.mapFailure).digestBytes ) + mcodec = (?leaves[0].mhash.mapFailure).mcodec + leaves = leaves.mapIt((?it.mhash.mapFailure).digestBytes) CodexTree.init(mcodec, leaves) proc fromNodes*( - _: type CodexTree, - mcodec: MultiCodec = Sha256HashCodec, - nodes: openArray[ByteHash], - nleaves: int): ?!CodexTree = - + _: type CodexTree, + mcodec: MultiCodec = Sha256HashCodec, + nodes: openArray[ByteHash], + nleaves: int, +): ?!CodexTree = if nodes.len == 0: return failure "Empty nodes" let - mhash = ? mcodec.mhash() + mhash = ?mcodec.mhash() Zero = newSeq[byte](mhash.size) compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} = compress(x, y, key, mhash) @@ -225,31 +200,31 @@ proc fromNodes*( pos = 0 while pos < nodes.len: - self.layers.add( nodes[pos..<(pos + layer)] ) + self.layers.add(nodes[pos ..< (pos + layer)]) pos += layer layer = divUp(layer, 2) let index = Rng.instance.rand(nleaves - 1) - proof = ? self.getProof(index) + proof = ?self.getProof(index) - if not ? proof.verify(self.leaves[index], ? self.root): # sanity check + if not ?proof.verify(self.leaves[index], ?self.root): # sanity check return failure "Unable to verify tree built from nodes" success self func init*( - _: type CodexProof, - mcodec: MultiCodec = Sha256HashCodec, - index: int, - nleaves: int, - nodes: openArray[ByteHash]): ?!CodexProof = - + _: type CodexProof, + mcodec: MultiCodec = Sha256HashCodec, + index: int, + nleaves: int, + nodes: openArray[ByteHash], +): ?!CodexProof = if nodes.len == 0: return failure "Empty nodes" let - mhash = ? 
mcodec.mhash()
+    mhash = ?mcodec.mhash()
     Zero = newSeq[byte](mhash.size)
     compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
       compress(x, y, key, mhash)
@@ -260,4 +235,5 @@ func init*(
     mcodec: mcodec,
     index: index,
     nleaves: nleaves,
-    path: @nodes)
+    path: @nodes,
+  )
diff --git a/codex/merkletree/merkletree.nim b/codex/merkletree/merkletree.nim
index 2f46b93d..f1905bec 100644
--- a/codex/merkletree/merkletree.nim
+++ b/codex/merkletree/merkletree.nim
@@ -16,19 +16,19 @@ import pkg/questionable/results
 import ../errors

 type
-  CompressFn*[H, K] = proc (x, y: H, key: K): ?!H {.noSideEffect, raises: [].}
+  CompressFn*[H, K] = proc(x, y: H, key: K): ?!H {.noSideEffect, raises: [].}

   MerkleTree*[H, K] = ref object of RootObj
-    layers* : seq[seq[H]]
+    layers*: seq[seq[H]]
     compress*: CompressFn[H, K]
-    zero* : H
+    zero*: H

   MerkleProof*[H, K] = ref object of RootObj
-    index* : int # linear index of the leaf, starting from 0
-    path* : seq[H] # order: from the bottom to the top
-    nleaves* : int # number of leaves in the tree (=size of input)
-    compress*: CompressFn[H, K] # compress function
-    zero* : H # zero value
+    index*: int # linear index of the leaf, starting from 0
+    path*: seq[H] # order: from the bottom to the top
+    nleaves*: int # number of leaves in the tree (=size of input)
+    compress*: CompressFn[H, K] # compress function
+    zero*: H # zero value

 func depth*[H, K](self: MerkleTree[H, K]): int =
   return self.layers.len - 1
@@ -59,36 +59,38 @@ func root*[H, K](self: MerkleTree[H, K]): ?!H =
   return success last[0]

 func getProof*[H, K](
-    self: MerkleTree[H, K],
-    index: int,
-    proof: MerkleProof[H, K]): ?!void =
-  let depth = self.depth
+    self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K]
+): ?!void =
+  let depth = self.depth
   let nleaves = self.leavesCount

   if not (index >= 0 and index < nleaves):
     return failure "index out of bounds"

-  var path : seq[H] = newSeq[H](depth)
+  var path: seq[H] = newSeq[H](depth)
   var k = index
   var m = nleaves
-  for i in 0..<depth:
+  for i in 0 ..< depth:
     let j = k xor 1
     path[i] = if (j < m): self.layers[i][j] else: self.zero
-    k = k shr 1
-    m = (m+1) shr 1
+    k = k shr 1
+    m = (m + 1) shr 1

   proof.index = index
   proof.path = path
   proof.nleaves = nleaves
   proof.compress = self.compress

   success()

 func reconstructRoot*[H, K](proof: MerkleProof[H, K], leaf: H): ?!H =
   var
     m = proof.nleaves
     j = proof.index
     h = leaf
     bottomFlag = K.KeyBottomLayer

   for p in proof.path:
-    let oddIndex : bool = (bitand(j,1) != 0)
+    let oddIndex: bool = (bitand(j, 1) != 0)
     if oddIndex:
       # the index of the child is odd
-      h = ? proof.compress( p, h, bottomFlag )
+      h = ?proof.compress(p, h, bottomFlag)
     else:
       if j == m - 1:
         # single child => odd node
-      h = ? proof.compress( h, p, K(bottomFlag.ord + 2) )
+      h = ?proof.compress(h, p, K(bottomFlag.ord + 2))
     else: # even node
-      h = ? proof.compress( h , p, bottomFlag )
+      h = ?proof.compress(h, p, bottomFlag)
     bottomFlag = K.KeyNone
-    j = j shr 1
-    m = (m+1) shr 1
+    j = j shr 1
+    m = (m + 1) shr 1

   return success h

 func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool =
-  success bool(root == ? proof.reconstructRoot(leaf))
+  success bool(root == ?proof.reconstructRoot(leaf))

 func merkleTreeWorker*[H, K](
-    self: MerkleTree[H, K],
-    xs: openArray[H],
-    isBottomLayer: static bool): ?!seq[seq[H]] =
-
+    self: MerkleTree[H, K], xs: openArray[H], isBottomLayer: static bool
+): ?!seq[seq[H]] =
   let a = low(xs)
   let b = high(xs)
   let m = b - a + 1

   when not isBottomLayer:
     if m == 1:
-      return success @[ @xs ]
+      return success @[@xs]

-  let halfn: int = m div 2
-  let n : int = 2 * halfn
+  let halfn: int = m div 2
+  let n: int = 2 * halfn
   let isOdd: bool = (n != m)

   var ys: seq[H]
@@ -143,11 +143,11 @@ func merkleTreeWorker*[H, K](
   else:
     ys = newSeq[H](halfn + 1)

-  for i in 0.. 0):
-
+  while (let chunk = await chunker.getBytes(); chunk.len > 0):
   without mhash =? MultiHash.digest($hcodec, chunk).mapFailure, err:
     return failure(err)
@@ -335,7 +318,8 @@ proc store*(
   for index, cid in cids:
     without proof =? tree.getProof(index), err:
       return failure(err)
-    if err =? (await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption:
+    if err =?
+ (await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption: # TODO add log here return failure(err) @@ -348,18 +332,20 @@ proc store*( codec = dataCodec, filename = filename, mimetype = mimetype, - uploadedAt = now().utc.toTime.toUnix.some) + uploadedAt = now().utc.toTime.toUnix.some, + ) without manifestBlk =? await self.storeManifest(manifest), err: error "Unable to store manifest" return failure(err) - info "Stored data", manifestCid = manifestBlk.cid, - treeCid = treeCid, - blocks = manifest.blocksCount, - datasetSize = manifest.datasetSize, - filename = manifest.filename, - mimetype = manifest.mimetype + info "Stored data", + manifestCid = manifestBlk.cid, + treeCid = treeCid, + blocks = manifest.blocksCount, + datasetSize = manifest.datasetSize, + filename = manifest.filename, + mimetype = manifest.mimetype return manifestBlk.cid.success @@ -381,15 +367,16 @@ proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} = onManifest(cid, manifest) proc setupRequest( - self: CodexNodeRef, - cid: Cid, - duration: UInt256, - proofProbability: UInt256, - nodes: uint, - tolerance: uint, - reward: UInt256, - collateral: UInt256, - expiry: UInt256): Future[?!StorageRequest] {.async.} = + self: CodexNodeRef, + cid: Cid, + duration: UInt256, + proofProbability: UInt256, + nodes: uint, + tolerance: uint, + pricePerBytePerSecond: UInt256, + collateralPerByte: UInt256, + expiry: UInt256, +): Future[?!StorageRequest] {.async.} = ## Setup slots for a given dataset ## @@ -398,16 +385,16 @@ proc setupRequest( ecM = tolerance logScope: - cid = cid - duration = duration - nodes = nodes - tolerance = tolerance - reward = reward - proofProbability = proofProbability - collateral = collateral - expiry = expiry - ecK = ecK - ecM = ecM + cid = cid + duration = duration + nodes = nodes + tolerance = tolerance + pricePerBytePerSecond = pricePerBytePerSecond + proofProbability = proofProbability + collateralPerByte = collateralPerByte + expiry = expiry + ecK = ecK + ecM = ecM trace "Setting up slots" @@ -416,11 +403,8 @@ proc setupRequest( return failure error # Erasure code the dataset according to provided parameters - let - erasure = Erasure.new( - self.networkStore.localStore, - leoEncoderProvider, - leoDecoderProvider) + let erasure = + Erasure.new(self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider) without encoded =? (await erasure.encode(manifest, ecK, ecM)), error: trace "Unable to erasure code dataset" @@ -441,9 +425,9 @@ proc setupRequest( let verifyRoot = if builder.verifyRoot.isNone: - return failure("No slots root") - else: - builder.verifyRoot.get.toBytes + return failure("No slots root") + else: + builder.verifyRoot.get.toBytes request = StorageRequest( ask: StorageAsk( @@ -451,44 +435,45 @@ proc setupRequest( slotSize: builder.slotBytes.uint.u256, duration: duration, proofProbability: proofProbability, - reward: reward, - collateral: collateral, - maxSlotLoss: tolerance + pricePerBytePerSecond: pricePerBytePerSecond, + collateralPerByte: collateralPerByte, + maxSlotLoss: tolerance, ), content: StorageContent( cid: $manifestBlk.cid, # TODO: why string? 
- merkleRoot: verifyRoot + merkleRoot: verifyRoot, ), - expiry: expiry + expiry: expiry, ) trace "Request created", request = $request success request proc requestStorage*( - self: CodexNodeRef, - cid: Cid, - duration: UInt256, - proofProbability: UInt256, - nodes: uint, - tolerance: uint, - reward: UInt256, - collateral: UInt256, - expiry: UInt256): Future[?!PurchaseId] {.async.} = + self: CodexNodeRef, + cid: Cid, + duration: UInt256, + proofProbability: UInt256, + nodes: uint, + tolerance: uint, + pricePerBytePerSecond: UInt256, + collateralPerByte: UInt256, + expiry: UInt256, +): Future[?!PurchaseId] {.async.} = ## Initiate a request for storage sequence, this might ## be a multistep procedure. ## logScope: - cid = cid - duration = duration - nodes = nodes - tolerance = tolerance - reward = reward - proofProbability = proofProbability - collateral = collateral - expiry = expiry.truncate(int64) - now = self.clock.now + cid = cid + duration = duration + nodes = nodes + tolerance = tolerance + pricePerBytePerSecond = pricePerBytePerSecond + proofProbability = proofProbability + collateralPerByte = collateralPerByte + expiry = expiry.truncate(int64) + now = self.clock.now trace "Received a request for storage!" @@ -496,16 +481,12 @@ proc requestStorage*( trace "Purchasing not available" return failure "Purchasing not available" - without request =? - (await self.setupRequest( - cid, - duration, - proofProbability, - nodes, - tolerance, - reward, - collateral, - expiry)), err: + without request =? ( + await self.setupRequest( + cid, duration, proofProbability, nodes, tolerance, pricePerBytePerSecond, + collateralPerByte, expiry, + ) + ), err: trace "Unable to setup request" return failure err @@ -513,10 +494,8 @@ proc requestStorage*( success purchase.id proc onStore( - self: CodexNodeRef, - request: StorageRequest, - slotIdx: UInt256, - blocksCb: BlocksCb): Future[?!void] {.async.} = + self: CodexNodeRef, request: StorageRequest, slotIdx: UInt256, blocksCb: BlocksCb +): Future[?!void] {.async.} = ## store data in local storage ## @@ -534,9 +513,8 @@ proc onStore( trace "Unable to fetch manifest for cid", cid, err = err.msg return failure(err) - without builder =? Poseidon2Builder.new( - self.networkStore, manifest, manifest.verifiableStrategy - ), err: + without builder =? + Poseidon2Builder.new(self.networkStore, manifest, manifest.verifiableStrategy), err: trace "Unable to create slots builder", err = err.msg return failure(err) @@ -551,7 +529,8 @@ proc onStore( proc updateExpiry(blocks: seq[bt.Block]): Future[?!void] {.async.} = trace "Updating expiry for blocks", blocks = blocks.len - let ensureExpiryFutures = blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry)) + let ensureExpiryFutures = + blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry)) if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption: return failure(updateExpiryErr) @@ -561,8 +540,9 @@ proc onStore( return success() - without indexer =? manifest.verifiableStrategy.init( - 0, manifest.blocksCount - 1, manifest.numSlots).catch, err: + without indexer =? + manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch, + err: trace "Unable to create indexing strategy from protected manifest", err = err.msg return failure(err) @@ -570,10 +550,9 @@ proc onStore( trace "Unable to get indicies from strategy", err = err.msg return failure(err) - if err =? (await self.fetchBatched( - manifest.treeCid, - blksIter, - onBatch = updateExpiry)).errorOption: + if err =? 
( + await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry) + ).errorOption: trace "Unable to fetch blocks", err = err.msg return failure(err) @@ -584,7 +563,8 @@ proc onStore( trace "Slot successfully retrieved and reconstructed" if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx.int]: - trace "Slot root mismatch", manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid() + trace "Slot root mismatch", + manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid() return failure(newException(CodexError, "Slot root mismatch")) trace "Slot successfully retrieved and reconstructed" @@ -592,9 +572,8 @@ proc onStore( return success() proc onProve( - self: CodexNodeRef, - slot: Slot, - challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + self: CodexNodeRef, slot: Slot, challenge: ProofChallenge +): Future[?!Groth16Proof] {.async.} = ## Generats a proof for a given slot and challenge ## @@ -648,9 +627,8 @@ proc onProve( failure "Prover not enabled" proc onExpiryUpdate( - self: CodexNodeRef, - rootCid: string, - expiry: SecondsSince1970): Future[?!void] {.async.} = + self: CodexNodeRef, rootCid: string, expiry: SecondsSince1970 +): Future[?!void] {.async.} = without cid =? Cid.init(rootCid): trace "Unable to parse Cid", cid let error = newException(CodexError, "Unable to parse Cid") @@ -658,11 +636,8 @@ proc onExpiryUpdate( return await self.updateExpiry(cid, expiry) -proc onClear( - self: CodexNodeRef, - request: StorageRequest, - slotIndex: UInt256) = -# TODO: remove data from local storage +proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: UInt256) = + # TODO: remove data from local storage discard proc start*(self: CodexNodeRef) {.async.} = @@ -676,32 +651,32 @@ proc start*(self: CodexNodeRef) {.async.} = await self.clock.start() if hostContracts =? self.contracts.host: - hostContracts.sales.onStore = - proc( - request: StorageRequest, - slot: UInt256, - onBatch: BatchProc): Future[?!void] = self.onStore(request, slot, onBatch) + hostContracts.sales.onStore = proc( + request: StorageRequest, slot: UInt256, onBatch: BatchProc + ): Future[?!void] = + self.onStore(request, slot, onBatch) - hostContracts.sales.onExpiryUpdate = - proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] = - self.onExpiryUpdate(rootCid, expiry) + hostContracts.sales.onExpiryUpdate = proc( + rootCid: string, expiry: SecondsSince1970 + ): Future[?!void] = + self.onExpiryUpdate(rootCid, expiry) - hostContracts.sales.onClear = - proc(request: StorageRequest, slotIndex: UInt256) = + hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: UInt256) = # TODO: remove data from local storage self.onClear(request, slotIndex) - hostContracts.sales.onProve = - proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] = - # TODO: generate proof - self.onProve(slot, challenge) + hostContracts.sales.onProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] = + # TODO: generate proof + self.onProve(slot, challenge) try: await hostContracts.start() except CancelledError as error: raise error except CatchableError as error: - error "Unable to start host contract interactions", error=error.msg + error "Unable to start host contract interactions", error = error.msg self.contracts.host = HostInteractions.none if clientContracts =? 
self.contracts.client: @@ -710,7 +685,7 @@ proc start*(self: CodexNodeRef) {.async.} = except CancelledError as error: raise error except CatchableError as error: - error "Unable to start client contract interactions: ", error=error.msg + error "Unable to start client contract interactions: ", error = error.msg self.contracts.client = ClientInteractions.none if validatorContracts =? self.contracts.validator: @@ -719,7 +694,7 @@ proc start*(self: CodexNodeRef) {.async.} = except CancelledError as error: raise error except CatchableError as error: - error "Unable to start validator contract interactions: ", error=error.msg + error "Unable to start validator contract interactions: ", error = error.msg self.contracts.validator = ValidatorInteractions.none self.networkId = self.switch.peerInfo.peerId @@ -750,13 +725,14 @@ proc stop*(self: CodexNodeRef) {.async.} = await self.networkStore.close proc new*( - T: type CodexNodeRef, - switch: Switch, - networkStore: NetworkStore, - engine: BlockExcEngine, - discovery: Discovery, - prover = Prover.none, - contracts = Contracts.default): CodexNodeRef = + T: type CodexNodeRef, + switch: Switch, + networkStore: NetworkStore, + engine: BlockExcEngine, + discovery: Discovery, + prover = Prover.none, + contracts = Contracts.default, +): CodexNodeRef = ## Create new instance of a Codex self, call `start` to run it ## @@ -766,4 +742,5 @@ proc new*( engine: engine, prover: prover, discovery: discovery, - contracts: contracts) + contracts: contracts, + ) diff --git a/codex/periods.nim b/codex/periods.nim index f0b789e1..429931ee 100644 --- a/codex/periods.nim +++ b/codex/periods.nim @@ -3,6 +3,7 @@ import pkg/stint type Periodicity* = object seconds*: UInt256 + Period* = UInt256 Timestamp* = UInt256 diff --git a/codex/purchasing.nim b/codex/purchasing.nim index ca92ece9..4ab84405 100644 --- a/codex/purchasing.nim +++ b/codex/purchasing.nim @@ -18,16 +18,13 @@ type clock: Clock purchases: Table[PurchaseId, Purchase] proofProbability*: UInt256 + PurchaseTimeout* = Timeout const DefaultProofProbability = 100.u256 proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing = - Purchasing( - market: market, - clock: clock, - proofProbability: DefaultProofProbability, - ) + Purchasing(market: market, clock: clock, proofProbability: DefaultProofProbability) proc load*(purchasing: Purchasing) {.async.} = let market = purchasing.market @@ -43,9 +40,9 @@ proc start*(purchasing: Purchasing) {.async.} = proc stop*(purchasing: Purchasing) {.async.} = discard -proc populate*(purchasing: Purchasing, - request: StorageRequest - ): Future[StorageRequest] {.async.} = +proc populate*( + purchasing: Purchasing, request: StorageRequest +): Future[StorageRequest] {.async.} = result = request if result.ask.proofProbability == 0.u256: result.ask.proofProbability = purchasing.proofProbability @@ -55,9 +52,9 @@ proc populate*(purchasing: Purchasing, result.nonce = Nonce(id) result.client = await purchasing.market.getSigner() -proc purchase*(purchasing: Purchasing, - request: StorageRequest - ): Future[Purchase] {.async.} = +proc purchase*( + purchasing: Purchasing, request: StorageRequest +): Future[Purchase] {.async.} = let request = await purchasing.populate(request) let purchase = Purchase.new(request, purchasing.market, purchasing.clock) purchase.start() @@ -75,4 +72,3 @@ func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] = for key in purchasing.purchases.keys: pIds.add(key) return pIds - diff --git a/codex/purchasing/purchase.nim 
b/codex/purchasing/purchase.nim index d616e492..7c16c28c 100644 --- a/codex/purchasing/purchase.nim +++ b/codex/purchasing/purchase.nim @@ -25,10 +25,7 @@ export purchaseid export statemachine func new*( - _: type Purchase, - requestId: RequestId, - market: Market, - clock: Clock + _: type Purchase, requestId: RequestId, market: Market, clock: Clock ): Purchase = ## create a new instance of a Purchase ## @@ -42,10 +39,7 @@ func new*( return purchase func new*( - _: type Purchase, - request: StorageRequest, - market: Market, - clock: Clock + _: type Purchase, request: StorageRequest, market: Market, clock: Clock ): Purchase = ## Create a new purchase using the given market and clock let purchase = Purchase.new(request.id, market, clock) @@ -76,4 +70,5 @@ func error*(purchase: Purchase): ?(ref CatchableError) = func state*(purchase: Purchase): ?string = proc description(state: State): string = $state + purchase.query(description) diff --git a/codex/purchasing/purchaseid.nim b/codex/purchasing/purchaseid.nim index 91734fe9..965b0839 100644 --- a/codex/purchasing/purchaseid.nim +++ b/codex/purchasing/purchaseid.nim @@ -3,9 +3,12 @@ import ../logutils type PurchaseId* = distinct array[32, byte] -logutils.formatIt(LogFormat.textLines, PurchaseId): it.short0xHexLog -logutils.formatIt(LogFormat.json, PurchaseId): it.to0xHexLog +logutils.formatIt(LogFormat.textLines, PurchaseId): + it.short0xHexLog +logutils.formatIt(LogFormat.json, PurchaseId): + it.to0xHexLog proc hash*(x: PurchaseId): Hash {.borrow.} proc `==`*(x, y: PurchaseId): bool {.borrow.} -proc toHex*(x: PurchaseId): string = array[32, byte](x).toHex +proc toHex*(x: PurchaseId): string = + array[32, byte](x).toHex diff --git a/codex/purchasing/statemachine.nim b/codex/purchasing/statemachine.nim index de2753c3..20a63783 100644 --- a/codex/purchasing/statemachine.nim +++ b/codex/purchasing/statemachine.nim @@ -14,5 +14,6 @@ type clock*: Clock requestId*: RequestId request*: ?StorageRequest + PurchaseState* = ref object of State PurchaseError* = object of CodexError diff --git a/codex/purchasing/states/cancelled.nim b/codex/purchasing/states/cancelled.nim index f9bb1ece..760dc81a 100644 --- a/codex/purchasing/states/cancelled.nim +++ b/codex/purchasing/states/cancelled.nim @@ -18,7 +18,7 @@ method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async. 
codex_purchases_cancelled.inc() let purchase = Purchase(machine) - warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId + warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId await purchase.market.withdrawFunds(purchase.requestId) let error = newException(Timeout, "Purchase cancelled due to timeout") diff --git a/codex/purchasing/states/error.nim b/codex/purchasing/states/error.nim index 0ebe1dbe..d7017b38 100644 --- a/codex/purchasing/states/error.nim +++ b/codex/purchasing/states/error.nim @@ -18,6 +18,7 @@ method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} codex_purchases_error.inc() let purchase = Purchase(machine) - error "Purchasing error", error=state.error.msgDetail, requestId = purchase.requestId + error "Purchasing error", + error = state.error.msgDetail, requestId = purchase.requestId purchase.future.fail(state.error) diff --git a/codex/purchasing/states/errorhandling.nim b/codex/purchasing/states/errorhandling.nim index 57e00924..8ef91ba6 100644 --- a/codex/purchasing/states/errorhandling.nim +++ b/codex/purchasing/states/errorhandling.nim @@ -2,8 +2,7 @@ import pkg/questionable import ../statemachine import ./error -type - ErrorHandlingState* = ref object of PurchaseState +type ErrorHandlingState* = ref object of PurchaseState method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State = some State(PurchaseErrored(error: error)) diff --git a/codex/purchasing/states/failed.nim b/codex/purchasing/states/failed.nim index b05dbb6f..5a126a73 100644 --- a/codex/purchasing/states/failed.nim +++ b/codex/purchasing/states/failed.nim @@ -5,8 +5,7 @@ import ./error declareCounter(codex_purchases_failed, "codex purchases failed") -type - PurchaseFailed* = ref object of PurchaseState +type PurchaseFailed* = ref object of PurchaseState method `$`*(state: PurchaseFailed): string = "failed" @@ -14,7 +13,7 @@ method `$`*(state: PurchaseFailed): string = method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} = codex_purchases_failed.inc() let purchase = Purchase(machine) - warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId + warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId await purchase.market.withdrawFunds(purchase.requestId) let error = newException(PurchaseError, "Purchase failed") diff --git a/codex/purchasing/states/started.nim b/codex/purchasing/states/started.nim index 42acd1fc..083e64c8 100644 --- a/codex/purchasing/states/started.nim +++ b/codex/purchasing/states/started.nim @@ -27,6 +27,7 @@ method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} let failed = newFuture[void]() proc callback(_: RequestId) = failed.complete() + let subscription = await market.subscribeRequestFailed(purchase.requestId, callback) # Ensure that we're past the request end by waiting an additional second diff --git a/codex/purchasing/states/submitted.nim b/codex/purchasing/states/submitted.nim index 5532c850..1cf65b1f 100644 --- a/codex/purchasing/states/submitted.nim +++ b/codex/purchasing/states/submitted.nim @@ -23,12 +23,14 @@ method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async. 
let market = purchase.market let clock = purchase.clock - info "Request submitted, waiting for slots to be filled", requestId = purchase.requestId + info "Request submitted, waiting for slots to be filled", + requestId = purchase.requestId - proc wait {.async.} = + proc wait() {.async.} = let done = newFuture[void]() proc callback(_: RequestId) = done.complete() + let subscription = await market.subscribeFulfillment(request.id, callback) await done await subscription.unsubscribe() diff --git a/codex/purchasing/states/unknown.nim b/codex/purchasing/states/unknown.nim index ade70c9f..54e09942 100644 --- a/codex/purchasing/states/unknown.nim +++ b/codex/purchasing/states/unknown.nim @@ -19,7 +19,6 @@ method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} let purchase = Purchase(machine) if (request =? await purchase.market.getRequest(purchase.requestId)) and (requestState =? await purchase.market.requestState(purchase.requestId)): - purchase.request = some request case requestState diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 597e7386..134aa8d2 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -9,8 +9,8 @@ import pkg/upraises -push: {.upraises: [].} - +push: + {.upraises: [].} import std/sequtils import mimetypes @@ -49,10 +49,7 @@ logScope: declareCounter(codex_api_uploads, "codex API uploads") declareCounter(codex_api_downloads, "codex API downloads") -proc validate( - pattern: string, - value: string): int - {.gcsafe, raises: [Defect].} = +proc validate(pattern: string, value: string): int {.gcsafe, raises: [Defect].} = 0 proc formatManifest(cid: Cid, manifest: Manifest): RestContent = @@ -63,21 +60,19 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} = proc addManifest(cid: Cid, manifest: Manifest) = content.add(formatManifest(cid, manifest)) + await node.iterateManifests(addManifest) return %RestContentList.init(content) proc retrieveCid( - node: CodexNodeRef, - cid: Cid, - local: bool = true, - resp: HttpResponseRef): Future[RestApiResponse] {.async.} = + node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef +): Future[RestApiResponse] {.async.} = ## Download a file from the node in a streaming ## manner ## - var - stream: LPStream + var stream: LPStream var bytes = 0 try: @@ -101,8 +96,12 @@ proc retrieveCid( resp.addHeader("Content-Type", "application/octet-stream") if manifest.filename.isSome: - resp.setHeader("Content-Disposition", "attachment; filename=\"" & manifest.filename.get() & "\"") - + resp.setHeader( + "Content-Disposition", + "attachment; filename=\"" & manifest.filename.get() & "\"", + ) + else: + resp.setHeader("Content-Disposition", "attachment") await resp.prepareChunked() @@ -129,7 +128,9 @@ proc retrieveCid( if not stream.isNil: await stream.close() -proc buildCorsHeaders(httpMethod: string, allowedOrigin: Option[string]): seq[(string, string)] = +proc buildCorsHeaders( + httpMethod: string, allowedOrigin: Option[string] +): seq[(string, string)] = var headers: seq[(string, string)] = newSeq[(string, string)]() if corsOrigin =? 
allowedOrigin: @@ -137,15 +138,15 @@ proc buildCorsHeaders(httpMethod: string, allowedOrigin: Option[string]): seq[(s headers.add(("Access-Control-Allow-Methods", httpMethod & ", OPTIONS")) headers.add(("Access-Control-Max-Age", "86400")) - return headers + return headers -proc setCorsHeaders(resp: HttpResponseRef, httpMethod: string, origin: string) = +proc setCorsHeaders(resp: HttpResponseRef, httpMethod: string, origin: string) = resp.setHeader("Access-Control-Allow-Origin", origin) resp.setHeader("Access-Control-Allow-Methods", httpMethod & ", OPTIONS") resp.setHeader("Access-Control-Max-Age", "86400") proc getFilenameFromContentDisposition(contentDisposition: string): ?string = - if not("filename=" in contentDisposition): + if not ("filename=" in contentDisposition): return string.none let parts = contentDisposition.split("filename=\"") @@ -154,696 +155,718 @@ proc getFilenameFromContentDisposition(contentDisposition: string): ?string = return string.none let filename = parts[1].strip() - return filename[0..^2].some + return filename[0 ..^ 2].some proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) = let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion - - router.api( - MethodOptions, - "/api/codex/v1/data") do ( - resp: HttpResponseRef) -> RestApiResponse: - - if corsOrigin =? allowedOrigin: - resp.setCorsHeaders("POST", corsOrigin) - resp.setHeader("Access-Control-Allow-Headers", "content-type, content-disposition") - resp.status = Http204 - await resp.sendBody("") + router.api(MethodOptions, "/api/codex/v1/data") do( + resp: HttpResponseRef + ) -> RestApiResponse: + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("POST", corsOrigin) + resp.setHeader( + "Access-Control-Allow-Headers", "content-type, content-disposition" + ) - router.rawApi( - MethodPost, - "/api/codex/v1/data") do ( - ) -> RestApiResponse: - ## Upload a file in a streaming manner - ## + resp.status = Http204 + await resp.sendBody("") - trace "Handling file upload" - var bodyReader = request.getBodyReader() - if bodyReader.isErr(): - return RestApiResponse.error(Http500) + router.rawApi(MethodPost, "/api/codex/v1/data") do() -> RestApiResponse: + ## Upload a file in a streaming manner + ## - # Attempt to handle `Expect` header - # some clients (curl), wait 1000ms - # before giving up - # - await request.handleExpect() - - var mimetype = request.headers.getString(ContentTypeHeader).some - - if mimetype.get() != "": - var m = newMimetypes() - let extension = m.getExt(mimetype.get(), "") - if extension == "": - return RestApiResponse.error(Http422, "The MIME type is not valid.") - else: - mimetype = string.none - - const ContentDispositionHeader = "Content-Disposition" - let contentDisposition = request.headers.getString(ContentDispositionHeader) - let filename = getFilenameFromContentDisposition(contentDisposition) - - if filename.isSome and not isValidFilename(filename.get()): - return RestApiResponse.error(Http422, "The filename is not valid.") - - # Here we could check if the extension matches the filename if needed - - let - reader = bodyReader.get() - - try: - without cid =? 
( - await node.store(AsyncStreamWrapper.new(reader = AsyncStreamReader(reader)), filename = filename, mimetype = mimetype)), error: - error "Error uploading file", exc = error.msg - return RestApiResponse.error(Http500, error.msg) - - codex_api_uploads.inc() - trace "Uploaded file", cid - return RestApiResponse.response($cid) - except CancelledError: - trace "Upload cancelled error" - return RestApiResponse.error(Http500) - except AsyncStreamError: - trace "Async stream error" - return RestApiResponse.error(Http500) - finally: - await reader.closeWait() - - trace "Something went wrong error" + trace "Handling file upload" + var bodyReader = request.getBodyReader() + if bodyReader.isErr(): return RestApiResponse.error(Http500) - router.api( - MethodGet, - "/api/codex/v1/data") do () -> RestApiResponse: - let json = await formatManifestBlocks(node) - return RestApiResponse.response($json, contentType="application/json") + # Attempt to handle `Expect` header + # some clients (curl), wait 1000ms + # before giving up + # + await request.handleExpect() - router.api( - MethodGet, - "/api/codex/v1/data/{cid}") do ( - cid: Cid, resp: HttpResponseRef) -> RestApiResponse: + var mimetype = request.headers.getString(ContentTypeHeader).some - var headers = buildCorsHeaders("GET", allowedOrigin) + if mimetype.get() != "": + var m = newMimetypes() + let extension = m.getExt(mimetype.get(), "") + if extension == "": + return RestApiResponse.error(Http422, "The MIME type is not valid.") + else: + mimetype = string.none - ## Download a file from the local node in a streaming - ## manner - if cid.isErr: - return RestApiResponse.error( - Http400, - $cid.error(), - headers = headers) + const ContentDispositionHeader = "Content-Disposition" + let contentDisposition = request.headers.getString(ContentDispositionHeader) + let filename = getFilenameFromContentDisposition(contentDisposition) - if corsOrigin =? allowedOrigin: - resp.setCorsHeaders("GET", corsOrigin) - resp.setHeader("Access-Control-Headers", "X-Requested-With") + if filename.isSome and not isValidFilename(filename.get()): + return RestApiResponse.error(Http422, "The filename is not valid.") - await node.retrieveCid(cid.get(), local = true, resp=resp) + # Here we could check if the extension matches the filename if needed - router.api( - MethodPost, - "/api/codex/v1/data/{cid}/network") do ( - cid: Cid, resp: HttpResponseRef) -> RestApiResponse: - ## Download a file from the network to the local node - ## + let reader = bodyReader.get() - var headers = buildCorsHeaders("GET", allowedOrigin) + try: + without cid =? ( + await node.store( + AsyncStreamWrapper.new(reader = AsyncStreamReader(reader)), + filename = filename, + mimetype = mimetype, + ) + ), error: + error "Error uploading file", exc = error.msg + return RestApiResponse.error(Http500, error.msg) - if cid.isErr: - return RestApiResponse.error( - Http400, - $cid.error(), headers = headers) + codex_api_uploads.inc() + trace "Uploaded file", cid + return RestApiResponse.response($cid) + except CancelledError: + trace "Upload cancelled error" + return RestApiResponse.error(Http500) + except AsyncStreamError: + trace "Async stream error" + return RestApiResponse.error(Http500) + finally: + await reader.closeWait() - if corsOrigin =? allowedOrigin: - resp.setCorsHeaders("GET", corsOrigin) - resp.setHeader("Access-Control-Headers", "X-Requested-With") + trace "Something went wrong error" + return RestApiResponse.error(Http500) - without manifest =? 
(await node.fetchManifest(cid.get())), err: - error "Failed to fetch manifest", err = err.msg - return RestApiResponse.error( - Http404, - err.msg, headers = headers) + router.api(MethodGet, "/api/codex/v1/data") do() -> RestApiResponse: + let json = await formatManifestBlocks(node) + return RestApiResponse.response($json, contentType = "application/json") - proc fetchDatasetAsync(): Future[void] {.async.} = - try: - if err =? (await node.fetchBatched(manifest)).errorOption: - error "Unable to fetch dataset", cid = cid.get(), err = err.msg - except CatchableError as exc: - error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg - discard + router.api(MethodGet, "/api/codex/v1/data/{cid}") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + var headers = buildCorsHeaders("GET", allowedOrigin) - asyncSpawn fetchDatasetAsync() + ## Download a file from the local node in a streaming + ## manner + if cid.isErr: + return RestApiResponse.error(Http400, $cid.error(), headers = headers) - let json = %formatManifest(cid.get(), manifest) - return RestApiResponse.response($json, contentType="application/json") + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("GET", corsOrigin) + resp.setHeader("Access-Control-Headers", "X-Requested-With") - router.api( - MethodGet, - "/api/codex/v1/data/{cid}/network/stream") do ( - cid: Cid, resp: HttpResponseRef) -> RestApiResponse: - ## Download a file from the network in a streaming - ## manner - ## + await node.retrieveCid(cid.get(), local = true, resp = resp) - var headers = buildCorsHeaders("GET", allowedOrigin) + router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + ## Download a file from the network to the local node + ## - if cid.isErr: - return RestApiResponse.error( - Http400, - $cid.error(), headers = headers) + var headers = buildCorsHeaders("GET", allowedOrigin) - if corsOrigin =? allowedOrigin: - resp.setCorsHeaders("GET", corsOrigin) - resp.setHeader("Access-Control-Headers", "X-Requested-With") + if cid.isErr: + return RestApiResponse.error(Http400, $cid.error(), headers = headers) - await node.retrieveCid(cid.get(), local = false, resp=resp) + without manifest =? (await node.fetchManifest(cid.get())), err: + error "Failed to fetch manifest", err = err.msg + return RestApiResponse.error(Http404, err.msg, headers = headers) - router.api( - MethodGet, - "/api/codex/v1/data/{cid}/network/manifest") do ( - cid: Cid, resp: HttpResponseRef) -> RestApiResponse: - ## Download only the manifest. - ## + proc fetchDatasetAsync(): Future[void] {.async.} = + try: + if err =? (await node.fetchBatched(manifest)).errorOption: + error "Unable to fetch dataset", cid = cid.get(), err = err.msg + except CatchableError as exc: + error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg + discard - var headers = buildCorsHeaders("GET", allowedOrigin) + asyncSpawn fetchDatasetAsync() - if cid.isErr: - return RestApiResponse.error( - Http400, - $cid.error(), headers = headers) + let json = %formatManifest(cid.get(), manifest) + return RestApiResponse.response($json, contentType = "application/json") - without manifest =? 
(await node.fetchManifest(cid.get())), err: - error "Failed to fetch manifest", err = err.msg - return RestApiResponse.error( - Http404, - err.msg, headers = headers) + router.api(MethodGet, "/api/codex/v1/data/{cid}/network/stream") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + ## Download a file from the network in a streaming + ## manner + ## - let json = %formatManifest(cid.get(), manifest) - return RestApiResponse.response($json, contentType="application/json") + var headers = buildCorsHeaders("GET", allowedOrigin) - router.api( - MethodGet, - "/api/codex/v1/space") do () -> RestApiResponse: - let json = % RestRepoStore( + if cid.isErr: + return RestApiResponse.error(Http400, $cid.error(), headers = headers) + + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("GET", corsOrigin) + resp.setHeader("Access-Control-Headers", "X-Requested-With") + + await node.retrieveCid(cid.get(), local = false, resp = resp) + + router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + ## Download only the manifest. + ## + + var headers = buildCorsHeaders("GET", allowedOrigin) + + if cid.isErr: + return RestApiResponse.error(Http400, $cid.error(), headers = headers) + + without manifest =? (await node.fetchManifest(cid.get())), err: + error "Failed to fetch manifest", err = err.msg + return RestApiResponse.error(Http404, err.msg, headers = headers) + + let json = %formatManifest(cid.get(), manifest) + return RestApiResponse.response($json, contentType = "application/json") + + router.api(MethodGet, "/api/codex/v1/space") do() -> RestApiResponse: + let json = + %RestRepoStore( totalBlocks: repoStore.totalBlocks, quotaMaxBytes: repoStore.quotaMaxBytes, quotaUsedBytes: repoStore.quotaUsedBytes, - quotaReservedBytes: repoStore.quotaReservedBytes + quotaReservedBytes: repoStore.quotaReservedBytes, ) - return RestApiResponse.response($json, contentType="application/json") + return RestApiResponse.response($json, contentType = "application/json") proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = let allowedOrigin = router.allowedOrigin - router.api( - MethodGet, - "/api/codex/v1/sales/slots") do () -> RestApiResponse: - var headers = buildCorsHeaders("GET", allowedOrigin) - - ## Returns active slots for the host - try: - without contracts =? node.contracts.host: - return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers) - - let json = %(await contracts.sales.mySlots()) - return RestApiResponse.response($json, contentType="application/json", headers = headers) - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) - - router.api( - MethodGet, - "/api/codex/v1/sales/slots/{slotId}") do (slotId: SlotId) -> RestApiResponse: - ## Returns active slot with id {slotId} for the host. Returns 404 if the - ## slot is not active for the host. - var headers = buildCorsHeaders("GET", allowedOrigin) + router.api(MethodGet, "/api/codex/v1/sales/slots") do() -> RestApiResponse: + var headers = buildCorsHeaders("GET", allowedOrigin) + ## Returns active slots for the host + try: without contracts =? node.contracts.host: - return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers) + return RestApiResponse.error( + Http503, "Persistence is not enabled", headers = headers + ) - without slotId =? 
-    without slotId =? slotId.tryGet.catch, error:
+      let json = %(await contracts.sales.mySlots())
+      return RestApiResponse.response(
+        $json, contentType = "application/json", headers = headers
+      )
+    except CatchableError as exc:
+      trace "Excepting processing request", exc = exc.msg
+      return RestApiResponse.error(Http500, headers = headers)
+
+  router.api(MethodGet, "/api/codex/v1/sales/slots/{slotId}") do(
+    slotId: SlotId
+  ) -> RestApiResponse:
+    ## Returns active slot with id {slotId} for the host. Returns 404 if the
+    ## slot is not active for the host.
+    var headers = buildCorsHeaders("GET", allowedOrigin)
+
+    without contracts =? node.contracts.host:
+      return
+        RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
+
+    without slotId =? slotId.tryGet.catch, error:
+      return RestApiResponse.error(Http400, error.msg, headers = headers)
+
+    without agent =? await contracts.sales.activeSale(slotId):
+      return
+        RestApiResponse.error(Http404, "Provider not filling slot", headers = headers)
+
+    let restAgent = RestSalesAgent(
+      state: agent.state() |? "none",
+      slotIndex: agent.data.slotIndex,
+      requestId: agent.data.requestId,
+      request: agent.data.request,
+      reservation: agent.data.reservation,
+    )
+
+    return RestApiResponse.response(
+      restAgent.toJson, contentType = "application/json", headers = headers
+    )
+
+  router.api(MethodGet, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
+    ## Returns storage that is for sale
+    var headers = buildCorsHeaders("GET", allowedOrigin)
+
+    try:
+      without contracts =? node.contracts.host:
+        return RestApiResponse.error(
+          Http503, "Persistence is not enabled", headers = headers
+        )
+
+      without avails =? (await contracts.sales.context.reservations.all(Availability)),
+        err:
+        return RestApiResponse.error(Http500, err.msg, headers = headers)
+
+      let json = %avails
+      return RestApiResponse.response(
+        $json, contentType = "application/json", headers = headers
+      )
+    except CatchableError as exc:
+      trace "Excepting processing request", exc = exc.msg
+      return RestApiResponse.error(Http500, headers = headers)
+
+  router.rawApi(MethodPost, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
+    ## Add available storage to sell.
+    ## Every time Availability's offer finishes, its capacity is
+    ## returned to the availability.
+    ##
+    ## totalSize - size of available storage in bytes
+    ## duration - maximum time the storage should be sold for (in seconds)
+    ## minPricePerBytePerSecond - minimal price per byte paid (in amount of
+    ## tokens) to be matched against the request's pricePerBytePerSecond
+    ## totalCollateral - total collateral (in amount of
+    ## tokens) that can be distributed among matching requests
+
+    var headers = buildCorsHeaders("POST", allowedOrigin)
+
+    try:
+      without contracts =? node.contracts.host:
+        return RestApiResponse.error(
+          Http503, "Persistence is not enabled", headers = headers
+        )
+
+      let body = await request.getBody()
+
+      without restAv =? RestAvailability.fromJson(body), error:
         return RestApiResponse.error(Http400, error.msg, headers = headers)
-    without agent =? await contracts.sales.activeSale(slotId):
-      return RestApiResponse.error(Http404, "Provider not filling slot", headers = headers)
+      let reservations = contracts.sales.context.reservations
-    let restAgent = RestSalesAgent(
"none", - slotIndex: agent.data.slotIndex, - requestId: agent.data.requestId, - request: agent.data.request, - reservation: agent.data.reservation, + if restAv.totalSize == 0: + return RestApiResponse.error( + Http400, "Total size must be larger then zero", headers = headers + ) + + if not reservations.hasAvailable(restAv.totalSize.truncate(uint)): + return + RestApiResponse.error(Http422, "Not enough storage quota", headers = headers) + + without availability =? ( + await reservations.createAvailability( + restAv.totalSize, restAv.duration, restAv.minPricePerBytePerSecond, + restAv.totalCollateral, + ) + ), error: + return RestApiResponse.error(Http500, error.msg, headers = headers) + + return RestApiResponse.response( + availability.toJson, + Http201, + contentType = "application/json", + headers = headers, ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) - return RestApiResponse.response(restAgent.toJson, contentType="application/json", headers = headers) + router.api(MethodOptions, "/api/codex/v1/sales/availability/{id}") do( + id: AvailabilityId, resp: HttpResponseRef + ) -> RestApiResponse: + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("PATCH", corsOrigin) - router.api( - MethodGet, - "/api/codex/v1/sales/availability") do () -> RestApiResponse: - ## Returns storage that is for sale - var headers = buildCorsHeaders("GET", allowedOrigin) + resp.status = Http204 + await resp.sendBody("") - try: - without contracts =? node.contracts.host: - return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers) + router.rawApi(MethodPatch, "/api/codex/v1/sales/availability/{id}") do( + id: AvailabilityId + ) -> RestApiResponse: + ## Updates Availability. + ## The new parameters will be only considered for new requests. + ## Existing Requests linked to this Availability will continue as is. + ## + ## totalSize - size of available storage in bytes. + ## When decreasing the size, then lower limit is + ## the currently `totalSize - freeSize`. + ## duration - maximum time the storage should be sold for (in seconds) + ## minPricePerBytePerSecond - minimal price per byte paid (in amount of + ## tokens) to be matched against the request's pricePerBytePerSecond + ## totalCollateral - total collateral (in amount of + ## tokens) that can be distributed among matching requests + try: + without contracts =? node.contracts.host: + return RestApiResponse.error(Http503, "Persistence is not enabled") - without avails =? (await contracts.sales.context.reservations.all(Availability)), err: - return RestApiResponse.error(Http500, err.msg, headers = headers) + without id =? id.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg) + without keyId =? id.key.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg) - let json = %avails - return RestApiResponse.response($json, contentType="application/json", headers = headers) - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) + let + body = await request.getBody() + reservations = contracts.sales.context.reservations - router.rawApi( - MethodPost, - "/api/codex/v1/sales/availability") do () -> RestApiResponse: - ## Add available storage to sell. - ## Every time Availability's offer finishes, its capacity is returned to the availability. 
-    ##
-    ## totalSize - size of available storage in bytes
-    ## duration - maximum time the storage should be sold for (in seconds)
-    ## minPrice - minimal price paid (in amount of tokens) for the whole hosted request's slot for the request's duration
-    ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens)
+      type OptRestAvailability = Optionalize(RestAvailability)
+      without restAv =? OptRestAvailability.fromJson(body), error:
+        return RestApiResponse.error(Http400, error.msg)
-    var headers = buildCorsHeaders("POST", allowedOrigin)
+      without availability =? (await reservations.get(keyId, Availability)), error:
+        if error of NotExistsError:
+          return RestApiResponse.error(Http404, "Availability not found")
-    try:
-      without contracts =? node.contracts.host:
-        return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
+        return RestApiResponse.error(Http500, error.msg)
-      let body = await request.getBody()
+      if isSome restAv.freeSize:
+        return RestApiResponse.error(Http400, "Updating freeSize is not allowed")
-      without restAv =? RestAvailability.fromJson(body), error:
-        return RestApiResponse.error(Http400, error.msg, headers = headers)
+      if size =? restAv.totalSize:
+        # we don't allow lowering the totalSize below the currently utilized size
+        if size < (availability.totalSize - availability.freeSize):
+          return RestApiResponse.error(
+            Http400,
+            "New totalSize must be larger than current totalSize - freeSize, which is currently: " &
+              $(availability.totalSize - availability.freeSize),
+          )
-      let reservations = contracts.sales.context.reservations
+        availability.freeSize += size - availability.totalSize
+        availability.totalSize = size
-      if restAv.totalSize == 0:
-        return RestApiResponse.error(Http400, "Total size must be larger then zero", headers = headers)
+      if duration =? restAv.duration:
+        availability.duration = duration
-      if not reservations.hasAvailable(restAv.totalSize.truncate(uint)):
-        return RestApiResponse.error(Http422, "Not enough storage quota", headers = headers)
+      if minPricePerBytePerSecond =? restAv.minPricePerBytePerSecond:
+        availability.minPricePerBytePerSecond = minPricePerBytePerSecond
-      without availability =? (
-        await reservations.createAvailability(
-          restAv.totalSize,
-          restAv.duration,
-          restAv.minPrice,
-          restAv.maxCollateral)
-      ), error:
+      if totalCollateral =? restAv.totalCollateral:
+        availability.totalCollateral = totalCollateral
+
+      if err =? (await reservations.update(availability)).errorOption:
+        return RestApiResponse.error(Http500, err.msg)
+
+      return RestApiResponse.response(Http200)
+    except CatchableError as exc:
+      trace "Excepting processing request", exc = exc.msg
+      return RestApiResponse.error(Http500)
+
+  router.rawApi(MethodGet, "/api/codex/v1/sales/availability/{id}/reservations") do(
+    id: AvailabilityId
+  ) -> RestApiResponse:
+    ## Gets Availability's reservations.
+    var headers = buildCorsHeaders("GET", allowedOrigin)
+
+    try:
+      without contracts =? node.contracts.host:
+        return RestApiResponse.error(
+          Http503, "Persistence is not enabled", headers = headers
+        )
+
+      without id =? id.tryGet.catch, error:
+        return RestApiResponse.error(Http400, error.msg, headers = headers)
+      without keyId =? id.key.tryGet.catch, error:
+        return RestApiResponse.error(Http400, error.msg, headers = headers)
+
+      let reservations = contracts.sales.context.reservations
+      let market = contracts.sales.context.market
+      if error =? (await reservations.get(keyId, Availability)).errorOption:
+        if error of NotExistsError:
+          return
+            RestApiResponse.error(Http404, "Availability not found", headers = headers)
+        else:
           return RestApiResponse.error(Http500, error.msg, headers = headers)
-      return RestApiResponse.response(availability.toJson,
-        Http201,
-        contentType="application/json",
-        headers = headers)
-    except CatchableError as exc:
-      trace "Excepting processing request", exc = exc.msg
-      return RestApiResponse.error(Http500, headers = headers)
+      without availabilitysReservations =? (await reservations.all(Reservation, id)),
+        err:
+        return RestApiResponse.error(Http500, err.msg, headers = headers)
-  router.api(
-    MethodOptions,
-    "/api/codex/v1/sales/availability/{id}") do (id: AvailabilityId, resp: HttpResponseRef) -> RestApiResponse:
-
-    if corsOrigin =? allowedOrigin:
-      resp.setCorsHeaders("PATCH", corsOrigin)
-
-    resp.status = Http204
-    await resp.sendBody("")
-
-  router.rawApi(
-    MethodPatch,
-    "/api/codex/v1/sales/availability/{id}") do (id: AvailabilityId) -> RestApiResponse:
-    ## Updates Availability.
-    ## The new parameters will be only considered for new requests.
-    ## Existing Requests linked to this Availability will continue as is.
-    ##
-    ## totalSize - size of available storage in bytes. When decreasing the size, then lower limit is the currently `totalSize - freeSize`.
-    ## duration - maximum time the storage should be sold for (in seconds)
-    ## minPrice - minimum price to be paid (in amount of tokens)
-    ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens)
-    try:
-      without contracts =? node.contracts.host:
-        return RestApiResponse.error(Http503, "Persistence is not enabled")
-
-      without id =? id.tryGet.catch, error:
-        return RestApiResponse.error(Http400, error.msg)
-      without keyId =? id.key.tryGet.catch, error:
-        return RestApiResponse.error(Http400, error.msg)
-
-      let
-        body = await request.getBody()
-        reservations = contracts.sales.context.reservations
-
-      type OptRestAvailability = Optionalize(RestAvailability)
-      without restAv =? OptRestAvailability.fromJson(body), error:
-        return RestApiResponse.error(Http400, error.msg)
-
-      without availability =? (await reservations.get(keyId, Availability)), error:
-        if error of NotExistsError:
-          return RestApiResponse.error(Http404, "Availability not found")
-
-        return RestApiResponse.error(Http500, error.msg)
-
-      if isSome restAv.freeSize:
-        return RestApiResponse.error(Http400, "Updating freeSize is not allowed")
-
-      if size =? restAv.totalSize:
-        # we don't allow lowering the totalSize bellow currently utilized size
-        if size < (availability.totalSize - availability.freeSize):
-          return RestApiResponse.error(Http400, "New totalSize must be larger then current totalSize - freeSize, which is currently: " & $(availability.totalSize - availability.freeSize))
-
-        availability.freeSize += size - availability.totalSize
-        availability.totalSize = size
-
-      if duration =? restAv.duration:
-        availability.duration = duration
-
-      if minPrice =? restAv.minPrice:
-        availability.minPrice = minPrice
-
-      if maxCollateral =? restAv.maxCollateral:
-        availability.maxCollateral = maxCollateral
-      if err =? (await reservations.update(availability)).errorOption:
-        return RestApiResponse.error(Http500, err.msg)
-
-      return RestApiResponse.response(Http200)
-    except CatchableError as exc:
-      trace "Excepting processing request", exc = exc.msg
-      return RestApiResponse.error(Http500)
-
-  router.rawApi(
-    MethodGet,
-    "/api/codex/v1/sales/availability/{id}/reservations") do (id: AvailabilityId) -> RestApiResponse:
-    ## Gets Availability's reservations.
-    var headers = buildCorsHeaders("GET", allowedOrigin)
-
-    try:
-      without contracts =? node.contracts.host:
-        return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
-
-      without id =? id.tryGet.catch, error:
-        return RestApiResponse.error(Http400, error.msg, headers = headers)
-      without keyId =? id.key.tryGet.catch, error:
-        return RestApiResponse.error(Http400, error.msg, headers = headers)
-
-      let reservations = contracts.sales.context.reservations
-      let market = contracts.sales.context.market
-
-      if error =? (await reservations.get(keyId, Availability)).errorOption:
-        if error of NotExistsError:
-          return RestApiResponse.error(Http404, "Availability not found", headers = headers)
-        else:
-          return RestApiResponse.error(Http500, error.msg, headers = headers)
-
-      without availabilitysReservations =? (await reservations.all(Reservation, id)), err:
-        return RestApiResponse.error(Http500, err.msg, headers = headers)
-
-      # TODO: Expand this structure with information about the linked StorageRequest not only RequestID
-      return RestApiResponse.response(availabilitysReservations.toJson, contentType="application/json", headers = headers)
-    except CatchableError as exc:
-      trace "Excepting processing request", exc = exc.msg
-      return RestApiResponse.error(Http500, headers = headers)
+      # TODO: Expand this structure with information about the linked StorageRequest not only RequestID
+      return RestApiResponse.response(
+        availabilitysReservations.toJson,
+        contentType = "application/json",
+        headers = headers,
+      )
+    except CatchableError as exc:
+      trace "Excepting processing request", exc = exc.msg
+      return RestApiResponse.error(Http500, headers = headers)

 proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
   let allowedOrigin = router.allowedOrigin

-  router.rawApi(
-    MethodPost,
-    "/api/codex/v1/storage/request/{cid}") do (cid: Cid) -> RestApiResponse:
-    var headers = buildCorsHeaders("POST", allowedOrigin)
+  router.rawApi(MethodPost, "/api/codex/v1/storage/request/{cid}") do(
+    cid: Cid
+  ) -> RestApiResponse:
+    var headers = buildCorsHeaders("POST", allowedOrigin)
-    ## Create a request for storage
-    ##
-    ## cid - the cid of a previously uploaded dataset
-    ## duration - the duration of the request in seconds
-    ## proofProbability - how often storage proofs are required
-    ## reward - the maximum amount of tokens paid per second per slot to hosts the client is willing to pay
-    ## expiry - specifies threshold in seconds from now when the request expires if the Request does not find requested amount of nodes to host the data
-    ## nodes - number of nodes the content should be stored on
-    ## tolerance - allowed number of nodes that can be lost before content is lost
-    ## colateral - requested collateral from hosts when they fill slot
-    try:
-      without contracts =? node.contracts.client:
-        return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
-      without cid =? cid.tryGet.catch, error:
-        return RestApiResponse.error(Http400, error.msg, headers = headers)
-
-      let body = await request.getBody()
-
-      without params =? StorageRequestParams.fromJson(body), error:
-        return RestApiResponse.error(Http400, error.msg, headers = headers)
-
-      let nodes = params.nodes |? 3
-      let tolerance = params.tolerance |? 1
-
-      if tolerance == 0:
-        return RestApiResponse.error(Http400, "Tolerance needs to be bigger then zero", headers = headers)
-
-      # prevent underflow
-      if tolerance > nodes:
-        return RestApiResponse.error(Http400, "Invalid parameters: `tolerance` cannot be greater than `nodes`", headers = headers)
-
-      let ecK = nodes - tolerance
-      let ecM = tolerance # for readability
-
-      # ensure leopard constrainst of 1 < K ≥ M
-      if ecK <= 1 or ecK < ecM:
-        return RestApiResponse.error(Http400, "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`", headers = headers)
-
-      without expiry =? params.expiry:
-        return RestApiResponse.error(Http400, "Expiry required", headers = headers)
-
-      if expiry <= 0 or expiry >= params.duration:
-        return RestApiResponse.error(Http400, "Expiry needs value bigger then zero and smaller then the request's duration", headers = headers)
-
-      without purchaseId =? await node.requestStorage(
-        cid,
-        params.duration,
-        params.proofProbability,
-        nodes,
-        tolerance,
-        params.reward,
-        params.collateral,
-        expiry), error:
-
-        if error of InsufficientBlocksError:
-          return RestApiResponse.error(Http400,
-            "Dataset too small for erasure parameters, need at least " &
-            $(ref InsufficientBlocksError)(error).minSize.int & " bytes", headers = headers)
-
-        return RestApiResponse.error(Http500, error.msg, headers = headers)
-
-      return RestApiResponse.response(purchaseId.toHex)
-    except CatchableError as exc:
-      trace "Excepting processing request", exc = exc.msg
-      return RestApiResponse.error(Http500, headers = headers)
-
-  router.api(
-    MethodGet,
-    "/api/codex/v1/storage/purchases/{id}") do (
-      id: PurchaseId) -> RestApiResponse:
-    var headers = buildCorsHeaders("GET", allowedOrigin)
-
-    try:
-      without contracts =? node.contracts.client:
-        return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
-
-      without id =? id.tryGet.catch, error:
-        return RestApiResponse.error(Http400, error.msg, headers = headers)
-
-      without purchase =? contracts.purchasing.getPurchase(id):
-        return RestApiResponse.error(Http404, headers = headers)
-
-      let json = % RestPurchase(
-        state: purchase.state |? "none",
-        error: purchase.error.?msg,
-        request: purchase.request,
-        requestId: purchase.requestId
+    ## Create a request for storage
+    ##
+    ## cid - the cid of a previously uploaded dataset
+    ## duration - the duration of the request in seconds
+    ## proofProbability - how often storage proofs are required
+    ## pricePerBytePerSecond - the amount of tokens paid per byte per second to hosts the client is willing to pay
+    ## expiry - specifies threshold in seconds from now when the request expires if the Request does not find requested amount of nodes to host the data
+    ## nodes - number of nodes the content should be stored on
+    ## tolerance - allowed number of nodes that can be lost before content is lost
+    ## collateralPerByte - requested collateral per byte from hosts when they fill slot
+    try:
+      without contracts =? node.contracts.client:
+        return RestApiResponse.error(
+          Http503, "Persistence is not enabled", headers = headers
        )
-      return RestApiResponse.response($json, contentType="application/json", headers = headers)
-    except CatchableError as exc:
-      trace "Excepting processing request", exc = exc.msg
-      return RestApiResponse.error(Http500, headers = headers)
+      without cid =? cid.tryGet.catch, error:
+        return RestApiResponse.error(Http400, error.msg, headers = headers)
-  router.api(
-    MethodGet,
-    "/api/codex/v1/storage/purchases") do () -> RestApiResponse:
-    var headers = buildCorsHeaders("GET", allowedOrigin)
+      let body = await request.getBody()
-    try:
-      without contracts =? node.contracts.client:
-        return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
+      without params =? StorageRequestParams.fromJson(body), error:
+        return RestApiResponse.error(Http400, error.msg, headers = headers)
-      let purchaseIds = contracts.purchasing.getPurchaseIds()
-      return RestApiResponse.response($ %purchaseIds, contentType="application/json", headers = headers)
-    except CatchableError as exc:
-      trace "Excepting processing request", exc = exc.msg
-      return RestApiResponse.error(Http500, headers = headers)
+      let nodes = params.nodes |? 3
+      let tolerance = params.tolerance |? 1
+
+      if tolerance == 0:
+        return RestApiResponse.error(
+          Http400, "Tolerance needs to be bigger than zero", headers = headers
+        )
+
+      # prevent underflow
+      if tolerance > nodes:
+        return RestApiResponse.error(
+          Http400,
+          "Invalid parameters: `tolerance` cannot be greater than `nodes`",
+          headers = headers,
+        )
+
+      let ecK = nodes - tolerance
+      let ecM = tolerance # for readability
+
+      # ensure leopard constraints of 1 < K ≥ M
+      if ecK <= 1 or ecK < ecM:
+        return RestApiResponse.error(
+          Http400,
+          "Invalid parameters: parameters must satisfy `1 < (nodes - tolerance) ≥ tolerance`",
+          headers = headers,
+        )
+
+      without expiry =? params.expiry:
+        return RestApiResponse.error(Http400, "Expiry required", headers = headers)
+
+      if expiry <= 0 or expiry >= params.duration:
+        return RestApiResponse.error(
+          Http400,
+          "Expiry needs value bigger than zero and smaller than the request's duration",
+          headers = headers,
+        )
+
+      without purchaseId =?
+        await node.requestStorage(
+          cid, params.duration, params.proofProbability, nodes, tolerance,
+          params.pricePerBytePerSecond, params.collateralPerByte, expiry,
+        ), error:
+        if error of InsufficientBlocksError:
+          return RestApiResponse.error(
+            Http400,
+            "Dataset too small for erasure parameters, need at least " &
+              $(ref InsufficientBlocksError)(error).minSize.int & " bytes",
+            headers = headers,
+          )
+
+        return RestApiResponse.error(Http500, error.msg, headers = headers)
+
+      return RestApiResponse.response(purchaseId.toHex)
+    except CatchableError as exc:
+      trace "Excepting processing request", exc = exc.msg
+      return RestApiResponse.error(Http500, headers = headers)
+
+  router.api(MethodGet, "/api/codex/v1/storage/purchases/{id}") do(
+    id: PurchaseId
+  ) -> RestApiResponse:
+    var headers = buildCorsHeaders("GET", allowedOrigin)
+
+    try:
+      without contracts =? node.contracts.client:
+        return RestApiResponse.error(
+          Http503, "Persistence is not enabled", headers = headers
+        )
+
+      without id =? id.tryGet.catch, error:
+        return RestApiResponse.error(Http400, error.msg, headers = headers)
+
+      without purchase =? contracts.purchasing.getPurchase(id):
+        return RestApiResponse.error(Http404, headers = headers)
+
+      let json =
+        %RestPurchase(
"none", + error: purchase.error .? msg, + request: purchase.request, + requestId: purchase.requestId, + ) + + return RestApiResponse.response( + $json, contentType = "application/json", headers = headers + ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) + + router.api(MethodGet, "/api/codex/v1/storage/purchases") do() -> RestApiResponse: + var headers = buildCorsHeaders("GET", allowedOrigin) + + try: + without contracts =? node.contracts.client: + return RestApiResponse.error( + Http503, "Persistence is not enabled", headers = headers + ) + + let purchaseIds = contracts.purchasing.getPurchaseIds() + return RestApiResponse.response( + $ %purchaseIds, contentType = "application/json", headers = headers + ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = let allowedOrigin = router.allowedOrigin ## various node management api's ## - router.api( - MethodGet, - "/api/codex/v1/spr") do () -> RestApiResponse: - ## Returns node SPR in requested format, json or text. - ## - var headers = buildCorsHeaders("GET", allowedOrigin) + router.api(MethodGet, "/api/codex/v1/spr") do() -> RestApiResponse: + ## Returns node SPR in requested format, json or text. + ## + var headers = buildCorsHeaders("GET", allowedOrigin) - try: - without spr =? node.discovery.dhtRecord: - return RestApiResponse.response("", status=Http503, contentType="application/json", headers = headers) + try: + without spr =? node.discovery.dhtRecord: + return RestApiResponse.response( + "", status = Http503, contentType = "application/json", headers = headers + ) - if $preferredContentType().get() == "text/plain": - return RestApiResponse.response(spr.toURI, contentType="text/plain", headers = headers) - else: - return RestApiResponse.response($ %* {"spr": spr.toURI}, contentType="application/json", headers = headers) - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) + if $preferredContentType().get() == "text/plain": + return RestApiResponse.response( + spr.toURI, contentType = "text/plain", headers = headers + ) + else: + return RestApiResponse.response( + $ %*{"spr": spr.toURI}, contentType = "application/json", headers = headers + ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) - router.api( - MethodGet, - "/api/codex/v1/peerid") do () -> RestApiResponse: - ## Returns node's peerId in requested format, json or text. - ## - var headers = buildCorsHeaders("GET", allowedOrigin) + router.api(MethodGet, "/api/codex/v1/peerid") do() -> RestApiResponse: + ## Returns node's peerId in requested format, json or text. 
+    ##
+    var headers = buildCorsHeaders("GET", allowedOrigin)
-    try:
-      let id = $node.switch.peerInfo.peerId
+    try:
+      let id = $node.switch.peerInfo.peerId
-      if $preferredContentType().get() == "text/plain":
-        return RestApiResponse.response(id, contentType="text/plain", headers = headers)
-      else:
-        return RestApiResponse.response($ %* {"id": id}, contentType="application/json", headers = headers)
-    except CatchableError as exc:
-      trace "Excepting processing request", exc = exc.msg
-      return RestApiResponse.error(Http500, headers = headers)
+      if $preferredContentType().get() == "text/plain":
+        return
+          RestApiResponse.response(id, contentType = "text/plain", headers = headers)
+      else:
+        return RestApiResponse.response(
+          $ %*{"id": id}, contentType = "application/json", headers = headers
+        )
+    except CatchableError as exc:
+      trace "Excepting processing request", exc = exc.msg
+      return RestApiResponse.error(Http500, headers = headers)
-  router.api(
-    MethodGet,
-    "/api/codex/v1/connect/{peerId}") do (
-      peerId: PeerId,
-      addrs: seq[MultiAddress]) -> RestApiResponse:
-    ## Connect to a peer
-    ##
-    ## If `addrs` param is supplied, it will be used to
-    ## dial the peer, otherwise the `peerId` is used
-    ## to invoke peer discovery, if it succeeds
-    ## the returned addresses will be used to dial
-    ##
-    ## `addrs` the listening addresses of the peers to dial, eg the one specified with `--listen-addrs`
-    ##
-    var headers = buildCorsHeaders("GET", allowedOrigin)
+  router.api(MethodGet, "/api/codex/v1/connect/{peerId}") do(
+    peerId: PeerId, addrs: seq[MultiAddress]
+  ) -> RestApiResponse:
+    ## Connect to a peer
+    ##
+    ## If `addrs` param is supplied, it will be used to
+    ## dial the peer, otherwise the `peerId` is used
+    ## to invoke peer discovery, if it succeeds
+    ## the returned addresses will be used to dial
+    ##
+    ## `addrs` the listening addresses of the peers to dial, e.g. the one specified with `--listen-addrs`
+    ##
+    var headers = buildCorsHeaders("GET", allowedOrigin)
-    if peerId.isErr:
-      return RestApiResponse.error(
-        Http400,
-        $peerId.error(),
-        headers = headers)
+    if peerId.isErr:
+      return RestApiResponse.error(Http400, $peerId.error(), headers = headers)
-    let addresses = if addrs.isOk and addrs.get().len > 0:
-        addrs.get()
-      else:
-        without peerRecord =? (await node.findPeer(peerId.get())):
-          return RestApiResponse.error(
-            Http400,
-            "Unable to find Peer!",
-            headers = headers)
-        peerRecord.addresses.mapIt(it.address)
-    try:
-      await node.connect(peerId.get(), addresses)
-      return RestApiResponse.response("Successfully connected to peer", headers = headers)
-    except DialFailedError:
-      return RestApiResponse.error(Http400, "Unable to dial peer", headers = headers)
-    except CatchableError:
-      return RestApiResponse.error(Http500, "Unknown error dialling peer", headers = headers)
+    let addresses =
+      if addrs.isOk and addrs.get().len > 0:
+        addrs.get()
+      else:
+        without peerRecord =? (await node.findPeer(peerId.get())):
+          return
+            RestApiResponse.error(Http400, "Unable to find Peer!", headers = headers)
+        peerRecord.addresses.mapIt(it.address)
+    try:
+      await node.connect(peerId.get(), addresses)
+      return
+        RestApiResponse.response("Successfully connected to peer", headers = headers)
+    except DialFailedError:
+      return RestApiResponse.error(Http400, "Unable to dial peer", headers = headers)
+    except CatchableError:
+      return
+        RestApiResponse.error(Http500, "Unknown error dialling peer", headers = headers)

 proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
   let allowedOrigin = router.allowedOrigin

-  router.api(
-    MethodGet,
-    "/api/codex/v1/debug/info") do () -> RestApiResponse:
-    ## Print rudimentary node information
-    ##
-    var headers = buildCorsHeaders("GET", allowedOrigin)
+  router.api(MethodGet, "/api/codex/v1/debug/info") do() -> RestApiResponse:
+    ## Print rudimentary node information
+    ##
+    var headers = buildCorsHeaders("GET", allowedOrigin)
+
+    try:
+      let table = RestRoutingTable.init(node.discovery.protocol.routingTable)
+
+      let json =
+        %*{
+          "id": $node.switch.peerInfo.peerId,
+          "addrs": node.switch.peerInfo.addrs.mapIt($it),
+          "repo": $conf.dataDir,
+          "spr":
+            if node.discovery.dhtRecord.isSome:
+              node.discovery.dhtRecord.get.toURI
+            else:
+              "",
+          "announceAddresses": node.discovery.announceAddrs,
+          "table": table,
+          "codex": {"version": $codexVersion, "revision": $codexRevision},
+        }
+
+      # return pretty json for human readability
+      return RestApiResponse.response(
+        json.pretty(), contentType = "application/json", headers = headers
+      )
+    except CatchableError as exc:
+      trace "Excepting processing request", exc = exc.msg
+      return RestApiResponse.error(Http500, headers = headers)
+
+  router.api(MethodPost, "/api/codex/v1/debug/chronicles/loglevel") do(
+    level: Option[string]
+  ) -> RestApiResponse:
+    ## Set log level at run time
+    ##
+    ## e.g. `chronicles/loglevel?level=DEBUG`
+    ##
+    ## `level` - chronicles log level
+    ##
+    var headers = buildCorsHeaders("POST", allowedOrigin)
+
+    try:
+      without res =? level and level =? res:
+        return RestApiResponse.error(Http400, "Missing log level", headers = headers)

       try:
-      let table = RestRoutingTable.init(node.discovery.protocol.routingTable)
-
-      let
-        json = %*{
-          "id": $node.switch.peerInfo.peerId,
-          "addrs": node.switch.peerInfo.addrs.mapIt( $it ),
-          "repo": $conf.dataDir,
-          "spr":
-            if node.discovery.dhtRecord.isSome:
-              node.discovery.dhtRecord.get.toURI
-            else:
-              "",
-          "announceAddresses": node.discovery.announceAddrs,
-          "table": table,
-          "codex": {
-            "version": $codexVersion,
-            "revision": $codexRevision
-          }
-        }
-
-      # return pretty json for human readability
-      return RestApiResponse.response(json.pretty(), contentType="application/json", headers = headers)
+        {.gcsafe.}:
+          updateLogLevel(level)
       except CatchableError as exc:
-      trace "Excepting processing request", exc = exc.msg
-      return RestApiResponse.error(Http500, headers = headers)
+        return RestApiResponse.error(Http500, exc.msg, headers = headers)
-  router.api(
-    MethodPost,
-    "/api/codex/v1/debug/chronicles/loglevel") do (
-      level: Option[string]) -> RestApiResponse:
-    ## Set log level at run time
-    ##
-    ## e.g. `chronicles/loglevel?level=DEBUG`
-    ##
-    ## `level` - chronicles log level
-    ##
-    var headers = buildCorsHeaders("POST", allowedOrigin)
-
-    try:
-      without res =? level and level =? res:
-        return RestApiResponse.error(Http400, "Missing log level", headers = headers)
-
-      try:
-        {.gcsafe.}:
-          updateLogLevel(level)
-      except CatchableError as exc:
-        return RestApiResponse.error(Http500, exc.msg, headers = headers)
-
-      return RestApiResponse.response("")
-    except CatchableError as exc:
-      trace "Excepting processing request", exc = exc.msg
-      return RestApiResponse.error(Http500, headers = headers)
+      return RestApiResponse.response("")
+    except CatchableError as exc:
+      trace "Excepting processing request", exc = exc.msg
+      return RestApiResponse.error(Http500, headers = headers)

   when codex_enable_api_debug_peers:
-    router.api(
-      MethodGet,
-      "/api/codex/v1/debug/peer/{peerId}") do (peerId: PeerId) -> RestApiResponse:
+    router.api(MethodGet, "/api/codex/v1/debug/peer/{peerId}") do(
+      peerId: PeerId
+    ) -> RestApiResponse:
       var headers = buildCorsHeaders("GET", allowedOrigin)

       try:
         trace "debug/peer start"
         without peerRecord =? (await node.findPeer(peerId.get())):
           trace "debug/peer peer not found!"
-          return RestApiResponse.error(
-            Http400,
-            "Unable to find Peer!",
-            headers = headers)
+          return
+            RestApiResponse.error(Http400, "Unable to find Peer!", headers = headers)

         let json = %RestPeerRecord.init(peerRecord)
         trace "debug/peer returning peer record"
@@ -853,11 +876,11 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
       return RestApiResponse.error(Http500, headers = headers)

 proc initRestApi*(
-    node: CodexNodeRef,
-    conf: CodexConf,
-    repoStore: RepoStore,
-    corsAllowedOrigin: ?string): RestRouter =
-
+    node: CodexNodeRef,
+    conf: CodexConf,
+    repoStore: RepoStore,
+    corsAllowedOrigin: ?string,
+): RestRouter =
   var router = RestRouter.init(validate, corsAllowedOrigin)

   initDataApi(node, repoStore, router)
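Reviewer note on the codex/rest/api.nim changes above (illustrative, not part of the patch): the storage-request handler derives its erasure-coding parameters from `nodes` and `tolerance`, and the accepted range is easy to misread. A minimal self-contained Nim sketch of that validation, using plain unsigned ints in place of the REST types, lets readers check the constraint `1 < (nodes - tolerance) >= tolerance` with concrete numbers:

  # Illustrative only: mirrors the nodes/tolerance validation in the
  # storage-request handler above, with plain ints instead of REST types.
  proc validateErasureParams(nodes, tolerance: uint): string =
    if tolerance == 0:
      return "Tolerance needs to be bigger than zero"
    if tolerance > nodes: # prevent underflow
      return "`tolerance` cannot be greater than `nodes`"
    let ecK = nodes - tolerance
    let ecM = tolerance
    # leopard constraint: 1 < K >= M
    if ecK <= 1 or ecK < ecM:
      return "parameters must satisfy `1 < (nodes - tolerance) >= tolerance`"
    return "" # valid

  when isMainModule:
    assert validateErasureParams(3, 1) == "" # ecK = 2, ecM = 1: accepted
    assert validateErasureParams(2, 1) != "" # ecK = 1: rejected, K must exceed 1
    assert validateErasureParams(4, 3) != "" # ecK = 1 < ecM = 3: rejected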
diff --git a/codex/rest/coders.nim b/codex/rest/coders.nim
index 0be1a638..1c997ccf 100644
--- a/codex/rest/coders.nim
+++ b/codex/rest/coders.nim
@@ -25,9 +25,7 @@ proc encodeString*(cid: type Cid): Result[string, cstring] =
   ok($cid)

 proc decodeString*(T: type Cid, value: string): Result[Cid, cstring] =
-  Cid
-    .init(value)
-    .mapErr do(e: CidError) -> cstring:
+  Cid.init(value).mapErr do(e: CidError) -> cstring:
     case e
     of CidError.Incorrect: "Incorrect Cid".cstring
     of CidError.Unsupported: "Unsupported Cid".cstring
@@ -44,9 +42,8 @@ proc encodeString*(address: MultiAddress): Result[string, cstring] =
   ok($address)

 proc decodeString*(T: type MultiAddress, value: string): Result[MultiAddress, cstring] =
-  MultiAddress
-    .init(value)
-    .mapErr do(e: string) -> cstring: cstring(e)
+  MultiAddress.init(value).mapErr do(e: string) -> cstring:
+    cstring(e)

 proc decodeString*(T: type SomeUnsignedInt, value: string): Result[T, cstring] =
   Base10.decode(T, value)
@@ -55,7 +52,7 @@ proc encodeString*(value: SomeUnsignedInt): Result[string, cstring] =
   ok(Base10.toString(value))

 proc decodeString*(T: type Duration, value: string): Result[T, cstring] =
-  let v = ? Base10.decode(uint32, value)
+  let v = ?Base10.decode(uint32, value)
   ok(v.minutes)

 proc encodeString*(value: Duration): Result[string, cstring] =
@@ -77,19 +74,20 @@ proc decodeString*(_: type UInt256, value: string): Result[UInt256, cstring] =
   except ValueError as e:
     err e.msg.cstring

-proc decodeString*(_: type array[32, byte],
-                   value: string): Result[array[32, byte], cstring] =
+proc decodeString*(
+    _: type array[32, byte], value: string
+): Result[array[32, byte], cstring] =
   try:
     ok array[32, byte].fromHex(value)
   except ValueError as e:
     err e.msg.cstring

-proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](_: type T,
-                   value: string): Result[T, cstring] =
+proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](
+    _: type T, value: string
+): Result[T, cstring] =
   array[32, byte].decodeString(value).map(id => T(id))

-proc decodeString*(t: typedesc[string],
-                   value: string): Result[string, cstring] =
+proc decodeString*(t: typedesc[string], value: string): Result[string, cstring] =
   ok(value)

 proc encodeString*(value: string): RestResult[string] =
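Reviewer note (illustrative, not part of the patch): one easy-to-miss behavior preserved by the reformatted `Duration` decoder above is that the query-string value is parsed as a base-10 integer and interpreted as minutes (`?Base10.decode(uint32, value)` followed by `v.minutes`). A hypothetical stand-alone sketch of the same behavior:

  # Illustrative sketch only: mirrors decodeString(T: type Duration, ...)
  # above; the real decoder returns a Result, parseUInt raises instead.
  import std/strutils

  proc decodeDurationMinutes(value: string): uint32 =
    uint32(parseUInt(value))

  when isMainModule:
    # a client sending "5" is asking for five minutes, not five seconds
    assert decodeDurationMinutes("5") == 5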
diff --git a/codex/rest/json.nim b/codex/rest/json.nim
index afbfebe6..9bc7664e 100644
--- a/codex/rest/json.nim
+++ b/codex/rest/json.nim
@@ -15,8 +15,8 @@ type
   StorageRequestParams* = object
     duration* {.serialize.}: UInt256
     proofProbability* {.serialize.}: UInt256
-    reward* {.serialize.}: UInt256
-    collateral* {.serialize.}: UInt256
+    pricePerBytePerSecond* {.serialize.}: UInt256
+    collateralPerByte* {.serialize.}: UInt256
     expiry* {.serialize.}: ?UInt256
     nodes* {.serialize.}: ?uint
     tolerance* {.serialize.}: ?uint
@@ -30,8 +30,8 @@ type
   RestAvailability* = object
     totalSize* {.serialize.}: UInt256
     duration* {.serialize.}: UInt256
-    minPrice* {.serialize.}: UInt256
-    maxCollateral* {.serialize.}: UInt256
+    minPricePerBytePerSecond* {.serialize.}: UInt256
+    totalCollateral* {.serialize.}: UInt256
     freeSize* {.serialize.}: ?UInt256

   RestSalesAgent* = object
@@ -74,15 +74,10 @@ type
     quotaReservedBytes* {.serialize.}: NBytes

 proc init*(_: type RestContentList, content: seq[RestContent]): RestContentList =
-  RestContentList(
-    content: content
-  )
+  RestContentList(content: content)

 proc init*(_: type RestContent, cid: Cid, manifest: Manifest): RestContent =
-  RestContent(
-    cid: cid,
-    manifest: manifest
-  )
+  RestContent(cid: cid, manifest: manifest)

 proc init*(_: type RestNode, node: dn.Node): RestNode =
   RestNode(
@@ -90,7 +85,7 @@ proc init*(_: type RestNode, node: dn.Node): RestNode =
     peerId: node.record.data.peerId,
     record: node.record,
     address: node.address,
-    seen: node.seen > 0.5
+    seen: node.seen > 0.5,
   )

 proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRoutingTable =
@@ -99,28 +94,23 @@ proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRouting
     for node in bucket.nodes:
       nodes.add(RestNode.init(node))

-  RestRoutingTable(
-    localNode: RestNode.init(routingTable.localNode),
-    nodes: nodes
-  )
+  RestRoutingTable(localNode: RestNode.init(routingTable.localNode), nodes: nodes)

 proc init*(_: type RestPeerRecord, peerRecord: PeerRecord): RestPeerRecord =
   RestPeerRecord(
-    peerId: peerRecord.peerId,
-    seqNo: peerRecord.seqNo,
-    addresses: peerRecord.addresses
+    peerId: peerRecord.peerId, seqNo: peerRecord.seqNo, addresses: peerRecord.addresses
   )

 proc init*(_: type RestNodeId, id: NodeId): RestNodeId =
-  RestNodeId(
-    id: id
-  )
+  RestNodeId(id: id)

 proc `%`*(obj: StorageRequest | Slot): JsonNode =
   let jsonObj = newJObject()
-  for k, v in obj.fieldPairs: jsonObj[k] = %v
+  for k, v in obj.fieldPairs:
+    jsonObj[k] = %v
   jsonObj["id"] = %(obj.id)

   return jsonObj

-proc `%`*(obj: RestNodeId): JsonNode = % $obj.id
+proc `%`*(obj: RestNodeId): JsonNode =
+  % $obj.id
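Reviewer note (illustrative, not part of the patch): the `%` overload for `StorageRequest | Slot` above serializes the object's fields with `fieldPairs` and then appends the computed `id`. A self-contained sketch with a hypothetical `Slot` stand-in and a made-up id derivation:

  # Illustrative only: mimics the `%` overload above; the Slot type and
  # id proc here are stand-ins, not the real Codex definitions.
  import std/json

  type Slot = object
    requestId: string
    slotIndex: int

  proc id(s: Slot): string =
    s.requestId & ":" & $s.slotIndex # stand-in for the real slot-id derivation

  proc `%`(obj: Slot): JsonNode =
    let jsonObj = newJObject()
    for k, v in obj.fieldPairs:
      jsonObj[k] = %v
    jsonObj["id"] = %(obj.id) # the computed id rides along with the raw fields
    return jsonObj

  when isMainModule:
    echo %Slot(requestId: "req-1", slotIndex: 2)
    # roughly: {"requestId":"req-1","slotIndex":2,"id":"req-1:2"}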
diff --git a/codex/rng.nim b/codex/rng.nim
index 19452cd4..9d82156e 100644
--- a/codex/rng.nim
+++ b/codex/rng.nim
@@ -9,7 +9,8 @@

 import pkg/upraises

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import pkg/libp2p/crypto/crypto
 import pkg/bearssl/rand
@@ -30,7 +31,8 @@ proc instance*(t: type Rng): Rng =
 const randMax = 18_446_744_073_709_551_615'u64

 proc rand*(rng: Rng, max: Natural): int =
-  if max == 0: return 0
+  if max == 0:
+    return 0

   while true:
     let x = rng[].generate(uint64)
@@ -41,8 +43,8 @@ proc sample*[T](rng: Rng, a: openArray[T]): T =
   result = a[rng.rand(a.high)]

 proc sample*[T](
-    rng: Rng, sample, exclude: openArray[T]): T
-    {.raises: [Defect, RngSampleError].} =
+    rng: Rng, sample, exclude: openArray[T]
+): T {.raises: [Defect, RngSampleError].} =
   if sample == exclude:
     raise newException(RngSampleError, "Sample and exclude arrays are the same!")

diff --git a/codex/sales.nim b/codex/sales.nim
index f891edab..4bf2d13c 100644
--- a/codex/sales.nim
+++ b/codex/sales.nim
@@ -45,13 +45,12 @@ export salescontext
 logScope:
   topics = "sales marketplace"

-type
-  Sales* = ref object
-    context*: SalesContext
-    agents*: seq[SalesAgent]
-    running: bool
-    subscriptions: seq[market.Subscription]
-    trackedFutures: TrackedFutures
+type Sales* = ref object
+  context*: SalesContext
+  agents*: seq[SalesAgent]
+  running: bool
+  subscriptions: seq[market.Subscription]
+  trackedFutures: TrackedFutures

 proc `onStore=`*(sales: Sales, onStore: OnStore) =
   sales.context.onStore = some onStore
@@ -68,28 +67,31 @@ proc `onProve=`*(sales: Sales, callback: OnProve) =
 proc `onExpiryUpdate=`*(sales: Sales, callback: OnExpiryUpdate) =
   sales.context.onExpiryUpdate = some callback

-proc onStore*(sales: Sales): ?OnStore = sales.context.onStore
+proc onStore*(sales: Sales): ?OnStore =
+  sales.context.onStore

-proc onClear*(sales: Sales): ?OnClear = sales.context.onClear
+proc onClear*(sales: Sales): ?OnClear =
+  sales.context.onClear

-proc onSale*(sales: Sales): ?OnSale = sales.context.onSale
+proc onSale*(sales: Sales): ?OnSale =
+  sales.context.onSale

-proc onProve*(sales: Sales): ?OnProve = sales.context.onProve
+proc onProve*(sales: Sales): ?OnProve =
+  sales.context.onProve

-proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate = sales.context.onExpiryUpdate
+proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate =
+  sales.context.onExpiryUpdate

-proc new*(_: type Sales,
-          market: Market,
-          clock: Clock,
-          repo: RepoStore): Sales =
+proc new*(_: type Sales, market: Market, clock: Clock, repo: RepoStore): Sales =
   Sales.new(market, clock, repo, 0)

-proc new*(_: type Sales,
-          market: Market,
-          clock: Clock,
-          repo: RepoStore,
-          simulateProofFailures: int): Sales =
-
+proc new*(
+    _: type Sales,
+    market: Market,
+    clock: Clock,
+    repo: RepoStore,
+    simulateProofFailures: int,
+): Sales =
   let reservations = Reservations.new(repo)
   Sales(
     context: SalesContext(
@@ -97,10 +99,10 @@ proc new*(
       clock: clock,
       reservations: reservations,
       slotQueue: SlotQueue.new(),
-      simulateProofFailures: simulateProofFailures
+      simulateProofFailures: simulateProofFailures,
     ),
     trackedFutures: TrackedFutures.new(),
-    subscriptions: @[]
+    subscriptions: @[],
   )

 proc remove(sales: Sales, agent: SalesAgent) {.async.} =
@@ -108,20 +110,22 @@ proc remove(sales: Sales, agent: SalesAgent) {.async.} =
   if sales.running:
     sales.agents.keepItIf(it != agent)

-proc cleanUp(sales: Sales,
-             agent: SalesAgent,
-             returnBytes: bool,
-             reprocessSlot: bool,
-             processing: Future[void]) {.async.} =
-
+proc cleanUp(
+    sales: Sales,
+    agent: SalesAgent,
+    returnBytes: bool,
+    reprocessSlot: bool,
+    returnedCollateral: ?UInt256,
+    processing: Future[void],
+) {.async.} =
   let data = agent.data

   logScope:
     topics = "sales cleanUp"
    requestId = data.requestId
     slotIndex = data.slotIndex
-    reservationId = data.reservation.?id |? ReservationId.default
-    availabilityId = data.reservation.?availabilityId |? AvailabilityId.default
+    reservationId = data.reservation .? id |? ReservationId.default
+    availabilityId = data.reservation .? availabilityId |? AvailabilityId.default

   trace "cleaning up sales agent"

@@ -129,36 +133,37 @@ proc cleanUp(
   # that the cleanUp was called before the sales process really started, so
   # there are not really any bytes to be returned
   if returnBytes and request =? data.request and reservation =? data.reservation:
-    if returnErr =? (await sales.context.reservations.returnBytesToAvailability(
-      reservation.availabilityId,
-      reservation.id,
-      request.ask.slotSize
-    )).errorOption:
-      error "failure returning bytes",
-        error = returnErr.msg,
-        bytes = request.ask.slotSize
+    if returnErr =? (
+      await sales.context.reservations.returnBytesToAvailability(
+        reservation.availabilityId, reservation.id, request.ask.slotSize
+      )
+    ).errorOption:
+      error "failure returning bytes",
+        error = returnErr.msg, bytes = request.ask.slotSize

   # delete reservation and return reservation bytes back to the availability
   if reservation =? data.reservation and
-      deleteErr =? (await sales.context.reservations.deleteReservation(
-        reservation.id,
-        reservation.availabilityId
-      )).errorOption:
-      error "failure deleting reservation", error = deleteErr.msg
+      deleteErr =? (
+        await sales.context.reservations.deleteReservation(
+          reservation.id, reservation.availabilityId, returnedCollateral
+        )
+      ).errorOption:
+    error "failure deleting reservation", error = deleteErr.msg

   # Re-add items back into the queue to prevent small availabilities from
   # draining the queue. Seen items will be ordered last.
   if reprocessSlot and request =? data.request:
     let queue = sales.context.slotQueue
-    var seenItem = SlotQueueItem.init(data.requestId,
-                                      data.slotIndex.truncate(uint16),
-                                      data.ask,
-                                      request.expiry,
-                                      seen = true)
+    var seenItem = SlotQueueItem.init(
+      data.requestId,
+      data.slotIndex.truncate(uint16),
+      data.ask,
+      request.expiry,
+      seen = true,
+    )
     trace "pushing ignored item to queue, marked as seen"
     if err =? queue.push(seenItem).errorOption:
-      error "failed to readd slot to queue",
-        errorType = $(type err), error = err.msg
+      error "failed to readd slot to queue", errorType = $(type err), error = err.msg

   await sales.remove(agent)

@@ -167,11 +172,8 @@ proc cleanUp(
   processing.complete()

 proc filled(
-    sales: Sales,
-    request: StorageRequest,
-    slotIndex: UInt256,
-    processing: Future[void]) =
-
+    sales: Sales, request: StorageRequest, slotIndex: UInt256, processing: Future[void]
+) =
   if onSale =? sales.context.onSale:
     onSale(request, slotIndex)

@@ -180,18 +182,16 @@ proc filled(
   processing.complete()

 proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) =
-  debug "Processing slot from queue", requestId = item.requestId,
-    slot = item.slotIndex
+  debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex

   let agent = newSalesAgent(
-    sales.context,
-    item.requestId,
-    item.slotIndex.u256,
-    none StorageRequest
+    sales.context, item.requestId, item.slotIndex.u256, none StorageRequest
   )

-  agent.onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} =
-    await sales.cleanUp(agent, returnBytes, reprocessSlot, done)
+  agent.onCleanUp = proc(
+      returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
+  ) {.async.} =
+    await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done)

   agent.onFilled = some proc(request: StorageRequest, slotIndex: UInt256) =
     sales.filled(request, slotIndex, done)
@@ -204,10 +204,12 @@ proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.}
   without reservs =? await reservations.all(Reservation):
     return

-  let unused = reservs.filter(r => (
-    let slotId = slotId(r.requestId, r.slotIndex)
-    not activeSlots.any(slot => slot.id == slotId)
-  ))
+  let unused = reservs.filter(
+    r => (
+      let slotId = slotId(r.requestId, r.slotIndex)
+      not activeSlots.any(slot => slot.id == slotId)
+    )
+  )

   if unused.len == 0:
     return
@@ -215,14 +217,13 @@ proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.}
   info "Found unused reservations for deletion", unused = unused.len

   for reservation in unused:
-
     logScope:
       reservationId = reservation.id
       availabilityId = reservation.availabilityId

-    if err =? (await reservations.deleteReservation(
-        reservation.id, reservation.availabilityId
-    )).errorOption:
+    if err =? (
+      await reservations.deleteReservation(reservation.id, reservation.availabilityId)
+    ).errorOption:
       error "Failed to delete unused reservation", error = err.msg
     else:
       trace "Deleted unused reservation"
@@ -252,17 +253,16 @@ proc load*(sales: Sales) {.async.} =
   await sales.deleteInactiveReservations(activeSlots)

   for slot in activeSlots:
-    let agent = newSalesAgent(
-      sales.context,
-      slot.request.id,
-      slot.slotIndex,
-      some slot.request)
+    let agent =
+      newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request)

-    agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} =
+    agent.onCleanUp = proc(
+        returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
+    ) {.async.} =
       # since workers are not being dispatched, this future has not been created
      # by a worker. Create a dummy one here so we can call sales.cleanUp
       let done: Future[void] = nil
-      await sales.cleanUp(agent, returnBytes, reprocessSlot, done)
+      await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done)

     # There is no need to assign agent.onFilled as slots loaded from `mySlots`
     # are inherently already filled and so assigning agent.onFilled would be
@@ -282,11 +282,9 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
     trace "unpausing queue after new availability added"
     queue.unpause()

-proc onStorageRequested(sales: Sales,
-                        requestId: RequestId,
-                        ask: StorageAsk,
-                        expiry: UInt256) =
-
+proc onStorageRequested(
+    sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: UInt256
+) =
   logScope:
     topics = "marketplace sales onStorageRequested"
     requestId
@@ -314,10 +312,7 @@ proc onStorageRequested(
   else:
     warn "Error adding request to SlotQueue", error = err.msg

-proc onSlotFreed(sales: Sales,
-                 requestId: RequestId,
-                 slotIndex: UInt256) =
-
+proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) =
   logScope:
     topics = "marketplace sales onSlotFreed"
     requestId
@@ -331,8 +326,7 @@ proc onSlotFreed(
   let queue = context.slotQueue

   # first attempt to populate request using existing slot metadata in queue
-  without var found =? queue.populateItem(requestId,
-                                          slotIndex.truncate(uint16)):
+  without var found =? queue.populateItem(requestId, slotIndex.truncate(uint16)):
     trace "no existing request metadata, getting request info from contract"
     # if there's no existing slot for that request, retrieve the request
     # from the contract.
@@ -359,9 +353,7 @@ proc subscribeRequested(sales: Sales) {.async.} =
   let context = sales.context
   let market = context.market

-  proc onStorageRequested(requestId: RequestId,
-                          ask: StorageAsk,
-                          expiry: UInt256) =
+  proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: UInt256) =
     sales.onStorageRequested(requestId, ask, expiry)

   try:
@@ -485,10 +477,9 @@ proc startSlotQueue(sales: Sales) =
   let slotQueue = sales.context.slotQueue
   let reservations = sales.context.reservations

-  slotQueue.onProcessSlot =
-    proc(item: SlotQueueItem, done: Future[void]) {.async.} =
-      trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
-      sales.processSlot(item, done)
+  slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
+    trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
+    sales.processSlot(item, done)

   slotQueue.start()
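Reviewer note (illustrative, not part of the patch): the sales.nim changes thread a new optional `returnedCollateral` through `onCleanUp` and `cleanUp` into `deleteReservation`, so collateral released for a slot flows back to the availability alongside the reserved bytes. A simplified model of that bookkeeping, with `uint64` standing in for `UInt256` and a bare object standing in for the real `Availability`:

  # Illustrative only: models the new cleanUp/deleteReservation flow;
  # types and field set are simplified stand-ins.
  import std/options

  type Availability = object
    freeSize: uint64
    totalRemainingCollateral: uint64

  proc deleteReservation(a: var Availability, reservedSize: uint64,
                         returnedCollateral: Option[uint64]) =
    a.freeSize += reservedSize # bytes go back to the availability
    if returnedCollateral.isSome:
      a.totalRemainingCollateral += returnedCollateral.get # and so does collateral

  when isMainModule:
    var a = Availability(freeSize: 10, totalRemainingCollateral: 100)
    a.deleteReservation(5, some 50'u64)
    assert a.freeSize == 15 and a.totalRemainingCollateral == 150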
diff --git a/codex/sales/reservations.nim b/codex/sales/reservations.nim
index 027bda95..4f48e057 100644
--- a/codex/sales/reservations.nim
+++ b/codex/sales/reservations.nim
@@ -7,26 +7,29 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
 ##
-##                                            +--------------------------------------+
-##                                            |            RESERVATION               |
-## +----------------------------------------+ |--------------------------------------|
-## |            AVAILABILITY                | | ReservationId  | id             | PK |
-## |----------------------------------------| |--------------------------------------|
-## | AvailabilityId | id               | PK |<-||-------o<-| AvailabilityId | availabilityId | FK |
-## |----------------------------------------| |--------------------------------------|
-## | UInt256        | totalSize        |    | | UInt256        | size           |    |
-## |----------------------------------------| |--------------------------------------|
-## | UInt256        | freeSize         |    | | UInt256        | slotIndex      |    |
-## |----------------------------------------| +--------------------------------------+
-## | UInt256        | duration         |    |
-## |----------------------------------------|
-## | UInt256        | minPrice         |    |
-## |----------------------------------------|
-## | UInt256        | maxCollateral    |    |
-## +----------------------------------------+
+##                                                       +--------------------------------------+
+##                                                       |            RESERVATION               |
+## +---------------------------------------------------+ |--------------------------------------|
+## |                   AVAILABILITY                    | | ReservationId  | id             | PK |
+## |---------------------------------------------------| |--------------------------------------|
+## | AvailabilityId | id                          | PK |<-||-------o<-| AvailabilityId | availabilityId | FK |
+## |---------------------------------------------------| |--------------------------------------|
+## | UInt256        | totalSize                   |    | | UInt256        | size           |    |
+## |---------------------------------------------------| |--------------------------------------|
+## | UInt256        | freeSize                    |    | | UInt256        | slotIndex      |    |
+## |---------------------------------------------------| +--------------------------------------+
+## | UInt256        | duration                    |    |
+## |---------------------------------------------------|
+## | UInt256        | minPricePerBytePerSecond    |    |
+## |---------------------------------------------------|
+## | UInt256        | totalCollateral             |    |
+## |---------------------------------------------------|
+## | UInt256        | totalRemainingCollateral    |    |
+## +---------------------------------------------------+

 import pkg/upraises

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import std/sequtils
 import std/sugar
@@ -54,7 +57,6 @@ export logutils
 logScope:
   topics = "sales reservations"

-
 type
   AvailabilityId* = distinct array[32, byte]
   ReservationId* = distinct array[32, byte]
@@ -65,25 +67,32 @@ type
     totalSize* {.serialize.}: UInt256
     freeSize* {.serialize.}: UInt256
     duration* {.serialize.}: UInt256
-    minPrice* {.serialize.}: UInt256 # minimal price paid for the whole hosted slot for the request's duration
-    maxCollateral* {.serialize.}: UInt256
+    minPricePerBytePerSecond* {.serialize.}: UInt256
+    totalCollateral {.serialize.}: UInt256
+    totalRemainingCollateral* {.serialize.}: UInt256
+
   Reservation* = ref object
     id* {.serialize.}: ReservationId
     availabilityId* {.serialize.}: AvailabilityId
     size* {.serialize.}: UInt256
     requestId* {.serialize.}: RequestId
     slotIndex* {.serialize.}: UInt256
+
   Reservations* = ref object of RootObj
-    availabilityLock: AsyncLock # Lock for protecting assertions of availability's sizes when searching for matching availability
+    availabilityLock: AsyncLock
+      # Lock for protecting assertions of availability's sizes when searching for matching availability
     repo: RepoStore
     onAvailabilityAdded: ?OnAvailabilityAdded
+
   GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
   IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
-  OnAvailabilityAdded* = proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
+  OnAvailabilityAdded* =
+    proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
   StorableIter* = ref object
     finished*: bool
     next*: GetNext
     dispose*: IterDispose
+
   ReservationsError* = object of CodexError
   ReserveFailedError* = object of ReservationsError
   ReleaseFailedError* = object of ReservationsError
@@ -109,35 +118,52 @@ template withLock(lock, body) =
     if lock.locked:
       lock.release()

-
-proc new*(T: type Reservations,
-          repo: RepoStore): Reservations =
-
-  T(availabilityLock: newAsyncLock(),repo: repo)
+proc new*(T: type Reservations, repo: RepoStore): Reservations =
+  T(availabilityLock: newAsyncLock(), repo: repo)

 proc init*(
-    _: type Availability,
-    totalSize: UInt256,
-    freeSize: UInt256,
-    duration: UInt256,
-    minPrice: UInt256,
-    maxCollateral: UInt256): Availability =
-
+    _: type Availability,
+    totalSize: UInt256,
+    freeSize: UInt256,
+    duration: UInt256,
+    minPricePerBytePerSecond: UInt256,
+    totalCollateral: UInt256,
+): Availability =
   var id: array[32, byte]
   doAssert randomBytes(id) == 32
-  Availability(id: AvailabilityId(id), totalSize:totalSize, freeSize: freeSize, duration: duration, minPrice: minPrice, maxCollateral: maxCollateral)
+  Availability(
+    id: AvailabilityId(id),
+    totalSize: totalSize,
+    freeSize: freeSize,
+    duration: duration,
+    minPricePerBytePerSecond: minPricePerBytePerSecond,
+    totalCollateral: totalCollateral,
+    totalRemainingCollateral: totalCollateral,
+  )
+
+func totalCollateral*(self: Availability): UInt256 {.inline.} =
+  return self.totalCollateral
+
+proc `totalCollateral=`*(self: Availability, value: UInt256) {.inline.} =
+  self.totalCollateral = value
+  self.totalRemainingCollateral = value

 proc init*(
-    _: type Reservation,
-    availabilityId: AvailabilityId,
-    size: UInt256,
-    requestId: RequestId,
-    slotIndex: UInt256
+    _: type Reservation,
+    availabilityId: AvailabilityId,
+    size: UInt256,
+    requestId: RequestId,
+    slotIndex: UInt256,
 ): Reservation =
-
   var id: array[32, byte]
   doAssert randomBytes(id) == 32
-  Reservation(id: ReservationId(id), availabilityId: availabilityId, size: size, requestId: requestId, slotIndex: slotIndex)
+  Reservation(
+    id: ReservationId(id),
+    availabilityId: availabilityId,
+    size: size,
+    requestId: requestId,
+    slotIndex: slotIndex,
+  )

 func toArray(id: SomeStorableId): array[32, byte] =
   array[32, byte](id)
@@ -146,23 +172,26 @@ proc `==`*(x, y: AvailabilityId): bool {.borrow.}
 proc `==`*(x, y: ReservationId): bool {.borrow.}
 proc `==`*(x, y: Reservation): bool =
   x.id == y.id
+
 proc `==`*(x, y: Availability): bool =
   x.id == y.id

-proc `$`*(id: SomeStorableId): string = id.toArray.toHex
+proc `$`*(id: SomeStorableId): string =
+  id.toArray.toHex

 proc toErr[E1: ref CatchableError, E2: ReservationsError](
-    e1: E1,
-    _: type E2,
-    msg: string = e1.msg): ref E2 =
-
+    e1: E1, _: type E2, msg: string = e1.msg
+): ref E2 =
   return newException(E2, msg, e1)

-logutils.formatIt(LogFormat.textLines, SomeStorableId): it.short0xHexLog
-logutils.formatIt(LogFormat.json, SomeStorableId): it.to0xHexLog
+logutils.formatIt(LogFormat.textLines, SomeStorableId):
+  it.short0xHexLog
+logutils.formatIt(LogFormat.json, SomeStorableId):
+  it.to0xHexLog

-proc `onAvailabilityAdded=`*(self: Reservations,
-    onAvailabilityAdded: OnAvailabilityAdded) =
+proc `onAvailabilityAdded=`*(
+    self: Reservations, onAvailabilityAdded: OnAvailabilityAdded
+) =
   self.onAvailabilityAdded = some onAvailabilityAdded
AvailabilityId): ?!Key = @@ -176,27 +205,26 @@ func key*(reservationId: ReservationId, availabilityId: AvailabilityId): ?!Key = func key*(availability: Availability): ?!Key = return availability.id.key +func maxCollateralPerByte*(availability: Availability): UInt256 = + return availability.totalRemainingCollateral div availability.freeSize + func key*(reservation: Reservation): ?!Key = return key(reservation.id, reservation.availabilityId) -func available*(self: Reservations): uint = self.repo.available.uint +func available*(self: Reservations): uint = + self.repo.available.uint func hasAvailable*(self: Reservations, bytes: uint): bool = self.repo.available(bytes.NBytes) -proc exists*( - self: Reservations, - key: Key): Future[bool] {.async.} = - +proc exists*(self: Reservations, key: Key): Future[bool] {.async.} = let exists = await self.repo.metaDs.ds.contains(key) return exists -proc getImpl( - self: Reservations, - key: Key): Future[?!seq[byte]] {.async.} = - +proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} = if not await self.exists(key): - let err = newException(NotExistsError, "object with key " & $key & " does not exist") + let err = + newException(NotExistsError, "object with key " & $key & " does not exist") return failure(err) without serialized =? await self.repo.metaDs.ds.get(key), error: @@ -205,10 +233,8 @@ proc getImpl( return success serialized proc get*( - self: Reservations, - key: Key, - T: type SomeStorableObject): Future[?!T] {.async.} = - + self: Reservations, key: Key, T: type SomeStorableObject +): Future[?!T] {.async.} = without serialized =? await self.getImpl(key), error: return failure(error) @@ -217,27 +243,20 @@ proc get*( return success obj -proc updateImpl( - self: Reservations, - obj: SomeStorableObject): Future[?!void] {.async.} = - +proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.async.} = trace "updating " & $(obj.type), id = obj.id without key =? obj.key, error: return failure(error) - if err =? (await self.repo.metaDs.ds.put( - key, - @(obj.toJson.toBytes) - )).errorOption: + if err =? (await self.repo.metaDs.ds.put(key, @(obj.toJson.toBytes))).errorOption: return failure(err.toErr(UpdateFailedError)) return success() proc updateAvailability( - self: Reservations, - obj: Availability): Future[?!void] {.async.} = - + self: Reservations, obj: Availability +): Future[?!void] {.async.} = logScope: availabilityId = obj.id @@ -269,11 +288,18 @@ proc updateAvailability( if oldAvailability.totalSize != obj.totalSize: trace "totalSize changed, updating repo reservation" if oldAvailability.totalSize < obj.totalSize: # storage added - if reserveErr =? (await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes)).errorOption: + if reserveErr =? ( + await self.repo.reserve( + (obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes + ) + ).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) - elif oldAvailability.totalSize > obj.totalSize: # storage removed - if reserveErr =? (await self.repo.release((oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes)).errorOption: + if reserveErr =? 
( + await self.repo.release( + (oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes + ) + ).errorOption: return failure(reserveErr.toErr(ReleaseFailedError)) let res = await self.updateImpl(obj) @@ -296,21 +322,14 @@ proc updateAvailability( return res -proc update*( - self: Reservations, - obj: Reservation): Future[?!void] {.async.} = +proc update*(self: Reservations, obj: Reservation): Future[?!void] {.async.} = return await self.updateImpl(obj) -proc update*( - self: Reservations, - obj: Availability): Future[?!void] {.async.} = +proc update*(self: Reservations, obj: Availability): Future[?!void] {.async.} = withLock(self.availabilityLock): return await self.updateAvailability(obj) -proc delete( - self: Reservations, - key: Key): Future[?!void] {.async.} = - +proc delete(self: Reservations, key: Key): Future[?!void] {.async.} = trace "deleting object", key if not await self.exists(key): @@ -322,10 +341,11 @@ proc delete( return success() proc deleteReservation*( - self: Reservations, - reservationId: ReservationId, - availabilityId: AvailabilityId): Future[?!void] {.async.} = - + self: Reservations, + reservationId: ReservationId, + availabilityId: AvailabilityId, + returnedCollateral: ?UInt256 = UInt256.none, +): Future[?!void] {.async.} = logScope: reservationId availabilityId @@ -353,6 +373,9 @@ proc deleteReservation*( availability.freeSize += reservation.size + if collateral =? returnedCollateral: + availability.totalRemainingCollateral += collateral + if updateErr =? (await self.updateAvailability(availability)).errorOption: return failure(updateErr) @@ -365,24 +388,23 @@ proc deleteReservation*( # To delete, must not have any active sales. proc createAvailability*( - self: Reservations, - size: UInt256, - duration: UInt256, - minPrice: UInt256, - maxCollateral: UInt256): Future[?!Availability] {.async.} = + self: Reservations, + size: UInt256, + duration: UInt256, + minPricePerBytePerSecond: UInt256, + totalCollateral: UInt256, +): Future[?!Availability] {.async.} = + trace "creating availability", + size, duration, minPricePerBytePerSecond, totalCollateral - trace "creating availability", size, duration, minPrice, maxCollateral - - let availability = Availability.init( - size, size, duration, minPrice, maxCollateral - ) + let availability = + Availability.init(size, size, duration, minPricePerBytePerSecond, totalCollateral) let bytes = availability.freeSize.truncate(uint) if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) if updateErr =? (await self.update(availability)).errorOption: - # rollback the reserve trace "rolling back reserve" if rollbackErr =? (await self.repo.release(bytes.NBytes)).errorOption: @@ -394,13 +416,13 @@ proc createAvailability*( return success(availability) method createReservation*( - self: Reservations, - availabilityId: AvailabilityId, - slotSize: UInt256, - requestId: RequestId, - slotIndex: UInt256 + self: Reservations, + availabilityId: AvailabilityId, + slotSize: UInt256, + requestId: RequestId, + slotIndex: UInt256, + collateralPerByte: UInt256, ): Future[?!Reservation] {.async, base.} = - withLock(self.availabilityLock):
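# Worked example of the collateral accounting this change introduces
# (illustrative numbers, not from the patch): an Availability with
# freeSize = 100 bytes and totalRemainingCollateral = 1000 advertises
# maxCollateralPerByte = 1000 div 100 = 10. Reserving a 40-byte slot at
# collateralPerByte = 8 leaves freeSize = 60 and
# totalRemainingCollateral = 1000 - 40 * 8 = 680, so the next match sees
# maxCollateralPerByte = 680 div 60 = 11 (UInt256 division truncates).
without availabilityKey =?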
availabilityId.key, error: return failure(error) @@ -412,7 +434,8 @@ method createReservation*( if availability.freeSize < slotSize: let error = newException( BytesOutOfBoundsError, - "trying to reserve an amount of bytes that is greater than the total size of the Availability") + "trying to reserve an amount of bytes that is greater than the free size of the Availability", + ) return failure(error) trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex @@ -426,6 +449,9 @@ method createReservation*( # the newly created Reservation availability.freeSize -= slotSize + # adjust the remaining totalRemainingCollateral + availability.totalRemainingCollateral -= slotSize * collateralPerByte + # update availability with reduced size trace "Updating availability with reduced size" if updateErr =? (await self.updateAvailability(availability)).errorOption: @@ -446,11 +472,11 @@ method createReservation*( return success(reservation) proc returnBytesToAvailability*( - self: Reservations, - availabilityId: AvailabilityId, - reservationId: ReservationId, - bytes: UInt256): Future[?!void] {.async.} = - + self: Reservations, + availabilityId: AvailabilityId, + reservationId: ReservationId, + bytes: UInt256, +): Future[?!void] {.async.} = logScope: reservationId availabilityId @@ -467,14 +493,17 @@ proc returnBytesToAvailability*( let bytesToBeReturned = bytes - reservation.size if bytesToBeReturned == 0: - trace "No bytes are returned", requestSizeBytes = bytes, returningBytes = bytesToBeReturned + trace "No bytes are returned", + requestSizeBytes = bytes, returningBytes = bytesToBeReturned return success() - trace "Returning bytes", requestSizeBytes = bytes, returningBytes = bytesToBeReturned + trace "Returning bytes", + requestSizeBytes = bytes, returningBytes = bytesToBeReturned # First lets see if we can re-reserve the bytes, if the Repo's quota # is depleted then we will fail-fast as there is nothing to be done atm. - if reserveErr =? (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + if reserveErr =? + (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) without availabilityKey =? availabilityId.key, error: @@ -487,9 +516,9 @@ proc returnBytesToAvailability*( # Update availability with returned size if updateErr =? (await self.updateAvailability(availability)).errorOption: - trace "Rolling back returning bytes" - if rollbackErr =? (await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + if rollbackErr =? + (await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption: rollbackErr.parent = updateErr return failure(rollbackErr) @@ -498,11 +527,11 @@ proc returnBytesToAvailability*( return success() proc release*( - self: Reservations, - reservationId: ReservationId, - availabilityId: AvailabilityId, - bytes: uint): Future[?!void] {.async.} = - + self: Reservations, + reservationId: ReservationId, + availabilityId: AvailabilityId, + bytes: uint, +): Future[?!void] {.async.} = logScope: topics = "release" bytes @@ -520,7 +549,8 @@ proc release*( if reservation.size < bytes.u256: let error = newException( BytesOutOfBoundsError, - "trying to release an amount of bytes that is greater than the total size of the Reservation") + "trying to release an amount of bytes that is greater than the total size of the Reservation", + ) return failure(error) if releaseErr =? 
(await self.repo.release(bytes.NBytes)).errorOption: @@ -530,7 +560,6 @@ proc release*( # persist partially used Reservation with updated size if err =? (await self.update(reservation)).errorOption: - # rollback release if an update error encountered trace "rolling back release" if rollbackErr =? (await self.repo.reserve(bytes.NBytes)).errorOption: @@ -545,11 +574,8 @@ iterator items(self: StorableIter): Future[?seq[byte]] = yield self.next() proc storables( - self: Reservations, - T: type SomeStorableObject, - queryKey: Key = ReservationsKey + self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey ): Future[?!StorableIter] {.async.} = - var iter = StorableIter() let query = Query.init(queryKey) when T is Availability: @@ -570,12 +596,8 @@ proc storables( proc next(): Future[?seq[byte]] {.async.} = await idleAsync() iter.finished = results.finished - if not results.finished and - res =? (await results.next()) and - res.data.len > 0 and - key =? res.key and - key.namespaces.len == defaultKey.namespaces.len: - + if not results.finished and res =? (await results.next()) and res.data.len > 0 and + key =? res.key and key.namespaces.len == defaultKey.namespaces.len: return some res.data return none seq[byte] @@ -588,11 +610,8 @@ proc storables( return success iter proc allImpl( - self: Reservations, - T: type SomeStorableObject, - queryKey: Key = ReservationsKey + self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey ): Future[?!seq[T]] {.async.} = - var ret: seq[T] = @[] without storables =? (await self.storables(T, queryKey)), error: @@ -604,24 +623,18 @@ proc allImpl( without obj =? T.fromJson(bytes), error: error "json deserialization error", - json = string.fromBytes(bytes), - error = error.msg + json = string.fromBytes(bytes), error = error.msg continue ret.add obj return success(ret) -proc all*( - self: Reservations, - T: type SomeStorableObject -): Future[?!seq[T]] {.async.} = +proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} = return await self.allImpl(T) proc all*( - self: Reservations, - T: type SomeStorableObject, - availabilityId: AvailabilityId + self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId ): Future[?!seq[T]] {.async.} = without key =? (ReservationsKey / $availabilityId): return failure("no key") @@ -629,29 +642,28 @@ proc all*( return await self.allImpl(T, key) proc findAvailability*( - self: Reservations, - size, duration, minPrice, collateral: UInt256 + self: Reservations, + size, duration, pricePerBytePerSecond, collateralPerByte: UInt256, ): Future[?Availability] {.async.} = - without storables =? (await self.storables(Availability)), e: error "failed to get all storables", error = e.msg return none Availability for item in storables.items: - if bytes =? (await item) and - availability =? Availability.fromJson(bytes): - - if size <= availability.freeSize and - duration <= availability.duration and - collateral <= availability.maxCollateral and - minPrice >= availability.minPrice: - + if bytes =? (await item) and availability =? 
Availability.fromJson(bytes): + if size <= availability.freeSize and duration <= availability.duration and + collateralPerByte <= availability.maxCollateralPerByte and + pricePerBytePerSecond >= availability.minPricePerBytePerSecond: trace "availability matched", id = availability.id, - size, availFreeSize = availability.freeSize, - duration, availDuration = availability.duration, - minPrice, availMinPrice = availability.minPrice, - collateral, availMaxCollateral = availability.maxCollateral + size, + availFreeSize = availability.freeSize, + duration, + availDuration = availability.duration, + pricePerBytePerSecond, + availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond, + collateralPerByte, + availMaxCollateralPerByte = availability.maxCollateralPerByte # TODO: As soon as we're on ARC-ORC, we can use destructors # to automatically dispose our iterators when they fall out of scope. @@ -663,7 +675,11 @@ proc findAvailability*( trace "availability did not match", id = availability.id, - size, availFreeSize = availability.freeSize, - duration, availDuration = availability.duration, - minPrice, availMinPrice = availability.minPrice, - collateral, availMaxCollateral = availability.maxCollateral + size, + availFreeSize = availability.freeSize, + duration, + availDuration = availability.duration, + pricePerBytePerSecond, + availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond, + collateralPerByte, + availMaxCollateralPerByte = availability.maxCollateralPerByte diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index e52e2fe0..8a8e5dc0 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -25,27 +25,26 @@ type onCleanUp*: OnCleanUp onFilled*: ?OnFilled - OnCleanUp* = proc (returnBytes = false, reprocessSlot = false): Future[void] {.gcsafe, upraises: [].} - OnFilled* = proc(request: StorageRequest, - slotIndex: UInt256) {.gcsafe, upraises: [].} + OnCleanUp* = proc( + returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + ): Future[void] {.gcsafe, upraises: [].} + OnFilled* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} SalesAgentError = object of CodexError AllSlotsFilledError* = object of SalesAgentError func `==`*(a, b: SalesAgent): bool = - a.data.requestId == b.data.requestId and - a.data.slotIndex == b.data.slotIndex + a.data.requestId == b.data.requestId and a.data.slotIndex == b.data.slotIndex -proc newSalesAgent*(context: SalesContext, - requestId: RequestId, - slotIndex: UInt256, - request: ?StorageRequest): SalesAgent = +proc newSalesAgent*( + context: SalesContext, + requestId: RequestId, + slotIndex: UInt256, + request: ?StorageRequest, +): SalesAgent = var agent = SalesAgent.new() agent.context = context - agent.data = SalesData( - requestId: requestId, - slotIndex: slotIndex, - request: request) + agent.data = SalesData(requestId: requestId, slotIndex: slotIndex, request: request) return agent proc retrieveRequest*(agent: SalesAgent) {.async.} = @@ -62,6 +61,7 @@ proc retrieveRequestState*(agent: SalesAgent): Future[?RequestState] {.async.} = func state*(agent: SalesAgent): ?string = proc description(state: State): string = $state + agent.query(description) proc subscribeCancellation(agent: SalesAgent) {.async.} = @@ -77,7 +77,7 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} = while true: let deadline = max(clock.now, expiry) + 1 - trace "Waiting for request to be cancelled", now=clock.now, expiry=deadline + trace "Waiting for request to be 
cancelled", now = clock.now, expiry = deadline await clock.waitUntil(deadline) without state =? await agent.retrieveRequestState(): @@ -93,27 +93,29 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} = of RequestState.Started, RequestState.Finished, RequestState.Failed: break - debug "The request is not yet canceled, even though it should be. Waiting for some more time.", currentState = state, now=clock.now + debug "The request is not yet canceled, even though it should be. Waiting for some more time.", + currentState = state, now = clock.now data.cancelled = onCancelled() -method onFulfilled*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} = - if agent.data.requestId == requestId and - not agent.data.cancelled.isNil: +method onFulfilled*( + agent: SalesAgent, requestId: RequestId +) {.base, gcsafe, upraises: [].} = + if agent.data.requestId == requestId and not agent.data.cancelled.isNil: agent.data.cancelled.cancelSoon() -method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} = +method onFailed*( + agent: SalesAgent, requestId: RequestId +) {.base, gcsafe, upraises: [].} = without request =? agent.data.request: return if agent.data.requestId == requestId: agent.schedule(failedEvent(request)) -method onSlotFilled*(agent: SalesAgent, - requestId: RequestId, - slotIndex: UInt256) {.base, gcsafe, upraises: [].} = - - if agent.data.requestId == requestId and - agent.data.slotIndex == slotIndex: +method onSlotFilled*( + agent: SalesAgent, requestId: RequestId, slotIndex: UInt256 +) {.base, gcsafe, upraises: [].} = + if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex: agent.schedule(slotFilledEvent(requestId, slotIndex)) proc subscribe*(agent: SalesAgent) {.async.} = diff --git a/codex/sales/salescontext.nim b/codex/sales/salescontext.nim index 199aa5fb..bb0b5dc9 100644 --- a/codex/sales/salescontext.nim +++ b/codex/sales/salescontext.nim @@ -24,12 +24,14 @@ type simulateProofFailures*: int BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].} - OnStore* = proc(request: StorageRequest, - slot: UInt256, - blocksCb: BlocksCb): Future[?!void] {.gcsafe, upraises: [].} - OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.gcsafe, upraises: [].} - OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.gcsafe, upraises: [].} - OnClear* = proc(request: StorageRequest, - slotIndex: UInt256) {.gcsafe, upraises: [].} - OnSale* = proc(request: StorageRequest, - slotIndex: UInt256) {.gcsafe, upraises: [].} + OnStore* = proc( + request: StorageRequest, slot: UInt256, blocksCb: BlocksCb + ): Future[?!void] {.gcsafe, upraises: [].} + OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {. + gcsafe, upraises: [] + .} + OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {. 
+ gcsafe, upraises: [] + .} + OnClear* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnSale* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} diff --git a/codex/sales/salesdata.nim b/codex/sales/salesdata.nim index 7fd56149..995c7a4b 100644 --- a/codex/sales/salesdata.nim +++ b/codex/sales/salesdata.nim @@ -3,11 +3,10 @@ import ../contracts/requests import ../market import ./reservations -type - SalesData* = ref object - requestId*: RequestId - ask*: StorageAsk - request*: ?StorageRequest - slotIndex*: UInt256 - cancelled*: Future[void] - reservation*: ?Reservation +type SalesData* = ref object + requestId*: RequestId + ask*: StorageAsk + request*: ?StorageRequest + slotIndex*: UInt256 + cancelled*: Future[void] + reservation*: ?Reservation diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim index f5685e34..332ec9e0 100644 --- a/codex/sales/slotqueue.nim +++ b/codex/sales/slotqueue.nim @@ -17,7 +17,7 @@ logScope: type OnProcessSlot* = - proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises:[].} + proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].} # Non-ref obj copies value when assigned, preventing accidental modification # of values which could cause an incorrect order (eg @@ -32,14 +32,14 @@ type slotIndex: uint16 slotSize: UInt256 duration: UInt256 - reward: UInt256 - collateral: UInt256 + pricePerBytePerSecond: UInt256 + collateralPerByte: UInt256 expiry: UInt256 seen: bool # don't need to -1 to prevent overflow when adding 1 (to always allow push) # because AsyncHeapQueue size is of type `int`, which is larger than `uint16` - SlotQueueSize = range[1'u16..uint16.high] + SlotQueueSize = range[1'u16 .. uint16.high] SlotQueue* = ref object maxWorkers: int @@ -69,10 +69,14 @@ const DefaultMaxWorkers = 3 const DefaultMaxSize = 128'u16 proc profitability(item: SlotQueueItem): UInt256 = - StorageAsk(collateral: item.collateral, - duration: item.duration, - reward: item.reward, - slotSize: item.slotSize).pricePerSlot + StorageAsk( + duration: item.duration, + pricePerBytePerSecond: item.pricePerBytePerSecond, + slotSize: item.slotSize, + ).pricePerSlot + +proc collateralPerSlot(item: SlotQueueItem): UInt256 = + StorageAsk(collateralPerByte: item.collateralPerByte, slotSize: item.slotSize).collateralPerSlot proc `<`*(a, b: SlotQueueItem): bool = # for A to have a higher priority than B (in a min queue), A must be less than @@ -90,25 +94,22 @@ proc `<`*(a, b: SlotQueueItem): bool = scoreA.addIf(a.profitability > b.profitability, 3) scoreB.addIf(a.profitability < b.profitability, 3) - scoreA.addIf(a.collateral < b.collateral, 2) - scoreB.addIf(a.collateral > b.collateral, 2) + scoreA.addIf(a.collateralPerSlot < b.collateralPerSlot, 2) + scoreB.addIf(a.collateralPerSlot > b.collateralPerSlot, 2) scoreA.addIf(a.expiry > b.expiry, 1) scoreB.addIf(a.expiry < b.expiry, 1) - scoreA.addIf(a.slotSize < b.slotSize, 0) - scoreB.addIf(a.slotSize > b.slotSize, 0) - return scoreA > scoreB proc `==`*(a, b: SlotQueueItem): bool = - a.requestId == b.requestId and - a.slotIndex == b.slotIndex - -proc new*(_: type SlotQueue, - maxWorkers = DefaultMaxWorkers, - maxSize: SlotQueueSize = DefaultMaxSize): SlotQueue = + a.requestId == b.requestId and a.slotIndex == b.slotIndex +proc new*( + _: type SlotQueue, + maxWorkers = DefaultMaxWorkers, + maxSize: SlotQueueSize = DefaultMaxSize, +): SlotQueue = if maxWorkers <= 0: raise newException(ValueError, "maxWorkers must be positive") if 
maxWorkers.uint16 > maxSize: @@ -121,53 +122,46 @@ proc new*(_: type SlotQueue, queue: newAsyncHeapQueue[SlotQueueItem](maxSize.int + 1), running: false, trackedFutures: TrackedFutures.new(), - unpaused: newAsyncEvent() + unpaused: newAsyncEvent(), ) # avoid instantiating `workers` in constructor to avoid side effects in # `newAsyncQueue` procedure proc init(_: type SlotQueueWorker): SlotQueueWorker = - SlotQueueWorker( - doneProcessing: newFuture[void]("slotqueue.worker.processing") - ) - -proc init*(_: type SlotQueueItem, - requestId: RequestId, - slotIndex: uint16, - ask: StorageAsk, - expiry: UInt256, - seen = false): SlotQueueItem = + SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing")) +proc init*( + _: type SlotQueueItem, + requestId: RequestId, + slotIndex: uint16, + ask: StorageAsk, + expiry: UInt256, + seen = false, +): SlotQueueItem = SlotQueueItem( requestId: requestId, slotIndex: slotIndex, slotSize: ask.slotSize, duration: ask.duration, - reward: ask.reward, - collateral: ask.collateral, + pricePerBytePerSecond: ask.pricePerBytePerSecond, + collateralPerByte: ask.collateralPerByte, expiry: expiry, - seen: seen + seen: seen, ) -proc init*(_: type SlotQueueItem, - request: StorageRequest, - slotIndex: uint16): SlotQueueItem = - - SlotQueueItem.init(request.id, - slotIndex, - request.ask, - request.expiry) - -proc init*(_: type SlotQueueItem, - requestId: RequestId, - ask: StorageAsk, - expiry: UInt256): seq[SlotQueueItem] = +proc init*( + _: type SlotQueueItem, request: StorageRequest, slotIndex: uint16 +): SlotQueueItem = + SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry) +proc init*( + _: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: UInt256 +): seq[SlotQueueItem] = if not ask.slots.inRange: raise newException(SlotsOutOfRangeError, "Too many slots") var i = 0'u16 - proc initSlotQueueItem: SlotQueueItem = + proc initSlotQueueItem(): SlotQueueItem = let item = SlotQueueItem.init(requestId, i, ask, expiry) inc i return item @@ -176,37 +170,54 @@ proc init*(_: type SlotQueueItem, Rng.instance.shuffle(items) return items -proc init*(_: type SlotQueueItem, - request: StorageRequest): seq[SlotQueueItem] = - +proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] = return SlotQueueItem.init(request.id, request.ask, request.expiry) proc inRange*(val: SomeUnsignedInt): bool = - val.uint16 in SlotQueueSize.low..SlotQueueSize.high + val.uint16 in SlotQueueSize.low .. 
SlotQueueSize.high -proc requestId*(self: SlotQueueItem): RequestId = self.requestId -proc slotIndex*(self: SlotQueueItem): uint16 = self.slotIndex -proc slotSize*(self: SlotQueueItem): UInt256 = self.slotSize -proc duration*(self: SlotQueueItem): UInt256 = self.duration -proc reward*(self: SlotQueueItem): UInt256 = self.reward -proc collateral*(self: SlotQueueItem): UInt256 = self.collateral -proc seen*(self: SlotQueueItem): bool = self.seen +proc requestId*(self: SlotQueueItem): RequestId = + self.requestId -proc running*(self: SlotQueue): bool = self.running +proc slotIndex*(self: SlotQueueItem): uint16 = + self.slotIndex -proc len*(self: SlotQueue): int = self.queue.len +proc slotSize*(self: SlotQueueItem): UInt256 = + self.slotSize -proc size*(self: SlotQueue): int = self.queue.size - 1 +proc duration*(self: SlotQueueItem): UInt256 = + self.duration -proc paused*(self: SlotQueue): bool = not self.unpaused.isSet +proc pricePerBytePerSecond*(self: SlotQueueItem): UInt256 = + self.pricePerBytePerSecond -proc `$`*(self: SlotQueue): string = $self.queue +proc collateralPerByte*(self: SlotQueueItem): UInt256 = + self.collateralPerByte + +proc seen*(self: SlotQueueItem): bool = + self.seen + +proc running*(self: SlotQueue): bool = + self.running + +proc len*(self: SlotQueue): int = + self.queue.len + +proc size*(self: SlotQueue): int = + self.queue.size - 1 + +proc paused*(self: SlotQueue): bool = + not self.unpaused.isSet + +proc `$`*(self: SlotQueue): string = + $self.queue proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) = self.onProcessSlot = some onProcessSlot proc activeWorkers*(self: SlotQueue): int = - if not self.running: return 0 + if not self.running: + return 0 # active = capacity - available self.maxWorkers - self.workers.len @@ -222,10 +233,9 @@ proc unpause*(self: SlotQueue) = # set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait() self.unpaused.fire() -proc populateItem*(self: SlotQueue, - requestId: RequestId, - slotIndex: uint16): ?SlotQueueItem = - +proc populateItem*( + self: SlotQueue, requestId: RequestId, slotIndex: uint16 +): ?SlotQueueItem = trace "populate item, items in queue", len = self.queue.len for item in self.queue.items: trace "populate item search", itemRequestId = item.requestId, requestId @@ -235,14 +245,13 @@ proc populateItem*(self: SlotQueue, slotIndex: slotIndex, slotSize: item.slotSize, duration: item.duration, - reward: item.reward, - collateral: item.collateral, - expiry: item.expiry + pricePerBytePerSecond: item.pricePerBytePerSecond, + collateralPerByte: item.collateralPerByte, + expiry: item.expiry, ) return none SlotQueueItem proc push*(self: SlotQueue, item: SlotQueueItem): ?!void = - logScope: requestId = item.requestId slotIndex = item.slotIndex @@ -330,9 +339,9 @@ proc addWorker(self: SlotQueue): ?!void = return success() -proc dispatch(self: SlotQueue, - worker: SlotQueueWorker, - item: SlotQueueItem) {.async: (raises: []).} = +proc dispatch( + self: SlotQueue, worker: SlotQueueWorker, item: SlotQueueItem +) {.async: (raises: []).} = logScope: requestId = item.requestId slotIndex = item.slotIndex @@ -349,10 +358,8 @@ proc dispatch(self: SlotQueue, if err =? 
self.addWorker().errorOption: raise err # catch below - except QueueNotRunningError as e: - info "could not re-add worker to worker queue, queue not running", - error = e.msg + info "could not re-add worker to worker queue, queue not running", error = e.msg except CancelledError: # do not bubble exception up as it is called with `asyncSpawn` which would # convert the exception into a `FutureDefect` @@ -380,7 +387,6 @@ proc clearSeenFlags*(self: SlotQueue) = trace "all 'seen' flags cleared" proc run(self: SlotQueue) {.async: (raises: []).} = - while self.running: try: if self.paused: @@ -389,7 +395,8 @@ proc run(self: SlotQueue) {.async: (raises: []).} = # block until unpaused is true/fired, ie wait for queue to be unpaused await self.unpaused.wait() - let worker = await self.workers.popFirst() # if workers saturated, wait here for new workers + let worker = + await self.workers.popFirst() # if workers saturated, wait here for new workers let item = await self.queue.pop() # if queue empty, wait here for new items logScope: @@ -413,6 +420,8 @@ proc run(self: SlotQueue) {.async: (raises: []).} = trace "readding seen item back into the queue" discard self.push(item) # on error, drop the item and continue worker.doneProcessing.complete() + if err =? self.addWorker().errorOption: + error "error adding new worker", error = err.msg await sleepAsync(1.millis) # poll continue @@ -442,7 +451,7 @@ proc start*(self: SlotQueue) = # Add initial workers to the `AsyncHeapQueue`. Once a worker has completed its # task, a new worker will be pushed to the queue - for i in 0.. 0: info "Proving with failure rate", rate = context.simulateProofFailures - return some State(SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures)) + return some State( + SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures) + ) return some State(SaleProving()) - else: let error = newException(HostMismatchError, "Slot filled by other host") return some State(SaleErrored(error: error)) diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim index ce2e53f5..1934fc12 100644 --- a/codex/sales/states/filling.nim +++ b/codex/sales/states/filling.nim @@ -13,11 +13,11 @@ import ./errored logScope: topics = "marketplace sales filling" -type - SaleFilling* = ref object of ErrorHandlingState - proof*: Groth16Proof +type SaleFilling* = ref object of ErrorHandlingState + proof*: Groth16Proof -method `$`*(state: SaleFilling): string = "SaleFilling" +method `$`*(state: SaleFilling): string = + "SaleFilling" method onCancelled*(state: SaleFilling, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -28,7 +28,7 @@ method onFailed*(state: SaleFilling, request: StorageRequest): ?State = method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = let data = SalesAgent(machine).data let market = SalesAgent(machine).context.market - without (fullCollateral =? data.request.?ask.?collateral): + without (request =? 
data.request): raiseAssert "Request not set" logScope: @@ -36,14 +36,17 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = slotIndex = data.slotIndex let slotState = await market.slotState(slotId(data.requestId, data.slotIndex)) + let requestedCollateral = request.ask.collateralPerSlot var collateral: UInt256 if slotState == SlotState.Repair: # When repairing the node gets "discount" on the collateral that it needs to let repairRewardPercentage = (await market.repairRewardPercentage).u256 - collateral = fullCollateral - ((fullCollateral * repairRewardPercentage)).div(100.u256) + collateral = + requestedCollateral - + ((requestedCollateral * repairRewardPercentage)).div(100.u256) else: - collateral = fullCollateral + collateral = requestedCollateral debug "Filling slot" try: @@ -51,9 +54,9 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = except MarketError as e: if e.msg.contains "Slot is not free": debug "Slot is already filled, ignoring slot" - return some State( SaleIgnored(reprocessSlot: false, returnBytes: true) ) + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) else: - return some State( SaleErrored(error: e) ) + return some State(SaleErrored(error: e)) # other CatchableErrors are handled "automatically" by the ErrorHandlingState return some State(SaleFilled()) diff --git a/codex/sales/states/finished.nim b/codex/sales/states/finished.nim index 59e9244c..151300d0 100644 --- a/codex/sales/states/finished.nim +++ b/codex/sales/states/finished.nim @@ -10,10 +10,11 @@ import ./failed logScope: topics = "marketplace sales finished" -type - SaleFinished* = ref object of ErrorHandlingState +type SaleFinished* = ref object of ErrorHandlingState + returnedCollateral*: ?UInt256 -method `$`*(state: SaleFinished): string = "SaleFinished" +method `$`*(state: SaleFinished): string = + "SaleFinished" method onCancelled*(state: SaleFinished, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -28,7 +29,8 @@ method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} = without request =? data.request: raiseAssert "no sale request" - info "Slot finished and paid out", requestId = data.requestId, slotIndex = data.slotIndex + info "Slot finished and paid out", + requestId = data.requestId, slotIndex = data.slotIndex if onCleanUp =? agent.onCleanUp: - await onCleanUp() + await onCleanUp(returnedCollateral = state.returnedCollateral) diff --git a/codex/sales/states/ignored.nim b/codex/sales/states/ignored.nim index 93346fdc..b915bff5 100644 --- a/codex/sales/states/ignored.nim +++ b/codex/sales/states/ignored.nim @@ -11,16 +11,17 @@ logScope: # Ignored slots could mean there was no availability or that the slot could # not be reserved. -type - SaleIgnored* = ref object of ErrorHandlingState - reprocessSlot*: bool # readd slot to queue with `seen` flag - returnBytes*: bool # return unreleased bytes from Reservation to Availability +type SaleIgnored* = ref object of ErrorHandlingState + reprocessSlot*: bool # re-add slot to queue with `seen` flag + returnBytes*: bool # return unreleased bytes from Reservation to Availability -method `$`*(state: SaleIgnored): string = "SaleIgnored" +method `$`*(state: SaleIgnored): string = + "SaleIgnored" method run*(state: SaleIgnored, machine: Machine): Future[?State] {.async.} = let agent = SalesAgent(machine)
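# `onCleanUp` is invoked here without `returnedCollateral`, so it defaults to
# `UInt256.none`: an ignored sale returns unreleased bytes to the
# Availability but no collateral. Contrast SaleFinished above, which passes
# the collateral read via market.currentCollateral(slot.id) before the slot
# is freed (see payout.nim).
if onCleanUp =?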
agent.onCleanUp: - await onCleanUp(reprocessSlot = state.reprocessSlot, - returnBytes = state.returnBytes) + await onCleanUp( + reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes + ) diff --git a/codex/sales/states/initialproving.nim b/codex/sales/states/initialproving.nim index 4a5b8515..bc9ce6b6 100644 --- a/codex/sales/states/initialproving.nim +++ b/codex/sales/states/initialproving.nim @@ -12,10 +12,10 @@ import ./failed logScope: topics = "marketplace sales initial-proving" -type - SaleInitialProving* = ref object of ErrorHandlingState +type SaleInitialProving* = ref object of ErrorHandlingState -method `$`*(state: SaleInitialProving): string = "SaleInitialProving" +method `$`*(state: SaleInitialProving): string = + "SaleInitialProving" method onCancelled*(state: SaleInitialProving, request: StorageRequest): ?State = return some State(SaleCancelled()) diff --git a/codex/sales/states/payout.nim b/codex/sales/states/payout.nim index 5c8c2859..9ce36613 100644 --- a/codex/sales/states/payout.nim +++ b/codex/sales/states/payout.nim @@ -10,10 +10,10 @@ import ./finished logScope: topics = "marketplace sales payout" -type - SalePayout* = ref object of ErrorHandlingState +type SalePayout* = ref object of ErrorHandlingState -method `$`*(state: SalePayout): string = "SalePayout" +method `$`*(state: SalePayout): string = + "SalePayout" method onCancelled*(state: SalePayout, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -21,7 +21,7 @@ method onCancelled*(state: SalePayout, request: StorageRequest): ?State = method onFailed*(state: SalePayout, request: StorageRequest): ?State = return some State(SaleFailed()) -method run(state: SalePayout, machine: Machine): Future[?State] {.async.} = +method run*(state: SalePayout, machine: Machine): Future[?State] {.async.} = let data = SalesAgent(machine).data let market = SalesAgent(machine).context.market @@ -29,7 +29,9 @@ method run(state: SalePayout, machine: Machine): Future[?State] {.async.} = raiseAssert "no sale request" let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Collecting finished slot's reward", requestId = data.requestId, slotIndex = data.slotIndex + debug "Collecting finished slot's reward", + requestId = data.requestId, slotIndex = data.slotIndex + let currentCollateral = await market.currentCollateral(slot.id) await market.freeSlot(slot.id) - return some State(SaleFinished()) + return some State(SaleFinished(returnedCollateral: some currentCollateral)) diff --git a/codex/sales/states/preparing.nim b/codex/sales/states/preparing.nim index 169eb964..bdde1249 100644 --- a/codex/sales/states/preparing.nim +++ b/codex/sales/states/preparing.nim @@ -14,15 +14,17 @@ import ./ignored import ./slotreserving import ./errored -declareCounter(codex_reservations_availability_mismatch, "codex reservations availability_mismatch") +declareCounter( + codex_reservations_availability_mismatch, "codex reservations availability_mismatch" +) -type - SalePreparing* = ref object of ErrorHandlingState +type SalePreparing* = ref object of ErrorHandlingState logScope: topics = "marketplace sales preparing" -method `$`*(state: SalePreparing): string = "SalePreparing" +method `$`*(state: SalePreparing): string = + "SalePreparing" method onCancelled*(state: SalePreparing, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -30,8 +32,9 @@ method onCancelled*(state: SalePreparing, request: StorageRequest): ?State = method onFailed*(state: SalePreparing, request: StorageRequest): ?State 
= return some State(SaleFailed()) -method onSlotFilled*(state: SalePreparing, requestId: RequestId, - slotIndex: UInt256): ?State = +method onSlotFilled*( + state: SalePreparing, requestId: RequestId, slotIndex: UInt256 +): ?State = return some State(SaleFilled()) method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} = @@ -59,34 +62,31 @@ method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} = slotIndex = data.slotIndex slotSize = request.ask.slotSize duration = request.ask.duration - pricePerSlot = request.ask.pricePerSlot + pricePerBytePerSecond = request.ask.pricePerBytePerSecond + collateralPerByte = request.ask.collateralPerByte - # availability was checked for this slot when it entered the queue, however - # check to the ensure that there is still availability as they may have - # changed since being added (other slots may have been processed in that time) - without availability =? await reservations.findAvailability( - request.ask.slotSize, - request.ask.duration, - request.ask.pricePerSlot, - request.ask.collateral): + without availability =? + await reservations.findAvailability( + request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond, + request.ask.collateralPerByte, + ): debug "No availability found for request, ignoring" return some State(SaleIgnored(reprocessSlot: true)) info "Availability found for request, creating reservation" - without reservation =? await reservations.createReservation( - availability.id, - request.ask.slotSize, - request.id, - data.slotIndex - ), error: + without reservation =? + await reservations.createReservation( + availability.id, request.ask.slotSize, request.id, data.slotIndex, + request.ask.collateralPerByte, + ), error: trace "Creation of reservation failed" # Race condition: # reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it. # Should createReservation fail because there's no space, we proceed to SaleIgnored. if error of BytesOutOfBoundsError: - # Lets monitor how often this happen and if it is often we can make it more inteligent to handle it + # Let's monitor how often this happens; if it is frequent, we can handle it more intelligently codex_reservations_availability_mismatch.inc() return some State(SaleIgnored(reprocessSlot: true)) diff --git a/codex/sales/states/proving.nim b/codex/sales/states/proving.nim index 76180ab2..0ee2ed60 100644 --- a/codex/sales/states/proving.nim +++ b/codex/sales/states/proving.nim @@ -22,12 +22,12 @@ type loop: Future[void] method prove*( - state: SaleProving, - slot: Slot, - challenge: ProofChallenge, - onProve: OnProve, - market: Market, - currentPeriod: Period + state: SaleProving, + slot: Slot, + challenge: ProofChallenge, + onProve: OnProve, + market: Market, + currentPeriod: Period, ) {.base, async.} = try:
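# `onProve` is the callback that produces the Groth16 proof for the sampled
# cells. A minimal stand-in for tests could look like the following
# (illustrative sketch only; `fakeProve` is a hypothetical name):
#   proc fakeProve(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} =
#     return success(Groth16Proof.default)
without proof =?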
(await onProve(slot, challenge)), err: @@ -43,14 +43,13 @@ method prove*( error "Submitting proof failed", msg = e.msgDetail proc proveLoop( - state: SaleProving, - market: Market, - clock: Clock, - request: StorageRequest, - slotIndex: UInt256, - onProve: OnProve + state: SaleProving, + market: Market, + clock: Clock, + request: StorageRequest, + slotIndex: UInt256, + onProve: OnProve, ) {.async.} = - let slot = Slot(request: request, slotIndex: slotIndex) let slotId = slot.id @@ -76,7 +75,8 @@ proc proveLoop( case slotState of SlotState.Filled: debug "Proving for new period", period = currentPeriod - if (await market.isProofRequired(slotId)) or (await market.willProofBeRequired(slotId)): + if (await market.isProofRequired(slotId)) or + (await market.willProofBeRequired(slotId)): let challenge = await market.getChallenge(slotId) debug "Proof is required", period = currentPeriod, challenge = challenge await state.prove(slot, challenge, onProve, market, currentPeriod) @@ -100,7 +100,8 @@ proc proveLoop( debug "waiting until next period" await waitUntilPeriod(currentPeriod + 1) -method `$`*(state: SaleProving): string = "SaleProving" +method `$`*(state: SaleProving): string = + "SaleProving" method onCancelled*(state: SaleProving, request: StorageRequest): ?State = # state.loop cancellation happens automatically when run is cancelled due to diff --git a/codex/sales/states/provingsimulated.nim b/codex/sales/states/provingsimulated.nim index 20fb4ad6..e60169bc 100644 --- a/codex/sales/states/provingsimulated.nim +++ b/codex/sales/states/provingsimulated.nim @@ -12,21 +12,26 @@ when codex_enable_proof_failures: import ./proving logScope: - topics = "marketplace sales simulated-proving" + topics = "marketplace sales simulated-proving" - type - SaleProvingSimulated* = ref object of SaleProving - failEveryNProofs*: int - proofCount: int + type SaleProvingSimulated* = ref object of SaleProving + failEveryNProofs*: int + proofCount: int proc onSubmitProofError(error: ref CatchableError, period: UInt256, slotId: SlotId) = error "Submitting invalid proof failed", period, slotId, msg = error.msgDetail - method prove*(state: SaleProvingSimulated, slot: Slot, challenge: ProofChallenge, onProve: OnProve, market: Market, currentPeriod: Period) {.async.} = + method prove*( + state: SaleProvingSimulated, + slot: Slot, + challenge: ProofChallenge, + onProve: OnProve, + market: Market, + currentPeriod: Period, + ) {.async.} = trace "Processing proving in simulated mode" state.proofCount += 1 - if state.failEveryNProofs > 0 and - state.proofCount mod state.failEveryNProofs == 0: + if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0: state.proofCount = 0 try: @@ -40,4 +45,6 @@ when codex_enable_proof_failures: except CatchableError as e: onSubmitProofError(e, currentPeriod, slot.id) else: - await procCall SaleProving(state).prove(slot, challenge, onProve, market, currentPeriod) + await procCall SaleProving(state).prove( + slot, challenge, onProve, market, currentPeriod + ) diff --git a/codex/sales/states/slotreserving.nim b/codex/sales/states/slotreserving.nim index 670013ab..38b7fa76 100644 --- a/codex/sales/states/slotreserving.nim +++ b/codex/sales/states/slotreserving.nim @@ -12,13 +12,13 @@ import ./ignored import ./downloading import ./errored -type - SaleSlotReserving* = ref object of ErrorHandlingState +type SaleSlotReserving* = ref object of ErrorHandlingState logScope: topics = "marketplace sales reserving" -method `$`*(state: SaleSlotReserving): string = 
"SaleSlotReserving" +method `$`*(state: SaleSlotReserving): string = + "SaleSlotReserving" method onCancelled*(state: SaleSlotReserving, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -42,19 +42,17 @@ method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async. trace "Reserving slot" await market.reserveSlot(data.requestId, data.slotIndex) except MarketError as e: - if e.msg.contains "Reservation not allowed": + if e.msg.contains "SlotReservations_ReservationNotAllowed": debug "Slot cannot be reserved, ignoring", error = e.msg - return some State( SaleIgnored(reprocessSlot: false, returnBytes: true) ) + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) else: - return some State( SaleErrored(error: e) ) + return some State(SaleErrored(error: e)) # other CatchableErrors are handled "automatically" by the ErrorHandlingState trace "Slot successfully reserved" - return some State( SaleDownloading() ) - + return some State(SaleDownloading()) else: # do not re-add this slot to the queue, and return bytes from Reservation to # the Availability debug "Slot cannot be reserved, ignoring" - return some State( SaleIgnored(reprocessSlot: false, returnBytes: true) ) - + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) diff --git a/codex/sales/states/unknown.nim b/codex/sales/states/unknown.nim index d497cba3..3034129a 100644 --- a/codex/sales/states/unknown.nim +++ b/codex/sales/states/unknown.nim @@ -17,7 +17,8 @@ type SaleUnknownError* = object of CatchableError UnexpectedSlotError* = object of SaleUnknownError -method `$`*(state: SaleUnknown): string = "SaleUnknown" +method `$`*(state: SaleUnknown): string = + "SaleUnknown" method onCancelled*(state: SaleUnknown, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -38,8 +39,8 @@ method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} = case slotState of SlotState.Free: - let error = newException(UnexpectedSlotError, - "Slot state on chain should not be 'free'") + let error = + newException(UnexpectedSlotError, "Slot state on chain should not be 'free'") return some State(SaleErrored(error: error)) of SlotState.Filled: return some State(SaleFilled()) @@ -52,6 +53,7 @@ method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} = of SlotState.Cancelled: return some State(SaleCancelled()) of SlotState.Repair: - let error = newException(SlotFreedError, - "Slot was forcible freed and host was removed from its hosting") + let error = newException( + SlotFreedError, "Slot was forcible freed and host was removed from its hosting" + ) return some State(SaleErrored(error: error)) diff --git a/codex/slots/builder.nim b/codex/slots/builder.nim index 9df03f16..25844db6 100644 --- a/codex/slots/builder.nim +++ b/codex/slots/builder.nim @@ -5,5 +5,4 @@ import ../merkletree export builder, converters -type - Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash] +type Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash] diff --git a/codex/slots/builder/builder.nim b/codex/slots/builder/builder.nim index 8652350e..30332f1c 100644 --- a/codex/slots/builder/builder.nim +++ b/codex/slots/builder/builder.nim @@ -34,17 +34,17 @@ export converters, asynciter logScope: topics = "codex slotsbuilder" -type - SlotsBuilder*[T, H] = ref object of RootObj - store: BlockStore - manifest: Manifest # current manifest - strategy: IndexingStrategy # indexing strategy - cellSize: NBytes # cell size - numSlotBlocks: Natural 
# number of blocks per slot (should yield a power of two number of cells) - slotRoots: seq[H] # roots of the slots - emptyBlock: seq[byte] # empty block - verifiableTree: ?T # verification tree (dataset tree) - emptyDigestTree: T # empty digest tree for empty blocks +type SlotsBuilder*[T, H] = ref object of RootObj + store: BlockStore + manifest: Manifest # current manifest + strategy: IndexingStrategy # indexing strategy + cellSize: NBytes # cell size + numSlotBlocks: Natural + # number of blocks per slot (should yield a power of two number of cells) + slotRoots: seq[H] # roots of the slots + emptyBlock: seq[byte] # empty block + verifiableTree: ?T # verification tree (dataset tree) + emptyDigestTree: T # empty digest tree for empty blocks func verifiable*[T, H](self: SlotsBuilder[T, H]): bool {.inline.} = ## Returns true if the slots are verifiable. @@ -133,9 +133,8 @@ func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest = self.manifest proc buildBlockTree*[T, H]( - self: SlotsBuilder[T, H], - blkIdx: Natural, - slotPos: Natural): Future[?!(seq[byte], T)] {.async.} = + self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural +): Future[?!(seq[byte], T)] {.async.} = ## Build the block digest tree and return a tuple with the ## block data and the tree. ## @@ -160,16 +159,15 @@ proc buildBlockTree*[T, H]( if blk.isEmpty: success (self.emptyBlock, self.emptyDigestTree) else: - without tree =? - T.digestTree(blk.data, self.cellSize.int), err: + without tree =? T.digestTree(blk.data, self.cellSize.int), err: error "Failed to create digest for block", err = err.msg return failure(err) success (blk.data, tree) proc getCellHashes*[T, H]( - self: SlotsBuilder[T, H], - slotIndex: Natural): Future[?!seq[H]] {.async.} = + self: SlotsBuilder[T, H], slotIndex: Natural +): Future[?!seq[H]] {.async.} = ## Collect all the cells from a block and return ## their hashes. ## @@ -192,8 +190,8 @@ proc getCellHashes*[T, H]( pos = i trace "Getting block CID for tree at index" - without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and - digest =? tree.root, err: + without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and digest =? tree.root, + err: error "Failed to get block CID for tree at index", err = err.msg return failure(err) @@ -203,8 +201,8 @@ proc getCellHashes*[T, H]( success hashes proc buildSlotTree*[T, H]( - self: SlotsBuilder[T, H], - slotIndex: Natural): Future[?!T] {.async.} = + self: SlotsBuilder[T, H], slotIndex: Natural +): Future[?!T] {.async.} = ## Build the slot tree from the block digest hashes ## and return the tree. @@ -215,20 +213,20 @@ proc buildSlotTree*[T, H]( T.init(cellHashes) proc buildSlot*[T, H]( - self: SlotsBuilder[T, H], - slotIndex: Natural): Future[?!H] {.async.} = + self: SlotsBuilder[T, H], slotIndex: Natural +): Future[?!H] {.async.} = ## Build a slot tree and store the proofs in ## the block store. ## logScope: - cid = self.manifest.treeCid - slotIndex = slotIndex + cid = self.manifest.treeCid + slotIndex = slotIndex trace "Building slot tree" without tree =? (await self.buildSlotTree(slotIndex)) and - treeCid =? tree.root.?toSlotCid, err: + treeCid =? tree.root .? toSlotCid, err: error "Failed to build slot tree", err = err.msg return failure(err) @@ -238,13 +236,12 @@ proc buildSlot*[T, H]( error "Failed to get CID for slot cell", err = err.msg return failure(err) - without proof =? tree.getProof(i) and - encodableProof =? proof.toEncodableProof, err: + without proof =? tree.getProof(i) and encodableProof =? 
proof.toEncodableProof, err: error "Failed to get proof for slot tree", err = err.msg return failure(err) - if err =? (await self.store.putCidAndProof( - treeCid, i, cellCid, encodableProof)).errorOption: + if err =? + (await self.store.putCidAndProof(treeCid, i, cellCid, encodableProof)).errorOption: error "Failed to store slot tree", err = err.msg return failure(err) @@ -258,14 +255,14 @@ proc buildSlots*[T, H](self: SlotsBuilder[T, H]): Future[?!void] {.async.} = ## logScope: - cid = self.manifest.treeCid - blockCount = self.manifest.blocksCount + cid = self.manifest.treeCid + blockCount = self.manifest.blocksCount trace "Building slots" if self.slotRoots.len == 0: self.slotRoots = collect(newSeq): - for i in 0.. 0: - numPadSlotBlocks + numSlotBlocks - else: - numSlotBlocks + numPadSlotBlocks + numSlotBlocks + else: + numSlotBlocks - numBlocksTotal = numSlotBlocksTotal * manifest.numSlots # number of blocks per slot + numBlocksTotal = numSlotBlocksTotal * manifest.numSlots # number of blocks per slot - emptyBlock = newSeq[byte](manifest.blockSize.int) - emptyDigestTree = ? T.digestTree(emptyBlock, cellSize.int) + emptyBlock = newSeq[byte](manifest.blockSize.int) + emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int) - strategy = ? strategy.init( - 0, - numBlocksTotal - 1, - manifest.numSlots).catch + strategy = ?strategy.init(0, numBlocksTotal - 1, manifest.numSlots).catch logScope: - numSlotBlocks = numSlotBlocks - numBlockCells = numBlockCells - numSlotCells = numSlotCells - pow2SlotCells = pow2SlotCells - numPadSlotBlocks = numPadSlotBlocks - numBlocksTotal = numBlocksTotal - numSlotBlocksTotal = numSlotBlocksTotal - strategy = strategy.strategyType + numSlotBlocks = numSlotBlocks + numBlockCells = numBlockCells + numSlotCells = numSlotCells + pow2SlotCells = pow2SlotCells + numPadSlotBlocks = numPadSlotBlocks + numBlocksTotal = numBlocksTotal + numSlotBlocksTotal = numSlotBlocksTotal + strategy = strategy.strategyType trace "Creating slots builder" - var - self = SlotsBuilder[T, H]( - store: store, - manifest: manifest, - strategy: strategy, - cellSize: cellSize, - emptyBlock: emptyBlock, - numSlotBlocks: numSlotBlocksTotal, - emptyDigestTree: emptyDigestTree) + var self = SlotsBuilder[T, H]( + store: store, + manifest: manifest, + strategy: strategy, + cellSize: cellSize, + emptyBlock: emptyBlock, + numSlotBlocks: numSlotBlocksTotal, + emptyDigestTree: emptyDigestTree, + ) if manifest.verifiable: - if manifest.slotRoots.len == 0 or - manifest.slotRoots.len != manifest.numSlots: + if manifest.slotRoots.len == 0 or manifest.slotRoots.len != manifest.numSlots: return failure "Manifest is verifiable but slot roots are missing or invalid." let - slotRoots = manifest.slotRoots.mapIt( (? it.fromSlotCid() )) - tree = ? self.buildVerifyTree(slotRoots) - expectedRoot = ? manifest.verifyRoot.fromVerifyCid() - verifyRoot = ? tree.root + slotRoots = manifest.slotRoots.mapIt((?it.fromSlotCid())) + tree = ?self.buildVerifyTree(slotRoots) + expectedRoot = ?manifest.verifyRoot.fromVerifyCid() + verifyRoot = ?tree.root if verifyRoot != expectedRoot: return failure "Existing slots root doesn't match reconstructed root." diff --git a/codex/slots/converters.nim b/codex/slots/converters.nim index f9716fa3..f0dc3990 100644 --- a/codex/slots/converters.nim +++ b/codex/slots/converters.nim @@ -23,21 +23,25 @@ import ../utils/digest func toCid(hash: Poseidon2Hash, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Cid = let - mhash = ? MultiHash.init(mcodec, hash.toBytes).mapFailure - treeCid = ? 
Cid.init(CIDv1, cidCodec, mhash).mapFailure + mhash = ?MultiHash.init(mcodec, hash.toBytes).mapFailure + treeCid = ?Cid.init(CIDv1, cidCodec, mhash).mapFailure success treeCid -proc toPoseidon2Hash(cid: Cid, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Poseidon2Hash = +proc toPoseidon2Hash( + cid: Cid, mcodec: MultiCodec, cidCodec: MultiCodec +): ?!Poseidon2Hash = if cid.cidver != CIDv1: return failure("Unexpected CID version") if cid.mcodec != cidCodec: - return failure("Cid is not of expected codec. Was: " & $cid.mcodec & " but expected: " & $cidCodec) + return failure( + "Cid is not of expected codec. Was: " & $cid.mcodec & " but expected: " & $cidCodec + ) let - mhash = ? cid.mhash.mapFailure + mhash = ?cid.mhash.mapFailure bytes: array[32, byte] = array[32, byte].initCopyFrom(mhash.digestBytes()) - hash = ? Poseidon2Hash.fromBytes(bytes).toFailure + hash = ?Poseidon2Hash.fromBytes(bytes).toFailure success hash @@ -51,7 +55,7 @@ func toSlotCid*(hash: Poseidon2Hash): ?!Cid = toCid(hash, multiCodec("identity"), SlotRootCodec) func toSlotCids*(slotRoots: openArray[Poseidon2Hash]): ?!seq[Cid] = - success slotRoots.mapIt( ? it.toSlotCid ) + success slotRoots.mapIt(?it.toSlotCid) func fromSlotCid*(cid: Cid): ?!Poseidon2Hash = toPoseidon2Hash(cid, multiCodec("identity"), SlotRootCodec) @@ -62,27 +66,17 @@ func toVerifyCid*(hash: Poseidon2Hash): ?!Cid = func fromVerifyCid*(cid: Cid): ?!Poseidon2Hash = toPoseidon2Hash(cid, multiCodec("identity"), SlotProvingRootCodec) -func toEncodableProof*( - proof: Poseidon2Proof): ?!CodexProof = - - let - encodableProof = CodexProof( - mcodec: multiCodec("identity"), - index: proof.index, - nleaves: proof.nleaves, - path: proof.path.mapIt( @( it.toBytes ) )) +func toEncodableProof*(proof: Poseidon2Proof): ?!CodexProof = + let encodableProof = CodexProof( + mcodec: multiCodec("identity"), + index: proof.index, + nleaves: proof.nleaves, + path: proof.path.mapIt(@(it.toBytes)), + ) success encodableProof -func toVerifiableProof*( - proof: CodexProof): ?!Poseidon2Proof = +func toVerifiableProof*(proof: CodexProof): ?!Poseidon2Proof = + let nodes = proof.path.mapIt(?Poseidon2Hash.fromBytes(it.toArray32).toFailure) - let - nodes = proof.path.mapIt( - ? 
Poseidon2Hash.fromBytes(it.toArray32).toFailure - ) - - Poseidon2Proof.init( - index = proof.index, - nleaves = proof.nleaves, - nodes = nodes) + Poseidon2Proof.init(index = proof.index, nleaves = proof.nleaves, nodes = nodes) diff --git a/codex/slots/proofs/backendfactory.nim b/codex/slots/proofs/backendfactory.nim index ac478e1a..7aba27d8 100644 --- a/codex/slots/proofs/backendfactory.nim +++ b/codex/slots/proofs/backendfactory.nim @@ -11,26 +11,25 @@ import ../../conf import ./backends import ./backendutils -proc initializeFromConfig( - config: CodexConf, - utils: BackendUtils): ?!AnyBackend = +proc initializeFromConfig(config: CodexConf, utils: BackendUtils): ?!AnyBackend = if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) or - not endsWith($config.circomR1cs, ".r1cs"): + not endsWith($config.circomR1cs, ".r1cs"): return failure("Circom R1CS file not accessible") if not fileAccessible($config.circomWasm, {AccessFlags.Read}) or - not endsWith($config.circomWasm, ".wasm"): + not endsWith($config.circomWasm, ".wasm"): return failure("Circom wasm file not accessible") if not fileAccessible($config.circomZkey, {AccessFlags.Read}) or - not endsWith($config.circomZkey, ".zkey"): + not endsWith($config.circomZkey, ".zkey"): return failure("Circom zkey file not accessible") trace "Initialized prover backend from cli config" - success(utils.initializeCircomBackend( - $config.circomR1cs, - $config.circomWasm, - $config.circomZkey)) + success( + utils.initializeCircomBackend( + $config.circomR1cs, $config.circomWasm, $config.circomZkey + ) + ) proc r1csFilePath(config: CodexConf): string = config.circuitDir / "proof_main.r1cs" @@ -42,42 +41,40 @@ proc zkeyFilePath(config: CodexConf): string = config.circuitDir / "proof_main.zkey" proc initializeFromCircuitDirFiles( - config: CodexConf, - utils: BackendUtils): ?!AnyBackend {.gcsafe.} = - if fileExists(config.r1csFilePath) and - fileExists(config.wasmFilePath) and - fileExists(config.zkeyFilePath): + config: CodexConf, utils: BackendUtils +): ?!AnyBackend {.gcsafe.} = + if fileExists(config.r1csFilePath) and fileExists(config.wasmFilePath) and + fileExists(config.zkeyFilePath): trace "Initialized prover backend from local files" - return success(utils.initializeCircomBackend( - config.r1csFilePath, - config.wasmFilePath, - config.zkeyFilePath)) + return success( + utils.initializeCircomBackend( + config.r1csFilePath, config.wasmFilePath, config.zkeyFilePath + ) + ) failure("Circuit files not found") proc suggestDownloadTool(config: CodexConf) = without address =? config.marketplaceAddress: - raise (ref Defect)(msg: "Proving backend initializing while marketplace address not set.") + raise (ref Defect)( + msg: "Proving backend initializing while marketplace address not set." + ) let - tokens = [ - "cirdl", - "\"" & $config.circuitDir & "\"", - config.ethProvider, - $address - ] + tokens = ["cirdl", "\"" & $config.circuitDir & "\"", config.ethProvider, $address] instructions = "'./" & tokens.join(" ") & "'" - warn "Proving circuit files are not found. Please run the following to download them:", instructions + warn "Proving circuit files are not found. Please run the following to download them:", + instructions proc initializeBackend*( - config: CodexConf, - utils: BackendUtils = BackendUtils()): ?!AnyBackend = - + config: CodexConf, utils: BackendUtils = BackendUtils() +): ?!AnyBackend = without backend =? 
initializeFromConfig(config, utils), cliErr: info "Could not initialize prover backend from CLI options...", msg = cliErr.msg without backend =? initializeFromCircuitDirFiles(config, utils), localErr: - info "Could not initialize prover backend from circuit dir files...", msg = localErr.msg + info "Could not initialize prover backend from circuit dir files...", + msg = localErr.msg suggestDownloadTool(config) return failure("CircuitFilesNotFound") # Unexpected: value of backend does not survive leaving each scope. (definition does though...) diff --git a/codex/slots/proofs/backends.nim b/codex/slots/proofs/backends.nim index 3872d821..3bd2edb6 100644 --- a/codex/slots/proofs/backends.nim +++ b/codex/slots/proofs/backends.nim @@ -2,5 +2,4 @@ import ./backends/circomcompat export circomcompat -type - AnyBackend* = CircomCompat +type AnyBackend* = CircomCompat diff --git a/codex/slots/proofs/backends/circomcompat.nim b/codex/slots/proofs/backends/circomcompat.nim index 374b8151..1d2e3e19 100644 --- a/codex/slots/proofs/backends/circomcompat.nim +++ b/codex/slots/proofs/backends/circomcompat.nim @@ -25,21 +25,22 @@ export circomcompat, converters type CircomCompat* = object - slotDepth : int # max depth of the slot tree - datasetDepth : int # max depth of dataset tree - blkDepth : int # depth of the block merkle tree (pow2 for now) - cellElms : int # number of field elements per cell - numSamples : int # number of samples per slot - r1csPath : string # path to the r1cs file - wasmPath : string # path to the wasm file - zkeyPath : string # path to the zkey file - backendCfg : ptr CircomBn254Cfg - vkp* : ptr CircomKey + slotDepth: int # max depth of the slot tree + datasetDepth: int # max depth of dataset tree + blkDepth: int # depth of the block merkle tree (pow2 for now) + cellElms: int # number of field elements per cell + numSamples: int # number of samples per slot + r1csPath: string # path to the r1cs file + wasmPath: string # path to the wasm file + zkeyPath: string # path to the zkey file + backendCfg: ptr CircomBn254Cfg + vkp*: ptr CircomKey NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H] -func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]): - NormalizedProofInputs[H] = +func normalizeInput*[H]( + self: CircomCompat, input: ProofInputs[H] +): NormalizedProofInputs[H] = ## Parameters in CIRCOM circuits are statically sized and must be properly ## padded before they can be passed onto the circuit. This function takes ## variable length parameters and performs that padding. 
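Since this doc comment is the only prose description of the padding contract in the patch, a short illustration may help. The following is a minimal, self-contained Nim sketch of the behaviour described above, not the code under review: H is stubbed as int, padTo is a hypothetical helper that does not exist in the codebase, and a slot depth of 4 is assumed. The real normalizeInput applies the same setLen-based padding to each sample's merklePaths (up to slotDepth) and to slotProof (up to datasetDepth), as the hunk below shows.

  # Minimal sketch of the padding contract (assumes H = int and slotDepth = 4;
  # padTo is a hypothetical helper, not part of the codebase).
  type Sample[H] = object
    cellData: seq[H]
    merklePaths: seq[H]

  func padTo[H](s: seq[H], n: int): seq[H] =
    # seq.setLen extends with default(H) when n exceeds the current length
    # (and truncates otherwise), which is what the setLen calls in
    # normalizeInput rely on.
    result = s
    result.setLen(n)

  when isMainModule:
    let sample = Sample[int](cellData: @[1, 2], merklePaths: @[7])
    # A single real path element is zero-extended to the assumed depth:
    doAssert padTo(sample.merklePaths, 4) == @[7, 0, 0, 0]

Zero-extending rather than rejecting short inputs is the point of the design: the circuit always consumes a fixed number of path elements per sample, so variable-length inputs must be brought up to the static size before proving.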
@@ -52,10 +53,7 @@ func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]): for sample in input.samples: var merklePaths = sample.merklePaths merklePaths.setLen(self.slotDepth) - Sample[H]( - cellData: sample.cellData, - merklePaths: merklePaths - ) + Sample[H](cellData: sample.cellData, merklePaths: merklePaths) var normSlotProof = input.slotProof normSlotProof.setLen(self.datasetDepth) @@ -68,7 +66,7 @@ func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]): nCellsPerSlot: input.nCellsPerSlot, nSlotsPerDataSet: input.nSlotsPerDataSet, slotProof: normSlotProof, - samples: normSamples + samples: normSamples, ) proc release*(self: CircomCompat) = @@ -81,32 +79,28 @@ proc release*(self: CircomCompat) = if not isNil(self.vkp): self.vkp.unsafeAddr.release_key() -proc prove[H]( - self: CircomCompat, - input: NormalizedProofInputs[H]): ?!CircomProof = - - doAssert input.samples.len == self.numSamples, - "Number of samples does not match" +proc prove[H](self: CircomCompat, input: NormalizedProofInputs[H]): ?!CircomProof = + doAssert input.samples.len == self.numSamples, "Number of samples does not match" doAssert input.slotProof.len <= self.datasetDepth, "Slot proof is too deep - dataset has more slots than what we can handle?" doAssert input.samples.allIt( block: - (it.merklePaths.len <= self.slotDepth + self.blkDepth and - it.cellData.len == self.cellElms)), "Merkle paths too deep or cells too big for circuit" + ( + it.merklePaths.len <= self.slotDepth + self.blkDepth and + it.cellData.len == self.cellElms + ) + ), "Merkle paths too deep or cells too big for circuit" # TODO: All parameters should match circom's static parameter - var - ctx: ptr CircomCompatCtx defer: if ctx != nil: ctx.addr.release_circom_compat() - if init_circom_compat( - self.backendCfg, - addr ctx) != ERR_OK or ctx == nil: + if init_circom_compat(self.backendCfg, addr ctx) != ERR_OK or ctx == nil: raiseAssert("failed to initialize CircomCompat ctx") var @@ -114,67 +108,61 @@ proc prove[H]( dataSetRoot = input.datasetRoot.toBytes slotRoot = input.slotRoot.toBytes - if ctx.push_input_u256_array( - "entropy".cstring, entropy[0].addr, entropy.len.uint32) != ERR_OK: + if ctx.push_input_u256_array("entropy".cstring, entropy[0].addr, entropy.len.uint32) != + ERR_OK: return failure("Failed to push entropy") if ctx.push_input_u256_array( - "dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32) != ERR_OK: + "dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32 + ) != ERR_OK: return failure("Failed to push data set root") if ctx.push_input_u256_array( - "slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32) != ERR_OK: + "slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32 + ) != ERR_OK: return failure("Failed to push slot root") - if ctx.push_input_u32( - "nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK: + if ctx.push_input_u32("nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK: return failure("Failed to push nCellsPerSlot") - if ctx.push_input_u32( - "nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) != ERR_OK: + if ctx.push_input_u32("nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) != + ERR_OK: return failure("Failed to push nSlotsPerDataSet") - if ctx.push_input_u32( - "slotIndex".cstring, input.slotIndex.uint32) != ERR_OK: + if ctx.push_input_u32("slotIndex".cstring, input.slotIndex.uint32) != ERR_OK: return failure("Failed to push slotIndex") - var - slotProof = input.slotProof.mapIt( 
it.toBytes ).concat + var slotProof = input.slotProof.mapIt(it.toBytes).concat doAssert(slotProof.len == self.datasetDepth) # arrays are always flattened if ctx.push_input_u256_array( - "slotProof".cstring, - slotProof[0].addr, - uint (slotProof[0].len * slotProof.len)) != ERR_OK: - return failure("Failed to push slot proof") + "slotProof".cstring, slotProof[0].addr, uint (slotProof[0].len * slotProof.len) + ) != ERR_OK: + return failure("Failed to push slot proof") for s in input.samples: var - merklePaths = s.merklePaths.mapIt( it.toBytes ) - data = s.cellData.mapIt( @(it.toBytes) ).concat + merklePaths = s.merklePaths.mapIt(it.toBytes) + data = s.cellData.mapIt(@(it.toBytes)).concat if ctx.push_input_u256_array( "merklePaths".cstring, merklePaths[0].addr, - uint (merklePaths[0].len * merklePaths.len)) != ERR_OK: - return failure("Failed to push merkle paths") + uint (merklePaths[0].len * merklePaths.len), + ) != ERR_OK: + return failure("Failed to push merkle paths") - if ctx.push_input_u256_array( - "cellData".cstring, - data[0].addr, - data.len.uint) != ERR_OK: - return failure("Failed to push cell data") + if ctx.push_input_u256_array("cellData".cstring, data[0].addr, data.len.uint) != + ERR_OK: + return failure("Failed to push cell data") - var - proofPtr: ptr Proof = nil + var proofPtr: ptr Proof = nil let proof = try: - if ( - let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr); - res != ERR_OK) or - proofPtr == nil: + if (let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr); res != ERR_OK) or + proofPtr == nil: return failure("Failed to prove - err code: " & $res) proofPtr[] @@ -184,16 +172,12 @@ proc prove[H]( success proof -proc prove*[H]( - self: CircomCompat, - input: ProofInputs[H]): ?!CircomProof = - +proc prove*[H](self: CircomCompat, input: ProofInputs[H]): ?!CircomProof = self.prove(self.normalizeInput(input)) proc verify*[H]( - self: CircomCompat, - proof: CircomProof, - inputs: ProofInputs[H]): ?!bool = + self: CircomCompat, proof: CircomProof, inputs: ProofInputs[H] +): ?!bool = ## Verify a proof using a ctx ## @@ -213,43 +197,44 @@ proc verify*[H]( inputs.releaseCircomInputs() proc init*( - _: type CircomCompat, - r1csPath : string, - wasmPath : string, - zkeyPath : string = "", - slotDepth = DefaultMaxSlotDepth, - datasetDepth = DefaultMaxDatasetDepth, - blkDepth = DefaultBlockDepth, - cellElms = DefaultCellElms, - numSamples = DefaultSamplesNum): CircomCompat = + _: type CircomCompat, + r1csPath: string, + wasmPath: string, + zkeyPath: string = "", + slotDepth = DefaultMaxSlotDepth, + datasetDepth = DefaultMaxDatasetDepth, + blkDepth = DefaultBlockDepth, + cellElms = DefaultCellElms, + numSamples = DefaultSamplesNum, +): CircomCompat = ## Create a new ctx ## var cfg: ptr CircomBn254Cfg var zkey = if zkeyPath.len > 0: zkeyPath.cstring else: nil - if init_circom_config( - r1csPath.cstring, - wasmPath.cstring, - zkey, cfg.addr) != ERR_OK or cfg == nil: - if cfg != nil: cfg.addr.release_cfg() - raiseAssert("failed to initialize circom compat config") + if init_circom_config(r1csPath.cstring, wasmPath.cstring, zkey, cfg.addr) != ERR_OK or + cfg == nil: + if cfg != nil: + cfg.addr.release_cfg() + raiseAssert("failed to initialize circom compat config") - var - vkpPtr: ptr VerifyingKey = nil + var vkpPtr: ptr VerifyingKey = nil if cfg.get_verifying_key(vkpPtr.addr) != ERR_OK or vkpPtr == nil: - if vkpPtr != nil: vkpPtr.addr.release_key() + if vkpPtr != nil: + vkpPtr.addr.release_key() raiseAssert("Failed to get verifying key") CircomCompat( - r1csPath : 
r1csPath, - wasmPath : wasmPath, - zkeyPath : zkeyPath, - slotDepth : slotDepth, + r1csPath: r1csPath, + wasmPath: wasmPath, + zkeyPath: zkeyPath, + slotDepth: slotDepth, datasetDepth: datasetDepth, - blkDepth : blkDepth, - cellElms : cellElms, - numSamples : numSamples, - backendCfg : cfg, - vkp : vkpPtr) + blkDepth: blkDepth, + cellElms: cellElms, + numSamples: numSamples, + backendCfg: cfg, + vkp: vkpPtr, + ) diff --git a/codex/slots/proofs/backends/converters.nim b/codex/slots/proofs/backends/converters.nim index 60c64f5c..ee771477 100644 --- a/codex/slots/proofs/backends/converters.nim +++ b/codex/slots/proofs/backends/converters.nim @@ -19,8 +19,8 @@ type CircomG1* = G1 CircomG2* = G2 - CircomProof* = Proof - CircomKey* = VerifyingKey + CircomProof* = Proof + CircomKey* = VerifyingKey CircomInputs* = Inputs proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomInputs = @@ -29,18 +29,12 @@ proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomInputs = datasetRoot = inputs.datasetRoot.toBytes.toArray32 entropy = inputs.entropy.toBytes.toArray32 - elms = [ - entropy, - datasetRoot, - slotIndex - ] + elms = [entropy, datasetRoot, slotIndex] let inputsPtr = allocShared0(32 * elms.len) copyMem(inputsPtr, addr elms[0], elms.len * 32) - CircomInputs( - elms: cast[ptr array[32, byte]](inputsPtr), - len: elms.len.uint) + CircomInputs(elms: cast[ptr array[32, byte]](inputsPtr), len: elms.len.uint) proc releaseCircomInputs*(inputs: var CircomInputs) = if not inputs.elms.isNil: @@ -48,23 +42,13 @@ proc releaseCircomInputs*(inputs: var CircomInputs) = inputs.elms = nil func toG1*(g: CircomG1): G1Point = - G1Point( - x: UInt256.fromBytesLE(g.x), - y: UInt256.fromBytesLE(g.y)) + G1Point(x: UInt256.fromBytesLE(g.x), y: UInt256.fromBytesLE(g.y)) func toG2*(g: CircomG2): G2Point = G2Point( - x: Fp2Element( - real: UInt256.fromBytesLE(g.x[0]), - imag: UInt256.fromBytesLE(g.x[1]) - ), - y: Fp2Element( - real: UInt256.fromBytesLE(g.y[0]), - imag: UInt256.fromBytesLE(g.y[1]) - )) + x: Fp2Element(real: UInt256.fromBytesLE(g.x[0]), imag: UInt256.fromBytesLE(g.x[1])), + y: Fp2Element(real: UInt256.fromBytesLE(g.y[0]), imag: UInt256.fromBytesLE(g.y[1])), + ) func toGroth16Proof*(proof: CircomProof): Groth16Proof = - Groth16Proof( - a: proof.a.toG1, - b: proof.b.toG2, - c: proof.c.toG1) + Groth16Proof(a: proof.a.toG1, b: proof.b.toG2, c: proof.c.toG1) diff --git a/codex/slots/proofs/backendutils.nim b/codex/slots/proofs/backendutils.nim index f7e6e2e1..0e334ace 100644 --- a/codex/slots/proofs/backendutils.nim +++ b/codex/slots/proofs/backendutils.nim @@ -1,12 +1,8 @@ import ./backends -type - BackendUtils* = ref object of RootObj +type BackendUtils* = ref object of RootObj method initializeCircomBackend*( - self: BackendUtils, - r1csFile: string, - wasmFile: string, - zKeyFile: string -): AnyBackend {.base, gcsafe.}= + self: BackendUtils, r1csFile: string, wasmFile: string, zKeyFile: string +): AnyBackend {.base, gcsafe.} = CircomCompat.init(r1csFile, wasmFile, zKeyFile) diff --git a/codex/slots/proofs/prover.nim b/codex/slots/proofs/prover.nim index 631e82e1..36fc0a05 100644 --- a/codex/slots/proofs/prover.nim +++ b/codex/slots/proofs/prover.nim @@ -47,10 +47,8 @@ type nSamples: int proc prove*( - self: Prover, - slotIdx: int, - manifest: Manifest, - challenge: ProofChallenge): Future[?!(AnyProofInputs, AnyProof)] {.async.} = + self: Prover, slotIdx: int, manifest: Manifest, challenge: ProofChallenge +): Future[?!(AnyProofInputs, AnyProof)] {.async.} = ## Prove a statement using backend. 
## Returns a future that resolves to a proof. @@ -81,20 +79,13 @@ proc prove*( success (proofInput, proof) proc verify*( - self: Prover, - proof: AnyProof, - inputs: AnyProofInputs): Future[?!bool] {.async.} = + self: Prover, proof: AnyProof, inputs: AnyProofInputs +): Future[?!bool] {.async.} = ## Verify a proof using backend. ## Returns a future that resolves to the verification result. self.backend.verify(proof, inputs) proc new*( - _: type Prover, - store: BlockStore, - backend: AnyBackend, - nSamples: int): Prover = - - Prover( - store: store, - backend: backend, - nSamples: nSamples) + _: type Prover, store: BlockStore, backend: AnyBackend, nSamples: int +): Prover = + Prover(store: store, backend: backend, nSamples: nSamples) diff --git a/codex/slots/sampler.nim b/codex/slots/sampler.nim index 10ea2656..23cfb73f 100644 --- a/codex/slots/sampler.nim +++ b/codex/slots/sampler.nim @@ -5,5 +5,4 @@ import ../merkletree export sampler, utils -type - Poseidon2Sampler* = DataSampler[Poseidon2Tree, Poseidon2Hash] +type Poseidon2Sampler* = DataSampler[Poseidon2Tree, Poseidon2Hash] diff --git a/codex/slots/sampler/sampler.nim b/codex/slots/sampler/sampler.nim index 3270d55a..bccdaff2 100644 --- a/codex/slots/sampler/sampler.nim +++ b/codex/slots/sampler/sampler.nim @@ -29,17 +29,14 @@ import ./utils logScope: topics = "codex datasampler" -type - DataSampler*[T, H] = ref object of RootObj - index: Natural - blockStore: BlockStore - builder: SlotsBuilder[T, H] +type DataSampler*[T, H] = ref object of RootObj + index: Natural + blockStore: BlockStore + builder: SlotsBuilder[T, H] func getCell*[T, H]( - self: DataSampler[T, H], - blkBytes: seq[byte], - blkCellIdx: Natural): seq[H] = - + self: DataSampler[T, H], blkBytes: seq[byte], blkCellIdx: Natural +): seq[H] = let cellSize = self.builder.cellSize.uint64 dataStart = cellSize * blkCellIdx.uint64 @@ -50,54 +47,47 @@ func getCell*[T, H]( blkBytes[dataStart ..< dataEnd].elements(H).toSeq() proc getSample*[T, H]( - self: DataSampler[T, H], - cellIdx: int, - slotTreeCid: Cid, - slotRoot: H): Future[?!Sample[H]] {.async.} = - + self: DataSampler[T, H], cellIdx: int, slotTreeCid: Cid, slotRoot: H +): Future[?!Sample[H]] {.async.} = let cellsPerBlock = self.builder.numBlockCells - blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index - blkSlotIdx = cellIdx.toBlkInSlot(cellsPerBlock) # slot tree index - origBlockIdx = self.builder.slotIndicies(self.index)[blkSlotIdx] # convert to original dataset block index + blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index + blkSlotIdx = cellIdx.toBlkInSlot(cellsPerBlock) # slot tree index + origBlockIdx = self.builder.slotIndicies(self.index)[blkSlotIdx] + # convert to original dataset block index logScope: - cellIdx = cellIdx - blkSlotIdx = blkSlotIdx - blkCellIdx = blkCellIdx - origBlockIdx = origBlockIdx + cellIdx = cellIdx + blkSlotIdx = blkSlotIdx + blkCellIdx = blkCellIdx + origBlockIdx = origBlockIdx trace "Retrieving sample from block tree" let - (_, proof) = (await self.blockStore.getCidAndProof( - slotTreeCid, blkSlotIdx.Natural)).valueOr: + (_, proof) = (await self.blockStore.getCidAndProof(slotTreeCid, blkSlotIdx.Natural)).valueOr: return failure("Failed to get slot tree CID and proof") slotProof = proof.toVerifiableProof().valueOr: return failure("Failed to get verifiable proof") - (bytes, blkTree) = (await self.builder.buildBlockTree( - origBlockIdx, blkSlotIdx)).valueOr: + (bytes, blkTree) = (await self.builder.buildBlockTree(origBlockIdx, blkSlotIdx)).valueOr: return failure("Failed 
to build block tree") cellData = self.getCell(bytes, blkCellIdx) cellProof = blkTree.getProof(blkCellIdx).valueOr: return failure("Failed to get proof from block tree") - success Sample[H]( - cellData: cellData, - merklePaths: (cellProof.path & slotProof.path)) + success Sample[H](cellData: cellData, merklePaths: (cellProof.path & slotProof.path)) proc getProofInput*[T, H]( - self: DataSampler[T, H], - entropy: ProofChallenge, - nSamples: Natural): Future[?!ProofInputs[H]] {.async.} = + self: DataSampler[T, H], entropy: ProofChallenge, nSamples: Natural +): Future[?!ProofInputs[H]] {.async.} = ## Generate proofs as input to the proving circuit. ## let - entropy = H.fromBytes( - array[31, byte].initCopyFrom(entropy[0..30])) # truncate to 31 bytes, otherwise it _might_ be greater than mod + entropy = H.fromBytes(array[31, byte].initCopyFrom(entropy[0 .. 30])) + # truncate to 31 bytes, otherwise it _might_ be greater than mod verifyTree = self.builder.verifyTree.toFailure.valueOr: return failure("Failed to get verify tree") @@ -109,11 +99,8 @@ proc getProofInput*[T, H]( return failure("Failed to get dataset root") slotTreeCid = self.builder.manifest.slotRoots[self.index] - slotRoot = self.builder.slotRoots[self.index] - cellIdxs = entropy.cellIndices( - slotRoot, - self.builder.numSlotCells, - nSamples) + slotRoot = self.builder.slotRoots[self.index] + cellIdxs = entropy.cellIndices(slotRoot, self.builder.numSlotCells, nSamples) logScope: cells = cellIdxs @@ -132,14 +119,15 @@ proc getProofInput*[T, H]( nCellsPerSlot: self.builder.numSlotCells, slotRoot: slotRoot, slotIndex: self.index, - samples: samples) + samples: samples, + ) proc new*[T, H]( _: type DataSampler[T, H], index: Natural, blockStore: BlockStore, - builder: SlotsBuilder[T, H]): ?!DataSampler[T, H] = - + builder: SlotsBuilder[T, H], +): ?!DataSampler[T, H] = if index > builder.slotRoots.high: error "Slot index is out of range" return failure("Slot index is out of range") @@ -147,7 +135,4 @@ proc new*[T, H]( if not builder.verifiable: return failure("Cannot instantiate DataSampler for non-verifiable builder") - success DataSampler[T, H]( - index: index, - blockStore: blockStore, - builder: builder) + success DataSampler[T, H](index: index, blockStore: blockStore, builder: builder) diff --git a/codex/slots/sampler/utils.nim b/codex/slots/sampler/utils.nim index 998f2cdc..ce78fadc 100644 --- a/codex/slots/sampler/utils.nim +++ b/codex/slots/sampler/utils.nim @@ -15,21 +15,21 @@ import pkg/constantine/math/arithmetic import ../../merkletree func extractLowBits*[n: static int](elm: BigInt[n], k: int): uint64 = - doAssert( k > 0 and k <= 64 ) - var r = 0'u64 - for i in 0.. 
0 and k <= 64) + var r = 0'u64 + for i in 0 ..< k: let b = bit[n](elm, i) let y = uint64(b) if (y != 0): - r = bitor( r, 1'u64 shl i ) + r = bitor(r, 1'u64 shl i) r func extractLowBits(fld: Poseidon2Hash, k: int): uint64 = - let elm : BigInt[254] = fld.toBig() - return extractLowBits(elm, k); + let elm: BigInt[254] = fld.toBig() + return extractLowBits(elm, k) -func floorLog2*(x : int) : int = - doAssert ( x > 0 ) +func floorLog2*(x: int): int = + doAssert (x > 0) var k = -1 var y = x while (y > 0): @@ -37,39 +37,39 @@ func floorLog2*(x : int) : int = y = y shr 1 return k -func ceilingLog2*(x : int) : int = - doAssert ( x > 0 ) +func ceilingLog2*(x: int): int = + doAssert (x > 0) return (floorLog2(x - 1) + 1) func toBlkInSlot*(cell: Natural, numCells: Natural): Natural = let log2 = ceilingLog2(numCells) - doAssert( 1 shl log2 == numCells , "`numCells` is assumed to be a power of two" ) + doAssert(1 shl log2 == numCells, "`numCells` is assumed to be a power of two") return cell div numCells func toCellInBlk*(cell: Natural, numCells: Natural): Natural = let log2 = ceilingLog2(numCells) - doAssert( 1 shl log2 == numCells , "`numCells` is assumed to be a power of two" ) + doAssert(1 shl log2 == numCells, "`numCells` is assumed to be a power of two") return cell mod numCells func cellIndex*( - entropy: Poseidon2Hash, - slotRoot: Poseidon2Hash, - numCells: Natural, counter: Natural): Natural = + entropy: Poseidon2Hash, slotRoot: Poseidon2Hash, numCells: Natural, counter: Natural +): Natural = let log2 = ceilingLog2(numCells) - doAssert( 1 shl log2 == numCells , "`numCells` is assumed to be a power of two" ) + doAssert(1 shl log2 == numCells, "`numCells` is assumed to be a power of two") - let hash = Sponge.digest( @[ entropy, slotRoot, counter.toF ], rate = 2 ) - return int( extractLowBits(hash, log2) ) + let hash = Sponge.digest(@[entropy, slotRoot, counter.toF], rate = 2) + return int(extractLowBits(hash, log2)) func cellIndices*( - entropy: Poseidon2Hash, - slotRoot: Poseidon2Hash, - numCells: Natural, nSamples: Natural): seq[Natural] = - + entropy: Poseidon2Hash, + slotRoot: Poseidon2Hash, + numCells: Natural, + nSamples: Natural, +): seq[Natural] = var indices: seq[Natural] - for i in 1..nSamples: + for i in 1 .. 
nSamples: indices.add(cellIndex(entropy, slotRoot, numCells, i)) indices diff --git a/codex/slots/types.nim b/codex/slots/types.nim index 8703086e..0cd24326 100644 --- a/codex/slots/types.nim +++ b/codex/slots/types.nim @@ -24,5 +24,7 @@ type slotRoot*: H nCellsPerSlot*: Natural nSlotsPerDataSet*: Natural - slotProof*: seq[H] # inclusion proof that shows that the slot root (leaf) is part of the dataset (root) - samples*: seq[Sample[H]] # inclusion proofs which show that the selected cells (leafs) are part of the slot (roots) + slotProof*: seq[H] + # inclusion proof that shows that the slot root (leaf) is part of the dataset (root) + samples*: seq[Sample[H]] + # inclusion proofs which show that the selected cells (leaves) are part of the slot (roots) diff --git a/codex/stores.nim b/codex/stores.nim index 11e7c8df..91d2c786 100644 --- a/codex/stores.nim +++ b/codex/stores.nim @@ -7,10 +7,4 @@ import ./stores/keyutils import ./stores/treehelper export - cachestore, - blockstore, - networkstore, - repostore, - keyutils, - treehelper, - maintenance + cachestore, blockstore, networkstore, repostore, keyutils, treehelper, maintenance diff --git a/codex/stores/blockstore.nim b/codex/stores/blockstore.nim index 791e7d5b..78fab0da 100644 --- a/codex/stores/blockstore.nim +++ b/codex/stores/blockstore.nim @@ -9,7 +9,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/chronos import pkg/libp2p @@ -27,9 +28,11 @@ type BlockNotFoundError* = object of CodexError BlockType* {.pure.} = enum - Manifest, Block, Both + Manifest + Block + Both - CidCallback* = proc(cid: Cid): Future[void] {.gcsafe, raises:[].} + CidCallback* = proc(cid: Cid): Future[void] {.gcsafe, raises: [].} BlockStore* = ref object of RootObj onBlockStored*: ?CidCallback @@ -39,7 +42,9 @@ method getBlock*(self: BlockStore, cid: Cid): Future[?!Block] {.base, gcsafe.} = raiseAssert("getBlock by cid not implemented!") -method getBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Block] {.base, gcsafe.} = +method getBlock*( + self: BlockStore, treeCid: Cid, index: Natural +): Future[?!Block] {.base, gcsafe.} = ## Get a block from the blockstore ## @@ -50,51 +55,49 @@ method getCid*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Cid] {. 
## raiseAssert("getCid by treecid not implemented!") -method getBlock*(self: BlockStore, address: BlockAddress): Future[?!Block] {.base, gcsafe.} = +method getBlock*( + self: BlockStore, address: BlockAddress +): Future[?!Block] {.base, gcsafe.} = ## Get a block from the blockstore ## raiseAssert("getBlock by addr not implemented!") -method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.base, gcsafe.} = +method getBlockAndProof*( + self: BlockStore, treeCid: Cid, index: Natural +): Future[?!(Block, CodexProof)] {.base, gcsafe.} = ## Get a block and associated inclusion proof by Cid of a merkle tree and an index of a leaf in a tree ## raiseAssert("getBlockAndProof not implemented!") method putBlock*( - self: BlockStore, - blk: Block, - ttl = Duration.none): Future[?!void] {.base, gcsafe.} = + self: BlockStore, blk: Block, ttl = Duration.none +): Future[?!void] {.base, gcsafe.} = ## Put a block to the blockstore ## raiseAssert("putBlock not implemented!") method putCidAndProof*( - self: BlockStore, - treeCid: Cid, - index: Natural, - blockCid: Cid, - proof: CodexProof): Future[?!void] {.base, gcsafe.} = + self: BlockStore, treeCid: Cid, index: Natural, blockCid: Cid, proof: CodexProof +): Future[?!void] {.base, gcsafe.} = ## Put a block proof to the blockstore ## raiseAssert("putCidAndProof not implemented!") method getCidAndProof*( - self: BlockStore, - treeCid: Cid, - index: Natural): Future[?!(Cid, CodexProof)] {.base, gcsafe.} = + self: BlockStore, treeCid: Cid, index: Natural +): Future[?!(Cid, CodexProof)] {.base, gcsafe.} = ## Get a block proof from the blockstore ## raiseAssert("getCidAndProof not implemented!") method ensureExpiry*( - self: BlockStore, - cid: Cid, - expiry: SecondsSince1970): Future[?!void] {.base, gcsafe.} = + self: BlockStore, cid: Cid, expiry: SecondsSince1970 +): Future[?!void] {.base, gcsafe.} = ## Ensure that block's assosicated expiry is at least given timestamp ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact ## @@ -102,10 +105,8 @@ method ensureExpiry*( raiseAssert("Not implemented!") method ensureExpiry*( - self: BlockStore, - treeCid: Cid, - index: Natural, - expiry: SecondsSince1970): Future[?!void] {.base, gcsafe.} = + self: BlockStore, treeCid: Cid, index: Natural, expiry: SecondsSince1970 +): Future[?!void] {.base, gcsafe.} = ## Ensure that block's associated expiry is at least given timestamp ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact ## @@ -118,7 +119,9 @@ method delBlock*(self: BlockStore, cid: Cid): Future[?!void] {.base, gcsafe.} = raiseAssert("delBlock not implemented!") -method delBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!void] {.base, gcsafe.} = +method delBlock*( + self: BlockStore, treeCid: Cid, index: Natural +): Future[?!void] {.base, gcsafe.} = ## Delete a block from the blockstore ## @@ -130,15 +133,17 @@ method hasBlock*(self: BlockStore, cid: Cid): Future[?!bool] {.base, gcsafe.} = raiseAssert("hasBlock not implemented!") -method hasBlock*(self: BlockStore, tree: Cid, index: Natural): Future[?!bool] {.base, gcsafe.} = +method hasBlock*( + self: BlockStore, tree: Cid, index: Natural +): Future[?!bool] {.base, gcsafe.} = ## Check if the block exists in the blockstore ## raiseAssert("hasBlock not implemented!") method listBlocks*( - self: BlockStore, - blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] {.base, gcsafe.} = + self: BlockStore, blockType = 
BlockType.Manifest +): Future[?!AsyncIter[?Cid]] {.base, gcsafe.} = ## Get the list of blocks in the BlockStore. This is an intensive operation ## @@ -159,7 +164,8 @@ proc contains*(self: BlockStore, blk: Cid): Future[bool] {.async.} = return (await self.hasBlock(blk)) |? false proc contains*(self: BlockStore, address: BlockAddress): Future[bool] {.async.} = - return if address.leaf: - (await self.hasBlock(address.treeCid, address.index)) |? false + return + if address.leaf: + (await self.hasBlock(address.treeCid, address.index)) |? false else: - (await self.hasBlock(address.cid)) |? false + (await self.hasBlock(address.cid)) |? false diff --git a/codex/stores/cachestore.nim b/codex/stores/cachestore.nim index 130d2ade..6235c9c6 100644 --- a/codex/stores/cachestore.nim +++ b/codex/stores/cachestore.nim @@ -9,7 +9,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import std/options @@ -43,8 +44,7 @@ type InvalidBlockSize* = object of CodexError -const - DefaultCacheSize*: NBytes = 5.MiBs +const DefaultCacheSize*: NBytes = 5.MiBs method getBlock*(self: CacheStore, cid: Cid): Future[?!Block] {.async.} = ## Get a block from the stores @@ -68,22 +68,28 @@ method getBlock*(self: CacheStore, cid: Cid): Future[?!Block] {.async.} = return failure exc method getCidAndProof*( - self: CacheStore, - treeCid: Cid, - index: Natural): Future[?!(Cid, CodexProof)] {.async.} = - + self: CacheStore, treeCid: Cid, index: Natural +): Future[?!(Cid, CodexProof)] {.async.} = if cidAndProof =? self.cidAndProofCache.getOption((treeCid, index)): success(cidAndProof) else: - failure(newException(BlockNotFoundError, "Block not in cache: " & $BlockAddress.init(treeCid, index))) + failure( + newException( + BlockNotFoundError, "Block not in cache: " & $BlockAddress.init(treeCid, index) + ) + ) -method getBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!Block] {.async.} = +method getBlock*( + self: CacheStore, treeCid: Cid, index: Natural +): Future[?!Block] {.async.} = without cidAndProof =? (await self.getCidAndProof(treeCid, index)), err: return failure(err) await self.getBlock(cidAndProof[0]) -method getBlockAndProof*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.async.} = +method getBlockAndProof*( + self: CacheStore, treeCid: Cid, index: Natural +): Future[?!(Block, CodexProof)] {.async.} = without cidAndProof =? (await self.getCidAndProof(treeCid, index)), err: return failure(err) @@ -111,7 +117,9 @@ method hasBlock*(self: CacheStore, cid: Cid): Future[?!bool] {.async.} = return (cid in self.cache).success -method hasBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!bool] {.async.} = +method hasBlock*( + self: CacheStore, treeCid: Cid, index: Natural +): Future[?!bool] {.async.} = without cidAndProof =? (await self.getCidAndProof(treeCid, index)), err: if err of BlockNotFoundError: return success(false) @@ -120,20 +128,19 @@ method hasBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!bool] await self.hasBlock(cidAndProof[0]) -func cids(self: CacheStore): (iterator: Cid {.gcsafe.}) = - return iterator(): Cid = - for cid in self.cache.keys: - yield cid +func cids(self: CacheStore): (iterator (): Cid {.gcsafe.}) = + return + iterator (): Cid = + for cid in self.cache.keys: + yield cid method listBlocks*( - self: CacheStore, - blockType = BlockType.Manifest + self: CacheStore, blockType = BlockType.Manifest ): Future[?!AsyncIter[?Cid]] {.async.} = ## Get the list of blocks in the BlockStore. 
This is an intensive operation ## - let - cids = self.cids() + let cids = self.cids() proc isFinished(): bool = return finished(cids) @@ -141,29 +148,32 @@ method listBlocks*( proc genNext(): Future[Cid] {.async.} = cids() - let iter = await (AsyncIter[Cid].new(genNext, isFinished) - .filter( - proc (cid: Cid): Future[bool] {.async.} = + let iter = await ( + AsyncIter[Cid].new(genNext, isFinished).filter( + proc(cid: Cid): Future[bool] {.async.} = without isManifest =? cid.isManifest, err: trace "Error checking if cid is a manifest", err = err.msg return false - case blockType: + case blockType of BlockType.Both: return true of BlockType.Manifest: return isManifest of BlockType.Block: return not isManifest - )) + ) + ) - return success(map[Cid, ?Cid](iter, - proc (cid: Cid): Future[?Cid] {.async.} = - some(cid) - )) + return success( + map[Cid, ?Cid]( + iter, + proc(cid: Cid): Future[?Cid] {.async.} = + some(cid), + ) + ) func putBlockSync(self: CacheStore, blk: Block): bool = - let blkSize = blk.data.len.NBytes # in bytes if blkSize > self.size: @@ -185,9 +195,8 @@ func putBlockSync(self: CacheStore, blk: Block): bool = return true method putBlock*( - self: CacheStore, - blk: Block, - ttl = Duration.none): Future[?!void] {.async.} = + self: CacheStore, blk: Block, ttl = Duration.none +): Future[?!void] {.async.} = ## Put a block to the blockstore ## @@ -199,23 +208,17 @@ method putBlock*( discard self.putBlockSync(blk) if onBlock =? self.onBlockStored: await onBlock(blk.cid) - + return success() method putCidAndProof*( - self: CacheStore, - treeCid: Cid, - index: Natural, - blockCid: Cid, - proof: CodexProof + self: CacheStore, treeCid: Cid, index: Natural, blockCid: Cid, proof: CodexProof ): Future[?!void] {.async.} = self.cidAndProofCache[(treeCid, index)] = (blockCid, proof) success() method ensureExpiry*( - self: CacheStore, - cid: Cid, - expiry: SecondsSince1970 + self: CacheStore, cid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = ## Updates block's associated TTL in store - not applicable for CacheStore ## @@ -223,10 +226,7 @@ method ensureExpiry*( discard # CacheStore does not have a notion of TTL method ensureExpiry*( - self: CacheStore, - treeCid: Cid, - index: Natural, - expiry: SecondsSince1970 + self: CacheStore, treeCid: Cid, index: Natural, expiry: SecondsSince1970 ): Future[?!void] {.async.} = ## Updates block's associated TTL in store - not applicable for CacheStore ## @@ -248,7 +248,9 @@ method delBlock*(self: CacheStore, cid: Cid): Future[?!void] {.async.} = return success() -method delBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!void] {.async.} = +method delBlock*( + self: CacheStore, treeCid: Cid, index: Natural +): Future[?!void] {.async.} = let maybeRemoved = self.cidAndProofCache.del((treeCid, index)) if removed =? 
maybeRemoved: @@ -266,7 +268,7 @@ proc new*( _: type CacheStore, blocks: openArray[Block] = [], cacheSize: NBytes = DefaultCacheSize, - chunkSize: NBytes = DefaultChunkSize + chunkSize: NBytes = DefaultChunkSize, ): CacheStore {.raises: [Defect, ValueError].} = ## Create a new CacheStore instance ## @@ -286,7 +288,8 @@ proc new*( cidAndProofCache: cidAndProofCache, currentSize: currentSize, size: cacheSize, - onBlockStored: CidCallback.none) + onBlockStored: CidCallback.none, + ) for blk in blocks: discard store.putBlockSync(blk) @@ -294,9 +297,6 @@ proc new*( return store proc new*( - _: type CacheStore, - blocks: openArray[Block] = [], - cacheSize: int, - chunkSize: int + _: type CacheStore, blocks: openArray[Block] = [], cacheSize: int, chunkSize: int ): CacheStore {.raises: [Defect, ValueError].} = CacheStore.new(blocks, NBytes cacheSize, NBytes chunkSize) diff --git a/codex/stores/keyutils.nim b/codex/stores/keyutils.nim index 1dbeccb4..0634b6a2 100644 --- a/codex/stores/keyutils.nim +++ b/codex/stores/keyutils.nim @@ -8,7 +8,8 @@ ## those terms. import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import std/sugar import pkg/questionable/results @@ -30,10 +31,9 @@ const QuotaReservedKey* = (QuotaKey / "reserved").tryGet func makePrefixKey*(postFixLen: int, cid: Cid): ?!Key = - let - cidKey = ? Key.init(($cid)[^postFixLen..^1] & "/" & $cid) + let cidKey = ?Key.init(($cid)[^postFixLen ..^ 1] & "/" & $cid) - if ? cid.isManifest: + if ?cid.isManifest: success CodexManifestKey / cidKey else: success CodexBlocksKey / cidKey @@ -42,7 +42,7 @@ proc createBlockExpirationMetadataKey*(cid: Cid): ?!Key = BlocksTtlKey / $cid proc createBlockExpirationMetadataQueryKey*(): ?!Key = - let queryString = ? (BlocksTtlKey / "*") + let queryString = ?(BlocksTtlKey / "*") Key.init(queryString) proc createBlockCidAndProofMetadataKey*(treeCid: Cid, index: Natural): ?!Key = diff --git a/codex/stores/maintenance.nim b/codex/stores/maintenance.nim index 3d1e66ca..e7ce1bdf 100644 --- a/codex/stores/maintenance.nim +++ b/codex/stores/maintenance.nim @@ -25,14 +25,13 @@ const DefaultBlockMaintenanceInterval* = 10.minutes DefaultNumberOfBlocksToMaintainPerInterval* = 1000 -type - BlockMaintainer* = ref object of RootObj - repoStore: RepoStore - interval: Duration - timer: Timer - clock: Clock - numberOfBlocksPerInterval: int - offset: int +type BlockMaintainer* = ref object of RootObj + repoStore: RepoStore + interval: Duration + timer: Timer + clock: Clock + numberOfBlocksPerInterval: int + offset: int proc new*( T: type BlockMaintainer, @@ -40,7 +39,7 @@ proc new*( interval: Duration, numberOfBlocksPerInterval = 100, timer = Timer.new(), - clock: Clock = SystemClock.new() + clock: Clock = SystemClock.new(), ): BlockMaintainer = ## Create new BlockMaintainer instance ## @@ -52,13 +51,16 @@ proc new*( numberOfBlocksPerInterval: numberOfBlocksPerInterval, timer: timer, clock: clock, - offset: 0) + offset: 0, + ) proc deleteExpiredBlock(self: BlockMaintainer, cid: Cid): Future[void] {.async.} = if isErr (await self.repoStore.delBlock(cid)): trace "Unable to delete block from repoStore" -proc processBlockExpiration(self: BlockMaintainer, be: BlockExpiration): Future[void] {.async.} = +proc processBlockExpiration( + self: BlockMaintainer, be: BlockExpiration +): Future[void] {.async.} = if be.expiry < self.clock.now: await self.deleteExpiredBlock(be.cid) else: @@ -66,8 +68,7 @@ proc processBlockExpiration(self: BlockMaintainer, be: BlockExpiration): Future[ proc runBlockCheck(self: BlockMaintainer): 
Future[void] {.async.} = let expirations = await self.repoStore.getBlockExpirations( - maxNumber = self.numberOfBlocksPerInterval, - offset = self.offset + maxNumber = self.numberOfBlocksPerInterval, offset = self.offset ) without iter =? expirations, err: @@ -93,7 +94,7 @@ proc start*(self: BlockMaintainer) = except CancelledError as error: raise error except CatchableError as exc: - error "Unexpected exception in BlockMaintainer.onTimer(): ", msg=exc.msg + error "Unexpected exception in BlockMaintainer.onTimer(): ", msg = exc.msg self.timer.start(onTimer, self.interval) diff --git a/codex/stores/networkstore.nim b/codex/stores/networkstore.nim index 40758b94..faee36e1 100644 --- a/codex/stores/networkstore.nim +++ b/codex/stores/networkstore.nim @@ -7,7 +7,6 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. - {.push raises: [].} import pkg/chronos @@ -28,10 +27,9 @@ export blockstore, blockexchange, asyncheapqueue logScope: topics = "codex networkstore" -type - NetworkStore* = ref object of BlockStore - engine*: BlockExcEngine # blockexc decision engine - localStore*: BlockStore # local block store +type NetworkStore* = ref object of BlockStore + engine*: BlockExcEngine # blockexc decision engine + localStore*: BlockStore # local block store method getBlock*(self: NetworkStore, address: BlockAddress): Future[?!Block] {.async.} = without blk =? (await self.localStore.getBlock(address)), err: @@ -60,9 +58,8 @@ method getBlock*(self: NetworkStore, treeCid: Cid, index: Natural): Future[?!Blo self.getBlock(BlockAddress.init(treeCid, index)) method putBlock*( - self: NetworkStore, - blk: Block, - ttl = Duration.none): Future[?!void] {.async.} = + self: NetworkStore, blk: Block, ttl = Duration.none +): Future[?!void] {.async.} = ## Store block locally and notify the network ## let res = await self.localStore.putBlock(blk, ttl) @@ -73,26 +70,21 @@ method putBlock*( return success() method putCidAndProof*( - self: NetworkStore, - treeCid: Cid, - index: Natural, - blockCid: Cid, - proof: CodexProof): Future[?!void] = + self: NetworkStore, treeCid: Cid, index: Natural, blockCid: Cid, proof: CodexProof +): Future[?!void] = self.localStore.putCidAndProof(treeCid, index, blockCid, proof) method getCidAndProof*( - self: NetworkStore, - treeCid: Cid, - index: Natural): Future[?!(Cid, CodexProof)] = + self: NetworkStore, treeCid: Cid, index: Natural +): Future[?!(Cid, CodexProof)] = ## Get a block proof from the blockstore ## self.localStore.getCidAndProof(treeCid, index) method ensureExpiry*( - self: NetworkStore, - cid: Cid, - expiry: SecondsSince1970): Future[?!void] {.async.} = + self: NetworkStore, cid: Cid, expiry: SecondsSince1970 +): Future[?!void] {.async.} = ## Ensure that block's associated expiry is at least given timestamp ## If the current expiry is lower, it is updated to the given one, otherwise it is left intact ## @@ -108,10 +100,8 @@ method ensureExpiry*( return success() method ensureExpiry*( - self: NetworkStore, - treeCid: Cid, - index: Natural, - expiry: SecondsSince1970): Future[?!void] {.async.} = + self: NetworkStore, treeCid: Cid, index: Natural, expiry: SecondsSince1970 +): Future[?!void] {.async.} = ## Ensure that block's associated expiry is at least given timestamp ## If the current expiry is lower, it is updated to the given one, otherwise it is left intact ## @@ -127,8 +117,8 @@ method ensureExpiry*( return success() method listBlocks*( - self: NetworkStore, - blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] 
= + self: NetworkStore, blockType = BlockType.Manifest +): Future[?!AsyncIter[?Cid]] = self.localStore.listBlocks(blockType) method delBlock*(self: NetworkStore, cid: Cid): Future[?!void] = @@ -155,9 +145,7 @@ method close*(self: NetworkStore): Future[void] {.async.} = await self.localStore.close proc new*( - T: type NetworkStore, - engine: BlockExcEngine, - localStore: BlockStore + T: type NetworkStore, engine: BlockExcEngine, localStore: BlockStore ): NetworkStore = ## Create new instance of a NetworkStore ## diff --git a/codex/stores/queryiterhelper.nim b/codex/stores/queryiterhelper.nim index 7c51d215..6bf3090b 100644 --- a/codex/stores/queryiterhelper.nim +++ b/codex/stores/queryiterhelper.nim @@ -9,9 +9,8 @@ import ../utils/asynciter type KeyVal*[T] = tuple[key: Key, value: T] proc toAsyncIter*[T]( - queryIter: QueryIter[T], - finishOnErr: bool = true - ): Future[?!AsyncIter[?!QueryResponse[T]]] {.async.} = + queryIter: QueryIter[T], finishOnErr: bool = true +): Future[?!AsyncIter[?!QueryResponse[T]]] {.async.} = ## Converts `QueryIter[T]` to `AsyncIter[?!QueryResponse[T]]` and automatically ## runs dispose whenever `QueryIter` finishes or whenever an error occurs (only ## if the flag finishOnErr is set to true) @@ -25,7 +24,7 @@ proc toAsyncIter*[T]( var errOccurred = false - proc genNext: Future[?!QueryResponse[T]] {.async.} = + proc genNext(): Future[?!QueryResponse[T]] {.async.} = let queryResOrErr = await queryIter.next() if queryResOrErr.isErr: @@ -44,8 +43,8 @@ proc toAsyncIter*[T]( AsyncIter[?!QueryResponse[T]].new(genNext, isFinished).success proc filterSuccess*[T]( - iter: AsyncIter[?!QueryResponse[T]] - ): Future[AsyncIter[tuple[key: Key, value: T]]] {.async.} = + iter: AsyncIter[?!QueryResponse[T]] +): Future[AsyncIter[tuple[key: Key, value: T]]] {.async.} = ## Filters out any items that are not success proc mapping(resOrErr: ?!QueryResponse[T]): Future[?KeyVal[T]] {.async.} = diff --git a/codex/stores/repostore/coders.nim b/codex/stores/repostore/coders.nim index 6fc78408..47df7219 100644 --- a/codex/stores/repostore/coders.nim +++ b/codex/stores/repostore/coders.nim @@ -19,20 +19,35 @@ import ../../errors import ../../merkletree import ../../utils/json -proc encode*(t: QuotaUsage): seq[byte] = t.toJson().toBytes() -proc decode*(T: type QuotaUsage, bytes: seq[byte]): ?!T = T.fromJson(bytes) +proc encode*(t: QuotaUsage): seq[byte] = + t.toJson().toBytes() -proc encode*(t: BlockMetadata): seq[byte] = t.toJson().toBytes() -proc decode*(T: type BlockMetadata, bytes: seq[byte]): ?!T = T.fromJson(bytes) +proc decode*(T: type QuotaUsage, bytes: seq[byte]): ?!T = + T.fromJson(bytes) -proc encode*(t: LeafMetadata): seq[byte] = t.toJson().toBytes() -proc decode*(T: type LeafMetadata, bytes: seq[byte]): ?!T = T.fromJson(bytes) +proc encode*(t: BlockMetadata): seq[byte] = + t.toJson().toBytes() -proc encode*(t: DeleteResult): seq[byte] = t.toJson().toBytes() -proc decode*(T: type DeleteResult, bytes: seq[byte]): ?!T = T.fromJson(bytes) +proc decode*(T: type BlockMetadata, bytes: seq[byte]): ?!T = + T.fromJson(bytes) -proc encode*(t: StoreResult): seq[byte] = t.toJson().toBytes() -proc decode*(T: type StoreResult, bytes: seq[byte]): ?!T = T.fromJson(bytes) +proc encode*(t: LeafMetadata): seq[byte] = + t.toJson().toBytes() + +proc decode*(T: type LeafMetadata, bytes: seq[byte]): ?!T = + T.fromJson(bytes) + +proc encode*(t: DeleteResult): seq[byte] = + t.toJson().toBytes() + +proc decode*(T: type DeleteResult, bytes: seq[byte]): ?!T = + T.fromJson(bytes) + +proc encode*(t: 
StoreResult): seq[byte] = + t.toJson().toBytes() + +proc decode*(T: type StoreResult, bytes: seq[byte]): ?!T = + T.fromJson(bytes) proc encode*(i: uint64): seq[byte] = @(i.toBytesBE) @@ -43,5 +58,8 @@ proc decode*(T: type uint64, bytes: seq[byte]): ?!T = else: failure("Not enough bytes to decode `uint64`") -proc encode*(i: Natural | enum): seq[byte] = cast[uint64](i).encode -proc decode*(T: typedesc[Natural | enum], bytes: seq[byte]): ?!T = uint64.decode(bytes).map((ui: uint64) => cast[T](ui)) +proc encode*(i: Natural | enum): seq[byte] = + cast[uint64](i).encode + +proc decode*(T: typedesc[Natural | enum], bytes: seq[byte]): ?!T = + uint64.decode(bytes).map((ui: uint64) => cast[T](ui)) diff --git a/codex/stores/repostore/operations.nim b/codex/stores/repostore/operations.nim index e000bb0a..dcacbd62 100644 --- a/codex/stores/repostore/operations.nim +++ b/codex/stores/repostore/operations.nim @@ -32,12 +32,17 @@ declareGauge(codex_repostore_blocks, "codex repostore blocks") declareGauge(codex_repostore_bytes_used, "codex repostore bytes used") declareGauge(codex_repostore_bytes_reserved, "codex repostore bytes reserved") -proc putLeafMetadata*(self: RepoStore, treeCid: Cid, index: Natural, blkCid: Cid, proof: CodexProof): Future[?!StoreResultKind] {.async.} = +proc putLeafMetadata*( + self: RepoStore, treeCid: Cid, index: Natural, blkCid: Cid, proof: CodexProof +): Future[?!StoreResultKind] {.async.} = without key =? createBlockCidAndProofMetadataKey(treeCid, index), err: return failure(err) - await self.metaDs.modifyGet(key, - proc (maybeCurrMd: ?LeafMetadata): Future[(?LeafMetadata, StoreResultKind)] {.async.} = + await self.metaDs.modifyGet( + key, + proc( + maybeCurrMd: ?LeafMetadata + ): Future[(?LeafMetadata, StoreResultKind)] {.async.} = var md: LeafMetadata res: StoreResultKind @@ -49,10 +54,12 @@ proc putLeafMetadata*(self: RepoStore, treeCid: Cid, index: Natural, blkCid: Cid md = LeafMetadata(blkCid: blkCid, proof: proof) res = Stored - (md.some, res) + (md.some, res), ) -proc getLeafMetadata*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!LeafMetadata] {.async.} = +proc getLeafMetadata*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!LeafMetadata] {.async.} = without key =? createBlockCidAndProofMetadataKey(treeCid, index), err: return failure(err) @@ -64,9 +71,12 @@ proc getLeafMetadata*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!L success(leafMd) -proc updateTotalBlocksCount*(self: RepoStore, plusCount: Natural = 0, minusCount: Natural = 0): Future[?!void] {.async.} = - await self.metaDs.modify(CodexTotalBlocksKey, - proc (maybeCurrCount: ?Natural): Future[?Natural] {.async.} = +proc updateTotalBlocksCount*( + self: RepoStore, plusCount: Natural = 0, minusCount: Natural = 0 +): Future[?!void] {.async.} = + await self.metaDs.modify( + CodexTotalBlocksKey, + proc(maybeCurrCount: ?Natural): Future[?Natural] {.async.} = let count: Natural = if currCount =? 
maybeCurrCount: currCount + plusCount - minusCount @@ -75,42 +85,49 @@ proc updateTotalBlocksCount*(self: RepoStore, plusCount: Natural = 0, minusCount self.totalBlocks = count codex_repostore_blocks.set(count.int64) - count.some + count.some, ) proc updateQuotaUsage*( - self: RepoStore, - plusUsed: NBytes = 0.NBytes, - minusUsed: NBytes = 0.NBytes, - plusReserved: NBytes = 0.NBytes, - minusReserved: NBytes = 0.NBytes + self: RepoStore, + plusUsed: NBytes = 0.NBytes, + minusUsed: NBytes = 0.NBytes, + plusReserved: NBytes = 0.NBytes, + minusReserved: NBytes = 0.NBytes, ): Future[?!void] {.async.} = - await self.metaDs.modify(QuotaUsedKey, - proc (maybeCurrUsage: ?QuotaUsage): Future[?QuotaUsage] {.async.} = + await self.metaDs.modify( + QuotaUsedKey, + proc(maybeCurrUsage: ?QuotaUsage): Future[?QuotaUsage] {.async.} = var usage: QuotaUsage if currUsage =? maybeCurrUsage: - usage = QuotaUsage(used: currUsage.used + plusUsed - minusUsed, reserved: currUsage.reserved + plusReserved - minusReserved) + usage = QuotaUsage( + used: currUsage.used + plusUsed - minusUsed, + reserved: currUsage.reserved + plusReserved - minusReserved, + ) else: - usage = QuotaUsage(used: plusUsed - minusUsed, reserved: plusReserved - minusReserved) + usage = + QuotaUsage(used: plusUsed - minusUsed, reserved: plusReserved - minusReserved) if usage.used + usage.reserved > self.quotaMaxBytes: - raise newException(QuotaNotEnoughError, + raise newException( + QuotaNotEnoughError, "Quota usage would exceed the limit. Used: " & $usage.used & ", reserved: " & - $usage.reserved & ", limit: " & $self.quotaMaxBytes) + $usage.reserved & ", limit: " & $self.quotaMaxBytes, + ) else: self.quotaUsage = usage codex_repostore_bytes_used.set(usage.used.int64) codex_repostore_bytes_reserved.set(usage.reserved.int64) - return usage.some + return usage.some, ) proc updateBlockMetadata*( - self: RepoStore, - cid: Cid, - plusRefCount: Natural = 0, - minusRefCount: Natural = 0, - minExpiry: SecondsSince1970 = 0 + self: RepoStore, + cid: Cid, + plusRefCount: Natural = 0, + minusRefCount: Natural = 0, + minExpiry: SecondsSince1970 = 0, ): Future[?!void] {.async.} = if cid.isEmpty: return success() @@ -118,19 +135,24 @@ proc updateBlockMetadata*( without metaKey =? createBlockExpirationMetadataKey(cid), err: return failure(err) - await self.metaDs.modify(metaKey, - proc (maybeCurrBlockMd: ?BlockMetadata): Future[?BlockMetadata] {.async.} = + await self.metaDs.modify( + metaKey, + proc(maybeCurrBlockMd: ?BlockMetadata): Future[?BlockMetadata] {.async.} = if currBlockMd =? maybeCurrBlockMd: BlockMetadata( size: currBlockMd.size, expiry: max(currBlockMd.expiry, minExpiry), - refCount: currBlockMd.refCount + plusRefCount - minusRefCount + refCount: currBlockMd.refCount + plusRefCount - minusRefCount, ).some else: - raise newException(BlockNotFoundError, "Metadata for block with cid " & $cid & " not found") + raise newException( + BlockNotFoundError, "Metadata for block with cid " & $cid & " not found" + ), ) -proc storeBlock*(self: RepoStore, blk: Block, minExpiry: SecondsSince1970): Future[?!StoreResult] {.async.} = +proc storeBlock*( + self: RepoStore, blk: Block, minExpiry: SecondsSince1970 +): Future[?!StoreResult] {.async.} = if blk.isEmpty: return success(StoreResult(kind: AlreadyInStore)) @@ -140,15 +162,20 @@ proc storeBlock*(self: RepoStore, blk: Block, minExpiry: SecondsSince1970): Futu without blkKey =? 
makePrefixKey(self.postFixLen, blk.cid), err: return failure(err) - await self.metaDs.modifyGet(metaKey, - proc (maybeCurrMd: ?BlockMetadata): Future[(?BlockMetadata, StoreResult)] {.async.} = + await self.metaDs.modifyGet( + metaKey, + proc(maybeCurrMd: ?BlockMetadata): Future[(?BlockMetadata, StoreResult)] {.async.} = var md: BlockMetadata res: StoreResult if currMd =? maybeCurrMd: if currMd.size == blk.data.len.NBytes: - md = BlockMetadata(size: currMd.size, expiry: max(currMd.expiry, minExpiry), refCount: currMd.refCount) + md = BlockMetadata( + size: currMd.size, + expiry: max(currMd.expiry, minExpiry), + refCount: currMd.refCount, + ) res = StoreResult(kind: AlreadyInStore) # making sure that the block actually is stored in the repoDs @@ -156,21 +183,28 @@ proc storeBlock*(self: RepoStore, blk: Block, minExpiry: SecondsSince1970): Futu raise err if not hasBlock: - warn "Block metadata is present, but block is absent. Restoring block.", cid = blk.cid + warn "Block metadata is present, but block is absent. Restoring block.", + cid = blk.cid if err =? (await self.repoDs.put(blkKey, blk.data)).errorOption: raise err else: - raise newException(CatchableError, "Repo already stores a block with the same cid but with a different size, cid: " & $blk.cid) + raise newException( + CatchableError, + "Repo already stores a block with the same cid but with a different size, cid: " & + $blk.cid, + ) else: md = BlockMetadata(size: blk.data.len.NBytes, expiry: minExpiry, refCount: 0) res = StoreResult(kind: Stored, used: blk.data.len.NBytes) if err =? (await self.repoDs.put(blkKey, blk.data)).errorOption: raise err - (md.some, res) + (md.some, res), ) -proc tryDeleteBlock*(self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.low): Future[?!DeleteResult] {.async.} = +proc tryDeleteBlock*( + self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.low +): Future[?!DeleteResult] {.async.} = if cid.isEmpty: return success(DeleteResult(kind: InUse)) @@ -180,8 +214,11 @@ proc tryDeleteBlock*(self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.l without blkKey =? makePrefixKey(self.postFixLen, cid), err: return failure(err) - await self.metaDs.modifyGet(metaKey, - proc (maybeCurrMd: ?BlockMetadata): Future[(?BlockMetadata, DeleteResult)] {.async.} = + await self.metaDs.modifyGet( + metaKey, + proc( + maybeCurrMd: ?BlockMetadata + ): Future[(?BlockMetadata, DeleteResult)] {.async.} = var maybeMeta: ?BlockMetadata res: DeleteResult @@ -209,5 +246,5 @@ proc tryDeleteBlock*(self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.l if err =? (await self.repoDs.delete(blkKey)).errorOption: raise err - (maybeMeta, res) + (maybeMeta, res), ) diff --git a/codex/stores/repostore/store.nim b/codex/stores/repostore/store.nim index b11970bd..c32eb805 100644 --- a/codex/stores/repostore/store.nim +++ b/codex/stores/repostore/store.nim @@ -81,7 +81,9 @@ method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} = return Block.new(cid, data, verify = true) -method getBlockAndProof*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.async.} = +method getBlockAndProof*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!(Block, CodexProof)] {.async.} = without leafMd =? 
await self.getLeafMetadata(treeCid, index), err: return failure(err) @@ -90,7 +92,9 @@ method getBlockAndProof*(self: RepoStore, treeCid: Cid, index: Natural): Future[ success((blk, leafMd.proof)) -method getBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!Block] {.async.} = +method getBlock*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!Block] {.async.} = without leafMd =? await self.getLeafMetadata(treeCid, index), err: return failure(err) @@ -106,24 +110,20 @@ method getBlock*(self: RepoStore, address: BlockAddress): Future[?!Block] = self.getBlock(address.cid) method ensureExpiry*( - self: RepoStore, - cid: Cid, - expiry: SecondsSince1970 + self: RepoStore, cid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = ## Ensure that the block's associated expiry is at least the given timestamp ## If the current expiry is lower, it is updated to the given one; otherwise it is left intact ## if expiry <= 0: - return failure(newException(ValueError, "Expiry timestamp must be larger than zero")) + return + failure(newException(ValueError, "Expiry timestamp must be larger than zero")) await self.updateBlockMetadata(cid, minExpiry = expiry) method ensureExpiry*( - self: RepoStore, - treeCid: Cid, - index: Natural, - expiry: SecondsSince1970 + self: RepoStore, treeCid: Cid, index: Natural, expiry: SecondsSince1970 ): Future[?!void] {.async.} = ## Ensure that the block's associated expiry is at least the given timestamp ## If the current expiry is lower, it is updated to the given one; otherwise it is left intact @@ -135,11 +135,7 @@ method ensureExpiry*( await self.ensureExpiry(leafMd.blkCid, expiry) method putCidAndProof*( - self: RepoStore, - treeCid: Cid, - index: Natural, - blkCid: Cid, - proof: CodexProof + self: RepoStore, treeCid: Cid, index: Natural, blkCid: Cid, proof: CodexProof ): Future[?!void] {.async.} = ## Put a block CID and its proof to the blockstore ## @@ -165,29 +161,22 @@ method putCidAndProof*( return success() method getCidAndProof*( - self: RepoStore, - treeCid: Cid, - index: Natural + self: RepoStore, treeCid: Cid, index: Natural ): Future[?!(Cid, CodexProof)] {.async.} = without leafMd =? await self.getLeafMetadata(treeCid, index), err: return failure(err) success((leafMd.blkCid, leafMd.proof)) -method getCid*( - self: RepoStore, - treeCid: Cid, - index: Natural -): Future[?!Cid] {.async.} = +method getCid*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!Cid] {.async.} = without leafMd =? await self.getLeafMetadata(treeCid, index), err: return failure(err) success(leafMd.blkCid) method putBlock*( - self: RepoStore, - blk: Block, - ttl = Duration.none): Future[?!void] {.async.} = + self: RepoStore, blk: Block, ttl = Duration.none +): Future[?!void] {.async.} = ## Put a block to the blockstore ## @@ -251,14 +240,17 @@ method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = return success() -method delBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!void] {.async.} = +method delBlock*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!void] {.async.} = without leafMd =? await self.getLeafMetadata(treeCid, index), err: if err of BlockNotFoundError: return success() else: return failure(err) - if err =?
+ (await self.updateBlockMetadata(leafMd.blkCid, minusRefCount = 1)).errorOption: if not (err of BlockNotFoundError): return failure(err) @@ -281,7 +273,9 @@ method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} = return await self.repoDs.has(key) -method hasBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!bool] {.async.} = +method hasBlock*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!bool] {.async.} = without leafMd =? await self.getLeafMetadata(treeCid, index), err: if err of BlockNotFoundError: return success(false) @@ -291,23 +285,21 @@ method hasBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!bool] await self.hasBlock(leafMd.blkCid) method listBlocks*( - self: RepoStore, - blockType = BlockType.Manifest + self: RepoStore, blockType = BlockType.Manifest ): Future[?!AsyncIter[?Cid]] {.async.} = ## Get the list of blocks in the RepoStore. ## This is an intensive operation ## - var - iter = AsyncIter[?Cid]() + var iter = AsyncIter[?Cid]() let key = - case blockType: + case blockType of BlockType.Manifest: CodexManifestKey of BlockType.Block: CodexBlocksKey of BlockType.Both: CodexRepoKey - let query = Query.init(key, value=false) + let query = Query.init(key, value = false) without queryIter =? (await self.repoDs.query(query)), err: trace "Error querying cids in repo", blockType, err = err.msg return failure(err) @@ -328,13 +320,12 @@ method listBlocks*( return success iter proc createBlockExpirationQuery(maxNumber: int, offset: int): ?!Query = - let queryKey = ? createBlockExpirationMetadataQueryKey() + let queryKey = ?createBlockExpirationMetadataQueryKey() success Query.init(queryKey, offset = offset, limit = maxNumber) method getBlockExpirations*( - self: RepoStore, - maxNumber: int, - offset: int): Future[?!AsyncIter[BlockExpiration]] {.async, base.} = + self: RepoStore, maxNumber: int, offset: int +): Future[?!AsyncIter[BlockExpiration]] {.async, base.} = ## Get iterator with block expirations ## @@ -350,17 +341,18 @@ method getBlockExpirations*( error "Unable to convert QueryIter to AsyncIter", err = err.msg return failure(err) - let - filteredIter: AsyncIter[KeyVal[BlockMetadata]] = await asyncQueryIter.filterSuccess() + let filteredIter: AsyncIter[KeyVal[BlockMetadata]] = + await asyncQueryIter.filterSuccess() - proc mapping (kv: KeyVal[BlockMetadata]): Future[?BlockExpiration] {.async.} = + proc mapping(kv: KeyVal[BlockMetadata]): Future[?BlockExpiration] {.async.} = without cid =? 
Cid.init(kv.key.value).mapFailure, err: error "Failed decoding cid", err = err.msg return BlockExpiration.none BlockExpiration(cid: cid, expiry: kv.value.expiry).some - let blockExpIter = await mapFilter[KeyVal[BlockMetadata], BlockExpiration](filteredIter, mapping) + let blockExpIter = + await mapFilter[KeyVal[BlockMetadata], BlockExpiration](filteredIter, mapping) success(blockExpIter) diff --git a/codex/stores/repostore/types.nim b/codex/stores/repostore/types.nim index 2f88183d..3d455d12 100644 --- a/codex/stores/repostore/types.nim +++ b/codex/stores/repostore/types.nim @@ -56,17 +56,17 @@ type expiry*: SecondsSince1970 DeleteResultKind* {.serialize.} = enum - Deleted = 0, # block removed from store - InUse = 1, # block not removed, refCount > 0 and not expired - NotFound = 2 # block not found in store + Deleted = 0 # block removed from store + InUse = 1 # block not removed, refCount > 0 and not expired + NotFound = 2 # block not found in store DeleteResult* {.serialize.} = object kind*: DeleteResultKind released*: NBytes StoreResultKind* {.serialize.} = enum - Stored = 0, # new block stored - AlreadyInStore = 1 # block already in store + Stored = 0 # new block stored + AlreadyInStore = 1 # block already in store StoreResult* {.serialize.} = object kind*: StoreResultKind @@ -94,7 +94,7 @@ func new*( clock: Clock = SystemClock.new(), postFixLen = 2, quotaMaxBytes = DefaultQuotaBytes, - blockTtl = DefaultBlockTtl + blockTtl = DefaultBlockTtl, ): RepoStore = ## Create new instance of a RepoStore ## @@ -105,5 +105,5 @@ func new*( postFixLen: postFixLen, quotaMaxBytes: quotaMaxBytes, blockTtl: blockTtl, - onBlockStored: CidCallback.none + onBlockStored: CidCallback.none, ) diff --git a/codex/stores/treehelper.nim b/codex/stores/treehelper.nim index 485cbfc2..e1f5d48d 100644 --- a/codex/stores/treehelper.nim +++ b/codex/stores/treehelper.nim @@ -9,7 +9,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import std/sugar import pkg/chronos @@ -22,13 +23,18 @@ import ./blockstore import ../utils/asynciter import ../merkletree -proc putSomeProofs*(store: BlockStore, tree: CodexTree, iter: Iter[int]): Future[?!void] {.async.} = +proc putSomeProofs*( + store: BlockStore, tree: CodexTree, iter: Iter[int] +): Future[?!void] {.async.} = without treeCid =? tree.rootCid, err: return failure(err) for i in iter: - if i notin 0..<tree.leavesCount: + if i notin 0 ..< tree.leavesCount: [...] iter.map((i: Natural) => i.ord)) proc putAllProofs*(store: BlockStore, tree: CodexTree): Future[?!void] = - store.putSomeProofs(tree, Iter[int].new(0..<tree.leavesCount)) + store.putSomeProofs(tree, Iter[int].new(0 ..< tree.leavesCount)) diff --git a/codex/streams/storestream.nim b/codex/streams/storestream.nim [...] method atEof*(self: StoreStream): bool = self.offset >= self.size type LPStreamReadError* = object of LPStreamError - par*: ref CatchableError proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError = var w = newException(LPStreamReadError, "Read stream failed") @@ -83,9 +76,7 @@ proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError = result = w method readOnce*( - self: StoreStream, - pbytes: pointer, - nbytes: int + self: StoreStream, pbytes: pointer, nbytes: int ): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} = ## Read `nbytes` from current position in the StoreStream into output buffer pointed by `pbytes`. ## Return how many bytes were actually read before EOF was encountered.
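The `readOnce` hunk below only re-wraps the block arithmetic; the computation itself is unchanged. As a standalone sanity check of that arithmetic (a sketch, not part of the diff; `blockSize`, `size`, `offset` and `nbytes` are made-up sample values, while the real code takes them from the manifest and the stream state):

```nim
# Sketch of the offset-to-block mapping used by StoreStream.readOnce.
let
  blockSize = 65536 # bytes per block (assumed)
  size = 200_000 # total stream size in bytes (assumed)
  offset = 70_000 # current stream position
  nbytes = 10_000 # bytes requested by the caller

let
  blockNum = offset div blockSize # which block holds the current position
  blockOffset = offset mod blockSize # where to start reading inside that block
  # never read past EOF, past the request, or past the block boundary
  readBytes = min([size - offset, nbytes, blockSize - blockOffset])

echo blockNum, " ", blockOffset, " ", readBytes # -> 1 4464 10000
```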
@@ -97,24 +88,34 @@ method readOnce*( # The loop iterates over blocks in the StoreStream, # reading them and copying their data into outbuf - var read = 0 # Bytes read so far, and thus write offset in the outbuf + var read = 0 # Bytes read so far, and thus write offset in the outbuf while read < nbytes and not self.atEof: # Compute from the current stream position `self.offset` the block num/offset to read # Compute how many bytes to read from this block let - blockNum = self.offset div self.manifest.blockSize.int + blockNum = self.offset div self.manifest.blockSize.int blockOffset = self.offset mod self.manifest.blockSize.int - readBytes = min([self.size - self.offset, - nbytes - read, - self.manifest.blockSize.int - blockOffset]) - address = BlockAddress(leaf: true, treeCid: self.manifest.treeCid, index: blockNum) - + readBytes = min( + [ + self.size - self.offset, + nbytes - read, + self.manifest.blockSize.int - blockOffset, + ] + ) + address = + BlockAddress(leaf: true, treeCid: self.manifest.treeCid, index: blockNum) # Read contents of block `blockNum` without blk =? (await self.store.getBlock(address)).tryGet.catch, error: raise newLPStreamReadError(error) - trace "Reading bytes from store stream", manifestCid = self.manifest.cid.get(), numBlocks = self.manifest.blocksCount, blockNum, blkCid = blk.cid, bytes = readBytes, blockOffset + trace "Reading bytes from store stream", + manifestCid = self.manifest.cid.get(), + numBlocks = self.manifest.blocksCount, + blockNum, + blkCid = blk.cid, + bytes = readBytes, + blockOffset # Copy `readBytes` bytes starting at `blockOffset` from the block into the outbuf if blk.isEmpty: @@ -130,5 +131,5 @@ method readOnce*( method closeImpl*(self: StoreStream) {.async.} = trace "Closing StoreStream" - self.offset = self.size # set Eof + self.offset = self.size # set Eof await procCall LPStream(self).closeImpl() diff --git a/codex/systemclock.nim b/codex/systemclock.nim index 25ac4216..6226f627 100644 --- a/codex/systemclock.nim +++ b/codex/systemclock.nim @@ -2,8 +2,7 @@ import std/times import pkg/upraises import ./clock -type - SystemClock* = ref object of Clock +type SystemClock* = ref object of Clock method now*(clock: SystemClock): SecondsSince1970 {.upraises: [].} = let now = times.now().utc diff --git a/codex/units.nim b/codex/units.nim index 73a67a2c..b600103f 100644 --- a/codex/units.nim +++ b/codex/units.nim @@ -13,52 +13,78 @@ import std/strutils import ./logutils -type - NBytes* = distinct Natural +type NBytes* = distinct Natural template basicMaths(T: untyped) = - proc `+` *(x: T, y: static[int]): T = T(`+`(x.Natural, y.Natural)) - proc `-` *(x: T, y: static[int]): T = T(`-`(x.Natural, y.Natural)) - proc `*` *(x: T, y: static[int]): T = T(`*`(x.Natural, y.Natural)) - proc `+` *(x, y: T): T = T(`+`(x.Natural, y.Natural)) - proc `-` *(x, y: T): T = T(`-`(x.Natural, y.Natural)) - proc `*` *(x, y: T): T = T(`*`(x.Natural, y.Natural)) - proc `<` *(x, y: T): bool {.borrow.} - proc `<=` *(x, y: T): bool {.borrow.} - proc `==` *(x, y: T): bool {.borrow.} - proc `+=` *(x: var T, y: T) {.borrow.} - proc `-=` *(x: var T, y: T) {.borrow.} - proc `hash` *(x: T): Hash {.borrow.} + proc `+`*(x: T, y: static[int]): T = + T(`+`(x.Natural, y.Natural)) + + proc `-`*(x: T, y: static[int]): T = + T(`-`(x.Natural, y.Natural)) + + proc `*`*(x: T, y: static[int]): T = + T(`*`(x.Natural, y.Natural)) + + proc `+`*(x, y: T): T = + T(`+`(x.Natural, y.Natural)) + + proc `-`*(x, y: T): T = + T(`-`(x.Natural, y.Natural)) + + proc `*`*(x, y: T): T = + T(`*`(x.Natural, 
y.Natural)) + + proc `<`*(x, y: T): bool {.borrow.} + proc `<=`*(x, y: T): bool {.borrow.} + proc `==`*(x, y: T): bool {.borrow.} + proc `+=`*(x: var T, y: T) {.borrow.} + proc `-=`*(x: var T, y: T) {.borrow.} + proc `hash`*(x: T): Hash {.borrow.} template divMaths(T: untyped) = - proc `mod` *(x, y: T): T = T(`mod`(x.Natural, y.Natural)) - proc `div` *(x, y: T): Natural = `div`(x.Natural, y.Natural) + proc `mod`*(x, y: T): T = + T(`mod`(x.Natural, y.Natural)) + + proc `div`*(x, y: T): Natural = + `div`(x.Natural, y.Natural) + # proc `/` *(x, y: T): Natural = `/`(x.Natural, y.Natural) basicMaths(NBytes) divMaths(NBytes) -proc `$`*(ts: NBytes): string = $(int(ts)) & "'NByte" -proc `'nb`*(n: string): NBytes = parseInt(n).NBytes +proc `$`*(ts: NBytes): string = + $(int(ts)) & "'NByte" -logutils.formatIt(NBytes): $it +proc `'nb`*(n: string): NBytes = + parseInt(n).NBytes + +logutils.formatIt(NBytes): + $it const KiB = 1024.NBytes # ByteSz, 1 kibibyte = 1,024 ByteSz - MiB = KiB * 1024 # ByteSz, 1 mebibyte = 1,048,576 ByteSz - GiB = MiB * 1024 # ByteSz, 1 gibibyte = 1,073,741,824 ByteSz + MiB = KiB * 1024 # ByteSz, 1 mebibyte = 1,048,576 ByteSz + GiB = MiB * 1024 # ByteSz, 1 gibibyte = 1,073,741,824 ByteSz -proc KiBs*(v: Natural): NBytes = v.NBytes * KiB -proc MiBs*(v: Natural): NBytes = v.NBytes * MiB -proc GiBs*(v: Natural): NBytes = v.NBytes * GiB +proc KiBs*(v: Natural): NBytes = + v.NBytes * KiB -func divUp*[T: NBytes](a, b : T): int = +proc MiBs*(v: Natural): NBytes = + v.NBytes * MiB + +proc GiBs*(v: Natural): NBytes = + v.NBytes * GiB + +func divUp*[T: NBytes](a, b: T): int = ## Division with result rounded up (rather than truncated as in 'div') assert(b != T(0)) - if a==T(0): int(0) else: int( ((a - T(1)) div b) + 1 ) + if a == T(0): + int(0) + else: + int(((a - T(1)) div b) + 1) when isMainModule: - import unittest2 suite "maths": diff --git a/codex/utils.nim b/codex/utils.nim index 617f30cb..30d84e74 100644 --- a/codex/utils.nim +++ b/codex/utils.nim @@ -23,20 +23,20 @@ export asyncheapqueue, fileutils, asynciter, chronos when defined(posix): import os, posix -func divUp*[T: SomeInteger](a, b : T): T = +func divUp*[T: SomeInteger](a, b: T): T = ## Division with result rounded up (rather than truncated as in 'div') assert(b != T(0)) - if a==T(0): T(0) else: ((a - T(1)) div b) + T(1) + if a == T(0): + T(0) + else: + ((a - T(1)) div b) + T(1) -func roundUp*[T](a, b : T): T = +func roundUp*[T](a, b: T): T = ## Round up 'a' to the next value divisible by 'b' - divUp(a,b) * b + divUp(a, b) * b proc orElse*[A](a, b: Option[A]): Option[A] = - if (a.isSome()): - a - else: - b + if (a.isSome()): a else: b template findIt*(s, pred: untyped): untyped = ## Returns the index of the first object matching a predicate, or -1 if no @@ -57,45 +57,48 @@ template findIt*(s, pred: untyped): untyped = index when not declared(parseDuration): # Odd code formatting to minimize diff v. mainLine - const Whitespace = {' ', '\t', '\v', '\r', '\l', '\f'} + const Whitespace = {' ', '\t', '\v', '\r', '\l', '\f'} - func toLowerAscii(c: char): char = - if c in {'A'..'Z'}: char(uint8(c) xor 0b0010_0000'u8) else: c + func toLowerAscii(c: char): char = + if c in {'A' .. 'Z'}: + char(uint8(c) xor 0b0010_0000'u8) + else: + c - func parseDuration*(s: string, size: var Duration): int = - ## Parse a size qualified by simple time into `Duration`. 
- ## - runnableExamples: - var res: Duration # caller must still know if 'b' refers to bytes|bits - doAssert parseDuration("10H", res) == 3 - doAssert res == initDuration(hours=10) - doAssert parseDuration("64m", res) == 6 - doAssert res == initDuration(minutes=64) - doAssert parseDuration("7m/block", res) == 2 # '/' stops parse - doAssert res == initDuration(minutes=7) # 1 shl 30, forced binary metric - doAssert parseDuration("3d", res) == 2 # '/' stops parse - doAssert res == initDuration(days=3) # 1 shl 30, forced binary metric + func parseDuration*(s: string, size: var Duration): int = + ## Parse a size qualified by simple time into `Duration`. + ## + runnableExamples: + var res: Duration # caller must still know if 'b' refers to bytes|bits + doAssert parseDuration("10H", res) == 3 + doAssert res == initDuration(hours = 10) + doAssert parseDuration("64m", res) == 6 + doAssert res == initDuration(minutes = 64) + doAssert parseDuration("7m/block", res) == 2 # '/' stops parse + doAssert res == initDuration(minutes = 7) # 1 shl 30, forced binary metric + doAssert parseDuration("3d", res) == 2 # '/' stops parse + doAssert res == initDuration(days = 3) # 1 shl 30, forced binary metric - const prefix = "s" & "mhdw" # byte|bit & lowCase metric-ish prefixes - const timeScale = [1.0, 60.0, 3600.0, 86_400.0, 604_800.0] + const prefix = "s" & "mhdw" # byte|bit & lowCase metric-ish prefixes + const timeScale = [1.0, 60.0, 3600.0, 86_400.0, 604_800.0] - var number: float - var scale = 1.0 - result = parseFloat(s, number) - if number < 0: # While parseFloat accepts negatives .. - result = 0 #.. we do not since sizes cannot be < 0 - else: - let start = result # Save spot to maybe unwind white to EOS - while result < s.len and s[result] in Whitespace: - inc result - if result < s.len: # Illegal starting char => unity - if (let si = prefix.find(s[result].toLowerAscii); si >= 0): - inc result # Now parse the scale - scale = timeScale[si] - else: # Unwind result advancement when there.. - result = start #..is no unit to the end of `s`. - var sizeF = number * scale + 0.5 # Saturate to int64.high when too big - size = seconds(int(sizeF)) + var number: float + var scale = 1.0 + result = parseFloat(s, number) + if number < 0: # While parseFloat accepts negatives .. + result = 0 #.. we do not since sizes cannot be < 0 + else: + let start = result # Save spot to maybe unwind white to EOS + while result < s.len and s[result] in Whitespace: + inc result + if result < s.len: # Illegal starting char => unity + if (let si = prefix.find(s[result].toLowerAscii); si >= 0): + inc result # Now parse the scale + scale = timeScale[si] + else: # Unwind result advancement when there.. + result = start #..is no unit to the end of `s`. + var sizeF = number * scale + 0.5 # Saturate to int64.high when too big + size = seconds(int(sizeF)) # Block all/most signals in the current thread, so we don't interfere with regular signal # handling elsewhere. 
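For reference, the rounding helpers above behave as follows; a minimal standalone sketch (not part of the diff) with the `divUp`/`roundUp` bodies copied from this file and made-up sample values:

```nim
func divUp[T: SomeInteger](a, b: T): T =
  ## Division with the result rounded up (rather than truncated as in `div`)
  assert(b != T(0))
  if a == T(0):
    T(0)
  else:
    ((a - T(1)) div b) + T(1)

func roundUp[T](a, b: T): T =
  ## Round `a` up to the next value divisible by `b`
  divUp(a, b) * b

assert divUp(10, 4) == 3 # 10 / 4 = 2.5, rounded up
assert divUp(8, 4) == 2 # exact division is unchanged
assert divUp(0, 4) == 0 # zero stays zero
assert roundUp(10, 4) == 12 # next multiple of 4
```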
@@ -115,12 +118,10 @@ proc ignoreSignalsInThread*() = SIGXCPU = 24 SIGSEGV = 11 SIGBUS = 7 - if sigdelset(signalMask, SIGPWR) != 0 or - sigdelset(signalMask, SIGXCPU) != 0 or - sigdelset(signalMask, SIGSEGV) != 0 or - sigdelset(signalMask, SIGBUS) != 0: + if sigdelset(signalMask, SIGPWR) != 0 or sigdelset(signalMask, SIGXCPU) != 0 or + sigdelset(signalMask, SIGSEGV) != 0 or sigdelset(signalMask, SIGBUS) != 0: echo osErrorMsg(osLastError()) quit(QuitFailure) if pthread_sigmask(SIG_BLOCK, signalMask, oldSignalMask) != 0: echo osErrorMsg(osLastError()) - quit(QuitFailure) \ No newline at end of file + quit(QuitFailure) diff --git a/codex/utils/addrutils.nim b/codex/utils/addrutils.nim index 3eec3015..a9ec54f5 100644 --- a/codex/utils/addrutils.nim +++ b/codex/utils/addrutils.nim @@ -8,7 +8,8 @@ ## those terms. import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import std/strutils import std/options @@ -20,26 +21,26 @@ import pkg/stew/endians2 func remapAddr*( address: MultiAddress, ip: Option[IpAddress] = IpAddress.none, - port: Option[Port] = Port.none + port: Option[Port] = Port.none, ): MultiAddress = ## Remap addresses to new IP and/or Port ## - var - parts = ($address).split("/") + var parts = ($address).split("/") - parts[2] = if ip.isSome: + parts[2] = + if ip.isSome: $ip.get else: parts[2] - parts[4] = if port.isSome: + parts[4] = + if port.isSome: $port.get else: parts[4] - MultiAddress.init(parts.join("/")) - .expect("Should construct multiaddress") + MultiAddress.init(parts.join("/")).expect("Should construct multiaddress") proc getMultiAddrWithIPAndUDPPort*(ip: IpAddress, port: Port): MultiAddress = ## Creates a MultiAddress with the specified IP address and UDP port @@ -50,43 +51,41 @@ proc getMultiAddrWithIPAndUDPPort*(ip: IpAddress, port: Port): MultiAddress = ## ## Returns: ## A MultiAddress in the format "/ip4/
/udp/" or "/ip6/
/udp/" - + let ipFamily = if ip.family == IpAddressFamily.IPv4: "/ip4/" else: "/ip6/" return MultiAddress.init(ipFamily & $ip & "/udp/" & $port).expect("valid multiaddr") -proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[IpAddress], port: Option[Port]] = +proc getAddressAndPort*( + ma: MultiAddress +): tuple[ip: Option[IpAddress], port: Option[Port]] = try: # Try IPv4 first let ipv4Result = ma[multiCodec("ip4")] - let ip = if ipv4Result.isOk: - let ipBytes = ipv4Result.get() - .protoArgument() - .expect("Invalid IPv4 format") - let ipArray = [ipBytes[0], ipBytes[1], ipBytes[2], ipBytes[3]] - some(IpAddress(family: IPv4, address_v4: ipArray)) - else: - # Try IPv6 if IPv4 not found - let ipv6Result = ma[multiCodec("ip6")] - if ipv6Result.isOk: - let ipBytes = ipv6Result.get() - .protoArgument() - .expect("Invalid IPv6 format") - var ipArray: array[16, byte] - for i in 0..15: - ipArray[i] = ipBytes[i] - some(IpAddress(family: IPv6, address_v6: ipArray)) + let ip = + if ipv4Result.isOk: + let ipBytes = ipv4Result.get().protoArgument().expect("Invalid IPv4 format") + let ipArray = [ipBytes[0], ipBytes[1], ipBytes[2], ipBytes[3]] + some(IpAddress(family: IPv4, address_v4: ipArray)) else: - none(IpAddress) + # Try IPv6 if IPv4 not found + let ipv6Result = ma[multiCodec("ip6")] + if ipv6Result.isOk: + let ipBytes = ipv6Result.get().protoArgument().expect("Invalid IPv6 format") + var ipArray: array[16, byte] + for i in 0 .. 15: + ipArray[i] = ipBytes[i] + some(IpAddress(family: IPv6, address_v6: ipArray)) + else: + none(IpAddress) # Get TCP Port let portResult = ma[multiCodec("tcp")] - let port = if portResult.isOk: - let portBytes = portResult.get() - .protoArgument() - .expect("Invalid port format") - some(Port(fromBytesBE(uint16, portBytes))) - else: - none(Port) + let port = + if portResult.isOk: + let portBytes = portResult.get().protoArgument().expect("Invalid port format") + some(Port(fromBytesBE(uint16, portBytes))) + else: + none(Port) (ip: ip, port: port) except Exception: (ip: none(IpAddress), port: none(Port)) diff --git a/codex/utils/asyncheapqueue.nim b/codex/utils/asyncheapqueue.nim index e7d7edad..1b0dd8bc 100644 --- a/codex/utils/asyncheapqueue.nim +++ b/codex/utils/asyncheapqueue.nim @@ -15,7 +15,8 @@ import pkg/stew/results type QueueType* {.pure.} = enum - Min, Max + Min + Max AsyncHeapQueue*[T] = ref object of RootRef ## A priority queue @@ -31,11 +32,11 @@ type maxsize: int AsyncHQErrors* {.pure.} = enum - Empty, Full + Empty + Full proc newAsyncHeapQueue*[T]( - maxsize: int = 0, - queueType: QueueType = QueueType.Min + maxsize: int = 0, queueType: QueueType = QueueType.Min ): AsyncHeapQueue[T] = ## Creates a new asynchronous queue ``AsyncHeapQueue``. ## @@ -54,12 +55,12 @@ proc wakeupNext(waiters: var seq[Future[void]]) {.inline.} = var waiter = waiters[i] inc(i) - if not(waiter.finished()): + if not (waiter.finished()): waiter.complete() break if i > 0: - waiters.delete(0..(i-1)) + waiters.delete(0 .. (i - 1)) proc heapCmp[T](x, y: T, max: bool = false): bool {.inline.} = if max: @@ -93,17 +94,17 @@ proc siftup[T](heap: AsyncHeapQueue[T], p: int) = let startpos = pos let newitem = heap[pos] # Bubble up the smaller child until hitting a leaf. - var childpos = 2*pos + 1 # leftmost child position + var childpos = 2 * pos + 1 # leftmost child position while childpos < endpos: # Set childpos to index of smaller child. 
let rightpos = childpos + 1 if rightpos < endpos and - not heapCmp(heap[childpos], heap[rightpos], heap.queueType == QueueType.Max): + not heapCmp(heap[childpos], heap[rightpos], heap.queueType == QueueType.Max): childpos = rightpos # Move the smaller child up. heap.queue[pos] = heap[childpos] pos = childpos - childpos = 2*pos + 1 + childpos = 2 * pos + 1 # The leaf at pos is empty now. Put newitem there, and bubble it up # to its final resting place (by sifting its parents down). heap.queue[pos] = newitem @@ -131,7 +132,7 @@ proc pushNoWait*[T](heap: AsyncHeapQueue[T], item: T): Result[void, AsyncHQError return err(AsyncHQErrors.Full) heap.queue.add(item) - siftdown(heap, 0, len(heap)-1) + siftdown(heap, 0, len(heap) - 1) heap.getters.wakeupNext() return ok() @@ -147,7 +148,7 @@ proc push*[T](heap: AsyncHeapQueue[T], item: T) {.async, gcsafe.} = try: await putter except CatchableError as exc: - if not(heap.full()) and not(putter.cancelled()): + if not (heap.full()) and not (putter.cancelled()): heap.putters.wakeupNext() raise exc @@ -180,7 +181,7 @@ proc pop*[T](heap: AsyncHeapQueue[T]): Future[T] {.async.} = try: await getter except CatchableError as exc: - if not(heap.empty()) and not(getter.cancelled()): + if not (heap.empty()) and not (getter.cancelled()): heap.getters.wakeupNext() raise exc @@ -225,7 +226,9 @@ proc update*[T](heap: AsyncHeapQueue[T], item: T): bool = heap.siftup(0) return true -proc pushOrUpdateNoWait*[T](heap: AsyncHeapQueue[T], item: T): Result[void, AsyncHQErrors] = +proc pushOrUpdateNoWait*[T]( + heap: AsyncHeapQueue[T], item: T +): Result[void, AsyncHQErrors] = ## Update an item if it exists or push a new one ## @@ -285,12 +288,12 @@ proc size*[T](heap: AsyncHeapQueue[T]): int {.inline.} = ## Return the maximum number of elements in ``heap``. heap.maxsize -proc `[]`*[T](heap: AsyncHeapQueue[T], i: Natural) : T {.inline.} = +proc `[]`*[T](heap: AsyncHeapQueue[T], i: Natural): T {.inline.} = ## Access the i-th element of ``heap`` by order from first to last. ## ``heap[0]`` is the first element, ``heap[^1]`` is the last element. heap.queue[i] -proc `[]`*[T](heap: AsyncHeapQueue[T], i: BackwardsIndex) : T {.inline.} = +proc `[]`*[T](heap: AsyncHeapQueue[T], i: BackwardsIndex): T {.inline.} = ## Access the i-th element of ``heap`` by order from first to last. ## ``heap[0]`` is the first element, ``heap[^1]`` is the last element. heap.queue[len(heap.queue) - int(i)] @@ -314,14 +317,16 @@ proc contains*[T](heap: AsyncHeapQueue[T], item: T): bool {.inline.} = ## Return true if ``item`` is in ``heap`` or false if not found. Usually used ## via the ``in`` operator. for e in heap.queue.items(): - if e == item: return true + if e == item: + return true return false proc `$`*[T](heap: AsyncHeapQueue[T]): string = ## Turn an async queue ``heap`` into its string representation. 
var res = "[" for item in heap.queue.items(): - if len(res) > 1: res.add(", ") + if len(res) > 1: + res.add(", ") res.addQuoted(item) res.add("]") res diff --git a/codex/utils/asynciter.nim b/codex/utils/asynciter.nim index a1779552..b5371d24 100644 --- a/codex/utils/asynciter.nim +++ b/codex/utils/asynciter.nim @@ -10,10 +10,9 @@ export iter ## AsyncIter[T] is similar to `Iter[Future[T]]` with addition of methods specific to asynchronous processing ## -type - AsyncIter*[T] = ref object - finished: bool - next*: GenNext[Future[T]] +type AsyncIter*[T] = ref object + finished: bool + next*: GenNext[Future[T]] proc finish*[T](self: AsyncIter[T]): void = self.finished = true @@ -39,7 +38,12 @@ proc flatMap*[T, U](fut: Future[T], fn: Function[T, Future[U]]): Future[U] {.asy let t = await fut await fn(t) -proc new*[T](_: type AsyncIter[T], genNext: GenNext[Future[T]], isFinished: IsFinished, finishOnErr: bool = true): AsyncIter[T] = +proc new*[T]( + _: type AsyncIter[T], + genNext: GenNext[Future[T]], + isFinished: IsFinished, + finishOnErr: bool = true, +): AsyncIter[T] = ## Creates a new Iter using elements returned by supplier function `genNext`. ## Iter is finished whenever `isFinished` returns true. ## @@ -63,7 +67,9 @@ proc new*[T](_: type AsyncIter[T], genNext: GenNext[Future[T]], isFinished: IsFi iter.finish return item else: - raise newException(CatchableError, "AsyncIter is finished but next item was requested") + raise newException( + CatchableError, "AsyncIter is finished but next item was requested" + ) if isFinished(): iter.finish @@ -72,29 +78,30 @@ proc new*[T](_: type AsyncIter[T], genNext: GenNext[Future[T]], isFinished: IsFi return iter proc mapAsync*[T, U](iter: Iter[T], fn: Function[T, Future[U]]): AsyncIter[U] = - AsyncIter[U].new( - genNext = () => fn(iter.next()), - isFinished = () => iter.finished() - ) + AsyncIter[U].new(genNext = () => fn(iter.next()), isFinished = () => iter.finished()) proc new*[U, V: Ordinal](_: type AsyncIter[U], slice: HSlice[U, V]): AsyncIter[U] = ## Creates new Iter from a slice ## let iter = Iter[U].new(slice) - mapAsync[U, U](iter, - proc (i: U): Future[U] {.async.} = - i + mapAsync[U, U]( + iter, + proc(i: U): Future[U] {.async.} = + i, ) -proc new*[U, V, S: Ordinal](_: type AsyncIter[U], a: U, b: V, step: S = 1): AsyncIter[U] = +proc new*[U, V, S: Ordinal]( + _: type AsyncIter[U], a: U, b: V, step: S = 1 +): AsyncIter[U] = ## Creates new Iter in range a..b with specified step (default 1) ## let iter = Iter[U].new(a, b, step) - mapAsync[U, U](iter, - proc (i: U): Future[U] {.async.} = - i + mapAsync[U, U]( + iter, + proc(i: U): Future[U] {.async.} = + i, ) proc empty*[T](_: type AsyncIter[T]): AsyncIter[T] = @@ -103,17 +110,20 @@ proc empty*[T](_: type AsyncIter[T]): AsyncIter[T] = proc genNext(): Future[T] {.raises: [CatchableError].} = raise newException(CatchableError, "Next item requested from an empty AsyncIter") - proc isFinished(): bool = true + + proc isFinished(): bool = + true AsyncIter[T].new(genNext, isFinished) proc map*[T, U](iter: AsyncIter[T], fn: Function[T, Future[U]]): AsyncIter[U] = AsyncIter[U].new( - genNext = () => iter.next().flatMap(fn), - isFinished = () => iter.finished + genNext = () => iter.next().flatMap(fn), isFinished = () => iter.finished ) -proc mapFilter*[T, U](iter: AsyncIter[T], mapPredicate: Function[T, Future[Option[U]]]): Future[AsyncIter[U]] {.async.} = +proc mapFilter*[T, U]( + iter: AsyncIter[T], mapPredicate: Function[T, Future[Option[U]]] +): Future[AsyncIter[U]] {.async.} = var nextFutU: 
Option[Future[U]] proc tryFetch(): Future[void] {.async.} = @@ -145,7 +155,9 @@ proc mapFilter*[T, U](iter: AsyncIter[T], mapPredicate: Function[T, Future[Optio await tryFetch() AsyncIter[U].new(genNext, isFinished) -proc filter*[T](iter: AsyncIter[T], predicate: Function[T, Future[bool]]): Future[AsyncIter[T]] {.async.} = +proc filter*[T]( + iter: AsyncIter[T], predicate: Function[T, Future[bool]] +): Future[AsyncIter[T]] {.async.} = proc wrappedPredicate(t: T): Future[Option[T]] {.async.} = if await predicate(t): some(t) @@ -158,8 +170,9 @@ proc delayBy*[T](iter: AsyncIter[T], d: Duration): AsyncIter[T] = ## Delays emitting each item by given duration ## - map[T, T](iter, - proc (t: T): Future[T] {.async.} = + map[T, T]( + iter, + proc(t: T): Future[T] {.async.} = await sleepAsync(d) - t + t, ) diff --git a/codex/utils/asyncspawn.nim b/codex/utils/asyncspawn.nim index 6717e5e1..95a9f014 100644 --- a/codex/utils/asyncspawn.nim +++ b/codex/utils/asyncspawn.nim @@ -1,10 +1,10 @@ import pkg/chronos proc asyncSpawn*(future: Future[void], ignore: type CatchableError) = - proc ignoringError {.async.} = + proc ignoringError() {.async.} = try: await future except ignore: discard - asyncSpawn ignoringError() + asyncSpawn ignoringError() diff --git a/codex/utils/asyncstatemachine.nim b/codex/utils/asyncstatemachine.nim index 6bddc24e..572ae246 100644 --- a/codex/utils/asyncstatemachine.nim +++ b/codex/utils/asyncstatemachine.nim @@ -3,7 +3,7 @@ import pkg/chronos import ../logutils import ./trackedfutures -{.push raises:[].} +{.push raises: [].} type Machine* = ref object of RootObj @@ -12,9 +12,10 @@ type scheduled: AsyncQueue[Event] started: bool trackedFutures: TrackedFutures + State* = ref object of RootObj Query*[T] = proc(state: State): T - Event* = proc(state: State): ?State {.gcsafe, raises:[].} + Event* = proc(state: State): ?State {.gcsafe, raises: [].} logScope: topics = "statemachine" @@ -26,7 +27,7 @@ method `$`*(state: State): string {.base, gcsafe.} = raiseAssert "not implemented" proc transition(_: type Event, previous, next: State): Event = - return proc (state: State): ?State = + return proc(state: State): ?State = if state == previous: return some next @@ -52,10 +53,10 @@ method onError*(state: State, error: ref CatchableError): ?State {.base.} = raise (ref Defect)(msg: "error in state machine: " & error.msg, parent: error) proc onError(machine: Machine, error: ref CatchableError): Event = - return proc (state: State): ?State = + return proc(state: State): ?State = state.onError(error) -proc run(machine: Machine, state: State) {.async: (raises:[]).} = +proc run(machine: Machine, state: State) {.async: (raises: []).} = try: if next =? 
await state.run(machine): machine.schedule(Event.transition(state, next)) @@ -73,7 +74,11 @@ proc scheduler(machine: Machine) {.async: (raises: []).} = if not running.isNil and not running.finished: trace "cancelling current state", state = $machine.state await running.cancelAndWait() - let fromState = if machine.state.isNil: "" else: $machine.state + let fromState = + if machine.state.isNil: + "" + else: + $machine.state machine.state = next debug "enter state", state = fromState & " => " & $machine.state running = machine.run(machine.state) diff --git a/codex/utils/digest.nim b/codex/utils/digest.nim index 4b3e68bf..57c4cd4e 100644 --- a/codex/utils/digest.nim +++ b/codex/utils/digest.nim @@ -1,8 +1,7 @@ - from pkg/libp2p import MultiHash func digestBytes*(mhash: MultiHash): seq[byte] = ## Extract hash digestBytes ## - mhash.data.buffer[mhash.dpos..<mhash.dpos + mhash.size] + mhash.data.buffer[mhash.dpos ..< mhash.dpos + mhash.size] diff --git a/codex/utils/iter.nim b/codex/utils/iter.nim [...] proc isFinished(): bool = - (step > 0 and i > b) or - (step < 0 and i < b) + (step > 0 and i > b) or (step < 0 and i < b) Iter[U].new(genNext, isFinished) @@ -83,8 +87,7 @@ proc new*[T](_: type Iter[T], items: seq[T]): Iter[T] = ## Creates a new Iter from a sequence ## - Iter[int].new(0..<items.len).map((i: int) => items[i]) + Iter[int].new(0 ..< items.len).map((i: int) => items[i]) proc empty*[T](_: type Iter[T]): Iter[T] = ## Creates an empty Iter @@ -92,15 +95,14 @@ proc empty*[T](_: type Iter[T]): Iter[T] = proc genNext(): T {.raises: [CatchableError].} = raise newException(CatchableError, "Next item requested from an empty Iter") - proc isFinished(): bool = true + + proc isFinished(): bool = + true Iter[T].new(genNext, isFinished) proc map*[T, U](iter: Iter[T], fn: Function[T, U]): Iter[U] = - Iter[U].new( - genNext = () => fn(iter.next()), - isFinished = () => iter.finished - ) + Iter[U].new(genNext = () => fn(iter.next()), isFinished = () => iter.finished) proc mapFilter*[T, U](iter: Iter[T], mapPredicate: Function[T, Option[U]]): Iter[U] = var nextUOrErr: Option[Result[U, ref CatchableError]] diff --git a/codex/utils/json.nim b/codex/utils/json.nim index 4113b632..5bd16846 100644 --- a/codex/utils/json.nim +++ b/codex/utils/json.nim @@ -1,8 +1,8 @@ - import std/options import std/typetraits from pkg/ethers import Address -from pkg/libp2p import Cid, PeerId, SignedPeerRecord, MultiAddress, AddressInfo, init, `$` +from pkg/libp2p import + Cid, PeerId, SignedPeerRecord, MultiAddress, AddressInfo, init, `$` import pkg/contractabi import pkg/codexdht/discv5/node as dn import pkg/serde/json @@ -11,24 +11,27 @@ import ../errors export json - -proc fromJson*( - _: type Cid, - json: JsonNode -): ?!Cid = +proc fromJson*(_: type Cid, json: JsonNode): ?!Cid = expectJsonKind(Cid, JString, json) Cid.init(json.str).mapFailure -func `%`*(cid: Cid): JsonNode = % $cid +func `%`*(cid: Cid): JsonNode = + % $cid -func `%`*(obj: PeerId): JsonNode = % $obj +func `%`*(obj: PeerId): JsonNode = + % $obj -func `%`*(obj: SignedPeerRecord): JsonNode = % $obj +func `%`*(obj: SignedPeerRecord): JsonNode = + % $obj -func `%`*(obj: dn.Address): JsonNode = % $obj +func `%`*(obj: dn.Address): JsonNode = + % $obj -func `%`*(obj: AddressInfo): JsonNode = % $obj.address +func `%`*(obj: AddressInfo): JsonNode = + % $obj.address -func `%`*(obj: MultiAddress): JsonNode = % $obj +func `%`*(obj: MultiAddress): JsonNode = + % $obj -func `%`*(address: ethers.Address): JsonNode = % $address +func `%`*(address: ethers.Address): JsonNode = + % $address diff --git a/codex/utils/keyutils.nim b/codex/utils/keyutils.nim index c7f76263..664396d3 100644 --- a/codex/utils/keyutils.nim +++ b/codex/utils/keyutils.nim @@ -8,7 +8,8 @@ ## those terms.
import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/questionable/results import pkg/libp2p/crypto/crypto @@ -28,17 +29,18 @@ proc setupKey*(path: string): ?!PrivateKey = if not path.fileAccessible({AccessFlags.Find}): info "Creating a private key and saving it" let - res = ? PrivateKey.random(Rng.instance()[]).mapFailure(CodexKeyError) - bytes = ? res.getBytes().mapFailure(CodexKeyError) + res = ?PrivateKey.random(Rng.instance()[]).mapFailure(CodexKeyError) + bytes = ?res.getBytes().mapFailure(CodexKeyError) - ? path.secureWriteFile(bytes).mapFailure(CodexKeyError) + ?path.secureWriteFile(bytes).mapFailure(CodexKeyError) return PrivateKey.init(bytes).mapFailure(CodexKeyError) info "Found a network private key" - if not ? checkSecureFile(path).mapFailure(CodexKeyError): + if not ?checkSecureFile(path).mapFailure(CodexKeyError): warn "The network private key file is not safe, aborting" return failure newException( - CodexKeyUnsafeError, "The network private key file is not safe") + CodexKeyUnsafeError, "The network private key file is not safe" + ) - let kb = ? path.readAllBytes().mapFailure(CodexKeyError) + let kb = ?path.readAllBytes().mapFailure(CodexKeyError) return PrivateKey.init(kb).mapFailure(CodexKeyError) diff --git a/codex/utils/natutils.nim b/codex/utils/natutils.nim index 86497e12..8a641e95 100644 --- a/codex/utils/natutils.nim +++ b/codex/utils/natutils.nim @@ -1,27 +1,26 @@ {.push raises: [].} import - std/[tables, hashes], - stew/results, stew/shims/net as stewNet, chronos, chronicles + std/[tables, hashes], stew/results, stew/shims/net as stewNet, chronos, chronicles import pkg/libp2p -type - NatStrategy* = enum - NatAny - NatUpnp - NatPmp - NatNone +type NatStrategy* = enum + NatAny + NatUpnp + NatPmp + NatNone -type - IpLimits* = object - limit*: uint - ips: Table[IpAddress, uint] +type IpLimits* = object + limit*: uint + ips: Table[IpAddress, uint] func hash*(ip: IpAddress): Hash = case ip.family - of IpAddressFamily.IPv6: hash(ip.address_v6) - of IpAddressFamily.IPv4: hash(ip.address_v4) + of IpAddressFamily.IPv6: + hash(ip.address_v6) + of IpAddressFamily.IPv4: + hash(ip.address_v4) func inc*(ipLimits: var IpLimits, ip: IpAddress): bool = let val = ipLimits.ips.getOrDefault(ip, 0) @@ -39,10 +38,7 @@ func dec*(ipLimits: var IpLimits, ip: IpAddress) = ipLimits.ips[ip] = val - 1 func isGlobalUnicast*(address: TransportAddress): bool = - if address.isGlobal() and address.isUnicast(): - true - else: - false + if address.isGlobal() and address.isUnicast(): true else: false func isGlobalUnicast*(address: IpAddress): bool = let a = initTAddress(address, Port(0)) @@ -53,16 +49,19 @@ proc getRouteIpv4*(): Result[IpAddress, cstring] = # Note: `publicAddress` is only used as an "example" IP to find the best route, # no data is sent over the network to this IP! let - publicAddress = TransportAddress(family: AddressFamily.IPv4, - address_v4: [1'u8, 1, 1, 1], port: Port(0)) + publicAddress = TransportAddress( + family: AddressFamily.IPv4, address_v4: [1'u8, 1, 1, 1], port: Port(0) + ) route = getBestRoute(publicAddress) if route.source.isUnspecified(): err("No best ipv4 route found") else: - let ip = try: route.source.address() - except ValueError as e: - # This should not occur really. - error "Address conversion error", exception = e.name, msg = e.msg - return err("Invalid IP address") - ok(ip) \ No newline at end of file + let ip = + try: + route.source.address() + except ValueError as e: + # This should not occur really.
+ error "Address conversion error", exception = e.name, msg = e.msg + return err("Invalid IP address") + ok(ip) diff --git a/codex/utils/options.nim b/codex/utils/options.nim index 0362eebf..ad44a717 100644 --- a/codex/utils/options.nim +++ b/codex/utils/options.nim @@ -31,7 +31,6 @@ template WrapOption*(input: untyped): type = else: Option[input] - macro createType(t: typedesc): untyped = var objectType = getType(t) @@ -47,22 +46,26 @@ macro createType(t: typedesc): untyped = # re-wrapping already filed which is `Option[T]`. for field in objectType[2]: let fieldType = getTypeInst(field) - let newFieldNode = - nnkIdentDefs.newTree(ident($field), nnkCall.newTree(ident("WrapOption"), fieldType), newEmptyNode()) + let newFieldNode = nnkIdentDefs.newTree( + ident($field), nnkCall.newTree(ident("WrapOption"), fieldType), newEmptyNode() + ) fields.add(newFieldNode) # Creates new object type T with the fields lists from steps above. let tSym = genSym(nskType, "T") nnkStmtList.newTree( - nnkTypeSection.newTree( - nnkTypeDef.newTree(tSym, newEmptyNode(), nnkObjectTy.newTree(newEmptyNode(), newEmptyNode(), fields)) - ), - tSym + nnkTypeSection.newTree( + nnkTypeDef.newTree( + tSym, + newEmptyNode(), + nnkObjectTy.newTree(newEmptyNode(), newEmptyNode(), fields), + ) + ), + tSym, ) template Optionalize*(t: typed): untyped = ## Takes object type and wraps all the first level fields into ## Option type unless it is already Option type. createType(t) - diff --git a/codex/utils/poseidon2digest.nim b/codex/utils/poseidon2digest.nim index efdb3c6a..6eaf21e9 100644 --- a/codex/utils/poseidon2digest.nim +++ b/codex/utils/poseidon2digest.nim @@ -15,27 +15,24 @@ import pkg/stew/byteutils import ../merkletree func spongeDigest*( - _: type Poseidon2Hash, - bytes: openArray[byte], - rate: static int = 2): ?!Poseidon2Hash = + _: type Poseidon2Hash, bytes: openArray[byte], rate: static int = 2 +): ?!Poseidon2Hash = ## Hashes chunks of data with a sponge of rate 1 or 2. ## success Sponge.digest(bytes, rate) func spongeDigest*( - _: type Poseidon2Hash, - bytes: openArray[Bn254Fr], - rate: static int = 2): ?!Poseidon2Hash = + _: type Poseidon2Hash, bytes: openArray[Bn254Fr], rate: static int = 2 +): ?!Poseidon2Hash = ## Hashes chunks of elements with a sponge of rate 1 or 2. ## success Sponge.digest(bytes, rate) func digestTree*( - _: type Poseidon2Tree, - bytes: openArray[byte], - chunkSize: int): ?!Poseidon2Tree = + _: type Poseidon2Tree, bytes: openArray[byte], chunkSize: int +): ?!Poseidon2Tree = ## Hashes chunks of data with a sponge of rate 2, and combines the ## resulting chunk hashes in a merkle root. ## @@ -50,30 +47,27 @@ func digestTree*( while index < bytes.len: let start = index let finish = min(index + chunkSize, bytes.len) - let digest = ? Poseidon2Hash.spongeDigest(bytes.toOpenArray(start, finish - 1), 2) + let digest = ?Poseidon2Hash.spongeDigest(bytes.toOpenArray(start, finish - 1), 2) leaves.add(digest) index += chunkSize return Poseidon2Tree.init(leaves) func digest*( - _: type Poseidon2Tree, - bytes: openArray[byte], - chunkSize: int): ?!Poseidon2Hash = + _: type Poseidon2Tree, bytes: openArray[byte], chunkSize: int +): ?!Poseidon2Hash = ## Hashes chunks of data with a sponge of rate 2, and combines the ## resulting chunk hashes in a merkle root. ## - (? 
Poseidon2Tree.digestTree(bytes, chunkSize)).root + (?Poseidon2Tree.digestTree(bytes, chunkSize)).root func digestMhash*( - _: type Poseidon2Tree, - bytes: openArray[byte], - chunkSize: int): ?!MultiHash = + _: type Poseidon2Tree, bytes: openArray[byte], chunkSize: int +): ?!MultiHash = ## Hashes chunks of data with a sponge of rate 2 and ## returns the multihash of the root ## - let - hash = ? Poseidon2Tree.digest(bytes, chunkSize) + let hash = ?Poseidon2Tree.digest(bytes, chunkSize) - ? MultiHash.init(Pos2Bn128MrklCodec, hash).mapFailure + ?MultiHash.init(Pos2Bn128MrklCodec, hash).mapFailure diff --git a/codex/utils/stintutils.nim b/codex/utils/stintutils.nim index 125ff8b6..48e332d0 100644 --- a/codex/utils/stintutils.nim +++ b/codex/utils/stintutils.nim @@ -1,4 +1,4 @@ import pkg/stint -func fromDecimal*(T: typedesc[StUint|StInt], s: string): T {.inline.} = +func fromDecimal*(T: typedesc[StUint | StInt], s: string): T {.inline.} = parse(s, type result, radix = 10) diff --git a/codex/utils/timer.nim b/codex/utils/timer.nim index 9cf59489..0a5a940a 100644 --- a/codex/utils/timer.nim +++ b/codex/utils/timer.nim @@ -12,14 +12,15 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/chronos import ../logutils type - TimerCallback* = proc(): Future[void] {.gcsafe, upraises:[].} + TimerCallback* = proc(): Future[void] {.gcsafe, upraises: [].} Timer* = ref object of RootObj callback: TimerCallback interval: Duration @@ -38,12 +39,14 @@ proc timerLoop(timer: Timer) {.async: (raises: []).} = except CancelledError: discard # do not propagate as timerLoop is asyncSpawned except CatchableError as exc: - error "Timer caught unhandled exception: ", name=timer.name, msg=exc.msg + error "Timer caught unhandled exception: ", name = timer.name, msg = exc.msg -method start*(timer: Timer, callback: TimerCallback, interval: Duration) {.gcsafe, base.} = +method start*( + timer: Timer, callback: TimerCallback, interval: Duration +) {.gcsafe, base.} = if timer.loopFuture != nil: return - trace "Timer starting: ", name=timer.name + trace "Timer starting: ", name = timer.name timer.callback = callback timer.interval = interval timer.loopFuture = timerLoop(timer) @@ -51,6 +54,6 @@ method start*(timer: Timer, callback: TimerCallback, interval: Duration) {.gcsaf method stop*(timer: Timer) {.async, base.} = if timer.loopFuture != nil and not timer.loopFuture.finished: - trace "Timer stopping: ", name=timer.name + trace "Timer stopping: ", name = timer.name await timer.loopFuture.cancelAndWait() timer.loopFuture = nil diff --git a/codex/utils/trackedfutures.nim b/codex/utils/trackedfutures.nim index 2505ffe2..eb3cc219 100644 --- a/codex/utils/trackedfutures.nim +++ b/codex/utils/trackedfutures.nim @@ -5,15 +5,15 @@ import ../logutils {.push raises: [].} -type - TrackedFutures* = ref object - futures: Table[uint, FutureBase] - cancelling: bool +type TrackedFutures* = ref object + futures: Table[uint, FutureBase] + cancelling: bool logScope: topics = "trackable futures" -proc len*(self: TrackedFutures): int = self.futures.len +proc len*(self: TrackedFutures): int = + self.futures.len proc removeFuture(self: TrackedFutures, future: FutureBase) = if not self.cancelling and not future.isNil: diff --git a/codex/validation.nim b/codex/validation.nim index 3e9e63ff..6e3135e4 100644 --- a/codex/validation.nim +++ b/codex/validation.nim @@ -12,28 +12,23 @@ export market export sets export validationconfig -type - Validation* = ref object - slots: HashSet[SlotId] - clock: Clock - market: Market - 
subscriptions: seq[Subscription] - running: Future[void] - periodicity: Periodicity - proofTimeout: UInt256 - config: ValidationConfig +type Validation* = ref object + slots: HashSet[SlotId] + clock: Clock + market: Market + subscriptions: seq[Subscription] + running: Future[void] + periodicity: Periodicity + proofTimeout: UInt256 + config: ValidationConfig -const - MaxStorageRequestDuration = 30.days +const MaxStorageRequestDuration = 30.days logScope: topics = "codex validator" proc new*( - _: type Validation, - clock: Clock, - market: Market, - config: ValidationConfig + _: type Validation, clock: Clock, market: Market, config: ValidationConfig ): Validation = Validation(clock: clock, market: market, config: config) @@ -49,20 +44,17 @@ proc waitUntilNextPeriod(validation: Validation) {.async.} = trace "Waiting until next period", currentPeriod = period await validation.clock.waitUntil(periodEnd.truncate(int64) + 1) -func groupIndexForSlotId*(slotId: SlotId, - validationGroups: ValidationGroups): uint16 = +func groupIndexForSlotId*(slotId: SlotId, validationGroups: ValidationGroups): uint16 = let slotIdUInt256 = UInt256.fromBytesBE(slotId.toArray) (slotIdUInt256 mod validationGroups.u256).truncate(uint16) func maxSlotsConstraintRespected(validation: Validation): bool = - validation.config.maxSlots == 0 or - validation.slots.len < validation.config.maxSlots + validation.config.maxSlots == 0 or validation.slots.len < validation.config.maxSlots func shouldValidateSlot(validation: Validation, slotId: SlotId): bool = without validationGroups =? validation.config.groups: return true - groupIndexForSlotId(slotId, validationGroups) == - validation.config.groupIndex + groupIndexForSlotId(slotId, validationGroups) == validation.config.groupIndex proc subscribeSlotFilled(validation: Validation) {.async.} = proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = @@ -72,6 +64,7 @@ proc subscribeSlotFilled(validation: Validation) {.async.} = if validation.shouldValidateSlot(slotId): trace "Adding slot", slotId validation.slots.incl(slotId) + let subscription = await validation.market.subscribeSlotFilled(onSlotFilled) validation.subscriptions.add(subscription) @@ -85,9 +78,9 @@ proc removeSlotsThatHaveEnded(validation: Validation) {.async.} = ended.incl(slotId) validation.slots.excl(ended) -proc markProofAsMissing(validation: Validation, - slotId: SlotId, - period: Period) {.async.} = +proc markProofAsMissing( + validation: Validation, slotId: SlotId, period: Period +) {.async.} = logScope: currentPeriod = validation.getCurrentPeriod() @@ -122,15 +115,16 @@ proc run(validation: Validation) {.async: (raises: []).} = except CatchableError as e: error "Validation failed", msg = e.msg -proc epochForDurationBackFromNow(validation: Validation, - duration: Duration): SecondsSince1970 = +proc epochForDurationBackFromNow( + validation: Validation, duration: Duration +): SecondsSince1970 = return validation.clock.now - duration.secs proc restoreHistoricalState(validation: Validation) {.async.} = trace "Restoring historical state..." 
let startTimeEpoch = validation.epochForDurationBackFromNow(MaxStorageRequestDuration) - let slotFilledEvents = await validation.market.queryPastSlotFilledEvents( - fromTime = startTimeEpoch) + let slotFilledEvents = + await validation.market.queryPastSlotFilledEvents(fromTime = startTimeEpoch) for event in slotFilledEvents: if not validation.maxSlotsConstraintRespected: break @@ -142,8 +136,8 @@ proc restoreHistoricalState(validation: Validation) {.async.} = trace "Historical state restored", numberOfSlots = validation.slots.len proc start*(validation: Validation) {.async.} = - trace "Starting validator", groups = validation.config.groups, - groupIndex = validation.config.groupIndex + trace "Starting validator", + groups = validation.config.groups, groupIndex = validation.config.groupIndex validation.periodicity = await validation.market.periodicity() validation.proofTimeout = await validation.market.proofTimeout() await validation.subscribeSlotFilled() diff --git a/codex/validationconfig.nim b/codex/validationconfig.nim index dd36a25a..3e21c4fa 100644 --- a/codex/validationconfig.nim +++ b/codex/validationconfig.nim @@ -3,7 +3,7 @@ import pkg/questionable import pkg/questionable/results type - ValidationGroups* = range[2..65535] + ValidationGroups* = range[2 .. 65535] MaxSlots* = int ValidationConfig* = object maxSlots: MaxSlots @@ -14,17 +14,16 @@ func init*( _: type ValidationConfig, maxSlots: MaxSlots, groups: ?ValidationGroups, - groupIndex: uint16 = 0): ?!ValidationConfig = + groupIndex: uint16 = 0, +): ?!ValidationConfig = if maxSlots < 0: return failure "The value of maxSlots must be greater than " & - fmt"or equal to 0! (got: {maxSlots})" + fmt"or equal to 0! (got: {maxSlots})" if validationGroups =? groups and groupIndex >= uint16(validationGroups): return failure "The value of the group index must be less than " & - fmt"validation groups! (got: {groupIndex = }, " & - fmt"groups = {validationGroups})" - - success ValidationConfig( - maxSlots: maxSlots, groups: groups, groupIndex: groupIndex) + fmt"validation groups! (got: {groupIndex = }, " & fmt"groups = {validationGroups})" + + success ValidationConfig(maxSlots: maxSlots, groups: groups, groupIndex: groupIndex) func maxSlots*(config: ValidationConfig): MaxSlots = config.maxSlots diff --git a/flake.nix b/flake.nix index 37d459a0..4302b3d2 100644 --- a/flake.nix +++ b/flake.nix @@ -22,7 +22,7 @@ circomCompatPkg = circom-compat.packages.${system}.default; buildTarget = pkgsFor.${system}.callPackage ./nix/default.nix rec { inherit stableSystems circomCompatPkg; - src = pkgsFor.${system}.lib.traceValFn (v: "self.submodules: ${toString v.submodules}") self; + src = self; }; build = targets: buildTarget.override { inherit targets; }; in rec { diff --git a/nix/README.md b/nix/README.md index d1886794..fa34ffe2 100644 --- a/nix/README.md +++ b/nix/README.md @@ -4,7 +4,7 @@ A development shell can be started using: ```sh -nix develop +nix develop '.?submodules=1#' ``` ## Building diff --git a/nix/checksums.nix b/nix/checksums.nix new file mode 100644 index 00000000..d79345d2 --- /dev/null +++ b/nix/checksums.nix @@ -0,0 +1,12 @@ +{ pkgs ? import <nixpkgs> { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "checksums"; + rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\"$" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes.
+ hash = "sha256-Bm5iJoT2kAvcTexiLMFBa9oU5gf7d4rWjo3OiN7obWQ="; +} diff --git a/nix/csources.nix b/nix/csources.nix new file mode 100644 index 00000000..33cd9d0f --- /dev/null +++ b/nix/csources.nix @@ -0,0 +1,12 @@ +{ pkgs ? import { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/config/build_config.txt; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "csources_v2"; + rev = tools.findKeyValue "^nim_csourcesHash=([a-f0-9]+)$" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. + hash = "sha256-UCLtoxOcGYjBdvHx7A47x6FjLMi6VZqpSs65MN7fpBs="; +} diff --git a/nix/default.nix b/nix/default.nix index b4030095..691e2af3 100644 --- a/nix/default.nix +++ b/nix/default.nix @@ -3,15 +3,15 @@ src ? ../., targets ? ["all"], # Options: 0,1,2 - verbosity ? 0, - # Use system Nim compiler instead of building it with nimbus-build-system - useSystemNim ? true, + verbosity ? 1, commit ? builtins.substring 0 7 (src.rev or "dirty"), # These are the only platforms tested in CI and considered stable. stableSystems ? [ "x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" ], + # Perform 2-stage bootstrap instead of 3-stage to save time. + quickAndDirty ? true, circomCompatPkg ? ( builtins.getFlake "github:codex-storage/circom-compat-ffi" ).packages.${builtins.currentSystem}.default @@ -26,7 +26,7 @@ let revision = lib.substring 0 8 (src.rev or "dirty"); tools = callPackage ./tools.nix {}; -in pkgs.gcc11Stdenv.mkDerivation rec { +in pkgs.gcc13Stdenv.mkDerivation rec { pname = "codex"; @@ -49,10 +49,7 @@ in pkgs.gcc11Stdenv.mkDerivation rec { in with pkgs; [ cmake - pkg-config - nimble which - nim-unwrapped-1 lsb-release circomCompatPkg fakeGit @@ -66,11 +63,23 @@ in pkgs.gcc11Stdenv.mkDerivation rec { makeFlags = targets ++ [ "V=${toString verbosity}" - "USE_SYSTEM_NIM=${if useSystemNim then "1" else "0"}" + "QUICK_AND_DIRTY_COMPILER=${if quickAndDirty then "1" else "0"}" + "QUICK_AND_DIRTY_NIMBLE=${if quickAndDirty then "1" else "0"}" ]; configurePhase = '' - patchShebangs . > /dev/null + patchShebangs . vendor/nimbus-build-system > /dev/null + make nimbus-build-system-paths + ''; + + preBuild = '' + pushd vendor/nimbus-build-system/vendor/Nim + mkdir dist + cp -r ${callPackage ./nimble.nix {}} dist/nimble + cp -r ${callPackage ./checksums.nix {}} dist/checksums + cp -r ${callPackage ./csources.nix {}} csources_v2 + chmod 777 -R dist/nimble csources_v2 + popd ''; installPhase = '' diff --git a/nix/nimble.nix b/nix/nimble.nix new file mode 100644 index 00000000..39c5e0ff --- /dev/null +++ b/nix/nimble.nix @@ -0,0 +1,13 @@ +{ pkgs ? import { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "nimble"; + fetchSubmodules = true; + rev = tools.findKeyValue "^ +NimbleStableCommit = \"([a-f0-9]+)\".+" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. 
+ hash = "sha256-Rz48sGUKZEAp+UySla+MlsOfsERekuGKw69Tm11fDz8="; +} diff --git a/openapi.yaml b/openapi.yaml index c38c916f..9d401e8f 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -6,7 +6,7 @@ info: description: "List of endpoints and interfaces available to Codex API users" security: - - { } + - {} components: schemas: @@ -50,9 +50,9 @@ components: type: string description: Address of Ethereum address - Reward: + PricePerBytePerSecond: type: string - description: The maximum amount of tokens paid per second per slot to hosts the client is willing to pay + description: The amount of tokens paid per byte per second per slot to hosts the client is willing to pay Duration: type: string @@ -157,12 +157,12 @@ components: description: Total size of availability's storage in bytes as decimal string duration: $ref: "#/components/schemas/Duration" - minPrice: + minPricePerBytePerSecond: type: string - description: Minimal price paid (in amount of tokens) for the whole hosted request's slot for the request's duration as decimal string - maxCollateral: + description: Minimal price per byte per second paid (in amount of tokens) for the hosted request's slot for the request's duration as decimal string + totalCollateral: type: string - description: Maximum collateral user is willing to pay per filled Slot (in amount of tokens) as decimal string + description: Total collateral (in amount of tokens) that can be used for matching requests SalesAvailabilityREAD: allOf: @@ -178,8 +178,8 @@ components: - $ref: "#/components/schemas/SalesAvailability" - required: - totalSize - - minPrice - - maxCollateral + - minPricePerBytePerSecond + - totalCollateral - duration Slot: @@ -243,16 +243,16 @@ components: StorageRequestCreation: type: object required: - - reward + - pricePerBytePerSecond - duration - proofProbability - - collateral + - collateralPerByte - expiry properties: duration: $ref: "#/components/schemas/Duration" - reward: - $ref: "#/components/schemas/Reward" + pricePerBytePerSecond: + $ref: "#/components/schemas/PricePerBytePerSecond" proofProbability: $ref: "#/components/schemas/ProofProbability" nodes: @@ -263,16 +263,16 @@ components: description: Additional number of nodes on top of the `nodes` property that can be lost before pronouncing the content lost type: integer default: 0 - collateral: + collateralPerByte: type: string - description: Number as decimal string that represents how much collateral is asked from hosts that wants to fill a slots + description: Number as decimal string that represents how much collateral per byte is asked from hosts that wants to fill a slots expiry: type: string description: Number as decimal string that represents expiry threshold in seconds from when the Request is submitted. When the threshold is reached and the Request does not find requested amount of nodes to host the data, the Request is voided. The number of seconds can not be higher then the Request's duration itself. StorageAsk: type: object required: - - reward + - pricePerBytePerSecond properties: slots: description: Number of slots (eq. 
hosts) that the Request want to have the content spread over @@ -284,8 +284,8 @@ components: $ref: "#/components/schemas/Duration" proofProbability: $ref: "#/components/schemas/ProofProbability" - reward: - $ref: "#/components/schemas/Reward" + pricePerBytePerSecond: + $ref: "#/components/schemas/PricePerBytePerSecond" maxSlotLoss: type: integer description: Max slots that can be lost without data considered to be lost @@ -418,14 +418,14 @@ paths: description: | If `addrs` param is supplied, it will be used to dial the peer, otherwise the `peerId` is used to invoke peer discovery, if it succeeds the returned addresses will be used to dial. - tags: [ Node ] + tags: [Node] operationId: connectPeer parameters: - in: path name: peerId required: true schema: - $ref: "#/components/schemas/PeerId" + $ref: "#/components/schemas/PeerId" description: Peer that should be dialed. - in: query name: addrs @@ -448,7 +448,7 @@ paths: "/data": get: summary: "Lists manifest CIDs stored locally in node." - tags: [ Data ] + tags: [Data] operationId: listData responses: "200": @@ -468,7 +468,7 @@ paths: description: Well it was bad-bad post: summary: "Upload a file in a streaming manner. Once finished, the file is stored in the node and can be retrieved by any node in the network using the returned CID." - tags: [ Data ] + tags: [Data] operationId: upload parameters: - name: content-type @@ -484,7 +484,7 @@ paths: description: The content disposition used to send the filename. schema: type: string - example: "attachment; filename=\"codex.png\"" + example: 'attachment; filename="codex.png"' requestBody: content: application/octet-stream: @@ -504,14 +504,14 @@ paths: "/data/{cid}": get: summary: "Download a file from the local node in a streaming manner. If the file is not available locally, a 404 is returned." - tags: [ Data ] + tags: [Data] operationId: downloadLocal parameters: - in: path name: cid required: true schema: - $ref: "#/components/schemas/Cid" + $ref: "#/components/schemas/Cid" description: File to be downloaded. responses: @@ -532,14 +532,14 @@ paths: "/data/{cid}/network": post: summary: "Download a file from the network to the local node if it's not available locally. Note: Download is performed async. Call can return before download is completed." - tags: [ Data ] + tags: [Data] operationId: downloadNetwork parameters: - in: path name: cid required: true schema: - $ref: "#/components/schemas/Cid" + $ref: "#/components/schemas/Cid" description: "File to be downloaded." responses: "200": @@ -558,14 +558,14 @@ paths: "/data/{cid}/network/stream": get: summary: "Download a file from the network in a streaming manner. If the file is not available locally, it will be retrieved from other nodes in the network if able." - tags: [ Data ] + tags: [Data] operationId: downloadNetworkStream parameters: - in: path name: cid required: true schema: - $ref: "#/components/schemas/Cid" + $ref: "#/components/schemas/Cid" description: "File to be downloaded." responses: "200": @@ -585,14 +585,14 @@ paths: "/data/{cid}/network/manifest": get: summary: "Download only the dataset manifest from the network to the local node if it's not available locally." - tags: [ Data ] + tags: [Data] operationId: downloadNetworkManifest parameters: - in: path name: cid required: true schema: - $ref: "#/components/schemas/Cid" + $ref: "#/components/schemas/Cid" description: "File for which the manifest is to be downloaded." 
responses: "200": @@ -611,7 +611,7 @@ paths: "/space": get: summary: "Gets a summary of the storage space allocation of the node." - tags: [ Data ] + tags: [Data] operationId: space responses: "200": @@ -627,7 +627,7 @@ paths: "/sales/slots": get: summary: "Returns active slots" - tags: [ Marketplace ] + tags: [Marketplace] operationId: getActiveSlots responses: "200": @@ -645,7 +645,7 @@ paths: "/sales/slots/{slotId}": get: summary: "Returns active slot with id {slotId} for the host" - tags: [ Marketplace ] + tags: [Marketplace] operationId: getActiveSlotById parameters: - in: path @@ -674,7 +674,7 @@ paths: "/sales/availability": get: summary: "Returns storage that is for sale" - tags: [ Marketplace ] + tags: [Marketplace] operationId: getAvailabilities responses: "200": @@ -693,7 +693,7 @@ paths: post: summary: "Offers storage for sale" operationId: offerStorage - tags: [ Marketplace ] + tags: [Marketplace] requestBody: content: application/json: @@ -721,7 +721,7 @@ paths: The new parameters will be only considered for new requests. Existing Requests linked to this Availability will continue as is. operationId: updateOfferedStorage - tags: [ Marketplace ] + tags: [Marketplace] parameters: - in: path name: id @@ -753,7 +753,7 @@ paths: summary: "Get availability's reservations" description: Return's list of Reservations for ongoing Storage Requests that the node hosts. operationId: getReservations - tags: [ Marketplace ] + tags: [Marketplace] parameters: - in: path name: id @@ -782,7 +782,7 @@ paths: "/storage/request/{cid}": post: summary: "Creates a new Request for storage" - tags: [ Marketplace ] + tags: [Marketplace] operationId: createStorageRequest parameters: - in: path @@ -813,7 +813,7 @@ paths: "/storage/purchases": get: summary: "Returns list of purchase IDs" - tags: [ Marketplace ] + tags: [Marketplace] operationId: getPurchases responses: "200": @@ -830,7 +830,7 @@ paths: "/storage/purchases/{id}": get: summary: "Returns purchase details" - tags: [ Marketplace ] + tags: [Marketplace] operationId: getPurchase parameters: - in: path @@ -857,7 +857,7 @@ paths: get: summary: "Get Node's SPR" operationId: getSPR - tags: [ Node ] + tags: [Node] responses: "200": description: Node's SPR @@ -875,7 +875,7 @@ paths: get: summary: "Get Node's PeerID" operationId: getPeerId - tags: [ Node ] + tags: [Node] responses: "200": description: Node's Peer ID @@ -890,7 +890,7 @@ paths: "/debug/chronicles/loglevel": post: summary: "Set log level at run time" - tags: [ Debug ] + tags: [Debug] operationId: setDebugLogLevel parameters: @@ -912,7 +912,7 @@ paths: get: summary: "Gets node information" operationId: getDebugInfo - tags: [ Debug ] + tags: [Debug] responses: "200": description: Node's information diff --git a/tests/checktest.nim b/tests/checktest.nim index 8ca5c53e..b1d80ff9 100644 --- a/tests/checktest.nim +++ b/tests/checktest.nim @@ -3,7 +3,7 @@ import ./helpers ## Unit testing suite that calls checkTrackers in teardown to check for memory leaks using chronos trackers. 
template checksuite*(name, body) = suite name: - proc suiteProc = + proc suiteProc() = multisetup() teardown: @@ -15,7 +15,7 @@ template checksuite*(name, body) = template asyncchecksuite*(name, body) = suite name: - proc suiteProc = + proc suiteProc() = asyncmultisetup() teardown: diff --git a/tests/codex/blockexchange/discovery/testdiscovery.nim b/tests/codex/blockexchange/discovery/testdiscovery.nim index a136f89e..88331c3f 100644 --- a/tests/codex/blockexchange/discovery/testdiscovery.nim +++ b/tests/codex/blockexchange/discovery/testdiscovery.nim @@ -19,7 +19,6 @@ import ../../helpers import ../../helpers/mockdiscovery import ../../examples - asyncchecksuite "Block Advertising and Discovery": let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256) @@ -56,8 +55,8 @@ asyncchecksuite "Block Advertising and Discovery": pendingBlocks = PendingBlocksManager.new() (manifest, tree) = makeManifestAndTree(blocks).tryGet() - manifestBlock = bt.Block.new( - manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + manifestBlock = + bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() (await localStore.putBlock(manifestBlock)).tryGet() @@ -67,42 +66,33 @@ asyncchecksuite "Block Advertising and Discovery": network, blockDiscovery, pendingBlocks, - minPeersPerBlock = 1) - - advertiser = Advertiser.new( - localStore, - blockDiscovery + minPeersPerBlock = 1, ) + advertiser = Advertiser.new(localStore, blockDiscovery) + engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) switch.mount(network) test "Should discover want list": - let - pendingBlocks = blocks.mapIt( - engine.pendingBlocks.getWantHandle(it.cid) - ) + let pendingBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid)) await engine.start() - blockDiscovery.publishBlockProvideHandler = - proc(d: MockDiscovery, cid: Cid): Future[void] {.async, gcsafe.} = - return + blockDiscovery.publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async, gcsafe.} = + return - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = - await engine.resolveBlocks(blocks.filterIt(it.cid == cid)) + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async.} = + await engine.resolveBlocks(blocks.filterIt(it.cid == cid)) - await allFuturesThrowing( - allFinished(pendingBlocks)) + await allFuturesThrowing(allFinished(pendingBlocks)) await engine.stop() @@ -110,26 +100,27 @@ asyncchecksuite "Block Advertising and Discovery": let cids = @[manifest.cid.tryGet, manifest.treeCid] advertised = initTable.collect: - for cid in cids: {cid: newFuture[void]()} + for cid in cids: + {cid: newFuture[void]()} - blockDiscovery - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} = - if cid in advertised and not advertised[cid].finished(): - advertised[cid].complete() + blockDiscovery.publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ) {.async.} = + if cid in advertised and not advertised[cid].finished(): + advertised[cid].complete() await engine.start() - await allFuturesThrowing( - allFinished(toSeq(advertised.values))) + await allFuturesThrowing(allFinished(toSeq(advertised.values))) await engine.stop() test "Should not advertise local blocks": - let - blockCids = blocks.mapIt(it.cid) 
+ let blockCids = blocks.mapIt(it.cid) - blockDiscovery - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} = - check: - cid notin blockCids + blockDiscovery.publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ) {.async.} = + check: + cid notin blockCids await engine.start() await sleepAsync(3.seconds) @@ -137,29 +128,25 @@ asyncchecksuite "Block Advertising and Discovery": test "Should not launch discovery if remote peer has block": let - pendingBlocks = blocks.mapIt( - engine.pendingBlocks.getWantHandle(it.cid) - ) + pendingBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid)) peerId = PeerId.example haves = collect(initTable()): for blk in blocks: - { blk.address: Presence(address: blk.address, price: 0.u256) } + {blk.address: Presence(address: blk.address, price: 0.u256)} - engine.peers.add( - BlockExcPeerCtx( - id: peerId, - blocks: haves - )) + engine.peers.add(BlockExcPeerCtx(id: peerId, blocks: haves)) - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] = - check false + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] = + check false await engine.start() - engine.pendingBlocks.resolve(blocks.mapIt(BlockDelivery(blk: it, address: it.address))) + engine.pendingBlocks.resolve( + blocks.mapIt(BlockDelivery(blk: it, address: it.address)) + ) - await allFuturesThrowing( - allFinished(pendingBlocks)) + await allFuturesThrowing(allFinished(pendingBlocks)) await engine.stop() @@ -176,7 +163,7 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": trees: seq[CodexTree] setup: - for _ in 0..<4: + for _ in 0 ..< 4: let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256) var blocks = newSeq[bt.Block]() while true: @@ -205,21 +192,14 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": network, blockDiscovery, pendingBlocks, - minPeersPerBlock = 1) - - advertiser = Advertiser.new( - localStore, - blockDiscovery + minPeersPerBlock = 1, ) + advertiser = Advertiser.new(localStore, blockDiscovery) + engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) networkStore = NetworkStore.new(engine, localStore) s.mount(network) @@ -239,46 +219,70 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": var advertised: Table[Cid, SignedPeerRecord] - MockDiscovery(blockexc[1].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[1].peerInfo.signedPeerRecord + MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[1].peerInfo.signedPeerRecord - MockDiscovery(blockexc[2].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[2].peerInfo.signedPeerRecord + MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[2].peerInfo.signedPeerRecord - MockDiscovery(blockexc[3].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[3].peerInfo.signedPeerRecord + 
MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[3].peerInfo.signedPeerRecord discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) - await blockexc[1].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid))]) + await blockexc[1].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid) + ) + ], + ) discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid) - await blockexc[2].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid))]) + await blockexc[2].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid) + ) + ], + ) discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid) - await blockexc[3].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid))]) + await blockexc[3].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid) + ) + ], + ) - MockDiscovery(blockexc[0].engine.discovery.discovery) - .findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid): - Future[seq[SignedPeerRecord]] {.async.} = - if cid in advertised: - result.add(advertised[cid]) + MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async.} = + if cid in advertised: + result.add(advertised[cid]) let futs = collect(newSeq): - for m in mBlocks[0..2]: + for m in mBlocks[0 .. 
2]: blockexc[0].engine.requestBlock(m.cid) await allFuturesThrowing( - switch.mapIt(it.start()) & - blockexc.mapIt(it.engine.start())).wait(10.seconds) + switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) + ) + .wait(10.seconds) await allFutures(futs).wait(10.seconds) - await allFuturesThrowing( - blockexc.mapIt(it.engine.stop()) & - switch.mapIt(it.stop())).wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) + .wait(10.seconds) test "E2E - Should advertise and discover blocks with peers already connected": # Distribute the blocks amongst 1..3 @@ -286,42 +290,65 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": var advertised: Table[Cid, SignedPeerRecord] - MockDiscovery(blockexc[1].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[1].peerInfo.signedPeerRecord + MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[1].peerInfo.signedPeerRecord - MockDiscovery(blockexc[2].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[2].peerInfo.signedPeerRecord + MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[2].peerInfo.signedPeerRecord - MockDiscovery(blockexc[3].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[3].peerInfo.signedPeerRecord + MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[3].peerInfo.signedPeerRecord discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) - await blockexc[1].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid))]) + await blockexc[1].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid) + ) + ], + ) discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid) - await blockexc[2].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid))]) + await blockexc[2].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid) + ) + ], + ) discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid) - await blockexc[3].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid))]) + await blockexc[3].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid) + ) + ], + ) - MockDiscovery(blockexc[0].engine.discovery.discovery) - .findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid): - Future[seq[SignedPeerRecord]] {.async.} = - if cid in advertised: - return @[advertised[cid]] + MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( + d: 
MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async.} = + if cid in advertised: + return @[advertised[cid]] - let - futs = mBlocks[0..2].mapIt(blockexc[0].engine.requestBlock(it.cid)) + let futs = mBlocks[0 .. 2].mapIt(blockexc[0].engine.requestBlock(it.cid)) await allFuturesThrowing( - switch.mapIt(it.start()) & - blockexc.mapIt(it.engine.start())).wait(10.seconds) + switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) + ) + .wait(10.seconds) await allFutures(futs).wait(10.seconds) - await allFuturesThrowing( - blockexc.mapIt(it.engine.stop()) & - switch.mapIt(it.stop())).wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) + .wait(10.seconds) diff --git a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim index 42bc84a9..904703a0 100644 --- a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim +++ b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim @@ -62,12 +62,18 @@ asyncchecksuite "Test Discovery Engine": network, blockDiscovery, pendingBlocks, - discoveryLoopSleep = 100.millis) - wants = blocks.mapIt(pendingBlocks.getWantHandle(it.cid) ) + discoveryLoopSleep = 100.millis, + ) + wants = blocks.mapIt(pendingBlocks.getWantHandle(it.cid)) - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = - pendingBlocks.resolve(blocks.filterIt(it.cid == cid).mapIt(BlockDelivery(blk: it, address: it.address))) + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + pendingBlocks.resolve( + blocks.filterIt(it.cid == cid).mapIt( + BlockDelivery(blk: it, address: it.address) + ) + ) await discoveryEngine.start() await allFuturesThrowing(allFinished(wants)).wait(1.seconds) @@ -82,14 +88,16 @@ asyncchecksuite "Test Discovery Engine": network, blockDiscovery, pendingBlocks, - discoveryLoopSleep = 100.millis) + discoveryLoopSleep = 100.millis, + ) want = newFuture[void]() - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = - check cid == blocks[0].cid - if not want.finished: - want.complete() + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + check cid == blocks[0].cid + if not want.finished: + want.complete() await discoveryEngine.start() discoveryEngine.queueFindBlocksReq(@[blocks[0].cid]) @@ -107,23 +115,24 @@ asyncchecksuite "Test Discovery Engine": blockDiscovery, pendingBlocks, discoveryLoopSleep = 5.minutes, - minPeersPerBlock = minPeers) + minPeersPerBlock = minPeers, + ) want = newAsyncEvent() var pendingCids = newSeq[Cid]() - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = - check cid in pendingCids - pendingCids.keepItIf(it != cid) - check peerStore.len < minPeers - var - peerCtx = BlockExcPeerCtx(id: PeerId.example) + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + check cid in pendingCids + pendingCids.keepItIf(it != cid) + check peerStore.len < minPeers + var peerCtx = BlockExcPeerCtx(id: PeerId.example) - let address = BlockAddress(leaf: false, cid: cid) + let address = BlockAddress(leaf: false, cid: cid) - peerCtx.blocks[address] = Presence(address: 
address, price: 0.u256) - peerStore.add(peerCtx) - want.fire() + peerCtx.blocks[address] = Presence(address: address, price: 0.u256) + peerStore.add(peerCtx) + want.fire() await discoveryEngine.start() var idx = 0 @@ -148,19 +157,20 @@ asyncchecksuite "Test Discovery Engine": blockDiscovery, pendingBlocks, discoveryLoopSleep = 100.millis, - concurrentDiscReqs = 2) + concurrentDiscReqs = 2, + ) reqs = newFuture[void]() count = 0 - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): - Future[seq[SignedPeerRecord]] {.gcsafe, async.} = - check cid == blocks[0].cid - if count > 0: - check false - count.inc + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.gcsafe, async.} = + check cid == blocks[0].cid + if count > 0: + check false + count.inc - await reqs # queue the request + await reqs # queue the request await discoveryEngine.start() discoveryEngine.queueFindBlocksReq(@[blocks[0].cid]) diff --git a/tests/codex/blockexchange/engine/testadvertiser.nim b/tests/codex/blockexchange/engine/testadvertiser.nim index c1bf1c68..157564d6 100644 --- a/tests/codex/blockexchange/engine/testadvertiser.nim +++ b/tests/codex/blockexchange/engine/testadvertiser.nim @@ -22,24 +22,22 @@ asyncchecksuite "Advertiser": advertised: seq[Cid] let manifest = Manifest.new( - treeCid = Cid.example, - blockSize = 123.NBytes, - datasetSize = 234.NBytes) - manifestBlk = Block.new(data = manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + treeCid = Cid.example, blockSize = 123.NBytes, datasetSize = 234.NBytes + ) + manifestBlk = + Block.new(data = manifest.encode().tryGet(), codec = ManifestCodec).tryGet() setup: blockDiscovery = MockDiscovery.new() localStore = CacheStore.new() advertised = newSeq[Cid]() - blockDiscovery.publishBlockProvideHandler = - proc(d: MockDiscovery, cid: Cid) {.async, gcsafe.} = - advertised.add(cid) + blockDiscovery.publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ) {.async, gcsafe.} = + advertised.add(cid) - advertiser = Advertiser.new( - localStore, - blockDiscovery - ) + advertiser = Advertiser.new(localStore, blockDiscovery) await advertiser.start() @@ -86,14 +84,10 @@ asyncchecksuite "Advertiser": check manifest.treeCid in advertised test "Should advertise existing manifests and their trees": - let - newStore = CacheStore.new([manifestBlk]) + let newStore = CacheStore.new([manifestBlk]) await advertiser.stop() - advertiser = Advertiser.new( - newStore, - blockDiscovery - ) + advertiser = Advertiser.new(newStore, blockDiscovery) await advertiser.start() check eventually manifestBlk.cid in advertised diff --git a/tests/codex/blockexchange/engine/testblockexc.nim b/tests/codex/blockexchange/engine/testblockexc.nim index e1a2bfcf..aa15f795 100644 --- a/tests/codex/blockexchange/engine/testblockexc.nim +++ b/tests/codex/blockexchange/engine/testblockexc.nim @@ -34,16 +34,15 @@ asyncchecksuite "NetworkStore engine - 2 nodes": nodeCmps1.engine.start(), nodeCmps2.switch.start(), nodeCmps2.blockDiscovery.start(), - nodeCmps2.engine.start()) + nodeCmps2.engine.start(), + ) # initialize our want lists - pendingBlocks1 = blocks2[0..3].mapIt( - nodeCmps1.pendingBlocks.getWantHandle( it.cid ) - ) + pendingBlocks1 = + blocks2[0 .. 3].mapIt(nodeCmps1.pendingBlocks.getWantHandle(it.cid)) - pendingBlocks2 = blocks1[0..3].mapIt( - nodeCmps2.pendingBlocks.getWantHandle( it.cid ) - ) + pendingBlocks2 = + blocks1[0 .. 
3].mapIt(nodeCmps2.pendingBlocks.getWantHandle(it.cid)) pricing1 = Pricing.example() pricing2 = Pricing.example() @@ -54,8 +53,8 @@ asyncchecksuite "NetworkStore engine - 2 nodes": nodeCmps2.engine.pricing = pricing2.some await nodeCmps1.switch.connect( - nodeCmps2.switch.peerInfo.peerId, - nodeCmps2.switch.peerInfo.addrs) + nodeCmps2.switch.peerInfo.peerId, nodeCmps2.switch.peerInfo.addrs + ) await sleepAsync(1.seconds) # give some time to exchange lists peerCtx2 = nodeCmps1.peerStore.get(nodeCmps2.switch.peerInfo.peerId) @@ -71,39 +70,32 @@ asyncchecksuite "NetworkStore engine - 2 nodes": nodeCmps1.switch.stop(), nodeCmps2.blockDiscovery.stop(), nodeCmps2.engine.stop(), - nodeCmps2.switch.stop()) + nodeCmps2.switch.stop(), + ) test "Should exchange blocks on connect": - await allFuturesThrowing( - allFinished(pendingBlocks1)) - .wait(10.seconds) + await allFuturesThrowing(allFinished(pendingBlocks1)).wait(10.seconds) - await allFuturesThrowing( - allFinished(pendingBlocks2)) - .wait(10.seconds) + await allFuturesThrowing(allFinished(pendingBlocks2)).wait(10.seconds) check: - (await allFinished( - blocks1[0..3].mapIt( - nodeCmps2.localStore.getBlock( it.cid ) ))) - .filterIt( it.completed and it.read.isOk ) - .mapIt( $it.read.get.cid ).sorted(cmp[string]) == - blocks1[0..3].mapIt( $it.cid ).sorted(cmp[string]) + (await allFinished(blocks1[0 .. 3].mapIt(nodeCmps2.localStore.getBlock(it.cid)))) + .filterIt(it.completed and it.read.isOk) + .mapIt($it.read.get.cid) + .sorted(cmp[string]) == blocks1[0 .. 3].mapIt($it.cid).sorted(cmp[string]) - (await allFinished( - blocks2[0..3].mapIt( - nodeCmps1.localStore.getBlock( it.cid ) ))) - .filterIt( it.completed and it.read.isOk ) - .mapIt( $it.read.get.cid ).sorted(cmp[string]) == - blocks2[0..3].mapIt( $it.cid ).sorted(cmp[string]) + (await allFinished(blocks2[0 .. 3].mapIt(nodeCmps1.localStore.getBlock(it.cid)))) + .filterIt(it.completed and it.read.isOk) + .mapIt($it.read.get.cid) + .sorted(cmp[string]) == blocks2[0 .. 3].mapIt($it.cid).sorted(cmp[string]) test "Should exchanges accounts on connect": - check peerCtx1.account.?address == pricing1.address.some - check peerCtx2.account.?address == pricing2.address.some + check peerCtx1.account .? address == pricing1.address.some + check peerCtx2.account .? address == pricing2.address.some test "Should send want-have for block": let blk = bt.Block.new("Block 1".toBytes).tryGet() - let blkFut = nodeCmps1.pendingBlocks.getWantHandle( blk.cid ) + let blkFut = nodeCmps1.pendingBlocks.getWantHandle(blk.cid) (await nodeCmps2.localStore.putBlock(blk)).tryGet() let entry = WantListEntry( @@ -111,25 +103,20 @@ asyncchecksuite "NetworkStore engine - 2 nodes": priority: 1, cancel: false, wantType: WantType.WantBlock, - sendDontHave: false) + sendDontHave: false, + ) peerCtx1.peerWants.add(entry) - check nodeCmps2 - .engine - .taskQueue - .pushOrUpdateNoWait(peerCtx1).isOk + check nodeCmps2.engine.taskQueue.pushOrUpdateNoWait(peerCtx1).isOk check eventually (await nodeCmps1.localStore.hasBlock(blk.cid)).tryGet() check eventually (await blkFut) == blk test "Should get blocks from remote": - let - blocks = await allFinished( - blocks2[4..7].mapIt( - nodeCmps1.networkStore.getBlock(it.cid) - )) + let blocks = + await allFinished(blocks2[4 .. 7].mapIt(nodeCmps1.networkStore.getBlock(it.cid))) - check blocks.mapIt( it.read().tryGet() ) == blocks2[4..7] + check blocks.mapIt(it.read().tryGet()) == blocks2[4 .. 
7] test "Remote should send blocks when available": let blk = bt.Block.new("Block 1".toBytes).tryGet() @@ -142,19 +129,15 @@ asyncchecksuite "NetworkStore engine - 2 nodes": (await nodeCmps2.networkStore.putBlock(blk)).tryGet() # should succeed retrieving block from remote - check await nodeCmps1.networkStore.getBlock(blk.cid) - .withTimeout(100.millis) # should succeed + check await nodeCmps1.networkStore.getBlock(blk.cid).withTimeout(100.millis) + # should succeed test "Should receive payments for blocks that were sent": - discard await allFinished( - blocks2[4..7].mapIt( - nodeCmps2.networkStore.putBlock(it) - )) + discard + await allFinished(blocks2[4 .. 7].mapIt(nodeCmps2.networkStore.putBlock(it))) - discard await allFinished( - blocks2[4..7].mapIt( - nodeCmps1.networkStore.getBlock(it.cid) - )) + discard + await allFinished(blocks2[4 .. 7].mapIt(nodeCmps1.networkStore.getBlock(it.cid))) let channel = !peerCtx1.paymentChannel @@ -173,14 +156,10 @@ asyncchecksuite "NetworkStore - multiple nodes": for e in nodes: await e.engine.start() - await allFuturesThrowing( - nodes.mapIt( it.switch.start() ) - ) + await allFuturesThrowing(nodes.mapIt(it.switch.start())) teardown: - await allFuturesThrowing( - nodes.mapIt( it.switch.stop() ) - ) + await allFuturesThrowing(nodes.mapIt(it.switch.stop())) nodes = @[] @@ -191,34 +170,23 @@ asyncchecksuite "NetworkStore - multiple nodes": # Add blocks from 1st peer to want list let - downloadCids = - blocks[0..3].mapIt( - it.cid - ) & - blocks[12..15].mapIt( - it.cid - ) + downloadCids = blocks[0 .. 3].mapIt(it.cid) & blocks[12 .. 15].mapIt(it.cid) - pendingBlocks = downloadCids.mapIt( - engine.pendingBlocks.getWantHandle( it ) - ) + pendingBlocks = downloadCids.mapIt(engine.pendingBlocks.getWantHandle(it)) - for i in 0..15: + for i in 0 .. 15: (await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet() await connectNodes(nodes) await sleepAsync(1.seconds) - await allFuturesThrowing( - allFinished(pendingBlocks)) + await allFuturesThrowing(allFinished(pendingBlocks)) check: - (await allFinished( - downloadCids.mapIt( - downloader.localStore.getBlock( it ) ))) - .filterIt( it.completed and it.read.isOk ) - .mapIt( $it.read.get.cid ).sorted(cmp[string]) == - downloadCids.mapIt( $it ).sorted(cmp[string]) + (await allFinished(downloadCids.mapIt(downloader.localStore.getBlock(it)))) + .filterIt(it.completed and it.read.isOk) + .mapIt($it.read.get.cid) + .sorted(cmp[string]) == downloadCids.mapIt($it).sorted(cmp[string]) test "Should exchange blocks with multiple nodes": let @@ -227,25 +195,20 @@ asyncchecksuite "NetworkStore - multiple nodes": # Add blocks from 1st peer to want list let - pendingBlocks1 = blocks[0..3].mapIt( - engine.pendingBlocks.getWantHandle( it.cid ) - ) - pendingBlocks2 = blocks[12..15].mapIt( - engine.pendingBlocks.getWantHandle( it.cid ) - ) + pendingBlocks1 = blocks[0 .. 3].mapIt(engine.pendingBlocks.getWantHandle(it.cid)) + pendingBlocks2 = + blocks[12 .. 15].mapIt(engine.pendingBlocks.getWantHandle(it.cid)) - for i in 0..15: + for i in 0 .. 
15: (await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet() await connectNodes(nodes) await sleepAsync(1.seconds) - await allFuturesThrowing( - allFinished(pendingBlocks1), - allFinished(pendingBlocks2)) + await allFuturesThrowing(allFinished(pendingBlocks1), allFinished(pendingBlocks2)) - check pendingBlocks1.mapIt( it.read ) == blocks[0..3] - check pendingBlocks2.mapIt( it.read ) == blocks[12..15] + check pendingBlocks1.mapIt(it.read) == blocks[0 .. 3] + check pendingBlocks2.mapIt(it.read) == blocks[12 .. 15] test "Should actively cancel want-haves if block received from elsewhere": let @@ -265,13 +228,9 @@ asyncchecksuite "NetworkStore - multiple nodes": # ... and bystander learns that downloader wants it, but can't provide it. check eventually( - bystander - .engine - .peers - .get(downloader.switch.peerInfo.peerId) - .peerWants - .filterIt( it.address == aBlock.address ) - .len == 1 + bystander.engine.peers + .get(downloader.switch.peerInfo.peerId).peerWants + .filterIt(it.address == aBlock.address).len == 1 ) # As soon as we connect the downloader to the blockHolder, the block should @@ -282,11 +241,7 @@ asyncchecksuite "NetworkStore - multiple nodes": # ... and the bystander should have cancelled the want-have check eventually( - bystander - .engine - .peers - .get(downloader.switch.peerInfo.peerId) - .peerWants - .filterIt( it.address == aBlock.address ) - .len == 0 + bystander.engine.peers + .get(downloader.switch.peerInfo.peerId).peerWants + .filterIt(it.address == aBlock.address).len == 0 ) diff --git a/tests/codex/blockexchange/engine/testengine.nim b/tests/codex/blockexchange/engine/testengine.nim index d97016d5..f7cc8294 100644 --- a/tests/codex/blockexchange/engine/testengine.nim +++ b/tests/codex/blockexchange/engine/testengine.nim @@ -54,42 +54,30 @@ asyncchecksuite "NetworkStore engine basic": test "Should send want list to new peers": proc sendWantList( - id: PeerId, - addresses: seq[BlockAddress], - priority: int32 = 0, - cancel: bool = false, - wantType: WantType = WantType.WantHave, - full: bool = false, - sendDontHave: bool = false) {.gcsafe, async.} = - check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt( $it.cid ).sorted - done.complete() + id: PeerId, + addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ) {.gcsafe, async.} = + check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt($it.cid).sorted + done.complete() let - network = BlockExcNetwork(request: BlockExcRequest( - sendWantList: sendWantList, - )) + network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList)) - localStore = CacheStore.new(blocks.mapIt( it )) + localStore = CacheStore.new(blocks.mapIt(it)) discovery = DiscoveryEngine.new( - localStore, - peerStore, - network, - blockDiscovery, - pendingBlocks) - - advertiser = Advertiser.new( - localStore, - blockDiscovery + localStore, peerStore, network, blockDiscovery, pendingBlocks ) + advertiser = Advertiser.new(localStore, blockDiscovery) + engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) for b in blocks: discard engine.pendingBlocks.getWantHandle(b.cid) @@ -105,32 +93,18 @@ asyncchecksuite "NetworkStore engine basic": done.complete() let - network = BlockExcNetwork( - request: BlockExcRequest( - sendAccount: 
sendAccount - )) + network = BlockExcNetwork(request: BlockExcRequest(sendAccount: sendAccount)) localStore = CacheStore.new() discovery = DiscoveryEngine.new( - localStore, - peerStore, - network, - blockDiscovery, - pendingBlocks) - - advertiser = Advertiser.new( - localStore, - blockDiscovery + localStore, peerStore, network, blockDiscovery, pendingBlocks ) + advertiser = Advertiser.new(localStore, blockDiscovery) + engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) engine.pricing = pricing.some await engine.setupPeer(peerId) @@ -156,9 +130,9 @@ asyncchecksuite "NetworkStore engine handlers": blocks: seq[Block] const NopSendWantCancellationsProc = proc( - id: PeerId, - addresses: seq[BlockAddress] - ) {.gcsafe, async.} = discard + id: PeerId, addresses: seq[BlockAddress] + ) {.gcsafe, async.} = + discard setup: rng = Rng.instance() @@ -181,43 +155,27 @@ asyncchecksuite "NetworkStore engine handlers": localStore = CacheStore.new() network = BlockExcNetwork() - discovery = DiscoveryEngine.new( - localStore, - peerStore, - network, - blockDiscovery, - pendingBlocks) + discovery = + DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) - advertiser = Advertiser.new( - localStore, - blockDiscovery - ) + advertiser = Advertiser.new(localStore, blockDiscovery) engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) - - peerCtx = BlockExcPeerCtx( - id: peerId + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks ) + + peerCtx = BlockExcPeerCtx(id: peerId) engine.peers.add(peerCtx) test "Should schedule block requests": - let - wantList = makeWantList( - blocks.mapIt( it.cid ), - wantType = WantType.WantBlock) # only `wantBlock` are stored in `peerWants` + let wantList = makeWantList(blocks.mapIt(it.cid), wantType = WantType.WantBlock) + # only `wantBlock` are stored in `peerWants` proc handler() {.async.} = let ctx = await engine.taskQueue.pop() check ctx.id == peerId # only `wantBlock` scheduled - check ctx.peerWants.mapIt( it.address.cidOrTreeCid ) == blocks.mapIt( it.cid ) + check ctx.peerWants.mapIt(it.address.cidOrTreeCid) == blocks.mapIt(it.cid) let done = handler() await engine.wantListHandler(peerId, wantList) @@ -226,19 +184,16 @@ asyncchecksuite "NetworkStore engine handlers": test "Should handle want list": let done = newFuture[void]() - wantList = makeWantList(blocks.mapIt( it.cid )) + wantList = makeWantList(blocks.mapIt(it.cid)) proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = - check presence.mapIt( it.address ) == wantList.entries.mapIt( it.address ) + check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) done.complete() - engine.network = BlockExcNetwork( - request: BlockExcRequest( - sendPresence: sendPresence - )) + engine.network = + BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence)) - await allFuturesThrowing( - allFinished(blocks.mapIt( localStore.putBlock(it) ))) + await allFuturesThrowing(allFinished(blocks.mapIt(localStore.putBlock(it)))) await engine.wantListHandler(peerId, wantList) await done @@ -246,21 +201,18 @@ asyncchecksuite "NetworkStore engine handlers": test "Should handle want list - `dont-have`": let done = newFuture[void]() - wantList = makeWantList( - blocks.mapIt( it.cid ), - sendDontHave = true) + wantList = 
makeWantList(blocks.mapIt(it.cid), sendDontHave = true) proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = - check presence.mapIt( it.address ) == wantList.entries.mapIt( it.address ) + check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) for p in presence: check: p.`type` == BlockPresenceType.DontHave done.complete() - engine.network = BlockExcNetwork(request: BlockExcRequest( - sendPresence: sendPresence - )) + engine.network = + BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence)) await engine.wantListHandler(peerId, wantList) await done @@ -268,23 +220,20 @@ asyncchecksuite "NetworkStore engine handlers": test "Should handle want list - `dont-have` some blocks": let done = newFuture[void]() - wantList = makeWantList( - blocks.mapIt( it.cid ), - sendDontHave = true) + wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true) proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = for p in presence: - if p.address.cidOrTreeCid != blocks[0].cid and p.address.cidOrTreeCid != blocks[1].cid: + if p.address.cidOrTreeCid != blocks[0].cid and + p.address.cidOrTreeCid != blocks[1].cid: check p.`type` == BlockPresenceType.DontHave else: check p.`type` == BlockPresenceType.Have done.complete() - engine.network = BlockExcNetwork( - request: BlockExcRequest( - sendPresence: sendPresence - )) + engine.network = + BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence)) (await engine.localStore.putBlock(blocks[0])).tryGet() (await engine.localStore.putBlock(blocks[1])).tryGet() @@ -293,19 +242,18 @@ asyncchecksuite "NetworkStore engine handlers": await done test "Should store blocks in local store": - let pending = blocks.mapIt( - engine.pendingBlocks.getWantHandle( it.cid ) - ) + let pending = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid)) let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address)) # Install NOP for want list cancellations so they don't cause a crash engine.network = BlockExcNetwork( - request: BlockExcRequest(sendWantCancellations: NopSendWantCancellationsProc)) + request: BlockExcRequest(sendWantCancellations: NopSendWantCancellationsProc) + ) await engine.blocksDeliveryHandler(peerId, blocksDelivery) let resolved = await allFinished(pending) - check resolved.mapIt( it.read ) == blocks + check resolved.mapIt(it.read) == blocks for b in blocks: let present = await engine.localStore.hasBlock(b.cid) check present.tryGet() @@ -325,10 +273,7 @@ asyncchecksuite "NetworkStore engine handlers": request: BlockExcRequest( sendPayment: proc(receiver: PeerId, payment: SignedState) {.gcsafe, async.} = let - amount = - blocks.mapIt( - peerContext.blocks[it.address].price - ).foldl(a + b) + amount = blocks.mapIt(peerContext.blocks[it.address].price).foldl(a + b) balances = !payment.state.outcome.balances(Asset) @@ -337,48 +282,46 @@ asyncchecksuite "NetworkStore engine handlers": done.complete(), # Install NOP for want list cancellations so they don't cause a crash - sendWantCancellations: NopSendWantCancellationsProc - )) + sendWantCancellations: NopSendWantCancellationsProc, + ) + ) - await engine.blocksDeliveryHandler(peerId, blocks.mapIt( - BlockDelivery(blk: it, address: it.address))) + await engine.blocksDeliveryHandler( + peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address)) + ) await done.wait(100.millis) test "Should handle block presence": - var - handles: Table[Cid, Future[Block]] + var handles: Table[Cid, Future[Block]] proc 
sendWantList( - id: PeerId, - addresses: seq[BlockAddress], - priority: int32 = 0, - cancel: bool = false, - wantType: WantType = WantType.WantHave, - full: bool = false, - sendDontHave: bool = false) {.gcsafe, async.} = - engine.pendingBlocks.resolve(blocks - .filterIt( it.address in addresses ) - .mapIt(BlockDelivery(blk: it, address: it.address))) + id: PeerId, + addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ) {.gcsafe, async.} = + engine.pendingBlocks.resolve( + blocks.filterIt(it.address in addresses).mapIt( + BlockDelivery(blk: it, address: it.address) + ) + ) - engine.network = BlockExcNetwork( - request: BlockExcRequest( - sendWantList: sendWantList - )) + engine.network = + BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList)) # only Cids in peer want lists are requested - handles = blocks.mapIt( - (it.cid, engine.pendingBlocks.getWantHandle( it.cid ))).toTable + handles = blocks.mapIt((it.cid, engine.pendingBlocks.getWantHandle(it.cid))).toTable let price = UInt256.example await engine.blockPresenceHandler( peerId, blocks.mapIt( - PresenceMessage.init( - Presence( - address: it.address, - have: true, - price: price - )))) + PresenceMessage.init(Presence(address: it.address, have: true, price: price)) + ), + ) for a in blocks.mapIt(it.address): check a in peerCtx.peerHave @@ -388,21 +331,17 @@ asyncchecksuite "NetworkStore engine handlers": let pending = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid)) blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address)) - cancellations = newTable( - blocks.mapIt((it.address, newFuture[void]())).toSeq - ) + cancellations = newTable(blocks.mapIt((it.address, newFuture[void]())).toSeq) proc sendWantCancellations( - id: PeerId, - addresses: seq[BlockAddress] + id: PeerId, addresses: seq[BlockAddress] ) {.gcsafe, async.} = - for address in addresses: - cancellations[address].complete() + for address in addresses: + cancellations[address].complete() engine.network = BlockExcNetwork( - request: BlockExcRequest( - sendWantCancellations: sendWantCancellations - )) + request: BlockExcRequest(sendWantCancellations: sendWantCancellations) + ) await engine.blocksDeliveryHandler(peerId, blocksDelivery) discard await allFinished(pending) @@ -448,43 +387,29 @@ asyncchecksuite "Task Handler": localStore = CacheStore.new() network = BlockExcNetwork() - discovery = DiscoveryEngine.new( - localStore, - peerStore, - network, - blockDiscovery, - pendingBlocks) + discovery = + DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) - advertiser = Advertiser.new( - localStore, - blockDiscovery - ) + advertiser = Advertiser.new(localStore, blockDiscovery) engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) peersCtx = @[] - for i in 0..3: + for i in 0 .. 
3: let seckey = PrivateKey.random(rng[]).tryGet() peers.add(PeerId.init(seckey.getPublicKey().tryGet()).tryGet()) - peersCtx.add(BlockExcPeerCtx( - id: peers[i] - )) + peersCtx.add(BlockExcPeerCtx(id: peers[i])) peerStore.add(peersCtx[i]) engine.pricing = Pricing.example.some test "Should send want-blocks in priority order": proc sendBlocksDelivery( - id: PeerId, - blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + id: PeerId, blocksDelivery: seq[BlockDelivery] + ) {.gcsafe, async.} = check blocksDelivery.len == 2 check: blocksDelivery[1].address == blocks[0].address @@ -501,7 +426,8 @@ asyncchecksuite "Task Handler": priority: 49, cancel: false, wantType: WantType.WantBlock, - sendDontHave: false) + sendDontHave: false, + ) ) # first block to send by priority @@ -511,39 +437,44 @@ asyncchecksuite "Task Handler": priority: 50, cancel: false, wantType: WantType.WantBlock, - sendDontHave: false) + sendDontHave: false, + ) ) await engine.taskHandler(peersCtx[0]) test "Should set in-flight for outgoing blocks": proc sendBlocksDelivery( - id: PeerId, - blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + id: PeerId, blocksDelivery: seq[BlockDelivery] + ) {.gcsafe, async.} = check peersCtx[0].peerWants[0].inFlight for blk in blocks: (await engine.localStore.putBlock(blk)).tryGet() engine.network.request.sendBlocksDelivery = sendBlocksDelivery - peersCtx[0].peerWants.add(WantListEntry( - address: blocks[0].address, - priority: 50, - cancel: false, - wantType: WantType.WantBlock, - sendDontHave: false, - inFlight: false) + peersCtx[0].peerWants.add( + WantListEntry( + address: blocks[0].address, + priority: 50, + cancel: false, + wantType: WantType.WantBlock, + sendDontHave: false, + inFlight: false, + ) ) await engine.taskHandler(peersCtx[0]) test "Should clear in-flight when local lookup fails": - peersCtx[0].peerWants.add(WantListEntry( - address: blocks[0].address, - priority: 50, - cancel: false, - wantType: WantType.WantBlock, - sendDontHave: false, - inFlight: false) + peersCtx[0].peerWants.add( + WantListEntry( + address: blocks[0].address, + priority: 50, + cancel: false, + wantType: WantType.WantBlock, + sendDontHave: false, + inFlight: false, + ) ) await engine.taskHandler(peersCtx[0]) @@ -555,11 +486,12 @@ asyncchecksuite "Task Handler": let price = (!engine.pricing).price proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = - check presence.mapIt(!Presence.init(it)) == @[ - Presence(address: present[0].address, have: true, price: price), - Presence(address: present[1].address, have: true, price: price), - Presence(address: missing[0].address, have: false) - ] + check presence.mapIt(!Presence.init(it)) == + @[ + Presence(address: present[0].address, have: true, price: price), + Presence(address: present[1].address, have: true, price: price), + Presence(address: missing[0].address, have: false), + ] for blk in blocks: (await engine.localStore.putBlock(blk)).tryGet() @@ -572,7 +504,8 @@ asyncchecksuite "Task Handler": priority: 1, cancel: false, wantType: WantType.WantHave, - sendDontHave: false) + sendDontHave: false, + ) ) # have block @@ -582,7 +515,8 @@ asyncchecksuite "Task Handler": priority: 1, cancel: false, wantType: WantType.WantHave, - sendDontHave: false) + sendDontHave: false, + ) ) # don't have block @@ -592,7 +526,8 @@ asyncchecksuite "Task Handler": priority: 1, cancel: false, wantType: WantType.WantHave, - sendDontHave: false) + sendDontHave: false, + ) ) await engine.taskHandler(peersCtx[0]) diff --git 
a/tests/codex/blockexchange/engine/testpayments.nim b/tests/codex/blockexchange/engine/testpayments.nim index 03c08e09..24d5dab6 100644 --- a/tests/codex/blockexchange/engine/testpayments.nim +++ b/tests/codex/blockexchange/engine/testpayments.nim @@ -5,7 +5,6 @@ import ../../examples import ../../helpers checksuite "engine payments": - let address = EthAddress.example let amount = 42.u256 @@ -29,6 +28,6 @@ checksuite "engine payments": test "uses same channel for consecutive payments": let payment1, payment2 = wallet.pay(peer, amount) - let channel1 = payment1.?state.?channel.?getChannelId - let channel2 = payment2.?state.?channel.?getChannelId + let channel1 = payment1 .? state .? channel .? getChannelId + let channel2 = payment2 .? state .? channel .? getChannelId check channel1 == channel2 diff --git a/tests/codex/blockexchange/protobuf/testpayments.nim b/tests/codex/blockexchange/protobuf/testpayments.nim index 81bc5dfc..d0773d70 100644 --- a/tests/codex/blockexchange/protobuf/testpayments.nim +++ b/tests/codex/blockexchange/protobuf/testpayments.nim @@ -6,9 +6,7 @@ import ../../../asynctest import ../../examples import ../../helpers - checksuite "account protobuf messages": - let account = Account(address: EthAddress.example) let message = AccountMessage.init(account) @@ -16,7 +14,7 @@ checksuite "account protobuf messages": check message.address == @(account.address.toArray) test "decodes recipient of payments": - check Account.init(message).?address == account.address.some + check Account.init(message) .? address == account.address.some test "fails to decode when address has incorrect number of bytes": var incorrect = message @@ -24,7 +22,6 @@ checksuite "account protobuf messages": check Account.init(incorrect).isNone checksuite "channel update messages": - let state = SignedState.example let update = StateChannelUpdate.init(state) diff --git a/tests/codex/blockexchange/protobuf/testpresence.nim b/tests/codex/blockexchange/protobuf/testpresence.nim index 963dd0ec..7e3b94e6 100644 --- a/tests/codex/blockexchange/protobuf/testpresence.nim +++ b/tests/codex/blockexchange/protobuf/testpresence.nim @@ -7,7 +7,6 @@ import ../../examples import ../../helpers checksuite "block presence protobuf messages": - let cid = Cid.example address = BlockAddress(leaf: false, cid: cid) @@ -26,17 +25,17 @@ checksuite "block presence protobuf messages": check message.price == @(price.toBytesBE) test "decodes CID": - check Presence.init(message).?address == address.some + check Presence.init(message) .? address == address.some test "decodes have/donthave": var message = message message.`type` = BlockPresenceType.Have - check Presence.init(message).?have == true.some + check Presence.init(message) .? have == true.some message.`type` = BlockPresenceType.DontHave - check Presence.init(message).?have == false.some + check Presence.init(message) .? have == false.some test "decodes price": - check Presence.init(message).?price == price.some + check Presence.init(message) .? 
price == price.some test "fails to decode when price is invalid": var incorrect = message diff --git a/tests/codex/blockexchange/testnetwork.nim b/tests/codex/blockexchange/testnetwork.nim index 756d86a1..0fae4ffe 100644 --- a/tests/codex/blockexchange/testnetwork.nim +++ b/tests/codex/blockexchange/testnetwork.nim @@ -39,9 +39,7 @@ asyncchecksuite "Network - Handlers": done = newFuture[void]() buffer = BufferStream.new() - network = BlockExcNetwork.new( - switch = newStandardSwitch(), - connProvider = getConn) + network = BlockExcNetwork.new(switch = newStandardSwitch(), connProvider = getConn) network.setupPeer(peerId) networkPeer = network.peers[peerId] discard await networkPeer.connect() @@ -63,10 +61,8 @@ asyncchecksuite "Network - Handlers": network.handlers.onWantList = wantListHandler - let wantList = makeWantList( - blocks.mapIt( it.cid ), - 1, true, WantType.WantHave, - true, true) + let wantList = + makeWantList(blocks.mapIt(it.cid), 1, true, WantType.WantHave, true, true) let msg = Message(wantlist: wantList) await buffer.pushData(lenPrefix(protobufEncode(msg))) @@ -74,21 +70,22 @@ asyncchecksuite "Network - Handlers": await done.wait(500.millis) test "Blocks Handler": - proc blocksDeliveryHandler(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + proc blocksDeliveryHandler( + peer: PeerId, blocksDelivery: seq[BlockDelivery] + ) {.gcsafe, async.} = check blocks == blocksDelivery.mapIt(it.blk) done.complete() network.handlers.onBlocksDelivery = blocksDeliveryHandler - let msg = Message(payload: blocks.mapIt(BlockDelivery(blk: it, address: it.address))) + let msg = + Message(payload: blocks.mapIt(BlockDelivery(blk: it, address: it.address))) await buffer.pushData(lenPrefix(protobufEncode(msg))) await done.wait(500.millis) test "Presence Handler": - proc presenceHandler( - peer: PeerId, - presence: seq[BlockPresence]) {.gcsafe, async.} = + proc presenceHandler(peer: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = for b in blocks: check: b.address in presence @@ -98,11 +95,9 @@ asyncchecksuite "Network - Handlers": network.handlers.onPresence = presenceHandler let msg = Message( - blockPresences: blocks.mapIt( - BlockPresence( - address: it.address, - type: BlockPresenceType.Have - ))) + blockPresences: + blocks.mapIt(BlockPresence(address: it.address, type: BlockPresenceType.Have)) + ) await buffer.pushData(lenPrefix(protobufEncode(msg))) await done.wait(500.millis) @@ -136,8 +131,7 @@ asyncchecksuite "Network - Handlers": await done.wait(100.millis) asyncchecksuite "Network - Senders": - let - chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256) + let chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256) var switch1, switch2: Switch @@ -156,25 +150,19 @@ asyncchecksuite "Network - Senders": done = newFuture[void]() switch1 = newStandardSwitch() switch2 = newStandardSwitch() - network1 = BlockExcNetwork.new( - switch = switch1) + network1 = BlockExcNetwork.new(switch = switch1) switch1.mount(network1) - network2 = BlockExcNetwork.new( - switch = switch2) + network2 = BlockExcNetwork.new(switch = switch2) switch2.mount(network2) await switch1.start() await switch2.start() - await switch1.connect( - switch2.peerInfo.peerId, - switch2.peerInfo.addrs) + await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) teardown: - await allFuturesThrowing( - switch1.stop(), - switch2.stop()) + await allFuturesThrowing(switch1.stop(), switch2.stop()) test "Send want list": proc wantListHandler(peer: PeerId, 
wantList: WantList) {.gcsafe, async.} = @@ -194,28 +182,32 @@ asyncchecksuite "Network - Senders": network2.handlers.onWantList = wantListHandler await network1.sendWantList( switch2.peerInfo.peerId, - blocks.mapIt( it.address ), - 1, true, WantType.WantHave, - true, true) + blocks.mapIt(it.address), + 1, + true, + WantType.WantHave, + true, + true, + ) await done.wait(500.millis) test "send blocks": - proc blocksDeliveryHandler(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + proc blocksDeliveryHandler( + peer: PeerId, blocksDelivery: seq[BlockDelivery] + ) {.gcsafe, async.} = check blocks == blocksDelivery.mapIt(it.blk) done.complete() network2.handlers.onBlocksDelivery = blocksDeliveryHandler await network1.sendBlocksDelivery( - switch2.peerInfo.peerId, - blocks.mapIt(BlockDelivery(blk: it, address: it.address))) + switch2.peerInfo.peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address)) + ) await done.wait(500.millis) test "send presence": - proc presenceHandler( - peer: PeerId, - precense: seq[BlockPresence]) {.gcsafe, async.} = + proc presenceHandler(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async.} = for b in blocks: check: b.address in precense @@ -226,11 +218,8 @@ asyncchecksuite "Network - Senders": await network1.sendBlockPresence( switch2.peerInfo.peerId, - blocks.mapIt( - BlockPresence( - address: it.address, - type: BlockPresenceType.Have - ))) + blocks.mapIt(BlockPresence(address: it.address, type: BlockPresenceType.Have)), + ) await done.wait(500.millis) @@ -269,36 +258,30 @@ asyncchecksuite "Network - Test Limits": switch1 = newStandardSwitch() switch2 = newStandardSwitch() - network1 = BlockExcNetwork.new( - switch = switch1, - maxInflight = 0) + network1 = BlockExcNetwork.new(switch = switch1, maxInflight = 0) switch1.mount(network1) - network2 = BlockExcNetwork.new( - switch = switch2) + network2 = BlockExcNetwork.new(switch = switch2) switch2.mount(network2) await switch1.start() await switch2.start() - await switch1.connect( - switch2.peerInfo.peerId, - switch2.peerInfo.addrs) + await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) teardown: - await allFuturesThrowing( - switch1.stop(), - switch2.stop()) + await allFuturesThrowing(switch1.stop(), switch2.stop()) test "Concurrent Sends": let account = Account(address: EthAddress.example) - network2.handlers.onAccount = - proc(peer: PeerId, received: Account) {.gcsafe, async.} = - check false + network2.handlers.onAccount = proc( + peer: PeerId, received: Account + ) {.gcsafe, async.} = + check false let fut = network1.send( - switch2.peerInfo.peerId, - Message(account: AccountMessage.init(account))) + switch2.peerInfo.peerId, Message(account: AccountMessage.init(account)) + ) await sleepAsync(100.millis) check not fut.finished diff --git a/tests/codex/blockexchange/testpeerctxstore.nim b/tests/codex/blockexchange/testpeerctxstore.nim index dc77fbd8..6ea601d1 100644 --- a/tests/codex/blockexchange/testpeerctxstore.nim +++ b/tests/codex/blockexchange/testpeerctxstore.nim @@ -40,10 +40,12 @@ checksuite "Peer Context Store Peer Selection": setup: store = PeerCtxStore.new() addresses = collect(newSeq): - for i in 0..<10: BlockAddress(leaf: false, cid: Cid.example) + for i in 0 ..< 10: + BlockAddress(leaf: false, cid: Cid.example) peerCtxs = collect(newSeq): - for i in 0..<10: BlockExcPeerCtx.example + for i in 0 ..< 10: + BlockExcPeerCtx.example for p in peerCtxs: store.add(p) @@ -56,34 +58,33 @@ checksuite "Peer Context Store Peer Selection": test "Should select 
peers that have Cid": peerCtxs[0].blocks = collect(initTable): for i, a in addresses: - { a: Presence(address: a, price: i.u256) } + {a: Presence(address: a, price: i.u256)} peerCtxs[5].blocks = collect(initTable): for i, a in addresses: - { a: Presence(address: a, price: i.u256) } + {a: Presence(address: a, price: i.u256)} - let - peers = store.peersHave(addresses[0]) + let peers = store.peersHave(addresses[0]) check peers.len == 2 check peerCtxs[0] in peers check peerCtxs[5] in peers test "Should select peers that want Cid": - let - entries = addresses.mapIt( - WantListEntry( - address: it, - priority: 1, - cancel: false, - wantType: WantType.WantBlock, - sendDontHave: false)) + let entries = addresses.mapIt( + WantListEntry( + address: it, + priority: 1, + cancel: false, + wantType: WantType.WantBlock, + sendDontHave: false, + ) + ) peerCtxs[0].peerWants = entries peerCtxs[5].peerWants = entries - let - peers = store.peersWant(addresses[4]) + let peers = store.peersWant(addresses[4]) check peers.len == 2 check peerCtxs[0] in peers diff --git a/tests/codex/blockexchange/testpendingblocks.nim b/tests/codex/blockexchange/testpendingblocks.nim index dd94c4da..45b065c0 100644 --- a/tests/codex/blockexchange/testpendingblocks.nim +++ b/tests/codex/blockexchange/testpendingblocks.nim @@ -58,24 +58,24 @@ checksuite "Pending Blocks": test "Should get wants list": let pendingBlocks = PendingBlocksManager.new() - blks = (0..9).mapIt( bt.Block.new(("Hello " & $it).toBytes).tryGet ) + blks = (0 .. 9).mapIt(bt.Block.new(("Hello " & $it).toBytes).tryGet) - discard blks.mapIt( pendingBlocks.getWantHandle( it.cid ) ) + discard blks.mapIt(pendingBlocks.getWantHandle(it.cid)) check: - blks.mapIt( $it.cid ).sorted(cmp[string]) == - toSeq(pendingBlocks.wantListBlockCids).mapIt( $it ).sorted(cmp[string]) + blks.mapIt($it.cid).sorted(cmp[string]) == + toSeq(pendingBlocks.wantListBlockCids).mapIt($it).sorted(cmp[string]) test "Should get want handles list": let pendingBlocks = PendingBlocksManager.new() - blks = (0..9).mapIt( bt.Block.new(("Hello " & $it).toBytes).tryGet ) - handles = blks.mapIt( pendingBlocks.getWantHandle( it.cid ) ) + blks = (0 .. 
9).mapIt(bt.Block.new(("Hello " & $it).toBytes).tryGet) + handles = blks.mapIt(pendingBlocks.getWantHandle(it.cid)) wantHandles = toSeq(pendingBlocks.wantHandles) check wantHandles.len == handles.len pendingBlocks.resolve(blks.mapIt(BlockDelivery(blk: it, address: it.address))) check: - (await allFinished(wantHandles)).mapIt( $it.read.cid ).sorted(cmp[string]) == - (await allFinished(handles)).mapIt( $it.read.cid ).sorted(cmp[string]) + (await allFinished(wantHandles)).mapIt($it.read.cid).sorted(cmp[string]) == + (await allFinished(handles)).mapIt($it.read.cid).sorted(cmp[string]) diff --git a/tests/codex/examples.nim b/tests/codex/examples.nim index 2e68d236..69a85db8 100644 --- a/tests/codex/examples.nim +++ b/tests/codex/examples.nim @@ -34,10 +34,7 @@ proc example*(_: type SignedState): SignedState = wallet.pay(channel, asset, receiver, amount).get proc example*(_: type Pricing): Pricing = - Pricing( - address: EthAddress.example, - price: uint32.rand.u256 - ) + Pricing(address: EthAddress.example, price: uint32.rand.u256) proc example*(_: type bt.Block): bt.Block = let length = rand(4096) @@ -58,20 +55,23 @@ proc example*(_: type MultiHash, mcodec = Sha256HashCodec): MultiHash = let bytes = newSeqWith(256, rand(uint8)) MultiHash.digest($mcodec, bytes).tryGet() -proc example*(_: type Availability): Availability = +proc example*( + _: type Availability, collateralPerByte = uint8.example.u256 +): Availability = + let totalSize = uint16.example.u256 Availability.init( - totalSize = uint16.example.u256, + totalSize = totalSize, freeSize = uint16.example.u256, duration = uint16.example.u256, - minPrice = uint64.example.u256, - maxCollateral = uint16.example.u256 + minPricePerBytePerSecond = uint8.example.u256, + totalCollateral = totalSize * collateralPerByte, ) proc example*(_: type Reservation): Reservation = Reservation.init( availabilityId = AvailabilityId(array[32, byte].example), size = uint16.example.u256, - slotId = SlotId.example + slotId = SlotId.example, ) proc example*(_: type MerkleProof): MerkleProof = @@ -80,5 +80,5 @@ proc example*(_: type MerkleProof): MerkleProof = proc example*(_: type Poseidon2Proof): Poseidon2Proof = var example = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]() example.index = 123 - example.path = @[1, 2, 3, 4].mapIt( it.toF ) + example.path = @[1, 2, 3, 4].mapIt(it.toF) example diff --git a/tests/codex/helpers.nim b/tests/codex/helpers.nim index 89aeafd1..6d7415d3 100644 --- a/tests/codex/helpers.nim +++ b/tests/codex/helpers.nim @@ -41,25 +41,25 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] = let vbytes = PB.toBytes(msg.len().uint64) var buf = newSeqUninitialized[byte](msg.len() + vbytes.len) - buf[0.. 
0): - + while (let chunk = await chunker.getBytes(); chunk.len > 0): let blk = Block.new(chunk).tryGet() cids.add(blk.cid) (await store.putBlock(blk)).tryGet() @@ -98,19 +100,20 @@ proc storeDataGetManifest*(store: BlockStore, chunker: Chunker): Future[Manifest manifest = Manifest.new( treeCid = treeCid, blockSize = NBytes(chunker.chunkSize), - datasetSize = NBytes(chunker.offset)) + datasetSize = NBytes(chunker.offset), + ) - for i in 0..= dataset.len: return 0 var read = 0 - while read < len and - read < chunkSize.int and - (consumed + read) < dataset.len: + while read < len and read < chunkSize.int and (consumed + read) < dataset.len: data[read] = dataset[consumed + read] read.inc consumed += read return read - Chunker.new( - reader = reader, - pad = pad, - chunkSize = chunkSize) + Chunker.new(reader = reader, pad = pad, chunkSize = chunkSize) diff --git a/tests/codex/helpers/mockclock.nim b/tests/codex/helpers/mockclock.nim index 75a251c9..be1eb4d2 100644 --- a/tests/codex/helpers/mockclock.nim +++ b/tests/codex/helpers/mockclock.nim @@ -8,14 +8,12 @@ type MockClock* = ref object of Clock time: SecondsSince1970 waiting: seq[Waiting] + Waiting = ref object until: SecondsSince1970 future: Future[void] -func new*( - _: type MockClock, - time: SecondsSince1970 = getTime().toUnix -): MockClock = +func new*(_: type MockClock, time: SecondsSince1970 = getTime().toUnix): MockClock = ## Create a mock clock instance MockClock(time: time) diff --git a/tests/codex/helpers/mockdiscovery.nim b/tests/codex/helpers/mockdiscovery.nim index 5d5e8132..42ad76a9 100644 --- a/tests/codex/helpers/mockdiscovery.nim +++ b/tests/codex/helpers/mockdiscovery.nim @@ -13,32 +13,24 @@ import pkg/questionable import pkg/codex/discovery import pkg/contractabi/address as ca -type - MockDiscovery* = ref object of Discovery - findBlockProvidersHandler*: proc(d: MockDiscovery, cid: Cid): - Future[seq[SignedPeerRecord]] {.gcsafe.} - publishBlockProvideHandler*: proc(d: MockDiscovery, cid: Cid): - Future[void] {.gcsafe.} - findHostProvidersHandler*: proc(d: MockDiscovery, host: ca.Address): - Future[seq[SignedPeerRecord]] {.gcsafe.} - publishHostProvideHandler*: proc(d: MockDiscovery, host: ca.Address): - Future[void] {.gcsafe.} +type MockDiscovery* = ref object of Discovery + findBlockProvidersHandler*: + proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.gcsafe.} + publishBlockProvideHandler*: proc(d: MockDiscovery, cid: Cid): Future[void] {.gcsafe.} + findHostProvidersHandler*: + proc(d: MockDiscovery, host: ca.Address): Future[seq[SignedPeerRecord]] {.gcsafe.} + publishHostProvideHandler*: + proc(d: MockDiscovery, host: ca.Address): Future[void] {.gcsafe.} proc new*(T: type MockDiscovery): MockDiscovery = MockDiscovery() -proc findPeer*( - d: Discovery, - peerId: PeerId -): Future[?PeerRecord] {.async.} = +proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} = ## mock find a peer - always return none ## return none(PeerRecord) -method find*( - d: MockDiscovery, - cid: Cid -): Future[seq[SignedPeerRecord]] {.async.} = +method find*(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = if isNil(d.findBlockProvidersHandler): return @@ -51,8 +43,7 @@ method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} = await d.publishBlockProvideHandler(d, cid) method find*( - d: MockDiscovery, - host: ca.Address + d: MockDiscovery, host: ca.Address ): Future[seq[SignedPeerRecord]] {.async.} = if isNil(d.findHostProvidersHandler): return diff --git 
a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index 358a5206..bb0eaaa2 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -48,16 +48,20 @@ type canReserveSlot*: bool reserveSlotThrowError*: ?(ref MarketError) clock: ?Clock + Fulfillment* = object requestId*: RequestId proof*: Groth16Proof host*: Address + MockSlot* = object requestId*: RequestId host*: Address slotIndex*: UInt256 proof*: Groth16Proof timestamp: ?SecondsSince1970 + collateral*: UInt256 + Subscriptions = object onRequest: seq[RequestSubscription] onFulfillment: seq[FulfillmentSubscription] @@ -67,32 +71,40 @@ type onRequestCancelled: seq[RequestCancelledSubscription] onRequestFailed: seq[RequestFailedSubscription] onProofSubmitted: seq[ProofSubmittedSubscription] + RequestSubscription* = ref object of Subscription market: MockMarket callback: OnRequest + FulfillmentSubscription* = ref object of Subscription market: MockMarket requestId: ?RequestId callback: OnFulfillment + SlotFilledSubscription* = ref object of Subscription market: MockMarket requestId: ?RequestId slotIndex: ?UInt256 callback: OnSlotFilled + SlotFreedSubscription* = ref object of Subscription market: MockMarket callback: OnSlotFreed + SlotReservationsFullSubscription* = ref object of Subscription market: MockMarket callback: OnSlotReservationsFull + RequestCancelledSubscription* = ref object of Subscription market: MockMarket requestId: ?RequestId callback: OnRequestCancelled + RequestFailedSubscription* = ref object of Subscription market: MockMarket requestId: ?RequestId callback: OnRequestCancelled + ProofSubmittedSubscription = ref object of Subscription market: MockMarket callback: OnProofSubmitted @@ -111,17 +123,15 @@ proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket = repairRewardPercentage: 10, maxNumberOfSlashes: 5, slashCriterion: 3, - slashPercentage: 10 + slashPercentage: 10, ), proofs: ProofConfig( - period: 10.u256, - timeout: 5.u256, - downtime: 64.uint8, - downtimeProduct: 67.uint8 - ) + period: 10.u256, timeout: 5.u256, downtime: 64.uint8, downtimeProduct: 67.uint8 + ), + ) + MockMarket( + signer: Address.example, config: config, canReserveSlot: true, clock: clock ) - MockMarket(signer: Address.example, config: config, - canReserveSlot: true, clock: clock) method getSigner*(market: MockMarket): Future[Address] {.async.} = return market.signer @@ -145,9 +155,7 @@ method requestStorage*(market: MockMarket, request: StorageRequest) {.async.} = market.requested.add(request) var subscriptions = market.subscriptions.onRequest for subscription in subscriptions: - subscription.callback(request.id, - request.ask, - request.expiry) + subscription.callback(request.id, request.ask, request.expiry) method myRequests*(market: MockMarket): Future[seq[RequestId]] {.async.} = return market.activeRequests[market.signer] @@ -155,75 +163,75 @@ method myRequests*(market: MockMarket): Future[seq[RequestId]] {.async.} = method mySlots*(market: MockMarket): Future[seq[SlotId]] {.async.} = return market.activeSlots[market.signer] -method getRequest*(market: MockMarket, - id: RequestId): Future[?StorageRequest] {.async.} = +method getRequest*( + market: MockMarket, id: RequestId +): Future[?StorageRequest] {.async.} = for request in market.requested: if request.id == id: return some request return none StorageRequest -method getActiveSlot*( - market: MockMarket, - slotId: SlotId): Future[?Slot] {.async.} = - +method getActiveSlot*(market: MockMarket, slotId: SlotId): 
Future[?Slot] {.async.} = for slot in market.filled: if slotId(slot.requestId, slot.slotIndex) == slotId and - request =? await market.getRequest(slot.requestId): + request =? await market.getRequest(slot.requestId): return some Slot(request: request, slotIndex: slot.slotIndex) return none Slot -method requestState*(market: MockMarket, - requestId: RequestId): Future[?RequestState] {.async.} = - return market.requestState.?[requestId] +method requestState*( + market: MockMarket, requestId: RequestId +): Future[?RequestState] {.async.} = + return market.requestState .? [requestId] -method slotState*(market: MockMarket, - slotId: SlotId): Future[SlotState] {.async.} = +method slotState*(market: MockMarket, slotId: SlotId): Future[SlotState] {.async.} = if not market.slotState.hasKey(slotId): return SlotState.Free return market.slotState[slotId] -method getRequestEnd*(market: MockMarket, - id: RequestId): Future[SecondsSince1970] {.async.} = +method getRequestEnd*( + market: MockMarket, id: RequestId +): Future[SecondsSince1970] {.async.} = return market.requestEnds[id] -method requestExpiresAt*(market: MockMarket, - id: RequestId): Future[SecondsSince1970] {.async.} = +method requestExpiresAt*( + market: MockMarket, id: RequestId +): Future[SecondsSince1970] {.async.} = return market.requestExpiry[id] -method getHost*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256): Future[?Address] {.async.} = +method getHost*( + market: MockMarket, requestId: RequestId, slotIndex: UInt256 +): Future[?Address] {.async.} = for slot in market.filled: if slot.requestId == requestId and slot.slotIndex == slotIndex: return some slot.host return none Address -proc emitSlotFilled*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256) = +method currentCollateral*( + market: MockMarket, slotId: SlotId +): Future[UInt256] {.async.} = + for slot in market.filled: + if slotId == slotId(slot.requestId, slot.slotIndex): + return slot.collateral + return 0.u256 + +proc emitSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: UInt256) = var subscriptions = market.subscriptions.onSlotFilled for subscription in subscriptions: let requestMatches = - subscription.requestId.isNone or - subscription.requestId == some requestId + subscription.requestId.isNone or subscription.requestId == some requestId let slotMatches = - subscription.slotIndex.isNone or - subscription.slotIndex == some slotIndex + subscription.slotIndex.isNone or subscription.slotIndex == some slotIndex if requestMatches and slotMatches: subscription.callback(requestId, slotIndex) -proc emitSlotFreed*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256) = +proc emitSlotFreed*(market: MockMarket, requestId: RequestId, slotIndex: UInt256) = var subscriptions = market.subscriptions.onSlotFreed for subscription in subscriptions: subscription.callback(requestId, slotIndex) proc emitSlotReservationsFull*( - market: MockMarket, - requestId: RequestId, - slotIndex: UInt256) = - + market: MockMarket, requestId: RequestId, slotIndex: UInt256 +) = var subscriptions = market.subscriptions.onSlotReservationsFull for subscription in subscriptions: subscription.callback(requestId, slotIndex) @@ -231,46 +239,49 @@ proc emitSlotReservationsFull*( proc emitRequestCancelled*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onRequestCancelled for subscription in subscriptions: - if subscription.requestId == requestId.some or - subscription.requestId.isNone: + if subscription.requestId == 
requestId.some or subscription.requestId.isNone: subscription.callback(requestId) proc emitRequestFulfilled*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onFulfillment for subscription in subscriptions: - if subscription.requestId == requestId.some or - subscription.requestId.isNone: + if subscription.requestId == requestId.some or subscription.requestId.isNone: subscription.callback(requestId) proc emitRequestFailed*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onRequestFailed for subscription in subscriptions: - if subscription.requestId == requestId.some or - subscription.requestId.isNone: + if subscription.requestId == requestId.some or subscription.requestId.isNone: subscription.callback(requestId) -proc fillSlot*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256, - proof: Groth16Proof, - host: Address) = +proc fillSlot*( + market: MockMarket, + requestId: RequestId, + slotIndex: UInt256, + proof: Groth16Proof, + host: Address, + collateral = 0.u256, +) = let slot = MockSlot( requestId: requestId, slotIndex: slotIndex, proof: proof, host: host, - timestamp: market.clock.?now + timestamp: market.clock .? now, + collateral: collateral, ) market.filled.add(slot) market.slotState[slotId(slot.requestId, slot.slotIndex)] = SlotState.Filled market.emitSlotFilled(requestId, slotIndex) -method fillSlot*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256, - proof: Groth16Proof, - collateral: UInt256) {.async.} = - market.fillSlot(requestId, slotIndex, proof, market.signer) +method fillSlot*( + market: MockMarket, + requestId: RequestId, + slotIndex: UInt256, + proof: Groth16Proof, + collateral: UInt256, +) {.async.} = + market.fillSlot(requestId, slotIndex, proof, market.signer, collateral) method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} = market.freed.add(slotId) @@ -280,11 +291,10 @@ method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} = break market.slotState[slotId] = SlotState.Free -method withdrawFunds*(market: MockMarket, - requestId: RequestId) {.async.} = +method withdrawFunds*(market: MockMarket, requestId: RequestId) {.async.} = market.withdrawn.add(requestId) - if state =? market.requestState.?[requestId] and state == RequestState.Cancelled: + if state =? market.requestState .? 
[requestId] and state == RequestState.Cancelled: market.emitRequestCancelled(requestId) proc setProofRequired*(mock: MockMarket, id: SlotId, required: bool) = @@ -293,8 +303,7 @@ proc setProofRequired*(mock: MockMarket, id: SlotId, required: bool) = else: mock.proofsRequired.excl(id) -method isProofRequired*(mock: MockMarket, - id: SlotId): Future[bool] {.async.} = +method isProofRequired*(mock: MockMarket, id: SlotId): Future[bool] {.async.} = return mock.proofsRequired.contains(id) proc setProofToBeRequired*(mock: MockMarket, id: SlotId, required: bool) = @@ -303,8 +312,7 @@ proc setProofToBeRequired*(mock: MockMarket, id: SlotId, required: bool) = else: mock.proofsToBeRequired.excl(id) -method willProofBeRequired*(mock: MockMarket, - id: SlotId): Future[bool] {.async.} = +method willProofBeRequired*(mock: MockMarket, id: SlotId): Future[bool] {.async.} = return mock.proofsToBeRequired.contains(id) method getChallenge*(mock: MockMarket, id: SlotId): Future[ProofChallenge] {.async.} = @@ -318,9 +326,7 @@ method submitProof*(mock: MockMarket, id: SlotId, proof: Groth16Proof) {.async.} for subscription in mock.subscriptions.onProofSubmitted: subscription.callback(id) -method markProofAsMissing*(market: MockMarket, - id: SlotId, - period: Period) {.async.} = +method markProofAsMissing*(market: MockMarket, id: SlotId, period: Period) {.async.} = market.markedAsMissingProofs.add(id) proc setCanProofBeMarkedAsMissing*(mock: MockMarket, id: SlotId, required: bool) = @@ -329,204 +335,172 @@ proc setCanProofBeMarkedAsMissing*(mock: MockMarket, id: SlotId, required: bool) else: mock.canBeMarkedAsMissing.excl(id) -method canProofBeMarkedAsMissing*(market: MockMarket, - id: SlotId, - period: Period): Future[bool] {.async.} = +method canProofBeMarkedAsMissing*( + market: MockMarket, id: SlotId, period: Period +): Future[bool] {.async.} = return market.canBeMarkedAsMissing.contains(id) method reserveSlot*( - market: MockMarket, - requestId: RequestId, - slotIndex: UInt256) {.async.} = - + market: MockMarket, requestId: RequestId, slotIndex: UInt256 +) {.async.} = if error =? 
market.reserveSlotThrowError: raise error method canReserveSlot*( - market: MockMarket, - requestId: RequestId, - slotIndex: UInt256): Future[bool] {.async.} = - + market: MockMarket, requestId: RequestId, slotIndex: UInt256 +): Future[bool] {.async.} = return market.canReserveSlot func setCanReserveSlot*(market: MockMarket, canReserveSlot: bool) = market.canReserveSlot = canReserveSlot -func setReserveSlotThrowError*( - market: MockMarket, error: ?(ref MarketError)) = - +func setReserveSlotThrowError*(market: MockMarket, error: ?(ref MarketError)) = market.reserveSlotThrowError = error -method subscribeRequests*(market: MockMarket, - callback: OnRequest): - Future[Subscription] {.async.} = - let subscription = RequestSubscription( - market: market, - callback: callback - ) +method subscribeRequests*( + market: MockMarket, callback: OnRequest +): Future[Subscription] {.async.} = + let subscription = RequestSubscription(market: market, callback: callback) market.subscriptions.onRequest.add(subscription) return subscription -method subscribeFulfillment*(market: MockMarket, - callback: OnFulfillment): - Future[Subscription] {.async.} = +method subscribeFulfillment*( + market: MockMarket, callback: OnFulfillment +): Future[Subscription] {.async.} = let subscription = FulfillmentSubscription( - market: market, - requestId: none RequestId, - callback: callback + market: market, requestId: none RequestId, callback: callback ) market.subscriptions.onFulfillment.add(subscription) return subscription -method subscribeFulfillment*(market: MockMarket, - requestId: RequestId, - callback: OnFulfillment): - Future[Subscription] {.async.} = +method subscribeFulfillment*( + market: MockMarket, requestId: RequestId, callback: OnFulfillment +): Future[Subscription] {.async.} = let subscription = FulfillmentSubscription( - market: market, - requestId: some requestId, - callback: callback + market: market, requestId: some requestId, callback: callback ) market.subscriptions.onFulfillment.add(subscription) return subscription -method subscribeSlotFilled*(market: MockMarket, - callback: OnSlotFilled): - Future[Subscription] {.async.} = +method subscribeSlotFilled*( + market: MockMarket, callback: OnSlotFilled +): Future[Subscription] {.async.} = let subscription = SlotFilledSubscription(market: market, callback: callback) market.subscriptions.onSlotFilled.add(subscription) return subscription -method subscribeSlotFilled*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256, - callback: OnSlotFilled): - Future[Subscription] {.async.} = +method subscribeSlotFilled*( + market: MockMarket, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled +): Future[Subscription] {.async.} = let subscription = SlotFilledSubscription( market: market, requestId: some requestId, slotIndex: some slotIndex, - callback: callback + callback: callback, ) market.subscriptions.onSlotFilled.add(subscription) return subscription -method subscribeSlotFreed*(market: MockMarket, - callback: OnSlotFreed): - Future[Subscription] {.async.} = +method subscribeSlotFreed*( + market: MockMarket, callback: OnSlotFreed +): Future[Subscription] {.async.} = let subscription = SlotFreedSubscription(market: market, callback: callback) market.subscriptions.onSlotFreed.add(subscription) return subscription method subscribeSlotReservationsFull*( - market: MockMarket, - callback: OnSlotReservationsFull): Future[Subscription] {.async.} = - + market: MockMarket, callback: OnSlotReservationsFull +): Future[Subscription] {.async.} = let 
subscription = SlotReservationsFullSubscription(market: market, callback: callback) market.subscriptions.onSlotReservationsFull.add(subscription) return subscription -method subscribeRequestCancelled*(market: MockMarket, - callback: OnRequestCancelled): - Future[Subscription] {.async.} = +method subscribeRequestCancelled*( + market: MockMarket, callback: OnRequestCancelled +): Future[Subscription] {.async.} = let subscription = RequestCancelledSubscription( - market: market, - requestId: none RequestId, - callback: callback + market: market, requestId: none RequestId, callback: callback ) market.subscriptions.onRequestCancelled.add(subscription) return subscription -method subscribeRequestCancelled*(market: MockMarket, - requestId: RequestId, - callback: OnRequestCancelled): - Future[Subscription] {.async.} = +method subscribeRequestCancelled*( + market: MockMarket, requestId: RequestId, callback: OnRequestCancelled +): Future[Subscription] {.async.} = let subscription = RequestCancelledSubscription( - market: market, - requestId: some requestId, - callback: callback + market: market, requestId: some requestId, callback: callback ) market.subscriptions.onRequestCancelled.add(subscription) return subscription -method subscribeRequestFailed*(market: MockMarket, - callback: OnRequestFailed): - Future[Subscription] {.async.} = +method subscribeRequestFailed*( + market: MockMarket, callback: OnRequestFailed +): Future[Subscription] {.async.} = let subscription = RequestFailedSubscription( - market: market, - requestId: none RequestId, - callback: callback + market: market, requestId: none RequestId, callback: callback ) market.subscriptions.onRequestFailed.add(subscription) return subscription -method subscribeRequestFailed*(market: MockMarket, - requestId: RequestId, - callback: OnRequestFailed): - Future[Subscription] {.async.} = +method subscribeRequestFailed*( + market: MockMarket, requestId: RequestId, callback: OnRequestFailed +): Future[Subscription] {.async.} = let subscription = RequestFailedSubscription( - market: market, - requestId: some requestId, - callback: callback + market: market, requestId: some requestId, callback: callback ) market.subscriptions.onRequestFailed.add(subscription) return subscription -method subscribeProofSubmission*(mock: MockMarket, - callback: OnProofSubmitted): - Future[Subscription] {.async.} = - let subscription = ProofSubmittedSubscription( - market: mock, - callback: callback - ) +method subscribeProofSubmission*( + mock: MockMarket, callback: OnProofSubmitted +): Future[Subscription] {.async.} = + let subscription = ProofSubmittedSubscription(market: mock, callback: callback) mock.subscriptions.onProofSubmitted.add(subscription) return subscription method queryPastStorageRequestedEvents*( - market: MockMarket, - fromBlock: BlockTag): Future[seq[StorageRequested]] {.async.} = - return market.requested.map(request => - StorageRequested(requestId: request.id, - ask: request.ask, - expiry: request.expiry) + market: MockMarket, fromBlock: BlockTag +): Future[seq[StorageRequested]] {.async.} = + return market.requested.map( + request => + StorageRequested(requestId: request.id, ask: request.ask, expiry: request.expiry) ) method queryPastStorageRequestedEvents*( - market: MockMarket, - blocksAgo: int): Future[seq[StorageRequested]] {.async.} = - return market.requested.map(request => - StorageRequested(requestId: request.id, - ask: request.ask, - expiry: request.expiry) + market: MockMarket, blocksAgo: int +): Future[seq[StorageRequested]] {.async.} = + 
return market.requested.map( + request => + StorageRequested(requestId: request.id, ask: request.ask, expiry: request.expiry) ) method queryPastSlotFilledEvents*( - market: MockMarket, - fromBlock: BlockTag): Future[seq[SlotFilled]] {.async.} = - return market.filled.map(slot => - SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) + market: MockMarket, fromBlock: BlockTag +): Future[seq[SlotFilled]] {.async.} = + return market.filled.map( + slot => SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) ) method queryPastSlotFilledEvents*( - market: MockMarket, - blocksAgo: int): Future[seq[SlotFilled]] {.async.} = - return market.filled.map(slot => - SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) + market: MockMarket, blocksAgo: int +): Future[seq[SlotFilled]] {.async.} = + return market.filled.map( + slot => SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) ) method queryPastSlotFilledEvents*( - market: MockMarket, - fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.async.} = + market: MockMarket, fromTime: SecondsSince1970 +): Future[seq[SlotFilled]] {.async.} = let filtered = market.filled.filter( - proc (slot: MockSlot): bool = + proc(slot: MockSlot): bool = if timestamp =? slot.timestamp: return timestamp >= fromTime else: true ) - return filtered.map(slot => - SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) + return filtered.map( + slot => SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) ) method unsubscribe*(subscription: RequestSubscription) {.async.} = diff --git a/tests/codex/helpers/mockrepostore.nim b/tests/codex/helpers/mockrepostore.nim index a29b7d2f..50b47f02 100644 --- a/tests/codex/helpers/mockrepostore.nim +++ b/tests/codex/helpers/mockrepostore.nim @@ -16,21 +16,22 @@ import pkg/questionable/results import pkg/codex/stores/repostore import pkg/codex/utils/asynciter -type - MockRepoStore* = ref object of RepoStore - delBlockCids*: seq[Cid] - getBeMaxNumber*: int - getBeOffset*: int +type MockRepoStore* = ref object of RepoStore + delBlockCids*: seq[Cid] + getBeMaxNumber*: int + getBeOffset*: int - testBlockExpirations*: seq[BlockExpiration] - getBlockExpirationsThrows*: bool + testBlockExpirations*: seq[BlockExpiration] + getBlockExpirationsThrows*: bool method delBlock*(self: MockRepoStore, cid: Cid): Future[?!void] {.async.} = self.delBlockCids.add(cid) self.testBlockExpirations = self.testBlockExpirations.filterIt(it.cid != cid) return success() -method getBlockExpirations*(self: MockRepoStore, maxNumber: int, offset: int): Future[?!AsyncIter[BlockExpiration]] {.async.} = +method getBlockExpirations*( + self: MockRepoStore, maxNumber: int, offset: int +): Future[?!AsyncIter[BlockExpiration]] {.async.} = if self.getBlockExpirationsThrows: raise new CatchableError @@ -42,10 +43,11 @@ method getBlockExpirations*(self: MockRepoStore, maxNumber: int, offset: int): F limit = min(offset + maxNumber, len(testBlockExpirationsCpy)) let - iter1 = AsyncIter[int].new(offset..= size: return 0 @@ -45,7 +45,4 @@ proc new*( consumed += read return read - Chunker.new( - reader = reader, - pad = pad, - chunkSize = chunkSize) + Chunker.new(reader = reader, pad = pad, chunkSize = chunkSize) diff --git a/tests/codex/merkletree/generictreetests.nim b/tests/codex/merkletree/generictreetests.nim index df9ba33c..0e1f7c9f 100644 --- a/tests/codex/merkletree/generictreetests.nim +++ b/tests/codex/merkletree/generictreetests.nim @@ -3,127 +3,109 @@ import std/unittest import pkg/codex/merkletree proc 
testGenericTree*[H, K, U]( - name: string, - data: openArray[H], - zero: H, - compress: proc(z, y: H, key: K): H, - makeTree: proc(data: seq[H]): U) = - - let - data = @data + name: string, + data: openArray[H], + zero: H, + compress: proc(z, y: H, key: K): H, + makeTree: proc(data: seq[H]): U, +) = + let data = @data suite "Correctness tests - " & name: - test "Should build correct tree for even bottom layer": - let - expectedRoot = compress( - compress( - compress(data[0], data[1], K.KeyBottomLayer), - compress(data[2], data[3], K.KeyBottomLayer), - K.KeyNone - ), - compress( - compress(data[4], data[5], K.KeyBottomLayer), - compress(data[6], data[7], K.KeyBottomLayer), - K.KeyNone - ), - K.KeyNone - ) + let expectedRoot = compress( + compress( + compress(data[0], data[1], K.KeyBottomLayer), + compress(data[2], data[3], K.KeyBottomLayer), + K.KeyNone, + ), + compress( + compress(data[4], data[5], K.KeyBottomLayer), + compress(data[6], data[7], K.KeyBottomLayer), + K.KeyNone, + ), + K.KeyNone, + ) - let - tree = makeTree( data[0..7] ) + let tree = makeTree(data[0 .. 7]) check: tree.root.tryGet == expectedRoot test "Should build correct tree for odd bottom layer": - let - expectedRoot = compress( - compress( - compress(data[0], data[1], K.KeyBottomLayer), - compress(data[2], data[3], K.KeyBottomLayer), - K.KeyNone - ), - compress( - compress(data[4], data[5], K.KeyBottomLayer), - compress(data[6], zero, K.KeyOddAndBottomLayer), - K.KeyNone - ), - K.KeyNone - ) + let expectedRoot = compress( + compress( + compress(data[0], data[1], K.KeyBottomLayer), + compress(data[2], data[3], K.KeyBottomLayer), + K.KeyNone, + ), + compress( + compress(data[4], data[5], K.KeyBottomLayer), + compress(data[6], zero, K.KeyOddAndBottomLayer), + K.KeyNone, + ), + K.KeyNone, + ) - let - tree = makeTree( data[0..6] ) + let tree = makeTree(data[0 .. 6]) check: tree.root.tryGet == expectedRoot test "Should build correct tree for even bottom and odd upper layers": - let - expectedRoot = compress( + let expectedRoot = compress( + compress( compress( - compress( - compress(data[0], data[1], K.KeyBottomLayer), - compress(data[2], data[3], K.KeyBottomLayer), - K.KeyNone - ), - compress( - compress(data[4], data[5], K.KeyBottomLayer), - compress(data[6], data[7], K.KeyBottomLayer), - K.KeyNone - ), - K.KeyNone + compress(data[0], data[1], K.KeyBottomLayer), + compress(data[2], data[3], K.KeyBottomLayer), + K.KeyNone, ), compress( - compress( - compress(data[8], data[9], K.KeyBottomLayer), - zero, - K.KeyOdd - ), - zero, - K.KeyOdd + compress(data[4], data[5], K.KeyBottomLayer), + compress(data[6], data[7], K.KeyBottomLayer), + K.KeyNone, ), - K.KeyNone - ) + K.KeyNone, + ), + compress( + compress(compress(data[8], data[9], K.KeyBottomLayer), zero, K.KeyOdd), + zero, + K.KeyOdd, + ), + K.KeyNone, + ) - let - tree = makeTree( data[0..9] ) + let tree = makeTree(data[0 .. 
9]) check: tree.root.tryGet == expectedRoot test "Should get and validate correct proofs": - let - expectedRoot = compress( + let expectedRoot = compress( + compress( compress( - compress( - compress(data[0], data[1], K.KeyBottomLayer), - compress(data[2], data[3], K.KeyBottomLayer), - K.KeyNone - ), - compress( - compress(data[4], data[5], K.KeyBottomLayer), - compress(data[6], data[7], K.KeyBottomLayer), - K.KeyNone - ), - K.KeyNone + compress(data[0], data[1], K.KeyBottomLayer), + compress(data[2], data[3], K.KeyBottomLayer), + K.KeyNone, ), compress( - compress( - compress(data[8], data[9], K.KeyBottomLayer), - zero, - K.KeyOdd - ), - zero, - K.KeyOdd + compress(data[4], data[5], K.KeyBottomLayer), + compress(data[6], data[7], K.KeyBottomLayer), + K.KeyNone, ), - K.KeyNone - ) + K.KeyNone, + ), + compress( + compress(compress(data[8], data[9], K.KeyBottomLayer), zero, K.KeyOdd), + zero, + K.KeyOdd, + ), + K.KeyNone, + ) - let - tree = makeTree( data ) + let tree = makeTree(data) - for i in 0.. 0): + while (let chunk = await chunker.getBytes(); chunk.len > 0): await stream.pushData(chunk) finally: await stream.pushEof() @@ -101,20 +98,27 @@ template setupAndTearDown*() {.dirty.} = blockDiscovery = Discovery.new( switch.peerInfo.privateKey, - announceAddrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/0") - .expect("Should return multiaddress")]) + announceAddrs = + @[ + MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("Should return multiaddress") + ], + ) peerStore = PeerCtxStore.new() pendingBlocks = PendingBlocksManager.new() - discovery = DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) + discovery = + DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) advertiser = Advertiser.new(localStore, blockDiscovery) - engine = BlockExcEngine.new(localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks) + engine = BlockExcEngine.new( + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) store = NetworkStore.new(engine, localStore) node = CodexNodeRef.new( switch = switch, networkStore = store, engine = engine, prover = Prover.none, - discovery = blockDiscovery) + discovery = blockDiscovery, + ) teardown: close(file) diff --git a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim index 83d1ee98..cce6d5bd 100644 --- a/tests/codex/node/testcontracts.nim +++ b/tests/codex/node/testcontracts.nim @@ -65,16 +65,16 @@ asyncchecksuite "Test Node - Host contracts": node.contracts = ( none ClientInteractions, some HostInteractions.new(clock, sales), - none ValidatorInteractions) + none ValidatorInteractions, + ) await node.start() # Populate manifest in local store manifest = await storeDataGetManifest(localStore, chunker) let - manifestBlock = bt.Block.new( - manifest.encode().tryGet(), - codec = ManifestCodec).tryGet() + manifestBlock = + bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) manifestCid = manifestBlock.cid @@ -85,9 +85,8 @@ asyncchecksuite "Test Node - Host contracts": protected = (await erasure.encode(manifest, 3, 2)).tryGet() builder = Poseidon2Builder.new(localStore, protected).tryGet() verifiable = (await builder.buildManifest()).tryGet() - verifiableBlock = bt.Block.new( - verifiable.encode().tryGet(), - codec = ManifestCodec).tryGet() + verifiableBlock = + bt.Block.new(verifiable.encode().tryGet(), codec = ManifestCodec).tryGet() (await 
localStore.putBlock(verifiableBlock)).tryGet() @@ -102,7 +101,7 @@ asyncchecksuite "Test Node - Host contracts": (await expiryUpdateCallback(manifestCidStr, expectedExpiry)).tryGet() - for index in 0.. 0 and blocks.len <= batchSize - return success() - )).tryGet() + for batchSize in 1 .. 12: + ( + await node.fetchBatched( + manifest, + batchSize = batchSize, + proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, async.} = + check blocks.len > 0 and blocks.len <= batchSize + return success(), + ) + ).tryGet() test "Store and retrieve Data Stream": let stream = BufferStream.new() storeFut = node.store(stream) - oddChunkSize = math.trunc(DefaultBlockSize.float / 3.14).NBytes # Let's check that node.store can correctly rechunk these odd chunks - oddChunker = FileChunker.new(file = file, chunkSize = oddChunkSize, pad = false) # TODO: doesn't work with pad=tue + oddChunkSize = math.trunc(DefaultBlockSize.float / 3.14).NBytes + # Let's check that node.store can correctly rechunk these odd chunks + oddChunker = FileChunker.new(file = file, chunkSize = oddChunkSize, pad = false) + # TODO: doesn't work with pad=tue - var - original: seq[byte] + var original: seq[byte] try: - while ( - let chunk = await oddChunker.getBytes(); - chunk.len > 0): + while (let chunk = await oddChunker.getBytes(); chunk.len > 0): original &= chunk await stream.pushData(chunk) finally: @@ -129,7 +128,8 @@ asyncchecksuite "Test Node - Basic": (await localStore.putBlock(blk)).tryGet() let stream = (await node.retrieve(blk.cid)).tryGet() - defer: await stream.close() + defer: + await stream.close() var data = newSeq[byte](testString.len) await stream.readExactly(addr data[0], data.len) @@ -139,28 +139,28 @@ asyncchecksuite "Test Node - Basic": let erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) manifest = await storeDataGetManifest(localStore, chunker) - manifestBlock = bt.Block.new( - manifest.encode().tryGet(), - codec = ManifestCodec).tryGet() + manifestBlock = + bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() protected = (await erasure.encode(manifest, 3, 2)).tryGet() builder = Poseidon2Builder.new(localStore, protected).tryGet() verifiable = (await builder.buildManifest()).tryGet() - verifiableBlock = bt.Block.new( - verifiable.encode().tryGet(), - codec = ManifestCodec).tryGet() + verifiableBlock = + bt.Block.new(verifiable.encode().tryGet(), codec = ManifestCodec).tryGet() (await localStore.putBlock(manifestBlock)).tryGet() - let - request = (await node.setupRequest( + let request = ( + await node.setupRequest( cid = manifestBlock.cid, nodes = 5, tolerance = 2, duration = 100.u256, - reward = 2.u256, + pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, expiry = 200.u256, - collateral = 200.u256)).tryGet + collateralPerByte = 1.u256, + ) + ).tryGet check: (await verifiableBlock.cid in localStore) == true diff --git a/tests/codex/sales/states/testcancelled.nim b/tests/codex/sales/states/testcancelled.nim index e252cd9c..d2568b98 100644 --- a/tests/codex/sales/states/testcancelled.nim +++ b/tests/codex/sales/states/testcancelled.nim @@ -15,31 +15,40 @@ import ../../helpers/mockclock asyncchecksuite "sales state 'cancelled'": let request = StorageRequest.example let slotIndex = (request.ask.slots div 2).u256 - let market = MockMarket.new() let clock = MockClock.new() + let currentCollateral = UInt256.example + + var market: MockMarket var state: SaleCancelled var agent: SalesAgent - var returnBytesWas = false - var reprocessSlotWas = false + var returnBytesWas = 
bool.none + var reprocessSlotWas = bool.none + var returnedCollateralValue = UInt256.none setup: - let onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} = - returnBytesWas = returnBytes - reprocessSlotWas = reprocessSlot + market = MockMarket.new() + let onCleanUp = proc( + returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + ) {.async.} = + returnBytesWas = some returnBytes + reprocessSlotWas = some reprocessSlot + returnedCollateralValue = returnedCollateral - let context = SalesContext( - market: market, - clock: clock - ) - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + let context = SalesContext(market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) agent.onCleanUp = onCleanUp state = SaleCancelled.new() - test "calls onCleanUp with returnBytes = false and reprocessSlot = true": + test "calls onCleanUp with returnBytes = true, reprocessSlot = false, and returnedCollateral = currentCollateral": + market.fillSlot( + requestId = request.id, + slotIndex = slotIndex, + proof = Groth16Proof.default, + host = Address.example, + collateral = currentCollateral, + ) discard await state.run(agent) - check eventually returnBytesWas == true - check eventually reprocessSlotWas == false + check eventually returnBytesWas == some true + check eventually reprocessSlotWas == some false + check eventually returnedCollateralValue == some currentCollateral diff --git a/tests/codex/sales/states/testerrored.nim b/tests/codex/sales/states/testerrored.nim index dc525894..9c8ee17a 100644 --- a/tests/codex/sales/states/testerrored.nim +++ b/tests/codex/sales/states/testerrored.nim @@ -24,26 +24,19 @@ asyncchecksuite "sales state 'errored'": var reprocessSlotWas = false setup: - let onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} = - returnBytesWas = returnBytes - reprocessSlotWas = reprocessSlot + let onCleanUp = proc( + returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + ) {.async.} = + returnBytesWas = returnBytes + reprocessSlotWas = reprocessSlot - let context = SalesContext( - market: market, - clock: clock - ) - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + let context = SalesContext(market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) agent.onCleanUp = onCleanUp state = SaleErrored(error: newException(ValueError, "oh no!")) test "calls onCleanUp with returnBytes = false and reprocessSlot = true": - state = SaleErrored( - error: newException(ValueError, "oh no!"), - reprocessSlot: true - ) + state = SaleErrored(error: newException(ValueError, "oh no!"), reprocessSlot: true) discard await state.run(agent) check eventually returnBytesWas == true check eventually reprocessSlotWas == true diff --git a/tests/codex/sales/states/testfilled.nim b/tests/codex/sales/states/testfilled.nim index e0efb5fc..f8f77da6 100644 --- a/tests/codex/sales/states/testfilled.nim +++ b/tests/codex/sales/states/testfilled.nim @@ -15,7 +15,6 @@ import ../../examples import ../../helpers checksuite "sales state 'filled'": - let request = StorageRequest.example let slotIndex = (request.ask.slots div 2).u256 @@ -27,22 +26,23 @@ checksuite "sales state 'filled'": setup: market = MockMarket.new() - slot = MockSlot(requestId: request.id, - host: Address.example, - slotIndex: slotIndex, - proof: Groth16Proof.default) + slot = MockSlot( + requestId: request.id, + host: Address.example, + 
slotIndex: slotIndex, + proof: Groth16Proof.default, + ) market.requestEnds[request.id] = 321 onExpiryUpdatePassedExpiry = -1 - let onExpiryUpdate = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.async.} = + let onExpiryUpdate = proc( + rootCid: string, expiry: SecondsSince1970 + ): Future[?!void] {.async.} = onExpiryUpdatePassedExpiry = expiry return success() let context = SalesContext(market: market, onExpiryUpdate: some onExpiryUpdate) - agent = newSalesAgent(context, - request.id, - slotIndex, - some request) + agent = newSalesAgent(context, request.id, slotIndex, some request) state = SaleFilled.new() test "switches to proving state when slot is filled by me": diff --git a/tests/codex/sales/states/testfilling.nim b/tests/codex/sales/states/testfilling.nim index 9a6f316d..f0ce7059 100644 --- a/tests/codex/sales/states/testfilling.nim +++ b/tests/codex/sales/states/testfilling.nim @@ -8,7 +8,6 @@ import ../../examples import ../../helpers checksuite "sales state 'filling'": - let request = StorageRequest.example let slotIndex = (request.ask.slots div 2).u256 var state: SaleFilling diff --git a/tests/codex/sales/states/testfinished.nim b/tests/codex/sales/states/testfinished.nim index a5f6690f..4b353014 100644 --- a/tests/codex/sales/states/testfinished.nim +++ b/tests/codex/sales/states/testfinished.nim @@ -1,19 +1,45 @@ -import std/unittest import pkg/questionable import pkg/codex/contracts/requests import pkg/codex/sales/states/finished import pkg/codex/sales/states/cancelled import pkg/codex/sales/states/failed +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext +import pkg/codex/market + +import ../../../asynctest import ../../examples import ../../helpers +import ../../helpers/mockmarket +import ../../helpers/mockclock -checksuite "sales state 'finished'": - +asyncchecksuite "sales state 'finished'": let request = StorageRequest.example + let slotIndex = (request.ask.slots div 2).u256 + let clock = MockClock.new() + + let currentCollateral = UInt256.example + + var market: MockMarket var state: SaleFinished + var agent: SalesAgent + var returnBytesWas = bool.none + var reprocessSlotWas = bool.none + var returnedCollateralValue = UInt256.none setup: - state = SaleFinished.new() + market = MockMarket.new() + let onCleanUp = proc( + returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + ) {.async.} = + returnBytesWas = some returnBytes + reprocessSlotWas = some reprocessSlot + returnedCollateralValue = returnedCollateral + + let context = SalesContext(market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) + agent.onCleanUp = onCleanUp + state = SaleFinished(returnedCollateral: some currentCollateral) test "switches to cancelled state when request expires": let next = state.onCancelled(request) @@ -22,3 +48,9 @@ checksuite "sales state 'finished'": test "switches to failed state when request fails": let next = state.onFailed(request) check !next of SaleFailed + + test "calls onCleanUp with returnBytes = false, reprocessSlot = false, and returnedCollateral = currentCollateral": + discard await state.run(agent) + check eventually returnBytesWas == some false + check eventually reprocessSlotWas == some false + check eventually returnedCollateralValue == some currentCollateral diff --git a/tests/codex/sales/states/testignored.nim b/tests/codex/sales/states/testignored.nim index 4f4dbbf6..1c808e8b 100644 --- a/tests/codex/sales/states/testignored.nim +++ 
b/tests/codex/sales/states/testignored.nim @@ -24,18 +24,14 @@ asyncchecksuite "sales state 'ignored'": var reprocessSlotWas = false setup: - let onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} = - returnBytesWas = returnBytes - reprocessSlotWas = reprocessSlot + let onCleanUp = proc( + returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none + ) {.async.} = + returnBytesWas = returnBytes + reprocessSlotWas = reprocessSlot - let context = SalesContext( - market: market, - clock: clock - ) - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + let context = SalesContext(market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) agent.onCleanUp = onCleanUp state = SaleIgnored.new() diff --git a/tests/codex/sales/states/testinitialproving.nim b/tests/codex/sales/states/testinitialproving.nim index af12852b..97331a07 100644 --- a/tests/codex/sales/states/testinitialproving.nim +++ b/tests/codex/sales/states/testinitialproving.nim @@ -29,21 +29,16 @@ asyncchecksuite "sales state 'initialproving'": var receivedChallenge: ProofChallenge setup: - let onProve = proc (slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = - receivedChallenge = challenge - return success(proof) - let context = SalesContext( - onProve: onProve.some, - market: market, - clock: clock - ) - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + let onProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] {.async.} = + receivedChallenge = challenge + return success(proof) + let context = SalesContext(onProve: onProve.some, market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) state = SaleInitialProving.new() - proc allowProofToStart {.async.} = + proc allowProofToStart() {.async.} = # it won't start proving until the next period await clock.advanceToNextPeriod(market) @@ -91,18 +86,14 @@ asyncchecksuite "sales state 'initialproving'": check SaleFilling(!next).proof == proof test "switches to errored state when onProve callback fails": - let onProveFailed: OnProve = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + let onProveFailed: OnProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] {.async.} = return failure("oh no!") - let proofFailedContext = SalesContext( - onProve: onProveFailed.some, - market: market, - clock: clock - ) - agent = newSalesAgent(proofFailedContext, - request.id, - slotIndex, - request.some) + let proofFailedContext = + SalesContext(onProve: onProveFailed.some, market: market, clock: clock) + agent = newSalesAgent(proofFailedContext, request.id, slotIndex, request.some) let future = state.run(agent) await allowProofToStart() diff --git a/tests/codex/sales/states/testpayout.nim b/tests/codex/sales/states/testpayout.nim new file mode 100644 index 00000000..b1748b45 --- /dev/null +++ b/tests/codex/sales/states/testpayout.nim @@ -0,0 +1,44 @@ +import pkg/questionable +import pkg/chronos +import pkg/codex/contracts/requests +import pkg/codex/sales/states/payout +import pkg/codex/sales/states/finished +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext +import pkg/codex/market + +import ../../../asynctest +import ../../examples +import ../../helpers +import ../../helpers/mockmarket +import ../../helpers/mockclock + +asyncchecksuite "sales state 'payout'": + let request = StorageRequest.example + let 
slotIndex = (request.ask.slots div 2).u256 + let clock = MockClock.new() + + let currentCollateral = UInt256.example + + var market: MockMarket + var state: SalePayout + var agent: SalesAgent + + setup: + market = MockMarket.new() + + let context = SalesContext(market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) + state = SalePayout.new() + + test "switches to 'finished' state and provides returnedCollateral": + market.fillSlot( + requestId = request.id, + slotIndex = slotIndex, + proof = Groth16Proof.default, + host = Address.example, + collateral = currentCollateral, + ) + let next = await state.run(agent) + check !next of SaleFinished + check SaleFinished(!next).returnedCollateral == some currentCollateral diff --git a/tests/codex/sales/states/testpreparing.nim b/tests/codex/sales/states/testpreparing.nim index 94febbac..e78ee25e 100644 --- a/tests/codex/sales/states/testpreparing.nim +++ b/tests/codex/sales/states/testpreparing.nim @@ -33,12 +33,12 @@ asyncchecksuite "sales state 'preparing'": var reservations: MockReservations setup: - availability = Availability( - totalSize: request.ask.slotSize + 100.u256, - freeSize: request.ask.slotSize + 100.u256, - duration: request.ask.duration + 60.u256, - minPrice: request.ask.pricePerSlot - 10.u256, - maxCollateral: request.ask.collateral + 400.u256 + availability = Availability.init( + totalSize = request.ask.slotSize + 100.u256, + freeSize = request.ask.slotSize + 100.u256, + duration = request.ask.duration + 60.u256, + minPricePerBytePerSecond = request.ask.pricePerBytePerSecond, + totalCollateral = request.ask.collateralPerSlot * request.ask.slots.u256, ) let repoDs = SQLiteDatastore.new(Memory).tryGet() let metaDs = SQLiteDatastore.new(Memory).tryGet() @@ -46,17 +46,11 @@ asyncchecksuite "sales state 'preparing'": await repo.start() state = SalePreparing.new() - context = SalesContext( - market: market, - clock: clock - ) + context = SalesContext(market: market, clock: clock) reservations = MockReservations.new(repo) context.reservations = reservations - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + agent = newSalesAgent(context, request.id, slotIndex, request.some) teardown: await repo.stop() @@ -75,10 +69,8 @@ asyncchecksuite "sales state 'preparing'": proc createAvailability() {.async.} = let a = await reservations.createAvailability( - availability.totalSize, - availability.duration, - availability.minPrice, - availability.maxCollateral + availability.totalSize, availability.duration, + availability.minPricePerBytePerSecond, availability.totalCollateral, ) availability = a.get diff --git a/tests/codex/sales/states/testproving.nim b/tests/codex/sales/states/testproving.nim index 5f18746b..afdeb4d2 100644 --- a/tests/codex/sales/states/testproving.nim +++ b/tests/codex/sales/states/testproving.nim @@ -16,7 +16,6 @@ import ../../helpers/mockmarket import ../../helpers/mockclock asyncchecksuite "sales state 'proving'": - let slot = Slot.example let request = slot.request let proof = Groth16Proof.example @@ -30,14 +29,13 @@ asyncchecksuite "sales state 'proving'": setup: clock = MockClock.new() market = MockMarket.new() - let onProve = proc (slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = - receivedChallenge = challenge - return success(proof) + let onProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] {.async.} = + receivedChallenge = challenge + return success(proof) let context = SalesContext(market: 
market, clock: clock, onProve: onProve.some) - agent = newSalesAgent(context, - request.id, - slot.slotIndex, - request.some) + agent = newSalesAgent(context, request.id, slot.slotIndex, request.some) state = SaleProving.new() proc advanceToNextPeriod(market: Market) {.async.} = diff --git a/tests/codex/sales/states/testsimulatedproving.nim b/tests/codex/sales/states/testsimulatedproving.nim index f4ca3ba9..1fc5331c 100644 --- a/tests/codex/sales/states/testsimulatedproving.nim +++ b/tests/codex/sales/states/testsimulatedproving.nim @@ -16,7 +16,6 @@ import ../../helpers/mockmarket import ../../helpers/mockclock asyncchecksuite "sales state 'simulated-proving'": - let slot = Slot.example let request = slot.request let proof = Groth16Proof.example @@ -43,13 +42,12 @@ asyncchecksuite "sales state 'simulated-proving'": market.setProofRequired(slot.id, true) subscription = await market.subscribeProofSubmission(onProofSubmission) - let onProve = proc (slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = - return success(proof) + let onProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] {.async.} = + return success(proof) let context = SalesContext(market: market, clock: clock, onProve: onProve.some) - agent = newSalesAgent(context, - request.id, - slot.slotIndex, - request.some) + agent = newSalesAgent(context, request.id, slot.slotIndex, request.some) state = SaleProvingSimulated.new() state.failEveryNProofs = failEveryNProofs diff --git a/tests/codex/sales/states/testslotreserving.nim b/tests/codex/sales/states/testslotreserving.nim index 20d00745..1fd573fa 100644 --- a/tests/codex/sales/states/testslotreserving.nim +++ b/tests/codex/sales/states/testslotreserving.nim @@ -31,15 +31,9 @@ asyncchecksuite "sales state 'SlotReserving'": clock = MockClock.new() state = SaleSlotReserving.new() - context = SalesContext( - market: market, - clock: clock - ) + context = SalesContext(market: market, clock: clock) - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + agent = newSalesAgent(context, request.id, slotIndex, request.some) test "switches to cancelled state when request expires": let next = state.onCancelled(request) @@ -65,3 +59,11 @@ asyncchecksuite "sales state 'SlotReserving'": check next of SaleErrored let errored = SaleErrored(next) check errored.error == error + + test "catches reservation not allowed error": + let error = newException(MarketError, "SlotReservations_ReservationNotAllowed") + market.setReserveSlotThrowError(some error) + let next = !(await state.run(agent)) + check next of SaleIgnored + check SaleIgnored(next).reprocessSlot == false + check SaleIgnored(next).returnBytes diff --git a/tests/codex/sales/states/testunknown.nim b/tests/codex/sales/states/testunknown.nim index e02b3c90..97730f49 100644 --- a/tests/codex/sales/states/testunknown.nim +++ b/tests/codex/sales/states/testunknown.nim @@ -15,7 +15,6 @@ import ../../examples import ../../helpers checksuite "sales state 'unknown'": - let request = StorageRequest.example let slotIndex = (request.ask.slots div 2).u256 let slotId = slotId(request.id, slotIndex) @@ -27,10 +26,7 @@ checksuite "sales state 'unknown'": setup: market = MockMarket.new() let context = SalesContext(market: market) - agent = newSalesAgent(context, - request.id, - slotIndex, - StorageRequest.none) + agent = newSalesAgent(context, request.id, slotIndex, StorageRequest.none) state = SaleUnknown.new() test "switches to error state when on chain state cannot be fetched": diff 
--git a/tests/codex/sales/testreservations.nim b/tests/codex/sales/testreservations.nim index 285ad0e3..a1c7d1a5 100644 --- a/tests/codex/sales/testreservations.nim +++ b/tests/codex/sales/testreservations.nim @@ -22,6 +22,7 @@ asyncchecksuite "Reservations module": repoDs: Datastore metaDs: Datastore reservations: Reservations + collateralPerByte: UInt256 let repoTmp = TempLevelDb.new() metaTmp = TempLevelDb.new() @@ -32,29 +33,25 @@ asyncchecksuite "Reservations module": metaDs = metaTmp.newDb() repo = RepoStore.new(repoDs, metaDs) reservations = Reservations.new(repo) + collateralPerByte = uint8.example.u256 teardown: await repoTmp.destroyDb() await metaTmp.destroyDb() proc createAvailability(): Availability = - let example = Availability.example - let totalSize = rand(100000..200000) + let example = Availability.example(collateralPerByte) + let totalSize = rand(100000 .. 200000).u256 + let totalCollateral = totalSize * collateralPerByte let availability = waitFor reservations.createAvailability( - totalSize.u256, - example.duration, - example.minPrice, - example.maxCollateral + totalSize, example.duration, example.minPricePerBytePerSecond, totalCollateral ) return availability.get proc createReservation(availability: Availability): Reservation = - let size = rand(1.. agent.data.requestId == request.id and agent.data.slotIndex == 0.u256) - check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256) + check sales.agents.any( + agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256 + ) + check sales.agents.any( + agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256 + ) asyncchecksuite "Sales": let @@ -124,6 +124,10 @@ asyncchecksuite "Sales": repoTmp = TempLevelDb.new() metaTmp = TempLevelDb.new() + var totalAvailabilitySize: UInt256 + var minPricePerBytePerSecond: UInt256 + var requestedCollateralPerByte: UInt256 + var totalCollateral: UInt256 var availability: Availability var request: StorageRequest var sales: Sales @@ -135,25 +139,27 @@ asyncchecksuite "Sales": var itemsProcessed: seq[SlotQueueItem] setup: - availability = Availability( - totalSize: 100.u256, - freeSize: 100.u256, - duration: 60.u256, - minPrice: 600.u256, - maxCollateral: 400.u256 + totalAvailabilitySize = 100.u256 + minPricePerBytePerSecond = 1.u256 + requestedCollateralPerByte = 1.u256 + totalCollateral = requestedCollateralPerByte * totalAvailabilitySize + availability = Availability.init( + totalSize = totalAvailabilitySize, + freeSize = totalAvailabilitySize, + duration = 60.u256, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, ) request = StorageRequest( ask: StorageAsk( slots: 4, slotSize: 100.u256, duration: 60.u256, - reward: 10.u256, - collateral: 200.u256, + pricePerBytePerSecond: minPricePerBytePerSecond, + collateralPerByte: 1.u256, ), - content: StorageContent( - cid: "some cid" - ), - expiry: (getTime() + initDuration(hours=1)).toUnix.u256 + content: StorageContent(cid: "some cid"), + expiry: (getTime() + initDuration(hours = 1)).toUnix.u256, ) market = MockMarket.new() @@ -169,16 +175,20 @@ asyncchecksuite "Sales": await repo.start() sales = Sales.new(market, clock, repo) reservations = sales.context.reservations - sales.onStore = proc(request: StorageRequest, - slot: UInt256, - onBatch: BatchProc): Future[?!void] {.async.} = + sales.onStore = proc( + request: StorageRequest, slot: UInt256, onBatch: BatchProc + ): Future[?!void] {.async.} = return success() - 
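The setup changes above all trace back to one model change: the flat `reward` and `collateral` fields become per-byte rates (`pricePerBytePerSecond`, `collateralPerByte`), and per-slot or total values are derived from size and duration. A minimal sketch of the derived relations these tests assert (`pricePerSlot` and `totalCollateral` here are hypothetical standalone helpers, not the marketplace API):

import pkg/stint

# Derived pricing relations implied by the assertions in these tests.
func pricePerSlot(pricePerBytePerSecond, slotSize, duration: UInt256): UInt256 =
  # a slot earns its per-byte-per-second rate over its full size and duration
  pricePerBytePerSecond * slotSize * duration

func totalCollateral(collateralPerByte, totalSize: UInt256): UInt256 =
  # collateral scales linearly with the number of bytes covered
  collateralPerByte * totalSize

when isMainModule:
  # mirrors `check requestB.ask.pricePerSlot == 100000.u256 * requestB.ask.slotSize`
  # (pricePerBytePerSecond = 1000, duration = 100) in the slot-queue suite below
  assert pricePerSlot(1000.u256, 1.u256, 100.u256) == 100000.u256
  # mirrors `totalCollateral = requestedCollateralPerByte * totalAvailabilitySize`
  assert totalCollateral(1.u256, 100.u256) == 100.u256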
sales.onExpiryUpdate = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.async.} = + sales.onExpiryUpdate = proc( + rootCid: string, expiry: SecondsSince1970 + ): Future[?!void] {.async.} = return success() queue = sales.context.slotQueue - sales.onProve = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + sales.onProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] {.async.} = return success(proof) await sales.start() itemsProcessed = @[] @@ -197,28 +207,25 @@ return false sales.agents[idx].query(description) == state.some - proc allowRequestToStart {.async.} = + proc allowRequestToStart() {.async.} = check eventually isInState(0, "SaleInitialProving") # it won't start proving until the next period await clock.advanceToNextPeriod(market) - proc getAvailability: Availability = + proc getAvailability(): Availability = let key = availability.id.key.get (waitFor reservations.get(key, Availability)).get proc createAvailability() = let a = waitFor reservations.createAvailability( - availability.totalSize, - availability.duration, - availability.minPrice, - availability.maxCollateral + availability.totalSize, availability.duration, + availability.minPricePerBytePerSecond, availability.totalCollateral, ) availability = a.get # update id - proc notProcessed(itemsProcessed: seq[SlotQueueItem], - request: StorageRequest): bool = + proc notProcessed(itemsProcessed: seq[SlotQueueItem], request: StorageRequest): bool = let items = SlotQueueItem.init(request) - for i in 0.. 0 # queue starts paused, allow items to be added to the queue + check eventually queue.len > 0 + # queue starts paused, allow items to be added to the queue check eventually queue.paused # The first processed item will have been re-pushed with `seen = # true`. Then, once this item is processed by the queue, its 'seen' flag @@ -312,14 +320,15 @@ for item in items: check queue.contains(item) - for i in 0.. 0 # queue starts paused, allow items to be added to the queue + check eventually queue.len > 0 + # queue starts paused, allow items to be added to the queue check eventually queue.paused # The first processed item/slot will be filled (eventually). Subsequent # items will be processed and eventually re-pushed with `seen = true`. Once @@ -329,28 +338,29 @@ # Therefore, there should eventually be 3 items remaining in the queue, all # seen. check eventually queue.len == 3 - for i in 0..
agent.data.requestId == request.id and agent.data.slotIndex == 0.u256) - check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256) + check sales.agents.any( + agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256 + ) + check sales.agents.any( + agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256 + ) test "deletes inactive reservations on load": createAvailability() discard await reservations.createReservation( - availability.id, - 100.u256, - RequestId.example, - UInt256.example) + availability.id, 100.u256, RequestId.example, UInt256.example, UInt256.example + ) check (await reservations.all(Reservation)).get.len == 1 await sales.load() check (await reservations.all(Reservation)).get.len == 0 diff --git a/tests/codex/sales/testsalesagent.nim b/tests/codex/sales/testsalesagent.nim index fe19ecb0..f17711d3 100644 --- a/tests/codex/sales/testsalesagent.nim +++ b/tests/codex/sales/testsalesagent.nim @@ -21,8 +21,11 @@ type MockState = ref object of SaleState MockErrorState = ref object of ErrorHandlingState -method `$`*(state: MockState): string = "MockState" -method `$`*(state: MockErrorState): string = "MockErrorState" +method `$`*(state: MockState): string = + "MockState" + +method `$`*(state: MockErrorState): string = + "MockErrorState" method onCancelled*(state: MockState, request: StorageRequest): ?State = onCancelCalled = true @@ -30,8 +33,9 @@ method onCancelled*(state: MockState, request: StorageRequest): ?State = method onFailed*(state: MockState, request: StorageRequest): ?State = onFailedCalled = true -method onSlotFilled*(state: MockState, requestId: RequestId, - slotIndex: UInt256): ?State = +method onSlotFilled*( + state: MockState, requestId: RequestId, slotIndex: UInt256 +): ?State = onSlotFilledCalled = true method onError*(state: MockErrorState, err: ref CatchableError): ?State = @@ -50,26 +54,21 @@ asyncchecksuite "Sales agent": setup: market = MockMarket.new() - market.requestExpiry[request.id] = getTime().toUnix() + request.expiry.truncate(int64) + market.requestExpiry[request.id] = + getTime().toUnix() + request.expiry.truncate(int64) clock = MockClock.new() context = SalesContext(market: market, clock: clock) slotIndex = 0.u256 onCancelCalled = false onFailedCalled = false onSlotFilledCalled = false - agent = newSalesAgent(context, - request.id, - slotIndex, - some request) + agent = newSalesAgent(context, request.id, slotIndex, some request) teardown: await agent.stop() test "can retrieve request": - agent = newSalesAgent(context, - request.id, - slotIndex, - none StorageRequest) + agent = newSalesAgent(context, request.id, slotIndex, none StorageRequest) market.requested = @[request] await agent.retrieveRequest() check agent.data.request == some request @@ -101,7 +100,9 @@ asyncchecksuite "Sales agent": clock.set(market.requestExpiry[request.id] + 1) check eventually onCancelCalled - for requestState in {RequestState.New, RequestState.Started, RequestState.Finished, RequestState.Failed}: + for requestState in { + RequestState.New, RequestState.Started, RequestState.Finished, RequestState.Failed + }: test "onCancelled is not called when request state is " & $requestState: agent.start(MockState.new()) await agent.subscribe() diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim index 885e1037..2e0759ee 100644 --- a/tests/codex/sales/testslotqueue.nim +++ b/tests/codex/sales/testslotqueue.nim @@ -13,7 +13,6 @@ import 
../helpers/mockslotqueueitem import ../examples suite "Slot queue start/stop": - var queue: SlotQueue setup: @@ -25,12 +24,15 @@ suite "Slot queue start/stop": test "starts out not running": check not queue.running + test "queue starts paused": + check queue.paused + test "can call start multiple times, and when already running": queue.start() queue.start() check queue.running - test "can call stop when alrady stopped": + test "can call stop when already stopped": await queue.stop() check not queue.running @@ -46,7 +48,6 @@ suite "Slot queue start/stop": check not queue.running suite "Slot queue workers": - var queue: SlotQueue proc onProcessSlot(item: SlotQueueItem, doneProcessing: Future[void]) {.async.} = @@ -106,7 +107,6 @@ suite "Slot queue workers": check eventually queue.activeWorkers == 1 suite "Slot queue": - var onProcessSlotCalled = false var onProcessSlotCalledWith: seq[(RequestId, uint16)] var queue: SlotQueue @@ -135,6 +135,11 @@ suite "Slot queue": check queue.len == 0 check $queue == "[]" + test "starts with 0 active workers": + newSlotQueue(maxSize = 2, maxWorkers = 2) + check eventually queue.running + check queue.activeWorkers == 0 + test "reports correct size": newSlotQueue(maxSize = 2, maxWorkers = 2) check queue.size == 2 @@ -142,16 +147,16 @@ suite "Slot queue": test "correctly compares SlotQueueItems": var requestA = StorageRequest.example requestA.ask.duration = 1.u256 - requestA.ask.reward = 1.u256 - check requestA.ask.pricePerSlot == 1.u256 - requestA.ask.collateral = 100000.u256 + requestA.ask.pricePerBytePerSecond = 1.u256 + check requestA.ask.pricePerSlot == 1.u256 * requestA.ask.slotSize + requestA.ask.collateralPerByte = 100000.u256 requestA.expiry = 1001.u256 var requestB = StorageRequest.example requestB.ask.duration = 100.u256 - requestB.ask.reward = 1000.u256 - check requestB.ask.pricePerSlot == 100000.u256 - requestB.ask.collateral = 1.u256 + requestB.ask.pricePerBytePerSecond = 1000.u256 + check requestB.ask.pricePerSlot == 100000.u256 * requestB.ask.slotSize + requestB.ask.collateralPerByte = 1.u256 requestB.expiry = 1000.u256 let itemA = SlotQueueItem.init(requestA, 0) @@ -166,20 +171,20 @@ suite "Slot queue": slotIndex: 0, slotSize: 1.u256, duration: 1.u256, - reward: 2.u256, # profitability is higher (good) - collateral: 1.u256, + pricePerBytePerSecond: 2.u256, # profitability is higher (good) + collateralPerByte: 1.u256, expiry: 1.u256, - seen: true # seen (bad), more weight than profitability + seen: true, # seen (bad), more weight than profitability ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, slotSize: 1.u256, duration: 1.u256, - reward: 1.u256, # profitability is lower (bad) - collateral: 1.u256, + pricePerBytePerSecond: 1.u256, # profitability is lower (bad) + collateralPerByte: 1.u256, expiry: 1.u256, - seen: false # not seen (good) + seen: false, # not seen (good) ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # B higher priority than A check itemA.toSlotQueueItem > itemB.toSlotQueueItem @@ -191,20 +196,21 @@ suite "Slot queue": slotIndex: 0, slotSize: 1.u256, duration: 1.u256, - reward: 1.u256, # reward is lower (bad) - collateral: 1.u256, # collateral is lower (good) + pricePerBytePerSecond: 1.u256, # reward is lower (bad) + collateralPerByte: 1.u256, # collateral is lower (good) expiry: 1.u256, - seen: false + seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, slotSize: 1.u256, duration: 1.u256, - reward: 2.u256, # reward is higher (good), more weight than collateral - 
collateral: 2.u256, # collateral is higher (bad) + pricePerBytePerSecond: 2.u256, + # reward is higher (good), more weight than collateral + collateralPerByte: 2.u256, # collateral is higher (bad) expiry: 1.u256, - seen: false + seen: false, ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority @@ -216,20 +222,20 @@ suite "Slot queue": slotIndex: 0, slotSize: 1.u256, duration: 1.u256, - reward: 1.u256, - collateral: 2.u256, # collateral is higher (bad) + pricePerBytePerSecond: 1.u256, + collateralPerByte: 2.u256, # collateral is higher (bad) expiry: 2.u256, # expiry is longer (good) - seen: false + seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, slotSize: 1.u256, duration: 1.u256, - reward: 1.u256, - collateral: 1.u256, # collateral is lower (good), more weight than expiry + pricePerBytePerSecond: 1.u256, + collateralPerByte: 1.u256, # collateral is lower (good), more weight than expiry expiry: 1.u256, # expiry is shorter (bad) - seen: false + seen: false, ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority @@ -241,20 +247,20 @@ suite "Slot queue": slotIndex: 0, slotSize: 1.u256, # slotSize is smaller (good) duration: 1.u256, - reward: 1.u256, - collateral: 1.u256, + pricePerBytePerSecond: 1.u256, + collateralPerByte: 1.u256, expiry: 1.u256, # expiry is shorter (bad) - seen: false + seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, slotSize: 2.u256, # slotSize is larger (bad) duration: 1.u256, - reward: 1.u256, - collateral: 1.u256, + pricePerBytePerSecond: 1.u256, + collateralPerByte: 1.u256, expiry: 2.u256, # expiry is longer (good), more weight than slotSize - seen: false + seen: false, ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority @@ -266,30 +272,30 @@ suite "Slot queue": slotIndex: 0, slotSize: 2.u256, # slotSize is larger (bad) duration: 1.u256, - reward: 1.u256, - collateral: 1.u256, + pricePerBytePerSecond: 1.u256, + collateralPerByte: 1.u256, expiry: 1.u256, # expiry is shorter (bad) - seen: false + seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, slotIndex: 0, slotSize: 1.u256, # slotSize is smaller (good) duration: 1.u256, - reward: 1.u256, - collateral: 1.u256, + pricePerBytePerSecond: 1.u256, + collateralPerByte: 1.u256, expiry: 1.u256, - seen: false + seen: false, ) - check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority + check itemA.toSlotQueueItem < itemB.toSlotQueueItem # < indicates higher priority test "expands available all possible slot indices on init": let request = StorageRequest.example let items = SlotQueueItem.init(request) check items.len.uint64 == request.ask.slots var checked = 0 - for slotIndex in 0'u16.. 
item0 test "sorts items by expiry descending (longer expiry = higher priority)": var request = StorageRequest.example @@ -462,10 +464,10 @@ suite "Slot queue": let item1 = SlotQueueItem.init(request, 1) check item1 < item0 - test "sorts items by slot size ascending (smaller dataset = higher priority)": + test "sorts items by slot size descending (bigger dataset = higher profitability = higher priority)": var request = StorageRequest.example let item0 = SlotQueueItem.init(request, 0) - request.ask.slotSize -= 1.u256 + request.ask.slotSize += 1.u256 let item1 = SlotQueueItem.init(request, 1) check item1 < item0 @@ -480,21 +482,19 @@ suite "Slot queue": newSlotQueue(maxSize = 2, maxWorkers = 2) let item = SlotQueueItem.example check queue.push(item).isOk - check eventually onProcessSlotCalledWith == @[ - (item.requestId, item.slotIndex) - ] + check eventually onProcessSlotCalledWith == @[(item.requestId, item.slotIndex)] - test "should process items in correct order": + test "processes items in order of addition when only one item is added at a time": newSlotQueue(maxSize = 2, maxWorkers = 2) # sleeping after push allows the slotqueue loop to iterate, # calling the callback for each pushed/updated item var request = StorageRequest.example let item0 = SlotQueueItem.init(request, 0) - request.ask.reward += 1.u256 + request.ask.pricePerBytePerSecond += 1.u256 let item1 = SlotQueueItem.init(request, 1) - request.ask.reward += 1.u256 + request.ask.pricePerBytePerSecond += 1.u256 let item2 = SlotQueueItem.init(request, 2) - request.ask.reward += 1.u256 + request.ask.pricePerBytePerSecond += 1.u256 let item3 = SlotQueueItem.init(request, 3) check queue.push(item0).isOk @@ -506,7 +506,8 @@ suite "Slot queue": check queue.push(item3).isOk check eventually ( - onProcessSlotCalledWith == @[ + onProcessSlotCalledWith == + @[ (item0.requestId, item0.slotIndex), (item1.requestId, item1.slotIndex), (item2.requestId, item2.slotIndex), @@ -514,16 +515,35 @@ suite "Slot queue": ] ) - test "processing a 'seen' item pauses the queue": - newSlotQueue(maxSize = 4, maxWorkers = 4) - let request = StorageRequest.example - let item = SlotQueueItem.init(request.id, 0'u16, - request.ask, - request.expiry, - seen = true) - check queue.push(item).isOk - check eventually queue.paused - check onProcessSlotCalledWith.len == 0 + test "should process items in correct order according to the queue invariant when more than one item is added at a time": + newSlotQueue(maxSize = 4, maxWorkers = 2) + # sleeping after push allows the slotqueue loop to iterate, + # calling the callback for each pushed/updated item + var request = StorageRequest.example + let item0 = SlotQueueItem.init(request, 0) + request.ask.pricePerBytePerSecond += 1.u256 + let item1 = SlotQueueItem.init(request, 1) + request.ask.pricePerBytePerSecond += 1.u256 + let item2 = SlotQueueItem.init(request, 2) + request.ask.pricePerBytePerSecond += 1.u256 + let item3 = SlotQueueItem.init(request, 3) + + check queue.push(item0).isOk + check queue.push(item1).isOk + check queue.push(item2).isOk + check queue.push(item3).isOk + + await sleepAsync(1.millis) + + check eventually ( + onProcessSlotCalledWith == + @[ + (item3.requestId, item3.slotIndex), + (item2.requestId, item2.slotIndex), + (item1.requestId, item1.slotIndex), + (item0.requestId, item0.slotIndex), + ] + ) test "pushing items to queue unpauses queue": newSlotQueue(maxSize = 4, maxWorkers = 4) @@ -538,10 +558,8 @@ suite "Slot queue": test "pushing seen item does not unpause queue": newSlotQueue(maxSize = 4, 
maxWorkers = 4) let request = StorageRequest.example - let item0 = SlotQueueItem.init(request.id, 0'u16, - request.ask, - request.expiry, - seen = true) + let item0 = + SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true) check queue.paused check queue.push(item0).isOk check queue.paused @@ -549,30 +567,58 @@ test "paused queue waits for unpause before continuing processing": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let item = SlotQueueItem.init(request.id, 1'u16, - request.ask, - request.expiry, - seen = false) + let item = + SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = false) check queue.paused # push causes unpause check queue.push(item).isOk # check all items processed - check eventually onProcessSlotCalledWith == @[ - (item.requestId, item.slotIndex), - ] + check eventually onProcessSlotCalledWith == @[(item.requestId, item.slotIndex)] check eventually queue.len == 0 + test "processing a 'seen' item pauses the queue": + newSlotQueue(maxSize = 4, maxWorkers = 4) + let request = StorageRequest.example + let unseen = + SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false) + let seen = + SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) + # push causes unpause + check queue.push(unseen).isSuccess + # check all items processed + check eventually queue.len == 0 + # push seen item + check queue.push(seen).isSuccess + # queue should be paused + check eventually queue.paused + + test "processing a 'seen' item does not decrease the number of workers": + newSlotQueue(maxSize = 4, maxWorkers = 4) + let request = StorageRequest.example + let unseen = + SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false) + let seen = + SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) + # push seen item to ensure that queue is pausing + check queue.push(seen).isSuccess + # unpause and pause a number of times + for _ in 0 ..< 10: + # push unseen item to unpause the queue + check queue.push(unseen).isSuccess + # wait for unseen item to be processed + check eventually queue.len == 1 + # wait for queue to pause because of seen item + check eventually queue.paused + # check that the number of available workers equals the maximum (no active workers) + check eventually queue.activeWorkers == 0 + test "item 'seen' flags can be cleared": newSlotQueue(maxSize = 4, maxWorkers = 1) let request = StorageRequest.example - let item0 = SlotQueueItem.init(request.id, 0'u16, - request.ask, - request.expiry, - seen = true) - let item1 = SlotQueueItem.init(request.id, 1'u16, - request.ask, - request.expiry, - seen = true) + let item0 = + SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true) + let item1 = + SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) check queue.push(item0).isOk check queue.push(item1).isOk check queue[0].seen diff --git a/tests/codex/sales/teststates.nim b/tests/codex/sales/teststates.nim index 4346afb5..fd918ccc 100644 --- a/tests/codex/sales/teststates.nim +++ b/tests/codex/sales/teststates.nim @@ -1,6 +1,7 @@ import ./states/testunknown import ./states/testdownloading import ./states/testfilling +import ./states/testpayout import ./states/testfinished import ./states/testinitialproving import ./states/testfilled
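Taken together, the comparison and 'seen' tests above pin down how the queue orders work: an unseen item always outranks a seen one, then higher profitability wins, then lower collateral, then longer time to expiry. A compact restatement of that invariant (a sketch only: `Item` is a hypothetical stand-in for `SlotQueueItem`, profitability is assumed to be the per-slot price, and `<` means processed first):

import pkg/stint

type Item = object # hypothetical stand-in for SlotQueueItem
  seen: bool
  pricePerBytePerSecond, collateralPerByte: UInt256
  slotSize, duration, expiry: UInt256

func profitability(a: Item): UInt256 =
  a.pricePerBytePerSecond * a.slotSize * a.duration # assumed pricePerSlot

func `<`(a, b: Item): bool =
  if a.seen != b.seen:
    return b.seen # unseen first, regardless of everything else
  if a.profitability != b.profitability:
    return a.profitability > b.profitability # more profitable first
  if a.collateralPerByte != b.collateralPerByte:
    return a.collateralPerByte < b.collateralPerByte # cheaper collateral first
  a.expiry > b.expiry # more time to expiry first

when isMainModule:
  let unseen = Item(seen: false, pricePerBytePerSecond: 1.u256,
    collateralPerByte: 1.u256, slotSize: 1.u256, duration: 1.u256, expiry: 1.u256)
  let seen = Item(seen: true, pricePerBytePerSecond: 2.u256,
    collateralPerByte: 1.u256, slotSize: 1.u256, duration: 1.u256, expiry: 1.u256)
  assert unseen < seen # 'seen' carries more weight than profitability

diff --git a/tests/codex/slots/backends/helpers.nim b/tests/codex/slots/backends/helpers.nim index 7f789057..e1b6822a 100644 ---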
a/tests/codex/slots/backends/helpers.nim +++ b/tests/codex/slots/backends/helpers.nim @@ -16,123 +16,120 @@ import pkg/codex/utils/json export types func toJsonDecimal*(big: BigInt[254]): string = - let s = big.toDecimal.strip( leading = true, trailing = false, chars = {'0'} ) + let s = big.toDecimal.strip(leading = true, trailing = false, chars = {'0'}) if s.len == 0: "0" else: s func toJson*(g1: CircomG1): JsonNode = - %* { + %*{ "x": Bn254Fr.fromBytes(g1.x).get.toBig.toJsonDecimal, - "y": Bn254Fr.fromBytes(g1.y).get.toBig.toJsonDecimal + "y": Bn254Fr.fromBytes(g1.y).get.toBig.toJsonDecimal, } func toJson*(g2: CircomG2): JsonNode = - %* { + %*{ "x": [ Bn254Fr.fromBytes(g2.x[0]).get.toBig.toJsonDecimal, - Bn254Fr.fromBytes(g2.x[1]).get.toBig.toJsonDecimal], + Bn254Fr.fromBytes(g2.x[1]).get.toBig.toJsonDecimal, + ], "y": [ Bn254Fr.fromBytes(g2.y[0]).get.toBig.toJsonDecimal, - Bn254Fr.fromBytes(g2.y[1]).get.toBig.toJsonDecimal] + Bn254Fr.fromBytes(g2.y[1]).get.toBig.toJsonDecimal, + ], } proc toJson*(vpk: VerifyingKey): JsonNode = - let - ic = toSeq(cast[ptr UncheckedArray[CircomG1]](vpk.ic).toOpenArray(0, vpk.icLen.int - 1)) + let ic = + toSeq(cast[ptr UncheckedArray[CircomG1]](vpk.ic).toOpenArray(0, vpk.icLen.int - 1)) echo ic.len - %* { + %*{ "alpha1": vpk.alpha1.toJson, "beta2": vpk.beta2.toJson, "gamma2": vpk.gamma2.toJson, "delta2": vpk.delta2.toJson, - "ic": ic.mapIt( it.toJson ) + "ic": ic.mapIt(it.toJson), } func toJson*(input: ProofInputs[Poseidon2Hash]): JsonNode = - var - input = input + var input = input - %* { + %*{ "dataSetRoot": input.datasetRoot.toBig.toJsonDecimal, "entropy": input.entropy.toBig.toJsonDecimal, "nCellsPerSlot": input.nCellsPerSlot, "nSlotsPerDataSet": input.nSlotsPerDataSet, "slotIndex": input.slotIndex, "slotRoot": input.slotRoot.toDecimal, - "slotProof": input.slotProof.mapIt( it.toBig.toJsonDecimal ), - "cellData": input.samples.mapIt( - it.cellData.mapIt( it.toBig.toJsonDecimal ) - ), - "merklePaths": input.samples.mapIt( - it.merklePaths.mapIt( it.toBig.toJsonDecimal ) - ) + "slotProof": input.slotProof.mapIt(it.toBig.toJsonDecimal), + "cellData": input.samples.mapIt(it.cellData.mapIt(it.toBig.toJsonDecimal)), + "merklePaths": input.samples.mapIt(it.merklePaths.mapIt(it.toBig.toJsonDecimal)), } func toJson*(input: NormalizedProofInputs[Poseidon2Hash]): JsonNode = toJson(ProofInputs[Poseidon2Hash](input)) -func jsonToProofInput*(_: type Poseidon2Hash, inputJson: JsonNode): ProofInputs[Poseidon2Hash] = +func jsonToProofInput*( + _: type Poseidon2Hash, inputJson: JsonNode +): ProofInputs[Poseidon2Hash] = let - cellData = - inputJson["cellData"].mapIt( - it.mapIt( - block: - var - big: BigInt[256] - hash: Poseidon2Hash - data: array[32, byte] - assert bool(big.fromDecimal( it.str )) - assert data.marshal(big, littleEndian) + cellData = inputJson["cellData"].mapIt( + it.mapIt( + block: + var + big: BigInt[256] + hash: Poseidon2Hash + data: array[32, byte] + assert bool(big.fromDecimal(it.str)) + assert data.marshal(big, littleEndian) - Poseidon2Hash.fromBytes(data).get - ).concat # flatten out elements - ) + Poseidon2Hash.fromBytes(data).get + ).concat # flatten out elements + ) - merklePaths = - inputJson["merklePaths"].mapIt( - it.mapIt( - block: - var - big: BigInt[254] - hash: Poseidon2Hash - assert bool(big.fromDecimal( it.getStr )) - hash.fromBig( big ) - hash - ) - ) + merklePaths = inputJson["merklePaths"].mapIt( + it.mapIt( + block: + var + big: BigInt[254] + hash: Poseidon2Hash + assert bool(big.fromDecimal(it.getStr)) + hash.fromBig(big) + hash + 
) + ) slotProof = inputJson["slotProof"].mapIt( block: var big: BigInt[254] hash: Poseidon2Hash - assert bool(big.fromDecimal( it.str )) - hash.fromBig( big ) + assert bool(big.fromDecimal(it.str)) + hash.fromBig(big) hash - ) + ) datasetRoot = block: var big: BigInt[254] hash: Poseidon2Hash - assert bool(big.fromDecimal( inputJson["dataSetRoot"].str )) - hash.fromBig( big ) + assert bool(big.fromDecimal(inputJson["dataSetRoot"].str)) + hash.fromBig(big) hash slotRoot = block: var big: BigInt[254] hash: Poseidon2Hash - assert bool(big.fromDecimal( inputJson["slotRoot"].str )) - hash.fromBig( big ) + assert bool(big.fromDecimal(inputJson["slotRoot"].str)) + hash.fromBig(big) hash entropy = block: var big: BigInt[254] hash: Poseidon2Hash - assert bool(big.fromDecimal( inputJson["entropy"].str )) - hash.fromBig( big ) + assert bool(big.fromDecimal(inputJson["entropy"].str)) + hash.fromBig(big) hash nCellsPerSlot = inputJson["nCellsPerSlot"].getInt @@ -147,9 +144,7 @@ func jsonToProofInput*(_: type Poseidon2Hash, inputJson: JsonNode): ProofInputs[ slotRoot: slotRoot, nCellsPerSlot: nCellsPerSlot, nSlotsPerDataSet: nSlotsPerDataSet, - samples: zip(cellData, merklePaths) - .mapIt(Sample[Poseidon2Hash]( - cellData: it[0], - merklePaths: it[1] - )) + samples: zip(cellData, merklePaths).mapIt( + Sample[Poseidon2Hash](cellData: it[0], merklePaths: it[1]) + ), ) diff --git a/tests/codex/slots/backends/testcircomcompat.nim b/tests/codex/slots/backends/testcircomcompat.nim index 60fb588f..b61d4f18 100644 --- a/tests/codex/slots/backends/testcircomcompat.nim +++ b/tests/codex/slots/backends/testcircomcompat.nim @@ -36,19 +36,17 @@ suite "Test Circom Compat Backend - control inputs": circom = CircomCompat.init(r1cs, wasm, zkey) teardown: - circom.release() # this comes from the rust FFI + circom.release() # this comes from the rust FFI test "Should verify with correct inputs": - let - proof = circom.prove(proofInputs).tryGet + let proof = circom.prove(proofInputs).tryGet check circom.verify(proof, proofInputs).tryGet test "Should not verify with incorrect inputs": proofInputs.slotIndex = 1 # change slot index - let - proof = circom.prove(proofInputs).tryGet + let proof = circom.prove(proofInputs).tryGet check circom.verify(proof, proofInputs).tryGet == false @@ -87,13 +85,9 @@ suite "Test Circom Compat Backend": store = RepoStore.new(repoDs, metaDs) - (manifest, protected, verifiable) = - await createVerifiableManifest( - store, - numDatasetBlocks, - ecK, ecM, - blockSize, - cellSize) + (manifest, protected, verifiable) = await createVerifiableManifest( + store, numDatasetBlocks, ecK, ecM, blockSize, cellSize + ) builder = Poseidon2Builder.new(store, verifiable).tryGet sampler = Poseidon2Sampler.new(slotId, store, builder).tryGet @@ -104,21 +98,18 @@ suite "Test Circom Compat Backend": proofInputs = (await sampler.getProofInput(challenge, samples)).tryGet teardown: - circom.release() # this comes from the rust FFI + circom.release() # this comes from the rust FFI await repoTmp.destroyDb() await metaTmp.destroyDb() - test "Should verify with correct input": - var - proof = circom.prove(proofInputs).tryGet + var proof = circom.prove(proofInputs).tryGet check circom.verify(proof, proofInputs).tryGet test "Should not verify with incorrect input": proofInputs.slotIndex = 1 # change slot index - let - proof = circom.prove(proofInputs).tryGet + let proof = circom.prove(proofInputs).tryGet check circom.verify(proof, proofInputs).tryGet == false diff --git a/tests/codex/slots/helpers.nim 
b/tests/codex/slots/helpers.nim index d3310462..03d87d12 100644 --- a/tests/codex/slots/helpers.nim +++ b/tests/codex/slots/helpers.nim @@ -1,4 +1,3 @@ - import std/sugar import pkg/chronos @@ -16,7 +15,9 @@ import pkg/codex/rng import ../helpers -proc storeManifest*(store: BlockStore, manifest: Manifest): Future[?!bt.Block] {.async.} = +proc storeManifest*( + store: BlockStore, manifest: Manifest +): Future[?!bt.Block] {.async.} = without encodedVerifiable =? manifest.encode(), err: trace "Unable to encode manifest" return failure(err) @@ -32,12 +33,13 @@ proc storeManifest*(store: BlockStore, manifest: Manifest): Future[?!bt.Block] { success blk proc makeManifest*( - cids: seq[Cid], - datasetSize: NBytes, - blockSize: NBytes, - store: BlockStore, - hcodec = Sha256HashCodec, - dataCodec = BlockCodec): Future[?!Manifest] {.async.} = + cids: seq[Cid], + datasetSize: NBytes, + blockSize: NBytes, + store: BlockStore, + hcodec = Sha256HashCodec, + dataCodec = BlockCodec, +): Future[?!Manifest] {.async.} = without tree =? CodexTree.init(cids), err: return failure(err) @@ -52,14 +54,14 @@ proc makeManifest*( # TODO add log here return failure(err) - let - manifest = Manifest.new( - treeCid = treeCid, - blockSize = blockSize, - datasetSize = datasetSize, - version = CIDv1, - hcodec = hcodec, - codec = dataCodec) + let manifest = Manifest.new( + treeCid = treeCid, + blockSize = blockSize, + datasetSize = datasetSize, + version = CIDv1, + hcodec = hcodec, + codec = dataCodec, + ) without manifestBlk =? await store.storeManifest(manifest), err: trace "Unable to store manifest" @@ -68,9 +70,8 @@ proc makeManifest*( success manifest proc createBlocks*( - chunker: Chunker, - store: BlockStore): Future[seq[bt.Block]] {.async.} = - + chunker: Chunker, store: BlockStore +): Future[seq[bt.Block]] {.async.} = collect(newSeq): while (let chunk = await chunker.getBytes(); chunk.len > 0): let blk = bt.Block.new(chunk).tryGet() @@ -78,24 +79,24 @@ proc createBlocks*( blk proc createProtectedManifest*( - datasetBlocks: seq[bt.Block], - store: BlockStore, - numDatasetBlocks: int, - ecK: int, ecM: int, - blockSize: NBytes, - originalDatasetSize: int, - totalDatasetSize: int): - Future[tuple[manifest: Manifest, protected: Manifest]] {.async.} = - + datasetBlocks: seq[bt.Block], + store: BlockStore, + numDatasetBlocks: int, + ecK: int, + ecM: int, + blockSize: NBytes, + originalDatasetSize: int, + totalDatasetSize: int, +): Future[tuple[manifest: Manifest, protected: Manifest]] {.async.} = let cids = datasetBlocks.mapIt(it.cid) - datasetTree = CodexTree.init(cids[0.. 
" & $expected & ")": + test "Can get slotBlockIndex from slotCellIndex (" & $input & " -> " & $expected & + ")": let slotBlockIndex = toBlkInSlot(input, numCells = cellsPerBlock) check: slotBlockIndex == expected for (input, expected) in [(10, 10), (31, 31), (32, 0), (63, 31), (64, 0)]: - test "Can get blockCellIndex from slotCellIndex (" & $input & " -> " & $expected & ")": + test "Can get blockCellIndex from slotCellIndex (" & $input & " -> " & $expected & + ")": let blockCellIndex = toCellInBlk(input, numCells = cellsPerBlock) check: diff --git a/tests/codex/slots/testbackendfactory.nim b/tests/codex/slots/testbackendfactory.nim index 87a7733a..a24bc41a 100644 --- a/tests/codex/slots/testbackendfactory.nim +++ b/tests/codex/slots/testbackendfactory.nim @@ -12,17 +12,13 @@ import pkg/codex/utils/natutils import ../helpers import ../examples -type - BackendUtilsMock = ref object of BackendUtils - argR1csFile: string - argWasmFile: string - argZKeyFile: string +type BackendUtilsMock = ref object of BackendUtils + argR1csFile: string + argWasmFile: string + argZKeyFile: string method initializeCircomBackend*( - self: BackendUtilsMock, - r1csFile: string, - wasmFile: string, - zKeyFile: string + self: BackendUtilsMock, r1csFile: string, wasmFile: string, zKeyFile: string ): AnyBackend = self.argR1csFile = r1csFile self.argWasmFile = wasmFile @@ -48,15 +44,13 @@ suite "Test BackendFactory": let config = CodexConf( cmd: StartUpCmd.persistence, - nat: NatConfig( - hasExtIp: false, - nat: NatNone), + nat: NatConfig(hasExtIp: false, nat: NatNone), metricsAddress: parseIpAddress("127.0.0.1"), persistenceCmd: PersistenceCmd.prover, marketplaceAddress: EthAddress.example.some, circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"), circomWasm: InputFile("tests/circuits/fixtures/proof_main.wasm"), - circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey") + circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey"), ) backend = config.initializeBackend(utilsMock).tryGet @@ -70,16 +64,14 @@ suite "Test BackendFactory": let config = CodexConf( cmd: StartUpCmd.persistence, - nat: NatConfig( - hasExtIp: false, - nat: NatNone), + nat: NatConfig(hasExtIp: false, nat: NatNone), metricsAddress: parseIpAddress("127.0.0.1"), persistenceCmd: PersistenceCmd.prover, marketplaceAddress: EthAddress.example.some, # Set the circuitDir such that the tests/circuits/fixtures/ files # will be picked up as local files: - circuitDir: OutDir("tests/circuits/fixtures") + circuitDir: OutDir("tests/circuits/fixtures"), ) backend = config.initializeBackend(utilsMock).tryGet @@ -93,13 +85,11 @@ suite "Test BackendFactory": let config = CodexConf( cmd: StartUpCmd.persistence, - nat: NatConfig( - hasExtIp: false, - nat: NatNone), + nat: NatConfig(hasExtIp: false, nat: NatNone), metricsAddress: parseIpAddress("127.0.0.1"), persistenceCmd: PersistenceCmd.prover, marketplaceAddress: EthAddress.example.some, - circuitDir: OutDir(circuitDir) + circuitDir: OutDir(circuitDir), ) backendResult = config.initializeBackend(utilsMock) diff --git a/tests/codex/slots/testconverters.nim b/tests/codex/slots/testconverters.nim index cf18d6b2..58857f6b 100644 --- a/tests/codex/slots/testconverters.nim +++ b/tests/codex/slots/testconverters.nim @@ -10,8 +10,7 @@ import ../../asynctest import ../examples import ../merkletree/helpers -let - hash: Poseidon2Hash = toF(12345) +let hash: Poseidon2Hash = toF(12345) suite "Converters": test "CellBlock cid": @@ -44,4 +43,4 @@ suite "Converters": poseidonProof = 
toVerifiableProof(codexProof).tryGet() check: - Poseidon2Proof.example == poseidonProof + Poseidon2Proof.example == poseidonProof diff --git a/tests/codex/slots/testprover.nim b/tests/codex/slots/testprover.nim index f6deaeba..c567db55 100644 --- a/tests/codex/slots/testprover.nim +++ b/tests/codex/slots/testprover.nim @@ -36,15 +36,13 @@ suite "Test Prover": metaDs = metaTmp.newDb() config = CodexConf( cmd: StartUpCmd.persistence, - nat: NatConfig( - hasExtIp: false, - nat: NatNone), + nat: NatConfig(hasExtIp: false, nat: NatNone), metricsAddress: parseIpAddress("127.0.0.1"), persistenceCmd: PersistenceCmd.prover, circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"), circomWasm: InputFile("tests/circuits/fixtures/proof_main.wasm"), circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey"), - numProofSamples: samples + numProofSamples: samples, ) backend = config.initializeBackend().tryGet() @@ -56,42 +54,35 @@ suite "Test Prover": await metaTmp.destroyDb() test "Should sample and prove a slot": - let - (_, _, verifiable) = - await createVerifiableManifest( - store, - 8, # number of blocks in the original dataset (before EC) - 5, # ecK - 3, # ecM - blockSize, - cellSize) + let (_, _, verifiable) = await createVerifiableManifest( + store, + 8, # number of blocks in the original dataset (before EC) + 5, # ecK + 3, # ecM + blockSize, + cellSize, + ) - let - (inputs, proof) = ( - await prover.prove(1, verifiable, challenge)).tryGet + let (inputs, proof) = (await prover.prove(1, verifiable, challenge)).tryGet check: (await prover.verify(proof, inputs)).tryGet == true test "Should generate valid proofs when slots consist of single blocks": - # To get single-block slots, we just need to set the number of blocks in # the original dataset to be the same as ecK. The total number of blocks # after generating random data for parity will be ecK + ecM, which will # match the number of slots. 
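A quick sanity check of that arithmetic (a self-contained sketch; `ecBlocksCount` is an assumed reconstruction of what `calcEcBlocksCount` computes, rounding the dataset up to whole EC groups of `ecK` blocks):

# assumed: each group of ecK data blocks gains ecM parity blocks
func ecBlocksCount(blocks, ecK, ecM: int): int =
  let groups = (blocks + ecK - 1) div ecK # round up to whole groups
  groups * (ecK + ecM)

when isMainModule:
  let (numSlots, total) = (2 + 1, ecBlocksCount(2, 2, 1))
  assert total == 3 # 2 data blocks + 1 parity block
  assert total div numSlots == 1 # exactly one block per slot, as described above
  assert ecBlocksCount(8, 5, 3) == 16 # the multi-block case in the previous test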
- let - (_, _, verifiable) = - await createVerifiableManifest( - store, - 2, # number of blocks in the original dataset (before EC) - 2, # ecK - 1, # ecM - blockSize, - cellSize) + let (_, _, verifiable) = await createVerifiableManifest( + store, + 2, # number of blocks in the original dataset (before EC) + 2, # ecK + 1, # ecM + blockSize, + cellSize, + ) - let - (inputs, proof) = ( - await prover.prove(1, verifiable, challenge)).tryGet + let (inputs, proof) = (await prover.prove(1, verifiable, challenge)).tryGet check: (await prover.verify(proof, inputs)).tryGet == true diff --git a/tests/codex/slots/testslotbuilder.nim b/tests/codex/slots/testslotbuilder.nim index 88ee9ae5..ef83bdee 100644 --- a/tests/codex/slots/testslotbuilder.nim +++ b/tests/codex/slots/testslotbuilder.nim @@ -27,8 +27,7 @@ import pkg/codex/slots {.all.} privateAccess(Poseidon2Builder) # enable access to private fields privateAccess(Manifest) # enable access to private fields -const - Strategy = SteppedStrategy +const Strategy = SteppedStrategy suite "Slot builder": let @@ -39,24 +38,27 @@ suite "Slot builder": numSlots = ecK + ecM numDatasetBlocks = 8 - numTotalBlocks = calcEcBlocksCount(numDatasetBlocks, ecK, ecM) # total number of blocks in the dataset after - # EC (should will match number of slots) + numTotalBlocks = calcEcBlocksCount(numDatasetBlocks, ecK, ecM) + # total number of blocks in the dataset after + # EC (should will match number of slots) originalDatasetSize = numDatasetBlocks * blockSize.int - totalDatasetSize = numTotalBlocks * blockSize.int + totalDatasetSize = numTotalBlocks * blockSize.int - numSlotBlocks = numTotalBlocks div numSlots - numBlockCells = (blockSize div cellSize).int # number of cells per block - numSlotCells = numSlotBlocks * numBlockCells # number of uncorrected slot cells - pow2SlotCells = nextPowerOfTwo(numSlotCells) # pow2 cells per slot - numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks # pow2 blocks per slot + numSlotBlocks = numTotalBlocks div numSlots + numBlockCells = (blockSize div cellSize).int # number of cells per block + numSlotCells = numSlotBlocks * numBlockCells # number of uncorrected slot cells + pow2SlotCells = nextPowerOfTwo(numSlotCells) # pow2 cells per slot + numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks + # pow2 blocks per slot - numSlotBlocksTotal = # pad blocks per slot + numSlotBlocksTotal = + # pad blocks per slot if numPadSlotBlocks > 0: - numPadSlotBlocks + numSlotBlocks - else: - numSlotBlocks + numPadSlotBlocks + numSlotBlocks + else: + numSlotBlocks - numBlocksTotal = numSlotBlocksTotal * numSlots + numBlocksTotal = numSlotBlocksTotal * numSlots # empty digest emptyDigest = SpongeMerkle.digest(newSeq[byte](blockSize.int), cellSize.int) @@ -78,18 +80,14 @@ suite "Slot builder": metaDs = metaTmp.newDb() localStore = RepoStore.new(repoDs, metaDs) - chunker = RandomChunker.new(Rng.instance(), size = totalDatasetSize, chunkSize = blockSize) + chunker = + RandomChunker.new(Rng.instance(), size = totalDatasetSize, chunkSize = blockSize) datasetBlocks = await chunker.createBlocks(localStore) - (manifest, protectedManifest) = - await createProtectedManifest( - datasetBlocks, - localStore, - numDatasetBlocks, - ecK, ecM, - blockSize, - originalDatasetSize, - totalDatasetSize) + (manifest, protectedManifest) = await createProtectedManifest( + datasetBlocks, localStore, numDatasetBlocks, ecK, ecM, blockSize, + originalDatasetSize, totalDatasetSize, + ) teardown: await localStore.close() @@ -109,55 +107,55 @@ suite 
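Before the suite's tests resume, a worked example of the power-of-two padding derived in the `let` block above (illustrative numbers, not the suite's actual constants; `nextPowerOfTwo` comes from Nim's `std/math`, as in the suite):

import std/math

let
  numSlotBlocks = 3 # e.g. numTotalBlocks div numSlots
  numBlockCells = 32 # e.g. blockSize div cellSize
  numSlotCells = numSlotBlocks * numBlockCells # 96 uncorrected slot cells
  pow2SlotCells = nextPowerOfTwo(numSlotCells) # 128
  numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks # 4 - 3 = 1
  numSlotBlocksTotal =
    if numPadSlotBlocks > 0: numSlotBlocks + numPadSlotBlocks else: numSlotBlocks

assert numSlotBlocksTotal == 4 # each slot is padded up to a power-of-two cell count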
"Slot builder": reset(chunker) test "Can only create builder with protected manifest": - let - unprotectedManifest = Manifest.new( - treeCid = Cid.example, - blockSize = blockSize.NBytes, - datasetSize = originalDatasetSize.NBytes) + let unprotectedManifest = Manifest.new( + treeCid = Cid.example, + blockSize = blockSize.NBytes, + datasetSize = originalDatasetSize.NBytes, + ) check: - Poseidon2Builder.new(localStore, unprotectedManifest, cellSize = cellSize) - .error.msg == "Manifest is not protected." + Poseidon2Builder.new(localStore, unprotectedManifest, cellSize = cellSize).error.msg == + "Manifest is not protected." test "Number of blocks must be devisable by number of slots": - let - mismatchManifest = Manifest.new( - manifest = Manifest.new( - treeCid = Cid.example, - blockSize = blockSize.NBytes, - datasetSize = originalDatasetSize.NBytes), + let mismatchManifest = Manifest.new( + manifest = Manifest.new( treeCid = Cid.example, - datasetSize = totalDatasetSize.NBytes, - ecK = ecK - 1, - ecM = ecM, - strategy = Strategy) + blockSize = blockSize.NBytes, + datasetSize = originalDatasetSize.NBytes, + ), + treeCid = Cid.example, + datasetSize = totalDatasetSize.NBytes, + ecK = ecK - 1, + ecM = ecM, + strategy = Strategy, + ) check: - Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize) - .error.msg == "Number of blocks must be divisable by number of slots." + Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == + "Number of blocks must be divisable by number of slots." test "Block size must be divisable by cell size": - let - mismatchManifest = Manifest.new( - manifest = Manifest.new( - treeCid = Cid.example, - blockSize = (blockSize + 1).NBytes, - datasetSize = (originalDatasetSize - 1).NBytes), + let mismatchManifest = Manifest.new( + manifest = Manifest.new( treeCid = Cid.example, - datasetSize = (totalDatasetSize - 1).NBytes, - ecK = ecK, - ecM = ecM, - strategy = Strategy) + blockSize = (blockSize + 1).NBytes, + datasetSize = (originalDatasetSize - 1).NBytes, + ), + treeCid = Cid.example, + datasetSize = (totalDatasetSize - 1).NBytes, + ecK = ecK, + ecM = ecM, + strategy = Strategy, + ) check: - Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize) - .error.msg == "Block size must be divisable by cell size." + Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == + "Block size must be divisable by cell size." test "Should build correct slot builder": - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = + Poseidon2Builder.new(localStore, protectedManifest, cellSize = cellSize).tryGet() check: builder.cellSize == cellSize @@ -169,15 +167,13 @@ suite "Slot builder": test "Should build slot hashes for all slots": let - steppedStrategy = Strategy.init( - 0, numBlocksTotal - 1, numSlots) + steppedStrategy = Strategy.init(0, numBlocksTotal - 1, numSlots) - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = Poseidon2Builder + .new(localStore, protectedManifest, cellSize = cellSize) + .tryGet() - for i in 0.. 
(protectedManifest.numSlotBlocks - 1): - emptyDigest - else: - SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) + for i in 0 ..< numSlots: + let slotHashes = collect(newSeq): + for j, idx in steppedStrategy.getIndicies(i): + if j > (protectedManifest.numSlotBlocks - 1): + emptyDigest + else: + SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) Merkle.digest(slotHashes) @@ -272,20 +262,18 @@ suite "Slot builder": test "Should build correct verification root manifest": let steppedStrategy = Strategy.init(0, numBlocksTotal - 1, numSlots) - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = Poseidon2Builder + .new(localStore, protectedManifest, cellSize = cellSize) + .tryGet() slotsHashes = collect(newSeq): - for i in 0.. (protectedManifest.numSlotBlocks - 1): - emptyDigest - else: - SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) + for i in 0 ..< numSlots: + let slotHashes = collect(newSeq): + for j, idx in steppedStrategy.getIndicies(i): + if j > (protectedManifest.numSlotBlocks - 1): + emptyDigest + else: + SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) Merkle.digest(slotHashes) @@ -300,68 +288,46 @@ suite "Slot builder": test "Should not build from verifiable manifest with 0 slots": var - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = Poseidon2Builder + .new(localStore, protectedManifest, cellSize = cellSize) + .tryGet() verifyManifest = (await builder.buildManifest()).tryGet() verifyManifest.slotRoots = @[] - check Poseidon2Builder.new( - localStore, - verifyManifest, - cellSize = cellSize).isErr + check Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).isErr test "Should not build from verifiable manifest with incorrect number of slots": var - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = Poseidon2Builder + .new(localStore, protectedManifest, cellSize = cellSize) + .tryGet() verifyManifest = (await builder.buildManifest()).tryGet() - verifyManifest.slotRoots.del( - verifyManifest.slotRoots.len - 1 - ) + verifyManifest.slotRoots.del(verifyManifest.slotRoots.len - 1) - check Poseidon2Builder.new( - localStore, - verifyManifest, - cellSize = cellSize).isErr + check Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).isErr test "Should not build from verifiable manifest with invalid verify root": - let - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + let builder = + Poseidon2Builder.new(localStore, protectedManifest, cellSize = cellSize).tryGet() - var - verifyManifest = (await builder.buildManifest()).tryGet() + var verifyManifest = (await builder.buildManifest()).tryGet() - rng.shuffle( - Rng.instance, - verifyManifest.verifyRoot.data.buffer) + rng.shuffle(Rng.instance, verifyManifest.verifyRoot.data.buffer) - check Poseidon2Builder.new( - localStore, - verifyManifest, - cellSize = cellSize).isErr + check Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).isErr test "Should build from verifiable manifest": let - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = Poseidon2Builder + .new(localStore, protectedManifest, cellSize = cellSize) + .tryGet() verifyManifest = (await builder.buildManifest()).tryGet() - verificationBuilder = Poseidon2Builder.new( - localStore, - verifyManifest, - cellSize = 
cellSize).tryGet() + verificationBuilder = + Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).tryGet() check: builder.slotRoots == verificationBuilder.slotRoots diff --git a/tests/codex/stores/commonstoretests.nim b/tests/codex/stores/commonstoretests.nim index 7d6cc89a..5e722a8a 100644 --- a/tests/codex/stores/commonstoretests.nim +++ b/tests/codex/stores/commonstoretests.nim @@ -22,11 +22,9 @@ type Before* = proc(): Future[void] {.gcsafe.} After* = proc(): Future[void] {.gcsafe.} -proc commonBlockStoreTests*(name: string, - provider: StoreProvider, - before: Before = nil, - after: After = nil) = - +proc commonBlockStoreTests*( + name: string, provider: StoreProvider, before: Before = nil, after: After = nil +) = asyncchecksuite name & " Store Common": var newBlock, newBlock1, newBlock2, newBlock3: Block @@ -40,7 +38,8 @@ proc commonBlockStoreTests*(name: string, newBlock2 = Block.new("2".repeat(100).toBytes()).tryGet() newBlock3 = Block.new("3".repeat(100).toBytes()).tryGet() - (manifest, tree) = makeManifestAndTree(@[newBlock, newBlock1, newBlock2, newBlock3]).tryGet() + (manifest, tree) = + makeManifestAndTree(@[newBlock, newBlock1, newBlock2, newBlock3]).tryGet() if not isNil(before): await before() @@ -59,8 +58,9 @@ proc commonBlockStoreTests*(name: string, test "putBlock raises onBlockStored": var storedCid = Cid.example - proc onStored(cid: Cid) {.async.} = + proc onStored(cid: Cid) {.async.} = storedCid = cid + store.onBlockStored = onStored.some() (await store.putBlock(newBlock1)).tryGet() @@ -100,15 +100,13 @@ proc commonBlockStoreTests*(name: string, let blocks = @[newBlock1, newBlock2, newBlock3] - putHandles = await allFinished( - blocks.mapIt( store.putBlock( it ) )) + putHandles = await allFinished(blocks.mapIt(store.putBlock(it))) for handle in putHandles: check not handle.failed check handle.read.isOk - let - cids = (await store.listBlocks(blockType = BlockType.Block)).tryGet() + let cids = (await store.listBlocks(blockType = BlockType.Block)).tryGet() var count = 0 for c in cids: @@ -121,17 +119,18 @@ proc commonBlockStoreTests*(name: string, test "listBlocks Manifest": let blocks = @[newBlock1, newBlock2, newBlock3] - manifestBlock = Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + manifestBlock = + Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() treeBlock = Block.new(tree.encode()).tryGet() putHandles = await allFinished( - (@[treeBlock, manifestBlock] & blocks).mapIt( store.putBlock( it ) )) + (@[treeBlock, manifestBlock] & blocks).mapIt(store.putBlock(it)) + ) for handle in putHandles: check not handle.failed check handle.read.isOk - let - cids = (await store.listBlocks(blockType = BlockType.Manifest)).tryGet() + let cids = (await store.listBlocks(blockType = BlockType.Manifest)).tryGet() var count = 0 for c in cids: @@ -145,17 +144,18 @@ proc commonBlockStoreTests*(name: string, test "listBlocks Both": let blocks = @[newBlock1, newBlock2, newBlock3] - manifestBlock = Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + manifestBlock = + Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() treeBlock = Block.new(tree.encode()).tryGet() putHandles = await allFinished( - (@[treeBlock, manifestBlock] & blocks).mapIt( store.putBlock( it ) )) + (@[treeBlock, manifestBlock] & blocks).mapIt(store.putBlock(it)) + ) for handle in putHandles: check not handle.failed check handle.read.isOk - let - cids = (await store.listBlocks(blockType = BlockType.Both)).tryGet() + let cids = 
(await store.listBlocks(blockType = BlockType.Both)).tryGet() var count = 0 for c in cids: diff --git a/tests/codex/stores/repostore/testcoders.nim b/tests/codex/stores/repostore/testcoders.nim index 47cf4097..f4d2b5e7 100644 --- a/tests/codex/stores/repostore/testcoders.nim +++ b/tests/codex/stores/repostore/testcoders.nim @@ -12,7 +12,6 @@ import pkg/codex/stores/repostore/coders import ../../helpers checksuite "Test coders": - proc rand(T: type NBytes): T = rand(Natural).NBytes @@ -21,29 +20,18 @@ checksuite "Test coders": E(ordinals[rand(ordinals.len - 1)]) proc rand(T: type QuotaUsage): T = - QuotaUsage( - used: rand(NBytes), - reserved: rand(NBytes) - ) + QuotaUsage(used: rand(NBytes), reserved: rand(NBytes)) proc rand(T: type BlockMetadata): T = BlockMetadata( - expiry: rand(SecondsSince1970), - size: rand(NBytes), - refCount: rand(Natural) + expiry: rand(SecondsSince1970), size: rand(NBytes), refCount: rand(Natural) ) proc rand(T: type DeleteResult): T = - DeleteResult( - kind: rand(DeleteResultKind), - released: rand(NBytes) - ) + DeleteResult(kind: rand(DeleteResultKind), released: rand(NBytes)) proc rand(T: type StoreResult): T = - StoreResult( - kind: rand(StoreResultKind), - used: rand(NBytes) - ) + StoreResult(kind: rand(StoreResultKind), used: rand(NBytes)) test "Natural encode/decode": for val in newSeqWith[Natural](100, rand(Natural)) & @[Natural.low, Natural.high]: diff --git a/tests/codex/stores/testcachestore.nim b/tests/codex/stores/testcachestore.nim index 51c59bbf..e7025388 100644 --- a/tests/codex/stores/testcachestore.nim +++ b/tests/codex/stores/testcachestore.nim @@ -37,16 +37,13 @@ checksuite "Cache Store": # initial cache blocks total more than cache size, currentSize should # never exceed max cache size store = CacheStore.new( - blocks = @[newBlock1, newBlock2, newBlock3], - cacheSize = 200, - chunkSize = 1) + blocks = @[newBlock1, newBlock2, newBlock3], cacheSize = 200, chunkSize = 1 + ) check store.currentSize == 200'nb # cache size cannot be less than chunks size expect ValueError: - discard CacheStore.new( - cacheSize = 99, - chunkSize = 100) + discard CacheStore.new(cacheSize = 99, chunkSize = 100) test "putBlock": (await store.putBlock(newBlock1)).tryGet() @@ -58,10 +55,8 @@ checksuite "Cache Store": check not (await store.hasBlock(newBlock1.cid)).tryGet() # block being added causes removal of LRU block - store = CacheStore.new( - @[newBlock1, newBlock2, newBlock3], - cacheSize = 200, - chunkSize = 1) + store = + CacheStore.new(@[newBlock1, newBlock2, newBlock3], cacheSize = 200, chunkSize = 1) check: not (await store.hasBlock(newBlock1.cid)).tryGet() (await store.hasBlock(newBlock2.cid)).tryGet() @@ -69,5 +64,7 @@ checksuite "Cache Store": store.currentSize.int == newBlock2.data.len + newBlock3.data.len # 200 commonBlockStoreTests( - "Cache", proc: BlockStore = - BlockStore(CacheStore.new(cacheSize = 1000, chunkSize = 1))) + "Cache", + proc(): BlockStore = + BlockStore(CacheStore.new(cacheSize = 1000, chunkSize = 1)), +) diff --git a/tests/codex/stores/testkeyutils.nim b/tests/codex/stores/testkeyutils.nim index e1a8cf1b..238e2681 100644 --- a/tests/codex/stores/testkeyutils.nim +++ b/tests/codex/stores/testkeyutils.nim @@ -32,15 +32,15 @@ proc createManifestCid(): ?!Cid = codec = ManifestCodec version = CIDv1 - let hash = ? MultiHash.digest($mcodec, bytes).mapFailure - let cid = ? 
Cid.init(version, codec, hash).mapFailure + let hash = ?MultiHash.digest($mcodec, bytes).mapFailure + let cid = ?Cid.init(version, codec, hash).mapFailure return success cid checksuite "KeyUtils": test "makePrefixKey should create block key": let length = 6 let cid = Cid.example - let expectedPrefix = ($cid)[^length..^1] + let expectedPrefix = ($cid)[^length ..^ 1] let expectedPostfix = $cid let key = !makePrefixKey(length, cid).option @@ -56,7 +56,7 @@ checksuite "KeyUtils": test "makePrefixKey should create manifest key": let length = 6 let cid = !createManifestCid().option - let expectedPrefix = ($cid)[^length..^1] + let expectedPrefix = ($cid)[^length ..^ 1] let expectedPostfix = $cid let key = !makePrefixKey(length, cid).option diff --git a/tests/codex/stores/testmaintenance.nim b/tests/codex/stores/testmaintenance.nim index c63d6bef..e5ff519e 100644 --- a/tests/codex/stores/testmaintenance.nim +++ b/tests/codex/stores/testmaintenance.nim @@ -34,10 +34,7 @@ checksuite "BlockMaintainer": var testBe3: BlockExpiration proc createTestExpiration(expiry: SecondsSince1970): BlockExpiration = - BlockExpiration( - cid: bt.Block.example.cid, - expiry: expiry - ) + BlockExpiration(cid: bt.Block.example.cid, expiry: expiry) setup: mockClock = MockClock.new() @@ -56,11 +53,8 @@ checksuite "BlockMaintainer": mockTimer = MockTimer.new() blockMaintainer = BlockMaintainer.new( - mockRepoStore, - interval, - numberOfBlocksPerInterval = 2, - mockTimer, - mockClock) + mockRepoStore, interval, numberOfBlocksPerInterval = 2, mockTimer, mockClock + ) test "Start should start timer at provided interval": blockMaintainer.start() @@ -179,9 +173,11 @@ checksuite "BlockMaintainer": mockClock.set(650) await invokeTimerManyTimes() # First new block has expired - check mockRepoStore.delBlockCids == [testBe1.cid, testBe2.cid, testBe3.cid, testBe4.cid] + check mockRepoStore.delBlockCids == + [testBe1.cid, testBe2.cid, testBe3.cid, testBe4.cid] mockClock.set(750) await invokeTimerManyTimes() # Second new block has expired - check mockRepoStore.delBlockCids == [testBe1.cid, testBe2.cid, testBe3.cid, testBe4.cid, testBe5.cid] + check mockRepoStore.delBlockCids == + [testBe1.cid, testBe2.cid, testBe3.cid, testBe4.cid, testBe5.cid] diff --git a/tests/codex/stores/testqueryiterhelper.nim b/tests/codex/stores/testqueryiterhelper.nim index ddc769c8..5d3d68fd 100644 --- a/tests/codex/stores/testqueryiterhelper.nim +++ b/tests/codex/stores/testqueryiterhelper.nim @@ -18,8 +18,7 @@ proc decode(T: type string, bytes: seq[byte]): ?!T = success(string.fromBytes(bytes)) asyncchecksuite "Test QueryIter helper": - var - tds: TypedDatastore + var tds: TypedDatastore setupAll: tds = TypedDatastore.init(SQLiteDatastore.new(Memory).tryGet()) @@ -29,10 +28,7 @@ asyncchecksuite "Test QueryIter helper": test "Should auto-dispose when QueryIter finishes": let - source = { - "a": "11", - "b": "22" - }.toTable + source = {"a": "11", "b": "22"}.toTable Root = Key.init("/queryitertest").tryGet() for k, v in source: diff --git a/tests/codex/stores/testrepostore.nim b/tests/codex/stores/testrepostore.nim index 1bcffbf6..dda4ed82 100644 --- a/tests/codex/stores/testrepostore.nim +++ b/tests/codex/stores/testrepostore.nim @@ -23,7 +23,6 @@ import ../examples import ./commonstoretests checksuite "Test RepoStore start/stop": - var repoDs: Datastore metaDs: Datastore @@ -63,8 +62,7 @@ asyncchecksuite "RepoStore": repo: RepoStore - let - now: SecondsSince1970 = 123 + let now: SecondsSince1970 = 123 setup: repoDs = SQLiteDatastore.new(Memory).tryGet() @@ 
-191,8 +189,7 @@ asyncchecksuite "RepoStore": duration = 10.seconds blk = createTestBlock(100) - let - expectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + 10) + let expectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + 10) (await repo.putBlock(blk, duration.some)).tryGet @@ -202,11 +199,10 @@ asyncchecksuite "RepoStore": expectedExpiration in expirations test "Should store block with default expiration timestamp when not provided": - let - blk = createTestBlock(100) + let blk = createTestBlock(100) - let - expectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + DefaultBlockTtl.seconds) + let expectedExpiration = + BlockExpiration(cid: blk.cid, expiry: now + DefaultBlockTtl.seconds) (await repo.putBlock(blk)).tryGet @@ -234,8 +230,7 @@ asyncchecksuite "RepoStore": (await repo.ensureExpiry(blk.cid, 0)).tryGet test "Should fail when updating expiry of non-existing block": - let - blk = createTestBlock(100) + let blk = createTestBlock(100) expect BlockNotFoundError: (await repo.ensureExpiry(blk.cid, 10)).tryGet @@ -296,7 +291,9 @@ asyncchecksuite "RepoStore": expirations.len == 0 test "Should retrieve block expiration information": - proc unpack(beIter: Future[?!AsyncIter[BlockExpiration]]): Future[seq[BlockExpiration]] {.async.} = + proc unpack( + beIter: Future[?!AsyncIter[BlockExpiration]] + ): Future[seq[BlockExpiration]] {.async.} = var expirations = newSeq[BlockExpiration](0) without iter =? (await beIter), err: return expirations @@ -311,22 +308,22 @@ asyncchecksuite "RepoStore": blk2 = createTestBlock(11) blk3 = createTestBlock(12) - let - expectedExpiration: SecondsSince1970 = now + 10 + let expectedExpiration: SecondsSince1970 = now + 10 proc assertExpiration(be: BlockExpiration, expectedBlock: bt.Block) = check: be.cid == expectedBlock.cid be.expiry == expectedExpiration - (await repo.putBlock(blk1, duration.some)).tryGet (await repo.putBlock(blk2, duration.some)).tryGet (await repo.putBlock(blk3, duration.some)).tryGet let - blockExpirations1 = await unpack(repo.getBlockExpirations(maxNumber=2, offset=0)) - blockExpirations2 = await unpack(repo.getBlockExpirations(maxNumber=2, offset=2)) + blockExpirations1 = + await unpack(repo.getBlockExpirations(maxNumber = 2, offset = 0)) + blockExpirations2 = + await unpack(repo.getBlockExpirations(maxNumber = 2, offset = 2)) check blockExpirations1.len == 2 assertExpiration(blockExpirations1[0], blk2) @@ -358,15 +355,18 @@ asyncchecksuite "RepoStore": check has.get commonBlockStoreTests( - "RepoStore Sql backend", proc: BlockStore = + "RepoStore Sql backend", + proc(): BlockStore = BlockStore( RepoStore.new( SQLiteDatastore.new(Memory).tryGet(), SQLiteDatastore.new(Memory).tryGet(), - clock = MockClock.new()))) + clock = MockClock.new(), + ) + ), +) -const - path = currentSourcePath().parentDir / "test" +const path = currentSourcePath().parentDir / "test" proc before() {.async.} = createDir(path) @@ -374,15 +374,18 @@ proc before() {.async.} = proc after() {.async.} = removeDir(path) -let - depth = path.split(DirSep).len +let depth = path.split(DirSep).len commonBlockStoreTests( - "RepoStore FS backend", proc: BlockStore = + "RepoStore FS backend", + proc(): BlockStore = BlockStore( RepoStore.new( FSDatastore.new(path, depth).tryGet(), SQLiteDatastore.new(Memory).tryGet(), - clock = MockClock.new())), + clock = MockClock.new(), + ) + ), before = before, - after = after) + after = after, +) diff --git a/tests/codex/testasyncheapqueue.nim b/tests/codex/testasyncheapqueue.nim index dcdafcf2..eb3767cd 100644 --- 
a/tests/codex/testasyncheapqueue.nim +++ b/tests/codex/testasyncheapqueue.nim @@ -7,8 +7,7 @@ import pkg/codex/rng import ../asynctest import ./helpers -type - Task* = tuple[name: string, priority: int] +type Task* = tuple[name: string, priority: int] proc `<`*(a, b: Task): bool = a.priority < b.priority @@ -70,7 +69,7 @@ checksuite "Synchronous tests": check res == @[9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - test "Test del": # Test del + test "Test del": var heap = newAsyncHeapQueue[int]() let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] for item in data: @@ -91,7 +90,7 @@ checksuite "Synchronous tests": heap.del(heap.find(2)) check heap.toSortedSeq == @[1, 3, 4, 8, 9] - test "Test del last": # Test del last + test "Test del last": var heap = newAsyncHeapQueue[int]() let data = [1, 2, 3] for item in data: @@ -190,11 +189,11 @@ asyncchecksuite "Asynchronous Tests": for item in data: check heap.pushNoWait(item).isOk - check heap[0] == ("b", 3) # sanity check for order + check heap[0] == ("b", 3) # sanity check for order - let fut = heap.pushOrUpdate(("c", 2)) # attempt to push a non existen item but block - check heap.popNoWait().tryGet() == ("b", 3) # pop one off - await fut # wait for push to complete + let fut = heap.pushOrUpdate(("c", 2)) # attempt to push a non-existent item but block + check heap.popNoWait().tryGet() == ("b", 3) # pop one off + await fut # wait for push to complete check heap[0] == (name: "c", priority: 2) # check order again @@ -215,10 +214,7 @@ asyncchecksuite "Asynchronous Tests": let data = ["d", "b", "c", "a", "h", "e", "f", "g"] for item in data: - check heap.pushNoWait(( - name: item, - priority: Rng.instance().rand(data.len) - )).isOk + check heap.pushNoWait((name: item, priority: Rng.instance().rand(data.len))).isOk let del = heap[3] heap.delete(del) diff --git a/tests/codex/testasyncstreamwrapper.nim b/tests/codex/testasyncstreamwrapper.nim index 8a325351..2aa23039 100644 --- a/tests/codex/testasyncstreamwrapper.nim +++ b/tests/codex/testasyncstreamwrapper.nim @@ -9,12 +9,10 @@ import ../asynctest import ./helpers asyncchecksuite "AsyncStreamWrapper": - let data = "0123456789012345678901234567890123456789" let address = initTAddress("127.0.0.1:46001") - proc serveReadingClient(server: StreamServer, - transp: StreamTransport) {.async.} = + proc serveReadingClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = newAsyncStreamWriter(transp) await wstream.write(data) await wstream.finish() @@ -73,7 +71,8 @@ asyncchecksuite "AsyncStreamWrapper": test "Write all data": var buf = newSeq[byte](data.len) - var server = createStreamServer(address, serveWritingClient(addr buf[0], buf.len), {ReuseAddr}) + var server = + createStreamServer(address, serveWritingClient(addr buf[0], buf.len), {ReuseAddr}) server.start() var transp = await connect(address) diff --git a/tests/codex/testchunking.nim b/tests/codex/testchunking.nim index 216fbcc4..2241a82b 100644 --- a/tests/codex/testchunking.nim +++ b/tests/codex/testchunking.nim @@ -13,14 +13,11 @@ import ./helpers # CancelledError* = object of FutureError # LPStreamError* = object of LPError -type - CrashingStreamWrapper* = ref object of LPStream - toRaise*: proc(): void {.gcsafe, raises: [CancelledError, LPStreamError].} +type CrashingStreamWrapper* = ref object of LPStream + toRaise*: proc(): void {.gcsafe, raises: [CancelledError, LPStreamError].} method readOnce*( - self: CrashingStreamWrapper, - pbytes: pointer, - nbytes: int + self: CrashingStreamWrapper, pbytes: pointer, nbytes: int ): Future[int] {.gcsafe, 
async: (raises: [CancelledError, LPStreamError]).} = self.toRaise() @@ -28,9 +25,9 @@ asyncchecksuite "Chunking": test "should return proper size chunks": var offset = 0 let contents = [1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0] - proc reader(data: ChunkBuffer, len: int): Future[int] - {.gcsafe, async, raises: [Defect].} = - + proc reader( + data: ChunkBuffer, len: int + ): Future[int] {.gcsafe, async, raises: [Defect].} = let read = min(contents.len - offset, len) if read == 0: return 0 @@ -39,9 +36,7 @@ asyncchecksuite "Chunking": offset += read return read - let chunker = Chunker.new( - reader = reader, - chunkSize = 2'nb) + let chunker = Chunker.new(reader = reader, chunkSize = 2'nb) check: (await chunker.getBytes()) == [1.byte, 2] @@ -54,9 +49,7 @@ asyncchecksuite "Chunking": test "should chunk LPStream": let stream = BufferStream.new() - let chunker = LPStreamChunker.new( - stream = stream, - chunkSize = 2'nb) + let chunker = LPStreamChunker.new(stream = stream, chunkSize = 2'nb) proc writer() {.async.} = for d in [@[1.byte, 2, 3, 4], @[5.byte, 6, 7, 8], @[9.byte, 0]]: @@ -97,9 +90,7 @@ asyncchecksuite "Chunking": proc raiseStreamException(exc: ref CancelledError | ref LPStreamError) {.async.} = let stream = CrashingStreamWrapper.new() - let chunker = LPStreamChunker.new( - stream = stream, - chunkSize = 2'nb) + let chunker = LPStreamChunker.new(stream = stream, chunkSize = 2'nb) stream.toRaise = proc(): void {.raises: [CancelledError, LPStreamError].} = raise exc @@ -118,4 +109,4 @@ asyncchecksuite "Chunking": test "stream should forward LPStreamError": expect LPStreamError: - await raiseStreamException(newException(LPStreamError, "test error")) \ No newline at end of file + await raiseStreamException(newException(LPStreamError, "test error")) diff --git a/tests/codex/testclock.nim b/tests/codex/testclock.nim index 513e4963..2b0158cf 100644 --- a/tests/codex/testclock.nim +++ b/tests/codex/testclock.nim @@ -12,15 +12,7 @@ checksuite "Clock": check restored == seconds test "SecondsSince1970 should support bytes conversions": - let secondsToTest: seq[int64] = @[ - int64.high, - int64.low, - 0, - 1, - 12345, - -1, - -12345 - ] + let secondsToTest: seq[int64] = @[int64.high, int64.low, 0, 1, 12345, -1, -12345] for seconds in secondsToTest: testConversion(seconds) diff --git a/tests/codex/testerasure.nim b/tests/codex/testerasure.nim index 96e900d8..952497e9 100644 --- a/tests/codex/testerasure.nim +++ b/tests/codex/testerasure.nim @@ -43,11 +43,8 @@ suite "Erasure encode/decode": await metaTmp.destroyDb() proc encode(buffers, parity: int): Future[Manifest] {.async.} = - let - encoded = (await erasure.encode( - manifest, - buffers.Natural, - parity.Natural)).tryGet() + let encoded = + (await erasure.encode(manifest, buffers.Natural, parity.Natural)).tryGet() check: encoded.blocksCount mod (buffers + parity) == 0 @@ -67,14 +64,13 @@ suite "Erasure encode/decode": column = rng.rand((encoded.blocksCount div encoded.steps) - 1) # random column dropped: seq[int] - for _ in 0.. 
blockSize": @@ -85,25 +79,22 @@ asyncchecksuite "StoreStream": else: check read == 1 - check sequentialBytes(buf,read,n) + check sequentialBytes(buf, read, n) n += read test "Read exact bytes within block boundary": - var - buf = newSeq[byte](5) + var buf = newSeq[byte](5) await stream.readExactly(addr buf[0], 5) - check sequentialBytes(buf,5,0) + check sequentialBytes(buf, 5, 0) test "Read exact bytes outside of block boundary": - var - buf = newSeq[byte](15) + var buf = newSeq[byte](15) await stream.readExactly(addr buf[0], 15) - check sequentialBytes(buf,15,0) + check sequentialBytes(buf, 15, 0) suite "StoreStream - Size Tests": - var stream: StoreStream teardown: @@ -111,9 +102,7 @@ suite "StoreStream - Size Tests": test "Should return dataset size as stream size": let manifest = Manifest.new( - treeCid = Cid.example, - datasetSize = 80.NBytes, - blockSize = 10.NBytes + treeCid = Cid.example, datasetSize = 80.NBytes, blockSize = 10.NBytes ) stream = StoreStream.new(CacheStore.new(), manifest) @@ -132,7 +121,7 @@ suite "StoreStream - Size Tests": ecM = 1, originalTreeCid = Cid.example, originalDatasetSize = 80.NBytes, # size without parity bytes - strategy = StrategyType.SteppedStrategy + strategy = StrategyType.SteppedStrategy, ) stream = StoreStream.new(CacheStore.new(), protectedManifest) diff --git a/tests/codex/testvalidation.nim b/tests/codex/testvalidation.nim index 2cfe2f06..95d913c3 100644 --- a/tests/codex/testvalidation.nim +++ b/tests/codex/testvalidation.nim @@ -22,28 +22,29 @@ asyncchecksuite "validation": let validationGroups = ValidationGroups(8).some let slot = Slot.example let proof = Groth16Proof.example - let collateral = slot.request.ask.collateral + let collateral = slot.request.ask.collateralPerSlot var market: MockMarket var clock: MockClock var groupIndex: uint16 var validation: Validation - proc initValidationConfig(maxSlots: MaxSlots, - validationGroups: ?ValidationGroups, - groupIndex: uint16 = 0): ValidationConfig = - without validationConfig =? ValidationConfig.init( - maxSlots, groups=validationGroups, groupIndex), error: + proc initValidationConfig( + maxSlots: MaxSlots, validationGroups: ?ValidationGroups, groupIndex: uint16 = 0 + ): ValidationConfig = + without validationConfig =? + ValidationConfig.init(maxSlots, groups = validationGroups, groupIndex), error: raiseAssert fmt"Creating ValidationConfig failed! 
Error msg: {error.msg}" validationConfig - - proc newValidation(clock: Clock, - market: Market, - maxSlots: MaxSlots, - validationGroups: ?ValidationGroups, - groupIndex: uint16 = 0): Validation = - let validationConfig = initValidationConfig( - maxSlots, validationGroups, groupIndex) + + proc newValidation( + clock: Clock, + market: Market, + maxSlots: MaxSlots, + validationGroups: ?ValidationGroups, + groupIndex: uint16 = 0, + ): Validation = + let validationConfig = initValidationConfig(maxSlots, validationGroups, groupIndex) Validation.new(clock, market, validationConfig) setup: @@ -52,14 +53,13 @@ asyncchecksuite "validation": market = MockMarket.new(clock = Clock(clock).some) market.config.proofs.period = period.u256 market.config.proofs.timeout = timeout.u256 - validation = newValidation( - clock, market, maxSlots, validationGroups, groupIndex) + validation = newValidation(clock, market, maxSlots, validationGroups, groupIndex) teardown: # calling stop on validation that did not start is harmless await validation.stop() - proc advanceToNextPeriod = + proc advanceToNextPeriod() = let periodicity = Periodicity(seconds: period.u256) let period = periodicity.periodOf(clock.now().u256) let periodEnd = periodicity.periodEnd(period) @@ -70,37 +70,44 @@ asyncchecksuite "validation": for (validationGroups, groupIndex) in [(100, 100'u16), (100, 101'u16)]: test "initializing ValidationConfig fails when groupIndex is " & - "greater than or equal to validationGroups " & - fmt"(testing for {groupIndex = }, {validationGroups = })": + "greater than or equal to validationGroups " & + fmt"(testing for {groupIndex = }, {validationGroups = })": let groups = ValidationGroups(validationGroups).some - let validationConfig = ValidationConfig.init( - maxSlots, groups = groups, groupIndex = groupIndex) + let validationConfig = + ValidationConfig.init(maxSlots, groups = groups, groupIndex = groupIndex) check validationConfig.isFailure == true - check validationConfig.error.msg == "The value of the group index " & - "must be less than validation groups! " & - fmt"(got: {groupIndex = }, groups = {!groups})" + check validationConfig.error.msg == + "The value of the group index " & "must be less than validation groups! " & + fmt"(got: {groupIndex = }, groups = {!groups})" test "initializing ValidationConfig fails when maxSlots is negative": let maxSlots = -1 - let validationConfig = ValidationConfig.init( - maxSlots = maxSlots, groups = ValidationGroups.none) + let validationConfig = + ValidationConfig.init(maxSlots = maxSlots, groups = ValidationGroups.none) check validationConfig.isFailure == true - check validationConfig.error.msg == "The value of maxSlots must " & - fmt"be greater than or equal to 0! (got: {maxSlots})" + check validationConfig.error.msg == + "The value of maxSlots must " & + fmt"be greater than or equal to 0! (got: {maxSlots})" test "initializing ValidationConfig fails when maxSlots is negative " & - "(validationGroups set)": + "(validationGroups set)": let maxSlots = -1 let groupIndex = 0'u16 - let validationConfig = ValidationConfig.init( - maxSlots = maxSlots, groups = validationGroups, groupIndex) + let validationConfig = + ValidationConfig.init(maxSlots = maxSlots, groups = validationGroups, groupIndex) check validationConfig.isFailure == true - check validationConfig.error.msg == "The value of maxSlots must " & - fmt"be greater than or equal to 0! (got: {maxSlots})" + check validationConfig.error.msg == + "The value of maxSlots must " & + fmt"be greater than or equal to 0! 
(got: {maxSlots})" test "slot is not observed if it is not in the validation group": - validation = newValidation(clock, market, maxSlots, validationGroups, - (groupIndex + 1) mod uint16(!validationGroups)) + validation = newValidation( + clock, + market, + maxSlots, + validationGroups, + (groupIndex + 1) mod uint16(!validationGroups), + ) await validation.start() await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral) check validation.slots.len == 0 @@ -117,7 +124,7 @@ asyncchecksuite "validation": check validation.slots == @[slot.id] test "slot should be observed if validation group is not set (and " & - "maxSlots is not 0)": + "maxSlots is not 0)": validation = newValidation(clock, market, maxSlots, ValidationGroups.none) await validation.start() await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral) @@ -151,49 +158,45 @@ asyncchecksuite "validation": test "it does not monitor more than the maximum number of slots": validation = newValidation(clock, market, maxSlots, ValidationGroups.none) await validation.start() - for _ in 0.. $i) + let iter = Iter.new(0 ..< 5).map((i: int) => $i) check: iter.toSeq() == @["0", "1", "2", "3", "4"] test "Should leave only odd items using `filter`": - let iter = Iter.new(0..<5) - .filter((i: int) => (i mod 2) == 1) + let iter = Iter.new(0 ..< 5).filter((i: int) => (i mod 2) == 1) check: iter.toSeq() == @[1, 3] test "Should leave only odd items using `mapFilter`": let - iter1 = Iter.new(0..<5) - iter2 = mapFilter[int, string](iter1, + iter1 = Iter.new(0 ..< 5) + iter2 = mapFilter[int, string]( + iter1, proc(i: int): ?string = if (i mod 2) == 1: some($i) else: - string.none + string.none, ) check: iter2.toSeq() == @["1", "3"] test "Should yield all items before err using `map`": - let - iter = Iter.new(0..<5) - .map( - proc (i: int): string = - if i < 3: - return $i - else: - raise newException(CatchableError, "Some error") - ) + let iter = Iter.new(0 ..< 5).map( + proc(i: int): string = + if i < 3: + return $i + else: + raise newException(CatchableError, "Some error") + ) var collected: seq[string] @@ -87,15 +81,13 @@ checksuite "Test Iter": iter.finished test "Should yield all items before err using `filter`": - let - iter = Iter.new(0..<5) - .filter( - proc (i: int): bool = - if i < 3: - return true - else: - raise newException(CatchableError, "Some error") - ) + let iter = Iter.new(0 ..< 5).filter( + proc(i: int): bool = + if i < 3: + return true + else: + raise newException(CatchableError, "Some error") + ) var collected: seq[int] @@ -109,14 +101,15 @@ checksuite "Test Iter": test "Should yield all items before err using `mapFilter`": let - iter1 = Iter.new(0..<5) - iter2 = mapFilter[int, string](iter1, - proc (i: int): ?string = - if i < 3: - return some($i) - else: - raise newException(CatchableError, "Some error") - ) + iter1 = Iter.new(0 ..< 5) + iter2 = mapFilter[int, string]( + iter1, + proc(i: int): ?string = + if i < 3: + return some($i) + else: + raise newException(CatchableError, "Some error"), + ) var collected: seq[string] diff --git a/tests/codex/utils/testkeyutils.nim b/tests/codex/utils/testkeyutils.nim index c16f21de..2124e682 100644 --- a/tests/codex/utils/testkeyutils.nim +++ b/tests/codex/utils/testkeyutils.nim @@ -29,4 +29,3 @@ checksuite "keyutils": test "reads key file when it does exist": let key = setupKey(path / "keyfile").get() check setupKey(path / "keyfile").get() == key - diff --git a/tests/codex/utils/testoptions.nim b/tests/codex/utils/testoptions.nim index eb566ad7..05f7509e 100644 
--- a/tests/codex/utils/testoptions.nim +++ b/tests/codex/utils/testoptions.nim @@ -13,6 +13,7 @@ checksuite "optional casts": type BaseType = ref object of RootObj SubType = ref object of BaseType + let x: BaseType = SubType() check x as SubType == SubType(x).some @@ -21,6 +22,7 @@ checksuite "optional casts": BaseType = ref object of RootObj SubType = ref object of BaseType OtherType = ref object of BaseType + let x: BaseType = SubType() check x as OtherType == OtherType.none diff --git a/tests/codex/utils/testtrackedfutures.nim b/tests/codex/utils/testtrackedfutures.nim index cbabd39c..35074919 100644 --- a/tests/codex/utils/testtrackedfutures.nim +++ b/tests/codex/utils/testtrackedfutures.nim @@ -63,5 +63,3 @@ asyncchecksuite "tracked futures": check eventually fut2.cancelled check eventually fut3.cancelled check eventually module.trackedFutures.len == 0 - - diff --git a/tests/codex/utils/testutils.nim b/tests/codex/utils/testutils.nim index b8e386d0..92c883be 100644 --- a/tests/codex/utils/testutils.nim +++ b/tests/codex/utils/testutils.nim @@ -3,17 +3,17 @@ import std/unittest import pkg/codex/utils suite "findIt": - setup: type AnObject = object attribute1*: int - var objList = @[ - AnObject(attribute1: 1), - AnObject(attribute1: 3), - AnObject(attribute1: 5), - AnObject(attribute1: 3), - ] + var objList = + @[ + AnObject(attribute1: 1), + AnObject(attribute1: 3), + AnObject(attribute1: 5), + AnObject(attribute1: 3), + ] test "should return index of first object matching predicate": assert objList.findIt(it.attribute1 == 3) == 1 @@ -22,15 +22,13 @@ suite "findIt": assert objList.findIt(it.attribute1 == 15) == -1 suite "parseDuration": - test "should parse durations": - var res: Duration # caller must still know if 'b' refers to bytes|bits + var res: Duration # caller must still know if 'b' refers to bytes|bits check parseDuration("10Hr", res) == 3 check res == hours(10) check parseDuration("64min", res) == 3 check res == minutes(64) check parseDuration("7m/block", res) == 2 # '/' stops parse - check res == minutes(7) # 1 shl 30, forced binary metric + check res == minutes(7) # 1 shl 30, forced binary metric check parseDuration("3d", res) == 2 # '/' stops parse - check res == days(3) # 1 shl 30, forced binary metric - + check res == days(3) # 1 shl 30, forced binary metric diff --git a/tests/config.nims b/tests/config.nims index 55858427..8ae00806 100644 --- a/tests/config.nims +++ b/tests/config.nims @@ -1,8 +1,14 @@ ---path:".." ---threads:on ---tlsEmulation:off +--path: + ".." 
+--threads: + on +--tlsEmulation: + off when not defined(chronicles_log_level): - --define:"chronicles_log_level:NONE" # compile all log statements - --define:"chronicles_sinks:textlines[dynamic]" # allow logs to be filtered at runtime - --"import":"logging" # ensure that logging is ignored at runtime + --define: + "chronicles_log_level:NONE" # compile all log statements + --define: + "chronicles_sinks:textlines[dynamic]" # allow logs to be filtered at runtime + --"import": + "logging" # ensure that logging is ignored at runtime diff --git a/tests/contracts/deployment.nim b/tests/contracts/deployment.nim index f62bb1be..f45aa625 100644 --- a/tests/contracts/deployment.nim +++ b/tests/contracts/deployment.nim @@ -16,7 +16,4 @@ proc address*(_: type Marketplace, dummyVerifier = false): Address = return address - if dummyVerifier: - hardhatMarketWithDummyVerifier - else: - hardhatMarketAddress + if dummyVerifier: hardhatMarketWithDummyVerifier else: hardhatMarketAddress diff --git a/tests/contracts/helpers/mockprovider.nim b/tests/contracts/helpers/mockprovider.nim index ce6e9e34..09e65398 100644 --- a/tests/contracts/helpers/mockprovider.nim +++ b/tests/contracts/helpers/mockprovider.nim @@ -12,9 +12,8 @@ type MockProvider* = ref object of Provider latest: ?int method getBlock*( - provider: MockProvider, - tag: BlockTag -): Future[?Block] {.async: (raises:[ProviderError]).} = + provider: MockProvider, tag: BlockTag +): Future[?Block] {.async: (raises: [ProviderError]).} = try: if tag == BlockTag.latest: if latestBlock =? provider.latest: @@ -33,7 +32,6 @@ method getBlock*( return Block.none except: return Block.none - proc updateEarliestAndLatest(provider: MockProvider, blockNumber: int) = if provider.earliest.isNone: @@ -54,9 +52,7 @@ proc addBlock*(provider: MockProvider, number: int, blk: Block) = proc newMockProvider*(): MockProvider = MockProvider( - blocks: newOrderedTable[int, Block](), - earliest: int.none, - latest: int.none + blocks: newOrderedTable[int, Block](), earliest: int.none, latest: int.none ) proc newMockProvider*(blocks: OrderedTableRef[int, Block]): MockProvider = @@ -65,21 +61,22 @@ proc newMockProvider*(blocks: OrderedTableRef[int, Block]): MockProvider = provider proc newMockProvider*( - numberOfBlocks: int, - earliestBlockNumber: int, - earliestBlockTimestamp: SecondsSince1970, - timeIntervalBetweenBlocks: SecondsSince1970 + numberOfBlocks: int, + earliestBlockNumber: int, + earliestBlockTimestamp: SecondsSince1970, + timeIntervalBetweenBlocks: SecondsSince1970, ): MockProvider = var blocks = newOrderedTable[int, provider.Block]() var blockNumber = earliestBlockNumber var blockTime = earliestBlockTimestamp - for i in 0.. 
292 # 1728436106 => 292 # 1728436110 => 292 - proc generateExpectations( - blocks: seq[(UInt256, UInt256)]): seq[Expectations] = + proc generateExpectations(blocks: seq[(UInt256, UInt256)]): seq[Expectations] = var expectations: seq[Expectations] = @[] - for i in 0..= 3, "must be more than 3 blocks" let rng = Rng.instance() let chunker = RandomChunker.new( - rng, size = DefaultBlockSize * blocks.NBytes, chunkSize = DefaultBlockSize) + rng, size = DefaultBlockSize * blocks.NBytes, chunkSize = DefaultBlockSize + ) var data: seq[byte] while (let moar = await chunker.getBytes(); moar != []): data.add moar return byteutils.toHex(data) -proc example*(_: type RandomChunker): Future[string] {.async.} = +proc example*(_: type RandomChunker): Future[string] {.async.} = await RandomChunker.example(3) diff --git a/tests/helpers/multisetup.nim b/tests/helpers/multisetup.nim index 781b0062..aa434678 100644 --- a/tests/helpers/multisetup.nim +++ b/tests/helpers/multisetup.nim @@ -1,10 +1,9 @@ import pkg/chronos # Allow multiple setups and teardowns in a test suite -template asyncmultisetup* = - var setups: seq[proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}] - var teardowns: seq[ - proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}] +template asyncmultisetup*() = + var setups: seq[proc(): Future[void].Raising([AsyncExceptionError]) {.gcsafe.}] + var teardowns: seq[proc(): Future[void].Raising([AsyncExceptionError]) {.gcsafe.}] setup: for setup in setups: @@ -15,14 +14,18 @@ template asyncmultisetup* = await teardown() template setup(setupBody) {.inject, used.} = - setups.add(proc {.async: ( - handleException: true, raises: [AsyncExceptionError]).} = setupBody) + setups.add( + proc() {.async: (handleException: true, raises: [AsyncExceptionError]).} = + setupBody + ) template teardown(teardownBody) {.inject, used.} = - teardowns.insert(proc {.async: ( - handleException: true, raises: [AsyncExceptionError]).} = teardownBody) + teardowns.insert( + proc() {.async: (handleException: true, raises: [AsyncExceptionError]).} = + teardownBody + ) -template multisetup* = +template multisetup*() = var setups: seq[proc() {.gcsafe.}] var teardowns: seq[proc() {.gcsafe.}] @@ -35,8 +38,12 @@ template multisetup* = teardown() template setup(setupBody) {.inject, used.} = - let setupProc = proc = setupBody + let setupProc = proc() = + setupBody setups.add(setupProc) template teardown(teardownBody) {.inject, used.} = - teardowns.insert(proc = teardownBody) + teardowns.insert( + proc() = + teardownBody + ) diff --git a/tests/helpers/templeveldb.nim b/tests/helpers/templeveldb.nim index 05433691..dbc53bb4 100644 --- a/tests/helpers/templeveldb.nim +++ b/tests/helpers/templeveldb.nim @@ -4,10 +4,9 @@ import pkg/datastore import pkg/chronos import pkg/questionable/results -type - TempLevelDb* = ref object - currentPath: string - ds: LevelDbDatastore +type TempLevelDb* = ref object + currentPath: string + ds: LevelDbDatastore var number = 0 diff --git a/tests/helpers/trackers.nim b/tests/helpers/trackers.nim index f4b10a2e..ed8c5692 100644 --- a/tests/helpers/trackers.nim +++ b/tests/helpers/trackers.nim @@ -2,17 +2,17 @@ import pkg/codex/streams/storestream import std/unittest # From libp2p/tests/helpers -const trackerNames = [ - StoreStreamTrackerName - ] +const trackerNames = [StoreStreamTrackerName] iterator testTrackers*(extras: openArray[string] = []): TrackerBase = for name in trackerNames: let t = getTracker(name) - if not isNil(t): yield t + if not isNil(t): + yield t for name in extras: let t = 
getTracker(name) - if not isNil(t): yield t + if not isNil(t): + yield t proc checkTracker*(name: string) = var tracker = getTracker(name) @@ -27,4 +27,5 @@ proc checkTrackers*() = fail() try: GC_fullCollect() - except: discard + except: + discard diff --git a/tests/integration/clioption.nim b/tests/integration/clioption.nim index 5f756d80..f845fbca 100644 --- a/tests/integration/clioption.nim +++ b/tests/integration/clioption.nim @@ -1,7 +1,6 @@ -type - CliOption* = object - key*: string # option key, including `--` - value*: string # option value +type CliOption* = object + key*: string # option key, including `--` + value*: string # option value proc `$`*(option: CliOption): string = var res = option.key diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index 5e8761d1..7826b151 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -24,19 +24,19 @@ const HttpClientTimeoutMs = 60 * 1000 proc new*(_: type CodexClient, baseurl: string): CodexClient = CodexClient( - http: newHttpClient(timeout=HttpClientTimeoutMs), + http: newHttpClient(timeout = HttpClientTimeoutMs), baseurl: baseurl, - session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline}) + session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline}), ) proc info*(client: CodexClient): ?!JsonNode = let url = client.baseurl & "/debug/info" - JsonNode.parse( client.http.getContent(url) ) + JsonNode.parse(client.http.getContent(url)) proc setLogLevel*(client: CodexClient, level: string) = let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level let headers = newHttpHeaders({"Content-Type": "text/plain"}) - let response = client.http.request(url, httpMethod=HttpPost, headers=headers) + let response = client.http.request(url, httpMethod = HttpPost, headers = headers) assert response.status == "200 OK" proc upload*(client: CodexClient, contents: string): ?!Cid = @@ -45,10 +45,9 @@ proc upload*(client: CodexClient, contents: string): ?!Cid = Cid.init(response.body).mapFailure proc download*(client: CodexClient, cid: Cid, local = false): ?!string = - let - response = client.http.get( - client.baseurl & "/data/" & $cid & - (if local: "" else: "/network/stream")) + let response = client.http.get( + client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") + ) if response.status != "200 OK": return failure(response.status) @@ -56,9 +55,7 @@ proc download*(client: CodexClient, cid: Cid, local = false): ?!string = success response.body proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string = - let - response = client.http.get( - client.baseurl & "/data/" & $cid & "/network/manifest") + let response = client.http.get(client.baseurl & "/data/" & $cid & "/network/manifest") if response.status != "200 OK": return failure(response.status) @@ -66,9 +63,7 @@ proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string = success response.body proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = - let - response = client.http.post( - client.baseurl & "/data/" & $cid & "/network") + let response = client.http.post(client.baseurl & "/data/" & $cid & "/network") if response.status != "200 OK": return failure(response.status) @@ -76,14 +71,10 @@ proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = success response.body proc downloadBytes*( - client: CodexClient, - cid: Cid, - local = false): Future[?!seq[byte]] {.async.} = - - let uri = parseUri( - client.baseurl & "/data/" & $cid & - (if local: "" else: 
"/network/stream") - ) + client: CodexClient, cid: Cid, local = false +): Future[?!seq[byte]] {.async.} = + let uri = + parseUri(client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream")) let (status, bytes) = await client.session.fetch(uri) @@ -114,24 +105,24 @@ proc requestStorageRaw*( client: CodexClient, cid: Cid, duration: UInt256, - reward: UInt256, + pricePerBytePerSecond: UInt256, proofProbability: UInt256, - collateral: UInt256, + collateralPerByte: UInt256, expiry: uint = 0, nodes: uint = 3, - tolerance: uint = 1 + tolerance: uint = 1, ): Response = - ## Call request storage REST endpoint ## let url = client.baseurl & "/storage/request/" & $cid - let json = %*{ + let json = + %*{ "duration": duration, - "reward": reward, + "pricePerBytePerSecond": pricePerBytePerSecond, "proofProbability": proofProbability, - "collateral": collateral, + "collateralPerByte": collateralPerByte, "nodes": nodes, - "tolerance": tolerance + "tolerance": tolerance, } if expiry != 0: @@ -143,16 +134,19 @@ proc requestStorage*( client: CodexClient, cid: Cid, duration: UInt256, - reward: UInt256, + pricePerBytePerSecond: UInt256, proofProbability: UInt256, expiry: uint, - collateral: UInt256, + collateralPerByte: UInt256, nodes: uint = 3, - tolerance: uint = 1 + tolerance: uint = 1, ): ?!PurchaseId = ## Call request storage REST endpoint ## - let response = client.requestStorageRaw(cid, duration, reward, proofProbability, collateral, expiry, nodes, tolerance) + let response = client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes, tolerance, + ) if response.status != "200 OK": doAssert(false, response.body) PurchaseId.fromHex(response.body).catch @@ -180,25 +174,28 @@ proc getSlots*(client: CodexClient): ?!seq[Slot] = proc postAvailability*( client: CodexClient, - totalSize, duration, minPrice, maxCollateral: UInt256 + totalSize, duration, minPricePerBytePerSecond, totalCollateral: UInt256, ): ?!Availability = ## Post sales availability endpoint ## let url = client.baseurl & "/sales/availability" - let json = %*{ - "totalSize": totalSize, - "duration": duration, - "minPrice": minPrice, - "maxCollateral": maxCollateral, - } + let json = + %*{ + "totalSize": totalSize, + "duration": duration, + "minPricePerBytePerSecond": minPricePerBytePerSecond, + "totalCollateral": totalCollateral, + } let response = client.http.post(url, $json) - doAssert response.status == "201 Created", "expected 201 Created, got " & response.status & ", body: " & response.body + doAssert response.status == "201 Created", + "expected 201 Created, got " & response.status & ", body: " & response.body Availability.fromJson(response.body) proc patchAvailabilityRaw*( client: CodexClient, availabilityId: AvailabilityId, - totalSize, freeSize, duration, minPrice, maxCollateral: ?UInt256 = UInt256.none + totalSize, freeSize, duration, minPricePerBytePerSecond, totalCollateral: ?UInt256 = + UInt256.none, ): Response = ## Updates availability ## @@ -216,20 +213,27 @@ proc patchAvailabilityRaw*( if duration =? duration: json["duration"] = %duration - if minPrice =? minPrice: - json["minPrice"] = %minPrice + if minPricePerBytePerSecond =? minPricePerBytePerSecond: + json["minPricePerBytePerSecond"] = %minPricePerBytePerSecond - if maxCollateral =? maxCollateral: - json["maxCollateral"] = %maxCollateral + if totalCollateral =? 
totalCollateral: + json["totalCollateral"] = %totalCollateral client.http.patch(url, $json) proc patchAvailability*( client: CodexClient, availabilityId: AvailabilityId, - totalSize, duration, minPrice, maxCollateral: ?UInt256 = UInt256.none + totalSize, duration, minPricePerBytePerSecond, totalCollateral: ?UInt256 = + UInt256.none, ): void = - let response = client.patchAvailabilityRaw(availabilityId, totalSize=totalSize, duration=duration, minPrice=minPrice, maxCollateral=maxCollateral) + let response = client.patchAvailabilityRaw( + availabilityId, + totalSize = totalSize, + duration = duration, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + ) doAssert response.status == "200 OK", "expected 200 OK, got " & response.status proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = @@ -238,7 +242,9 @@ proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = let body = client.http.getContent(url) seq[Availability].fromJson(body) -proc getAvailabilityReservations*(client: CodexClient, availabilityId: AvailabilityId): ?!seq[Reservation] = +proc getAvailabilityReservations*( + client: CodexClient, availabilityId: AvailabilityId +): ?!seq[Reservation] = ## Retrieves Availability's Reservations let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations" let body = client.http.getContent(url) @@ -249,23 +255,29 @@ proc close*(client: CodexClient) = proc restart*(client: CodexClient) = client.http.close() - client.http = newHttpClient(timeout=HttpClientTimeoutMs) + client.http = newHttpClient(timeout = HttpClientTimeoutMs) proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool = - client.getPurchase(id).option.?state == some state + client.getPurchase(id).option .? state == some state proc saleStateIs*(client: CodexClient, id: SlotId, state: string): bool = - client.getSalesAgent(id).option.?state == some state + client.getSalesAgent(id).option .? state == some state proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId = - return client.getPurchase(id).option.?requestId + return client.getPurchase(id).option .? 
requestId -proc uploadRaw*(client: CodexClient, contents: string, headers = newHttpHeaders()): Response = - return client.http.request(client.baseurl & "/data", body = contents, httpMethod=HttpPost, headers = headers) +proc uploadRaw*( + client: CodexClient, contents: string, headers = newHttpHeaders() +): Response = + return client.http.request( + client.baseurl & "/data", body = contents, httpMethod = HttpPost, headers = headers + ) proc listRaw*(client: CodexClient): Response = - return client.http.request(client.baseurl & "/data", httpMethod=HttpGet) + return client.http.request(client.baseurl & "/data", httpMethod = HttpGet) proc downloadRaw*(client: CodexClient, cid: string, local = false): Response = - return client.http.request(client.baseurl & "/data/" & cid & - (if local: "" else: "/network/stream"), httpMethod=HttpGet) \ No newline at end of file + return client.http.request( + client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"), + httpMethod = HttpGet, + ) diff --git a/tests/integration/codexconfig.nim b/tests/integration/codexconfig.nim index f321364f..41d7109c 100644 --- a/tests/integration/codexconfig.nim +++ b/tests/integration/codexconfig.nim @@ -19,10 +19,12 @@ export confutils type CodexConfigs* = object configs*: seq[CodexConfig] + CodexConfig* = object cliOptions: Table[StartUpCmd, Table[string, CliOption]] cliPersistenceOptions: Table[PersistenceCmd, Table[string, CliOption]] debugEnabled*: bool + CodexConfigError* = object of CatchableError proc cliArgs*(config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} @@ -43,17 +45,17 @@ func nodes*(self: CodexConfigs): int = self.configs.len proc checkBounds(self: CodexConfigs, idx: int) {.raises: [CodexConfigError].} = - if idx notin 0.. 0: ": " & msg - else: "" + else: + "" try: return CodexConf.load(cmdLine = config.cliArgs, quitOnFailure = false) @@ -64,53 +66,42 @@ proc buildConfig( raiseCodexConfigError msg & e.msg.postFix proc addCliOption*( - config: var CodexConfig, - group = PersistenceCmd.noCmd, - cliOption: CliOption) {.raises: [CodexConfigError].} = - + config: var CodexConfig, group = PersistenceCmd.noCmd, cliOption: CliOption +) {.raises: [CodexConfigError].} = var options = config.cliPersistenceOptions.getOrDefault(group) options[cliOption.key] = cliOption # overwrite if already exists config.cliPersistenceOptions[group] = options discard config.buildConfig("Invalid cli arg " & $cliOption) proc addCliOption*( - config: var CodexConfig, - group = PersistenceCmd.noCmd, - key: string, value = "") {.raises: [CodexConfigError].} = - + config: var CodexConfig, group = PersistenceCmd.noCmd, key: string, value = "" +) {.raises: [CodexConfigError].} = config.addCliOption(group, CliOption(key: key, value: value)) proc addCliOption*( - config: var CodexConfig, - group = StartUpCmd.noCmd, - cliOption: CliOption) {.raises: [CodexConfigError].} = - + config: var CodexConfig, group = StartUpCmd.noCmd, cliOption: CliOption +) {.raises: [CodexConfigError].} = var options = config.cliOptions.getOrDefault(group) options[cliOption.key] = cliOption # overwrite if already exists config.cliOptions[group] = options discard config.buildConfig("Invalid cli arg " & $cliOption) proc addCliOption*( - config: var CodexConfig, - group = StartUpCmd.noCmd, - key: string, value = "") {.raises: [CodexConfigError].} = - + config: var CodexConfig, group = StartUpCmd.noCmd, key: string, value = "" +) {.raises: [CodexConfigError].} = config.addCliOption(group, CliOption(key: key, value: value)) proc 
addCliOption*( - config: var CodexConfig, - cliOption: CliOption) {.raises: [CodexConfigError].} = - + config: var CodexConfig, cliOption: CliOption +) {.raises: [CodexConfigError].} = config.addCliOption(StartUpCmd.noCmd, cliOption) proc addCliOption*( - config: var CodexConfig, - key: string, value = "") {.raises: [CodexConfigError].} = - + config: var CodexConfig, key: string, value = "" +) {.raises: [CodexConfigError].} = config.addCliOption(StartUpCmd.noCmd, CliOption(key: key, value: value)) -proc cliArgs*( - config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} = +proc cliArgs*(config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} = ## converts CodexConfig cli options and command groups in a sequence of args ## and filters out cli options by node index if provided in the CliOption var args: seq[string] = @[] @@ -121,14 +112,14 @@ proc cliArgs*( if cmd != StartUpCmd.noCmd: args.add $cmd var opts = config.cliOptions[cmd].values.toSeq - args = args.concat( opts.map(o => $o) ) + args = args.concat(opts.map(o => $o)) for cmd in PersistenceCmd: if config.cliPersistenceOptions.hasKey(cmd): if cmd != PersistenceCmd.noCmd: args.add $cmd var opts = config.cliPersistenceOptions[cmd].values.toSeq - args = args.concat( opts.map(o => $o) ) + args = args.concat(opts.map(o => $o)) return args @@ -142,9 +133,8 @@ proc logLevel*(config: CodexConfig): LogLevel {.raises: [CodexConfigError].} = return parseEnum[LogLevel](built.logLevel.toUpperAscii) proc debug*( - self: CodexConfigs, - idx: int, - enabled = true): CodexConfigs {.raises: [CodexConfigError].} = + self: CodexConfigs, idx: int, enabled = true +): CodexConfigs {.raises: [CodexConfigError].} = ## output log in stdout for a specific node in the group self.checkBounds idx @@ -161,17 +151,15 @@ proc debug*(self: CodexConfigs, enabled = true): CodexConfigs {.raises: [].} = return startConfig proc withLogFile*( - self: CodexConfigs, - idx: int): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, idx: int +): CodexConfigs {.raises: [CodexConfigError].} = self.checkBounds idx var startConfig = self startConfig.configs[idx].addCliOption("--log-file", "") return startConfig -proc withLogFile*( - self: CodexConfigs): CodexConfigs {.raises: [CodexConfigError].} = +proc withLogFile*(self: CodexConfigs): CodexConfigs {.raises: [CodexConfigError].} = ## typically called from test, sets config such that a log file should be ## created var startConfig = self @@ -180,8 +168,8 @@ proc withLogFile*( return startConfig proc withLogFile*( - self: var CodexConfig, - logFile: string) {.raises: [CodexConfigError].} = #: CodexConfigs = + self: var CodexConfig, logFile: string +) {.raises: [CodexConfigError].} = #: CodexConfigs = ## typically called internally from the test suite, sets a log file path to ## be created during the test run, for a specified node in the group # var config = self @@ -189,18 +177,15 @@ proc withLogFile*( # return startConfig proc withLogLevel*( - self: CodexConfig, - level: LogLevel | string): CodexConfig {.raises: [CodexConfigError].} = - + self: CodexConfig, level: LogLevel | string +): CodexConfig {.raises: [CodexConfigError].} = var config = self config.addCliOption("--log-level", $level) return config proc withLogLevel*( - self: CodexConfigs, - idx: int, - level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, idx: int, level: LogLevel | string +): CodexConfigs {.raises: [CodexConfigError].} = self.checkBounds idx var startConfig = self 
@@ -208,86 +193,75 @@ proc withLogLevel*( return startConfig proc withLogLevel*( - self: CodexConfigs, - level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, level: LogLevel | string +): CodexConfigs {.raises: [CodexConfigError].} = var startConfig = self for config in startConfig.configs.mitems: config.addCliOption("--log-level", $level) return startConfig proc withSimulateProofFailures*( - self: CodexConfigs, - idx: int, - failEveryNProofs: int + self: CodexConfigs, idx: int, failEveryNProofs: int ): CodexConfigs {.raises: [CodexConfigError].} = - self.checkBounds idx var startConfig = self startConfig.configs[idx].addCliOption( - StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs) + StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs + ) return startConfig proc withSimulateProofFailures*( - self: CodexConfigs, - failEveryNProofs: int): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, failEveryNProofs: int +): CodexConfigs {.raises: [CodexConfigError].} = var startConfig = self for config in startConfig.configs.mitems: config.addCliOption( - StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs) + StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs + ) return startConfig proc withValidationGroups*( - self: CodexConfigs, - groups: ValidationGroups): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, groups: ValidationGroups +): CodexConfigs {.raises: [CodexConfigError].} = var startConfig = self for config in startConfig.configs.mitems: - config.addCliOption( - StartUpCmd.persistence, "--validator-groups", $(groups)) + config.addCliOption(StartUpCmd.persistence, "--validator-groups", $(groups)) return startConfig proc withValidationGroupIndex*( - self: CodexConfigs, - idx: int, - groupIndex: uint16): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, idx: int, groupIndex: uint16 +): CodexConfigs {.raises: [CodexConfigError].} = self.checkBounds idx var startConfig = self startConfig.configs[idx].addCliOption( - StartUpCmd.persistence, "--validator-group-index", $groupIndex) + StartUpCmd.persistence, "--validator-group-index", $groupIndex + ) return startConfig proc withEthProvider*( - self: CodexConfigs, - idx: int, - ethProvider: string + self: CodexConfigs, idx: int, ethProvider: string ): CodexConfigs {.raises: [CodexConfigError].} = - self.checkBounds idx var startConfig = self - startConfig.configs[idx].addCliOption(StartUpCmd.persistence, - "--eth-provider", ethProvider) + startConfig.configs[idx].addCliOption( + StartUpCmd.persistence, "--eth-provider", ethProvider + ) return startConfig proc withEthProvider*( - self: CodexConfigs, - ethProvider: string): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, ethProvider: string +): CodexConfigs {.raises: [CodexConfigError].} = var startConfig = self for config in startConfig.configs.mitems: - config.addCliOption(StartUpCmd.persistence, - "--eth-provider", ethProvider) + config.addCliOption(StartUpCmd.persistence, "--eth-provider", ethProvider) return startConfig proc logLevelWithTopics( - config: CodexConfig, - topics: varargs[string]): string {.raises: [CodexConfigError].} = - + config: CodexConfig, topics: varargs[string] +): string {.raises: [CodexConfigError].} = convertError: var logLevel = LogLevel.INFO let built = config.buildConfig("Invalid codex config cli params") @@ -296,10 +270,8 @@ proc logLevelWithTopics( return level proc 
withLogTopics*( - self: CodexConfigs, - idx: int, - topics: varargs[string]): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, idx: int, topics: varargs[string] +): CodexConfigs {.raises: [CodexConfigError].} = self.checkBounds idx convertError: @@ -309,10 +281,8 @@ proc withLogTopics*( return startConfig.withLogLevel(idx, level) proc withLogTopics*( - self: CodexConfigs, - topics: varargs[string] + self: CodexConfigs, topics: varargs[string] ): CodexConfigs {.raises: [CodexConfigError].} = - var startConfig = self for config in startConfig.configs.mitems: let level = config.logLevelWithTopics(topics) @@ -320,10 +290,8 @@ proc withLogTopics*( return startConfig proc withStorageQuota*( - self: CodexConfigs, - idx: int, - quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, idx: int, quota: NBytes +): CodexConfigs {.raises: [CodexConfigError].} = self.checkBounds idx var startConfig = self @@ -331,9 +299,8 @@ proc withStorageQuota*( return startConfig proc withStorageQuota*( - self: CodexConfigs, - quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, quota: NBytes +): CodexConfigs {.raises: [CodexConfigError].} = var startConfig = self for config in startConfig.configs.mitems: config.addCliOption("--storage-quota", $quota) diff --git a/tests/integration/codexprocess.nim b/tests/integration/codexprocess.nim index 5097a968..79d4b040 100644 --- a/tests/integration/codexprocess.nim +++ b/tests/integration/codexprocess.nim @@ -18,9 +18,8 @@ export nodeprocess logScope: topics = "integration testing codex process" -type - CodexProcess* = ref object of NodeProcess - client: ?CodexClient +type CodexProcess* = ref object of NodeProcess + client: ?CodexClient method workingDir(node: CodexProcess): string = return currentSourcePath() / ".." / ".." / ".." diff --git a/tests/integration/hardhatconfig.nim b/tests/integration/hardhatconfig.nim index fbd04fe8..5de5bbc5 100644 --- a/tests/integration/hardhatconfig.nim +++ b/tests/integration/hardhatconfig.nim @@ -1,7 +1,6 @@ -type - HardhatConfig* = object - logFile*: bool - debugEnabled*: bool +type HardhatConfig* = object + logFile*: bool + debugEnabled*: bool proc debug*(self: HardhatConfig, enabled = true): HardhatConfig = ## output log in stdout diff --git a/tests/integration/hardhatprocess.nim b/tests/integration/hardhatprocess.nim index b4259de4..40c7942d 100644 --- a/tests/integration/hardhatprocess.nim +++ b/tests/integration/hardhatprocess.nim @@ -21,9 +21,8 @@ logScope: topics = "integration testing hardhat process" nodeName = "hardhat" -type - HardhatProcess* = ref object of NodeProcess - logFile: ?IoHandle +type HardhatProcess* = ref object of NodeProcess + logFile: ?IoHandle method workingDir(node: HardhatProcess): string = return currentSourcePath() / ".." / ".." / ".." / "vendor" / "codex-contracts-eth" @@ -41,22 +40,18 @@ method outputLineEndings(node: HardhatProcess): string {.raises: [].} = return "\n" proc openLogFile(node: HardhatProcess, logFilePath: string): IoHandle = - let logFileHandle = openFile( - logFilePath, - {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate} - ) + let logFileHandle = + openFile(logFilePath, {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}) without fileHandle =? 
logFileHandle: fatal "failed to open log file", - path = logFilePath, - errorCode = $logFileHandle.error + path = logFilePath, errorCode = $logFileHandle.error raiseAssert "failed to open log file, aborting" return fileHandle method start*(node: HardhatProcess) {.async.} = - let poptions = node.processOptions + {AsyncProcessOption.StdErrToStdOut} trace "starting node", args = node.arguments, @@ -70,7 +65,7 @@ method start*(node: HardhatProcess) {.async.} = node.workingDir, @["node", "--export", "deployment-localhost.json"].concat(node.arguments), options = poptions, - stdoutHandle = AsyncProcess.Pipe + stdoutHandle = AsyncProcess.Pipe, ) except CancelledError as error: raise error @@ -78,12 +73,11 @@ method start*(node: HardhatProcess) {.async.} = error "failed to start hardhat process", error = e.msg proc startNode*( - _: type HardhatProcess, - args: seq[string], - debug: string | bool = false, - name: string + _: type HardhatProcess, + args: seq[string], + debug: string | bool = false, + name: string, ): Future[HardhatProcess] {.async.} = - var logFilePath = "" var arguments = newSeq[string]() @@ -100,7 +94,7 @@ proc startNode*( arguments: arguments, debug: ($debug != "false"), trackedFutures: TrackedFutures.new(), - name: "hardhat" + name: "hardhat", ) await hardhat.start() diff --git a/tests/integration/marketplacesuite.nim b/tests/integration/marketplacesuite.nim index e666ad17..4d155186 100644 --- a/tests/integration/marketplacesuite.nim +++ b/tests/integration/marketplacesuite.nim @@ -11,11 +11,8 @@ import ../contracts/deployment export mp export multinodes -template marketplacesuite*(name: string, - body: untyped) = - +template marketplacesuite*(name: string, body: untyped) = multinodesuite name: - var marketplace {.inject, used.}: Marketplace var period: uint64 var periodicity: Periodicity @@ -32,12 +29,12 @@ template marketplacesuite*(name: string, await ethProvider.advanceTimeTo(endOfPeriod + 1) template eventuallyP(condition: untyped, finalPeriod: Period): bool = - - proc eventuallyP: Future[bool] {.async.} = - while( - let currentPeriod = await getCurrentPeriod(); + proc eventuallyP(): Future[bool] {.async.} = + while ( + let currentPeriod = await getCurrentPeriod() currentPeriod <= finalPeriod - ): + ) + : if condition: return true await sleepAsync(1.millis) @@ -48,36 +45,50 @@ template marketplacesuite*(name: string, proc periods(p: int): uint64 = p.uint64 * period - proc createAvailabilities(datasetSize: int, duration: uint64) = + proc slotSize(blocks: int): UInt256 = + (DefaultBlockSize * blocks.NBytes).Natural.u256 + + proc datasetSize(blocks, nodes, tolerance: int): UInt256 = + (nodes + tolerance).u256 * slotSize(blocks) + + proc createAvailabilities( + datasetSize: UInt256, + duration: uint64, + collateralPerByte: UInt256, + minPricePerBytePerSecond: UInt256, + ) = + let totalCollateral = datasetSize * collateralPerByte # post availability to each provider - for i in 0.. 
//_.log - var logDir = currentSourcePath.parentDir() / - "logs" / - sanitize($starttime & "__" & name) / + var logDir = + currentSourcePath.parentDir() / "logs" / sanitize($starttime & "__" & name) / sanitize($currentTestName) createDir(logDir) @@ -123,10 +121,8 @@ template multinodesuite*(name: string, body: untyped) = return fileName proc newHardhatProcess( - config: HardhatConfig, - role: Role + config: HardhatConfig, role: Role ): Future[NodeProcess] {.async.} = - var args: seq[string] = @[] if config.logFile: let updatedLogFile = getLogFile(role, none int) @@ -141,11 +137,9 @@ template multinodesuite*(name: string, body: untyped) = trace "hardhat node started" return node - proc newCodexProcess(roleIdx: int, - conf: CodexConfig, - role: Role + proc newCodexProcess( + roleIdx: int, conf: CodexConfig, role: Role ): Future[NodeProcess] {.async.} = - let nodeIdx = running.len var config = conf @@ -153,9 +147,8 @@ template multinodesuite*(name: string, body: untyped) = raiseMultiNodeSuiteError "Cannot start node at nodeIdx " & $nodeIdx & ", not enough eth accounts." - let datadir = getTempDir() / "Codex" / - sanitize($starttime) / - sanitize($role & "_" & $roleIdx) + let datadir = + getTempDir() / "Codex" / sanitize($starttime) / sanitize($role & "_" & $roleIdx) try: if config.logFile.isSome: @@ -164,19 +157,16 @@ template multinodesuite*(name: string, body: untyped) = for bootstrapNode in bootstrapNodes: config.addCliOption("--bootstrap-node", bootstrapNode) - config.addCliOption("--api-port", $ await nextFreePort(8080 + nodeIdx)) + config.addCliOption("--api-port", $await nextFreePort(8080 + nodeIdx)) config.addCliOption("--data-dir", datadir) config.addCliOption("--nat", "none") config.addCliOption("--listen-addrs", "/ip4/127.0.0.1/tcp/0") - config.addCliOption("--disc-port", $ await nextFreePort(8090 + nodeIdx)) - + config.addCliOption("--disc-port", $await nextFreePort(8090 + nodeIdx)) except CodexConfigError as e: raiseMultiNodeSuiteError "invalid cli option, error: " & e.msg let node = await CodexProcess.startNode( - config.cliArgs, - config.debugEnabled, - $role & $roleIdx + config.cliArgs, config.debugEnabled, $role & $roleIdx ) try: @@ -187,25 +177,25 @@ template multinodesuite*(name: string, body: untyped) = return node - proc hardhat: HardhatProcess = + proc hardhat(): HardhatProcess = for r in running: if r.role == Role.Hardhat: return HardhatProcess(r.node) return nil - proc clients: seq[CodexProcess] {.used.} = + proc clients(): seq[CodexProcess] {.used.} = return collect: for r in running: if r.role == Role.Client: CodexProcess(r.node) - proc providers: seq[CodexProcess] {.used.} = + proc providers(): seq[CodexProcess] {.used.} = return collect: for r in running: if r.role == Role.Provider: CodexProcess(r.node) - proc validators: seq[CodexProcess] {.used.} = + proc validators(): seq[CodexProcess] {.used.} = return collect: for r in running: if r.role == Role.Validator: @@ -218,20 +208,30 @@ template multinodesuite*(name: string, body: untyped) = let clientIdx = clients().len var config = conf config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl) - config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len]) + config.addCliOption( + StartUpCmd.persistence, "--eth-account", $accounts[running.len] + ) return await newCodexProcess(clientIdx, config, Role.Client) proc startProviderNode(conf: CodexConfig): Future[NodeProcess] {.async.} = let providerIdx = providers().len var config = conf 
config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl) - config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len]) - config.addCliOption(PersistenceCmd.prover, "--circom-r1cs", - "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs") - config.addCliOption(PersistenceCmd.prover, "--circom-wasm", - "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm") - config.addCliOption(PersistenceCmd.prover, "--circom-zkey", - "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey") + config.addCliOption( + StartUpCmd.persistence, "--eth-account", $accounts[running.len] + ) + config.addCliOption( + PersistenceCmd.prover, "--circom-r1cs", + "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs", + ) + config.addCliOption( + PersistenceCmd.prover, "--circom-wasm", + "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm", + ) + config.addCliOption( + PersistenceCmd.prover, "--circom-zkey", + "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey", + ) return await newCodexProcess(providerIdx, config, Role.Provider) @@ -239,7 +239,9 @@ template multinodesuite*(name: string, body: untyped) = let validatorIdx = validators().len var config = conf config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl) - config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len]) + config.addCliOption( + StartUpCmd.persistence, "--eth-account", $accounts[running.len] + ) config.addCliOption(StartUpCmd.persistence, "--validator") return await newCodexProcess(validatorIdx, config, Role.Validator) @@ -264,7 +266,7 @@ template multinodesuite*(name: string, body: untyped) = try: tryBody except CatchableError as er: - fatal message, error=er.msg + fatal message, error = er.msg echo "[FATAL] ", message, ": ", er.msg await teardownImpl() when declared(teardownAllIMPL): @@ -294,8 +296,7 @@ template multinodesuite*(name: string, body: untyped) = # Do not use websockets, but use http and polling to stop subscriptions # from being removed after 5 minutes ethProvider = JsonRpcProvider.new( - jsonRpcProviderUrl, - pollingInterval = chronos.milliseconds(100) + jsonRpcProviderUrl, pollingInterval = chronos.milliseconds(100) ) # if hardhat was NOT started by the test, take a snapshot so it can be # reverted in the test teardown @@ -304,8 +305,7 @@ template multinodesuite*(name: string, body: untyped) = accounts = await ethProvider.listAccounts() except CatchableError as e: echo "Hardhat not running. Run hardhat manually " & - "before executing tests, or include a " & - "HardhatConfig in the test setup." + "before executing tests, or include a " & "HardhatConfig in the test setup." fail() quit(1) @@ -313,30 +313,21 @@ template multinodesuite*(name: string, body: untyped) = failAndTeardownOnError "failed to start client nodes": for config in clients.configs: let node = await startClientNode(config) - running.add RunningNode( - role: Role.Client, - node: node - ) + running.add RunningNode(role: Role.Client, node: node) CodexProcess(node).updateBootstrapNodes() if var providers =? nodeConfigs.providers: failAndTeardownOnError "failed to start provider nodes": for config in providers.configs.mitems: let node = await startProviderNode(config) - running.add RunningNode( - role: Role.Provider, - node: node - ) + running.add RunningNode(role: Role.Provider, node: node) CodexProcess(node).updateBootstrapNodes() if var validators =? 
nodeConfigs.validators: failAndTeardownOnError "failed to start validator nodes": for config in validators.configs.mitems: let node = await startValidatorNode(config) - running.add RunningNode( - role: Role.Validator, - node: node - ) + running.add RunningNode(role: Role.Validator, node: node) # ensure that we have a recent block with a fresh timestamp discard await send(ethProvider, "evm_mine") diff --git a/tests/integration/nodeconfig.nim b/tests/integration/nodeconfig.nim index d6adb80f..a96c0525 100644 --- a/tests/integration/nodeconfig.nim +++ b/tests/integration/nodeconfig.nim @@ -3,11 +3,10 @@ import pkg/questionable export chronicles -type - NodeConfig* = ref object of RootObj - logFile*: bool - logLevel*: ?LogLevel - debugEnabled*: bool +type NodeConfig* = ref object of RootObj + logFile*: bool + logLevel*: ?LogLevel + debugEnabled*: bool proc debug*[T: NodeConfig](config: T, enabled = true): T = ## output log in stdout @@ -15,20 +14,12 @@ proc debug*[T: NodeConfig](config: T, enabled = true): T = startConfig.debugEnabled = enabled return startConfig -proc withLogFile*[T: NodeConfig]( - config: T, - logToFile: bool = true -): T = - +proc withLogFile*[T: NodeConfig](config: T, logToFile: bool = true): T = var startConfig = config startConfig.logFile = logToFile return startConfig -proc withLogLevel*[T: NodeConfig]( - config: NodeConfig, - level: LogLevel -): T = - +proc withLogLevel*[T: NodeConfig](config: NodeConfig, level: LogLevel): T = var startConfig = config startConfig.logLevel = some level return startConfig diff --git a/tests/integration/nodeconfigs.nim b/tests/integration/nodeconfigs.nim index 56309006..19e797e3 100644 --- a/tests/integration/nodeconfigs.nim +++ b/tests/integration/nodeconfigs.nim @@ -2,10 +2,8 @@ import pkg/questionable import ./codexconfig import ./hardhatconfig -type - NodeConfigs* = object - clients*: ?CodexConfigs - providers*: ?CodexConfigs - validators*: ?CodexConfigs - hardhat*: ?HardhatConfig - +type NodeConfigs* = object + clients*: ?CodexConfigs + providers*: ?CodexConfigs + validators*: ?CodexConfigs + hardhat*: ?HardhatConfig diff --git a/tests/integration/nodeprocess.nim b/tests/integration/nodeprocess.nim index a08b4fe1..d50dacbe 100644 --- a/tests/integration/nodeprocess.nim +++ b/tests/integration/nodeprocess.nim @@ -24,6 +24,7 @@ type debug: bool trackedFutures*: TrackedFutures name*: string + NodeProcessError* = object of CatchableError method workingDir(node: NodeProcess): string {.base, gcsafe.} = @@ -38,10 +39,12 @@ method startedOutput(node: NodeProcess): string {.base, gcsafe.} = method processOptions(node: NodeProcess): set[AsyncProcessOption] {.base, gcsafe.} = raiseAssert "not implemented" -method outputLineEndings(node: NodeProcess): string {.base, gcsafe raises: [].} = +method outputLineEndings(node: NodeProcess): string {.base, gcsafe, raises: [].} = raiseAssert "not implemented" -method onOutputLineCaptured(node: NodeProcess, line: string) {.base, gcsafe, raises: [].} = +method onOutputLineCaptured( + node: NodeProcess, line: string +) {.base, gcsafe, raises: [].} = raiseAssert "not implemented" method start*(node: NodeProcess) {.base, async.} = @@ -63,7 +66,7 @@ method start*(node: NodeProcess) {.base, async.} = node.workingDir, node.arguments, options = poptions, - stdoutHandle = AsyncProcess.Pipe + stdoutHandle = AsyncProcess.Pipe, ) except CancelledError as error: raise error @@ -71,11 +74,8 @@ method start*(node: NodeProcess) {.base, async.} = error "failed to start node process", error = e.msg proc captureOutput( - 
node: NodeProcess, - output: string, - started: Future[void] + node: NodeProcess, output: string, started: Future[void] ) {.async: (raises: []).} = - logScope: nodeName = node.name @@ -85,7 +85,7 @@ proc captureOutput( try: while node.process.running.option == some true: - while(let line = await stream.readLine(0, node.outputLineEndings); line != ""): + while (let line = await stream.readLine(0, node.outputLineEndings); line != ""): if node.debug: # would be nice if chronicles could parse and display with colors echo line @@ -97,27 +97,21 @@ proc captureOutput( await sleepAsync(1.millis) await sleepAsync(1.millis) - except CancelledError: discard # do not propagate as captureOutput was asyncSpawned - except AsyncStreamError as e: error "error reading output stream", error = e.msgDetail proc startNode*[T: NodeProcess]( - _: type T, - args: seq[string], - debug: string | bool = false, - name: string + _: type T, args: seq[string], debug: string | bool = false, name: string ): Future[T] {.async.} = - ## Starts a Codex Node with the specified arguments. ## Set debug to 'true' to see output of the node. let node = T( arguments: @args, debug: ($debug != "false"), trackedFutures: TrackedFutures.new(), - name: name + name: name, ) await node.start() return node @@ -144,7 +138,6 @@ method stop*(node: NodeProcess) {.base, async.} = raise error except CatchableError as e: error "error stopping node process", error = e.msg - finally: node.process = nil @@ -172,8 +165,8 @@ proc waitUntilStarted*(node: NodeProcess) {.async.} = await node.stop() # raise error here so that all nodes (not just this one) can be # shutdown gracefully - raise newException(NodeProcessError, "node did not output '" & - node.startedOutput & "'") + raise + newException(NodeProcessError, "node did not output '" & node.startedOutput & "'") proc restart*(node: NodeProcess) {.async.} = await node.stop() diff --git a/tests/integration/testblockexpiration.nim b/tests/integration/testblockexpiration.nim index e1d38a03..e3fad75c 100644 --- a/tests/integration/testblockexpiration.nim +++ b/tests/integration/testblockexpiration.nim @@ -24,17 +24,19 @@ ethersuite "Node block expiration tests": dataDir.removeDir() proc startTestNode(blockTtlSeconds: int) {.async.} = - node = await CodexProcess.startNode(@[ - "--api-port=8080", - "--data-dir=" & dataDir, - "--nat=none", - "--listen-addrs=/ip4/127.0.0.1/tcp/0", - "--disc-port=8090", - "--block-ttl=" & $blockTtlSeconds, - "--block-mi=1", - "--block-mn=10"], + node = await CodexProcess.startNode( + @[ + "--api-port=8080", + "--data-dir=" & dataDir, + "--nat=none", + "--listen-addrs=/ip4/127.0.0.1/tcp/0", + "--disc-port=8090", + "--block-ttl=" & $blockTtlSeconds, + "--block-mi=1", + "--block-mn=10", + ], false, - "cli-test-node" + "cli-test-node", ) await node.waitUntilStarted() @@ -47,16 +49,16 @@ ethersuite "Node block expiration tests": uploadResponse.body proc downloadTestFile(contentId: string, local = false): Response = - let client = newHttpClient(timeout=3000) - let downloadUrl = baseurl & "/data/" & - contentId & (if local: "" else: "/network/stream") + let client = newHttpClient(timeout = 3000) + let downloadUrl = + baseurl & "/data/" & contentId & (if local: "" else: "/network/stream") let content = client.get(downloadUrl) client.close() content proc hasFile(contentId: string): bool = - let client = newHttpClient(timeout=3000) + let client = newHttpClient(timeout = 3000) let dataLocalUrl = baseurl & "/data/" & contentId let content = client.get(dataLocalUrl) client.close() diff --git 
a/tests/integration/testcli.nim b/tests/integration/testcli.nim index fad85846..d9f2d081 100644 --- a/tests/integration/testcli.nim +++ b/tests/integration/testcli.nim @@ -8,36 +8,30 @@ import ./nodeprocess import ../examples asyncchecksuite "Command line interface": - let key = "4242424242424242424242424242424242424242424242424242424242424242" proc startCodex(args: seq[string]): Future[CodexProcess] {.async.} = - return await CodexProcess.startNode( - args, - false, - "cli-test-node" - ) + return await CodexProcess.startNode(args, false, "cli-test-node") test "complains when persistence is enabled without ethereum account": - let node = await startCodex(@[ - "persistence" - ]) + let node = await startCodex(@["persistence"]) await node.waitUntilOutput("Persistence enabled, but no Ethereum account was set") await node.stop() test "complains when ethereum private key file has wrong permissions": let unsafeKeyFile = genTempPath("", "") discard unsafeKeyFile.writeFile(key, 0o666) - let node = await startCodex(@[ - "persistence", - "--eth-private-key=" & unsafeKeyFile]) - await node.waitUntilOutput("Ethereum private key file does not have safe file permissions") + let node = await startCodex(@["persistence", "--eth-private-key=" & unsafeKeyFile]) + await node.waitUntilOutput( + "Ethereum private key file does not have safe file permissions" + ) await node.stop() discard removeFile(unsafeKeyFile) let marketplaceArg = "--marketplace-address=" & $EthAddress.example - expectedDownloadInstruction = "Proving circuit files are not found. Please run the following to download them:" + expectedDownloadInstruction = + "Proving circuit files are not found. Please run the following to download them:" test "suggests downloading of circuit files when persistence is enabled without accessible r1cs file": let node = await startCodex(@["persistence", "prover", marketplaceArg]) @@ -45,22 +39,22 @@ asyncchecksuite "Command line interface": await node.stop() test "suggests downloading of circuit files when persistence is enabled without accessible wasm file": - let node = await startCodex(@[ - "persistence", - "prover", - marketplaceArg, - "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs" - ]) + let node = await startCodex( + @[ + "persistence", "prover", marketplaceArg, + "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs", + ] + ) await node.waitUntilOutput(expectedDownloadInstruction) await node.stop() test "suggests downloading of circuit files when persistence is enabled without accessible zkey file": - let node = await startCodex(@[ - "persistence", - "prover", - marketplaceArg, - "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs", - "--circom-wasm=tests/circuits/fixtures/proof_main.wasm" - ]) + let node = await startCodex( + @[ + "persistence", "prover", marketplaceArg, + "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs", + "--circom-wasm=tests/circuits/fixtures/proof_main.wasm", + ] + ) await node.waitUntilOutput(expectedDownloadInstruction) await node.stop() diff --git a/tests/integration/testecbug.nim b/tests/integration/testecbug.nim index 0545d1d0..e7604de7 100644 --- a/tests/integration/testecbug.nim +++ b/tests/integration/testecbug.nim @@ -5,35 +5,32 @@ import ./nodeconfigs import ./hardhatconfig marketplacesuite "Bug #821 - node crashes during erasure coding": - test "should be able to create storage request and download dataset", NodeConfigs( - clients: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output.debug() - .withLogFile() # uncomment to 
output log file to tests/integration/logs/ //_.log - .withLogTopics("node", "erasure", "marketplace", ) - .some, - - providers: - CodexConfigs.init(nodes=0) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") - .some, - ): - let reward = 400.u256 + clients: CodexConfigs + .init(nodes = 1) + # .debug() # uncomment to enable console log output.debug() + .withLogFile() + # uncomment to output log file to tests/integration/logs/ //_.log + .withLogTopics("node", "erasure", "marketplace").some, + providers: CodexConfigs.init(nodes = 0) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") + .some, + ): + let pricePerBytePerSecond = 1.u256 let duration = 20.periods - let collateral = 200.u256 + let collateralPerByte = 1.u256 let expiry = 10.periods - let data = await RandomChunker.example(blocks=8) + let data = await RandomChunker.example(blocks = 8) let client = clients()[0] let clientApi = client.client let cid = clientApi.upload(data).get var requestId = none RequestId - proc onStorageRequested(eventResult: ?!StorageRequested)= + proc onStorageRequested(eventResult: ?!StorageRequested) = assert not eventResult.isErr requestId = some (!eventResult).requestId @@ -42,15 +39,15 @@ marketplacesuite "Bug #821 - node crashes during erasure coding": # client requests storage but requires multiple slots to host the content let id = await clientApi.requestStorage( cid, - duration=duration, - reward=reward, - expiry=expiry, - collateral=collateral, - nodes=3, - tolerance=1 + duration = duration, + pricePerBytePerSecond = pricePerBytePerSecond, + expiry = expiry, + collateralPerByte = collateralPerByte, + nodes = 3, + tolerance = 1, ) - check eventually(requestId.isSome, timeout=expiry.int * 1000) + check eventually(requestId.isSome, timeout = expiry.int * 1000) let request = await marketplace.getRequest(requestId.get) let cidFromRequest = Cid.init(request.content.cid).get() diff --git a/tests/integration/testmarketplace.nim b/tests/integration/testmarketplace.nim index 17a3ec17..bc030a1d 100644 --- a/tests/integration/testmarketplace.nim +++ b/tests/integration/testmarketplace.nim @@ -7,15 +7,21 @@ import ./nodeconfigs marketplacesuite "Marketplace": let marketplaceConfig = NodeConfigs( - clients: CodexConfigs.init(nodes=1).some, - providers: CodexConfigs.init(nodes=1).some, + clients: CodexConfigs.init(nodes = 1).some, + providers: CodexConfigs.init(nodes = 1).some, ) - + var host: CodexClient var hostAccount: Address var client: CodexClient var clientAccount: Address + const minPricePerBytePerSecond = 1.u256 + const collateralPerByte = 1.u256 + const blocks = 8 + const ecNodes = 3 + const ecTolerance = 1 + setup: host = providers()[0].client hostAccount = providers()[0].ethAccount @@ -29,23 +35,29 @@ marketplacesuite "Marketplace": test "nodes negotiate contracts on the marketplace", marketplaceConfig: let size = 0xFFFFFF.u256 - let data = await RandomChunker.example(blocks=8) + let data = await RandomChunker.example(blocks = blocks) # host makes storage available - let availability = host.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get + let availability = host.postAvailability( + totalSize = 
size, + duration = 20 * 60.u256, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = size * minPricePerBytePerSecond, + ).get # client requests storage let cid = client.upload(data).get let id = client.requestStorage( cid, - duration=20*60.u256, - reward=400.u256, - proofProbability=3.u256, - expiry=10*60, - collateral=200.u256, - nodes = 3, - tolerance = 1).get + duration = 20 * 60.u256, + pricePerBytePerSecond = minPricePerBytePerSecond, + proofProbability = 3.u256, + expiry = 10 * 60, + collateralPerByte = collateralPerByte, + nodes = ecNodes, + tolerance = ecTolerance, + ).get - check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000) + check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) let purchase = client.getPurchase(id).get check purchase.error == none string let availabilities = host.getAvailabilities().get @@ -57,33 +69,38 @@ check reservations.len == 3 check reservations[0].requestId == purchase.requestId - test "node slots gets paid out and rest of tokens are returned to client", marketplaceConfig: + test "node slots get paid out and rest of tokens are returned to client", + marketplaceConfig: let size = 0xFFFFFF.u256 - let data = await RandomChunker.example(blocks = 8) + let data = await RandomChunker.example(blocks = blocks) let marketplace = Marketplace.new(Marketplace.address, ethProvider.getSigner()) let tokenAddress = await marketplace.token() let token = Erc20Token.new(tokenAddress, ethProvider.getSigner()) - let reward = 400.u256 - let duration = 20*60.u256 - let nodes = 3'u + let duration = 20 * 60.u256 # host makes storage available let startBalanceHost = await token.balanceOf(hostAccount) - discard host.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get + discard host.postAvailability( + totalSize = size, + duration = 20 * 60.u256, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = size * minPricePerBytePerSecond, + ).get # client requests storage let cid = client.upload(data).get let id = client.requestStorage( cid, - duration=duration, - reward=reward, - proofProbability=3.u256, - expiry=10*60, - collateral=200.u256, - nodes = nodes, - tolerance = 1).get + duration = duration, + pricePerBytePerSecond = minPricePerBytePerSecond, + proofProbability = 3.u256, + expiry = 10 * 60, + collateralPerByte = collateralPerByte, + nodes = ecNodes, + tolerance = ecTolerance, + ).get - check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000) + check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) let purchase = client.getPurchase(id).get check purchase.error == none string @@ -95,40 +112,42 @@ await ethProvider.advanceTime(duration) # Checking that the hosting node received reward for at least the time between - check eventually (await token.balanceOf(hostAccount)) - startBalanceHost >= (duration-5*60)*reward*nodes.u256 + let slotSize = slotSize(blocks) + let pricePerSlotPerSecond = minPricePerBytePerSecond * slotSize + check eventually (await token.balanceOf(hostAccount)) - startBalanceHost >= + (duration - 5 * 60) * pricePerSlotPerSecond * ecNodes.u256 # Checking that client node receives some funds back that were not used for the host nodes check eventually( (await token.balanceOf(clientAccount)) - clientBalanceBeforeFinished > 0, - timeout = 10*1000 # give client a bit of time to withdraw its funds + timeout = 10 * 1000,
# give client a bit of time to withdraw its funds ) marketplacesuite "Marketplace payouts": + const minPricePerBytePerSecond = 1.u256 + const collateralPerByte = 1.u256 + const blocks = 8 + const ecNodes = 3 + const ecTolerance = 1 test "expired request partially pays out for stored time", NodeConfigs( # Uncomment to start Hardhat automatically, typically so logs can be inspected locally hardhat: HardhatConfig.none, - - clients: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output.debug() - # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("node", "erasure") - .some, - - providers: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") - .some, - ): - let reward = 400.u256 + clients: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output.debug() + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node", "erasure") + .some, + providers: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") + .some, + ): let duration = 20.periods - let collateral = 200.u256 let expiry = 10.periods - let data = await RandomChunker.example(blocks=8) + let data = await RandomChunker.example(blocks = blocks) let client = clients()[0] let provider = providers()[0] let clientApi = client.client @@ -137,13 +156,16 @@ marketplacesuite "Marketplace payouts": let startBalanceClient = await token.balanceOf(client.ethAccount) # provider makes storage available + let datasetSize = datasetSize(blocks, ecNodes, ecTolerance) + let totalAvailabilitySize = datasetSize div 2 discard providerApi.postAvailability( # make availability size small enough that we can't fill all the slots, # thus causing a cancellation - totalSize=(data.len div 2).u256, - duration=duration.u256, - minPrice=reward, - maxCollateral=collateral) + totalSize = totalAvailabilitySize, + duration = duration.u256, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = collateralPerByte * totalAvailabilitySize, + ) let cid = clientApi.upload(data).get @@ -157,16 +179,16 @@ marketplacesuite "Marketplace payouts": # client requests storage but requires multiple slots to host the content let id = await clientApi.requestStorage( cid, - duration=duration, - reward=reward, - expiry=expiry, - collateral=collateral, - nodes=3, - tolerance=1 + duration = duration, + pricePerBytePerSecond = minPricePerBytePerSecond, + expiry = expiry, + collateralPerByte = collateralPerByte, + nodes = ecNodes, + tolerance = ecTolerance, ) # wait until one slot is filled - check eventually(slotIdxFilled.isSome, timeout=expiry.int * 1000) + check eventually(slotIdxFilled.isSome, timeout = expiry.int * 1000) let slotId = slotId(!clientApi.requestId(id), !slotIdxFilled) # wait until sale is cancelled @@ -175,18 +197,22 @@ marketplacesuite "Marketplace payouts": await advanceToNextPeriod() + let slotSize = slotSize(blocks) + let pricePerSlotPerSecond = minPricePerBytePerSecond * slotSize + check eventually ( - let endBalanceProvider = (await token.balanceOf(provider.ethAccount)); + let 
endBalanceProvider = (await token.balanceOf(provider.ethAccount)) endBalanceProvider > startBalanceProvider and - endBalanceProvider < startBalanceProvider + expiry.u256*reward + endBalanceProvider < startBalanceProvider + expiry.u256 * pricePerSlotPerSecond ) check eventually( ( - let endBalanceClient = (await token.balanceOf(client.ethAccount)); - let endBalanceProvider = (await token.balanceOf(provider.ethAccount)); - (startBalanceClient - endBalanceClient) == (endBalanceProvider - startBalanceProvider) + let endBalanceClient = (await token.balanceOf(client.ethAccount)) + let endBalanceProvider = (await token.balanceOf(provider.ethAccount)) + (startBalanceClient - endBalanceClient) == + (endBalanceProvider - startBalanceProvider) ), - timeout = 10*1000 # give client a bit of time to withdraw its funds + timeout = 10 * 1000, # give client a bit of time to withdraw its funds ) await subscription.unsubscribe() diff --git a/tests/integration/testproofs.nim b/tests/integration/testproofs.nim index 2e462d2c..b25643ad 100644 --- a/tests/integration/testproofs.nim +++ b/tests/integration/testproofs.nim @@ -14,45 +14,58 @@ export logutils logScope: topics = "integration test proofs" - marketplacesuite "Hosts submit regular proofs": + const minPricePerBytePerSecond = 1.u256 + const collateralPerByte = 1.u256 + const blocks = 8 + const ecNodes = 3 + const ecTolerance = 1 - test "hosts submit periodic proofs for slots they fill", NodeConfigs( - # Uncomment to start Hardhat automatically, typically so logs can be inspected locally - hardhat: - HardhatConfig.none, - - clients: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("node, marketplace") - .some, - - providers: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("marketplace", "sales", "reservations", "node", "clock") - .some, - ): + test "hosts submit periodic proofs for slots they fill", + NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: HardhatConfig.none, + clients: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node, marketplace") + .some, + providers: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("marketplace", "sales", "reservations", "node", "clock") + .some, + ): let client0 = clients()[0].client let expiry = 10.periods let duration = expiry + 5.periods - let data = await RandomChunker.example(blocks=8) - createAvailabilities(data.len * 2, duration) # TODO: better value for data.len + let data = await RandomChunker.example(blocks = blocks) + let datasetSize = + datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) + createAvailabilities( + datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + ) let cid = client0.upload(data).get let purchaseId = await client0.requestStorage( cid, - expiry=expiry, - duration=duration, - nodes=3, - tolerance=1 + expiry = expiry, + duration = duration, + nodes = ecNodes, + tolerance = ecTolerance, + ) + + let purchase = client0.getPurchase(purchaseId).get + check 
purchase.error == none string + + let request = purchase.request.get + let slotSize = request.ask.slotSize + + check eventually( + client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 ) - check eventually(client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000) var proofWasSubmitted = false proc onProofSubmitted(event: ?!ProofSubmitted) = @@ -60,65 +73,70 @@ marketplacesuite "Hosts submit regular proofs": let subscription = await marketplace.subscribe(ProofSubmitted, onProofSubmitted) - check eventually(proofWasSubmitted, timeout=(duration - expiry).int * 1000) + check eventually(proofWasSubmitted, timeout = (duration - expiry).int * 1000) await subscription.unsubscribe() - marketplacesuite "Simulate invalid proofs": - # TODO: these are very loose tests in that they are not testing EXACTLY how # proofs were marked as missed by the validator. These tests should be # tightened so that they are showing, as an integration test, that specific # proofs are being marked as missed by the validator. - test "slot is freed after too many invalid proofs submitted", NodeConfigs( - # Uncomment to start Hardhat automatically, typically so logs can be inspected locally - hardhat: - HardhatConfig.none, + const minPricePerBytePerSecond = 1.u256 + const collateralPerByte = 1.u256 + const blocks = 8 + const ecNodes = 3 + const ecTolerance = 1 - clients: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("node", "marketplace", "clock") - .some, - - providers: - CodexConfigs.init(nodes=1) - .withSimulateProofFailures(idx=0, failEveryNProofs=1) + test "slot is freed after too many invalid proofs submitted", + NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: HardhatConfig.none, + clients: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("node", "marketplace", "clock") + .some, + providers: CodexConfigs + .init(nodes = 1) + .withSimulateProofFailures(idx = 0, failEveryNProofs = 1) # .debug() # uncomment to enable console log output # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log # .withLogTopics("marketplace", "sales", "reservations", "node", "clock", "slotsbuilder") .some, - - validators: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("validator", "onchain", "ethers", "clock") - .some - ): + validators: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("validator", "onchain", "ethers", "clock") + .some, + ): let client0 = clients()[0].client let expiry = 10.periods let duration = expiry + 10.periods - let data = await RandomChunker.example(blocks=8) - createAvailabilities(data.len * 2, duration) # TODO: better value for data.len + let data = await RandomChunker.example(blocks = blocks) + let datasetSize = + datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) + createAvailabilities( + datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + ) let cid = client0.upload(data).get let purchaseId = await 
client0.requestStorage( cid, - expiry=expiry, - duration=duration, - nodes=3, - tolerance=1, - proofProbability=1 + expiry = expiry, + duration = duration, + nodes = ecNodes, + tolerance = ecTolerance, + proofProbability = 1, ) let requestId = client0.requestId(purchaseId).get - check eventually(client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000) + check eventually( + client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 + ) var slotWasFreed = false proc onSlotFreed(event: ?!SlotFreed) = @@ -127,52 +145,52 @@ marketplacesuite "Simulate invalid proofs": let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed) - check eventually(slotWasFreed, timeout=(duration - expiry).int * 1000) + check eventually(slotWasFreed, timeout = (duration - expiry).int * 1000) await subscription.unsubscribe() - test "slot is not freed when not enough invalid proofs submitted", NodeConfigs( - # Uncomment to start Hardhat automatically, typically so logs can be inspected locally - hardhat: HardhatConfig.none, - - clients: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("marketplace", "sales", "reservations", "node", "clock") - .some, - - providers: - CodexConfigs.init(nodes=1) - .withSimulateProofFailures(idx=0, failEveryNProofs=1) + test "slot is not freed when not enough invalid proofs submitted", + NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: HardhatConfig.none, + clients: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("marketplace", "sales", "reservations", "node", "clock") + .some, + providers: CodexConfigs + .init(nodes = 1) + .withSimulateProofFailures(idx = 0, failEveryNProofs = 1) # .debug() # uncomment to enable console log output # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log # .withLogTopics("marketplace", "sales", "reservations", "node") .some, - - validators: - CodexConfigs.init(nodes=1) - # .debug() - # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("validator", "onchain", "ethers", "clock") - .some - ): + validators: CodexConfigs.init(nodes = 1) + # .debug() + # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log + # .withLogTopics("validator", "onchain", "ethers", "clock") + .some, + ): let client0 = clients()[0].client let expiry = 10.periods let duration = expiry + 10.periods - let data = await RandomChunker.example(blocks=8) - createAvailabilities(data.len * 2, duration) # TODO: better value for data.len + let data = await RandomChunker.example(blocks = blocks) + let datasetSize = + datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) + createAvailabilities( + datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + ) let cid = client0.upload(data).get let purchaseId = await client0.requestStorage( cid, - expiry=expiry, - duration=duration, - nodes=3, - tolerance=1, - proofProbability=1 + expiry = expiry, + duration = duration, + nodes = ecNodes, + tolerance = ecTolerance, + proofProbability = 1, ) let requestId = client0.requestId(purchaseId).get @@ -183,6 +201,7 @@ marketplacesuite "Simulate invalid proofs": if event.requestId == requestId: 
slotWasFilled = true + let filledSubscription = await marketplace.subscribe(SlotFilled, onSlotFilled) # wait for the first slot to be filled @@ -192,6 +211,7 @@ marketplacesuite "Simulate invalid proofs": proc onSlotFreed(event: ?!SlotFreed) = if event.isOk and event.value.requestId == requestId: slotWasFreed = true + let freedSubscription = await marketplace.subscribe(SlotFreed, onSlotFreed) # In 2 periods you cannot have enough invalid proofs submitted: @@ -202,6 +222,9 @@ marketplacesuite "Simulate invalid proofs": await freedSubscription.unsubscribe() # TODO: uncomment once fixed + # WARNING: in the meantime minPrice has changed to minPricePerBytePerSecond + # and maxCollateral has changed to totalCollateral - double check if + # it is set correctly below # test "host that submits invalid proofs is paid out less", NodeConfigs( # # Uncomment to start Hardhat automatically, typically so logs can be inspected locally # # hardhat: HardhatConfig().withLogFile(), @@ -242,8 +265,8 @@ marketplacesuite "Simulate invalid proofs": # discard provider0.client.postAvailability( # totalSize=slotSize, # should match 1 slot only # duration=totalPeriods.periods.u256, - # minPrice=300.u256, - # maxCollateral=200.u256 + # minPricePerBytePerSecond=minPricePerBytePerSecond, + # totalCollateral=slotSize * minPricePerBytePerSecond # ) # let cid = client0.upload(data).get @@ -275,8 +298,8 @@ marketplacesuite "Simulate invalid proofs": # discard provider1.client.postAvailability( # totalSize=slotSize, # should match 1 slot only # duration=totalPeriods.periods.u256, - # minPrice=300.u256, - # maxCollateral=200.u256 + # minPricePerBytePerSecond=minPricePerBytePerSecond, + # totalCollateral=slotSize * minPricePerBytePerSecond # ) # check eventually filledSlotIds.len > 1 @@ -284,8 +307,8 @@ marketplacesuite "Simulate invalid proofs": # discard provider2.client.postAvailability( # totalSize=slotSize, # should match 1 slot only # duration=totalPeriods.periods.u256, - # minPrice=300.u256, - # maxCollateral=200.u256 + # minPricePerBytePerSecond=minPricePerBytePerSecond, + # totalCollateral=slotSize * minPricePerBytePerSecond # ) # check eventually filledSlotIds.len > 2 diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim index 4e5fa866..4e08e7a8 100644 --- a/tests/integration/testpurchasing.nim +++ b/tests/integration/testpurchasing.nim @@ -6,86 +6,121 @@ import ../contracts/time import ../examples twonodessuite "Purchasing": - test "node handles storage request", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let id1 = client1.requestStorage(cid, duration=100.u256, reward=2.u256, proofProbability=3.u256, expiry=10, collateral=200.u256).get - let id2 = client1.requestStorage(cid, duration=400.u256, reward=5.u256, proofProbability=6.u256, expiry=10, collateral=201.u256).get + let id1 = client1.requestStorage( + cid, + duration = 100.u256, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 10, + collateralPerByte = 1.u256, + ).get + let id2 = client1.requestStorage( + cid, + duration = 400.u256, + pricePerBytePerSecond = 2.u256, + proofProbability = 6.u256, + expiry = 10, + collateralPerByte = 2.u256, + ).get check id1 != id2 test "node retrieves purchase status", twoNodesConfig: # get one contiguous chunk let rng = rng.Rng.instance() - let chunker = RandomChunker.new(rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2) + let chunker = 
RandomChunker.new( + rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2 + ) let data = await chunker.getBytes() let cid = client1.upload(byteutils.toHex(data)).get let id = client1.requestStorage( cid, - duration=100.u256, - reward=2.u256, - proofProbability=3.u256, - expiry=30, - collateral=200.u256, - nodes=3, - tolerance=1).get + duration = 100.u256, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 30, + collateralPerByte = 1.u256, + nodes = 3, + tolerance = 1, + ).get let request = client1.getPurchase(id).get.request.get check request.ask.duration == 100.u256 - check request.ask.reward == 2.u256 + check request.ask.pricePerBytePerSecond == 1.u256 check request.ask.proofProbability == 3.u256 check request.expiry == 30 - check request.ask.collateral == 200.u256 + check request.ask.collateralPerByte == 1.u256 check request.ask.slots == 3'u64 check request.ask.maxSlotLoss == 1'u64 # TODO: We currently do not support encoding single chunks # test "node retrieves purchase status with 1 chunk", twoNodesConfig: # let cid = client1.upload("some file contents").get - # let id = client1.requestStorage(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, expiry=30, collateral=200.u256, nodes=2, tolerance=1).get + # let id = client1.requestStorage( + # cid, duration=1.u256, pricePerBytePerSecond=1.u256, + # proofProbability=3.u256, expiry=30, collateralPerByte=1.u256, + # nodes=2, tolerance=1).get # let request = client1.getPurchase(id).get.request.get # check request.ask.duration == 1.u256 - # check request.ask.reward == 2.u256 + # check request.ask.pricePerBytePerSecond == 1.u256 # check request.ask.proofProbability == 3.u256 # check request.expiry == 30 - # check request.ask.collateral == 200.u256 + # check request.ask.collateralPerByte == 1.u256 # check request.ask.slots == 3'u64 # check request.ask.maxSlotLoss == 1'u64 test "node remembers purchase status after restart", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let id = client1.requestStorage(cid, - duration=10*60.u256, - reward=2.u256, - proofProbability=3.u256, - expiry=5*60, - collateral=200.u256, - nodes=3.uint, - tolerance=1.uint).get - check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3*60*1000) + let id = client1.requestStorage( + cid, + duration = 10 * 60.u256, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 5 * 60, + collateralPerByte = 1.u256, + nodes = 3.uint, + tolerance = 1.uint, + ).get + check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) await node1.restart() client1.restart() - check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3*60*1000) + check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) let request = client1.getPurchase(id).get.request.get - check request.ask.duration == (10*60).u256 - check request.ask.reward == 2.u256 + check request.ask.duration == (10 * 60).u256 + check request.ask.pricePerBytePerSecond == 1.u256 check request.ask.proofProbability == 3.u256 - check request.expiry == (5*60).u256 - check request.ask.collateral == 200.u256 + check request.expiry == (5 * 60).u256 + check request.ask.collateralPerByte == 1.u256 check request.ask.slots == 3'u64 check request.ask.maxSlotLoss == 1'u64 test "node requires expiry and its value to be in future", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = 
await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let responseMissing = client1.requestStorageRaw(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256) + let responseMissing = client1.requestStorageRaw( + cid, + duration = 1.u256, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + ) check responseMissing.status == "400 Bad Request" check responseMissing.body == "Expiry required" - let responseBefore = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=10) + let responseBefore = client1.requestStorageRaw( + cid, + duration = 10.u256, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + expiry = 10, + ) check responseBefore.status == "400 Bad Request" - check "Expiry needs value bigger then zero and smaller then the request's duration" in responseBefore.body + check "Expiry needs value bigger then zero and smaller then the request's duration" in + responseBefore.body diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 2d5c3392..52b722d6 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -21,7 +21,15 @@ twonodessuite "REST API": test "node shows used and available space", twoNodesConfig: discard client1.upload("some file contents").get - discard client1.postAvailability(totalSize=12.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get + let totalSize = 12.u256 + let minPricePerBytePerSecond = 1.u256 + let totalCollateral = totalSize * minPricePerBytePerSecond + discard client1.postAvailability( + totalSize = totalSize, + duration = 2.u256, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + ).get let space = client1.space().tryGet() check: space.totalBlocks == 2 @@ -42,114 +50,117 @@ twonodessuite "REST API": test "request storage fails for datasets that are too small", twoNodesConfig: let cid = client1.upload("some file contents").get - let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9) + let response = client1.requestStorageRaw( + cid, + duration = 10.u256, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + expiry = 9, + ) check: response.status == "400 Bad Request" - response.body == "Dataset too small for erasure parameters, need at least " & $(2*DefaultBlockSize.int) & " bytes" + response.body == + "Dataset too small for erasure parameters, need at least " & + $(2 * DefaultBlockSize.int) & " bytes" test "request storage succeeds for sufficiently sized datasets", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9) + let response = client1.requestStorageRaw( + cid, + duration = 10.u256, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + expiry = 9, + ) check: response.status == "200 OK" test "request storage fails if tolerance is zero", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get let duration = 100.u256 - let reward = 2.u256 + let 
pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 let expiry = 30.uint - let collateral = 200.u256 + let collateralPerByte = 1.u256 let nodes = 3 let tolerance = 0 - var responseBefore = client1.requestStorageRaw(cid, - duration, - reward, - proofProbability, - collateral, - expiry, - nodes.uint, - tolerance.uint) + var responseBefore = client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes.uint, tolerance.uint, + ) check responseBefore.status == "400 Bad Request" check responseBefore.body == "Tolerance needs to be bigger then zero" test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get let duration = 100.u256 - let reward = 2.u256 + let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 let expiry = 30.uint - let collateral = 200.u256 + let collateralPerByte = 1.u256 let ecParams = @[(1, 1), (2, 1), (3, 2), (3, 3)] for ecParam in ecParams: let (nodes, tolerance) = ecParam - var responseBefore = client1.requestStorageRaw(cid, - duration, - reward, - proofProbability, - collateral, - expiry, - nodes.uint, - tolerance.uint) + var responseBefore = client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) check responseBefore.status == "400 Bad Request" - check responseBefore.body == "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" + check responseBefore.body == + "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" - test "request storage fails if tolerance > nodes (underflow protection)", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + test "request storage fails if tolerance > nodes (underflow protection)", + twoNodesConfig: + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get let duration = 100.u256 - let reward = 2.u256 + let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 let expiry = 30.uint - let collateral = 200.u256 + let collateralPerByte = 1.u256 let ecParams = @[(0, 1), (1, 2), (2, 3)] for ecParam in ecParams: let (nodes, tolerance) = ecParam - var responseBefore = client1.requestStorageRaw(cid, - duration, - reward, - proofProbability, - collateral, - expiry, - nodes.uint, - tolerance.uint) + var responseBefore = client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) check responseBefore.status == "400 Bad Request" - check responseBefore.body == "Invalid parameters: `tolerance` cannot be greater than `nodes`" + check responseBefore.body == + "Invalid parameters: `tolerance` cannot be greater than `nodes`" test "request storage succeeds if nodes and tolerance within range", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get let duration = 100.u256 - let reward = 2.u256 + let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 let expiry = 30.uint - let collateral = 200.u256 + let collateralPerByte = 1.u256 let ecParams = @[(3, 1), (5, 2)] for ecParam in ecParams: let (nodes, tolerance) = ecParam - var responseBefore = client1.requestStorageRaw(cid, - duration, - reward, - proofProbability, - collateral, - expiry, - 
nodes.uint, - tolerance.uint) + var responseBefore = client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) check responseBefore.status == "200 OK" @@ -161,13 +172,15 @@ twonodessuite "REST API": check response.body != "" test "node accepts file uploads with content disposition", twoNodesConfig: - let headers = newHttpHeaders({"Content-Disposition": "attachment; filename=\"example.txt\""}) + let headers = + newHttpHeaders({"Content-Disposition": "attachment; filename=\"example.txt\""}) let response = client1.uploadRaw("some file contents", headers) check response.status == "200 OK" check response.body != "" - test "node accepts file uploads with content disposition without filename", twoNodesConfig: + test "node accepts file uploads with content disposition without filename", + twoNodesConfig: let headers = newHttpHeaders({"Content-Disposition": "attachment"}) let response = client1.uploadRaw("some file contents", headers) @@ -175,7 +188,8 @@ twonodessuite "REST API": check response.body != "" test "upload fails if content disposition contains bad filename", twoNodesConfig: - let headers = newHttpHeaders({"Content-Disposition": "attachment; filename=\"exam*ple.txt\""}) + let headers = + newHttpHeaders({"Content-Disposition": "attachment; filename=\"exam*ple.txt\""}) let response = client1.uploadRaw("some file contents", headers) check response.status == "422 Unprocessable Entity" @@ -189,7 +203,12 @@ twonodessuite "REST API": check response.body == "The MIME type is not valid." test "node retrieve the metadata", twoNodesConfig: - let headers = newHttpHeaders({"Content-Type": "text/plain", "Content-Disposition": "attachment; filename=\"example.txt\""}) + let headers = newHttpHeaders( + { + "Content-Type": "text/plain", + "Content-Disposition": "attachment; filename=\"example.txt\"", + } + ) let uploadResponse = client1.uploadRaw("some file contents", headers) let cid = uploadResponse.body let listResponse = client1.listRaw() @@ -212,10 +231,12 @@ twonodessuite "REST API": check manifest["uploadedAt"].getInt() > 0 test "node set the headers when for download", twoNodesConfig: - let headers = newHttpHeaders({ - "Content-Disposition": "attachment; filename=\"example.txt\"", - "Content-Type": "text/plain" - }) + let headers = newHttpHeaders( + { + "Content-Disposition": "attachment; filename=\"example.txt\"", + "Content-Type": "text/plain", + } + ) let uploadResponse = client1.uploadRaw("some file contents", headers) let cid = uploadResponse.body @@ -228,7 +249,8 @@ twonodessuite "REST API": check response.headers.hasKey("Content-Type") == true check response.headers["Content-Type"] == "text/plain" check response.headers.hasKey("Content-Disposition") == true - check response.headers["Content-Disposition"] == "attachment; filename=\"example.txt\"" + check response.headers["Content-Disposition"] == + "attachment; filename=\"example.txt\"" let local = true let localResponse = client1.downloadRaw(cid, local) @@ -237,4 +259,5 @@ twonodessuite "REST API": check localResponse.headers.hasKey("Content-Type") == true check localResponse.headers["Content-Type"] == "text/plain" check localResponse.headers.hasKey("Content-Disposition") == true - check localResponse.headers["Content-Disposition"] == "attachment; filename=\"example.txt\"" \ No newline at end of file + check localResponse.headers["Content-Disposition"] == + "attachment; filename=\"example.txt\"" diff --git a/tests/integration/testsales.nim 
b/tests/integration/testsales.nim index f9af76e5..a77e5649 100644 --- a/tests/integration/testsales.nim +++ b/tests/integration/testsales.nim @@ -16,10 +16,12 @@ proc findItem[T](items: seq[T], item: T): ?!T = multinodesuite "Sales": let salesConfig = NodeConfigs( - clients: CodexConfigs.init(nodes=1).some, - providers: CodexConfigs.init(nodes=1).some, + clients: CodexConfigs.init(nodes = 1).some, + providers: CodexConfigs.init(nodes = 1).some, ) + let minPricePerBytePerSecond = 1.u256 + var host: CodexClient var client: CodexClient @@ -28,70 +30,124 @@ multinodesuite "Sales": client = clients()[0].client test "node handles new storage availability", salesConfig: - let availability1 = host.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get - let availability2 = host.postAvailability(totalSize=4.u256, duration=5.u256, minPrice=6.u256, maxCollateral=7.u256).get + let availability1 = host.postAvailability( + totalSize = 1.u256, + duration = 2.u256, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 4.u256, + ).get + let availability2 = host.postAvailability( + totalSize = 4.u256, + duration = 5.u256, + minPricePerBytePerSecond = 6.u256, + totalCollateral = 7.u256, + ).get check availability1 != availability2 test "node lists storage that is for sale", salesConfig: - let availability = host.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get + let availability = host.postAvailability( + totalSize = 1.u256, + duration = 2.u256, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 4.u256, + ).get check availability in host.getAvailabilities().get test "updating non-existing availability", salesConfig: - let nonExistingResponse = host.patchAvailabilityRaw(AvailabilityId.example, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some) + let nonExistingResponse = host.patchAvailabilityRaw( + AvailabilityId.example, + duration = 100.u256.some, + minPricePerBytePerSecond = 2.u256.some, + totalCollateral = 200.u256.some, + ) check nonExistingResponse.status == "404 Not Found" test "updating availability", salesConfig: - let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get + let availability = host.postAvailability( + totalSize = 140000.u256, + duration = 200.u256, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ).get - host.patchAvailability(availability.id, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some) + host.patchAvailability( + availability.id, + duration = 100.u256.some, + minPricePerBytePerSecond = 2.u256.some, + totalCollateral = 200.u256.some, + ) let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get check updatedAvailability.duration == 100 - check updatedAvailability.minPrice == 200 - check updatedAvailability.maxCollateral == 200 + check updatedAvailability.minPricePerBytePerSecond == 2 + check updatedAvailability.totalCollateral == 200 check updatedAvailability.totalSize == 140000 check updatedAvailability.freeSize == 140000 test "updating availability - freeSize is not allowed to be changed", salesConfig: - let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get - let freeSizeResponse = host.patchAvailabilityRaw(availability.id, freeSize=110000.u256.some) + let availability = host.postAvailability( + totalSize = 140000.u256, + duration = 
200.u256, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ).get + let freeSizeResponse = + host.patchAvailabilityRaw(availability.id, freeSize = 110000.u256.some) check freeSizeResponse.status == "400 Bad Request" - check "not allowed" in freeSizeResponse.body + check "not allowed" in freeSizeResponse.body test "updating availability - updating totalSize", salesConfig: - let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get - host.patchAvailability(availability.id, totalSize=100000.u256.some) + let availability = host.postAvailability( + totalSize = 140000.u256, + duration = 200.u256, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ).get + host.patchAvailability(availability.id, totalSize = 100000.u256.some) let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get check updatedAvailability.totalSize == 100000 check updatedAvailability.freeSize == 100000 - test "updating availability - updating totalSize does not allow bellow utilized", salesConfig: + test "updating availability - updating totalSize does not allow below utilized", + salesConfig: let originalSize = 0xFFFFFF.u256 - let data = await RandomChunker.example(blocks=8) - let availability = host.postAvailability(totalSize=originalSize, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get + let data = await RandomChunker.example(blocks = 8) + let minPricePerBytePerSecond = 3.u256 + let collateralPerByte = 1.u256 + let totalCollateral = originalSize * collateralPerByte + let availability = host.postAvailability( + totalSize = originalSize, + duration = 20 * 60.u256, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + ).get # Lets create storage request that will utilize some of the availability's space let cid = client.upload(data).get let id = client.requestStorage( cid, - duration=20*60.u256, - reward=400.u256, - proofProbability=3.u256, - expiry=10*60, - collateral=200.u256, + duration = 20 * 60.u256, + pricePerBytePerSecond = minPricePerBytePerSecond, + proofProbability = 3.u256, + expiry = 10 * 60, + collateralPerByte = collateralPerByte, nodes = 3, - tolerance = 1).get + tolerance = 1, + ).get - check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000) + check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get check updatedAvailability.totalSize != updatedAvailability.freeSize let utilizedSize = updatedAvailability.totalSize - updatedAvailability.freeSize - let totalSizeResponse = host.patchAvailabilityRaw(availability.id, totalSize=(utilizedSize-1.u256).some) + let totalSizeResponse = host.patchAvailabilityRaw( + availability.id, totalSize = (utilizedSize - 1.u256).some + ) check totalSizeResponse.status == "400 Bad Request" check "totalSize must be larger then current totalSize" in totalSizeResponse.body - host.patchAvailability(availability.id, totalSize=(originalSize + 20000).some) - let newUpdatedAvailability = (host.getAvailabilities().get).findItem(availability).get + host.patchAvailability(availability.id, totalSize = (originalSize + 20000).some) + let newUpdatedAvailability = + (host.getAvailabilities().get).findItem(availability).get check newUpdatedAvailability.totalSize == originalSize + 20000 check newUpdatedAvailability.freeSize - updatedAvailability.freeSize == 20000 diff --git 
diff --git a/tests/integration/testupdownload.nim b/tests/integration/testupdownload.nim index 73107b52..74bee8c7 100644 --- a/tests/integration/testupdownload.nim +++ b/tests/integration/testupdownload.nim @@ -56,7 +56,8 @@ twonodessuite "Uploads and downloads": let manifest = jsonData["manifest"] check manifest.hasKey("treeCid") == true - check manifest["treeCid"].getStr() == "zDzSvJTezk7bJNQqFq8k1iHXY84psNuUfZVusA5bBQQUSuyzDSVL" + check manifest["treeCid"].getStr() == + "zDzSvJTezk7bJNQqFq8k1iHXY84psNuUfZVusA5bBQQUSuyzDSVL" check manifest.hasKey("datasetSize") == true check manifest["datasetSize"].getInt() == 18 check manifest.hasKey("blockSize") == true @@ -83,12 +84,12 @@ twonodessuite "Uploads and downloads": test "reliable transfer test", twoNodesConfig: proc transferTest(a: CodexClient, b: CodexClient) {.async.} = - let data = await RandomChunker.example(blocks=8) + let data = await RandomChunker.example(blocks = 8) let cid = a.upload(data).get let response = b.download(cid).get check: response == data - for run in 0..10: + for run in 0 .. 10: await transferTest(client1, client2) await transferTest(client2, client1) diff --git a/tests/integration/testvalidator.nim b/tests/integration/testvalidator.nim index f010a23f..8b7fbc5b 100644 --- a/tests/integration/testvalidator.nim +++ b/tests/integration/testvalidator.nim @@ -15,11 +15,12 @@ export logutils logScope: topics = "integration test validation" -template eventuallyS(expression: untyped, timeout=10, step = 5, - cancelExpression: untyped = false): bool = +template eventuallyS( + expression: untyped, timeout = 10, step = 5, cancelExpression: untyped = false +): bool = bind Moment, now, seconds - proc eventuallyS: Future[bool] {.async.} = + proc eventuallyS(): Future[bool] {.async.} = let endTime = Moment.now() + timeout.seconds var secondsElapsed = 0 while not expression: @@ -33,16 +34,17 @@ template eventuallyS(expression: untyped, timeout=10, step = 5, await eventuallyS() marketplacesuite "Validation": - let nodes = 3 - let tolerance = 1 - let proofProbability = 1 + const blocks = 8 + const ecNodes = 3 + const ecTolerance = 1 + const proofProbability = 1 + + const collateralPerByte = 1.u256 + const minPricePerBytePerSecond = 1.u256 proc waitForRequestToFail( - marketplace: Marketplace, - requestId: RequestId, - timeout=10, - step = 5, - ): Future[bool] {.async.} = + marketplace: Marketplace, requestId: RequestId, timeout = 10, step = 5 + ): Future[bool] {.async.} = let endTime = Moment.now() + timeout.seconds var requestState = await marketplace.requestState(requestId) @@ -55,36 +57,35 @@ marketplacesuite "Validation": requestState = await marketplace.requestState(requestId) return true - test "validator marks proofs as missing when using validation groups", NodeConfigs( - # Uncomment to start Hardhat automatically, typically so logs can be inspected locally - hardhat: - HardhatConfig.none, - - clients: - CodexConfigs.init(nodes=1) + test "validator marks proofs as missing when using validation groups", + NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: HardhatConfig.none, + clients: CodexConfigs + .init(nodes = 1) # .debug() # uncomment to enable console log output - .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log - .withLogTopics("purchases", "onchain") - .some, - - providers: - CodexConfigs.init(nodes=1) - .withSimulateProofFailures(idx=0, failEveryNProofs=1) + .withLogFile() + # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+ .withLogTopics("purchases", "onchain").some, + providers: CodexConfigs + .init(nodes = 1) + .withSimulateProofFailures(idx = 0, failEveryNProofs = 1) # .debug() # uncomment to enable console log output # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogTopics("sales", "onchain") .some, - - validators: - CodexConfigs.init(nodes=2) + validators: CodexConfigs + .init(nodes = 2) .withValidationGroups(groups = 2) .withValidationGroupIndex(idx = 0, groupIndex = 0) .withValidationGroupIndex(idx = 1, groupIndex = 1) # .debug() # uncomment to enable console log output - .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log - .withLogTopics("validator") # each topic as a separate string argument - .some - ): + .withLogFile() + # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log + .withLogTopics("validator") + # each topic as a separate string argument + .some, + ): let client0 = clients()[0].client let expiry = 5.periods let duration = expiry + 10.periods @@ -95,27 +96,31 @@ marketplacesuite "Validation": var currentTime = await ethProvider.currentTime() let requestEndTime = currentTime.truncate(uint64) + duration - let data = await RandomChunker.example(blocks=8) - - # TODO: better value for data.len below. This TODO is also present in - # testproofs.nim - we may want to address it or remove the comment. - createAvailabilities(data.len * 2, duration) + let data = await RandomChunker.example(blocks = blocks) + let datasetSize = + datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) + createAvailabilities( + datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + ) let cid = client0.upload(data).get let purchaseId = await client0.requestStorage( cid, - expiry=expiry, - duration=duration, - nodes=nodes, - tolerance=tolerance, - proofProbability=proofProbability + expiry = expiry, + duration = duration, + nodes = ecNodes, + tolerance = ecTolerance, + proofProbability = proofProbability, ) let requestId = client0.requestId(purchaseId).get debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId - if not eventuallyS(client0.purchaseStateIs(purchaseId, "started"), - timeout = (expiry + 60).int, step = 5): + if not eventuallyS( + client0.purchaseStateIs(purchaseId, "started"), + timeout = (expiry + 60).int, + step = 5, + ): debug "validation suite: timed out waiting for the purchase to start" fail() return @@ -127,31 +132,27 @@ marketplacesuite "Validation": debug "validation suite", secondsTillRequestEnd = secondsTillRequestEnd.seconds check await marketplace.waitForRequestToFail( - requestId, - timeout = secondsTillRequestEnd + 60, - step = 5 + requestId, timeout = secondsTillRequestEnd + 60, step = 5 ) - test "validator uses historical state to mark missing proofs", NodeConfigs( - # Uncomment to start Hardhat automatically, typically so logs can be inspected locally - hardhat: - HardhatConfig.none, - - clients: - CodexConfigs.init(nodes=1) + test "validator uses historical state to mark missing proofs", + NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: HardhatConfig.none, + clients: CodexConfigs + .init(nodes = 1) # .debug() # uncomment to enable console log output - .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log - .withLogTopics("purchases", "onchain") - .some, - - providers: - CodexConfigs.init(nodes=1) - .withSimulateProofFailures(idx=0, failEveryNProofs=1) +
.withLogFile() + # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log + .withLogTopics("purchases", "onchain").some, + providers: CodexConfigs + .init(nodes = 1) + .withSimulateProofFailures(idx = 0, failEveryNProofs = 1) # .debug() # uncomment to enable console log output # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogTopics("sales", "onchain") - .some - ): + .some, + ): let client0 = clients()[0].client let expiry = 5.periods let duration = expiry + 10.periods @@ -162,51 +163,54 @@ marketplacesuite "Validation": var currentTime = await ethProvider.currentTime() let requestEndTime = currentTime.truncate(uint64) + duration - let data = await RandomChunker.example(blocks=8) - - # TODO: better value for data.len below. This TODO is also present in - # testproofs.nim - we may want to address it or remove the comment. - createAvailabilities(data.len * 2, duration) + let data = await RandomChunker.example(blocks = blocks) + let datasetSize = + datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) + createAvailabilities( + datasetSize, duration, collateralPerByte, minPricePerBytePerSecond + ) let cid = client0.upload(data).get let purchaseId = await client0.requestStorage( cid, - expiry=expiry, - duration=duration, - nodes=nodes, - tolerance=tolerance, - proofProbability=proofProbability + expiry = expiry, + duration = duration, + nodes = ecNodes, + tolerance = ecTolerance, + proofProbability = proofProbability, ) let requestId = client0.requestId(purchaseId).get debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId - if not eventuallyS(client0.purchaseStateIs(purchaseId, "started"), - timeout = (expiry + 60).int, step = 5): + if not eventuallyS( + client0.purchaseStateIs(purchaseId, "started"), + timeout = (expiry + 60).int, + step = 5, + ): debug "validation suite: timed out waiting for the purchase to start" fail() return - + # extra block just to make sure we have one that separates us # from the block containing the last (past) SlotFilled event discard await ethProvider.send("evm_mine") - var validators = CodexConfigs.init(nodes=2) + var validators = CodexConfigs + .init(nodes = 2) .withValidationGroups(groups = 2) .withValidationGroupIndex(idx = 0, groupIndex = 0) .withValidationGroupIndex(idx = 1, groupIndex = 1) # .debug() # uncomment to enable console log output - .withLogFile() # uncomment to output log file to: # tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log + .withLogFile() + # uncomment to output log file to: # tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log .withLogTopics("validator") # each topic as a separate string argument - + failAndTeardownOnError "failed to start validator nodes": for config in validators.configs.mitems: let node = await startValidatorNode(config) - running.add RunningNode( - role: Role.Validator, - node: node - ) - + running.add RunningNode(role: Role.Validator, node: node) + discard await ethProvider.send("evm_mine") currentTime = await ethProvider.currentTime() let secondsTillRequestEnd = (requestEndTime - currentTime.truncate(uint64)).int @@ -214,7 +218,5 @@ marketplacesuite "Validation": debug "validation suite", secondsTillRequestEnd = secondsTillRequestEnd.seconds check await marketplace.waitForRequestToFail( - requestId, - timeout = secondsTillRequestEnd + 60, - step = 5 + requestId, timeout = secondsTillRequestEnd + 60, step = 5 ) diff --git a/tests/integration/twonodes.nim b/tests/integration/twonodes.nim index ac2d149a..5666690e 100644 --- a/tests/integration/twonodes.nim +++
b/tests/integration/twonodes.nim @@ -10,9 +10,10 @@ import ./nodeconfigs export codexclient export multinodes -template twonodessuite*(name: string, body: untyped) = +template twonodessuite*(name: string, body: untyped) = multinodesuite name: - let twoNodesConfig {.inject, used.} = NodeConfigs(clients: CodexConfigs.init(nodes=2).some) + let twoNodesConfig {.inject, used.} = + NodeConfigs(clients: CodexConfigs.init(nodes = 2).some) var node1 {.inject, used.}: CodexProcess var node2 {.inject, used.}: CodexProcess diff --git a/tests/logging.nim b/tests/logging.nim index ece9c9b0..d165fe48 100644 --- a/tests/logging.nim +++ b/tests/logging.nim @@ -6,5 +6,5 @@ when not defined(nimscript): defaultChroniclesStream.output.writer = ignoreLogging - {.warning[UnusedImport]:off.} + {.warning[UnusedImport]: off.} {.used.} diff --git a/tests/testContracts.nim b/tests/testContracts.nim index aff2c1d7..d5ed7d6a 100644 --- a/tests/testContracts.nim +++ b/tests/testContracts.nim @@ -4,4 +4,4 @@ import ./contracts/testDeployment import ./contracts/testClock import ./contracts/testProvider -{.warning[UnusedImport]:off.} +{.warning[UnusedImport]: off.} diff --git a/tests/testIntegration.nim b/tests/testIntegration.nim index f0a59ee4..9a2dc472 100644 --- a/tests/testIntegration.nim +++ b/tests/testIntegration.nim @@ -9,4 +9,4 @@ import ./integration/testproofs import ./integration/testvalidator import ./integration/testecbug -{.warning[UnusedImport]:off.} +{.warning[UnusedImport]: off.} diff --git a/tests/testTaiko.nim b/tests/testTaiko.nim index c0a48396..8036e8a3 100644 --- a/tests/testTaiko.nim +++ b/tests/testTaiko.nim @@ -10,35 +10,33 @@ import pkg/questionable/results import ./asynctest import ./integration/nodes - suite "Taiko L2 Integration Tests": - var node1, node2: NodeProcess setup: doAssert existsEnv("CODEX_ETH_PRIVATE_KEY"), "Key for Taiko account missing" - node1 = startNode([ - "--data-dir=" & createTempDir("", ""), - "--api-port=8080", - "--nat=none", - "--disc-port=8090", - "--persistence", - "--eth-provider=https://rpc.test.taiko.xyz" - ]) + node1 = startNode( + [ + "--data-dir=" & createTempDir("", ""), "--api-port=8080", "--nat=none", + "--disc-port=8090", "--persistence", "--eth-provider=https://rpc.test.taiko.xyz", + ] + ) node1.waitUntilStarted() let bootstrap = (!node1.client.info())["spr"].getStr() - node2 = startNode([ - "--data-dir=" & createTempDir("", ""), - "--api-port=8081", - "--nat=none", - "--disc-port=8091", - "--bootstrap-node=" & bootstrap, - "--persistence", - "--eth-provider=https://rpc.test.taiko.xyz" - ]) + node2 = startNode( + [ + "--data-dir=" & createTempDir("", ""), + "--api-port=8081", + "--nat=none", + "--disc-port=8091", + "--bootstrap-node=" & bootstrap, + "--persistence", + "--eth-provider=https://rpc.test.taiko.xyz", + ] + ) node2.waitUntilStarted() teardown: @@ -48,27 +46,37 @@ suite "Taiko L2 Integration Tests": node2.removeDataDir() test "node 1 buys storage from node 2": + let size = 0xFFFFF.u256 + let minPricePerBytePerSecond = 1.u256 + let totalCollateral = size * minPricePerBytePerSecond discard node2.client.postAvailability( - size=0xFFFFF.u256, - duration=200.u256, - minPrice=300.u256, - maxCollateral=300.u256 + size = size, + duration = 200.u256, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, ) let cid = !node1.client.upload("some file contents") echo " - requesting storage, expires in 5 minutes" let expiry = getTime().toUnix().uint64 + 5 * 60 - let purchase = !node1.client.requestStorage( - cid, - 
duration=30.u256, - reward=400.u256, - proofProbability=3.u256, - collateral=200.u256, - expiry=expiry.u256 - ) + let purchase = + !node1.client.requestStorage( + cid, + duration = 30.u256, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + expiry = expiry.u256, + ) echo " - waiting for request to start, timeout 5 minutes" - check eventually(node1.client.getPurchase(purchase).?state == success "started", timeout = 5 * 60 * 1000) + check eventually( + node1.client.getPurchase(purchase) .? state == success "started", + timeout = 5 * 60 * 1000, + ) echo " - waiting for request to finish, timeout 1 minute" - check eventually(node1.client.getPurchase(purchase).?state == success "finished", timeout = 1 * 60 * 1000) + check eventually( + node1.client.getPurchase(purchase) .? state == success "finished", + timeout = 1 * 60 * 1000, + ) diff --git a/tests/testTools.nim b/tests/testTools.nim index b4675958..f3ead1d1 100644 --- a/tests/testTools.nim +++ b/tests/testTools.nim @@ -1,3 +1,3 @@ import ./tools/cirdl/testcirdl -{.warning[UnusedImport]:off.} +{.warning[UnusedImport]: off.} diff --git a/tests/tools/cirdl/testcirdl.nim b/tests/tools/cirdl/testcirdl.nim index a4fd0fc0..dc02be4d 100644 --- a/tests/tools/cirdl/testcirdl.nim +++ b/tests/tools/cirdl/testcirdl.nim @@ -21,20 +21,15 @@ suite "tools/cirdl": let args = [circuitPath, rpcEndpoint, $marketplaceAddress] - let process = osproc.startProcess( - cirdl, - workdir, - args, - options={poParentStreams} - ) + let process = osproc.startProcess(cirdl, workdir, args, options = {poParentStreams}) let returnCode = process.waitForExit() check returnCode == 0 check: - fileExists(circuitPath/"proof_main_verification_key.json") - fileExists(circuitPath/"proof_main.r1cs") - fileExists(circuitPath/"proof_main.wasm") - fileExists(circuitPath/"proof_main.zkey") + fileExists(circuitPath / "proof_main_verification_key.json") + fileExists(circuitPath / "proof_main.r1cs") + fileExists(circuitPath / "proof_main.wasm") + fileExists(circuitPath / "proof_main.zkey") removeDir(circuitPath) diff --git a/tools/scripts/git_pre_commit_format.sh b/tools/scripts/git_pre_commit_format.sh new file mode 100755 index 00000000..ffe2eca1 --- /dev/null +++ b/tools/scripts/git_pre_commit_format.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +echo "Formatting changed files with pre-commit hook" + +# Regexp for grep to only choose some file extensions for formatting +exts="\.\(nim\|nims\)$" + +# Build nph lazily; the braces (not a subshell) let `exit 0` skip the hook when the build fails +make build-nph || { 1>&2 echo "failed to build nph. Pre-commit formatting will not be done."; exit 0; } + +# Format staged files +git diff --cached --name-only --diff-filter=ACMR | grep "$exts" | while read -r file; do + echo "Formatting $file" + make nph/"$file" + git add "$file" +done diff --git a/vendor/codex-contracts-eth b/vendor/codex-contracts-eth index dfab6102..e74d3397 160000 --- a/vendor/codex-contracts-eth +++ b/vendor/codex-contracts-eth @@ -1 +1 @@ -Subproject commit dfab6102e71d2acaff86af45b87be2536530c624 +Subproject commit e74d3397a133eaf1eb95d9ce59f56747a7c8c30b diff --git a/vendor/nph b/vendor/nph new file mode 160000 index 00000000..f1f04776 --- /dev/null +++ b/vendor/nph @@ -0,0 +1 @@ +Subproject commit f1f047760c6cb38d5c55d0ddb29b57a9c008a976
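On the purchase side, the same migration renames `reward` to `pricePerBytePerSecond` and `collateral` to `collateralPerByte`, as the testsales.nim and testTaiko.nim hunks above show. A minimal sketch of the new request call, not part of the diff, with illustrative values and `client`/`cid` being a CodexClient and an uploaded content id as in those tests:

  # Sketch only: values are illustrative; mirrors the call shape in testsales.nim.
  let id = client.requestStorage(
    cid,
    duration = 20 * 60.u256,
    pricePerBytePerSecond = 1.u256, # was: reward
    proofProbability = 3.u256,
    expiry = 10 * 60,
    collateralPerByte = 1.u256, # was: collateral
    nodes = 3,
    tolerance = 1,
  ).get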