From 7065718e0912004492c75cbed1036c7c8ec8939e Mon Sep 17 00:00:00 2001
From: Arnaud
Date: Thu, 27 Feb 2025 17:58:23 +0100
Subject: [PATCH 01/14] feat(marketplace): indicate that slot is being repaired
 when trying to download (#1083)

* Indicate that slot is being repaired when trying to download
* Fix tests
* Apply nph
* Calculate the repair collateral when adding the item into the queue
* Add slotCollateral calculation with getRequest cache and remove populateItem function
* Update with pricePerByte
* Simplify StorageAsk parameter
* Minor fixes
* Move cache request to another PR
* Rename SlotQueueItem collateral and required in init
* Use override func to optimise calls when the slot state is known
* Remove unused code
* Cosmetic change
* Use raiseMarketError helper
* Add exceptions to async pragma
* Cosmetic change
* Use raiseMarketError helper
* Let slotCollateral determine the slot state
* Use configSync to avoid async pragma in onStorageRequested
* Add loadConfig function
* Add CatchableError to async pragma
* Add missing pragma raises errors
* Move loadConfig
* Avoid swallowing CancelledError
* Avoid swallowing CancelledError
* Avoid swallowing CancelledError
* Update error messages
* Except MarketError instead of CatchableError
* Fix merge issue
* Log fatal when configuration cannot be loaded
* Propagate MarketError in slotCollateral
* Remove useless configSync
* Use result with explicit error
* Fix syntax

---------

Signed-off-by: Arnaud
---
 codex/codex.nim                           |   4 +
 codex/contracts/market.nim                | 126 +++++++++---
 codex/market.nim                          |  41 +++-
 codex/node.nim                            |  17 +-
 codex/sales.nim                           |  92 ++++++---
 codex/sales/salescontext.nim              |   2 +-
 codex/sales/slotqueue.nim                 |  55 ++---
 codex/sales/states/downloading.nim        |   5 +-
 codex/sales/states/filling.nim            |  17 +-
 tests/codex/helpers/mockmarket.nim        |  65 +++++-
 tests/codex/helpers/mockslotqueueitem.nim |   4 +-
 tests/codex/node/testcontracts.nim        |   2 +-
 tests/codex/sales/testsales.nim           |  41 ++--
 tests/codex/sales/testslotqueue.nim       | 235 ++++++++++++++--------
 tests/contracts/testMarket.nim            |  31 +++
 tests/examples.nim                        |   4 +-
 16 files changed, 521 insertions(+), 220 deletions(-)

diff --git a/codex/codex.nim b/codex/codex.nim
index b8905205..8a03510c 100644
--- a/codex/codex.nim
+++ b/codex/codex.nim
@@ -134,6 +134,10 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
       if config.simulateProofFailures > 0:
         warn "Proof failure simulation is not enabled for this build! Configuration ignored"
 
+      if error =? (await market.loadConfig()).errorOption:
+        fatal "Cannot load market configuration", error = error.msg
+        quit QuitFailure
+
       let purchasing = Purchasing.new(market, clock)
       let sales = Sales.new(market, clock, repo, proofFailures)
       client = some ClientInteractions.new(clock, purchasing)

diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim
index 3c016a59..9079ac8a 100644
--- a/codex/contracts/market.nim
+++ b/codex/contracts/market.nim
@@ -55,11 +55,17 @@ template convertEthersError(body) =
   except EthersError as error:
     raiseMarketError(error.msgDetail)
 
-proc config(market: OnChainMarket): Future[MarketplaceConfig] {.async.} =
+proc config(
+    market: OnChainMarket
+): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
   without resolvedConfig =? market.configuration:
-    let fetchedConfig = await market.contract.configuration()
-    market.configuration = some fetchedConfig
-    return fetchedConfig
+    if err =? (await market.loadConfig()).errorOption:
+      raiseMarketError(err.msg)
+
+    without config =?
market.configuration: + raiseMarketError("Failed to access to config from the Marketplace contract") + + return config return resolvedConfig @@ -70,7 +76,26 @@ proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} = let token = Erc20Token.new(tokenAddress, market.signer) discard await token.increaseAllowance(market.contract.address(), amount).confirm(1) -method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} = +method loadConfig*( + market: OnChainMarket +): Future[?!void] {.async: (raises: [CancelledError]).} = + try: + without config =? market.configuration: + let fetchedConfig = await market.contract.configuration() + + market.configuration = some fetchedConfig + + return success() + except AsyncLockError, EthersError: + let err = getCurrentException() + return failure newException( + MarketError, + "Failed to fetch the config from the Marketplace contract: " & err.msg, + ) + +method getZkeyHash*( + market: OnChainMarket +): Future[?string] {.async: (raises: [CancelledError, MarketError]).} = let config = await market.config() return some config.proofs.zkeyHash @@ -78,18 +103,24 @@ method getSigner*(market: OnChainMarket): Future[Address] {.async.} = convertEthersError: return await market.signer.getAddress() -method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} = +method periodicity*( + market: OnChainMarket +): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError: let config = await market.config() let period = config.proofs.period return Periodicity(seconds: period) -method proofTimeout*(market: OnChainMarket): Future[uint64] {.async.} = +method proofTimeout*( + market: OnChainMarket +): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError: let config = await market.config() return config.proofs.timeout -method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} = +method repairRewardPercentage*( + market: OnChainMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError: let config = await market.config() return config.collateral.repairRewardPercentage @@ -99,7 +130,9 @@ method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} = let config = await market.config() return config.requestDurationLimit -method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} = +method proofDowntime*( + market: OnChainMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError: let config = await market.config() return config.proofs.downtime @@ -128,19 +161,22 @@ method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} method getRequest*( market: OnChainMarket, id: RequestId -): Future[?StorageRequest] {.async.} = - let key = $id +): Future[?StorageRequest] {.async: (raises: [CancelledError]).} = + try: + let key = $id - if market.requestCache.contains(key): - return some market.requestCache[key] + if key in market.requestCache: + return some market.requestCache[key] - convertEthersError: - try: - let request = await market.contract.getRequest(id) - market.requestCache[key] = request - return some request - except Marketplace_UnknownRequest: - return none StorageRequest + let request = await market.contract.getRequest(id) + market.requestCache[key] = request + return some request + except Marketplace_UnknownRequest, KeyError: + warn "Cannot retrieve the request", error = getCurrentExceptionMsg() + return none StorageRequest + except 
EthersError, AsyncLockError: + error "Cannot retrieve the request", error = getCurrentExceptionMsg() + return none StorageRequest method requestState*( market: OnChainMarket, requestId: RequestId @@ -152,10 +188,17 @@ method requestState*( except Marketplace_UnknownRequest: return none RequestState -method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} = +method slotState*( + market: OnChainMarket, slotId: SlotId +): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} = convertEthersError: - let overrides = CallOverrides(blockTag: some BlockTag.pending) - return await market.contract.slotState(slotId, overrides) + try: + let overrides = CallOverrides(blockTag: some BlockTag.pending) + return await market.contract.slotState(slotId, overrides) + except AsyncLockError as err: + raiseMarketError( + "Failed to fetch the slot state from the Marketplace contract: " & err.msg + ) method getRequestEnd*( market: OnChainMarket, id: RequestId @@ -507,3 +550,40 @@ method queryPastStorageRequestedEvents*( let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) return await market.queryPastStorageRequestedEvents(fromBlock) + +method slotCollateral*( + market: OnChainMarket, requestId: RequestId, slotIndex: uint64 +): Future[?!UInt256] {.async: (raises: [CancelledError]).} = + let slotid = slotId(requestId, slotIndex) + + try: + let slotState = await market.slotState(slotid) + + without request =? await market.getRequest(requestId): + return failure newException( + MarketError, "Failure calculating the slotCollateral, cannot get the request" + ) + + return market.slotCollateral(request.ask.collateralPerSlot, slotState) + except MarketError as error: + error "Error when trying to calculate the slotCollateral", error = error.msg + return failure error + +method slotCollateral*( + market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState +): ?!UInt256 {.raises: [].} = + if slotState == SlotState.Repair: + without repairRewardPercentage =? + market.configuration .? collateral .? 
repairRewardPercentage: + return failure newException( + MarketError, + "Failure calculating the slotCollateral, cannot get the reward percentage", + ) + + return success ( + collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div( + 100.u256 + ) + ) + + return success(collateralPerSlot) diff --git a/codex/market.nim b/codex/market.nim index 5417c8e1..c5177aeb 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -62,25 +62,40 @@ type ProofSubmitted* = object of MarketplaceEvent id*: SlotId -method getZkeyHash*(market: Market): Future[?string] {.base, async.} = +method loadConfig*( + market: Market +): Future[?!void] {.base, async: (raises: [CancelledError]).} = + raiseAssert("not implemented") + +method getZkeyHash*( + market: Market +): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method getSigner*(market: Market): Future[Address] {.base, async.} = raiseAssert("not implemented") -method periodicity*(market: Market): Future[Periodicity] {.base, async.} = +method periodicity*( + market: Market +): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method proofTimeout*(market: Market): Future[uint64] {.base, async.} = +method proofTimeout*( + market: Market +): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") -method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} = +method repairRewardPercentage*( + market: Market +): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} = raiseAssert("not implemented") -method proofDowntime*(market: Market): Future[uint8] {.base, async.} = +method proofDowntime*( + market: Market +): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} = @@ -102,7 +117,7 @@ method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} = method getRequest*( market: Market, id: RequestId -): Future[?StorageRequest] {.base, async.} = +): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} = raiseAssert("not implemented") method requestState*( @@ -110,7 +125,9 @@ method requestState*( ): Future[?RequestState] {.base, async.} = raiseAssert("not implemented") -method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} = +method slotState*( + market: Market, slotId: SlotId +): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} = raiseAssert("not implemented") method getRequestEnd*( @@ -270,3 +287,13 @@ method queryPastStorageRequestedEvents*( market: Market, blocksAgo: int ): Future[seq[StorageRequested]] {.base, async.} = raiseAssert("not implemented") + +method slotCollateral*( + market: Market, requestId: RequestId, slotIndex: uint64 +): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} = + raiseAssert("not implemented") + +method slotCollateral*( + market: Market, collateralPerSlot: UInt256, slotState: SlotState +): ?!UInt256 {.base, gcsafe, raises: [].} = + raiseAssert("not implemented") diff --git a/codex/node.nim b/codex/node.nim index b0f66c90..b248e6df 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -591,7 +591,11 @@ proc requestStorage*( success purchase.id proc onStore( - self: CodexNodeRef, request: 
StorageRequest, slotIdx: uint64, blocksCb: BlocksCb + self: CodexNodeRef, + request: StorageRequest, + slotIdx: uint64, + blocksCb: BlocksCb, + isRepairing: bool = false, ): Future[?!void] {.async.} = ## store data in local storage ## @@ -604,6 +608,10 @@ proc onStore( trace "Received a request to store a slot" + # TODO: Use the isRepairing to manage the slot download. + # If isRepairing is true, the slot has to be repaired before + # being downloaded. + without manifest =? (await self.fetchManifest(cid)), err: trace "Unable to fetch manifest for cid", cid, err = err.msg return failure(err) @@ -745,9 +753,12 @@ proc start*(self: CodexNodeRef) {.async.} = if hostContracts =? self.contracts.host: hostContracts.sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, + slot: uint64, + onBatch: BatchProc, + isRepairing: bool = false, ): Future[?!void] = - self.onStore(request, slot, onBatch) + self.onStore(request, slot, onBatch, isRepairing) hostContracts.sales.onExpiryUpdate = proc( rootCid: Cid, expiry: SecondsSince1970 diff --git a/codex/sales.nim b/codex/sales.nim index 91d882b8..af594a9a 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -157,13 +157,28 @@ proc cleanUp( # Re-add items back into the queue to prevent small availabilities from # draining the queue. Seen items will be ordered last. if reprocessSlot and request =? data.request: - let queue = sales.context.slotQueue - var seenItem = SlotQueueItem.init( - data.requestId, data.slotIndex.uint16, data.ask, request.expiry, seen = true - ) - trace "pushing ignored item to queue, marked as seen" - if err =? queue.push(seenItem).errorOption: - error "failed to readd slot to queue", errorType = $(type err), error = err.msg + try: + without collateral =? + await sales.context.market.slotCollateral(data.requestId, data.slotIndex), err: + error "Failed to re-add item back to the slot queue: unable to calculate collateral", + error = err.msg + return + + let queue = sales.context.slotQueue + var seenItem = SlotQueueItem.init( + data.requestId, + data.slotIndex.uint16, + data.ask, + request.expiry, + seen = true, + collateral = collateral, + ) + trace "pushing ignored item to queue, marked as seen" + if err =? queue.push(seenItem).errorOption: + error "failed to readd slot to queue", errorType = $(type err), error = err.msg + except MarketError as e: + error "Failed to re-add item back to the slot queue.", error = e.msg + return await sales.remove(agent) @@ -283,7 +298,7 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} = proc onStorageRequested( sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64 -) = +) {.raises: [].} = logScope: topics = "marketplace sales onStorageRequested" requestId @@ -294,7 +309,14 @@ proc onStorageRequested( trace "storage requested, adding slots to queue" - without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err: + let market = sales.context.market + + without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free), + err: + error "Request failure, unable to calculate collateral", error = err.msg + return + + without items =? 
SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err:
    if err of SlotsOutOfRangeError:
      warn "Too many slots, cannot add to queue"
    else:
      warn "Failed to create slot queue items from request", error = err.msg
    return
@@ -319,35 +341,45 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
 
   trace "slot freed, adding to queue"
 
-  proc addSlotToQueue() {.async: (raises: []).} =
+  proc addSlotToQueue() {.async: (raises: [CancelledError]).} =
     let context = sales.context
     let market = context.market
     let queue = context.slotQueue
 
+    without request =? (await market.getRequest(requestId)), err:
+      error "unknown request in contract", error = err.msgDetail
+      return
+
+    # Take the repairing state into consideration to calculate the collateral.
+    # This is particularly needed because it will affect the priority in the queue
+    # and we want to give the user the ability to tweak the parameters.
+    # Adding the repairing state directly in the queue priority calculation
+    # would not allow this flexibility.
+    without collateral =?
+      market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
+      error "Failed to add freed slot to queue: unable to calculate collateral",
+        error = err.msg
+      return
+
     if slotIndex > uint16.high.uint64:
       error "Cannot cast slot index to uint16, value = ", slotIndex
       return
 
-    # first attempt to populate request using existing metadata in queue
-    without var found =? queue.populateItem(requestId, slotIndex.uint16):
-      trace "no existing request metadata, getting request info from contract"
-      # if there's no existing slot for that request, retrieve the request
-      # from the contract.
-      try:
-        without request =? await market.getRequest(requestId):
-          error "unknown request in contract"
-          return
-
-        found = SlotQueueItem.init(request, slotIndex.uint16)
-      except CancelledError:
-        discard # do not propagate as addSlotToQueue was asyncSpawned
-      except CatchableError as e:
-        error "failed to get request from contract and add slots to queue",
-          error = e.msgDetail
-
-    if err =? queue.push(found).errorOption:
-      error "failed to push slot items to queue", error = err.msgDetail
+    without slotQueueItem =?
+      SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch, err:
+      warn "Too many slots, cannot add to queue", error = err.msgDetail
+      return
+
+    if err =? queue.push(slotQueueItem).errorOption:
+      if err of SlotQueueItemExistsError:
+        error "Failed to push item to queue because it already exists",
+          error = err.msgDetail
+      elif err of QueueNotRunningError:
+        warn "Failed to push item to queue because queue is not running",
+          error = err.msgDetail
+
+  # We could get rid of this by adding the storage ask in the SlotFreed event,
+  # so we would not need to call getRequest to get the collateralPerSlot.
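The Repair branch of slotCollateral used above is plain percentage arithmetic; a minimal, self-contained sketch of that computation, assuming a collateral of 1000 and the 10% repairRewardPercentage exercised by the contract tests later in this series (values are illustrative, not taken from the diff):

    import pkg/stint

    # Repairing hosts get a discount of repairRewardPercentage percent on the
    # collateral they must put up for the slot.
    let collateralPerSlot = 1000.u256
    let repairRewardPercentage = 10.u256
    let discounted =
      collateralPerSlot - (collateralPerSlot * repairRewardPercentage).div(100.u256)
    assert discounted == 900.u256
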
let fut = addSlotToQueue() sales.trackedFutures.track(fut) asyncSpawn fut @@ -356,7 +388,9 @@ proc subscribeRequested(sales: Sales) {.async.} = let context = sales.context let market = context.market - proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: uint64) = + proc onStorageRequested( + requestId: RequestId, ask: StorageAsk, expiry: uint64 + ) {.raises: [].} = sales.onStorageRequested(requestId, ask, expiry) try: diff --git a/codex/sales/salescontext.nim b/codex/sales/salescontext.nim index 6e6a3568..af940a4b 100644 --- a/codex/sales/salescontext.nim +++ b/codex/sales/salescontext.nim @@ -26,7 +26,7 @@ type BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].} OnStore* = proc( - request: StorageRequest, slot: uint64, blocksCb: BlocksCb + request: StorageRequest, slot: uint64, blocksCb: BlocksCb, isRepairing: bool ): Future[?!void] {.gcsafe, upraises: [].} OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {. gcsafe, upraises: [] diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim index a032d46b..fa57a983 100644 --- a/codex/sales/slotqueue.nim +++ b/codex/sales/slotqueue.nim @@ -34,7 +34,7 @@ type slotSize: uint64 duration: uint64 pricePerBytePerSecond: UInt256 - collateralPerByte: UInt256 + collateral: UInt256 # Collateral computed expiry: uint64 seen: bool @@ -76,9 +76,6 @@ proc profitability(item: SlotQueueItem): UInt256 = slotSize: item.slotSize, ).pricePerSlot -proc collateralPerSlot(item: SlotQueueItem): UInt256 = - StorageAsk(collateralPerByte: item.collateralPerByte, slotSize: item.slotSize).collateralPerSlot - proc `<`*(a, b: SlotQueueItem): bool = # for A to have a higher priority than B (in a min queue), A must be less than # B. @@ -95,8 +92,8 @@ proc `<`*(a, b: SlotQueueItem): bool = scoreA.addIf(a.profitability > b.profitability, 3) scoreB.addIf(a.profitability < b.profitability, 3) - scoreA.addIf(a.collateralPerSlot < b.collateralPerSlot, 2) - scoreB.addIf(a.collateralPerSlot > b.collateralPerSlot, 2) + scoreA.addIf(a.collateral < b.collateral, 2) + scoreB.addIf(a.collateral > b.collateral, 2) scoreA.addIf(a.expiry > b.expiry, 1) scoreB.addIf(a.expiry < b.expiry, 1) @@ -137,6 +134,7 @@ proc init*( slotIndex: uint16, ask: StorageAsk, expiry: uint64, + collateral: UInt256, seen = false, ): SlotQueueItem = SlotQueueItem( @@ -145,25 +143,32 @@ proc init*( slotSize: ask.slotSize, duration: ask.duration, pricePerBytePerSecond: ask.pricePerBytePerSecond, - collateralPerByte: ask.collateralPerByte, + collateral: collateral, expiry: expiry, seen: seen, ) proc init*( - _: type SlotQueueItem, request: StorageRequest, slotIndex: uint16 + _: type SlotQueueItem, + request: StorageRequest, + slotIndex: uint16, + collateral: UInt256, ): SlotQueueItem = - SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry) + SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry, collateral) proc init*( - _: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: uint64 -): seq[SlotQueueItem] = + _: type SlotQueueItem, + requestId: RequestId, + ask: StorageAsk, + expiry: uint64, + collateral: UInt256, +): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} = if not ask.slots.inRange: raise newException(SlotsOutOfRangeError, "Too many slots") var i = 0'u16 proc initSlotQueueItem(): SlotQueueItem = - let item = SlotQueueItem.init(requestId, i, ask, expiry) + let item = SlotQueueItem.init(requestId, i, ask, expiry, collateral) inc i return item @@ -171,8 +176,10 @@ proc init*( 
Rng.instance.shuffle(items) return items -proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] = - return SlotQueueItem.init(request.id, request.ask, request.expiry) +proc init*( + _: type SlotQueueItem, request: StorageRequest, collateral: UInt256 +): seq[SlotQueueItem] = + return SlotQueueItem.init(request.id, request.ask, request.expiry, collateral) proc inRange*(val: SomeUnsignedInt): bool = val.uint16 in SlotQueueSize.low .. SlotQueueSize.high @@ -234,25 +241,7 @@ proc unpause*(self: SlotQueue) = # set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait() self.unpaused.fire() -proc populateItem*( - self: SlotQueue, requestId: RequestId, slotIndex: uint16 -): ?SlotQueueItem = - trace "populate item, items in queue", len = self.queue.len - for item in self.queue.items: - trace "populate item search", itemRequestId = item.requestId, requestId - if item.requestId == requestId: - return some SlotQueueItem( - requestId: requestId, - slotIndex: slotIndex, - slotSize: item.slotSize, - duration: item.duration, - pricePerBytePerSecond: item.pricePerBytePerSecond, - collateralPerByte: item.collateralPerByte, - expiry: item.expiry, - ) - return none SlotQueueItem - -proc push*(self: SlotQueue, item: SlotQueueItem): ?!void = +proc push*(self: SlotQueue, item: SlotQueueItem): ?!void {.raises: [].} = logScope: requestId = item.requestId slotIndex = item.slotIndex diff --git a/codex/sales/states/downloading.nim b/codex/sales/states/downloading.nim index 39137545..7cf304d3 100644 --- a/codex/sales/states/downloading.nim +++ b/codex/sales/states/downloading.nim @@ -67,8 +67,11 @@ method run*( return await reservations.release(reservation.id, reservation.availabilityId, bytes) try: + let slotId = slotId(request.id, data.slotIndex) + let isRepairing = (await context.market.slotState(slotId)) == SlotState.Repair + trace "Starting download" - if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption: + if err =? (await onStore(request, data.slotIndex, onBlocks, isRepairing)).errorOption: return some State(SaleErrored(error: err, reprocessSlot: false)) trace "Download complete" diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim index 0c20a64e..03e2ef2b 100644 --- a/codex/sales/states/filling.nim +++ b/codex/sales/states/filling.nim @@ -38,18 +38,11 @@ method run*( slotIndex = data.slotIndex try: - let slotState = await market.slotState(slotId(data.requestId, data.slotIndex)) - let requestedCollateral = request.ask.collateralPerSlot - var collateral: UInt256 - - if slotState == SlotState.Repair: - # When repairing the node gets "discount" on the collateral that it needs to - let repairRewardPercentage = (await market.repairRewardPercentage).u256 - collateral = - requestedCollateral - - ((requestedCollateral * repairRewardPercentage)).div(100.u256) - else: - collateral = requestedCollateral + without collateral =? 
await market.slotCollateral(data.requestId, data.slotIndex), + err: + error "Failure attempting to fill slot: unable to calculate collateral", + error = err.msg + return debug "Filling slot" try: diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index 48b20f28..16806cb2 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -138,22 +138,35 @@ proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket = signer: Address.example, config: config, canReserveSlot: true, clock: clock ) +method loadConfig*( + market: MockMarket +): Future[?!void] {.async: (raises: [CancelledError]).} = + discard + method getSigner*(market: MockMarket): Future[Address] {.async.} = return market.signer -method periodicity*(mock: MockMarket): Future[Periodicity] {.async.} = +method periodicity*( + mock: MockMarket +): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} = return Periodicity(seconds: mock.config.proofs.period) -method proofTimeout*(market: MockMarket): Future[uint64] {.async.} = +method proofTimeout*( + market: MockMarket +): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} = return market.config.proofs.timeout method requestDurationLimit*(market: MockMarket): Future[uint64] {.async.} = return market.config.requestDurationLimit -method proofDowntime*(market: MockMarket): Future[uint8] {.async.} = +method proofDowntime*( + market: MockMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = return market.config.proofs.downtime -method repairRewardPercentage*(market: MockMarket): Future[uint8] {.async.} = +method repairRewardPercentage*( + market: MockMarket +): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = return market.config.collateral.repairRewardPercentage method getPointer*(market: MockMarket, slotId: SlotId): Future[uint8] {.async.} = @@ -173,7 +186,7 @@ method mySlots*(market: MockMarket): Future[seq[SlotId]] {.async.} = method getRequest*( market: MockMarket, id: RequestId -): Future[?StorageRequest] {.async.} = +): Future[?StorageRequest] {.async: (raises: [CancelledError]).} = for request in market.requested: if request.id == id: return some request @@ -191,10 +204,16 @@ method requestState*( ): Future[?RequestState] {.async.} = return market.requestState .? [requestId] -method slotState*(market: MockMarket, slotId: SlotId): Future[SlotState] {.async.} = - if not market.slotState.hasKey(slotId): +method slotState*( + market: MockMarket, slotId: SlotId +): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} = + if slotId notin market.slotState: return SlotState.Free - return market.slotState[slotId] + + try: + return market.slotState[slotId] + except KeyError as e: + raiseAssert "SlotId not found in known slots (MockMarket.slotState)" method getRequestEnd*( market: MockMarket, id: RequestId @@ -534,3 +553,33 @@ method unsubscribe*(subscription: ProofSubmittedSubscription) {.async.} = method unsubscribe*(subscription: SlotReservationsFullSubscription) {.async.} = subscription.market.subscriptions.onSlotReservationsFull.keepItIf(it != subscription) + +method slotCollateral*( + market: MockMarket, requestId: RequestId, slotIndex: uint64 +): Future[?!UInt256] {.async: (raises: [CancelledError]).} = + let slotid = slotId(requestId, slotIndex) + + try: + let state = await slotState(market, slotid) + + without request =? 
await market.getRequest(requestId): + return failure newException( + MarketError, "Failure calculating the slotCollateral, cannot get the request" + ) + + return market.slotCollateral(request.ask.collateralPerSlot, state) + except MarketError as error: + error "Error when trying to calculate the slotCollateral", error = error.msg + return failure error + +method slotCollateral*( + market: MockMarket, collateralPerSlot: UInt256, slotState: SlotState +): ?!UInt256 {.raises: [].} = + if slotState == SlotState.Repair: + let repairRewardPercentage = market.config.collateral.repairRewardPercentage.u256 + + return success ( + collateralPerSlot - (collateralPerSlot * repairRewardPercentage).div(100.u256) + ) + + return success collateralPerSlot diff --git a/tests/codex/helpers/mockslotqueueitem.nim b/tests/codex/helpers/mockslotqueueitem.nim index 7a1505ec..8657850f 100644 --- a/tests/codex/helpers/mockslotqueueitem.nim +++ b/tests/codex/helpers/mockslotqueueitem.nim @@ -7,7 +7,7 @@ type MockSlotQueueItem* = object slotSize*: uint64 duration*: uint64 pricePerBytePerSecond*: UInt256 - collateralPerByte*: UInt256 + collateral*: UInt256 expiry*: uint64 seen*: bool @@ -19,8 +19,8 @@ proc toSlotQueueItem*(item: MockSlotQueueItem): SlotQueueItem = slotSize: item.slotSize, duration: item.duration, pricePerBytePerSecond: item.pricePerBytePerSecond, - collateralPerByte: item.collateralPerByte, ), expiry = item.expiry, seen = item.seen, + collateral = item.collateral, ) diff --git a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim index 11f4f273..73dd8daf 100644 --- a/tests/codex/node/testcontracts.nim +++ b/tests/codex/node/testcontracts.nim @@ -125,7 +125,7 @@ asyncchecksuite "Test Node - Host contracts": fetchedBytes += blk.data.len.uint return success() - (await onStore(request, 1.uint64, onBlocks)).tryGet() + (await onStore(request, 1.uint64, onBlocks, isRepairing = false)).tryGet() check fetchedBytes == 12 * DefaultBlockSize.uint let indexer = verifiable.protectedStrategy.init( diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index f078cbee..e92f9607 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -62,7 +62,7 @@ asyncchecksuite "Sales - start": sales = Sales.new(market, clock, repo) reservations = sales.context.reservations sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = return success() @@ -181,7 +181,7 @@ asyncchecksuite "Sales": sales = Sales.new(market, clock, repo) reservations = sales.context.reservations sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = return success() @@ -229,7 +229,7 @@ asyncchecksuite "Sales": availability = a.get # update id proc notProcessed(itemsProcessed: seq[SlotQueueItem], request: StorageRequest): bool = - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) for i in 0 ..< items.len: if itemsProcessed.contains(items[i]): return false @@ -266,7 +266,7 @@ asyncchecksuite "Sales": done.complete() createAvailability() await market.requestStorage(request) - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check eventually 
items.allIt(itemsProcessed.contains(it)) test "removes slots from slot queue once RequestCancelled emitted": @@ -287,13 +287,15 @@ asyncchecksuite "Sales": test "removes slot index from slot queue once SlotFilled emitted": let request1 = await addRequestToSaturatedQueue() market.emitSlotFilled(request1.id, 1.uint64) - let expected = SlotQueueItem.init(request1, 1'u16) + let expected = + SlotQueueItem.init(request1, 1'u16, collateral = request1.ask.collateralPerSlot) check always (not itemsProcessed.contains(expected)) test "removes slot index from slot queue once SlotReservationsFull emitted": let request1 = await addRequestToSaturatedQueue() market.emitSlotReservationsFull(request1.id, 1.uint64) - let expected = SlotQueueItem.init(request1, 1'u16) + let expected = + SlotQueueItem.init(request1, 1'u16, collateral = request1.ask.collateralPerSlot) check always (not itemsProcessed.contains(expected)) test "adds slot index to slot queue once SlotFreed emitted": @@ -303,14 +305,21 @@ asyncchecksuite "Sales": createAvailability() market.requested.add request # "contract" must be able to return request + market.emitSlotFreed(request.id, 2.uint64) - let expected = SlotQueueItem.init(request, 2.uint16) + without collateralPerSlot =? await market.slotCollateral(request.id, 2.uint64), + error: + fail() + + let expected = + SlotQueueItem.init(request, 2.uint16, collateral = request.ask.collateralPerSlot) + check eventually itemsProcessed.contains(expected) test "items in queue are readded (and marked seen) once ignored": await market.requestStorage(request) - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check eventually queue.len > 0 # queue starts paused, allow items to be added to the queue check eventually queue.paused @@ -331,7 +340,7 @@ asyncchecksuite "Sales": test "queue is paused once availability is insufficient to service slots in queue": createAvailability() # enough to fill a single slot await market.requestStorage(request) - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check eventually queue.len > 0 # queue starts paused, allow items to be added to the queue check eventually queue.paused @@ -348,7 +357,7 @@ asyncchecksuite "Sales": test "availability size is reduced by request slot size when fully downloaded": sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = let blk = bt.Block.new(@[1.byte]).get await onBatch(blk.repeat(request.ask.slotSize.int)) @@ -361,7 +370,7 @@ asyncchecksuite "Sales": test "non-downloaded bytes are returned to availability once finished": var slotIndex = 0.uint64 sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = slotIndex = slot let blk = bt.Block.new(@[1.byte]).get @@ -421,7 +430,7 @@ asyncchecksuite "Sales": var storingRequest: StorageRequest var storingSlot: uint64 sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = storingRequest = request storingSlot = slot @@ -434,7 +443,7 @@ asyncchecksuite "Sales": test "makes storage available again when data retrieval fails": let error = 
newException(IOError, "data retrieval failed") sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = return failure(error) createAvailability() @@ -503,7 +512,7 @@ asyncchecksuite "Sales": test "makes storage available again when other host fills the slot": let otherHost = Address.example sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() @@ -519,7 +528,7 @@ asyncchecksuite "Sales": let origSize = availability.freeSize sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() @@ -544,7 +553,7 @@ asyncchecksuite "Sales": let origSize = availability.freeSize sales.onStore = proc( - request: StorageRequest, slot: uint64, onBatch: BatchProc + request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false ): Future[?!void] {.async.} = await sleepAsync(chronos.hours(1)) return success() diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim index 46c35b1c..03c658be 100644 --- a/tests/codex/sales/testslotqueue.nim +++ b/tests/codex/sales/testslotqueue.nim @@ -159,8 +159,10 @@ suite "Slot queue": requestB.ask.collateralPerByte = 1.u256 requestB.expiry = 1000.uint64 - let itemA = SlotQueueItem.init(requestA, 0) - let itemB = SlotQueueItem.init(requestB, 0) + let itemA = + SlotQueueItem.init(requestA, 0, collateral = requestA.ask.collateralPerSlot) + let itemB = + SlotQueueItem.init(requestB, 0, collateral = requestB.ask.collateralPerSlot) check itemB < itemA # B higher priority than A check itemA > itemB @@ -172,7 +174,7 @@ suite "Slot queue": slotSize: 1.uint64, duration: 1.uint64, pricePerBytePerSecond: 2.u256, # profitability is higher (good) - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 1.uint64, seen: true, # seen (bad), more weight than profitability ) @@ -182,7 +184,7 @@ suite "Slot queue": slotSize: 1.uint64, duration: 1.uint64, pricePerBytePerSecond: 1.u256, # profitability is lower (bad) - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 1.uint64, seen: false, # not seen (good) ) @@ -197,7 +199,7 @@ suite "Slot queue": slotSize: 1.uint64, duration: 1.uint64, pricePerBytePerSecond: 1.u256, # reward is lower (bad) - collateralPerByte: 1.u256, # collateral is lower (good) + collateral: 1.u256, # collateral is lower (good) expiry: 1.uint64, seen: false, ) @@ -208,7 +210,7 @@ suite "Slot queue": duration: 1.uint64, pricePerBytePerSecond: 2.u256, # reward is higher (good), more weight than collateral - collateralPerByte: 2.u256, # collateral is higher (bad) + collateral: 2.u256, # collateral is higher (bad) expiry: 1.uint64, seen: false, ) @@ -223,7 +225,7 @@ suite "Slot queue": slotSize: 1.uint64, duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 2.u256, # collateral is higher (bad) + collateral: 2.u256, # collateral is higher (bad) expiry: 2.uint64, # expiry is longer (good) seen: false, ) @@ -233,7 +235,7 @@ suite "Slot queue": slotSize: 1.uint64, duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, # collateral is lower (good), more weight than expiry + collateral: 
1.u256, # collateral is lower (good), more weight than expiry expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) @@ -248,7 +250,7 @@ suite "Slot queue": slotSize: 1.uint64, # slotSize is smaller (good) duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) @@ -258,7 +260,7 @@ suite "Slot queue": slotSize: 2.uint64, # slotSize is larger (bad) duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 2.uint64, # expiry is longer (good), more weight than slotSize seen: false, ) @@ -273,7 +275,7 @@ suite "Slot queue": slotSize: 2.uint64, # slotSize is larger (bad) duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 1.uint64, # expiry is shorter (bad) seen: false, ) @@ -283,7 +285,7 @@ suite "Slot queue": slotSize: 1.uint64, # slotSize is smaller (good) duration: 1.uint64, pricePerBytePerSecond: 1.u256, - collateralPerByte: 1.u256, + collateral: 1.u256, expiry: 1.uint64, seen: false, ) @@ -292,11 +294,16 @@ suite "Slot queue": test "expands available all possible slot indices on init": let request = StorageRequest.example - let items = SlotQueueItem.init(request) + let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check items.len.uint64 == request.ask.slots var checked = 0 for slotIndex in 0'u16 ..< request.ask.slots.uint16: - check items.anyIt(it == SlotQueueItem.init(request, slotIndex)) + check items.anyIt( + it == + SlotQueueItem.init( + request, slotIndex, collateral = request.ask.collateralPerSlot + ) + ) inc checked check checked == items.len @@ -322,34 +329,17 @@ suite "Slot queue": check isOk queue.push(item3) check isOk queue.push(item4) - test "populates item with exisiting request metadata": - newSlotQueue(maxSize = 8, maxWorkers = 1, processSlotDelay = 10.millis) - let request0 = StorageRequest.example - var request1 = StorageRequest.example - request1.ask.collateralPerByte += 1.u256 - let items0 = SlotQueueItem.init(request0) - let items1 = SlotQueueItem.init(request1) - check queue.push(items0).isOk - check queue.push(items1).isOk - let populated = !queue.populateItem(request1.id, 12'u16) - check populated.requestId == request1.id - check populated.slotIndex == 12'u16 - check populated.slotSize == request1.ask.slotSize - check populated.duration == request1.ask.duration - check populated.pricePerBytePerSecond == request1.ask.pricePerBytePerSecond - check populated.collateralPerByte == request1.ask.collateralPerByte - - test "does not find exisiting request metadata": - newSlotQueue(maxSize = 2, maxWorkers = 2) - let item = SlotQueueItem.example - check queue.populateItem(item.requestId, 12'u16).isNone - test "can support uint16.high slots": var request = StorageRequest.example let maxUInt16 = uint16.high let uint64Slots = uint64(maxUInt16) request.ask.slots = uint64Slots - let items = SlotQueueItem.init(request.id, request.ask, request.expiry) + let items = SlotQueueItem.init( + request.id, + request.ask, + request.expiry, + collateral = request.ask.collateralPerSlot, + ) check items.len.uint16 == maxUInt16 test "cannot support greater than uint16.high slots": @@ -358,7 +348,12 @@ suite "Slot queue": let uint64Slots = uint64(int32Slots) request.ask.slots = uint64Slots expect SlotsOutOfRangeError: - discard SlotQueueItem.init(request.id, request.ask, request.expiry) + discard SlotQueueItem.init( + request.id, + request.ask, + 
request.expiry, + collateral = request.ask.collateralPerSlot, + ) test "cannot push duplicate items": newSlotQueue(maxSize = 6, maxWorkers = 1, processSlotDelay = 15.millis) @@ -399,8 +394,10 @@ suite "Slot queue": let request0 = StorageRequest.example var request1 = StorageRequest.example request1.ask.collateralPerByte += 1.u256 - let items0 = SlotQueueItem.init(request0) - let items1 = SlotQueueItem.init(request1) + let items0 = + SlotQueueItem.init(request0, collateral = request0.ask.collateralPerSlot) + let items1 = + SlotQueueItem.init(request1, collateral = request1.ask.collateralPerSlot) check queue.push(items0).isOk check queue.push(items1).isOk let last = items1[items1.high] @@ -413,8 +410,10 @@ suite "Slot queue": let request0 = StorageRequest.example var request1 = StorageRequest.example request1.ask.collateralPerByte += 1.u256 - let items0 = SlotQueueItem.init(request0) - let items1 = SlotQueueItem.init(request1) + let items0 = + SlotQueueItem.init(request0, collateral = request0.ask.collateralPerSlot) + let items1 = + SlotQueueItem.init(request1, collateral = request1.ask.collateralPerSlot) check queue.push(items0).isOk check queue.push(items1).isOk queue.delete(request1.id) @@ -433,42 +432,56 @@ suite "Slot queue": request3.ask.collateralPerByte = request2.ask.collateralPerByte + 1 request4.ask.collateralPerByte = request3.ask.collateralPerByte + 1 request5.ask.collateralPerByte = request4.ask.collateralPerByte + 1 - let item0 = SlotQueueItem.init(request0, 0) - let item1 = SlotQueueItem.init(request1, 0) - let item2 = SlotQueueItem.init(request2, 0) - let item3 = SlotQueueItem.init(request3, 0) - let item4 = SlotQueueItem.init(request4, 0) - let item5 = SlotQueueItem.init(request5, 0) + let item0 = + SlotQueueItem.init(request0, 0, collateral = request0.ask.collateralPerSlot) + let item1 = + SlotQueueItem.init(request1, 0, collateral = request1.ask.collateralPerSlot) + let item2 = + SlotQueueItem.init(request2, 0, collateral = request2.ask.collateralPerSlot) + let item3 = + SlotQueueItem.init(request3, 0, collateral = request3.ask.collateralPerSlot) + let item4 = + SlotQueueItem.init(request4, 0, collateral = request4.ask.collateralPerSlot) + let item5 = + SlotQueueItem.init(request5, 0, collateral = request5.ask.collateralPerSlot) check queue.contains(item5) == false check queue.push(@[item0, item1, item2, item3, item4, item5]).isOk check queue.contains(item5) test "sorts items by profitability descending (higher pricePerBytePerSecond == higher priority == goes first in the list)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) check item1 < item0 - test "sorts items by collateral ascending (higher required collateralPerByte = lower priority == comes later in the list)": + test "sorts items by collateral ascending (higher required collateral = lower priority == comes later in the list)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) - request.ask.collateralPerByte += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) + let item1 = SlotQueueItem.init( + request, 1, collateral = request.ask.collateralPerSlot + 1.u256 + ) check item1 > item0 test 
"sorts items by expiry descending (longer expiry = higher priority)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.expiry += 1 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) check item1 < item0 test "sorts items by slot size descending (bigger dataset = higher profitability = higher priority)": var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.ask.slotSize += 1 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) check item1 < item0 test "should call callback once an item is added": @@ -489,13 +502,17 @@ suite "Slot queue": # sleeping after push allows the slotqueue loop to iterate, # calling the callback for each pushed/updated item var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item2 = SlotQueueItem.init(request, 2) + let item2 = + SlotQueueItem.init(request, 2, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item3 = SlotQueueItem.init(request, 3) + let item3 = + SlotQueueItem.init(request, 3, collateral = request.ask.collateralPerSlot) check queue.push(item0).isOk await sleepAsync(1.millis) @@ -520,13 +537,17 @@ suite "Slot queue": # sleeping after push allows the slotqueue loop to iterate, # calling the callback for each pushed/updated item var request = StorageRequest.example - let item0 = SlotQueueItem.init(request, 0) + let item0 = + SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item1 = SlotQueueItem.init(request, 1) + let item1 = + SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item2 = SlotQueueItem.init(request, 2) + let item2 = + SlotQueueItem.init(request, 2, collateral = request.ask.collateralPerSlot) request.ask.pricePerBytePerSecond += 1.u256 - let item3 = SlotQueueItem.init(request, 3) + let item3 = + SlotQueueItem.init(request, 3, collateral = request.ask.collateralPerSlot) check queue.push(item0).isOk check queue.push(item1).isOk @@ -550,7 +571,7 @@ suite "Slot queue": queue.pause let request = StorageRequest.example - var items = SlotQueueItem.init(request) + var items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) check queue.push(items).isOk # check all items processed check eventually queue.len == 0 @@ -558,8 +579,14 @@ suite "Slot queue": test "pushing seen item does not unpause queue": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let item0 = - SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true) + let item0 = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) check queue.paused check queue.push(item0).isOk check queue.paused 
@@ -567,8 +594,14 @@ suite "Slot queue": test "paused queue waits for unpause before continuing processing": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let item = - SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = false) + let item = SlotQueueItem.init( + request.id, + 1'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = false, + ) check queue.paused # push causes unpause check queue.push(item).isOk @@ -579,10 +612,22 @@ suite "Slot queue": test "processing a 'seen' item pauses the queue": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let unseen = - SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false) - let seen = - SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) + let unseen = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = false, + ) + let seen = SlotQueueItem.init( + request.id, + 1'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) # push causes unpause check queue.push(unseen).isSuccess # check all items processed @@ -595,10 +640,22 @@ suite "Slot queue": test "processing a 'seen' item does not decrease the number of workers": newSlotQueue(maxSize = 4, maxWorkers = 4) let request = StorageRequest.example - let unseen = - SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false) - let seen = - SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) + let unseen = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = false, + ) + let seen = SlotQueueItem.init( + request.id, + 1'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) # push seen item to ensure that queue is pausing check queue.push(seen).isSuccess # unpause and pause a number of times @@ -615,10 +672,22 @@ suite "Slot queue": test "item 'seen' flags can be cleared": newSlotQueue(maxSize = 4, maxWorkers = 1) let request = StorageRequest.example - let item0 = - SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true) - let item1 = - SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true) + let item0 = SlotQueueItem.init( + request.id, + 0'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) + let item1 = SlotQueueItem.init( + request.id, + 1'u16, + request.ask, + request.expiry, + request.ask.collateralPerSlot, + seen = true, + ) check queue.push(item0).isOk check queue.push(item1).isOk check queue[0].seen diff --git a/tests/contracts/testMarket.nim b/tests/contracts/testMarket.nim index 74d6a65e..068a4d2e 100644 --- a/tests/contracts/testMarket.nim +++ b/tests/contracts/testMarket.nim @@ -598,6 +598,37 @@ ethersuite "On-Chain Market": check endBalanceHost == (startBalanceHost + request.ask.collateralPerSlot) check endBalanceReward == (startBalanceReward + expectedPayout) + test "returns the collateral when the slot is not being repaired": + await market.requestStorage(request) + await market.reserveSlot(request.id, 0.uint64) + await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot) + + let slotId = request.slotId(0.uint64) + without collateral =? 
await market.slotCollateral(request.id, 0.uint64), error:
+      fail()
+
+    check collateral == request.ask.collateralPerSlot
+
+  test "correctly calculates the collateral when the slot is being repaired":
+    # Ensure that the config is loaded and repairRewardPercentage is available
+    discard await market.repairRewardPercentage()
+
+    await market.requestStorage(request)
+    await market.reserveSlot(request.id, 0.uint64)
+    await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot)
+    await market.freeSlot(slotId(request.id, 0.uint64))
+
+    let slotId = request.slotId(0.uint64)
+
+    without collateral =? await market.slotCollateral(request.id, 0.uint64), error:
+      fail()
+
+    # repairRewardPercentage = 10, so the expected collateral is
+    # collateralPerSlot - collateralPerSlot * 0.1
+    check collateral ==
+      request.ask.collateralPerSlot - (request.ask.collateralPerSlot * 10).div(100.u256)
+
+  test "the request is added in cache after the first access":
+    await market.requestStorage(request)

diff --git a/tests/examples.nim b/tests/examples.nim
index 9b88b4a5..9ef4e292 100644
--- a/tests/examples.nim
+++ b/tests/examples.nim
@@ -72,7 +72,9 @@ proc example*(_: type Slot): Slot =
 proc example*(_: type SlotQueueItem): SlotQueueItem =
   let request = StorageRequest.example
   let slot = Slot.example
-  SlotQueueItem.init(request, slot.slotIndex.uint16)
+  SlotQueueItem.init(
+    request, slot.slotIndex.uint16, collateral = request.ask.collateralPerSlot
+  )
 
 proc example(_: type G1Point): G1Point =
   G1Point(x: UInt256.example, y: UInt256.example)

From eb09e610d5e1c649f32877d4d924332677a5fdd4 Mon Sep 17 00:00:00 2001
From: Arnaud
Date: Wed, 5 Mar 2025 09:35:46 +0100
Subject: [PATCH 02/14] fix(ci): handle coverage as a string to enable gcc 14
 on linux (#1140)

* Handle coverage as a string not a boolean
* Update ubuntu version to latest
---
 .github/actions/nimbus-build-system/action.yml | 4 ++--
 .github/workflows/nim-matrix.yml               | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/.github/actions/nimbus-build-system/action.yml b/.github/actions/nimbus-build-system/action.yml
index 219966db..5d1917e3 100644
--- a/.github/actions/nimbus-build-system/action.yml
+++ b/.github/actions/nimbus-build-system/action.yml
@@ -89,7 +89,7 @@ runs:
 
     - name: Install gcc 14 on Linux
       # We don't want to install gcc 14 for coverage (Ubuntu 20.04)
-      if : ${{ inputs.os == 'linux' && !inputs.coverage }}
+      if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
      shell: ${{ inputs.shell }} {0}
      run: |
        # Add GCC-14 to alternatives
@@ -202,7 +202,7 @@ runs:
     - name: Restore Nim toolchain binaries from cache
      id: nim-cache
      uses: actions/cache@v4
-      if : ${{ !inputs.coverage }}
+      if : ${{ inputs.coverage != 'true' }}
      with:
        path: NimBinaries
        key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}

diff --git a/.github/workflows/nim-matrix.yml b/.github/workflows/nim-matrix.yml
index 4d86d3bb..71129574 100644
--- a/.github/workflows/nim-matrix.yml
+++ b/.github/workflows/nim-matrix.yml
@@ -20,10 +20,10 @@ jobs:
         uses: fabiocaccamo/create-matrix-action@v5
         with:
           matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration},
nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} build: needs: matrix From 2a3a29720f8c3a6d8cb64c6e463dc3af0cf45c8b Mon Sep 17 00:00:00 2001 From: Giuliano Mega Date: Mon, 10 Mar 2025 10:27:16 -0300 Subject: [PATCH 03/14] Fixes Codex crashes on interrupted downloads (#1151) * fix: fixes Codex crashes on interrupted downloads * fix: add better feedback to 404, minor rewording in test comment --- codex/rest/api.nim | 24 +++++++++++++++++++----- tests/integration/testrestapi.nim | 30 ++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 5 deletions(-) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 89dbe220..6b8f2ac1 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -65,9 +65,15 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} = return %RestContentList.init(content) +proc isPending(resp: HttpResponseRef): bool = + ## Checks that an HttpResponseRef object is still pending; i.e., + ## that no body has yet been sent. This helps us guard against calling + ## sendBody(resp: HttpResponseRef, ...) twice, which is illegal. + return resp.getResponseState() == HttpResponseState.Empty + proc retrieveCid( node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef -): Future[RestApiResponse] {.async.} = +): Future[void] {.async: (raises: [CancelledError, HttpWriteError]).} = ## Download a file from the node in a streaming ## manner ## @@ -79,16 +85,21 @@ proc retrieveCid( without stream =? (await node.retrieve(cid, local)), error: if error of BlockNotFoundError: resp.status = Http404 - return await resp.sendBody("") + await resp.sendBody( + "The requested CID could not be retrieved (" & error.msg & ")." + ) + return else: resp.status = Http500 - return await resp.sendBody(error.msg) + await resp.sendBody(error.msg) + return # It is ok to fetch again the manifest because it will hit the cache without manifest =? 
(await node.fetchManifest(cid)), err: error "Failed to fetch manifest", err = err.msg resp.status = Http404 - return await resp.sendBody(err.msg) + await resp.sendBody(err.msg) + return if manifest.mimetype.isSome: resp.setHeader("Content-Type", manifest.mimetype.get()) @@ -119,10 +130,13 @@ proc retrieveCid( await resp.sendChunk(addr buff[0], buff.len) await resp.finish() codex_api_downloads.inc() + except CancelledError as exc: + raise exc except CatchableError as exc: warn "Error streaming blocks", exc = exc.msg resp.status = Http500 - return await resp.sendBody("") + if resp.isPending(): + await resp.sendBody(exc.msg) finally: info "Sent bytes", cid = cid, bytes if not stream.isNil: diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 2311dc22..20bf8bc8 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -1,4 +1,6 @@ import std/httpclient +import std/importutils +import std/net import std/sequtils import std/strformat from pkg/libp2p import `==`, `$`, Cid @@ -305,3 +307,31 @@ twonodessuite "REST API": let cid = Manifest.example().makeManifestBlock().get.cid let response = client1.deleteRaw($cid) check response.status == "204 No Content" + + test "should not crash if the download stream is closed before download completes", + twoNodesConfig: + privateAccess(client1.type) + privateAccess(client1.http.type) + + let cid = client1.upload(repeat("some file contents", 1000)).get + + try: + # Sadly, there's no high level API for preventing the client from + # consuming the whole response, and we need to close the socket + # before that happens if we want to trigger the bug, so we need to + # resort to this. + client1.http.getBody = false + let response = client1.downloadRaw($cid) + + # Read 4 bytes from the stream just to make sure we actually + # receive some data. + let data = client1.http.socket.recv(4) + check data.len == 4 + + # Prematurely closes the connection. 
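+      # Closing before the body is consumed is what used to crash the node:
+      # the streaming handler would call sendBody on a response that was
+      # already partially written, which the isPending() guard in retrieveCid
+      # now prevents.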
+ client1.http.close() + finally: + client1.http.getBody = true + + let response = client1.downloadRaw($cid) + check response.body == repeat("some file contents", 1000) From 703921df322e2c32b4f12786fc48e30989b025ca Mon Sep 17 00:00:00 2001 From: Arnaud Date: Mon, 10 Mar 2025 16:59:24 +0100 Subject: [PATCH 04/14] chore(restapi): add headers to support progress reporting when downloading (#1150) * Add headers to support progress reporting on download * Replace http session by http client in downloadBytes * Use int instead of int64 for datasetSize * Rename variable to avoid shadowing client --- codex/rest/api.nim | 3 +++ tests/integration/codexclient.nim | 12 ++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 6b8f2ac1..7cb0b43f 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -114,6 +114,8 @@ proc retrieveCid( else: resp.setHeader("Content-Disposition", "attachment") + resp.setHeader("Content-Length", $manifest.datasetSize.int) + await resp.prepareChunked() while not stream.atEof: @@ -342,6 +344,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute resp.setCorsHeaders("GET", corsOrigin) resp.setHeader("Access-Control-Headers", "X-Requested-With") + resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition") await node.retrieveCid(cid.get(), local = false, resp = resp) router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do( diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index 287f465f..f4c3f977 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -76,15 +76,15 @@ proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = proc downloadBytes*( client: CodexClient, cid: Cid, local = false ): Future[?!seq[byte]] {.async.} = - let uri = - parseUri(client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream")) + let uri = client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") - let (status, bytes) = await client.session.fetch(uri) + let httpClient = newHttpClient() + let response = httpClient.get(uri) - if status != 200: - return failure("fetch failed with status " & $status) + if response.status != "200 OK": + return failure("fetch failed with status " & $response.status) - success bytes + success response.body.toBytes proc delete*(client: CodexClient, cid: Cid): ?!void = let From 17d3bb55cf63e3fe36724f28184035035c6a0aa9 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 12 Mar 2025 10:12:06 +0100 Subject: [PATCH 05/14] chore(marketplace): notify sales when duration, minPricePerBytePerSecond or totalCollateral is updated (#1148) * Call onAvailabilityAdded when freeSize, duration or minPricePerBytePerSecond is increased * Rename onAvailabilityAdded to onAvailabilitySaved * Rename OnAvailabilitySaved to OnAvailabilityUpserted * Go back to OnAvailabilitySaved
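For illustration, the subscription pattern after this change looks roughly as follows (a minimal sketch: the callback body is invented, while the `onAvailabilitySaved=` setter, the `reservations` instance and the `Availability` fields are the ones used in the diff below):

    proc onAvailabilitySaved(availability: Availability) {.async.} =
      # runs for newly created availabilities and for updates that increase
      # freeSize, duration, minPricePerBytePerSecond or totalCollateral
      echo "availability saved, free bytes: ", availability.freeSize

    reservations.onAvailabilitySaved = onAvailabilitySaved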
--- codex/sales.nim | 8 +-- codex/sales/reservations.nim | 34 ++++++------ tests/codex/sales/testreservations.nim | 72 +++++++++++++++++++++++--- 3 files changed, 88 insertions(+), 26 deletions(-) diff --git a/codex/sales.nim b/codex/sales.nim index af594a9a..e2a884df 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -285,7 +285,7 @@ proc load*(sales: Sales) {.async.} = agent.start(SaleUnknown()) sales.agents.add agent -proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} = +proc onAvailabilitySaved(sales: Sales, availability: Availability) {.async.} = ## When availabilities are modified or added, the queue should be unpaused if ## it was paused and any slots in the queue should have their `seen` flag ## cleared. @@ -528,10 +528,10 @@ proc startSlotQueue(sales: Sales) = slotQueue.start() - proc onAvailabilityAdded(availability: Availability) {.async.} = - await sales.onAvailabilityAdded(availability) + proc onAvailabilitySaved(availability: Availability) {.async.} = + await sales.onAvailabilitySaved(availability) - reservations.onAvailabilityAdded = onAvailabilityAdded + reservations.onAvailabilitySaved = onAvailabilitySaved proc subscribe(sales: Sales) {.async.} = await sales.subscribeRequested() diff --git a/codex/sales/reservations.nim b/codex/sales/reservations.nim index a64cb602..25ee2b99 100644 --- a/codex/sales/reservations.nim +++ b/codex/sales/reservations.nim @@ -82,11 +82,11 @@ type availabilityLock: AsyncLock # Lock for protecting assertions of availability's sizes when searching for matching availability repo: RepoStore - onAvailabilityAdded: ?OnAvailabilityAdded + onAvailabilitySaved: ?OnAvailabilitySaved GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.} IterDispose* = proc(): Future[?!void] {.gcsafe, closure.} - OnAvailabilityAdded* = + OnAvailabilitySaved* = proc(availability: Availability): Future[void] {.upraises: [], gcsafe.} StorableIter* = ref object finished*: bool @@ -189,10 +189,10 @@ logutils.formatIt(LogFormat.textLines, SomeStorableId): logutils.formatIt(LogFormat.json, SomeStorableId): it.to0xHexLog -proc `onAvailabilityAdded=`*( - self: Reservations, onAvailabilityAdded: OnAvailabilityAdded +proc `onAvailabilitySaved=`*( + self: Reservations, onAvailabilitySaved: OnAvailabilitySaved ) = - self.onAvailabilityAdded = some onAvailabilityAdded + self.onAvailabilitySaved = some onAvailabilitySaved func key*(id: AvailabilityId): ?!Key = ## sales / reservations / @@ -268,18 +268,18 @@ proc updateAvailability( trace "Creating new Availability" let res = await self.updateImpl(obj) # inform subscribers that Availability has been added - if onAvailabilityAdded =? self.onAvailabilityAdded: - # when chronos v4 is implemented, and OnAvailabilityAdded is annotated + if onAvailabilitySaved =? self.onAvailabilitySaved: + # when chronos v4 is implemented, and OnAvailabilitySaved is annotated # with async:(raises:[]), we can remove this try/catch as we know, with # certainty, that nothing will be raised try: - await onAvailabilityAdded(obj) + await onAvailabilitySaved(obj) except CancelledError as e: raise e except CatchableError as e: # we don't have any insight into types of exceptions that - # `onAvailabilityAdded` can raise because it is caller-defined - warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg + # `onAvailabilitySaved` can raise because it is caller-defined + warn "Unknown error during 'onAvailabilitySaved' callback", error = e.msg return res else: return failure(err) @@ -300,21 +300,23 @@ proc updateAvailability( let res = await self.updateImpl(obj) - if oldAvailability.freeSize < obj.freeSize: # availability added + if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or + oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or + oldAvailability.totalCollateral < obj.totalCollateral: # availability updated # inform subscribers that Availability has been modified (with increased # size) - if onAvailabilityAdded =? self.onAvailabilityAdded: - # when chronos v4 is implemented, and OnAvailabilityAdded is annotated + if onAvailabilitySaved =?
self.onAvailabilitySaved: + # when chronos v4 is implemented, and OnAvailabilitySaved is annotated # with async:(raises:[]), we can remove this try/catch as we know, with # certainty, that nothing will be raised try: - await onAvailabilityAdded(obj) + await onAvailabilitySaved(obj) except CancelledError as e: raise e except CatchableError as e: # we don't have any insight into types of exceptions that - # `onAvailabilityAdded` can raise because it is caller-defined - warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg + # `onAvailabilitySaved` can raise because it is caller-defined + warn "Unknown error during 'onAvailabilitySaved' callback", error = e.msg return res diff --git a/tests/codex/sales/testreservations.nim b/tests/codex/sales/testreservations.nim index 79fc3626..49df059d 100644 --- a/tests/codex/sales/testreservations.nim +++ b/tests/codex/sales/testreservations.nim @@ -283,35 +283,95 @@ asyncchecksuite "Reservations module": check updated.isErr check updated.error of NotExistsError - test "onAvailabilityAdded called when availability is created": + test "onAvailabilitySaved called when availability is created": var added: Availability - reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + reservations.onAvailabilitySaved = proc(a: Availability) {.async.} = added = a let availability = createAvailability() check added == availability - test "onAvailabilityAdded called when availability size is increased": + test "onAvailabilitySaved called when availability size is increased": var availability = createAvailability() var added: Availability - reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + reservations.onAvailabilitySaved = proc(a: Availability) {.async.} = added = a availability.freeSize += 1 discard await reservations.update(availability) check added == availability - test "onAvailabilityAdded is not called when availability size is decreased": + test "onAvailabilitySaved is not called when availability size is decreased": var availability = createAvailability() var called = false - reservations.onAvailabilityAdded = proc(a: Availability) {.async.} = + reservations.onAvailabilitySaved = proc(a: Availability) {.async.} = called = true availability.freeSize -= 1 discard await reservations.update(availability) check not called + test "onAvailabilitySaved called when availability duration is increased": + var availability = createAvailability() + var added: Availability + reservations.onAvailabilitySaved = proc(a: Availability) {.async.} = + added = a + availability.duration += 1 + discard await reservations.update(availability) + + check added == availability + + test "onAvailabilitySaved is not called when availability duration is decreased": + var availability = createAvailability() + var called = false + reservations.onAvailabilitySaved = proc(a: Availability) {.async.} = + called = true + availability.duration -= 1 + discard await reservations.update(availability) + + check not called + + test "onAvailabilitySaved called when availability minPricePerBytePerSecond is increased": + var availability = createAvailability() + var added: Availability + reservations.onAvailabilitySaved = proc(a: Availability) {.async.} = + added = a + availability.minPricePerBytePerSecond += 1.u256 + discard await reservations.update(availability) + + check added == availability + + test "onAvailabilitySaved is not called when availability minPricePerBytePerSecond is decreased": + var availability = createAvailability() + var called =
false + reservations.onAvailabilitySaved = proc(a: Availability) {.async.} = + called = true + availability.minPricePerBytePerSecond -= 1.u256 + discard await reservations.update(availability) + + check not called + + test "onAvailabilitySaved called when availability totalCollateral is increased": + var availability = createAvailability() + var added: Availability + reservations.onAvailabilitySaved = proc(a: Availability) {.async.} = + added = a + availability.totalCollateral = availability.totalCollateral + 1.u256 + discard await reservations.update(availability) + + check added == availability + + test "onAvailabilitySaved is not called when availability totalCollateral is decreased": + var availability = createAvailability() + var called = false + reservations.onAvailabilitySaved = proc(a: Availability) {.async.} = + called = true + availability.totalCollateral = availability.totalCollateral - 1.u256 + discard await reservations.update(availability) + + check not called + test "availabilities can be found": let availability = createAvailability() From 2538ff8da397f9afe94b866ae725a7d20f41d925 Mon Sep 17 00:00:00 2001 From: Arnaud Date: Wed, 12 Mar 2025 14:41:00 +0100 Subject: [PATCH 06/14] chore: create new httpClient per request (#1136) * Create new httpClient per request * Fix tests after rebase and close the clients at the end --- tests/integration/codexclient.nim | 86 ++++++++++++++-------------- tests/integration/testpurchasing.nim | 1 - tests/integration/testrestapi.nim | 13 +++-- 3 files changed, 51 insertions(+), 49 deletions(-) diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index f4c3f977..4a106253 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -14,33 +14,37 @@ import pkg/codex/sales/reservations export purchasing type CodexClient* = ref object - http: HttpClient baseurl: string - session: HttpSessionRef + httpClients: seq[HttpClient] type CodexClientError* = object of CatchableError const HttpClientTimeoutMs = 60 * 1000 proc new*(_: type CodexClient, baseurl: string): CodexClient = - CodexClient( - http: newHttpClient(timeout = HttpClientTimeoutMs), - baseurl: baseurl, - session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline}), - ) + CodexClient(baseurl: baseurl, httpClients: newSeq[HttpClient]()) + +proc http*(client: CodexClient): HttpClient = + let httpClient = newHttpClient(timeout = HttpClientTimeoutMs) + client.httpClients.insert(httpClient) + return httpClient + +proc close*(client: CodexClient): void = + for httpClient in client.httpClients: + httpClient.close() proc info*(client: CodexClient): ?!JsonNode = let url = client.baseurl & "/debug/info" - JsonNode.parse(client.http.getContent(url)) + JsonNode.parse(client.http().getContent(url)) proc setLogLevel*(client: CodexClient, level: string) = let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level let headers = newHttpHeaders({"Content-Type": "text/plain"}) - let response = client.http.request(url, httpMethod = HttpPost, headers = headers) + let response = client.http().request(url, httpMethod = HttpPost, headers = headers) assert response.status == "200 OK" proc upload*(client: CodexClient, contents: string): ?!Cid = - let response = client.http.post(client.baseurl & "/data", contents) + let response = client.http().post(client.baseurl & "/data", contents) assert response.status == "200 OK" Cid.init(response.body).mapFailure proc upload*(client: CodexClient, bytes: seq[byte]): ?!Cid =
client.upload(string.fromBytes(bytes)) proc download*(client: CodexClient, cid: Cid, local = false): ?!string = - let response = client.http.get( - client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") - ) + let response = client.http().get( + client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") + ) if response.status != "200 OK": return failure(response.status) @@ -58,7 +62,8 @@ proc download*(client: CodexClient, cid: Cid, local = false): ?!string = success response.body proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string = - let response = client.http.get(client.baseurl & "/data/" & $cid & "/network/manifest") + let response = + client.http().get(client.baseurl & "/data/" & $cid & "/network/manifest") if response.status != "200 OK": return failure(response.status) @@ -66,7 +71,7 @@ proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string = success response.body proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = - let response = client.http.post(client.baseurl & "/data/" & $cid & "/network") + let response = client.http().post(client.baseurl & "/data/" & $cid & "/network") if response.status != "200 OK": return failure(response.status) @@ -78,8 +83,7 @@ proc downloadBytes*( ): Future[?!seq[byte]] {.async.} = let uri = client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") - let httpClient = newHttpClient() - let response = httpClient.get(uri) + let response = client.http().get(uri) if response.status != "200 OK": return failure("fetch failed with status " & $response.status) @@ -89,7 +93,7 @@ proc downloadBytes*( proc delete*(client: CodexClient, cid: Cid): ?!void = let url = client.baseurl & "/data/" & $cid - response = client.http.delete(url) + response = client.http().delete(url) if response.status != "204 No Content": return failure(response.status) @@ -98,7 +102,7 @@ proc delete*(client: CodexClient, cid: Cid): ?!void = proc list*(client: CodexClient): ?!RestContentList = let url = client.baseurl & "/data" - let response = client.http.get(url) + let response = client.http().get(url) if response.status != "200 OK": return failure(response.status) @@ -107,7 +111,7 @@ proc list*(client: CodexClient): ?!RestContentList = proc space*(client: CodexClient): ?!RestRepoStore = let url = client.baseurl & "/space" - let response = client.http.get(url) + let response = client.http().get(url) if response.status != "200 OK": return failure(response.status) @@ -141,7 +145,7 @@ proc requestStorageRaw*( if expiry != 0: json["expiry"] = %($expiry) - return client.http.post(url, $json) + return client.http().post(url, $json) proc requestStorage*( client: CodexClient, @@ -167,7 +171,7 @@ proc requestStorage*( proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase = let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex try: - let body = client.http.getContent(url) + let body = client.http().getContent(url) return RestPurchase.fromJson(body) except CatchableError as e: return failure e.msg @@ -175,14 +179,14 @@ proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase = proc getSalesAgent*(client: CodexClient, slotId: SlotId): ?!RestSalesAgent = let url = client.baseurl & "/sales/slots/" & slotId.toHex try: - let body = client.http.getContent(url) + let body = client.http().getContent(url) return RestSalesAgent.fromJson(body) except CatchableError as e: return failure e.msg proc getSlots*(client: CodexClient): ?!seq[Slot] = let url = 
client.baseurl & "/sales/slots" - let body = client.http.getContent(url) + let body = client.http().getContent(url) seq[Slot].fromJson(body) proc postAvailability*( @@ -200,7 +204,7 @@ proc postAvailability*( "minPricePerBytePerSecond": minPricePerBytePerSecond, "totalCollateral": totalCollateral, } - let response = client.http.post(url, $json) + let response = client.http().post(url, $json) doAssert response.status == "201 Created", "expected 201 Created, got " & response.status & ", body: " & response.body Availability.fromJson(response.body) @@ -233,7 +237,7 @@ proc patchAvailabilityRaw*( if totalCollateral =? totalCollateral: json["totalCollateral"] = %totalCollateral - client.http.patch(url, $json) + client.http().patch(url, $json) proc patchAvailability*( client: CodexClient, @@ -253,7 +257,7 @@ proc patchAvailability*( proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = ## Call sales availability REST endpoint let url = client.baseurl & "/sales/availability" - let body = client.http.getContent(url) + let body = client.http().getContent(url) seq[Availability].fromJson(body) proc getAvailabilityReservations*( @@ -261,16 +265,9 @@ proc getAvailabilityReservations*( ): ?!seq[Reservation] = ## Retrieves Availability's Reservations let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations" - let body = client.http.getContent(url) + let body = client.http().getContent(url) seq[Reservation].fromJson(body) -proc close*(client: CodexClient) = - client.http.close() - -proc restart*(client: CodexClient) = - client.http.close() - client.http = newHttpClient(timeout = HttpClientTimeoutMs) - proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool = client.getPurchase(id).option .? state == some state @@ -283,18 +280,23 @@ proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId = proc uploadRaw*( client: CodexClient, contents: string, headers = newHttpHeaders() ): Response = - return client.http.request( - client.baseurl & "/data", body = contents, httpMethod = HttpPost, headers = headers - ) + return client.http().request( + client.baseurl & "/data", + body = contents, + httpMethod = HttpPost, + headers = headers, + ) proc listRaw*(client: CodexClient): Response = - return client.http.request(client.baseurl & "/data", httpMethod = HttpGet) + return client.http().request(client.baseurl & "/data", httpMethod = HttpGet) -proc downloadRaw*(client: CodexClient, cid: string, local = false): Response = - return client.http.request( +proc downloadRaw*( + client: CodexClient, cid: string, local = false, httpClient = client.http() +): Response = + return httpClient.request( client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"), httpMethod = HttpGet, ) proc deleteRaw*(client: CodexClient, cid: string): Response = - return client.http.request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete) + return client.http().request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete) diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim index 259efcff..4eb5c775 100644 --- a/tests/integration/testpurchasing.nim +++ b/tests/integration/testpurchasing.nim @@ -89,7 +89,6 @@ twonodessuite "Purchasing": check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) await node1.restart() - client1.restart() check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) let request = client1.getPurchase(id).get.request.get diff --git 
a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 20bf8bc8..7164372b 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -314,24 +314,25 @@ twonodessuite "REST API": privateAccess(client1.http.type) let cid = client1.upload(repeat("some file contents", 1000)).get + let httpClient = client1.http() try: # Sadly, there's no high level API for preventing the client from # consuming the whole response, and we need to close the socket # before that happens if we want to trigger the bug, so we need to # resort to this. - client1.http.getBody = false - let response = client1.downloadRaw($cid) + httpClient.getBody = false + let response = client1.downloadRaw($cid, httpClient = httpClient) # Read 4 bytes from the stream just to make sure we actually # receive some data. - let data = client1.http.socket.recv(4) + let data = httpClient.socket.recv(4) check data.len == 4 # Prematurely closes the connection. - client1.http.close() + httpClient.close() finally: - client1.http.getBody = true + httpClient.getBody = true - let response = client1.downloadRaw($cid) + let response = client1.downloadRaw($cid, httpClient = httpClient) check response.body == repeat("some file contents", 1000) From 1cac3e2a117eb9ce89c6ba4f6c324e44371da5fa Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Thu, 13 Mar 2025 08:33:15 -0600 Subject: [PATCH 07/14] Fix/rework async exceptions (#1130) * cleanup imports and logs * add BlockHandle type * revert deps * refactor: async error handling and future tracking improvements - Update async procedures to use explicit raises annotation - Modify TrackedFutures to handle futures with no raised exceptions - Replace `asyncSpawn` with explicit future tracking - Update test suites to use `unittest2` - Standardize error handling across network and async components - Remove deprecated error handling patterns This commit introduces a more robust approach to async error handling and future management, improving type safety and reducing potential runtime errors. 
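A minimal sketch of the convention (the proc name and body are invented for illustration; the pragma forms are the chronos v4 syntax used throughout this change):

    # before: a bare {.async.} proc tells callers nothing about what can escape
    proc tick() {.async.} =
      await sleepAsync(1.seconds)

    # after: the exception set is explicit, and loops that must never fail
    # declare an empty set and handle all errors internally
    proc tick() {.async: (raises: [CancelledError]).} =
      await sleepAsync(1.seconds)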
* bump nim-serde * remove asyncSpawn * rework background downloads and prefetch * improve logging * refactor: enhance async procedures with error handling and raise annotations * misc cleanup * misc * refactor: implement allFinishedFailed to aggregate future results with success and failure tracking * refactor: update error handling in reader procedures to raise ChunkerError and CancelledError * refactor: improve error handling in wantListHandler and accountHandler procedures * refactor: simplify LPStreamReadError creation by consolidating parameters * refactor: enhance error handling in AsyncStreamWrapper to catch unexpected errors * refactor: enhance error handling in advertiser and discovery loops to improve resilience * misc * refactor: improve code structure and readability * remove cancellation from addSlotToQueue * refactor: add assertion for unexpected errors in local store checks * refactor: prevent tracking of finished futures and improve test assertions * refactor: improve error handling in local store checks * remove usage of msgDetail * feat: add initial implementation of discovery engine and related components * refactor: improve task scheduling logic by removing unnecessary break statement * break after scheduling a task * make taskHandler cancelable * refactor: update async handlers to raise CancelledError * refactor(advertiser): streamline error handling and improve task flow in advertise loops * fix: correct spelling of "divisible" in error messages and comments * refactor(discovery): simplify discovery task loop and improve error handling * refactor(engine): filter peers before processing in cancelBlocks procedure --- codex/blockexchange/engine/advertiser.nim | 88 ++--- codex/blockexchange/engine/discovery.nim | 63 ++-- codex/blockexchange/engine/engine.nim | 320 ++++++++++-------- codex/blockexchange/engine/payments.nim | 5 +- codex/blockexchange/network/network.nim | 98 +++--- codex/blockexchange/network/networkpeer.nim | 68 ++-- codex/blockexchange/peers/peerctxstore.nim | 7 +- codex/blockexchange/protobuf/payments.nim | 6 +- codex/blockexchange/protobuf/presence.nim | 6 +- codex/chunker.nim | 11 +- codex/codex.nim | 18 +- codex/contracts/clock.nim | 14 +- codex/discovery.nim | 138 +++++--- codex/erasure/erasure.nim | 4 +- codex/errors.nim | 45 +-- codex/node.nim | 91 ++--- codex/rest/api.nim | 11 +- codex/sales.nim | 65 ++-- codex/sales/salesagent.nim | 1 - codex/sales/slotqueue.nim | 22 +- codex/slots/builder/builder.nim | 10 +- codex/slots/proofs/prover.nim | 2 + codex/streams/asyncstreamwrapper.nim | 8 +- codex/streams/storestream.nim | 6 +- codex/utils/asyncstatemachine.nim | 2 - codex/utils/timer.nim | 1 - codex/utils/trackedfutures.nim | 26 +- codex/validation.nim | 1 - tests/asynctest.nim | 4 +- .../blockexchange/discovery/testdiscovery.nim | 68 ++-- .../discovery/testdiscoveryengine.nim | 10 +- .../blockexchange/engine/testadvertiser.nim | 2 +- .../codex/blockexchange/engine/testengine.nim | 61 ++-- .../blockexchange/engine/testpayments.nim | 4 +- .../blockexchange/protobuf/testpayments.nim | 4 +- .../blockexchange/protobuf/testpresence.nim | 2 +- tests/codex/blockexchange/testnetwork.nim | 28 +- .../codex/blockexchange/testpeerctxstore.nim | 6 +- .../codex/blockexchange/testpendingblocks.nim | 2 +- tests/codex/helpers/mockchunker.nim | 2 +- tests/codex/helpers/mockdiscovery.nim | 41 ++- tests/codex/helpers/randomchunker.nim | 2 +- tests/codex/merkletree/generictreetests.nim | 2 +- tests/codex/merkletree/testcodexcoders.nim | 4 +-
tests/codex/merkletree/testcodextree.nim | 2 +- tests/codex/merkletree/testmerkledigest.nim | 2 +- tests/codex/merkletree/testposeidon2tree.nim | 2 +- tests/codex/sales/states/testdownloading.nim | 4 +- tests/codex/sales/states/testfilled.nim | 2 +- tests/codex/sales/states/testfilling.nim | 4 +- tests/codex/sales/states/testunknown.nim | 2 +- tests/codex/sales/testsales.nim | 29 +- tests/codex/sales/testslotqueue.nim | 42 ++- tests/codex/slots/testslotbuilder.nim | 4 +- tests/codex/stores/repostore/testcoders.nim | 4 +- tests/codex/stores/testcachestore.nim | 2 +- tests/codex/stores/testkeyutils.nim | 2 +- tests/codex/stores/testmaintenance.nim | 2 +- tests/codex/stores/testrepostore.nim | 2 +- tests/codex/testasyncheapqueue.nim | 2 +- tests/codex/testchunking.nim | 13 +- tests/codex/testclock.nim | 4 +- tests/codex/testlogutils.nim | 3 +- tests/codex/testmanifest.nim | 2 +- tests/codex/testpurchasing.nim | 2 +- tests/codex/testsystemclock.nim | 6 +- tests/codex/utils/testiter.nim | 2 +- tests/codex/utils/testkeyutils.nim | 8 +- tests/codex/utils/testoptions.nim | 9 +- tests/codex/utils/testtrackedfutures.nim | 54 ++- tests/codex/utils/testutils.nim | 2 +- tests/helpers.nim | 32 ++ tests/helpers/trackers.nim | 2 +- vendor/nim-serde | 2 +- 74 files changed, 937 insertions(+), 690 deletions(-) diff --git a/codex/blockexchange/engine/advertiser.nim b/codex/blockexchange/engine/advertiser.nim index f5f28bc1..d094c454 100644 --- a/codex/blockexchange/engine/advertiser.nim +++ b/codex/blockexchange/engine/advertiser.nim @@ -41,80 +41,86 @@ type Advertiser* = ref object of RootObj advertiserRunning*: bool # Indicates if discovery is running concurrentAdvReqs: int # Concurrent advertise requests - advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle + advertiseLocalStoreLoop*: Future[void].Raising([]) # Advertise loop task handle advertiseQueue*: AsyncQueue[Cid] # Advertise queue trackedFutures*: TrackedFutures # Advertise tasks futures advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests -proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} = +proc addCidToQueue(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} = if cid notin b.advertiseQueue: await b.advertiseQueue.put(cid) + trace "Advertising", cid -proc advertiseBlock(b: Advertiser, cid: Cid) {.async.} = +proc advertiseBlock(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} = without isM =? cid.isManifest, err: warn "Unable to determine if cid is manifest" return - if isM: - without blk =? await b.localStore.getBlock(cid), err: - error "Error retrieving manifest block", cid, err = err.msg - return + try: + if isM: + without blk =? await b.localStore.getBlock(cid), err: + error "Error retrieving manifest block", cid, err = err.msg + return - without manifest =? Manifest.decode(blk), err: - error "Unable to decode as manifest", err = err.msg - return + without manifest =? 
Manifest.decode(blk), err: + error "Unable to decode as manifest", err = err.msg + return - # announce manifest cid and tree cid - await b.addCidToQueue(cid) - await b.addCidToQueue(manifest.treeCid) + # announce manifest cid and tree cid + await b.addCidToQueue(cid) + await b.addCidToQueue(manifest.treeCid) + except CancelledError as exc: + trace "Cancelled advertise block", cid + raise exc + except CatchableError as e: + error "failed to advertise block", cid, error = e.msgDetail proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} = - while b.advertiserRunning: - try: - if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest): - trace "Advertiser begins iterating blocks..." - for c in cids: - if cid =? await c: - await b.advertiseBlock(cid) - trace "Advertiser iterating blocks finished." + try: + while b.advertiserRunning: + try: + if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest): + trace "Advertiser begins iterating blocks..." + for c in cids: + if cid =? await c: + await b.advertiseBlock(cid) + trace "Advertiser iterating blocks finished." + except CatchableError as e: + error "Error in advertise local store loop", error = e.msgDetail + raiseAssert("Unexpected exception in advertiseLocalStoreLoop") await sleepAsync(b.advertiseLocalStoreLoopSleep) - except CancelledError: - break # do not propagate as advertiseLocalStoreLoop was asyncSpawned - except CatchableError as e: - error "failed to advertise blocks in local store", error = e.msgDetail + except CancelledError: + warn "Cancelled advertise local store loop" info "Exiting advertise task loop" proc processQueueLoop(b: Advertiser) {.async: (raises: []).} = - while b.advertiserRunning: - try: + try: + while b.advertiserRunning: let cid = await b.advertiseQueue.get() if cid in b.inFlightAdvReqs: continue - try: - let request = b.discovery.provide(cid) + let request = b.discovery.provide(cid) + b.inFlightAdvReqs[cid] = request + codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64) - b.inFlightAdvReqs[cid] = request - codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64) - await request - finally: + defer: b.inFlightAdvReqs.del(cid) codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64) - except CancelledError: - trace "Advertise task cancelled" - return - except CatchableError as exc: - warn "Exception in advertise task runner", exc = exc.msg + + await request + except CancelledError: + warn "Cancelled advertise task runner" info "Exiting advertise task runner" -proc start*(b: Advertiser) {.async.} = +proc start*(b: Advertiser) {.async: (raises: []).} = ## Start the advertiser ## @@ -134,13 +140,11 @@ proc start*(b: Advertiser) {.async.} = for i in 0 ..< b.concurrentAdvReqs: let fut = b.processQueueLoop() b.trackedFutures.track(fut) - asyncSpawn fut b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b) b.trackedFutures.track(b.advertiseLocalStoreLoop) - asyncSpawn b.advertiseLocalStoreLoop -proc stop*(b: Advertiser) {.async.} = +proc stop*(b: Advertiser) {.async: (raises: []).} = ## Stop the advertiser ## diff --git a/codex/blockexchange/engine/discovery.nim b/codex/blockexchange/engine/discovery.nim index c664f212..b32b8555 100644 --- a/codex/blockexchange/engine/discovery.nim +++ b/codex/blockexchange/engine/discovery.nim @@ -48,7 +48,7 @@ type DiscoveryEngine* = ref object of RootObj pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved discEngineRunning*: bool # Indicates if discovery is running concurrentDiscReqs: int # Concurrent discovery 
requests - discoveryLoop*: Future[void] # Discovery loop task handle + discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle discoveryQueue*: AsyncQueue[Cid] # Discovery queue trackedFutures*: TrackedFutures # Tracked Discovery tasks futures minPeersPerBlock*: int # Max number of peers with block @@ -57,30 +57,21 @@ type DiscoveryEngine* = ref object of RootObj # Inflight discovery requests proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} = - while b.discEngineRunning: - for cid in toSeq(b.pendingBlocks.wantListBlockCids): - try: + try: + while b.discEngineRunning: + for cid in toSeq(b.pendingBlocks.wantListBlockCids): await b.discoveryQueue.put(cid) - except CancelledError: - trace "Discovery loop cancelled" - return - except CatchableError as exc: - warn "Exception in discovery loop", exc = exc.msg - try: - logScope: - sleep = b.discoveryLoopSleep - wanted = b.pendingBlocks.len await sleepAsync(b.discoveryLoopSleep) - except CancelledError: - discard # do not propagate as discoveryQueueLoop was asyncSpawned + except CancelledError: + trace "Discovery loop cancelled" proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} = ## Run discovery tasks ## - while b.discEngineRunning: - try: + try: + while b.discEngineRunning: let cid = await b.discoveryQueue.get() if cid in b.inFlightDiscReqs: @@ -90,35 +81,28 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} = let haves = b.peers.peersHave(cid) if haves.len < b.minPeersPerBlock: - try: - let request = b.discovery.find(cid).wait(DefaultDiscoveryTimeout) + let request = b.discovery.find(cid) + b.inFlightDiscReqs[cid] = request + codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64) - b.inFlightDiscReqs[cid] = request + defer: + b.inFlightDiscReqs.del(cid) codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64) - let peers = await request + if (await request.withTimeout(DefaultDiscoveryTimeout)) and + peers =? (await request).catch: let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data))) for i, f in dialed: if f.failed: await b.discovery.removeProvider(peers[i].data.peerId) - finally: - b.inFlightDiscReqs.del(cid) - codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64) - except CancelledError: - trace "Discovery task cancelled" - return - except CatchableError as exc: - warn "Exception in discovery task runner", exc = exc.msg - except Exception as e: - # Raised by b.discovery.removeProvider somehow... - # This should not be catchable, and we should never get here. Therefore, - # raise a Defect. 
- raiseAssert "Exception when removing provider" + except CancelledError: + trace "Discovery task cancelled" + return info "Exiting discovery task runner" -proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} = +proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) = for cid in cids: if cid notin b.discoveryQueue: try: @@ -126,11 +110,11 @@ proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} = except CatchableError as exc: warn "Exception queueing discovery request", exc = exc.msg -proc start*(b: DiscoveryEngine) {.async.} = +proc start*(b: DiscoveryEngine) {.async: (raises: []).} = ## Start the discengine task ## - trace "Discovery engine start" + trace "Discovery engine starting" if b.discEngineRunning: warn "Starting discovery engine twice" @@ -140,12 +124,13 @@ proc start*(b: DiscoveryEngine) {.async.} = for i in 0 ..< b.concurrentDiscReqs: let fut = b.discoveryTaskLoop() b.trackedFutures.track(fut) - asyncSpawn fut b.discoveryLoop = b.discoveryQueueLoop() b.trackedFutures.track(b.discoveryLoop) -proc stop*(b: DiscoveryEngine) {.async.} = + trace "Discovery engine started" + +proc stop*(b: DiscoveryEngine) {.async: (raises: []).} = ## Stop the discovery engine ## diff --git a/codex/blockexchange/engine/engine.nim b/codex/blockexchange/engine/engine.nim index dafdd520..befb8ae9 100644 --- a/codex/blockexchange/engine/engine.nim +++ b/codex/blockexchange/engine/engine.nim @@ -93,12 +93,15 @@ type price*: UInt256 # attach task scheduler to engine -proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe.} = - self.taskQueue.pushOrUpdateNoWait(task).isOk() +proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, raises: [].} = + if self.taskQueue.pushOrUpdateNoWait(task).isOk(): + trace "Task scheduled for peer", peer = task.id + else: + warn "Unable to schedule task for peer", peer = task.id proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} -proc start*(self: BlockExcEngine) {.async.} = +proc start*(self: BlockExcEngine) {.async: (raises: []).} = ## Start the blockexc task ## @@ -115,7 +118,7 @@ proc start*(self: BlockExcEngine) {.async.} = let fut = self.blockexcTaskRunner() self.trackedFutures.track(fut) -proc stop*(self: BlockExcEngine) {.async.} = +proc stop*(self: BlockExcEngine) {.async: (raises: []).} = ## Stop the blockexc blockexc ## @@ -135,7 +138,7 @@ proc stop*(self: BlockExcEngine) {.async.} = proc sendWantHave( self: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx] -): Future[void] {.async.} = +): Future[void] {.async: (raises: [CancelledError]).} = for p in peers: let toAsk = addresses.filterIt(it notin p.peerHave) trace "Sending wantHave request", toAsk, peer = p.id @@ -144,7 +147,7 @@ proc sendWantHave( proc sendWantBlock( self: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx -): Future[void] {.async.} = +): Future[void] {.async: (raises: [CancelledError]).} = trace "Sending wantBlock request to", addresses, peer = blockPeer.id await self.network.request.sendWantList( blockPeer.id, addresses, wantType = WantType.WantBlock @@ -229,7 +232,7 @@ proc requestBlock*( proc blockPresenceHandler*( self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence] -) {.async.} = +) {.async: (raises: []).} = trace "Received block presence from peer", peer, blocks = blocks.mapIt($it) let peerCtx = self.peers.get(peer) @@ -249,20 +252,23 @@ proc blockPresenceHandler*( if dontWantCids.len > 0: peerCtx.cleanPresence(dontWantCids) - let 
ourWantCids = ourWantList.filter do(address: BlockAddress) -> bool: - if address in peerHave and not self.pendingBlocks.retriesExhausted(address) and - not self.pendingBlocks.isInFlight(address): - self.pendingBlocks.setInFlight(address, true) - self.pendingBlocks.decRetries(address) - true - else: - false + let ourWantCids = ourWantList.filterIt( + it in peerHave and not self.pendingBlocks.retriesExhausted(it) and + not self.pendingBlocks.isInFlight(it) + ) + + for address in ourWantCids: + self.pendingBlocks.setInFlight(address, true) + self.pendingBlocks.decRetries(address) if ourWantCids.len > 0: trace "Peer has blocks in our wantList", peer, wants = ourWantCids - await self.sendWantBlock(ourWantCids, peerCtx) + if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption: + warn "Failed to send wantBlock to peer", peer, err = err.msg -proc scheduleTasks(self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = +proc scheduleTasks( + self: BlockExcEngine, blocksDelivery: seq[BlockDelivery] +) {.async: (raises: [CancelledError]).} = let cids = blocksDelivery.mapIt(it.blk.cid) # schedule any new peers to provide blocks to @@ -271,15 +277,21 @@ proc scheduleTasks(self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.a # schedule a peer if it wants at least one cid # and we have it in our local store if c in p.peerWantsCids: - if await (c in self.localStore): - if self.scheduleTask(p): - trace "Task scheduled for peer", peer = p.id - else: - warn "Unable to schedule task for peer", peer = p.id + try: + if await (c in self.localStore): + # TODO: the try/except should go away once blockstore tracks exceptions + self.scheduleTask(p) + break + except CancelledError as exc: + warn "Checking local store canceled", cid = c, err = exc.msg + return + except CatchableError as exc: + error "Error checking local store for cid", cid = c, err = exc.msg + raiseAssert "Unexpected error checking local store for cid" - break # do next peer - -proc cancelBlocks(self: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} = +proc cancelBlocks( + self: BlockExcEngine, addrs: seq[BlockAddress] +) {.async: (raises: [CancelledError]).} = ## Tells neighboring peers that we're no longer interested in a block. 
## @@ -289,35 +301,43 @@ proc cancelBlocks(self: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} = trace "Sending block request cancellations to peers", addrs, peers = self.peers.peerIds - proc mapPeers(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} = - let blocks = addrs.filter do(a: BlockAddress) -> bool: - a in peerCtx.blocks + proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} = + await self.network.request.sendWantCancellations( + peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx) + ) - if blocks.len > 0: - trace "Sending block request cancellations to peer", peer = peerCtx.id, blocks - await self.network.request.sendWantCancellations( - peer = peerCtx.id, addresses = blocks + return peerCtx + + try: + let (succeededFuts, failedFuts) = await allFinishedFailed( + toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map( + processPeer ) + ) + + (await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx): peerCtx.cleanPresence(addrs) - peerCtx - let failed = (await allFinished(map(toSeq(self.peers.peers.values), mapPeers))).filterIt( - it.failed - ) - - if failed.len > 0: - warn "Failed to send block request cancellations to peers", peers = failed.len - else: - trace "Block request cancellations sent to peers", peers = self.peers.len + if failedFuts.len > 0: + warn "Failed to send block request cancellations to peers", peers = failedFuts.len + else: + trace "Block request cancellations sent to peers", peers = self.peers.len + except CancelledError as exc: + warn "Error sending block request cancellations", error = exc.msg + raise exc + except CatchableError as exc: + warn "Error sending block request cancellations", error = exc.msg proc resolveBlocks*( self: BlockExcEngine, blocksDelivery: seq[BlockDelivery] -) {.async.} = +) {.async: (raises: [CancelledError]).} = self.pendingBlocks.resolve(blocksDelivery) await self.scheduleTasks(blocksDelivery) await self.cancelBlocks(blocksDelivery.mapIt(it.address)) -proc resolveBlocks*(self: BlockExcEngine, blocks: seq[Block]) {.async.} = +proc resolveBlocks*( + self: BlockExcEngine, blocks: seq[Block] +) {.async: (raises: [CancelledError]).} = await self.resolveBlocks( blocks.mapIt( BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)) @@ -326,7 +346,7 @@ proc resolveBlocks*(self: BlockExcEngine, blocks: seq[Block]) {.async.} = proc payForBlocks( self: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery] -) {.async.} = +) {.async: (raises: [CancelledError]).} = let sendPayment = self.network.request.sendPayment price = peer.price(blocksDelivery.mapIt(it.address)) @@ -367,7 +387,7 @@ proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void = proc blocksDeliveryHandler*( self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery] -) {.async.} = +) {.async: (raises: []).} = trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address)) var validatedBlocksDelivery: seq[BlockDelivery] @@ -376,41 +396,47 @@ proc blocksDeliveryHandler*( peer = peer address = bd.address - if err =? self.validateBlockDelivery(bd).errorOption: - warn "Block validation failed", msg = err.msg - continue - - if err =? (await self.localStore.putBlock(bd.blk)).errorOption: - error "Unable to store block", err = err.msg - continue - - if bd.address.leaf: - without proof =? bd.proof: - error "Proof expected for a leaf block delivery" + try: + if err =? 
self.validateBlockDelivery(bd).errorOption: + warn "Block validation failed", msg = err.msg continue - if err =? ( - await self.localStore.putCidAndProof( - bd.address.treeCid, bd.address.index, bd.blk.cid, proof - ) - ).errorOption: - error "Unable to store proof and cid for a block" + + if err =? (await self.localStore.putBlock(bd.blk)).errorOption: + error "Unable to store block", err = err.msg continue + if bd.address.leaf: + without proof =? bd.proof: + warn "Proof expected for a leaf block delivery" + continue + if err =? ( + await self.localStore.putCidAndProof( + bd.address.treeCid, bd.address.index, bd.blk.cid, proof + ) + ).errorOption: + warn "Unable to store proof and cid for a block" + continue + except CatchableError as exc: + warn "Error handling block delivery", error = exc.msg + continue + validatedBlocksDelivery.add(bd) - await self.resolveBlocks(validatedBlocksDelivery) codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64) let peerCtx = self.peers.get(peer) - if peerCtx != nil: - await self.payForBlocks(peerCtx, blocksDelivery) - ## shouldn't we remove them from the want-list instead of this: - peerCtx.cleanPresence(blocksDelivery.mapIt(it.address)) + if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption: + warn "Error paying for blocks", err = err.msg + return + + if err =? catch(await self.resolveBlocks(validatedBlocksDelivery)).errorOption: + warn "Error resolving blocks", err = err.msg + return proc wantListHandler*( self: BlockExcEngine, peer: PeerId, wantList: WantList -) {.async.} = +) {.async: (raises: []).} = trace "Received want list from peer", peer, wantList = wantList.entries.len let peerCtx = self.peers.get(peer) @@ -422,68 +448,81 @@ proc wantListHandler*( presence: seq[BlockPresence] schedulePeer = false - for e in wantList.entries: - let idx = peerCtx.peerWants.findIt(it.address == e.address) + try: + for e in wantList.entries: + let idx = peerCtx.peerWants.findIt(it.address == e.address) - logScope: - peer = peerCtx.id - address = e.address - wantType = $e.wantType + logScope: + peer = peerCtx.id + address = e.address + wantType = $e.wantType - if idx < 0: # Adding new entry to peer wants - let - have = await e.address in self.localStore - price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) + if idx < 0: # Adding new entry to peer wants + let + have = + try: + await e.address in self.localStore + except CatchableError as exc: + # TODO: should not be necessary once we have proper exception tracking on the BlockStore interface + false + price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) - if e.cancel: - trace "Received cancelation for untracked block, skipping", address = e.address - continue + if e.cancel: + trace "Received cancelation for untracked block, skipping", + address = e.address + continue - trace "Processing want list entry", wantList = $e - case e.wantType - of WantType.WantHave: - if have: - presence.add( - BlockPresence( - address: e.address, `type`: BlockPresenceType.Have, price: price - ) - ) - else: - if e.sendDontHave: + trace "Processing want list entry", wantList = $e + case e.wantType + of WantType.WantHave: + if have: presence.add( BlockPresence( - address: e.address, `type`: BlockPresenceType.DontHave, price: price + address: e.address, `type`: BlockPresenceType.Have, price: price ) ) + else: + if e.sendDontHave: + presence.add( + BlockPresence( + address: e.address, `type`: BlockPresenceType.DontHave, price: price + ) + ) - 
codex_block_exchange_want_have_lists_received.inc() - of WantType.WantBlock: - peerCtx.peerWants.add(e) - schedulePeer = true - codex_block_exchange_want_block_lists_received.inc() - else: # Updating existing entry in peer wants - # peer doesn't want this block anymore - if e.cancel: - trace "Canceling want for block", address = e.address - peerCtx.peerWants.del(idx) - trace "Canceled block request", address = e.address, len = peerCtx.peerWants.len - else: - if e.wantType == WantType.WantBlock: + codex_block_exchange_want_have_lists_received.inc() + of WantType.WantBlock: + peerCtx.peerWants.add(e) schedulePeer = true - # peer might want to ask for the same cid with - # different want params - trace "Updating want for block", address = e.address - peerCtx.peerWants[idx] = e # update entry - trace "Updated block request", address = e.address, len = peerCtx.peerWants.len + codex_block_exchange_want_block_lists_received.inc() + else: # Updating existing entry in peer wants + # peer doesn't want this block anymore + if e.cancel: + trace "Canceling want for block", address = e.address + peerCtx.peerWants.del(idx) + trace "Canceled block request", + address = e.address, len = peerCtx.peerWants.len + else: + if e.wantType == WantType.WantBlock: + schedulePeer = true + # peer might want to ask for the same cid with + # different want params + trace "Updating want for block", address = e.address + peerCtx.peerWants[idx] = e # update entry + trace "Updated block request", + address = e.address, len = peerCtx.peerWants.len - if presence.len > 0: - trace "Sending presence to remote", items = presence.mapIt($it).join(",") - await self.network.request.sendPresence(peer, presence) + if presence.len > 0: + trace "Sending presence to remote", items = presence.mapIt($it).join(",") + await self.network.request.sendPresence(peer, presence) - if schedulePeer and not self.scheduleTask(peerCtx): - warn "Unable to schedule task for peer", peer + if schedulePeer: + self.scheduleTask(peerCtx) + except CancelledError as exc: #TODO: replace with CancelledError + warn "Error processing want list", error = exc.msg -proc accountHandler*(self: BlockExcEngine, peer: PeerId, account: Account) {.async.} = +proc accountHandler*( + self: BlockExcEngine, peer: PeerId, account: Account +) {.async: (raises: []).} = let context = self.peers.get(peer) if context.isNil: return @@ -492,7 +531,7 @@ proc accountHandler*(self: BlockExcEngine, peer: PeerId, account: Account) {.asy proc paymentHandler*( self: BlockExcEngine, peer: PeerId, payment: SignedState -) {.async.} = +) {.async: (raises: []).} = trace "Handling payments", peer without context =? self.peers.get(peer).option and account =? context.account: @@ -505,7 +544,9 @@ proc paymentHandler*( else: context.paymentChannel = self.wallet.acceptChannel(payment).option -proc setupPeer*(self: BlockExcEngine, peer: PeerId) {.async.} = +proc setupPeer*( + self: BlockExcEngine, peer: PeerId +) {.async: (raises: [CancelledError]).} = ## Perform initial setup, such as want ## list exchange ## @@ -524,9 +565,10 @@ proc setupPeer*(self: BlockExcEngine, peer: PeerId) {.async.} = await self.network.request.sendWantList(peer, cids, full = true) if address =? self.pricing .? 
address: + trace "Sending account to peer", peer await self.network.request.sendAccount(peer, Account(address: address)) -proc dropPeer*(self: BlockExcEngine, peer: PeerId) = +proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} = ## Cleanup disconnected peer ## @@ -535,7 +577,9 @@ proc dropPeer*(self: BlockExcEngine, peer: PeerId) = # drop the peer from the peers table self.peers.remove(peer) -proc taskHandler*(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = +proc taskHandler*( + self: BlockExcEngine, task: BlockExcPeerCtx +) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} = # Send to the peer blocks he wants to get, # if they present in our local store @@ -572,8 +616,11 @@ proc taskHandler*(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} let blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup)) - blocksDelivery = - blocksDeliveryFut.filterIt(it.completed and it.read.isOk).mapIt(it.read.get) + blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt: + if bd =? it.value: + bd + else: + raiseAssert "Unexpected error in local lookup" # All the wants that failed local lookup must be set to not-in-flight again. let @@ -595,15 +642,12 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} = ## trace "Starting blockexc task runner" - while self.blockexcRunning: - try: + try: + while self.blockexcRunning: let peerCtx = await self.taskQueue.pop() - await self.taskHandler(peerCtx) - except CancelledError: - break # do not propagate as blockexcTaskRunner was asyncSpawned - except CatchableError as e: - error "error running block exchange task", error = e.msgDetail + except CatchableError as exc: + error "error running block exchange task", error = exc.msg info "Exiting blockexc task runner" @@ -644,23 +688,29 @@ proc new*( network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) - proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} = + proc blockWantListHandler( + peer: PeerId, wantList: WantList + ): Future[void] {.async: (raises: []).} = self.wantListHandler(peer, wantList) proc blockPresenceHandler( peer: PeerId, presence: seq[BlockPresence] - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raises: []).} = self.blockPresenceHandler(peer, presence) proc blocksDeliveryHandler( peer: PeerId, blocksDelivery: seq[BlockDelivery] - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raises: []).} = self.blocksDeliveryHandler(peer, blocksDelivery) - proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} = + proc accountHandler( + peer: PeerId, account: Account + ): Future[void] {.async: (raises: []).} = self.accountHandler(peer, account) - proc paymentHandler(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} = + proc paymentHandler( + peer: PeerId, payment: SignedState + ): Future[void] {.async: (raises: []).} = self.paymentHandler(peer, payment) network.handlers = BlockExcHandlers( diff --git a/codex/blockexchange/engine/payments.nim b/codex/blockexchange/engine/payments.nim index 88953976..260a3005 100644 --- a/codex/blockexchange/engine/payments.nim +++ b/codex/blockexchange/engine/payments.nim @@ -7,6 +7,8 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. 
+{.push raises: [].}
+
 import std/math
 import pkg/nitro
 import pkg/questionable/results
@@ -15,9 +17,6 @@ import ../peers
 export nitro
 export results
 
-push:
-  {.upraises: [].}
-
 const ChainId* = 0.u256 # invalid chain id for now
 const Asset* = EthAddress.zero # invalid ERC20 asset address for now
 const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals
diff --git a/codex/blockexchange/network/network.nim b/codex/blockexchange/network/network.nim
index daf358de..26c07445 100644
--- a/codex/blockexchange/network/network.nim
+++ b/codex/blockexchange/network/network.nim
@@ -35,13 +35,15 @@ const DefaultMaxInflight* = 100
 
 type
-  WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
+  WantListHandler* =
+    proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).}
   BlocksDeliveryHandler* =
-    proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
+    proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).}
   BlockPresenceHandler* =
-    proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
-  AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
-  PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
+    proc(peer: PeerId, presence: seq[BlockPresence]) {.gcsafe, async: (raises: []).}
+  AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).}
+  PaymentHandler* =
+    proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).}
 
   BlockExcHandlers* = object
     onWantList*: WantListHandler
@@ -58,15 +60,20 @@ type
       wantType: WantType = WantType.WantHave,
       full: bool = false,
       sendDontHave: bool = false,
-    ): Future[void] {.gcsafe.}
-  WantCancellationSender* =
-    proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
-  BlocksDeliverySender* =
-    proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
-  PresenceSender* =
-    proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
-  AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
-  PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
+    ) {.async: (raises: [CancelledError]).}
+  WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {.
+    async: (raises: [CancelledError])
+  .}
+  BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.
+    async: (raises: [CancelledError])
+  .}
+  PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
+ async: (raises: [CancelledError]) + .} + AccountSender* = + proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).} + PaymentSender* = + proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).} BlockExcRequest* = object sendWantList*: WantListSender @@ -98,7 +105,9 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool = return b.peerId == peer -proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = +proc send*( + b: BlockExcNetwork, id: PeerId, msg: pb.Message +) {.async: (raises: [CancelledError]).} = ## Send message to peer ## @@ -106,8 +115,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = trace "Unable to send, peer not found", peerId = id return - let peer = b.peers[id] try: + let peer = b.peers[id] + await b.inflightSema.acquire() await peer.send(msg) except CancelledError as error: @@ -117,7 +127,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = finally: b.inflightSema.release() -proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} = +proc handleWantList( + b: BlockExcNetwork, peer: NetworkPeer, list: WantList +) {.async: (raises: []).} = ## Handle incoming want list ## @@ -133,7 +145,7 @@ proc sendWantList*( wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, -): Future[void] = +) {.async: (raw: true, raises: [CancelledError]).} = ## Send a want message to peer ## @@ -154,14 +166,14 @@ proc sendWantList*( proc sendWantCancellations*( b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress] -): Future[void] {.async.} = +): Future[void] {.async: (raises: [CancelledError]).} = ## Informs a remote peer that we're no longer interested in a set of blocks ## await b.sendWantList(id = id, addresses = addresses, cancel = true) proc handleBlocksDelivery( b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery] -) {.async.} = +) {.async: (raises: []).} = ## Handle incoming blocks ## @@ -170,7 +182,7 @@ proc handleBlocksDelivery( proc sendBlocksDelivery*( b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery] -): Future[void] = +) {.async: (raw: true, raises: [CancelledError]).} = ## Send blocks to remote ## @@ -178,7 +190,7 @@ proc sendBlocksDelivery*( proc handleBlockPresence( b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence] -) {.async.} = +) {.async: (raises: []).} = ## Handle block presence ## @@ -187,7 +199,7 @@ proc handleBlockPresence( proc sendBlockPresence*( b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence] -): Future[void] = +) {.async: (raw: true, raises: [CancelledError]).} = ## Send presence to remote ## @@ -195,20 +207,24 @@ proc sendBlockPresence*( proc handleAccount( network: BlockExcNetwork, peer: NetworkPeer, account: Account -) {.async.} = +) {.async: (raises: []).} = ## Handle account info ## if not network.handlers.onAccount.isNil: await network.handlers.onAccount(peer.id, account) -proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] = +proc sendAccount*( + b: BlockExcNetwork, id: PeerId, account: Account +) {.async: (raw: true, raises: [CancelledError]).} = ## Send account info to remote ## b.send(id, Message(account: AccountMessage.init(account))) -proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] = +proc sendPayment*( + b: BlockExcNetwork, id: PeerId, payment: SignedState +) {.async: (raw: true, raises: [CancelledError]).} = ## Send payment to remote ## @@ -216,7 
+232,7 @@ proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[ proc handlePayment( network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState -) {.async.} = +) {.async: (raises: []).} = ## Handle payment ## @@ -225,7 +241,7 @@ proc handlePayment( proc rpcHandler( b: BlockExcNetwork, peer: NetworkPeer, msg: Message -) {.async: (raises: [CatchableError]).} = +) {.async: (raises: []).} = ## handle rpc messages ## if msg.wantList.entries.len > 0: @@ -250,7 +266,9 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = if peer in b.peers: return b.peers.getOrDefault(peer, nil) - var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} = + var getConn: ConnProvider = proc(): Future[Connection] {. + async: (raises: [CancelledError]) + .} = try: trace "Getting new connection stream", peer return await b.switch.dial(peer, Codec) @@ -262,9 +280,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = if not isNil(b.getConn): getConn = b.getConn - let rpcHandler = proc( - p: NetworkPeer, msg: Message - ) {.async: (raises: [CatchableError]).} = + let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} = await b.rpcHandler(p, msg) # create new pubsub peer @@ -353,26 +369,32 @@ proc new*( wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave) proc sendWantCancellations( id: PeerId, addresses: seq[BlockAddress] - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendWantCancellations(id, addresses) proc sendBlocksDelivery( id: PeerId, blocksDelivery: seq[BlockDelivery] - ): Future[void] {.gcsafe.} = + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendBlocksDelivery(id, blocksDelivery) - proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} = + proc sendPresence( + id: PeerId, presence: seq[BlockPresence] + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendBlockPresence(id, presence) - proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} = + proc sendAccount( + id: PeerId, account: Account + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendAccount(id, account) - proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} = + proc sendPayment( + id: PeerId, payment: SignedState + ): Future[void] {.async: (raw: true, raises: [CancelledError]).} = self.sendPayment(id, payment) self.request = BlockExcRequest( diff --git a/codex/blockexchange/network/networkpeer.nim b/codex/blockexchange/network/networkpeer.nim index 4a100340..66c39294 100644 --- a/codex/blockexchange/network/networkpeer.nim +++ b/codex/blockexchange/network/networkpeer.nim @@ -7,9 +7,7 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. 
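Editor's aside: the sender types above were converted to `async: (raw: true, raises: [CancelledError])`. A short sketch of what `raw: true` changes (editor's illustration; `ping` and `pingWrapper` are hypothetical names): in raw mode chronos skips the async transform, so the body cannot `await` and must itself produce a future, which lets a one-line wrapper forward an inner future without allocating a second one.

```nim
import pkg/chronos

proc ping(): Future[void] {.async: (raises: [CancelledError]).} =
  await sleepAsync(1.millis)

# raw: true - no implicit future is created and no await is allowed;
# the wrapper simply returns the future produced by ping()
proc pingWrapper(): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
  ping()
```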
-import pkg/upraises -push: - {.upraises: [].} +{.push raises: [].} import pkg/chronos import pkg/libp2p @@ -18,6 +16,7 @@ import ../protobuf/blockexc import ../protobuf/message import ../../errors import ../../logutils +import ../../utils/trackedfutures logScope: topics = "codex blockexcnetworkpeer" @@ -25,11 +24,10 @@ logScope: const DefaultYieldInterval = 50.millis type - ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.} + ConnProvider* = + proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).} - RPCHandler* = proc( - peer: NetworkPeer, msg: Message - ): Future[void].Raising(CatchableError) {.gcsafe.} + RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).} NetworkPeer* = ref object of RootObj id*: PeerId @@ -37,55 +35,60 @@ type sendConn: Connection getConn: ConnProvider yieldInterval*: Duration = DefaultYieldInterval + trackedFutures: TrackedFutures -proc connected*(b: NetworkPeer): bool = - not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof) +proc connected*(self: NetworkPeer): bool = + not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof) -proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} = +proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} = if isNil(conn): - trace "No connection to read from", peer = b.id + trace "No connection to read from", peer = self.id return - trace "Attaching read loop", peer = b.id, connId = conn.oid + trace "Attaching read loop", peer = self.id, connId = conn.oid try: - var nextYield = Moment.now() + b.yieldInterval + var nextYield = Moment.now() + self.yieldInterval while not conn.atEof or not conn.closed: if Moment.now() > nextYield: - nextYield = Moment.now() + b.yieldInterval + nextYield = Moment.now() + self.yieldInterval trace "Yielding in read loop", - peer = b.id, nextYield = nextYield, interval = b.yieldInterval + peer = self.id, nextYield = nextYield, interval = self.yieldInterval await sleepAsync(10.millis) let data = await conn.readLp(MaxMessageSize.int) msg = Message.protobufDecode(data).mapFailure().tryGet() - trace "Received message", peer = b.id, connId = conn.oid - await b.handler(b, msg) + trace "Received message", peer = self.id, connId = conn.oid + await self.handler(self, msg) except CancelledError: trace "Read loop cancelled" except CatchableError as err: warn "Exception in blockexc read loop", msg = err.msg finally: - trace "Detaching read loop", peer = b.id, connId = conn.oid + trace "Detaching read loop", peer = self.id, connId = conn.oid await conn.close() -proc connect*(b: NetworkPeer): Future[Connection] {.async.} = - if b.connected: - trace "Already connected", peer = b.id, connId = b.sendConn.oid - return b.sendConn +proc connect*( + self: NetworkPeer +): Future[Connection] {.async: (raises: [CancelledError]).} = + if self.connected: + trace "Already connected", peer = self.id, connId = self.sendConn.oid + return self.sendConn - b.sendConn = await b.getConn() - asyncSpawn b.readLoop(b.sendConn) - return b.sendConn + self.sendConn = await self.getConn() + self.trackedFutures.track(self.readLoop(self.sendConn)) + return self.sendConn -proc send*(b: NetworkPeer, msg: Message) {.async.} = - let conn = await b.connect() +proc send*( + self: NetworkPeer, msg: Message +) {.async: (raises: [CancelledError, LPStreamError]).} = + let conn = await self.connect() if isNil(conn): - warn "Unable to get send connection for peer message not sent", peer = b.id + warn "Unable to get send connection for 
peer message not sent", peer = self.id return - trace "Sending message", peer = b.id, connId = conn.oid + trace "Sending message", peer = self.id, connId = conn.oid await conn.writeLp(protobufEncode(msg)) func new*( @@ -96,4 +99,9 @@ func new*( ): NetworkPeer = doAssert(not isNil(connProvider), "should supply connection provider") - NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler) + NetworkPeer( + id: peer, + getConn: connProvider, + handler: rpcHandler, + trackedFutures: TrackedFutures(), + ) diff --git a/codex/blockexchange/peers/peerctxstore.nim b/codex/blockexchange/peers/peerctxstore.nim index 739d92b5..ce2506a8 100644 --- a/codex/blockexchange/peers/peerctxstore.nim +++ b/codex/blockexchange/peers/peerctxstore.nim @@ -7,16 +7,13 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. +{.push raises: [].} + import std/sequtils import std/tables import std/algorithm import std/sequtils -import pkg/upraises - -push: - {.upraises: [].} - import pkg/chronos import pkg/libp2p diff --git a/codex/blockexchange/protobuf/payments.nim b/codex/blockexchange/protobuf/payments.nim index 5d010a81..885562c4 100644 --- a/codex/blockexchange/protobuf/payments.nim +++ b/codex/blockexchange/protobuf/payments.nim @@ -1,8 +1,9 @@ +{.push raises: [].} + import pkg/stew/byteutils import pkg/stint import pkg/nitro import pkg/questionable -import pkg/upraises import ./blockexc export AccountMessage @@ -11,9 +12,6 @@ export StateChannelUpdate export stint export nitro -push: - {.upraises: [].} - type Account* = object address*: EthAddress diff --git a/codex/blockexchange/protobuf/presence.nim b/codex/blockexchange/protobuf/presence.nim index d941746d..3b24a570 100644 --- a/codex/blockexchange/protobuf/presence.nim +++ b/codex/blockexchange/protobuf/presence.nim @@ -1,8 +1,9 @@ +{.push raises: [].} + import libp2p import pkg/stint import pkg/questionable import pkg/questionable/results -import pkg/upraises import ./blockexc import ../../blocktype @@ -11,9 +12,6 @@ export questionable export stint export BlockPresenceType -upraises.push: - {.upraises: [].} - type PresenceMessage* = blockexc.BlockPresence Presence* = object diff --git a/codex/chunker.nim b/codex/chunker.nim index f735aa4b..908dd0c0 100644 --- a/codex/chunker.nim +++ b/codex/chunker.nim @@ -28,8 +28,11 @@ const DefaultChunkSize* = DefaultBlockSize type # default reader type + ChunkerError* = object of CatchableError ChunkBuffer* = ptr UncheckedArray[byte] - Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.gcsafe, raises: [Defect].} + Reader* = proc(data: ChunkBuffer, len: int): Future[int] {. 
+ gcsafe, async: (raises: [ChunkerError, CancelledError]) + .} # Reader that splits input data into fixed-size chunks Chunker* = ref object @@ -74,7 +77,7 @@ proc new*( proc reader( data: ChunkBuffer, len: int - ): Future[int] {.gcsafe, async, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = var res = 0 try: while res < len: @@ -85,7 +88,7 @@ proc new*( raise error except LPStreamError as error: error "LPStream error", err = error.msg - raise error + raise newException(ChunkerError, "LPStream error", error) except CatchableError as exc: error "CatchableError exception", exc = exc.msg raise newException(Defect, exc.msg) @@ -102,7 +105,7 @@ proc new*( proc reader( data: ChunkBuffer, len: int - ): Future[int] {.gcsafe, async, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = var total = 0 try: while total < len: diff --git a/codex/codex.nim b/codex/codex.nim index 8a03510c..391a94fc 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -177,14 +177,20 @@ proc start*(s: CodexServer) {.async.} = proc stop*(s: CodexServer) {.async.} = notice "Stopping codex node" - await allFuturesThrowing( - s.restServer.stop(), - s.codexNode.switch.stop(), - s.codexNode.stop(), - s.repoStore.stop(), - s.maintenance.stop(), + let res = await noCancel allFinishedFailed( + @[ + s.restServer.stop(), + s.codexNode.switch.stop(), + s.codexNode.stop(), + s.repoStore.stop(), + s.maintenance.stop(), + ] ) + if res.failure.len > 0: + error "Failed to stop codex node", failures = res.failure.len + raiseAssert "Failed to stop codex node" + proc new*( T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey ): CodexServer = diff --git a/codex/contracts/clock.nim b/codex/contracts/clock.nim index b5bf7ebb..b7863539 100644 --- a/codex/contracts/clock.nim +++ b/codex/contracts/clock.nim @@ -5,6 +5,7 @@ import pkg/chronos import pkg/stint import ../clock import ../conf +import ../utils/trackedfutures export clock @@ -18,9 +19,12 @@ type OnChainClock* = ref object of Clock blockNumber: UInt256 started: bool newBlock: AsyncEvent + trackedFutures: TrackedFutures proc new*(_: type OnChainClock, provider: Provider): OnChainClock = - OnChainClock(provider: provider, newBlock: newAsyncEvent()) + OnChainClock( + provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures() + ) proc update(clock: OnChainClock, blck: Block) = if number =? blck.number and number > clock.blockNumber: @@ -32,15 +36,12 @@ proc update(clock: OnChainClock, blck: Block) = blockTime = blck.timestamp, blockNumber = number, offset = clock.offset clock.newBlock.fire() -proc update(clock: OnChainClock) {.async.} = +proc update(clock: OnChainClock) {.async: (raises: []).} = try: if latest =? 
(await clock.provider.getBlock(BlockTag.latest)): clock.update(latest) - except CancelledError as error: - raise error except CatchableError as error: debug "error updating clock: ", error = error.msg - discard method start*(clock: OnChainClock) {.async.} = if clock.started: @@ -52,7 +53,7 @@ method start*(clock: OnChainClock) {.async.} = return # ignore block parameter; hardhat may call this with pending blocks - asyncSpawn clock.update() + clock.trackedFutures.track(clock.update()) await clock.update() @@ -64,6 +65,7 @@ method stop*(clock: OnChainClock) {.async.} = return await clock.subscription.unsubscribe() + await clock.trackedFutures.cancelTracked() clock.started = false method now*(clock: OnChainClock): SecondsSince1970 = diff --git a/codex/discovery.nim b/codex/discovery.nim index 9aa8c7d8..eed1f89b 100644 --- a/codex/discovery.nim +++ b/codex/discovery.nim @@ -7,6 +7,8 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. +{.push raises: [].} + import std/algorithm import std/sequtils @@ -54,70 +56,122 @@ proc toNodeId*(host: ca.Address): NodeId = readUintBE[256](keccak256.digest(host.toArray).data) -proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} = +proc findPeer*( + d: Discovery, peerId: PeerId +): Future[?PeerRecord] {.async: (raises: [CancelledError]).} = trace "protocol.resolve..." ## Find peer using the given Discovery object ## - let node = await d.protocol.resolve(toNodeId(peerId)) - return - if node.isSome(): - node.get().record.data.some - else: - PeerRecord.none + try: + let node = await d.protocol.resolve(toNodeId(peerId)) -method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} = + return + if node.isSome(): + node.get().record.data.some + else: + PeerRecord.none + except CancelledError as exc: + warn "Error finding peer", peerId = peerId, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error finding peer", peerId = peerId, exc = exc.msg + + return PeerRecord.none + +method find*( + d: Discovery, cid: Cid +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} = ## Find block providers ## - without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error: - warn "Error finding providers for block", cid, error = error.msg - return providers.filterIt(not (it.data.peerId == d.peerId)) + try: + without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, + error: + warn "Error finding providers for block", cid, error = error.msg -method provide*(d: Discovery, cid: Cid) {.async, base.} = + return providers.filterIt(not (it.data.peerId == d.peerId)) + except CancelledError as exc: + warn "Error finding providers for block", cid, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error finding providers for block", cid, exc = exc.msg + +method provide*(d: Discovery, cid: Cid) {.async: (raises: [CancelledError]), base.} = ## Provide a block Cid ## - let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get) + try: + let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get) - if nodes.len <= 0: - warn "Couldn't provide to any nodes!" + if nodes.len <= 0: + warn "Couldn't provide to any nodes!" 
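  # Editorial note: CancelledError is chronos' signal that the caller cancelled
  # this future; swallowing it would make a cancelled call look like a normal
  # completion and stop cancellation from propagating. Hence the handlers below
  # (and the other discovery methods in this file) re-raise CancelledError and
  # only log-and-swallow the remaining CatchableErrors, matching the declared
  # `raises: [CancelledError]` signature.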
+ except CancelledError as exc: + warn "Error providing block", cid, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error providing block", cid, exc = exc.msg method find*( d: Discovery, host: ca.Address -): Future[seq[SignedPeerRecord]] {.async, base.} = +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} = ## Find host providers ## - trace "Finding providers for host", host = $host - without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure, - error: - trace "Error finding providers for host", host = $host, exc = error.msg - return + try: + trace "Finding providers for host", host = $host + without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure, + error: + trace "Error finding providers for host", host = $host, exc = error.msg + return - if providers.len <= 0: - trace "No providers found", host = $host - return + if providers.len <= 0: + trace "No providers found", host = $host + return - providers.sort do(a, b: SignedPeerRecord) -> int: - system.cmp[uint64](a.data.seqNo, b.data.seqNo) + providers.sort do(a, b: SignedPeerRecord) -> int: + system.cmp[uint64](a.data.seqNo, b.data.seqNo) - return providers + return providers + except CancelledError as exc: + warn "Error finding providers for host", host = $host, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error finding providers for host", host = $host, exc = exc.msg -method provide*(d: Discovery, host: ca.Address) {.async, base.} = +method provide*( + d: Discovery, host: ca.Address +) {.async: (raises: [CancelledError]), base.} = ## Provide hosts ## - trace "Providing host", host = $host - let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get) - if nodes.len > 0: - trace "Provided to nodes", nodes = nodes.len + try: + trace "Providing host", host = $host + let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get) + if nodes.len > 0: + trace "Provided to nodes", nodes = nodes.len + except CancelledError as exc: + warn "Error providing host", host = $host, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error providing host", host = $host, exc = exc.msg -method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} = +method removeProvider*( + d: Discovery, peerId: PeerId +): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} = ## Remove provider from providers table ## trace "Removing provider", peerId - d.protocol.removeProvidersLocal(peerId) + try: + await d.protocol.removeProvidersLocal(peerId) + except CancelledError as exc: + warn "Error removing provider", peerId = peerId, exc = exc.msg + raise exc + except CatchableError as exc: + warn "Error removing provider", peerId = peerId, exc = exc.msg + except Exception as exc: # Something in discv5 is raising Exception + warn "Error removing provider", peerId = peerId, exc = exc.msg + raiseAssert("Unexpected Exception in removeProvider") proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) = ## Update providers record @@ -145,12 +199,18 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) = if not d.protocol.isNil: d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR") -proc start*(d: Discovery) {.async.} = - d.protocol.open() - await d.protocol.start() +proc start*(d: Discovery) {.async: (raises: []).} = + try: + d.protocol.open() + await d.protocol.start() + except CatchableError as exc: + error "Error starting 
discovery", exc = exc.msg -proc stop*(d: Discovery) {.async.} = - await d.protocol.closeWait() +proc stop*(d: Discovery) {.async: (raises: []).} = + try: + await noCancel d.protocol.closeWait() + except CatchableError as exc: + error "Error stopping discovery", exc = exc.msg proc new*( T: type Discovery, diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim index 107f85bc..78ce3971 100644 --- a/codex/erasure/erasure.nim +++ b/codex/erasure/erasure.nim @@ -330,7 +330,7 @@ proc encodeAsync*( defer: freeDoubleArray(blockData, blocksLen) - ## Create an ecode task with block data + ## Create an ecode task with block data var task = EncodeTask( erasure: addr self, blockSize: blockSize, @@ -540,7 +540,7 @@ proc decodeAsync*( freeDoubleArray(blocksData, blocksLen) freeDoubleArray(parityData, parityLen) - ## Create an decode task with block data + ## Create an decode task with block data var task = DecodeTask( erasure: addr self, blockSize: blockSize, diff --git a/codex/errors.nim b/codex/errors.nim index 75cefde4..fadf7299 100644 --- a/codex/errors.nim +++ b/codex/errors.nim @@ -19,6 +19,8 @@ type CodexError* = object of CatchableError # base codex error CodexResult*[T] = Result[T, ref CodexError] + FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]] + template mapFailure*[T, V, E]( exp: Result[T, V], exc: typedesc[E] ): Result[T, ref CatchableError] = @@ -40,35 +42,18 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} = else: T.failure("Option is None") -# allFuturesThrowing was moved to the tests in libp2p -proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] = - var futs: seq[Future[T]] - for fut in args: - futs &= fut - proc call() {.async.} = - var first: ref CatchableError = nil - futs = await allFinished(futs) - for fut in futs: - if fut.failed: - let err = fut.readError() - if err of Defect: - raise err - else: - if err of CancelledError: - raise err - if isNil(first): - first = err - if not isNil(first): - raise first +proc allFinishedFailed*[T](futs: seq[Future[T]]): Future[FinishedFailed[T]] {.async.} = + ## Check if all futures have finished or failed + ## + ## TODO: wip, not sure if we want this - at the minimum, + ## we should probably avoid the async transform - return call() + var res: FinishedFailed[T] = (@[], @[]) + await allFutures(futs) + for f in futs: + if f.failed: + res.failure.add f + else: + res.success.add f -proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} = - try: - await allFuturesThrowing(fut) - except CancelledError as exc: - raise exc - except CatchableError as exc: - return failure(exc.msg) - - return success() + return res diff --git a/codex/node.nim b/codex/node.nim index b248e6df..203e034a 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -153,7 +153,11 @@ proc updateExpiry*( let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt( self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry) ) - await allFuturesThrowing(ensuringFutures) + + let res = await allFinishedFailed(ensuringFutures) + if res.failure.len > 0: + trace "Some blocks failed to update expiry", len = res.failure.len + return failure("Some blocks failed to update expiry (" & $res.failure.len & " )") except CancelledError as exc: raise exc except CatchableError as exc: @@ -186,8 +190,10 @@ proc fetchBatched*( if not (await address in self.networkStore) or fetchLocal: self.networkStore.getBlock(address) - if blocksErr =? 
(await allFutureResult(blocks)).errorOption: - return failure(blocksErr) + let res = await allFinishedFailed(blocks) + if res.failure.len > 0: + trace "Some blocks failed to fetch", len = res.failure.len + return failure("Some blocks failed to fetch (" & $res.failure.len & " )") if not onBatch.isNil and batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption: @@ -213,6 +219,30 @@ proc fetchBatched*( let iter = Iter[int].new(0 ..< manifest.blocksCount) self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal) +proc fetchDatasetAsync*( + self: CodexNodeRef, manifest: Manifest, fetchLocal = true +): Future[void] {.async: (raises: []).} = + ## Asynchronously fetch a dataset in the background. + ## This task will be tracked and cleaned up on node shutdown. + ## + try: + if err =? ( + await self.fetchBatched( + manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal + ) + ).errorOption: + error "Unable to fetch blocks", err = err.msg + except CancelledError as exc: + trace "Cancelled fetching blocks", exc = exc.msg + except CatchableError as exc: + error "Error fetching blocks", exc = exc.msg + +proc fetchDatasetAsyncTask*(self: CodexNodeRef, manifest: Manifest) = + ## Start fetching a dataset in the background. + ## The task will be tracked and cleaned up on node shutdown. + ## + self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false)) + proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} = ## Streams the contents of a single block. ## @@ -223,36 +253,27 @@ proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err: return failure(err) - proc streamOneBlock(): Future[void] {.async.} = + proc streamOneBlock(): Future[void] {.async: (raises: []).} = try: + defer: + await stream.pushEof() await stream.pushData(blk.data) except CatchableError as exc: trace "Unable to send block", cid, exc = exc.msg - discard - finally: - await stream.pushEof() self.trackedFutures.track(streamOneBlock()) LPStream(stream).success proc streamEntireDataset( - self: CodexNodeRef, - manifest: Manifest, - manifestCid: Cid, - prefetchBatch = DefaultFetchBatch, + self: CodexNodeRef, manifest: Manifest, manifestCid: Cid ): Future[?!LPStream] {.async.} = ## Streams the contents of the entire dataset described by the manifest. - ## Background jobs (erasure decoding and prefetching) will be cancelled when - ## the stream is closed. ## trace "Retrieving blocks from manifest", manifestCid - let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false)) - var jobs: seq[Future[void]] - if manifest.protected: # Retrieve, decode and save to the local store all EС groups - proc erasureJob(): Future[void] {.async.} = + proc erasureJob(): Future[void] {.async: (raises: []).} = try: # Spawn an erasure decoding job let erasure = Erasure.new( @@ -260,36 +281,17 @@ proc streamEntireDataset( ) without _ =? (await erasure.decode(manifest)), error: error "Unable to erasure decode manifest", manifestCid, exc = error.msg - except CancelledError: - trace "Erasure job cancelled", manifestCid except CatchableError as exc: trace "Error erasure decoding manifest", manifestCid, exc = exc.msg - jobs.add(erasureJob()) + self.trackedFutures.track(erasureJob()) - proc prefetch(): Future[void] {.async.} = - try: - if err =? 
- (await self.fetchBatched(manifest, prefetchBatch, fetchLocal = false)).errorOption: - error "Unable to fetch blocks", err = err.msg - except CancelledError: - trace "Prefetch job cancelled" - except CatchableError as exc: - error "Error fetching blocks", exc = exc.msg - - jobs.add(prefetch()) - - # Monitor stream completion and cancel background jobs when done - proc monitorStream() {.async.} = - try: - await stream.join() - finally: - await allFutures(jobs.mapIt(it.cancelAndWait)) - - self.trackedFutures.track(monitorStream()) + self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false)) + # prefetch task should not fetch from local store + # Retrieve all blocks of the dataset sequentially from the local store or network trace "Creating store stream for manifest", manifestCid - stream.success + LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success proc retrieve*( self: CodexNodeRef, cid: Cid, local: bool = true @@ -632,8 +634,11 @@ proc onStore( let ensureExpiryFutures = blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970)) - if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption: - return failure(updateExpiryErr) + + let res = await allFinishedFailed(ensureExpiryFutures) + if res.failure.len > 0: + trace "Some blocks failed to update expiry", len = res.failure.len + return failure("Some blocks failed to update expiry (" & $res.failure.len & " )") if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption: trace "Unable to process blocks", err = err.msg diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 7cb0b43f..054e1c2b 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -315,15 +315,8 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute error "Failed to fetch manifest", err = err.msg return RestApiResponse.error(Http404, err.msg, headers = headers) - proc fetchDatasetAsync(): Future[void] {.async.} = - try: - if err =? (await node.fetchBatched(manifest)).errorOption: - error "Unable to fetch dataset", cid = cid.get(), err = err.msg - except CatchableError as exc: - error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg - discard - - asyncSpawn fetchDatasetAsync() + # Start fetching the dataset in the background + node.fetchDatasetAsyncTask(manifest) let json = %formatManifest(cid.get(), manifest) return RestApiResponse.response($json, contentType = "application/json") diff --git a/codex/sales.nim b/codex/sales.nim index e2a884df..998a2967 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -341,48 +341,51 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) = trace "slot freed, adding to queue" - proc addSlotToQueue() {.async: (raises: [CancelledError]).} = + proc addSlotToQueue() {.async: (raises: []).} = let context = sales.context let market = context.market let queue = context.slotQueue - without request =? (await market.getRequest(requestId)), err: - error "unknown request in contract", error = err.msgDetail - return + try: + without request =? (await market.getRequest(requestId)), err: + error "unknown request in contract", error = err.msgDetail + return - # Take the repairing state into consideration to calculate the collateral. - # This is particularly needed because it will affect the priority in the queue - # and we want to give the user the ability to tweak the parameters. 
-    # Adding the repairing state directly in the queue priority calculation
-    # would not allow this flexibility.
-    without collateral =?
-      market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
-      error "Failed to add freed slot to queue: unable to calculate collateral",
-        error = err.msg
-      return
+      # Take the repairing state into consideration to calculate the collateral.
+      # This is particularly needed because it will affect the priority in the queue
+      # and we want to give the user the ability to tweak the parameters.
+      # Adding the repairing state directly in the queue priority calculation
+      # would not allow this flexibility.
+      without collateral =?
+        market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
+        error "Failed to add freed slot to queue: unable to calculate collateral",
+          error = err.msg
+        return
 
-    if slotIndex > uint16.high.uint64:
-      error "Cannot cast slot index to uint16, value = ", slotIndex
-      return
+      if slotIndex > uint16.high.uint64:
+        error "Cannot cast slot index to uint16, value = ", slotIndex
+        return
 
-    without slotQueueItem =?
-      SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch, err:
-      warn "Too many slots, cannot add to queue", error = err.msgDetail
-      return
+      without slotQueueItem =?
+        SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch,
+        err:
+        warn "Too many slots, cannot add to queue", error = err.msgDetail
+        return
 
-    if err =? queue.push(slotQueueItem).errorOption:
-      if err of SlotQueueItemExistsError:
-        error "Failed to push item to queue becaue it already exists",
-          error = err.msgDetail
-      elif err of QueueNotRunningError:
-        warn "Failed to push item to queue becaue queue is not running",
-          error = err.msgDetail
+      if err =? queue.push(slotQueueItem).errorOption:
+        if err of SlotQueueItemExistsError:
+          error "Failed to push item to queue because it already exists",
+            error = err.msgDetail
+        elif err of QueueNotRunningError:
+          warn "Failed to push item to queue because queue is not running",
+            error = err.msgDetail
+    except CatchableError as e:
+      warn "Failed to add slot to queue", error = e.msg
 
   # We could get rid of this by adding the storage ask in the SlotFreed event,
   # so we would not need to call getRequest to get the collateralPerSlot.
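  # Editorial note: `addSlotToQueue` is typed `async: (raises: [])`, so the
  # `asyncSpawn fut` removed just below has nothing left to do - its purpose
  # was to turn stray exceptions into Defects. `trackedFutures.track(fut)`
  # alone now keeps a handle to the future so it can be cancelled and awaited
  # on shutdown via `cancelTracked()`.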
let fut = addSlotToQueue() sales.trackedFutures.track(fut) - asyncSpawn fut proc subscribeRequested(sales: Sales) {.async.} = let context = sales.context @@ -522,7 +525,9 @@ proc startSlotQueue(sales: Sales) = let slotQueue = sales.context.slotQueue let reservations = sales.context.reservations - slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = + slotQueue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex sales.processSlot(item, done) diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index e6328a83..f0abf3ee 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -103,7 +103,6 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} = error "Error while waiting for expiry to lapse", error = e.msgDetail data.cancelled = onCancelled() - asyncSpawn data.cancelled method onFulfilled*( agent: SalesAgent, requestId: RequestId diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim index fa57a983..60700d44 100644 --- a/codex/sales/slotqueue.nim +++ b/codex/sales/slotqueue.nim @@ -3,7 +3,6 @@ import std/tables import pkg/chronos import pkg/questionable import pkg/questionable/results -import pkg/upraises import ../errors import ../clock import ../logutils @@ -17,8 +16,9 @@ logScope: topics = "marketplace slotqueue" type - OnProcessSlot* = - proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].} + OnProcessSlot* = proc(item: SlotQueueItem, done: Future[void]): Future[void] {. + gcsafe, async: (raises: []) + .} # Non-ref obj copies value when assigned, preventing accidental modification # of values which could cause an incorrect order (eg @@ -26,7 +26,7 @@ type # but the heap invariant would no longer be honoured. When non-ref, the # compiler can ensure that statement will fail). SlotQueueWorker = object - doneProcessing*: Future[void] + doneProcessing*: Future[void].Raising([]) SlotQueueItem* = object requestId: RequestId @@ -126,7 +126,17 @@ proc new*( # `newAsyncQueue` procedure proc init(_: type SlotQueueWorker): SlotQueueWorker = - SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing")) + let workerFut = Future[void].Raising([]).init( + "slotqueue.worker.processing", {FutureFlag.OwnCancelSchedule} + ) + + workerFut.cancelCallback = proc(data: pointer) {.raises: [].} = + # this is equivalent to try: ... except CatchableError: ... + if not workerFut.finished: + workerFut.complete() + trace "Cancelling `SlotQueue` worker processing future" + + SlotQueueWorker(doneProcessing: workerFut) proc init*( _: type SlotQueueItem, @@ -419,7 +429,6 @@ proc run(self: SlotQueue) {.async: (raises: []).} = let fut = self.dispatch(worker, item) self.trackedFutures.track(fut) - asyncSpawn fut await sleepAsync(1.millis) # poll except CancelledError: @@ -447,7 +456,6 @@ proc start*(self: SlotQueue) = let fut = self.run() self.trackedFutures.track(fut) - asyncSpawn fut proc stop*(self: SlotQueue) {.async.} = if not self.running: diff --git a/codex/slots/builder/builder.nim b/codex/slots/builder/builder.nim index 30332f1c..1ea57a0f 100644 --- a/codex/slots/builder/builder.nim +++ b/codex/slots/builder/builder.nim @@ -315,13 +315,15 @@ proc new*[T, H]( cellSize = cellSize if (manifest.blocksCount mod manifest.numSlots) != 0: - trace "Number of blocks must be divisable by number of slots." 
- return failure("Number of blocks must be divisable by number of slots.") + const msg = "Number of blocks must be divisible by number of slots." + trace msg + return failure(msg) let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize if (manifest.blockSize mod cellSize) != 0.NBytes: - trace "Block size must be divisable by cell size." - return failure("Block size must be divisable by cell size.") + const msg = "Block size must be divisible by cell size." + trace msg + return failure(msg) let numSlotBlocks = manifest.numSlotBlocks diff --git a/codex/slots/proofs/prover.nim b/codex/slots/proofs/prover.nim index 36fc0a05..b1aa77c0 100644 --- a/codex/slots/proofs/prover.nim +++ b/codex/slots/proofs/prover.nim @@ -38,7 +38,9 @@ type AnyProof* = CircomProof AnySampler* = Poseidon2Sampler + # add any other generic type here, eg. Poseidon2Sampler | ReinforceConcreteSampler AnyBuilder* = Poseidon2Builder + # add any other generic type here, eg. Poseidon2Builder | ReinforceConcreteBuilder AnyProofInputs* = ProofInputs[Poseidon2Hash] Prover* = ref object of RootObj diff --git a/codex/streams/asyncstreamwrapper.nim b/codex/streams/asyncstreamwrapper.nim index 6d5e703a..6708816d 100644 --- a/codex/streams/asyncstreamwrapper.nim +++ b/codex/streams/asyncstreamwrapper.nim @@ -57,6 +57,8 @@ template withExceptions(body: untyped) = raise newLPStreamEOFError() except AsyncStreamError as exc: raise newException(LPStreamError, exc.msg) + except CatchableError as exc: + raise newException(Defect, "Unexpected error in AsyncStreamWrapper", exc) method readOnce*( self: AsyncStreamWrapper, pbytes: pointer, nbytes: int @@ -74,11 +76,13 @@ method readOnce*( proc completeWrite( self: AsyncStreamWrapper, fut: Future[void], msgLen: int -): Future[void] {.async.} = +): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} = withExceptions: await fut -method write*(self: AsyncStreamWrapper, msg: seq[byte]): Future[void] = +method write*( + self: AsyncStreamWrapper, msg: seq[byte] +): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} = # Avoid a copy of msg being kept in the closure created by `{.async.}` as this # drives up memory usage diff --git a/codex/streams/storestream.nim b/codex/streams/storestream.nim index a68e2ea7..64a356de 100644 --- a/codex/streams/storestream.nim +++ b/codex/streams/storestream.nim @@ -67,13 +67,9 @@ method atEof*(self: StoreStream): bool = self.offset >= self.size type LPStreamReadError* = object of LPStreamError - par*: ref CatchableError proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError = - var w = newException(LPStreamReadError, "Read stream failed") - w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg - w.par = p - result = w + newException(LPStreamReadError, "Read stream failed", p) method readOnce*( self: StoreStream, pbytes: pointer, nbytes: int diff --git a/codex/utils/asyncstatemachine.nim b/codex/utils/asyncstatemachine.nim index 2d87ebc1..eb84378c 100644 --- a/codex/utils/asyncstatemachine.nim +++ b/codex/utils/asyncstatemachine.nim @@ -74,7 +74,6 @@ proc scheduler(machine: Machine) {.async: (raises: []).} = debug "enter state", state = fromState & " => " & $machine.state running = machine.run(machine.state) machine.trackedFutures.track(running) - asyncSpawn running except CancelledError: break # do not propagate bc it is asyncSpawned @@ -88,7 +87,6 @@ proc start*(machine: Machine, initialState: State) = machine.started = true let fut = machine.scheduler() 
machine.trackedFutures.track(fut) - asyncSpawn fut machine.schedule(Event.transition(machine.state, initialState)) proc stop*(machine: Machine) {.async.} = diff --git a/codex/utils/timer.nim b/codex/utils/timer.nim index 0a5a940a..5a9537cf 100644 --- a/codex/utils/timer.nim +++ b/codex/utils/timer.nim @@ -50,7 +50,6 @@ method start*( timer.callback = callback timer.interval = interval timer.loopFuture = timerLoop(timer) - asyncSpawn timer.loopFuture method stop*(timer: Timer) {.async, base.} = if timer.loopFuture != nil and not timer.loopFuture.finished: diff --git a/codex/utils/trackedfutures.nim b/codex/utils/trackedfutures.nim index eb3cc219..34007e08 100644 --- a/codex/utils/trackedfutures.nim +++ b/codex/utils/trackedfutures.nim @@ -5,9 +5,11 @@ import ../logutils {.push raises: [].} -type TrackedFutures* = ref object - futures: Table[uint, FutureBase] - cancelling: bool +type + TrackedFuture = Future[void].Raising([]) + TrackedFutures* = ref object + futures: Table[uint, TrackedFuture] + cancelling: bool logScope: topics = "trackable futures" @@ -15,15 +17,18 @@ logScope: proc len*(self: TrackedFutures): int = self.futures.len -proc removeFuture(self: TrackedFutures, future: FutureBase) = +proc removeFuture(self: TrackedFutures, future: TrackedFuture) = if not self.cancelling and not future.isNil: self.futures.del(future.id) -proc track*[T](self: TrackedFutures, fut: Future[T]) = +proc track*(self: TrackedFutures, fut: TrackedFuture) = if self.cancelling: return - self.futures[fut.id] = FutureBase(fut) + if fut.finished: + return + + self.futures[fut.id] = fut proc cb(udata: pointer) = self.removeFuture(fut) @@ -33,13 +38,8 @@ proc track*[T](self: TrackedFutures, fut: Future[T]) = proc cancelTracked*(self: TrackedFutures) {.async: (raises: []).} = self.cancelling = true - trace "cancelling tracked futures" - - var cancellations: seq[FutureBase] - for future in self.futures.values: - if not future.isNil and not future.finished: - cancellations.add future.cancelAndWait() - + trace "cancelling tracked futures", len = self.futures.len + let cancellations = self.futures.values.toSeq.mapIt(it.cancelAndWait()) await noCancel allFutures cancellations self.futures.clear() diff --git a/codex/validation.nim b/codex/validation.nim index 18a444a6..e6d74840 100644 --- a/codex/validation.nim +++ b/codex/validation.nim @@ -142,7 +142,6 @@ proc start*(validation: Validation) {.async.} = await validation.subscribeSlotFilled() await validation.restoreHistoricalState() validation.running = validation.run() - asyncSpawn validation.running proc stop*(validation: Validation) {.async.} = if not validation.running.isNil and not validation.running.finished: diff --git a/tests/asynctest.nim b/tests/asynctest.nim index 7c6a4afd..4db8277f 100644 --- a/tests/asynctest.nim +++ b/tests/asynctest.nim @@ -1,3 +1,3 @@ -import pkg/asynctest/chronos/unittest +import pkg/asynctest/chronos/unittest2 -export unittest +export unittest2 diff --git a/tests/codex/blockexchange/discovery/testdiscovery.nim b/tests/codex/blockexchange/discovery/testdiscovery.nim index 97a455e1..c54a1fff 100644 --- a/tests/codex/blockexchange/discovery/testdiscovery.nim +++ b/tests/codex/blockexchange/discovery/testdiscovery.nim @@ -84,12 +84,12 @@ asyncchecksuite "Block Advertising and Discovery": blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async, gcsafe.} = + ): Future[void] {.async: (raises: [CancelledError]).} = return blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: 
Cid - ): Future[seq[SignedPeerRecord]] {.async.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = await engine.resolveBlocks(blocks.filterIt(it.cid == cid)) await allFuturesThrowing(allFinished(pendingBlocks)) @@ -97,17 +97,17 @@ asyncchecksuite "Block Advertising and Discovery": await engine.stop() test "Should advertise trees": - let - cids = @[manifest.treeCid] - advertised = initTable.collect: - for cid in cids: - {cid: newFuture[void]()} + let cids = @[manifest.treeCid] + var advertised = initTable.collect: + for cid in cids: + {cid: newFuture[void]()} blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ) {.async.} = - if cid in advertised and not advertised[cid].finished(): - advertised[cid].complete() + ) {.async: (raises: [CancelledError]).} = + advertised.withValue(cid, fut): + if not fut[].finished: + fut[].complete() await engine.start() await allFuturesThrowing(allFinished(toSeq(advertised.values))) @@ -118,7 +118,7 @@ asyncchecksuite "Block Advertising and Discovery": blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ) {.async.} = + ) {.async: (raises: [CancelledError]).} = check: cid notin blockCids @@ -138,7 +138,7 @@ asyncchecksuite "Block Advertising and Discovery": blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check false await engine.start() @@ -221,17 +221,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[1].peerInfo.signedPeerRecord MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[2].peerInfo.signedPeerRecord MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[3].peerInfo.signedPeerRecord discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) @@ -266,23 +266,21 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async.} = - if cid in advertised: - result.add(advertised[cid]) + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = + advertised.withValue(cid, val): + result.add(val[]) let futs = collect(newSeq): for m in mBlocks[0 .. 
2]: blockexc[0].engine.requestBlock(m.cid) - await allFuturesThrowing( - switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) - ) - .wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds) await allFutures(futs).wait(10.seconds) - await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) - .wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds) test "E2E - Should advertise and discover blocks with peers already connected": # Distribute the blocks amongst 1..3 @@ -292,17 +290,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[1].peerInfo.signedPeerRecord MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[2].peerInfo.signedPeerRecord MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ): Future[void] {.async.} = + ) {.async: (raises: [CancelledError]).} = advertised[cid] = switch[3].peerInfo.signedPeerRecord discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) @@ -337,18 +335,16 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async.} = - if cid in advertised: - return @[advertised[cid]] + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = + advertised.withValue(cid, val): + return @[val[]] let futs = mBlocks[0 .. 
2].mapIt(blockexc[0].engine.requestBlock(it.cid)) - await allFuturesThrowing( - switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) - ) - .wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds) await allFutures(futs).wait(10.seconds) - await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) - .wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds) + await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds) diff --git a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim index 93704726..9efab1a6 100644 --- a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim +++ b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim @@ -68,7 +68,7 @@ asyncchecksuite "Test Discovery Engine": blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = pendingBlocks.resolve( blocks.filterIt(it.cid == cid).mapIt( BlockDelivery(blk: it, address: it.address) @@ -94,7 +94,7 @@ asyncchecksuite "Test Discovery Engine": blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check cid == blocks[0].cid if not want.finished: want.complete() @@ -122,7 +122,7 @@ asyncchecksuite "Test Discovery Engine": var pendingCids = newSeq[Cid]() blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check cid in pendingCids pendingCids.keepItIf(it != cid) check peerStore.len < minPeers @@ -159,12 +159,12 @@ asyncchecksuite "Test Discovery Engine": discoveryLoopSleep = 100.millis, concurrentDiscReqs = 2, ) - reqs = newFuture[void]() + reqs = Future[void].Raising([CancelledError]).init() count = 0 blockDiscovery.findBlockProvidersHandler = proc( d: MockDiscovery, cid: Cid - ): Future[seq[SignedPeerRecord]] {.gcsafe, async.} = + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = check cid == blocks[0].cid if count > 0: check false diff --git a/tests/codex/blockexchange/engine/testadvertiser.nim b/tests/codex/blockexchange/engine/testadvertiser.nim index 157564d6..83a70f65 100644 --- a/tests/codex/blockexchange/engine/testadvertiser.nim +++ b/tests/codex/blockexchange/engine/testadvertiser.nim @@ -34,7 +34,7 @@ asyncchecksuite "Advertiser": advertised = newSeq[Cid]() blockDiscovery.publishBlockProvideHandler = proc( d: MockDiscovery, cid: Cid - ) {.async, gcsafe.} = + ) {.async: (raises: [CancelledError]), gcsafe.} = advertised.add(cid) advertiser = Advertiser.new(localStore, blockDiscovery) diff --git a/tests/codex/blockexchange/engine/testengine.nim b/tests/codex/blockexchange/engine/testengine.nim index cc5511e8..0541c119 100644 --- a/tests/codex/blockexchange/engine/testengine.nim +++ b/tests/codex/blockexchange/engine/testengine.nim @@ -22,7 +22,7 @@ import ../../examples const NopSendWantCancellationsProc = proc( id: PeerId, addresses: seq[BlockAddress] -) {.gcsafe, async.} = +) {.async: (raises: [CancelledError]).} = discard asyncchecksuite "NetworkStore engine basic": @@ -66,20 +66,17 @@ 
asyncchecksuite "NetworkStore engine basic": wantType: WantType = WantType.WantHave, full: bool = false, sendDontHave: bool = false, - ) {.gcsafe, async.} = + ) {.async: (raises: [CancelledError]).} = check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt($it.cid).sorted done.complete() let network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList)) - localStore = CacheStore.new(blocks.mapIt(it)) discovery = DiscoveryEngine.new( localStore, peerStore, network, blockDiscovery, pendingBlocks ) - advertiser = Advertiser.new(localStore, blockDiscovery) - engine = BlockExcEngine.new( localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks ) @@ -93,7 +90,9 @@ asyncchecksuite "NetworkStore engine basic": test "Should send account to new peers": let pricing = Pricing.example - proc sendAccount(peer: PeerId, account: Account) {.gcsafe, async.} = + proc sendAccount( + peer: PeerId, account: Account + ) {.async: (raises: [CancelledError]).} = check account.address == pricing.address done.complete() @@ -186,7 +185,9 @@ asyncchecksuite "NetworkStore engine handlers": done = newFuture[void]() wantList = makeWantList(blocks.mapIt(it.cid)) - proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence( + peerId: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: [CancelledError]).} = check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) done.complete() @@ -203,7 +204,9 @@ asyncchecksuite "NetworkStore engine handlers": done = newFuture[void]() wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true) - proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence( + peerId: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: [CancelledError]).} = check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) for p in presence: check: @@ -222,7 +225,9 @@ asyncchecksuite "NetworkStore engine handlers": done = newFuture[void]() wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true) - proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc sendPresence( + peerId: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: [CancelledError]).} = for p in presence: if p.address.cidOrTreeCid != blocks[0].cid and p.address.cidOrTreeCid != blocks[1].cid: @@ -266,19 +271,21 @@ asyncchecksuite "NetworkStore engine handlers": peerContext.account = account.some peerContext.blocks = blocks.mapIt( - (it.address, Presence(address: it.address, price: rand(uint16).u256)) + (it.address, Presence(address: it.address, price: rand(uint16).u256, have: true)) ).toTable engine.network = BlockExcNetwork( request: BlockExcRequest( - sendPayment: proc(receiver: PeerId, payment: SignedState) {.gcsafe, async.} = + sendPayment: proc( + receiver: PeerId, payment: SignedState + ) {.async: (raises: [CancelledError]).} = let - amount = blocks.mapIt(peerContext.blocks[it.address].price).foldl(a + b) - + amount = + blocks.mapIt(peerContext.blocks[it.address].catch.get.price).foldl(a + b) balances = !payment.state.outcome.balances(Asset) check receiver == peerId - check balances[account.address.toDestination] == amount + check balances[account.address.toDestination].catch.get == amount done.complete(), # Install NOP for want list cancellations so they don't cause a crash @@ -286,10 +293,12 @@ asyncchecksuite "NetworkStore engine handlers": ) ) + let requestedBlocks = 
blocks.mapIt(engine.pendingBlocks.getWantHandle(it.address))
     await engine.blocksDeliveryHandler(
       peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address))
     )
     await done.wait(100.millis)
+    await allFuturesThrowing(requestedBlocks).wait(100.millis)
 
   test "Should handle block presence":
     var handles:
@@ -303,7 +312,7 @@ asyncchecksuite "NetworkStore engine handlers":
       wantType: WantType = WantType.WantHave,
       full: bool = false,
       sendDontHave: bool = false,
-    ) {.gcsafe, async.} =
+    ) {.async: (raises: [CancelledError]).} =
       engine.pendingBlocks.resolve(
         blocks.filterIt(it.address in addresses).mapIt(
           BlockDelivery(blk: it, address: it.address)
@@ -340,9 +349,9 @@ asyncchecksuite "NetworkStore engine handlers":
 
     proc sendWantCancellations(
         id: PeerId, addresses: seq[BlockAddress]
-    ) {.gcsafe, async.} =
+    ) {.async: (raises: [CancelledError]).} =
       for address in addresses:
-        cancellations[address].complete()
+        cancellations[address].catch.expect("address should exist").complete()
 
     engine.network = BlockExcNetwork(
       request: BlockExcRequest(sendWantCancellations: sendWantCancellations)
@@ -416,7 +425,7 @@ asyncchecksuite "Block Download":
       wantType: WantType = WantType.WantHave,
       full: bool = false,
       sendDontHave: bool = false,
-    ) {.gcsafe, async.} =
+    ) {.async: (raises: [CancelledError]).} =
       check wantType == WantHave
       check not engine.pendingBlocks.isInFlight(address)
       check engine.pendingBlocks.retries(address) == retries
@@ -433,7 +442,7 @@ asyncchecksuite "Block Download":
     discard (await pending).tryGet()
 
   test "Should retry block request":
-    let
+    var
       address = BlockAddress.init(blocks[0].cid)
       steps = newAsyncEvent()
@@ -445,7 +454,7 @@ asyncchecksuite "Block Download":
       wantType: WantType = WantType.WantHave,
       full: bool = false,
      sendDontHave: bool = false,
-    ) {.gcsafe, async.} =
+    ) {.async: (raises: [CancelledError]).} =
      case wantType
      of WantHave:
        check engine.pendingBlocks.isInFlight(address) == false
@@ -467,7 +476,7 @@ asyncchecksuite "Block Download":
     let pending = engine.requestBlock(address)
     await steps.wait()
 
-    # add blocks presence
+    # add blocks presence
     peerCtx.blocks = blocks.mapIt(
       (it.address, Presence(address: it.address, have: true, price: UInt256.example))
     ).toTable
@@ -493,7 +502,7 @@ asyncchecksuite "Block Download":
       wantType: WantType = WantType.WantHave,
       full: bool = false,
       sendDontHave: bool = false,
-    ) {.gcsafe, async.} =
+    ) {.async: (raises: [CancelledError]).} =
       done.complete()
 
     engine.pendingBlocks.blockRetries = 10
@@ -573,7 +582,7 @@ asyncchecksuite "Task Handler":
   test "Should send want-blocks in priority order":
     proc sendBlocksDelivery(
         id: PeerId, blocksDelivery: seq[BlockDelivery]
-    ) {.gcsafe, async.} =
+    ) {.async: (raises: [CancelledError]).} =
       check blocksDelivery.len == 2
       check:
         blocksDelivery[1].address == blocks[0].address
@@ -610,7 +619,7 @@ asyncchecksuite "Task Handler":
   test "Should set in-flight for outgoing blocks":
     proc sendBlocksDelivery(
         id: PeerId, blocksDelivery: seq[BlockDelivery]
-    ) {.gcsafe, async.} =
+    ) {.async: (raises: [CancelledError]).} =
       check peersCtx[0].peerWants[0].inFlight
 
     for blk in blocks:
@@ -649,7 +658,9 @@ asyncchecksuite "Task Handler":
     let missing = @[Block.new("missing".toBytes).tryGet()]
     let price = (!engine.pricing).price
 
-    proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
+    proc sendPresence(
+        id: PeerId, presence: seq[BlockPresence]
+    ) {.async: (raises: [CancelledError]).} =
       check presence.mapIt(!Presence.init(it)) ==
         @[
          Presence(address: present[0].address, have: true, price: price),
diff --git a/tests/codex/blockexchange/engine/testpayments.nim b/tests/codex/blockexchange/engine/testpayments.nim index 24d5dab6..e93cc837 100644 --- a/tests/codex/blockexchange/engine/testpayments.nim +++ b/tests/codex/blockexchange/engine/testpayments.nim @@ -1,10 +1,10 @@ -import std/unittest +import pkg/unittest2 import pkg/codex/stores import ../../examples import ../../helpers -checksuite "engine payments": +suite "Engine payments": let address = EthAddress.example let amount = 42.u256 diff --git a/tests/codex/blockexchange/protobuf/testpayments.nim b/tests/codex/blockexchange/protobuf/testpayments.nim index d0773d70..3ada0105 100644 --- a/tests/codex/blockexchange/protobuf/testpayments.nim +++ b/tests/codex/blockexchange/protobuf/testpayments.nim @@ -6,7 +6,7 @@ import ../../../asynctest import ../../examples import ../../helpers -checksuite "account protobuf messages": +suite "account protobuf messages": let account = Account(address: EthAddress.example) let message = AccountMessage.init(account) @@ -21,7 +21,7 @@ checksuite "account protobuf messages": incorrect.address.del(0) check Account.init(incorrect).isNone -checksuite "channel update messages": +suite "channel update messages": let state = SignedState.example let update = StateChannelUpdate.init(state) diff --git a/tests/codex/blockexchange/protobuf/testpresence.nim b/tests/codex/blockexchange/protobuf/testpresence.nim index 7e3b94e6..dc048c59 100644 --- a/tests/codex/blockexchange/protobuf/testpresence.nim +++ b/tests/codex/blockexchange/protobuf/testpresence.nim @@ -6,7 +6,7 @@ import ../../../asynctest import ../../examples import ../../helpers -checksuite "block presence protobuf messages": +suite "block presence protobuf messages": let cid = Cid.example address = BlockAddress(leaf: false, cid: cid) diff --git a/tests/codex/blockexchange/testnetwork.nim b/tests/codex/blockexchange/testnetwork.nim index 0fae4ffe..b9a51c9d 100644 --- a/tests/codex/blockexchange/testnetwork.nim +++ b/tests/codex/blockexchange/testnetwork.nim @@ -26,7 +26,7 @@ asyncchecksuite "Network - Handlers": blocks: seq[bt.Block] done: Future[void] - proc getConn(): Future[Connection] {.async.} = + proc getConn(): Future[Connection] {.async: (raises: [CancelledError]).} = return Connection(buffer) setup: @@ -45,7 +45,7 @@ asyncchecksuite "Network - Handlers": discard await networkPeer.connect() test "Want List handler": - proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} = + proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} = # check that we got the correct amount of entries check wantList.entries.len == 4 @@ -72,7 +72,7 @@ asyncchecksuite "Network - Handlers": test "Blocks Handler": proc blocksDeliveryHandler( peer: PeerId, blocksDelivery: seq[BlockDelivery] - ) {.gcsafe, async.} = + ) {.async: (raises: []).} = check blocks == blocksDelivery.mapIt(it.blk) done.complete() @@ -85,7 +85,9 @@ asyncchecksuite "Network - Handlers": await done.wait(500.millis) test "Presence Handler": - proc presenceHandler(peer: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = + proc presenceHandler( + peer: PeerId, presence: seq[BlockPresence] + ) {.async: (raises: []).} = for b in blocks: check: b.address in presence @@ -105,7 +107,7 @@ asyncchecksuite "Network - Handlers": test "Handles account messages": let account = Account(address: EthAddress.example) - proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} = + proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} = 
check received == account done.complete() @@ -119,7 +121,7 @@ asyncchecksuite "Network - Handlers": test "Handles payment messages": let payment = SignedState.example - proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} = + proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} = check received == payment done.complete() @@ -165,7 +167,7 @@ asyncchecksuite "Network - Senders": await allFuturesThrowing(switch1.stop(), switch2.stop()) test "Send want list": - proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} = + proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} = # check that we got the correct amount of entries check wantList.entries.len == 4 @@ -195,7 +197,7 @@ asyncchecksuite "Network - Senders": test "send blocks": proc blocksDeliveryHandler( peer: PeerId, blocksDelivery: seq[BlockDelivery] - ) {.gcsafe, async.} = + ) {.async: (raises: []).} = check blocks == blocksDelivery.mapIt(it.blk) done.complete() @@ -207,7 +209,9 @@ asyncchecksuite "Network - Senders": await done.wait(500.millis) test "send presence": - proc presenceHandler(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async.} = + proc presenceHandler( + peer: PeerId, precense: seq[BlockPresence] + ) {.async: (raises: []).} = for b in blocks: check: b.address in precense @@ -226,7 +230,7 @@ asyncchecksuite "Network - Senders": test "send account": let account = Account(address: EthAddress.example) - proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} = + proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} = check received == account done.complete() @@ -238,7 +242,7 @@ asyncchecksuite "Network - Senders": test "send payment": let payment = SignedState.example - proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} = + proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} = check received == payment done.complete() @@ -276,7 +280,7 @@ asyncchecksuite "Network - Test Limits": let account = Account(address: EthAddress.example) network2.handlers.onAccount = proc( peer: PeerId, received: Account - ) {.gcsafe, async.} = + ) {.async: (raises: []).} = check false let fut = network1.send( diff --git a/tests/codex/blockexchange/testpeerctxstore.nim b/tests/codex/blockexchange/testpeerctxstore.nim index 6ea601d1..e2983d10 100644 --- a/tests/codex/blockexchange/testpeerctxstore.nim +++ b/tests/codex/blockexchange/testpeerctxstore.nim @@ -1,7 +1,7 @@ import std/sugar import std/sequtils -import std/unittest +import pkg/unittest2 import pkg/libp2p import pkg/codex/blockexchange/peers @@ -11,7 +11,7 @@ import pkg/codex/blockexchange/protobuf/presence import ../helpers import ../examples -checksuite "Peer Context Store": +suite "Peer Context Store": var store: PeerCtxStore peerCtx: BlockExcPeerCtx @@ -31,7 +31,7 @@ checksuite "Peer Context Store": test "Should get peer": check store.get(peerCtx.id) == peerCtx -checksuite "Peer Context Store Peer Selection": +suite "Peer Context Store Peer Selection": var store: PeerCtxStore peerCtxs: seq[BlockExcPeerCtx] diff --git a/tests/codex/blockexchange/testpendingblocks.nim b/tests/codex/blockexchange/testpendingblocks.nim index 29410db7..af1e6728 100644 --- a/tests/codex/blockexchange/testpendingblocks.nim +++ b/tests/codex/blockexchange/testpendingblocks.nim @@ -10,7 +10,7 @@ import pkg/codex/blockexchange import ../helpers import ../../asynctest -checksuite "Pending Blocks": +suite "Pending Blocks": test "Should 
add want handle": let pendingBlocks = PendingBlocksManager.new() diff --git a/tests/codex/helpers/mockchunker.nim b/tests/codex/helpers/mockchunker.nim index 0d38cf3b..eb51f7ca 100644 --- a/tests/codex/helpers/mockchunker.nim +++ b/tests/codex/helpers/mockchunker.nim @@ -21,7 +21,7 @@ proc new*( var consumed = 0 proc reader( data: ChunkBuffer, len: int - ): Future[int] {.async, gcsafe, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = if consumed >= dataset.len: return 0 diff --git a/tests/codex/helpers/mockdiscovery.nim b/tests/codex/helpers/mockdiscovery.nim index 42ad76a9..4110c577 100644 --- a/tests/codex/helpers/mockdiscovery.nim +++ b/tests/codex/helpers/mockdiscovery.nim @@ -14,29 +14,42 @@ import pkg/codex/discovery import pkg/contractabi/address as ca type MockDiscovery* = ref object of Discovery - findBlockProvidersHandler*: - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.gcsafe.} - publishBlockProvideHandler*: proc(d: MockDiscovery, cid: Cid): Future[void] {.gcsafe.} - findHostProvidersHandler*: - proc(d: MockDiscovery, host: ca.Address): Future[seq[SignedPeerRecord]] {.gcsafe.} - publishHostProvideHandler*: - proc(d: MockDiscovery, host: ca.Address): Future[void] {.gcsafe.} + findBlockProvidersHandler*: proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} + + publishBlockProvideHandler*: + proc(d: MockDiscovery, cid: Cid): Future[void] {.async: (raises: [CancelledError]).} + + findHostProvidersHandler*: proc( + d: MockDiscovery, host: ca.Address + ): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} + + publishHostProvideHandler*: proc(d: MockDiscovery, host: ca.Address): Future[void] {. + async: (raises: [CancelledError]) + .} proc new*(T: type MockDiscovery): MockDiscovery = MockDiscovery() -proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} = +proc findPeer*( + d: Discovery, peerId: PeerId +): Future[?PeerRecord] {.async: (raises: [CancelledError]).} = ## mock find a peer - always return none - ## + ## return none(PeerRecord) -method find*(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = +method find*( + d: MockDiscovery, cid: Cid +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = if isNil(d.findBlockProvidersHandler): return return await d.findBlockProvidersHandler(d, cid) -method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} = +method provide*( + d: MockDiscovery, cid: Cid +): Future[void] {.async: (raises: [CancelledError]).} = if isNil(d.publishBlockProvideHandler): return @@ -44,13 +57,15 @@ method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} = method find*( d: MockDiscovery, host: ca.Address -): Future[seq[SignedPeerRecord]] {.async.} = +): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} = if isNil(d.findHostProvidersHandler): return return await d.findHostProvidersHandler(d, host) -method provide*(d: MockDiscovery, host: ca.Address): Future[void] {.async.} = +method provide*( + d: MockDiscovery, host: ca.Address +): Future[void] {.async: (raises: [CancelledError]).} = if isNil(d.publishHostProvideHandler): return diff --git a/tests/codex/helpers/randomchunker.nim b/tests/codex/helpers/randomchunker.nim index b482f67f..cf857595 100644 --- a/tests/codex/helpers/randomchunker.nim +++ b/tests/codex/helpers/randomchunker.nim @@ -26,7 +26,7 @@ proc new*( var consumed = 0 proc reader( data: ChunkBuffer, len: int - 
): Future[int] {.async, gcsafe, raises: [Defect].} = + ): Future[int] {.async: (raises: [ChunkerError, CancelledError]), gcsafe.} = var alpha = toSeq(byte('A') .. byte('z')) if consumed >= size: diff --git a/tests/codex/merkletree/generictreetests.nim b/tests/codex/merkletree/generictreetests.nim index 0e1f7c9f..6244bc1c 100644 --- a/tests/codex/merkletree/generictreetests.nim +++ b/tests/codex/merkletree/generictreetests.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/codex/merkletree diff --git a/tests/codex/merkletree/testcodexcoders.nim b/tests/codex/merkletree/testcodexcoders.nim index d9544083..6da56844 100644 --- a/tests/codex/merkletree/testcodexcoders.nim +++ b/tests/codex/merkletree/testcodexcoders.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/questionable/results import pkg/stew/byteutils @@ -18,7 +18,7 @@ const data = [ "00000000000000000000000000000009".toBytes, "00000000000000000000000000000010".toBytes, ] -checksuite "merkletree - coders": +suite "merkletree - coders": test "encoding and decoding a tree yields the same tree": let tree = CodexTree.init(Sha256HashCodec, data).tryGet() diff --git a/tests/codex/merkletree/testcodextree.nim b/tests/codex/merkletree/testcodextree.nim index c4713d40..29390c16 100644 --- a/tests/codex/merkletree/testcodextree.nim +++ b/tests/codex/merkletree/testcodextree.nim @@ -1,6 +1,6 @@ -import std/unittest import std/sequtils +import pkg/unittest2 import pkg/questionable/results import pkg/stew/byteutils import pkg/libp2p diff --git a/tests/codex/merkletree/testmerkledigest.nim b/tests/codex/merkletree/testmerkledigest.nim index ccb138da..4cc2d197 100644 --- a/tests/codex/merkletree/testmerkledigest.nim +++ b/tests/codex/merkletree/testmerkledigest.nim @@ -1,7 +1,7 @@ -import std/unittest import std/sequtils import std/random +import pkg/unittest2 import pkg/poseidon2 import pkg/poseidon2/sponge diff --git a/tests/codex/merkletree/testposeidon2tree.nim b/tests/codex/merkletree/testposeidon2tree.nim index f60fdb39..e12751b7 100644 --- a/tests/codex/merkletree/testposeidon2tree.nim +++ b/tests/codex/merkletree/testposeidon2tree.nim @@ -1,6 +1,6 @@ -import std/unittest import std/sequtils +import pkg/unittest2 import pkg/poseidon2 import pkg/poseidon2/io import pkg/questionable/results diff --git a/tests/codex/sales/states/testdownloading.nim b/tests/codex/sales/states/testdownloading.nim index 3df45749..71376fc8 100644 --- a/tests/codex/sales/states/testdownloading.nim +++ b/tests/codex/sales/states/testdownloading.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/questionable import pkg/codex/contracts/requests import pkg/codex/sales/states/cancelled @@ -8,7 +8,7 @@ import pkg/codex/sales/states/filled import ../../examples import ../../helpers -checksuite "sales state 'downloading'": +suite "sales state 'downloading'": let request = StorageRequest.example let slotIndex = request.ask.slots div 2 var state: SaleDownloading diff --git a/tests/codex/sales/states/testfilled.nim b/tests/codex/sales/states/testfilled.nim index 04ff26db..f077b780 100644 --- a/tests/codex/sales/states/testfilled.nim +++ b/tests/codex/sales/states/testfilled.nim @@ -14,7 +14,7 @@ import ../../helpers/mockmarket import ../../examples import ../../helpers -checksuite "sales state 'filled'": +suite "sales state 'filled'": let request = StorageRequest.example let slotIndex = request.ask.slots div 2 diff --git a/tests/codex/sales/states/testfilling.nim b/tests/codex/sales/states/testfilling.nim 
index ce1d32f2..1a26753d 100644 --- a/tests/codex/sales/states/testfilling.nim +++ b/tests/codex/sales/states/testfilling.nim @@ -1,4 +1,4 @@ -import std/unittest +import pkg/unittest2 import pkg/questionable import pkg/codex/contracts/requests import pkg/codex/sales/states/filling @@ -7,7 +7,7 @@ import pkg/codex/sales/states/failed import ../../examples import ../../helpers -checksuite "sales state 'filling'": +suite "sales state 'filling'": let request = StorageRequest.example let slotIndex = request.ask.slots div 2 var state: SaleFilling diff --git a/tests/codex/sales/states/testunknown.nim b/tests/codex/sales/states/testunknown.nim index 5e9f81f9..98b23224 100644 --- a/tests/codex/sales/states/testunknown.nim +++ b/tests/codex/sales/states/testunknown.nim @@ -14,7 +14,7 @@ import ../../helpers/mockmarket import ../../examples import ../../helpers -checksuite "sales state 'unknown'": +suite "sales state 'unknown'": let request = StorageRequest.example let slotIndex = request.ask.slots div 2 let slotId = slotId(request.id, slotIndex) diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim index e92f9607..74ea8a2b 100644 --- a/tests/codex/sales/testsales.nim +++ b/tests/codex/sales/testsales.nim @@ -236,10 +236,17 @@ asyncchecksuite "Sales": return true proc addRequestToSaturatedQueue(): Future[StorageRequest] {.async.} = - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = - await sleepAsync(10.millis) - itemsProcessed.add item - done.complete() + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = + try: + await sleepAsync(10.millis) + itemsProcessed.add item + except CancelledError as exc: + checkpoint(exc.msg) + finally: + if not done.finished: + done.complete() var request1 = StorageRequest.example request1.ask.collateralPerByte = request.ask.collateralPerByte + 1 @@ -261,9 +268,12 @@ asyncchecksuite "Sales": waitFor run() test "processes all request's slots once StorageRequested emitted": - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = itemsProcessed.add item - done.complete() + if not done.finished: + done.complete() createAvailability() await market.requestStorage(request) let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot) @@ -299,9 +309,12 @@ asyncchecksuite "Sales": check always (not itemsProcessed.contains(expected)) test "adds slot index to slot queue once SlotFreed emitted": - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = itemsProcessed.add item - done.complete() + if not done.finished: + done.complete() createAvailability() market.requested.add request # "contract" must be able to return request diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim index 03c658be..7abad7eb 100644 --- a/tests/codex/sales/testslotqueue.nim +++ b/tests/codex/sales/testslotqueue.nim @@ -50,12 +50,19 @@ suite "Slot queue start/stop": suite "Slot queue workers": var queue: SlotQueue - proc onProcessSlot(item: SlotQueueItem, doneProcessing: Future[void]) {.async.} = - await sleepAsync(1000.millis) + proc onProcessSlot( + item: SlotQueueItem, doneProcessing: Future[void] + ) {.async: (raises: []).} = # this is not illustrative of the realistic scenario as the # 
`doneProcessing` future would be passed to another context before being # completed and therefore is not as simple as making the callback async - doneProcessing.complete() + try: + await sleepAsync(1000.millis) + except CatchableError as exc: + checkpoint(exc.msg) + finally: + if not doneProcessing.finished: + doneProcessing.complete() setup: let request = StorageRequest.example @@ -89,9 +96,14 @@ suite "Slot queue workers": check eventually queue.activeWorkers == 3 test "discards workers once processing completed": - proc processSlot(item: SlotQueueItem, done: Future[void]) {.async.} = - await sleepAsync(1.millis) - done.complete() + proc processSlot(item: SlotQueueItem, done: Future[void]) {.async: (raises: []).} = + try: + await sleepAsync(1.millis) + except CatchableError as exc: + checkpoint(exc.msg) + finally: + if not done.finished: + done.complete() queue.onProcessSlot = processSlot @@ -114,11 +126,19 @@ suite "Slot queue": proc newSlotQueue(maxSize, maxWorkers: int, processSlotDelay = 1.millis) = queue = SlotQueue.new(maxWorkers, maxSize.uint16) - queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = - await sleepAsync(processSlotDelay) - onProcessSlotCalled = true - onProcessSlotCalledWith.add (item.requestId, item.slotIndex) - done.complete() + queue.onProcessSlot = proc( + item: SlotQueueItem, done: Future[void] + ) {.async: (raises: []).} = + try: + await sleepAsync(processSlotDelay) + except CatchableError as exc: + checkpoint(exc.msg) + finally: + onProcessSlotCalled = true + onProcessSlotCalledWith.add (item.requestId, item.slotIndex) + if not done.finished: + done.complete() + queue.start() setup: diff --git a/tests/codex/slots/testslotbuilder.nim b/tests/codex/slots/testslotbuilder.nim index ef83bdee..9a2043a8 100644 --- a/tests/codex/slots/testslotbuilder.nim +++ b/tests/codex/slots/testslotbuilder.nim @@ -133,7 +133,7 @@ suite "Slot builder": check: Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == - "Number of blocks must be divisable by number of slots." + "Number of blocks must be divisible by number of slots." test "Block size must be divisable by cell size": let mismatchManifest = Manifest.new( @@ -151,7 +151,7 @@ suite "Slot builder": check: Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == - "Block size must be divisable by cell size." + "Block size must be divisible by cell size." 
test "Should build correct slot builder": builder = diff --git a/tests/codex/stores/repostore/testcoders.nim b/tests/codex/stores/repostore/testcoders.nim index f4d2b5e7..9d341af0 100644 --- a/tests/codex/stores/repostore/testcoders.nim +++ b/tests/codex/stores/repostore/testcoders.nim @@ -1,6 +1,6 @@ -import std/unittest import std/random +import pkg/unittest2 import pkg/stew/objects import pkg/questionable import pkg/questionable/results @@ -11,7 +11,7 @@ import pkg/codex/stores/repostore/coders import ../../helpers -checksuite "Test coders": +suite "Test coders": proc rand(T: type NBytes): T = rand(Natural).NBytes diff --git a/tests/codex/stores/testcachestore.nim b/tests/codex/stores/testcachestore.nim index e7025388..03075e1a 100644 --- a/tests/codex/stores/testcachestore.nim +++ b/tests/codex/stores/testcachestore.nim @@ -11,7 +11,7 @@ import ./commonstoretests import ../../asynctest import ../helpers -checksuite "Cache Store": +suite "Cache Store": var newBlock, newBlock1, newBlock2, newBlock3: Block store: CacheStore diff --git a/tests/codex/stores/testkeyutils.nim b/tests/codex/stores/testkeyutils.nim index 238e2681..86365c5c 100644 --- a/tests/codex/stores/testkeyutils.nim +++ b/tests/codex/stores/testkeyutils.nim @@ -36,7 +36,7 @@ proc createManifestCid(): ?!Cid = let cid = ?Cid.init(version, codec, hash).mapFailure return success cid -checksuite "KeyUtils": +suite "KeyUtils": test "makePrefixKey should create block key": let length = 6 let cid = Cid.example diff --git a/tests/codex/stores/testmaintenance.nim b/tests/codex/stores/testmaintenance.nim index e5ff519e..89e75700 100644 --- a/tests/codex/stores/testmaintenance.nim +++ b/tests/codex/stores/testmaintenance.nim @@ -21,7 +21,7 @@ import ../examples import codex/stores/maintenance -checksuite "BlockMaintainer": +suite "BlockMaintainer": var mockRepoStore: MockRepoStore var interval: Duration var mockTimer: MockTimer diff --git a/tests/codex/stores/testrepostore.nim b/tests/codex/stores/testrepostore.nim index 0279b56f..5274d046 100644 --- a/tests/codex/stores/testrepostore.nim +++ b/tests/codex/stores/testrepostore.nim @@ -24,7 +24,7 @@ import ../helpers/mockclock import ../examples import ./commonstoretests -checksuite "Test RepoStore start/stop": +suite "Test RepoStore start/stop": var repoDs: Datastore metaDs: Datastore diff --git a/tests/codex/testasyncheapqueue.nim b/tests/codex/testasyncheapqueue.nim index a9c6769b..2d2cfb0c 100644 --- a/tests/codex/testasyncheapqueue.nim +++ b/tests/codex/testasyncheapqueue.nim @@ -22,7 +22,7 @@ proc toSortedSeq[T](h: AsyncHeapQueue[T], queueType = QueueType.Min): seq[T] = while tmp.len > 0: result.add(popNoWait(tmp).tryGet()) -checksuite "Synchronous tests": +suite "Synchronous tests": test "Test pushNoWait - Min": var heap = newAsyncHeapQueue[int]() let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] diff --git a/tests/codex/testchunking.nim b/tests/codex/testchunking.nim index 2241a82b..44202c40 100644 --- a/tests/codex/testchunking.nim +++ b/tests/codex/testchunking.nim @@ -27,7 +27,7 @@ asyncchecksuite "Chunking": let contents = [1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0] proc reader( data: ChunkBuffer, len: int - ): Future[int] {.gcsafe, async, raises: [Defect].} = + ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} = let read = min(contents.len - offset, len) if read == 0: return 0 @@ -97,8 +97,13 @@ asyncchecksuite "Chunking": discard (await chunker.getBytes()) test "stream should forward LPStreamError": - expect LPStreamError: + try: await 
raiseStreamException(newException(LPStreamError, "test error")) + except ChunkerError as exc: + check exc.parent of LPStreamError + except CatchableError as exc: + checkpoint("Unexpected error: " & exc.msg) + fail() test "stream should catch LPStreamEOFError": await raiseStreamException(newException(LPStreamEOFError, "test error")) @@ -106,7 +111,3 @@ asyncchecksuite "Chunking": test "stream should forward CancelledError": expect CancelledError: await raiseStreamException(newException(CancelledError, "test error")) - - test "stream should forward LPStreamError": - expect LPStreamError: - await raiseStreamException(newException(LPStreamError, "test error")) diff --git a/tests/codex/testclock.nim b/tests/codex/testclock.nim index 2b0158cf..967de672 100644 --- a/tests/codex/testclock.nim +++ b/tests/codex/testclock.nim @@ -1,9 +1,9 @@ -import std/unittest +import pkg/unittest2 import codex/clock import ./helpers -checksuite "Clock": +suite "Clock": proc testConversion(seconds: SecondsSince1970) = let asBytes = seconds.toBytes diff --git a/tests/codex/testlogutils.nim b/tests/codex/testlogutils.nim index b2694ee9..2077fb81 100644 --- a/tests/codex/testlogutils.nim +++ b/tests/codex/testlogutils.nim @@ -1,6 +1,7 @@ import std/options import std/strutils -import std/unittest + +import pkg/unittest2 import pkg/codex/blocktype import pkg/codex/conf import pkg/codex/contracts/requests diff --git a/tests/codex/testmanifest.nim b/tests/codex/testmanifest.nim index 241bec61..ea9465d5 100644 --- a/tests/codex/testmanifest.nim +++ b/tests/codex/testmanifest.nim @@ -13,7 +13,7 @@ import ../asynctest import ./helpers import ./examples -checksuite "Manifest": +suite "Manifest": let manifest = Manifest.new(treeCid = Cid.example, blockSize = 1.MiBs, datasetSize = 100.MiBs) diff --git a/tests/codex/testpurchasing.nim b/tests/codex/testpurchasing.nim index 5a4e85e9..1834ee03 100644 --- a/tests/codex/testpurchasing.nim +++ b/tests/codex/testpurchasing.nim @@ -116,7 +116,7 @@ asyncchecksuite "Purchasing": await purchase.wait() check market.withdrawn == @[request.id] -checksuite "Purchasing state machine": +suite "Purchasing state machine": var purchasing: Purchasing var market: MockMarket var clock: MockClock diff --git a/tests/codex/testsystemclock.nim b/tests/codex/testsystemclock.nim index 6f743283..3f179260 100644 --- a/tests/codex/testsystemclock.nim +++ b/tests/codex/testsystemclock.nim @@ -1,10 +1,10 @@ import std/times -import std/unittest -import codex/systemclock +import pkg/unittest2 +import pkg/codex/systemclock import ./helpers -checksuite "SystemClock": +suite "SystemClock": test "Should get now": let clock = SystemClock.new() diff --git a/tests/codex/utils/testiter.nim b/tests/codex/utils/testiter.nim index 801e1937..ec19c484 100644 --- a/tests/codex/utils/testiter.nim +++ b/tests/codex/utils/testiter.nim @@ -7,7 +7,7 @@ import pkg/codex/utils/iter import ../../asynctest import ../helpers -checksuite "Test Iter": +suite "Test Iter": test "Should be finished": let iter = Iter[int].empty() diff --git a/tests/codex/utils/testkeyutils.nim b/tests/codex/utils/testkeyutils.nim index 2124e682..104258f3 100644 --- a/tests/codex/utils/testkeyutils.nim +++ b/tests/codex/utils/testkeyutils.nim @@ -1,12 +1,14 @@ -import std/unittest import std/os -import codex/utils/keyutils + +import pkg/unittest2 +import pkg/codex/utils/keyutils + import ../helpers when defined(windows): import stew/windows/acl -checksuite "keyutils": +suite "keyutils": let path = getTempDir() / "CodexTest" setup: diff --git 
a/tests/codex/utils/testoptions.nim b/tests/codex/utils/testoptions.nim index 05f7509e..650715bc 100644 --- a/tests/codex/utils/testoptions.nim +++ b/tests/codex/utils/testoptions.nim @@ -1,8 +1,9 @@ -import std/unittest -import codex/utils/options +import pkg/unittest2 +import pkg/codex/utils/options + import ../helpers -checksuite "optional casts": +suite "optional casts": test "casting value to same type works": check 42 as int == some 42 @@ -31,7 +32,7 @@ checksuite "optional casts": check 42.some as string == string.none check int.none as int == int.none -checksuite "Optionalize": +suite "Optionalize": test "does not except non-object types": static: doAssert not compiles(Optionalize(int)) diff --git a/tests/codex/utils/testtrackedfutures.nim b/tests/codex/utils/testtrackedfutures.nim index 35074919..993d5b43 100644 --- a/tests/codex/utils/testtrackedfutures.nim +++ b/tests/codex/utils/testtrackedfutures.nim @@ -17,47 +17,71 @@ asyncchecksuite "tracked futures": check module.trackedFutures.len == 0 test "tracks unfinished futures": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) module.trackedFutures.track(fut) check module.trackedFutures.len == 1 test "does not track completed futures": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) fut.complete() module.trackedFutures.track(fut) - check eventually module.trackedFutures.len == 0 - - test "does not track failed futures": - let fut = newFuture[void]("test") - fut.fail((ref CatchableError)(msg: "some error")) - module.trackedFutures.track(fut) - check eventually module.trackedFutures.len == 0 + check module.trackedFutures.len == 0 test "does not track cancelled futures": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) + fut.cancelCallback = proc(data: pointer) = + fut.cancelAndSchedule() # manually schedule the cancel + await fut.cancelAndWait() module.trackedFutures.track(fut) check eventually module.trackedFutures.len == 0 test "removes tracked future when finished": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) module.trackedFutures.track(fut) + check module.trackedFutures.len == 1 fut.complete() check eventually module.trackedFutures.len == 0 test "removes tracked future when cancelled": - let fut = newFuture[void]("test") + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) + fut.cancelCallback = proc(data: pointer) = + fut.cancelAndSchedule() # manually schedule the cancel + module.trackedFutures.track(fut) + check module.trackedFutures.len == 1 + await fut.cancelAndWait() + check eventually module.trackedFutures.len == 0 + + test "completed and removes future on cancel": + let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule}) + fut.cancelCallback = proc(data: pointer) = + fut.complete() + + module.trackedFutures.track(fut) + check module.trackedFutures.len == 1 await fut.cancelAndWait() check eventually module.trackedFutures.len == 0 test "cancels and removes all tracked futures": - let fut1 = newFuture[void]("test1") - let fut2 = newFuture[void]("test2") - let fut3 = newFuture[void]("test3") + let fut1 = Future[void].Raising([]).init("test1", {FutureFlag.OwnCancelSchedule}) + fut1.cancelCallback = proc(data: pointer) = + fut1.cancelAndSchedule() # manually schedule the cancel + + let fut2 = 
Future[void].Raising([]).init("test2", {FutureFlag.OwnCancelSchedule})
+    fut2.cancelCallback = proc(data: pointer) =
+      fut2.cancelAndSchedule() # manually schedule the cancel
+
+    let fut3 = Future[void].Raising([]).init("test3", {FutureFlag.OwnCancelSchedule})
+    fut3.cancelCallback = proc(data: pointer) =
+      fut3.cancelAndSchedule() # manually schedule the cancel
+
     module.trackedFutures.track(fut1)
+    check module.trackedFutures.len == 1
     module.trackedFutures.track(fut2)
+    check module.trackedFutures.len == 2
     module.trackedFutures.track(fut3)
+    check module.trackedFutures.len == 3
     await module.trackedFutures.cancelTracked()
     check eventually fut1.cancelled
     check eventually fut2.cancelled
diff --git a/tests/codex/utils/testutils.nim b/tests/codex/utils/testutils.nim
index 92c883be..b0bb20b5 100644
--- a/tests/codex/utils/testutils.nim
+++ b/tests/codex/utils/testutils.nim
@@ -1,4 +1,4 @@
-import std/unittest
+import pkg/unittest2
 
 import pkg/codex/utils
 
diff --git a/tests/helpers.nim b/tests/helpers.nim
index a6a6ff44..82b544f1 100644
--- a/tests/helpers.nim
+++ b/tests/helpers.nim
@@ -2,4 +2,36 @@ import helpers/multisetup
 import helpers/trackers
 import helpers/templeveldb
 
+import std/sequtils, chronos
+
 export multisetup, trackers, templeveldb
+
+### taken from libp2p errorhelpers.nim
+proc allFuturesThrowing*(args: varargs[FutureBase]): Future[void] =
+  # This proc is only meant for use in tests / not suitable for general use.
+  # - Swallowing errors arbitrarily instead of aggregating them is bad design
+  # - It raises `CatchableError` instead of the union of the `futs` errors,
+  #   inflating the caller's `raises` list unnecessarily. `macro` could fix it
+  let futs = @args
+  (
+    proc() {.async: (raises: [CatchableError]).} =
+      await allFutures(futs)
+      var firstErr: ref CatchableError
+      for fut in futs:
+        if fut.failed:
+          let err = fut.error()
+          if err of CancelledError:
+            raise err
+          if firstErr == nil:
+            firstErr = err
+      if firstErr != nil:
+        raise firstErr
+  )()
+
+proc allFuturesThrowing*[T](futs: varargs[Future[T]]): Future[void] =
+  allFuturesThrowing(futs.mapIt(FutureBase(it)))
+
+proc allFuturesThrowing*[T, E]( # https://github.com/nim-lang/Nim/issues/23432
+    futs: varargs[InternalRaisesFuture[T, E]]
+): Future[void] =
+  allFuturesThrowing(futs.mapIt(FutureBase(it)))
diff --git a/tests/helpers/trackers.nim b/tests/helpers/trackers.nim
index ed8c5692..898053c2 100644
--- a/tests/helpers/trackers.nim
+++ b/tests/helpers/trackers.nim
@@ -1,5 +1,5 @@
 import pkg/codex/streams/storestream
-import std/unittest
+import pkg/unittest2
 
 # From lip2p/tests/helpers
 const trackerNames = [StoreStreamTrackerName]
diff --git a/vendor/nim-serde b/vendor/nim-serde
index c82e85c6..5ced7c88 160000
--- a/vendor/nim-serde
+++ b/vendor/nim-serde
@@ -1 +1 @@
-Subproject commit c82e85c62436218592fbe876df5ac389ef8b964b
+Subproject commit 5ced7c88b97d99c582285ce796957fb71fd42434

From a0ddcef08da8f71fccf80d976a73f5771c545eb4 Mon Sep 17 00:00:00 2001
From: Ben Bierens <39762930+benbierens@users.noreply.github.com>
Date: Thu, 13 Mar 2025 23:45:44 +0100
Subject: [PATCH 08/14] changes trace to info for updates of the announce/dht record logs (#1156)

---
 codex/discovery.nim | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/codex/discovery.nim b/codex/discovery.nim
index eed1f89b..4a211c20 100644
--- a/codex/discovery.nim
+++ b/codex/discovery.nim
@@ -179,7 +179,7 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
 
   d.announceAddrs = @addrs
 
-  trace "Updating announce record", addrs = d.announceAddrs
+  info "Updating announce record", addrs = d.announceAddrs
   d.providerRecord = SignedPeerRecord
     .init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
     .expect("Should construct signed record").some
@@ -191,7 +191,7 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
   ## Update providers record
   ##
 
-  trace "Updating Dht record", addrs = addrs
+  info "Updating Dht record", addrs = addrs
   d.dhtRecord = SignedPeerRecord
     .init(d.key, PeerRecord.init(d.peerId, @addrs))
     .expect("Should construct signed record").some

From a5db757de39df6e08807e8917d8f924e4a68d76a Mon Sep 17 00:00:00 2001
From: Eric <5089238+emizzle@users.noreply.github.com>
Date: Fri, 14 Mar 2025 09:46:05 +1100
Subject: [PATCH 09/14] fix: ethers no longer leaks AsyncLockError (#1146)

* fix: ethers no longer leaks AsyncLockError

* Add message to convertEthersError

- adds a message to convertEthersError allowing contextual error messages
- replaces try/except EthersError with convertEthersError (PR feedback)

* bump ethers after PR merged upstream

---
 codex/contracts/market.nim | 117 +++++++++++++++++++------------------
 vendor/nim-ethers          |   2 +-
 2 files changed, 60 insertions(+), 59 deletions(-)

diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim
index 9079ac8a..58495b45 100644
--- a/codex/contracts/market.nim
+++ b/codex/contracts/market.nim
@@ -1,3 +1,4 @@
+import std/strformat
 import std/strutils
 import pkg/ethers
 import pkg/upraises
@@ -49,11 +50,17 @@ func new*(
 proc raiseMarketError(message: string) {.raises: [MarketError].} =
   raise newException(MarketError, message)
 
-template convertEthersError(body) =
+func prefixWith(suffix, prefix: string, separator = ": "): string =
+  if prefix.len > 0:
+    return &"{prefix}{separator}{suffix}"
+  else:
+    return suffix
+
+template convertEthersError(msg: string = "", body) =
   try:
     body
   except EthersError as error:
-    raiseMarketError(error.msgDetail)
+    raiseMarketError(error.msgDetail.prefixWith(msg))
 
 proc config(
   market: OnChainMarket
@@ -71,7 +78,7 @@ proc config(
 
 proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
   debug "Approving tokens", amount
-  convertEthersError:
+  convertEthersError("Failed to approve funds"):
    let tokenAddress = await market.contract.token()
    let token = Erc20Token.new(tokenAddress, market.signer)
    discard await token.increaseAllowance(market.contract.address(), amount).confirm(1)
@@ -86,8 +93,7 @@ method loadConfig*(
       market.configuration = some fetchedConfig
 
       return success()
-  except AsyncLockError, EthersError:
-    let err = getCurrentException()
+  except EthersError as err:
     return failure newException(
       MarketError,
       "Failed to fetch the config from the Marketplace contract: " & err.msg,
@@ -100,13 +106,13 @@ method getZkeyHash*(
   return some config.proofs.zkeyHash
 
 method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
-  convertEthersError:
+  convertEthersError("Failed to get signer address"):
     return await market.signer.getAddress()
 
 method periodicity*(
    market: OnChainMarket
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
-  convertEthersError:
+  convertEthersError("Failed to get Marketplace config"):
     let config = await market.config()
     let period = config.proofs.period
     return Periodicity(seconds: period)
 
 method proofTimeout*(
    market: OnChainMarket
 ): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
-  convertEthersError:
+  convertEthersError("Failed to get Marketplace config"):
     let config = 
await market.config() return config.proofs.timeout method repairRewardPercentage*( market: OnChainMarket ): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = - convertEthersError: + convertEthersError("Failed to get Marketplace config"): let config = await market.config() return config.collateral.repairRewardPercentage method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} = - convertEthersError: + convertEthersError("Failed to get Marketplace config"): let config = await market.config() return config.requestDurationLimit method proofDowntime*( market: OnChainMarket ): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} = - convertEthersError: + convertEthersError("Failed to get Marketplace config"): let config = await market.config() return config.proofs.downtime method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} = - convertEthersError: + convertEthersError("Failed to get slot pointer"): let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.getPointer(slotId, overrides) method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} = - convertEthersError: + convertEthersError("Failed to get my requests"): return await market.contract.myRequests method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} = - convertEthersError: + convertEthersError("Failed to get my slots"): let slots = await market.contract.mySlots() debug "Fetched my slots", numSlots = len(slots) return slots method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} = - convertEthersError: + convertEthersError("Failed to request storage"): debug "Requesting storage" await market.approveFunds(request.totalPrice()) discard await market.contract.requestStorage(request).confirm(1) @@ -174,14 +180,14 @@ method getRequest*( except Marketplace_UnknownRequest, KeyError: warn "Cannot retrieve the request", error = getCurrentExceptionMsg() return none StorageRequest - except EthersError, AsyncLockError: - error "Cannot retrieve the request", error = getCurrentExceptionMsg() + except EthersError as e: + error "Cannot retrieve the request", error = e.msg return none StorageRequest method requestState*( market: OnChainMarket, requestId: RequestId ): Future[?RequestState] {.async.} = - convertEthersError: + convertEthersError("Failed to get request state"): try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return some await market.contract.requestState(requestId, overrides) @@ -191,31 +197,26 @@ method requestState*( method slotState*( market: OnChainMarket, slotId: SlotId ): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} = - convertEthersError: - try: - let overrides = CallOverrides(blockTag: some BlockTag.pending) - return await market.contract.slotState(slotId, overrides) - except AsyncLockError as err: - raiseMarketError( - "Failed to fetch the slot state from the Marketplace contract: " & err.msg - ) + convertEthersError("Failed to fetch the slot state from the Marketplace contract"): + let overrides = CallOverrides(blockTag: some BlockTag.pending) + return await market.contract.slotState(slotId, overrides) method getRequestEnd*( market: OnChainMarket, id: RequestId ): Future[SecondsSince1970] {.async.} = - convertEthersError: + convertEthersError("Failed to get request end"): return await market.contract.requestEnd(id) method requestExpiresAt*( market: OnChainMarket, id: RequestId ): Future[SecondsSince1970] {.async.} = - 
convertEthersError: + convertEthersError("Failed to get request expiry"): return await market.contract.requestExpiry(id) method getHost( market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ): Future[?Address] {.async.} = - convertEthersError: + convertEthersError("Failed to get slot's host"): let slotId = slotId(requestId, slotIndex) let address = await market.contract.getHost(slotId) if address != Address.default: @@ -226,11 +227,11 @@ method getHost( method currentCollateral*( market: OnChainMarket, slotId: SlotId ): Future[UInt256] {.async.} = - convertEthersError: + convertEthersError("Failed to get slot's current collateral"): return await market.contract.currentCollateral(slotId) method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} = - convertEthersError: + convertEthersError("Failed to get active slot"): try: return some await market.contract.getActiveSlot(slotId) except Marketplace_SlotIsFree: @@ -243,7 +244,7 @@ method fillSlot( proof: Groth16Proof, collateral: UInt256, ) {.async.} = - convertEthersError: + convertEthersError("Failed to fill slot"): logScope: requestId slotIndex @@ -254,7 +255,7 @@ method fillSlot( trace "fillSlot transaction completed" method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = - convertEthersError: + convertEthersError("Failed to free slot"): var freeSlot: Future[Confirmable] if rewardRecipient =? market.rewardRecipient: # If --reward-recipient specified, use it as the reward recipient, and use @@ -273,11 +274,11 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = discard await freeSlot.confirm(1) method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} = - convertEthersError: + convertEthersError("Failed to withdraw funds"): discard await market.contract.withdrawFunds(requestId).confirm(1) method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = - convertEthersError: + convertEthersError("Failed to get proof requirement"): try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.isProofRequired(id, overrides) @@ -285,7 +286,7 @@ method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async return false method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = - convertEthersError: + convertEthersError("Failed to get future proof requirement"): try: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.willProofBeRequired(id, overrides) @@ -295,18 +296,18 @@ method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.a method getChallenge*( market: OnChainMarket, id: SlotId ): Future[ProofChallenge] {.async.} = - convertEthersError: + convertEthersError("Failed to get proof challenge"): let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.getChallenge(id, overrides) method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} = - convertEthersError: + convertEthersError("Failed to submit proof"): discard await market.contract.submitProof(id, proof).confirm(1) method markProofAsMissing*( market: OnChainMarket, id: SlotId, period: Period ) {.async.} = - convertEthersError: + convertEthersError("Failed to mark proof as missing"): discard await market.contract.markProofAsMissing(id, period).confirm(1) method canProofBeMarkedAsMissing*( @@ -325,7 +326,7 @@ method canProofBeMarkedAsMissing*( method reserveSlot*( market: 
OnChainMarket, requestId: RequestId, slotIndex: uint64 ) {.async.} = - convertEthersError: + convertEthersError("Failed to reserve slot"): discard await market.contract .reserveSlot( requestId, @@ -338,7 +339,7 @@ method reserveSlot*( method canReserveSlot*( market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ): Future[bool] {.async.} = - convertEthersError: + convertEthersError("Unable to determine if slot can be reserved"): return await market.contract.canReserveSlot(requestId, slotIndex) method subscribeRequests*( @@ -351,7 +352,7 @@ method subscribeRequests*( callback(event.requestId, event.ask, event.expiry) - convertEthersError: + convertEthersError("Failed to subscribe to StorageRequested events"): let subscription = await market.contract.subscribe(StorageRequested, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -365,7 +366,7 @@ method subscribeSlotFilled*( callback(event.requestId, event.slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotFilled events"): let subscription = await market.contract.subscribe(SlotFilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -379,7 +380,7 @@ method subscribeSlotFilled*( if eventRequestId == requestId and eventSlotIndex == slotIndex: callback(requestId, slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotFilled events"): return await market.subscribeSlotFilled(onSlotFilled) method subscribeSlotFreed*( @@ -392,7 +393,7 @@ method subscribeSlotFreed*( callback(event.requestId, event.slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotFreed events"): let subscription = await market.contract.subscribe(SlotFreed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -407,7 +408,7 @@ method subscribeSlotReservationsFull*( callback(event.requestId, event.slotIndex) - convertEthersError: + convertEthersError("Failed to subscribe to SlotReservationsFull events"): let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -421,7 +422,7 @@ method subscribeFulfillment( callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFulfilled events"): let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -436,7 +437,7 @@ method subscribeFulfillment( if event.requestId == requestId: callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFulfilled events"): let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -450,7 +451,7 @@ method subscribeRequestCancelled*( callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestCancelled events"): let subscription = await market.contract.subscribe(RequestCancelled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -465,7 +466,7 @@ method subscribeRequestCancelled*( if event.requestId == requestId: callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestCancelled events"): let subscription = await market.contract.subscribe(RequestCancelled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -479,7 +480,7 @@ method subscribeRequestFailed*( 
callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFailed events"): let subscription = await market.contract.subscribe(RequestFailed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -494,7 +495,7 @@ method subscribeRequestFailed*( if event.requestId == requestId: callback(event.requestId) - convertEthersError: + convertEthersError("Failed to subscribe to RequestFailed events"): let subscription = await market.contract.subscribe(RequestFailed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -508,7 +509,7 @@ method subscribeProofSubmission*( callback(event.id) - convertEthersError: + convertEthersError("Failed to subscribe to ProofSubmitted events"): let subscription = await market.contract.subscribe(ProofSubmitted, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) @@ -518,13 +519,13 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} = method queryPastSlotFilledEvents*( market: OnChainMarket, fromBlock: BlockTag ): Future[seq[SlotFilled]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past SlotFilled events from block"): return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest) method queryPastSlotFilledEvents*( market: OnChainMarket, blocksAgo: int ): Future[seq[SlotFilled]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past SlotFilled events"): let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) return await market.queryPastSlotFilledEvents(fromBlock) @@ -532,21 +533,21 @@ method queryPastSlotFilledEvents*( method queryPastSlotFilledEvents*( market: OnChainMarket, fromTime: SecondsSince1970 ): Future[seq[SlotFilled]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past SlotFilled events from time"): let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime) return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock)) method queryPastStorageRequestedEvents*( market: OnChainMarket, fromBlock: BlockTag ): Future[seq[StorageRequested]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past StorageRequested events from block"): return await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest) method queryPastStorageRequestedEvents*( market: OnChainMarket, blocksAgo: int ): Future[seq[StorageRequested]] {.async.} = - convertEthersError: + convertEthersError("Failed to get past StorageRequested events"): let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) return await market.queryPastStorageRequestedEvents(fromBlock) diff --git a/vendor/nim-ethers b/vendor/nim-ethers index d2b11a86..b505ef1a 160000 --- a/vendor/nim-ethers +++ b/vendor/nim-ethers @@ -1 +1 @@ -Subproject commit d2b11a865796a55296027f8ffba68398035ad435 +Subproject commit b505ef1ab889be8161bb1efb4908e3dfde5bc1c9 From f1b84dc6d1b295a59e6060f8a1026a5b1e280a9e Mon Sep 17 00:00:00 2001 From: tianzedavid <168427849+tianzedavid@users.noreply.github.com> Date: Fri, 14 Mar 2025 06:46:44 +0800 Subject: [PATCH 10/14] chore: fix some typos (#1110) Signed-off-by: tianzedavid Co-authored-by: Dmitriy Ryajov --- README.md | 4 ++-- nix/default.nix | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index d073057f..2a15051f 100644 --- a/README.md +++ b/README.md @@ -59,8 +59,8 @@ Feel free to dive in, contributions are welcomed! Open an issue or submit PRs. 
### Linting and formatting
 
-`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is requrired to adhere to its styling.
+`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling.
 If you are setting up fresh setup, in order to get `nph` run `make build-nph`.
 In order to format files run `make nph/`.
-If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior commiting them.
+If you want, you can install a Git pre-commit hook using `make install-nph-commit`, which will format modified files prior to committing them.
 
 If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension you can enable "Format On Save" (eq. the `nim.formatOnSave` property) that will format the files using `nph`.
\ No newline at end of file
diff --git a/nix/default.nix b/nix/default.nix
index 691e2af3..b5823f86 100644
--- a/nix/default.nix
+++ b/nix/default.nix
@@ -56,7 +56,7 @@ in pkgs.gcc13Stdenv.mkDerivation rec {
     fakeCargo
   ];
 
-  # Disable CPU optmizations that make binary not portable.
+  # Disable CPU optimizations that make binary not portable.
   NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}";
   # Avoid Nim cache permission errors.
   XDG_CACHE_HOME = "/tmp";

From 75db491d84116f4cf9e38550d0bc52763d42a81f Mon Sep 17 00:00:00 2001
From: munna0908 <88337208+munna0908@users.noreply.github.com>
Date: Fri, 14 Mar 2025 18:39:18 +0530
Subject: [PATCH 11/14] fix: optimise erasure encode/decode (#1123)

* avoid copying block, parity data to shared memory
* use alloc instead of allocShared
* code cleanup

---
 codex/erasure/erasure.nim   | 82 +++++++++++++------------------
 codex/utils/arrayutils.nim  | 13 ++++++
 tests/codex/testerasure.nim | 16 ++++----
 3 files changed, 49 insertions(+), 62 deletions(-)

diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim
index 78ce3971..884969d0 100644
--- a/codex/erasure/erasure.nim
+++ b/codex/erasure/erasure.nim
@@ -310,10 +310,10 @@ proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
   else:
     task[].success.store(true)
 
-proc encodeAsync*(
+proc asyncEncode*(
    self: Erasure,
    blockSize, blocksLen, parityLen: int,
-    data: ref seq[seq[byte]],
+    blocks: ref seq[seq[byte]],
    parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
 ): Future[?!void] {.async: (raises: [CancelledError]).} =
   without threadPtr =? ThreadSignalPtr.new():
@@ -322,13 +322,10 @@ proc encodeAsync*(
   defer:
     threadPtr.close().expect("closing once works")
 
-  var blockData = createDoubleArray(blocksLen, blockSize)
-
-  for i in 0 ..< data[].len:
-    copyMem(blockData[i], addr data[i][0], blockSize)
+  var data = makeUncheckedArray(blocks)
 
   defer:
-    freeDoubleArray(blockData, blocksLen)
+    dealloc(data)
 
   ## Create an ecode task with block data
   var task = EncodeTask(
@@ -336,7 +333,7 @@ proc encodeAsync*(
    blockSize: blockSize,
    blocksLen: blocksLen,
    parityLen: parityLen,
-    blocks: blockData,
+    blocks: data,
    parity: parity,
    signal: threadPtr,
   )
@@ -348,18 +345,13 @@ proc encodeAsync*(
   self.taskPool.spawn leopardEncodeTask(self.taskPool, t)
   let threadFut = threadPtr.wait()
 
-  try:
-    await threadFut.join()
-  except CatchableError as exc:
-    try:
-      await threadFut
-    except AsyncError as asyncExc:
-      return failure(asyncExc.msg)
-    finally:
-      if exc of CancelledError:
-        raise (ref CancelledError) exc
-      else:
-        return failure(exc.msg)
+  if joinErr =? 
catch(await threadFut.join()).errorOption: + if err =? catch(await noCancel threadFut).errorOption: + return failure(err) + if joinErr of CancelledError: + raise (ref CancelledError) joinErr + else: + return failure(joinErr) if not t.success.load(): return failure("Leopard encoding failed") @@ -409,7 +401,7 @@ proc encodeData( try: if err =? ( - await self.encodeAsync( + await self.asyncEncode( manifest.blockSize.int, params.ecK, params.ecM, data, parity ) ).errorOption: @@ -489,6 +481,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} = task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen) defer: decoder.release() + discard task[].signal.fireSync() if ( let res = decoder.decode( @@ -506,9 +499,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} = else: task[].success.store(true) - discard task[].signal.fireSync() - -proc decodeAsync*( +proc asyncDecode*( self: Erasure, blockSize, blocksLen, parityLen: int, blocks, parity: ref seq[seq[byte]], @@ -521,24 +512,12 @@ proc decodeAsync*( threadPtr.close().expect("closing once works") var - blocksData = createDoubleArray(blocksLen, blockSize) - parityData = createDoubleArray(parityLen, blockSize) - - for i in 0 ..< blocks[].len: - if blocks[i].len > 0: - copyMem(blocksData[i], addr blocks[i][0], blockSize) - else: - blocksData[i] = nil - - for i in 0 ..< parity[].len: - if parity[i].len > 0: - copyMem(parityData[i], addr parity[i][0], blockSize) - else: - parityData[i] = nil + blockData = makeUncheckedArray(blocks) + parityData = makeUncheckedArray(parity) defer: - freeDoubleArray(blocksData, blocksLen) - freeDoubleArray(parityData, parityLen) + dealloc(blockData) + dealloc(parityData) ## Create an decode task with block data var task = DecodeTask( @@ -547,7 +526,7 @@ proc decodeAsync*( blocksLen: blocksLen, parityLen: parityLen, recoveredLen: blocksLen, - blocks: blocksData, + blocks: blockData, parity: parityData, recovered: recovered, signal: threadPtr, @@ -560,18 +539,13 @@ proc decodeAsync*( self.taskPool.spawn leopardDecodeTask(self.taskPool, t) let threadFut = threadPtr.wait() - try: - await threadFut.join() - except CatchableError as exc: - try: - await threadFut - except AsyncError as asyncExc: - return failure(asyncExc.msg) - finally: - if exc of CancelledError: - raise (ref CancelledError) exc - else: - return failure(exc.msg) + if joinErr =? catch(await threadFut.join()).errorOption: + if err =? catch(await noCancel threadFut).errorOption: + return failure(err) + if joinErr of CancelledError: + raise (ref CancelledError) joinErr + else: + return failure(joinErr) if not t.success.load(): return failure("Leopard encoding failed") @@ -627,7 +601,7 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = trace "Erasure decoding data" try: if err =? 
( - await self.decodeAsync( + await self.asyncDecode( encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered ) ).errorOption: diff --git a/codex/utils/arrayutils.nim b/codex/utils/arrayutils.nim index c398921f..e36a0cb3 100644 --- a/codex/utils/arrayutils.nim +++ b/codex/utils/arrayutils.nim @@ -23,3 +23,16 @@ proc freeDoubleArray*( # Free outer array if not arr.isNil: deallocShared(arr) + +proc makeUncheckedArray*( + data: ref seq[seq[byte]] +): ptr UncheckedArray[ptr UncheckedArray[byte]] = + result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](alloc0( + sizeof(ptr UncheckedArray[byte]) * data[].len + )) + + for i, blk in data[]: + if blk.len > 0: + result[i] = cast[ptr UncheckedArray[byte]](addr blk[0]) + else: + result[i] = nil diff --git a/tests/codex/testerasure.nim b/tests/codex/testerasure.nim index d469b379..5046bac2 100644 --- a/tests/codex/testerasure.nim +++ b/tests/codex/testerasure.nim @@ -228,7 +228,7 @@ suite "Erasure encode/decode": discard (await erasure.decode(encoded)).tryGet() test "Should concurrently encode/decode multiple datasets": - const iterations = 2 + const iterations = 5 let datasetSize = 1.MiBs @@ -335,18 +335,18 @@ suite "Erasure encode/decode": for i in 0 ..< parityLen: paritySeq[i] = cast[seq[byte]](parity[i]) - # call encodeAsync to get the parity + # call asyncEncode to get the parity let encFut = - await erasure.encodeAsync(BlockSize.int, blocksLen, parityLen, data, parity) + await erasure.asyncEncode(BlockSize.int, blocksLen, parityLen, data, parity) check encFut.isOk - let decFut = await erasure.decodeAsync( + let decFut = await erasure.asyncDecode( BlockSize.int, blocksLen, parityLen, data, paritySeq, recovered ) check decFut.isOk - # call encodeAsync and cancel the task - let encodeFut = erasure.encodeAsync( + # call asyncEncode and cancel the task + let encodeFut = erasure.asyncEncode( BlockSize.int, blocksLen, parityLen, data, cancelledTaskParity ) encodeFut.cancel() @@ -359,8 +359,8 @@ suite "Erasure encode/decode": for i in 0 ..< parityLen: check equalMem(parity[i], cancelledTaskParity[i], BlockSize.int) - # call decodeAsync and cancel the task - let decodeFut = erasure.decodeAsync( + # call asyncDecode and cancel the task + let decodeFut = erasure.asyncDecode( BlockSize.int, blocksLen, parityLen, data, paritySeq, cancelledTaskRecovered ) decodeFut.cancel() From 54177e9fbfd143534b51131d7893459fe7469f4b Mon Sep 17 00:00:00 2001 From: Giuliano Mega Date: Mon, 17 Mar 2025 17:08:24 -0300 Subject: [PATCH 12/14] feat(integration): use async client instead of standard Nim HTTP client (#1159) * WiP: migrating CodexClient to chronos http client * fix(api): fixes #1163 * feat: fully working API integration tests * convert most of the tests in testupdownload * feat: working updownload tests on async client * feat: make testsales work with async codexclient * feat: make testpurchasing work with async codexclient * feat: make testblockexpiration work with async codexclient * feat: make marketplacesuite work with async codexclient * make testproofs work with async codexclient * chore: refactor client to express higher level in terms of lower level operations * fix: set correct content-length for erasure-coded datasets * feat: make testecbug work with async client * feat: make testvalidator work with async client * refactor: simplify request aliases, add close operation * wire back client.close at node shutdown * refactor: remove unused exception * fix: use await instead of waitFor on async call sites --- codex/rest/api.nim | 11 
+- tests/integration/codexclient.nim | 357 ++++++++++++++-------- tests/integration/codexprocess.nim | 2 +- tests/integration/marketplacesuite.nim | 26 +- tests/integration/multinodes.nim | 10 +- tests/integration/testblockexpiration.nim | 10 +- tests/integration/testecbug.nim | 31 +- tests/integration/testmarketplace.nim | 52 ++-- tests/integration/testproofs.nim | 38 +-- tests/integration/testpurchasing.nim | 104 ++++--- tests/integration/testrestapi.nim | 302 +++++++++--------- tests/integration/testsales.nim | 153 ++++++---- tests/integration/testupdownload.nim | 41 +-- tests/integration/testvalidator.nim | 16 +- tests/testTaiko.nim | 2 +- 15 files changed, 656 insertions(+), 499 deletions(-) diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 054e1c2b..553cb91c 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -114,9 +114,14 @@ proc retrieveCid( else: resp.setHeader("Content-Disposition", "attachment") - resp.setHeader("Content-Length", $manifest.datasetSize.int) + # For erasure-coded datasets, we need to return the _original_ length; i.e., + # the length of the non-erasure-coded dataset, as that's what we will be + # returning to the client. + let contentLength = + if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize + resp.setHeader("Content-Length", $(contentLength.int)) - await resp.prepareChunked() + await resp.prepare(HttpResponseStreamType.Plain) while not stream.atEof: var @@ -129,7 +134,7 @@ proc retrieveCid( bytes += buff.len - await resp.sendChunk(addr buff[0], buff.len) + await resp.send(addr buff[0], buff.len) await resp.finish() codex_api_downloads.inc() except CancelledError as exc: diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index 4a106253..ef76b577 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -4,119 +4,216 @@ import std/strutils from pkg/libp2p import Cid, `$`, init import pkg/stint import pkg/questionable/results -import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient] +import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient, httptable] import pkg/codex/logutils import pkg/codex/rest/json import pkg/codex/purchasing import pkg/codex/errors import pkg/codex/sales/reservations -export purchasing +export purchasing, httptable, httpclient type CodexClient* = ref object baseurl: string - httpClients: seq[HttpClient] - -type CodexClientError* = object of CatchableError - -const HttpClientTimeoutMs = 60 * 1000 + session: HttpSessionRef proc new*(_: type CodexClient, baseurl: string): CodexClient = - CodexClient(baseurl: baseurl, httpClients: newSeq[HttpClient]()) + CodexClient(session: HttpSessionRef.new(), baseurl: baseurl) -proc http*(client: CodexClient): HttpClient = - let httpClient = newHttpClient(timeout = HttpClientTimeoutMs) - client.httpClients.insert(httpClient) - return httpClient +proc close*(self: CodexClient): Future[void] {.async: (raises: []).} = + await self.session.closeWait() -proc close*(client: CodexClient): void = - for httpClient in client.httpClients: - httpClient.close() +proc request( + self: CodexClient, + httpMethod: httputils.HttpMethod, + url: string, + body: openArray[char] = [], + headers: openArray[HttpHeaderTuple] = [], +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = + HttpClientRequestRef + .new( + self.session, + url, + httpMethod, + version = HttpVersion11, + flags = {}, + maxResponseHeadersSize = HttpMaxHeadersSize, + headers = headers, + body = body.toOpenArrayByte(0, len(body) - 1), + ).get + .send() -proc info*(client: CodexClient): ?!JsonNode = - let url = client.baseurl & "/debug/info" - JsonNode.parse(client.http().getContent(url)) +proc post( + self: CodexClient, + url: string, + body: string = "", + headers: seq[HttpHeaderTuple] = @[], +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodPost, url, headers = headers, body = body) -proc setLogLevel*(client: CodexClient, level: string) = - let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level - let headers = newHttpHeaders({"Content-Type": "text/plain"}) - let response = client.http().request(url, httpMethod = HttpPost, headers = headers) - assert response.status == "200 OK" +proc get( + self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[] +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodGet, url, headers = headers) -proc upload*(client: CodexClient, contents: string): ?!Cid = - let response = client.http().post(client.baseurl & "/data", contents) - assert response.status == "200 OK" - Cid.init(response.body).mapFailure +proc delete( + self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[] +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodDelete, url, headers = headers) -proc upload*(client: CodexClient, bytes: seq[byte]): ?!Cid = - client.upload(string.fromBytes(bytes)) +proc patch( + self: CodexClient, + url: string, + body: string = "", + headers: seq[HttpHeaderTuple] = @[], +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return self.request(MethodPatch, url, headers = headers, body = body) -proc download*(client: CodexClient, cid: Cid, local = false): ?!string = - let response = client.http().get( - client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") - ) +proc body*( + response: HttpClientResponseRef +): Future[string] {.async: (raises: [CancelledError, HttpError]).} = + return bytesToString (await response.getBodyBytes()) - if response.status != "200 OK": - return failure(response.status) +proc getContent( + client: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[] +): Future[string] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.get(url, headers) + return await response.body - success response.body +proc info*( + client: CodexClient +): Future[?!JsonNode] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.get(client.baseurl & "/debug/info") + return JsonNode.parse(await response.body) -proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string = - let response = - client.http().get(client.baseurl & "/data/" & $cid & "/network/manifest") +proc setLogLevel*( + client: CodexClient, level: string +): Future[void] {.async: (raises: [CancelledError, HttpError]).} = + let + url = client.baseurl & "/debug/chronicles/loglevel?level=" & level + headers = @[("Content-Type", "text/plain")] + response = await client.post(url, headers = headers, body = "") + assert response.status == 200 - if response.status != "200 OK": - return failure(response.status) +proc uploadRaw*( + client: CodexClient, contents: string, headers: seq[HttpHeaderTuple] = @[] +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return client.post(client.baseurl & "/data", body = contents, headers = headers) - success response.body +proc upload*( + client: CodexClient, contents: string +): Future[?!Cid] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.uploadRaw(contents) + assert response.status == 200 + Cid.init(await response.body).mapFailure -proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = - let response = client.http().post(client.baseurl & "/data/" & $cid & "/network") +proc upload*( + client: CodexClient, bytes: seq[byte] +): Future[?!Cid] {.async: (raw: true).} = + return client.upload(string.fromBytes(bytes)) - if response.status != "200 OK": - return failure(response.status) - - success response.body +proc downloadRaw*( + client: CodexClient, cid: string, local = false +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return + client.get(client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream")) proc downloadBytes*( client: CodexClient, cid: Cid, local = false -): Future[?!seq[byte]] {.async.} = - let uri = client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") +): Future[?!seq[byte]] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.downloadRaw($cid, local = local) - let response = client.http().get(uri) + if response.status != 200: + return failure($response.status) - if response.status != "200 OK": - return failure("fetch failed with status " & $response.status) + success await response.getBodyBytes() - success response.body.toBytes +proc download*( + client: CodexClient, cid: Cid, local = false +): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} = + without response =? await client.downloadBytes(cid, local = local), err: + return failure(err) + return success bytesToString(response) -proc delete*(client: CodexClient, cid: Cid): ?!void = - let - url = client.baseurl & "/data/" & $cid - response = client.http().delete(url) +proc downloadNoStream*( + client: CodexClient, cid: Cid +): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.post(client.baseurl & "/data/" & $cid & "/network") - if response.status != "204 No Content": - return failure(response.status) + if response.status != 200: + return failure($response.status) + + success await response.body + +proc downloadManifestOnly*( + client: CodexClient, cid: Cid +): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} = + let response = + await client.get(client.baseurl & "/data/" & $cid & "/network/manifest") + + if response.status != 200: + return failure($response.status) + + success await response.body + +proc deleteRaw*( + client: CodexClient, cid: string +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return client.delete(client.baseurl & "/data/" & cid) + +proc delete*( + client: CodexClient, cid: Cid +): Future[?!void] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.deleteRaw($cid) + + if response.status != 204: + return failure($response.status) success() -proc list*(client: CodexClient): ?!RestContentList = - let url = client.baseurl & "/data" - let response = client.http().get(url) +proc listRaw*( + client: CodexClient +): Future[HttpClientResponseRef] {. 
+ async: (raw: true, raises: [CancelledError, HttpError]) +.} = + return client.get(client.baseurl & "/data") - if response.status != "200 OK": - return failure(response.status) +proc list*( + client: CodexClient +): Future[?!RestContentList] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.listRaw() - RestContentList.fromJson(response.body) + if response.status != 200: + return failure($response.status) -proc space*(client: CodexClient): ?!RestRepoStore = + RestContentList.fromJson(await response.body) + +proc space*( + client: CodexClient +): Future[?!RestRepoStore] {.async: (raises: [CancelledError, HttpError]).} = let url = client.baseurl & "/space" - let response = client.http().get(url) + let response = await client.get(url) - if response.status != "200 OK": - return failure(response.status) + if response.status != 200: + return failure($response.status) - RestRepoStore.fromJson(response.body) + RestRepoStore.fromJson(await response.body) proc requestStorageRaw*( client: CodexClient, @@ -128,7 +225,9 @@ proc requestStorageRaw*( expiry: uint64 = 0, nodes: uint = 3, tolerance: uint = 1, -): Response = +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = ## Call request storage REST endpoint ## let url = client.baseurl & "/storage/request/" & $cid @@ -145,7 +244,7 @@ proc requestStorageRaw*( if expiry != 0: json["expiry"] = %($expiry) - return client.http().post(url, $json) + return client.post(url, $json) proc requestStorage*( client: CodexClient, @@ -157,43 +256,45 @@ proc requestStorage*( collateralPerByte: UInt256, nodes: uint = 3, tolerance: uint = 1, -): ?!PurchaseId = +): Future[?!PurchaseId] {.async: (raises: [CancelledError, HttpError]).} = ## Call request storage REST endpoint ## - let response = client.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes, tolerance, - ) - if response.status != "200 OK": - doAssert(false, response.body) - PurchaseId.fromHex(response.body).catch + let + response = await client.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, + nodes, tolerance, + ) + body = await response.body -proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase = + if response.status != 200: + doAssert(false, body) + PurchaseId.fromHex(body).catch + +proc getPurchase*( + client: CodexClient, purchaseId: PurchaseId +): Future[?!RestPurchase] {.async: (raises: [CancelledError, HttpError]).} = let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex try: - let body = client.http().getContent(url) + let body = await client.getContent(url) return RestPurchase.fromJson(body) except CatchableError as e: return failure e.msg -proc getSalesAgent*(client: CodexClient, slotId: SlotId): ?!RestSalesAgent = +proc getSalesAgent*( + client: CodexClient, slotId: SlotId +): Future[?!RestSalesAgent] {.async: (raises: [CancelledError, HttpError]).} = let url = client.baseurl & "/sales/slots/" & slotId.toHex try: - let body = client.http().getContent(url) + let body = await client.getContent(url) return RestSalesAgent.fromJson(body) except CatchableError as e: return failure e.msg -proc getSlots*(client: CodexClient): ?!seq[Slot] = - let url = client.baseurl & "/sales/slots" - let body = client.http().getContent(url) - seq[Slot].fromJson(body) - proc postAvailability*( client: CodexClient, totalSize, duration: uint64, minPricePerBytePerSecond, totalCollateral: UInt256, 
-): ?!Availability = +): Future[?!Availability] {.async: (raises: [CancelledError, HttpError]).} = ## Post sales availability endpoint ## let url = client.baseurl & "/sales/availability" @@ -204,17 +305,21 @@ proc postAvailability*( "minPricePerBytePerSecond": minPricePerBytePerSecond, "totalCollateral": totalCollateral, } - let response = client.http().post(url, $json) - doAssert response.status == "201 Created", - "expected 201 Created, got " & response.status & ", body: " & response.body - Availability.fromJson(response.body) + let response = await client.post(url, $json) + let body = await response.body + + doAssert response.status == 201, + "expected 201 Created, got " & $response.status & ", body: " & body + Availability.fromJson(body) proc patchAvailabilityRaw*( client: CodexClient, availabilityId: AvailabilityId, totalSize, freeSize, duration: ?uint64 = uint64.none, minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, -): Response = +): Future[HttpClientResponseRef] {. + async: (raw: true, raises: [CancelledError, HttpError]) +.} = ## Updates availability ## let url = client.baseurl & "/sales/availability/" & $availabilityId @@ -237,66 +342,50 @@ proc patchAvailabilityRaw*( if totalCollateral =? totalCollateral: json["totalCollateral"] = %totalCollateral - client.http().patch(url, $json) + client.patch(url, $json) proc patchAvailability*( client: CodexClient, availabilityId: AvailabilityId, totalSize, duration: ?uint64 = uint64.none, minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none, -): void = - let response = client.patchAvailabilityRaw( +): Future[void] {.async: (raises: [CancelledError, HttpError]).} = + let response = await client.patchAvailabilityRaw( availabilityId, totalSize = totalSize, duration = duration, minPricePerBytePerSecond = minPricePerBytePerSecond, totalCollateral = totalCollateral, ) - doAssert response.status == "200 OK", "expected 200 OK, got " & response.status + doAssert response.status == 200, "expected 200 OK, got " & $response.status -proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = +proc getAvailabilities*( + client: CodexClient +): Future[?!seq[Availability]] {.async: (raises: [CancelledError, HttpError]).} = ## Call sales availability REST endpoint let url = client.baseurl & "/sales/availability" - let body = client.http().getContent(url) + let body = await client.getContent(url) seq[Availability].fromJson(body) proc getAvailabilityReservations*( client: CodexClient, availabilityId: AvailabilityId -): ?!seq[Reservation] = +): Future[?!seq[Reservation]] {.async: (raises: [CancelledError, HttpError]).} = ## Retrieves Availability's Reservations let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations" - let body = client.http().getContent(url) + let body = await client.getContent(url) seq[Reservation].fromJson(body) -proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool = - client.getPurchase(id).option .? state == some state +proc purchaseStateIs*( + client: CodexClient, id: PurchaseId, state: string +): Future[bool] {.async: (raises: [CancelledError, HttpError]).} = + (await client.getPurchase(id)).option .? state == some state -proc saleStateIs*(client: CodexClient, id: SlotId, state: string): bool = - client.getSalesAgent(id).option .? state == some state +proc saleStateIs*( + client: CodexClient, id: SlotId, state: string +): Future[bool] {.async: (raises: [CancelledError, HttpError]).} = + (await client.getSalesAgent(id)).option .? 
state == some state -proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId = - return client.getPurchase(id).option .? requestId - -proc uploadRaw*( - client: CodexClient, contents: string, headers = newHttpHeaders() -): Response = - return client.http().request( - client.baseurl & "/data", - body = contents, - httpMethod = HttpPost, - headers = headers, - ) - -proc listRaw*(client: CodexClient): Response = - return client.http().request(client.baseurl & "/data", httpMethod = HttpGet) - -proc downloadRaw*( - client: CodexClient, cid: string, local = false, httpClient = client.http() -): Response = - return httpClient.request( - client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"), - httpMethod = HttpGet, - ) - -proc deleteRaw*(client: CodexClient, cid: string): Response = - return client.http().request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete) +proc requestId*( + client: CodexClient, id: PurchaseId +): Future[?RequestId] {.async: (raises: [CancelledError, HttpError]).} = + return (await client.getPurchase(id)).option .? requestId diff --git a/tests/integration/codexprocess.nim b/tests/integration/codexprocess.nim index 79d4b040..3eca5b04 100644 --- a/tests/integration/codexprocess.nim +++ b/tests/integration/codexprocess.nim @@ -68,7 +68,7 @@ method stop*(node: CodexProcess) {.async.} = trace "stopping codex client" if client =? node.client: - client.close() + await client.close() node.client = none CodexClient method removeDataDir*(node: CodexProcess) = diff --git a/tests/integration/marketplacesuite.nim b/tests/integration/marketplacesuite.nim index d7502bf4..1e09963b 100644 --- a/tests/integration/marketplacesuite.nim +++ b/tests/integration/marketplacesuite.nim @@ -60,13 +60,13 @@ template marketplacesuite*(name: string, body: untyped) = duration: uint64, collateralPerByte: UInt256, minPricePerBytePerSecond: UInt256, - ) = + ): Future[void] {.async: (raises: [CancelledError, HttpError, ConfigurationError]).} = let totalCollateral = datasetSize.u256 * collateralPerByte # post availability to each provider for i in 0 ..< providers().len: let provider = providers()[i].client - discard provider.postAvailability( + discard await provider.postAvailability( totalSize = datasetSize, duration = duration.uint64, minPricePerBytePerSecond = minPricePerBytePerSecond, @@ -83,16 +83,18 @@ template marketplacesuite*(name: string, body: untyped) = expiry: uint64 = 4.periods, nodes = providers().len, tolerance = 0, - ): Future[PurchaseId] {.async.} = - let id = client.requestStorage( - cid, - expiry = expiry, - duration = duration, - proofProbability = proofProbability, - collateralPerByte = collateralPerByte, - pricePerBytePerSecond = pricePerBytePerSecond, - nodes = nodes.uint, - tolerance = tolerance.uint, + ): Future[PurchaseId] {.async: (raises: [CancelledError, HttpError]).} = + let id = ( + await client.requestStorage( + cid, + expiry = expiry, + duration = duration, + proofProbability = proofProbability, + collateralPerByte = collateralPerByte, + pricePerBytePerSecond = pricePerBytePerSecond, + nodes = nodes.uint, + tolerance = tolerance.uint, + ) ).get return id diff --git a/tests/integration/multinodes.nim b/tests/integration/multinodes.nim index bade6899..0003b216 100644 --- a/tests/integration/multinodes.nim +++ b/tests/integration/multinodes.nim @@ -275,8 +275,10 @@ template multinodesuite*(name: string, body: untyped) = fail() quit(1) - proc updateBootstrapNodes(node: CodexProcess) = - without ninfo =? 
node.client.info(): + proc updateBootstrapNodes( + node: CodexProcess + ): Future[void] {.async: (raises: [CatchableError]).} = + without ninfo =? await node.client.info(): # raise CatchableError instead of Defect (with .get or !) so we # can gracefully shutdown and prevent zombies raiseMultiNodeSuiteError "Failed to get node info" @@ -315,14 +317,14 @@ template multinodesuite*(name: string, body: untyped) = for config in clients.configs: let node = await startClientNode(config) running.add RunningNode(role: Role.Client, node: node) - CodexProcess(node).updateBootstrapNodes() + await CodexProcess(node).updateBootstrapNodes() if var providers =? nodeConfigs.providers: failAndTeardownOnError "failed to start provider nodes": for config in providers.configs.mitems: let node = await startProviderNode(config) running.add RunningNode(role: Role.Provider, node: node) - CodexProcess(node).updateBootstrapNodes() + await CodexProcess(node).updateBootstrapNodes() if var validators =? nodeConfigs.validators: failAndTeardownOnError "failed to start validator nodes": diff --git a/tests/integration/testblockexpiration.nim b/tests/integration/testblockexpiration.nim index 7e742c2a..6a33f3c6 100644 --- a/tests/integration/testblockexpiration.nim +++ b/tests/integration/testblockexpiration.nim @@ -18,11 +18,11 @@ multinodesuite "Node block expiration tests": let client = clients()[0] let clientApi = client.client - let contentId = clientApi.upload(content).get + let contentId = (await clientApi.upload(content)).get await sleepAsync(2.seconds) - let download = clientApi.download(contentId, local = true) + let download = await clientApi.download(contentId, local = true) check: download.isOk @@ -39,12 +39,12 @@ multinodesuite "Node block expiration tests": let client = clients()[0] let clientApi = client.client - let contentId = clientApi.upload(content).get + let contentId = (await clientApi.upload(content)).get await sleepAsync(3.seconds) - let download = clientApi.download(contentId, local = true) + let download = await clientApi.download(contentId, local = true) check: download.isFailure - download.error.msg == "404 Not Found" + download.error.msg == "404" diff --git a/tests/integration/testecbug.nim b/tests/integration/testecbug.nim index 29a3bc6f..6b86fd29 100644 --- a/tests/integration/testecbug.nim +++ b/tests/integration/testecbug.nim @@ -13,21 +13,18 @@ marketplacesuite "Bug #821 - node crashes during erasure coding": .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log .withLogTopics("node", "erasure", "marketplace").some, - providers: CodexConfigs.init(nodes = 0) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") - .some, + providers: CodexConfigs.init(nodes = 0).some, ): - let pricePerBytePerSecond = 1.u256 - let duration = 20.periods - let collateralPerByte = 1.u256 - let expiry = 10.periods - let data = await RandomChunker.example(blocks = 8) - let client = clients()[0] - let clientApi = client.client + let + pricePerBytePerSecond = 1.u256 + duration = 20.periods + collateralPerByte = 1.u256 + expiry = 10.periods + data = await RandomChunker.example(blocks = 8) + client = clients()[0] + clientApi = client.client - let cid = clientApi.upload(data).get + let cid = (await clientApi.upload(data)).get var requestId = none RequestId proc onStorageRequested(eventResult: ?!StorageRequested) = @@ 
-49,9 +46,11 @@ marketplacesuite "Bug #821 - node crashes during erasure coding": check eventually(requestId.isSome, timeout = expiry.int * 1000) - let request = await marketplace.getRequest(requestId.get) - let cidFromRequest = request.content.cid - let downloaded = await clientApi.downloadBytes(cidFromRequest, local = true) + let + request = await marketplace.getRequest(requestId.get) + cidFromRequest = request.content.cid + downloaded = await clientApi.downloadBytes(cidFromRequest, local = true) + check downloaded.isOk check downloaded.get.toHex == data.toHex diff --git a/tests/integration/testmarketplace.nim b/tests/integration/testmarketplace.nim index 727f3fad..dee3645e 100644 --- a/tests/integration/testmarketplace.nim +++ b/tests/integration/testmarketplace.nim @@ -37,15 +37,17 @@ marketplacesuite "Marketplace": let size = 0xFFFFFF.uint64 let data = await RandomChunker.example(blocks = blocks) # host makes storage available - let availability = host.postAvailability( - totalSize = size, - duration = 20 * 60.uint64, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = size.u256 * minPricePerBytePerSecond, + let availability = ( + await host.postAvailability( + totalSize = size, + duration = 20 * 60.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, + ) ).get # client requests storage - let cid = client.upload(data).get + let cid = (await client.upload(data)).get let id = await client.requestStorage( cid, duration = 20 * 60.uint64, @@ -57,15 +59,17 @@ marketplacesuite "Marketplace": tolerance = ecTolerance, ) - check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) - let purchase = client.getPurchase(id).get + check eventually( + await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000 + ) + let purchase = (await client.getPurchase(id)).get check purchase.error == none string - let availabilities = host.getAvailabilities().get + let availabilities = (await host.getAvailabilities()).get check availabilities.len == 1 let newSize = availabilities[0].freeSize check newSize > 0 and newSize < size - let reservations = host.getAvailabilityReservations(availability.id).get + let reservations = (await host.getAvailabilityReservations(availability.id)).get check reservations.len == 3 check reservations[0].requestId == purchase.requestId @@ -80,15 +84,17 @@ marketplacesuite "Marketplace": # host makes storage available let startBalanceHost = await token.balanceOf(hostAccount) - discard host.postAvailability( - totalSize = size, - duration = 20 * 60.uint64, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = size.u256 * minPricePerBytePerSecond, + discard ( + await host.postAvailability( + totalSize = size, + duration = 20 * 60.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = size.u256 * minPricePerBytePerSecond, + ) ).get # client requests storage - let cid = client.upload(data).get + let cid = (await client.upload(data)).get let id = await client.requestStorage( cid, duration = duration, @@ -100,8 +106,10 @@ marketplacesuite "Marketplace": tolerance = ecTolerance, ) - check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) - let purchase = client.getPurchase(id).get + check eventually( + await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000 + ) + let purchase = (await client.getPurchase(id)).get check purchase.error == none string let clientBalanceBeforeFinished = 
await token.balanceOf(clientAccount) @@ -158,7 +166,7 @@ marketplacesuite "Marketplace payouts": # provider makes storage available let datasetSize = datasetSize(blocks, ecNodes, ecTolerance) let totalAvailabilitySize = (datasetSize div 2).truncate(uint64) - discard providerApi.postAvailability( + discard await providerApi.postAvailability( # make availability size small enough that we can't fill all the slots, # thus causing a cancellation totalSize = totalAvailabilitySize, @@ -167,7 +175,7 @@ marketplacesuite "Marketplace payouts": totalCollateral = collateralPerByte * totalAvailabilitySize.u256, ) - let cid = clientApi.upload(data).get + let cid = (await clientApi.upload(data)).get var slotIdxFilled = none uint64 proc onSlotFilled(eventResult: ?!SlotFilled) = @@ -189,11 +197,11 @@ marketplacesuite "Marketplace payouts": # wait until one slot is filled check eventually(slotIdxFilled.isSome, timeout = expiry.int * 1000) - let slotId = slotId(!clientApi.requestId(id), !slotIdxFilled) + let slotId = slotId(!(await clientApi.requestId(id)), !slotIdxFilled) # wait until sale is cancelled await ethProvider.advanceTime(expiry.u256) - check eventually providerApi.saleStateIs(slotId, "SaleCancelled") + check eventually await providerApi.saleStateIs(slotId, "SaleCancelled") await advanceToNextPeriod() diff --git a/tests/integration/testproofs.nim b/tests/integration/testproofs.nim index ab29ca4e..b0ede765 100644 --- a/tests/integration/testproofs.nim +++ b/tests/integration/testproofs.nim @@ -42,14 +42,14 @@ marketplacesuite "Hosts submit regular proofs": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( + await createAvailabilities( datasetSize.truncate(uint64), duration, collateralPerByte, minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, @@ -59,13 +59,13 @@ marketplacesuite "Hosts submit regular proofs": tolerance = ecTolerance, ) - let purchase = client0.getPurchase(purchaseId).get + let purchase = (await client0.getPurchase(purchaseId)).get check purchase.error == none string let slotSize = slotSize(blocks, ecNodes, ecTolerance) check eventually( - client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 + await client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 ) var proofWasSubmitted = false @@ -119,27 +119,29 @@ marketplacesuite "Simulate invalid proofs": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( + await createAvailabilities( datasetSize.truncate(uint64), duration, collateralPerByte, minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get - let purchaseId = await client0.requestStorage( - cid, - expiry = expiry, - duration = duration, - nodes = ecNodes, - tolerance = ecTolerance, - proofProbability = 1.u256, + let purchaseId = ( + await client0.requestStorage( + cid, + expiry = expiry, + duration = duration, + nodes = ecNodes, + tolerance = ecTolerance, + proofProbability = 1.u256, + ) ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get check eventually( - client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 + await client0.purchaseStateIs(purchaseId, 
"started"), timeout = expiry.int * 1000 ) var slotWasFreed = false @@ -182,14 +184,14 @@ marketplacesuite "Simulate invalid proofs": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( + await createAvailabilities( datasetSize.truncate(uint64), duration, collateralPerByte, minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, @@ -199,7 +201,7 @@ marketplacesuite "Simulate invalid proofs": tolerance = ecTolerance, proofProbability = 1.u256, ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get var slotWasFilled = false proc onSlotFilled(eventResult: ?!SlotFilled) = diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim index 4eb5c775..e5adebe2 100644 --- a/tests/integration/testpurchasing.nim +++ b/tests/integration/testpurchasing.nim @@ -8,22 +8,26 @@ import ../examples twonodessuite "Purchasing": test "node handles storage request", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let id1 = client1.requestStorage( - cid, - duration = 100.uint64, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - expiry = 10.uint64, - collateralPerByte = 1.u256, + let cid = (await client1.upload(data)).get + let id1 = ( + await client1.requestStorage( + cid, + duration = 100.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 10.uint64, + collateralPerByte = 1.u256, + ) ).get - let id2 = client1.requestStorage( - cid, - duration = 400.uint64, - pricePerBytePerSecond = 2.u256, - proofProbability = 6.u256, - expiry = 10.uint64, - collateralPerByte = 2.u256, + let id2 = ( + await client1.requestStorage( + cid, + duration = 400.uint64, + pricePerBytePerSecond = 2.u256, + proofProbability = 6.u256, + expiry = 10.uint64, + collateralPerByte = 2.u256, + ) ).get check id1 != id2 @@ -34,19 +38,21 @@ twonodessuite "Purchasing": rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2 ) let data = await chunker.getBytes() - let cid = client1.upload(byteutils.toHex(data)).get - let id = client1.requestStorage( - cid, - duration = 100.uint64, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - expiry = 30.uint64, - collateralPerByte = 1.u256, - nodes = 3, - tolerance = 1, + let cid = (await client1.upload(byteutils.toHex(data))).get + let id = ( + await client1.requestStorage( + cid, + duration = 100.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 30.uint64, + collateralPerByte = 1.u256, + nodes = 3, + tolerance = 1, + ) ).get - let request = client1.getPurchase(id).get.request.get + let request = (await client1.getPurchase(id)).get.request.get check request.content.cid.data.buffer.len > 0 check request.ask.duration == 100.uint64 @@ -75,23 +81,29 @@ twonodessuite "Purchasing": test "node remembers purchase status after restart", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let id = client1.requestStorage( - cid, - duration = 10 * 60.uint64, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - expiry = 5 * 60.uint64, - collateralPerByte = 1.u256, - nodes = 3.uint, - tolerance = 1.uint, + let cid = (await client1.upload(data)).get + let id = ( + await client1.requestStorage( + cid, 
+ duration = 10 * 60.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + expiry = 5 * 60.uint64, + collateralPerByte = 1.u256, + nodes = 3.uint, + tolerance = 1.uint, + ) ).get - check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) + check eventually( + await client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000 + ) await node1.restart() - check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) - let request = client1.getPurchase(id).get.request.get + check eventually( + await client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000 + ) + let request = (await client1.getPurchase(id)).get.request.get check request.ask.duration == (10 * 60).uint64 check request.ask.pricePerBytePerSecond == 1.u256 check request.ask.proofProbability == 3.u256 @@ -102,19 +114,19 @@ twonodessuite "Purchasing": test "node requires expiry and its value to be in future", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get - let responseMissing = client1.requestStorageRaw( + let responseMissing = await client1.requestStorageRaw( cid, duration = 1.uint64, pricePerBytePerSecond = 1.u256, proofProbability = 3.u256, collateralPerByte = 1.u256, ) - check responseMissing.status == "400 Bad Request" - check responseMissing.body == "Expiry required" + check responseMissing.status == 400 + check (await responseMissing.body) == "Expiry required" - let responseBefore = client1.requestStorageRaw( + let responseBefore = await client1.requestStorageRaw( cid, duration = 10.uint64, pricePerBytePerSecond = 1.u256, @@ -122,6 +134,6 @@ twonodessuite "Purchasing": collateralPerByte = 1.u256, expiry = 10.uint64, ) - check responseBefore.status == "400 Bad Request" + check responseBefore.status == 400 check "Expiry needs value bigger then zero and smaller then the request's duration" in - responseBefore.body + (await responseBefore.body) diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 7164372b..761eda31 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -1,4 +1,3 @@ -import std/httpclient import std/importutils import std/net import std/sequtils @@ -14,29 +13,31 @@ import json twonodessuite "REST API": test "nodes can print their peer information", twoNodesConfig: - check !client1.info() != !client2.info() + check !(await client1.info()) != !(await client2.info()) test "nodes can set chronicles log level", twoNodesConfig: - client1.setLogLevel("DEBUG;TRACE:codex") + await client1.setLogLevel("DEBUG;TRACE:codex") test "node accepts file uploads", twoNodesConfig: - let cid1 = client1.upload("some file contents").get - let cid2 = client1.upload("some other contents").get + let cid1 = (await client1.upload("some file contents")).get + let cid2 = (await client1.upload("some other contents")).get check cid1 != cid2 test "node shows used and available space", twoNodesConfig: - discard client1.upload("some file contents").get + discard (await client1.upload("some file contents")).get let totalSize = 12.uint64 let minPricePerBytePerSecond = 1.u256 let totalCollateral = totalSize.u256 * minPricePerBytePerSecond - discard client1.postAvailability( - totalSize = totalSize, - duration = 2.uint64, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = totalCollateral, + discard ( + await client1.postAvailability( + totalSize = totalSize, + duration = 
2.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + ) ).get - let space = client1.space().tryGet() + let space = (await client1.space()).tryGet() check: space.totalBlocks == 2 space.quotaMaxBytes == 21474836480.NBytes @@ -47,48 +48,52 @@ twonodessuite "REST API": let content1 = "some file contents" let content2 = "some other contents" - let cid1 = client1.upload(content1).get - let cid2 = client1.upload(content2).get - let list = client1.list().get + let cid1 = (await client1.upload(content1)).get + let cid2 = (await client1.upload(content2)).get + let list = (await client1.list()).get check: [cid1, cid2].allIt(it in list.content.mapIt(it.cid)) test "request storage fails for datasets that are too small", twoNodesConfig: - let cid = client1.upload("some file contents").get - let response = client1.requestStorageRaw( - cid, - duration = 10.uint64, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - collateralPerByte = 1.u256, - expiry = 9.uint64, + let cid = (await client1.upload("some file contents")).get + let response = ( + await client1.requestStorageRaw( + cid, + duration = 10.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + expiry = 9.uint64, + ) ) check: - response.status == "400 Bad Request" - response.body == + response.status == 400 + (await response.body) == "Dataset too small for erasure parameters, need at least " & $(2 * DefaultBlockSize.int) & " bytes" test "request storage succeeds for sufficiently sized datasets", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get - let response = client1.requestStorageRaw( - cid, - duration = 10.uint64, - pricePerBytePerSecond = 1.u256, - proofProbability = 3.u256, - collateralPerByte = 1.u256, - expiry = 9.uint64, + let cid = (await client1.upload(data)).get + let response = ( + await client1.requestStorageRaw( + cid, + duration = 10.uint64, + pricePerBytePerSecond = 1.u256, + proofProbability = 3.u256, + collateralPerByte = 1.u256, + expiry = 9.uint64, + ) ) check: - response.status == "200 OK" + response.status == 200 test "request storage fails if tolerance is zero", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 @@ -97,17 +102,19 @@ twonodessuite "REST API": let nodes = 3 let tolerance = 0 - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes.uint, tolerance.uint, + var responseBefore = ( + await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) ) - check responseBefore.status == "400 Bad Request" - check responseBefore.body == "Tolerance needs to be bigger then zero" + check responseBefore.status == 400 + check (await responseBefore.body) == "Tolerance needs to be bigger then zero" test "request storage fails if duration exceeds limit", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get let duration = (31 * 24 * 60 * 60).uint64 # 31 days TODO: this should not be hardcoded, but waits for https://github.com/codex-storage/nim-codex/issues/1056 let proofProbability = 3.u256 @@ -117,17 +124,19 @@ 
twonodessuite "REST API": let tolerance = 2 let pricePerBytePerSecond = 1.u256 - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry, - nodes.uint, tolerance.uint, + var responseBefore = ( + await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) ) - check responseBefore.status == "400 Bad Request" - check "Duration exceeds limit of" in responseBefore.body + check responseBefore.status == 400 + check "Duration exceeds limit of" in (await responseBefore.body) test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 @@ -138,19 +147,21 @@ twonodessuite "REST API": for ecParam in ecParams: let (nodes, tolerance) = ecParam - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, + var responseBefore = ( + await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) ) - check responseBefore.status == "400 Bad Request" - check responseBefore.body == + check responseBefore.status == 400 + check (await responseBefore.body) == "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" test "request storage fails if tolerance > nodes (underflow protection)", twoNodesConfig: let data = await RandomChunker.example(blocks = 2) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 @@ -161,13 +172,15 @@ twonodessuite "REST API": for ecParam in ecParams: let (nodes, tolerance) = ecParam - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, + var responseBefore = ( + await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) ) - check responseBefore.status == "400 Bad Request" - check responseBefore.body == + check responseBefore.status == 400 + check (await responseBefore.body) == "Invalid parameters: `tolerance` cannot be greater than `nodes`" for ecParams in @[ @@ -177,70 +190,69 @@ twonodessuite "REST API": test "request storage succeeds if nodes and tolerance within range " & fmt"({minBlocks=}, {nodes=}, {tolerance=})", twoNodesConfig: let data = await RandomChunker.example(blocks = minBlocks) - let cid = client1.upload(data).get + let cid = (await client1.upload(data)).get let duration = 100.uint64 let pricePerBytePerSecond = 1.u256 let proofProbability = 3.u256 let expiry = 30.uint64 let collateralPerByte = 1.u256 - var responseBefore = client1.requestStorageRaw( - cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, - expiry, nodes.uint, tolerance.uint, + var responseBefore = ( + await client1.requestStorageRaw( + cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, + expiry, nodes.uint, tolerance.uint, + ) ) - check responseBefore.status == "200 OK" + check responseBefore.status == 200 test "node 
accepts file uploads with content type", twoNodesConfig: - let headers = newHttpHeaders({"Content-Type": "text/plain"}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Type", "text/plain")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "200 OK" - check response.body != "" + check response.status == 200 + check (await response.body) != "" test "node accepts file uploads with content disposition", twoNodesConfig: - let headers = - newHttpHeaders({"Content-Disposition": "attachment; filename=\"example.txt\""}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Disposition", "attachment; filename=\"example.txt\"")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "200 OK" - check response.body != "" + check response.status == 200 + check (await response.body) != "" test "node accepts file uploads with content disposition without filename", twoNodesConfig: - let headers = newHttpHeaders({"Content-Disposition": "attachment"}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Disposition", "attachment")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "200 OK" - check response.body != "" + check response.status == 200 + check (await response.body) != "" test "upload fails if content disposition contains bad filename", twoNodesConfig: - let headers = - newHttpHeaders({"Content-Disposition": "attachment; filename=\"exam*ple.txt\""}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Disposition", "attachment; filename=\"exam*ple.txt\"")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "422 Unprocessable Entity" - check response.body == "The filename is not valid." + check response.status == 422 + check (await response.body) == "The filename is not valid." test "upload fails if content type is invalid", twoNodesConfig: - let headers = newHttpHeaders({"Content-Type": "hello/world"}) - let response = client1.uploadRaw("some file contents", headers) + let headers = @[("Content-Type", "hello/world")] + let response = await client1.uploadRaw("some file contents", headers) - check response.status == "422 Unprocessable Entity" - check response.body == "The MIME type 'hello/world' is not valid." + check response.status == 422 + check (await response.body) == "The MIME type 'hello/world' is not valid." 
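The checks above follow the pattern this patch applies across the suite: raw helpers return a chronos HttpClientResponseRef, status codes are compared as plain integers rather than status strings, and response bodies must be awaited explicitly. A minimal sketch of the same flow outside the test harness, assuming a node listening on an illustrative base URL (CodexClient.new, close, uploadRaw and body are the helpers introduced in this patch):

import pkg/chronos
import ./codexclient # the async client defined in this patch

proc checkInvalidMimeType() {.async: (raises: [CancelledError, HttpError]).} =
  # Illustrative endpoint; any running Codex REST API would do.
  let client = CodexClient.new("http://127.0.0.1:8080/api/codex/v1")
  defer:
    await client.close()

  # An invalid MIME type is rejected with 422, mirroring the test above.
  let headers = @[("Content-Type", "hello/world")]
  let response = await client.uploadRaw("some file contents", headers)

  doAssert response.status == 422
  # The body is no longer a field on the response; it is awaited separately.
  doAssert (await response.body) == "The MIME type 'hello/world' is not valid."
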
test "node retrieve the metadata", twoNodesConfig: - let headers = newHttpHeaders( - { - "Content-Type": "text/plain", - "Content-Disposition": "attachment; filename=\"example.txt\"", - } - ) - let uploadResponse = client1.uploadRaw("some file contents", headers) - let cid = uploadResponse.body - let listResponse = client1.listRaw() + let headers = + @[ + ("Content-Type", "text/plain"), + ("Content-Disposition", "attachment; filename=\"example.txt\""), + ] + let uploadResponse = await client1.uploadRaw("some file contents", headers) + let cid = await uploadResponse.body + let listResponse = await client1.listRaw() - let jsonData = parseJson(listResponse.body) + let jsonData = parseJson(await listResponse.body) check jsonData.hasKey("content") == true @@ -256,83 +268,79 @@ twonodessuite "REST API": check manifest["mimetype"].getStr() == "text/plain" test "node set the headers when for download", twoNodesConfig: - let headers = newHttpHeaders( - { - "Content-Disposition": "attachment; filename=\"example.txt\"", - "Content-Type": "text/plain", - } - ) + let headers = + @[ + ("Content-Disposition", "attachment; filename=\"example.txt\""), + ("Content-Type", "text/plain"), + ] - let uploadResponse = client1.uploadRaw("some file contents", headers) - let cid = uploadResponse.body + let uploadResponse = await client1.uploadRaw("some file contents", headers) + let cid = await uploadResponse.body - check uploadResponse.status == "200 OK" + check uploadResponse.status == 200 - let response = client1.downloadRaw(cid) + let response = await client1.downloadRaw(cid) - check response.status == "200 OK" - check response.headers.hasKey("Content-Type") == true - check response.headers["Content-Type"] == "text/plain" - check response.headers.hasKey("Content-Disposition") == true - check response.headers["Content-Disposition"] == + check response.status == 200 + check "Content-Type" in response.headers + check response.headers.getString("Content-Type") == "text/plain" + check "Content-Disposition" in response.headers + check response.headers.getString("Content-Disposition") == "attachment; filename=\"example.txt\"" let local = true - let localResponse = client1.downloadRaw(cid, local) + let localResponse = await client1.downloadRaw(cid, local) - check localResponse.status == "200 OK" - check localResponse.headers.hasKey("Content-Type") == true - check localResponse.headers["Content-Type"] == "text/plain" - check localResponse.headers.hasKey("Content-Disposition") == true - check localResponse.headers["Content-Disposition"] == + check localResponse.status == 200 + check "Content-Type" in localResponse.headers + check localResponse.headers.getString("Content-Type") == "text/plain" + check "Content-Disposition" in localResponse.headers + check localResponse.headers.getString("Content-Disposition") == "attachment; filename=\"example.txt\"" test "should delete a dataset when requested", twoNodesConfig: - let cid = client1.upload("some file contents").get + let cid = (await client1.upload("some file contents")).get - var response = client1.downloadRaw($cid, local = true) - check response.body == "some file contents" + var response = await client1.downloadRaw($cid, local = true) + check (await response.body) == "some file contents" - client1.delete(cid).get + (await client1.delete(cid)).get - response = client1.downloadRaw($cid, local = true) - check response.status == "404 Not Found" + response = await client1.downloadRaw($cid, local = true) + check response.status == 404 test "should return 200 when attempting 
delete of non-existing block", twoNodesConfig: - let response = client1.deleteRaw($(Cid.example())) - check response.status == "204 No Content" + let response = await client1.deleteRaw($(Cid.example())) + check response.status == 204 test "should return 204 when attempting delete of non-existing dataset", twoNodesConfig: let cid = Manifest.example().makeManifestBlock().get.cid - let response = client1.deleteRaw($cid) - check response.status == "204 No Content" + let response = await client1.deleteRaw($cid) + check response.status == 204 test "should not crash if the download stream is closed before download completes", twoNodesConfig: - privateAccess(client1.type) - privateAccess(client1.http.type) + # FIXME this is not a good test. For some reason, to get this to fail, I have to + # store content that is several times the default stream buffer size, otherwise + # the test will succeed even when the bug is present. Since this is probably some + # setting that is internal to chronos, it might change in future versions, + # invalidating this test. Works on Chronos 4.0.3. - let cid = client1.upload(repeat("some file contents", 1000)).get - let httpClient = client1.http() + let + contents = repeat("b", DefaultStreamBufferSize * 10) + cid = (await client1.upload(contents)).get + response = await client1.downloadRaw($cid) - try: - # Sadly, there's no high level API for preventing the client from - # consuming the whole response, and we need to close the socket - # before that happens if we want to trigger the bug, so we need to - # resort to this. - httpClient.getBody = false - let response = client1.downloadRaw($cid, httpClient = httpClient) + let reader = response.getBodyReader() - # Read 4 bytes from the stream just to make sure we actually - # receive some data. - let data = httpClient.socket.recv(4) - check data.len == 4 + # Read 4 bytes from the stream just to make sure we actually + # receive some data. + check (bytesToString await reader.read(4)) == "bbbb" - # Prematurely closes the connection. - httpClient.close() - finally: - httpClient.getBody = true + # Abruptly closes the stream (we have to dig all the way to the transport + # or Chronos will close things "nicely"). 
+ response.connection.reader.tsource.close() - let response = client1.downloadRaw($cid, httpClient = httpClient) - check response.body == repeat("some file contents", 1000) + let response2 = await client1.downloadRaw($cid) + check (await response2.body) == contents diff --git a/tests/integration/testsales.nim b/tests/integration/testsales.nim index 6c5c30d5..2d7a199c 100644 --- a/tests/integration/testsales.nim +++ b/tests/integration/testsales.nim @@ -30,54 +30,63 @@ multinodesuite "Sales": client = clients()[0].client test "node handles new storage availability", salesConfig: - let availability1 = host.postAvailability( - totalSize = 1.uint64, - duration = 2.uint64, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 4.u256, + let availability1 = ( + await host.postAvailability( + totalSize = 1.uint64, + duration = 2.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 4.u256, + ) ).get - let availability2 = host.postAvailability( - totalSize = 4.uint64, - duration = 5.uint64, - minPricePerBytePerSecond = 6.u256, - totalCollateral = 7.u256, + let availability2 = ( + await host.postAvailability( + totalSize = 4.uint64, + duration = 5.uint64, + minPricePerBytePerSecond = 6.u256, + totalCollateral = 7.u256, + ) ).get check availability1 != availability2 test "node lists storage that is for sale", salesConfig: - let availability = host.postAvailability( - totalSize = 1.uint64, - duration = 2.uint64, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 4.u256, + let availability = ( + await host.postAvailability( + totalSize = 1.uint64, + duration = 2.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 4.u256, + ) ).get - check availability in host.getAvailabilities().get + check availability in (await host.getAvailabilities()).get test "updating non-existing availability", salesConfig: - let nonExistingResponse = host.patchAvailabilityRaw( + let nonExistingResponse = await host.patchAvailabilityRaw( AvailabilityId.example, duration = 100.uint64.some, minPricePerBytePerSecond = 2.u256.some, totalCollateral = 200.u256.some, ) - check nonExistingResponse.status == "404 Not Found" + check nonExistingResponse.status == 404 test "updating availability", salesConfig: - let availability = host.postAvailability( - totalSize = 140000.uint64, - duration = 200.uint64, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 300.u256, + let availability = ( + await host.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) ).get - host.patchAvailability( + await host.patchAvailability( availability.id, duration = 100.uint64.some, minPricePerBytePerSecond = 2.u256.some, totalCollateral = 200.u256.some, ) - let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get + let updatedAvailability = + ((await host.getAvailabilities()).get).findItem(availability).get check updatedAvailability.duration == 100.uint64 check updatedAvailability.minPricePerBytePerSecond == 2 check updatedAvailability.totalCollateral == 200 @@ -85,26 +94,31 @@ multinodesuite "Sales": check updatedAvailability.freeSize == 140000.uint64 test "updating availability - freeSize is not allowed to be changed", salesConfig: - let availability = host.postAvailability( - totalSize = 140000.uint64, - duration = 200.uint64, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 300.u256, + let availability = ( + await host.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + 
minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) ).get let freeSizeResponse = - host.patchAvailabilityRaw(availability.id, freeSize = 110000.uint64.some) - check freeSizeResponse.status == "400 Bad Request" - check "not allowed" in freeSizeResponse.body + await host.patchAvailabilityRaw(availability.id, freeSize = 110000.uint64.some) + check freeSizeResponse.status == 400 + check "not allowed" in (await freeSizeResponse.body) test "updating availability - updating totalSize", salesConfig: - let availability = host.postAvailability( - totalSize = 140000.uint64, - duration = 200.uint64, - minPricePerBytePerSecond = 3.u256, - totalCollateral = 300.u256, + let availability = ( + await host.postAvailability( + totalSize = 140000.uint64, + duration = 200.uint64, + minPricePerBytePerSecond = 3.u256, + totalCollateral = 300.u256, + ) ).get - host.patchAvailability(availability.id, totalSize = 100000.uint64.some) - let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get + await host.patchAvailability(availability.id, totalSize = 100000.uint64.some) + let updatedAvailability = + ((await host.getAvailabilities()).get).findItem(availability).get check updatedAvailability.totalSize == 100000 check updatedAvailability.freeSize == 100000 @@ -115,38 +129,51 @@ multinodesuite "Sales": let minPricePerBytePerSecond = 3.u256 let collateralPerByte = 1.u256 let totalCollateral = originalSize.u256 * collateralPerByte - let availability = host.postAvailability( - totalSize = originalSize, - duration = 20 * 60.uint64, - minPricePerBytePerSecond = minPricePerBytePerSecond, - totalCollateral = totalCollateral, + let availability = ( + await host.postAvailability( + totalSize = originalSize, + duration = 20 * 60.uint64, + minPricePerBytePerSecond = minPricePerBytePerSecond, + totalCollateral = totalCollateral, + ) ).get # Let's create a storage request that will utilize some of the availability's space - let cid = client.upload(data).get - let id = client.requestStorage( - cid, - duration = 20 * 60.uint64, - pricePerBytePerSecond = minPricePerBytePerSecond, - proofProbability = 3.u256, - expiry = (10 * 60).uint64, - collateralPerByte = collateralPerByte, - nodes = 3, - tolerance = 1, + let cid = (await client.upload(data)).get + let id = ( + await client.requestStorage( + cid, + duration = 20 * 60.uint64, + pricePerBytePerSecond = minPricePerBytePerSecond, + proofProbability = 3.u256, + expiry = (10 * 60).uint64, + collateralPerByte = collateralPerByte, + nodes = 3, + tolerance = 1, + ) ).get - check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) - let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get + check eventually( + await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000 + ) + let updatedAvailability = + ((await host.getAvailabilities()).get).findItem(availability).get check updatedAvailability.totalSize != updatedAvailability.freeSize let utilizedSize = updatedAvailability.totalSize - updatedAvailability.freeSize - let totalSizeResponse = - host.patchAvailabilityRaw(availability.id, totalSize = (utilizedSize - 1).some) - check totalSizeResponse.status == "400 Bad Request" - check "totalSize must be larger then current totalSize" in totalSizeResponse.body + let totalSizeResponse = ( + await host.patchAvailabilityRaw( + availability.id, totalSize = (utilizedSize - 1).some ) ) + check totalSizeResponse.status == 400 + check "totalSize must be larger then current totalSize" in + (await 
totalSizeResponse.body) - host.patchAvailability(availability.id, totalSize = (originalSize + 20000).some) + await host.patchAvailability( + availability.id, totalSize = (originalSize + 20000).some + ) let newUpdatedAvailability = - (host.getAvailabilities().get).findItem(availability).get + ((await host.getAvailabilities()).get).findItem(availability).get check newUpdatedAvailability.totalSize == originalSize + 20000 check newUpdatedAvailability.freeSize - updatedAvailability.freeSize == 20000 diff --git a/tests/integration/testupdownload.nim b/tests/integration/testupdownload.nim index 05d3a496..24e6039c 100644 --- a/tests/integration/testupdownload.nim +++ b/tests/integration/testupdownload.nim @@ -9,11 +9,11 @@ twonodessuite "Uploads and downloads": let content1 = "some file contents" let content2 = "some other contents" - let cid1 = client1.upload(content1).get - let cid2 = client2.upload(content2).get + let cid1 = (await client1.upload(content1)).get + let cid2 = (await client2.upload(content2)).get - let resp1 = client1.download(cid1, local = true).get - let resp2 = client2.download(cid2, local = true).get + let resp1 = (await client1.download(cid1, local = true)).get + let resp2 = (await client2.download(cid2, local = true)).get check: content1 == resp1 @@ -23,11 +23,11 @@ twonodessuite "Uploads and downloads": let content1 = "some file contents" let content2 = "some other contents" - let cid1 = client1.upload(content1).get - let cid2 = client2.upload(content2).get + let cid1 = (await client1.upload(content1)).get + let cid2 = (await client2.upload(content2)).get - let resp2 = client1.download(cid2, local = false).get - let resp1 = client2.download(cid1, local = false).get + let resp2 = (await client1.download(cid2, local = false)).get + let resp1 = (await client2.download(cid1, local = false)).get check: content1 == resp1 @@ -35,11 +35,12 @@ twonodessuite "Uploads and downloads": test "node fails retrieving non-existing local file", twoNodesConfig: let content1 = "some file contents" - let cid1 = client1.upload(content1).get # upload to first node - let resp2 = client2.download(cid1, local = true) # try retrieving from second node + let cid1 = (await client1.upload(content1)).get # upload to first node + let resp2 = + await client2.download(cid1, local = true) # try retrieving from second node check: - resp2.error.msg == "404 Not Found" + resp2.error.msg == "404" proc checkRestContent(cid: Cid, content: ?!string) = let c = content.tryGet() @@ -67,26 +68,28 @@ twonodessuite "Uploads and downloads": test "node allows downloading only manifest", twoNodesConfig: let content1 = "some file contents" - let cid1 = client1.upload(content1).get + let cid1 = (await client1.upload(content1)).get - let resp2 = client1.downloadManifestOnly(cid1) + let resp2 = await client1.downloadManifestOnly(cid1) checkRestContent(cid1, resp2) test "node allows downloading content without stream", twoNodesConfig: - let content1 = "some file contents" - let cid1 = client1.upload(content1).get + let + content1 = "some file contents" + cid1 = (await client1.upload(content1)).get + resp1 = await client2.downloadNoStream(cid1) - let resp1 = client2.downloadNoStream(cid1) checkRestContent(cid1, resp1) - let resp2 = client2.download(cid1, local = true).get + + let resp2 = (await client2.download(cid1, local = true)).get check: content1 == resp2 test "reliable transfer test", twoNodesConfig: proc transferTest(a: CodexClient, b: CodexClient) {.async.} = let data = await RandomChunker.example(blocks = 8) - let cid = 
a.upload(data).get - let response = b.download(cid).get + let cid = (await a.upload(data)).get + let response = (await b.download(cid)).get check: @response.mapIt(it.byte) == data diff --git a/tests/integration/testvalidator.nim b/tests/integration/testvalidator.nim index 7f4bc851..0d1a50e8 100644 --- a/tests/integration/testvalidator.nim +++ b/tests/integration/testvalidator.nim @@ -99,14 +99,14 @@ marketplacesuite "Validation": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( + await createAvailabilities( datasetSize.truncate(uint64), duration, collateralPerByte, minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, expiry = expiry, @@ -115,12 +115,12 @@ marketplacesuite "Validation": tolerance = ecTolerance, proofProbability = proofProbability, ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId if not eventuallyS( - client0.purchaseStateIs(purchaseId, "started"), + await client0.purchaseStateIs(purchaseId, "started"), timeout = (expiry + 60).int, step = 5, ): @@ -169,14 +169,14 @@ marketplacesuite "Validation": let data = await RandomChunker.example(blocks = blocks) let datasetSize = datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance) - createAvailabilities( + await createAvailabilities( datasetSize.truncate(uint64), duration, collateralPerByte, minPricePerBytePerSecond, ) - let cid = client0.upload(data).get + let cid = (await client0.upload(data)).get let purchaseId = await client0.requestStorage( cid, expiry = expiry, @@ -185,12 +185,12 @@ marketplacesuite "Validation": tolerance = ecTolerance, proofProbability = proofProbability, ) - let requestId = client0.requestId(purchaseId).get + let requestId = (await client0.requestId(purchaseId)).get debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId if not eventuallyS( - client0.purchaseStateIs(purchaseId, "started"), + await client0.purchaseStateIs(purchaseId, "started"), timeout = (expiry + 60).int, step = 5, ): diff --git a/tests/testTaiko.nim b/tests/testTaiko.nim index 8036e8a3..b1555bfb 100644 --- a/tests/testTaiko.nim +++ b/tests/testTaiko.nim @@ -24,7 +24,7 @@ suite "Taiko L2 Integration Tests": ) node1.waitUntilStarted() - let bootstrap = (!node1.client.info())["spr"].getStr() + let bootstrap = (!(await node1.client.info()))["spr"].getStr() node2 = startNode( [ From 9d7b521519329766cee675ece4013614adc7c6ad Mon Sep 17 00:00:00 2001 From: Arnaud Date: Tue, 18 Mar 2025 08:06:46 +0100 Subject: [PATCH 13/14] chore: add missing custom errors (#1134) * Add missing custom errors * Separate mock state errors * Remove the Option in the error setters * Wrap the contract errors in MarketError * Remove async raises (needs to be addressed in another PR) * Wrap contract errors into specific error types * Rename SlotNotFreeError to SlotStateMismatchError --- codex/contracts/market.nim | 36 +++++++++++------ codex/contracts/marketplace.nim | 1 + codex/market.nim | 2 + codex/sales/states/filling.nim | 12 +++--- codex/sales/states/slotreserving.nim | 9 ++--- tests/codex/helpers/mockmarket.nim | 23 +++++++++-- tests/codex/sales/states/testfilling.nim | 40 ++++++++++++++++++- .../codex/sales/states/testslotreserving.nim | 9 +++-- 8 files 
changed, 100 insertions(+), 32 deletions(-) diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 58495b45..0b846099 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -249,10 +249,16 @@ method fillSlot( requestId slotIndex - await market.approveFunds(collateral) - trace "calling fillSlot on contract" - discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1) - trace "fillSlot transaction completed" + try: + await market.approveFunds(collateral) + trace "calling fillSlot on contract" + discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1) + trace "fillSlot transaction completed" + except Marketplace_SlotNotFree as parent: + raise newException( + SlotStateMismatchError, "Failed to fill slot because the slot is not free", + parent, + ) method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = convertEthersError("Failed to free slot"): @@ -327,14 +333,20 @@ method reserveSlot*( market: OnChainMarket, requestId: RequestId, slotIndex: uint64 ) {.async.} = convertEthersError("Failed to reserve slot"): - discard await market.contract - .reserveSlot( - requestId, - slotIndex, - # reserveSlot runs out of gas for unknown reason, but 100k gas covers it - TransactionOverrides(gasLimit: some 100000.u256), - ) - .confirm(1) + try: + discard await market.contract + .reserveSlot( + requestId, + slotIndex, + # reserveSlot runs out of gas for unknown reason, but 100k gas covers it + TransactionOverrides(gasLimit: some 100000.u256), + ) + .confirm(1) + except SlotReservations_ReservationNotAllowed: + raise newException( + SlotReservationNotAllowedError, + "Failed to reserve slot because reservation is not allowed", + ) method canReserveSlot*( market: OnChainMarket, requestId: RequestId, slotIndex: uint64 diff --git a/codex/contracts/marketplace.nim b/codex/contracts/marketplace.nim index 761caada..686414fb 100644 --- a/codex/contracts/marketplace.nim +++ b/codex/contracts/marketplace.nim @@ -53,6 +53,7 @@ type Proofs_ProofAlreadyMarkedMissing* = object of SolidityError Proofs_InvalidProbability* = object of SolidityError Periods_InvalidSecondsPerPeriod* = object of SolidityError + SlotReservations_ReservationNotAllowed* = object of SolidityError proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.} proc token*(marketplace: Marketplace): Address {.contract, view.} diff --git a/codex/market.nim b/codex/market.nim index c5177aeb..dd8e14ba 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -18,6 +18,8 @@ export periods type Market* = ref object of RootObj MarketError* = object of CodexError + SlotStateMismatchError* = object of MarketError + SlotReservationNotAllowedError* = object of MarketError Subscription* = ref object of RootObj OnRequest* = proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].} diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim index 03e2ef2b..13644223 100644 --- a/codex/sales/states/filling.nim +++ b/codex/sales/states/filling.nim @@ -30,6 +30,7 @@ method run*( ): Future[?State] {.async: (raises: []).} = let data = SalesAgent(machine).data let market = SalesAgent(machine).context.market + without (request =? 
data.request): raiseAssert "Request not set" @@ -42,17 +43,16 @@ method run*( err: error "Failure attempting to fill slot: unable to calculate collateral", error = err.msg - return + return some State(SaleErrored(error: err)) debug "Filling slot" try: await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) + except SlotStateMismatchError as e: + debug "Slot is already filled, ignoring slot" + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) except MarketError as e: - if e.msg.contains "Slot is not free": - debug "Slot is already filled, ignoring slot" - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) - else: - return some State(SaleErrored(error: e)) + return some State(SaleErrored(error: e)) # other CatchableErrors are handled "automatically" by the SaleState return some State(SaleFilled()) diff --git a/codex/sales/states/slotreserving.nim b/codex/sales/states/slotreserving.nim index a67c51a0..e9ac8dcd 100644 --- a/codex/sales/states/slotreserving.nim +++ b/codex/sales/states/slotreserving.nim @@ -44,12 +44,11 @@ method run*( try: trace "Reserving slot" await market.reserveSlot(data.requestId, data.slotIndex) + except SlotReservationNotAllowedError as e: + debug "Slot cannot be reserved, ignoring", error = e.msg + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) except MarketError as e: - if e.msg.contains "SlotReservations_ReservationNotAllowed": - debug "Slot cannot be reserved, ignoring", error = e.msg - return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) - else: - return some State(SaleErrored(error: e)) + return some State(SaleErrored(error: e)) # other CatchableErrors are handled "automatically" by the SaleState trace "Slot successfully reserved" diff --git a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index 16806cb2..edf8a62d 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -46,7 +46,8 @@ type subscriptions: Subscriptions config*: MarketplaceConfig canReserveSlot*: bool - reserveSlotThrowError*: ?(ref MarketError) + errorOnReserveSlot*: ?(ref MarketError) + errorOnFillSlot*: ?(ref CatchableError) clock: ?Clock Fulfillment* = object @@ -289,6 +290,9 @@ proc fillSlot*( host: Address, collateral = 0.u256, ) = + if error =? market.errorOnFillSlot: + raise error + let slot = MockSlot( requestId: requestId, slotIndex: slotIndex, @@ -370,7 +374,7 @@ method canProofBeMarkedAsMissing*( method reserveSlot*( market: MockMarket, requestId: RequestId, slotIndex: uint64 ) {.async.} = - if error =? market.reserveSlotThrowError: + if error =? 
market.errorOnReserveSlot: raise error method canReserveSlot*( market: MockMarket, requestId: RequestId, slotIndex: uint64 @@ -381,8 +385,19 @@ method canReserveSlot*( func setCanReserveSlot*(market: MockMarket, canReserveSlot: bool) = market.canReserveSlot = canReserveSlot -func setReserveSlotThrowError*(market: MockMarket, error: ?(ref MarketError)) = - market.reserveSlotThrowError = error +func setErrorOnReserveSlot*(market: MockMarket, error: ref MarketError) = + market.errorOnReserveSlot = + if error.isNil: + none (ref MarketError) + else: + some error + +func setErrorOnFillSlot*(market: MockMarket, error: ref CatchableError) = + market.errorOnFillSlot = + if error.isNil: + none (ref CatchableError) + else: + some error method subscribeRequests*( market: MockMarket, callback: OnRequest diff --git a/tests/codex/sales/states/testfilling.nim b/tests/codex/sales/states/testfilling.nim index 1a26753d..f746b5a8 100644 --- a/tests/codex/sales/states/testfilling.nim +++ b/tests/codex/sales/states/testfilling.nim @@ -1,18 +1,31 @@ -import pkg/unittest2 import pkg/questionable import pkg/codex/contracts/requests import pkg/codex/sales/states/filling import pkg/codex/sales/states/cancelled import pkg/codex/sales/states/failed +import pkg/codex/sales/states/ignored +import pkg/codex/sales/states/errored +import pkg/codex/sales/salesagent +import pkg/codex/sales/salescontext +import ../../../asynctest import ../../examples import ../../helpers +import ../../helpers/mockmarket +import ../../helpers/mockclock suite "sales state 'filling'": let request = StorageRequest.example let slotIndex = request.ask.slots div 2 var state: SaleFilling + var market: MockMarket + var clock: MockClock + var agent: SalesAgent setup: + clock = MockClock.new() + market = MockMarket.new() + let context = SalesContext(market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) state = SaleFilling.new() test "switches to cancelled state when request expires": @@ -22,3 +35,28 @@ suite "sales state 'filling'": test "switches to failed state when request fails": let next = state.onFailed(request) check !next of SaleFailed + + test "run switches to ignored when slot is not free": + let error = newException( + SlotStateMismatchError, "Failed to fill slot because the slot is not free" + ) + market.setErrorOnFillSlot(error) + market.requested.add(request) + market.slotState[request.slotId(slotIndex)] = SlotState.Filled + + let next = !(await state.run(agent)) + check next of SaleIgnored + check SaleIgnored(next).reprocessSlot == false + check SaleIgnored(next).returnBytes + + test "run switches to errored with other errors": + let error = newException(MarketError, "some error") + market.setErrorOnFillSlot(error) + market.requested.add(request) + market.slotState[request.slotId(slotIndex)] = SlotState.Filled + + let next = !(await state.run(agent)) + check next of SaleErrored + + let errored = SaleErrored(next) + check errored.error == error diff --git a/tests/codex/sales/states/testslotreserving.nim b/tests/codex/sales/states/testslotreserving.nim index d9ecdfc8..0e2e2cc7 100644 --- a/tests/codex/sales/states/testslotreserving.nim +++ b/tests/codex/sales/states/testslotreserving.nim @@ -54,15 +54,16 @@ asyncchecksuite "sales state 'SlotReserving'": test "run switches to errored when slot reservation errors": let error = newException(MarketError, "some error") - market.setReserveSlotThrowError(some error) + market.setErrorOnReserveSlot(error) let next = !(await state.run(agent)) check next of SaleErrored let errored = SaleErrored(next) check 
errored.error == error - test "catches reservation not allowed error": - let error = newException(MarketError, "SlotReservations_ReservationNotAllowed") - market.setReserveSlotThrowError(some error) + test "run switches to ignored when reservation is not allowed": + let error = + newException(SlotReservationNotAllowedError, "Reservation is not allowed") + market.setErrorOnReserveSlot(error) let next = !(await state.run(agent)) check next of SaleIgnored check SaleIgnored(next).reprocessSlot == false From 3a312596bf1b7cc6842047112777488bc9f0e4f8 Mon Sep 17 00:00:00 2001 From: munna0908 <88337208+munna0908@users.noreply.github.com> Date: Fri, 21 Mar 2025 07:41:00 +0530 Subject: [PATCH 14/14] deps: upgrade libp2p & constantine (#1167) * upgrade libp2p and constantine * fix libp2p update issues * add missing vendor package * add missing vendor package --- .gitmodules | 10 ++++++++++ codex/blockexchange/engine/engine.nim | 4 +++- codex/blockexchange/network/network.nim | 8 ++++++-- codex/blockexchange/protobuf/message.nim | 10 ++++------ codex/merkletree/codex/coders.nim | 12 ++++++------ vendor/constantine | 2 +- vendor/nim-codex-dht | 2 +- vendor/nim-libp2p | 2 +- vendor/nim-ngtcp2 | 1 + vendor/nim-quic | 1 + 10 files changed, 34 insertions(+), 18 deletions(-) create mode 160000 vendor/nim-ngtcp2 create mode 160000 vendor/nim-quic diff --git a/.gitmodules b/.gitmodules index ece88749..5cc2bfab 100644 --- a/.gitmodules +++ b/.gitmodules @@ -221,3 +221,13 @@ [submodule "vendor/nph"] path = vendor/nph url = https://github.com/arnetheduck/nph.git +[submodule "vendor/nim-quic"] + path = vendor/nim-quic + url = https://github.com/vacp2p/nim-quic.git + ignore = untracked + branch = master +[submodule "vendor/nim-ngtcp2"] + path = vendor/nim-ngtcp2 + url = https://github.com/vacp2p/nim-ngtcp2.git + ignore = untracked + branch = master diff --git a/codex/blockexchange/engine/engine.nim b/codex/blockexchange/engine/engine.nim index befb8ae9..35785cfe 100644 --- a/codex/blockexchange/engine/engine.nim +++ b/codex/blockexchange/engine/engine.nim @@ -678,7 +678,9 @@ proc new*( advertiser: advertiser, ) - proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = + proc peerEventHandler( + peerId: PeerId, event: PeerEvent + ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = if event.kind == PeerEventKind.Joined: await self.setupPeer(peerId) else: diff --git a/codex/blockexchange/network/network.nim b/codex/blockexchange/network/network.nim index 26c07445..d4754110 100644 --- a/codex/blockexchange/network/network.nim +++ b/codex/blockexchange/network/network.nim @@ -323,7 +323,9 @@ method init*(self: BlockExcNetwork) = ## Perform protocol initialization ## - proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = + proc peerEventHandler( + peerId: PeerId, event: PeerEvent + ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = if event.kind == PeerEventKind.Joined: self.setupPeer(peerId) else: @@ -332,7 +334,9 @@ method init*(self: BlockExcNetwork) = self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) - proc handler(conn: Connection, proto: string) {.async.} = + proc handler( + conn: Connection, proto: string + ): Future[void] {.async: (raises: [CancelledError]).} = let peerId = conn.peerId let blockexcPeer = self.getOrCreatePeer(peerId) await blockexcPeer.readLoop(conn) # attach read loop diff --git a/codex/blockexchange/protobuf/message.nim 
b/codex/blockexchange/protobuf/message.nim index 73cb60f1..4db89729 100644 --- a/codex/blockexchange/protobuf/message.nim +++ b/codex/blockexchange/protobuf/message.nim @@ -97,7 +97,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) = pb.write(field, ipb) proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) = - var ipb = initProtoBuffer(maxSize = MaxBlockSize) + var ipb = initProtoBuffer() ipb.write(1, value.blk.cid.data.buffer) ipb.write(2, value.blk.data) ipb.write(3, value.address) @@ -128,7 +128,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) = pb.write(field, ipb) proc protobufEncode*(value: Message): seq[byte] = - var ipb = initProtoBuffer(maxSize = MaxMessageSize) + var ipb = initProtoBuffer() ipb.write(1, value.wantList) for v in value.payload: ipb.write(3, v) @@ -254,16 +254,14 @@ proc decode*( proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] = var value = Message() - pb = initProtoBuffer(msg, maxSize = MaxMessageSize) + pb = initProtoBuffer(msg) ipb: ProtoBuffer sublist: seq[seq[byte]] if ?pb.getField(1, ipb): value.wantList = ?WantList.decode(ipb) if ?pb.getRepeatedField(3, sublist): for item in sublist: - value.payload.add( - ?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)) - ) + value.payload.add(?BlockDelivery.decode(initProtoBuffer(item))) if ?pb.getRepeatedField(4, sublist): for item in sublist: value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item))) diff --git a/codex/merkletree/codex/coders.nim b/codex/merkletree/codex/coders.nim index b8209991..1d50707c 100644 --- a/codex/merkletree/codex/coders.nim +++ b/codex/merkletree/codex/coders.nim @@ -27,11 +27,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint const MaxMerkleProofSize = 1.MiBs.uint proc encode*(self: CodexTree): seq[byte] = - var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize) + var pb = initProtoBuffer() pb.write(1, self.mcodec.uint64) pb.write(2, self.leavesCount.uint64) for node in self.nodes: - var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize) + var nodesPb = initProtoBuffer() nodesPb.write(1, node) nodesPb.finish() pb.write(3, nodesPb) @@ -40,7 +40,7 @@ proc encode*(self: CodexTree): seq[byte] = pb.buffer proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree = - var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize) + var pb = initProtoBuffer(data) var mcodecCode: uint64 var leavesCount: uint64 discard ?pb.getField(1, mcodecCode).mapFailure @@ -63,13 +63,13 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree = CodexTree.fromNodes(mcodec, nodes, leavesCount.int) proc encode*(self: CodexProof): seq[byte] = - var pb = initProtoBuffer(maxSize = MaxMerkleProofSize) + var pb = initProtoBuffer() pb.write(1, self.mcodec.uint64) pb.write(2, self.index.uint64) pb.write(3, self.nleaves.uint64) for node in self.path: - var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize) + var nodesPb = initProtoBuffer() nodesPb.write(1, node) nodesPb.finish() pb.write(4, nodesPb) @@ -78,7 +78,7 @@ proc encode*(self: CodexProof): seq[byte] = pb.buffer proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof = - var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize) + var pb = initProtoBuffer(data) var mcodecCode: uint64 var index: uint64 var nleaves: uint64 diff --git a/vendor/constantine b/vendor/constantine index bc3845aa..8d6a6a38 160000 --- a/vendor/constantine +++ b/vendor/constantine @@ -1 +1 @@ -Subproject commit bc3845aa492b52f7fef047503b1592e830d1a774 
+Subproject commit 8d6a6a38b90fb8ee3ec2230839773e69aab36d80 diff --git a/vendor/nim-codex-dht b/vendor/nim-codex-dht index 4bd3a39e..f6eef1ac 160000 --- a/vendor/nim-codex-dht +++ b/vendor/nim-codex-dht @@ -1 +1 @@ -Subproject commit 4bd3a39e0030f8ee269ef217344b6b59ec2be6dc +Subproject commit f6eef1ac95c70053b2518f1e3909c909ed8701a6 diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index 036e110a..c08d8073 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit 036e110a6080fba1a1662c58cfd8c21f9a548021 +Subproject commit c08d80734989b028b3d1705f2188d783a343aac0 diff --git a/vendor/nim-ngtcp2 b/vendor/nim-ngtcp2 new file mode 160000 index 00000000..6834f475 --- /dev/null +++ b/vendor/nim-ngtcp2 @@ -0,0 +1 @@ +Subproject commit 6834f4756b6af58356ac9c4fef3d71db3c3ae5fe diff --git a/vendor/nim-quic b/vendor/nim-quic new file mode 160000 index 00000000..ddcb31ff --- /dev/null +++ b/vendor/nim-quic @@ -0,0 +1 @@ +Subproject commit ddcb31ffb74b5460ab37fd13547eca90594248bc